Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c71
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c55
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c310
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c58
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c186
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v2_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c266
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c85
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v11_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v8_7.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c58
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c27
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c68
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c100
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h11
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c276
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c304
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.h26
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c29
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h7
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig7
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c218
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c41
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c164
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v13_0_1.h)45
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c70
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c175
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c234
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c218
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c103
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_cp_psp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_hwss.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h37
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h104
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/Makefile6
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c65
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h14
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c5
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c5
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn301.c5
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn302.c5
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn303.c5
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c60
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h16
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c19
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h2
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c2
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c18
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h13
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h10
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c20
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h10
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_1_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_offset.h355
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_sh_mask.h531
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h58
-rw-r--r--drivers/gpu/drm/amd/include/navi10_enum.h2
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c114
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h57
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_v11_0.h3
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_v13_0.h1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c34
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c59
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c123
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c86
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c12
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c51
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c63
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c116
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c24
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_1.c311
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c86
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c63
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_internal.h4
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c17
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c1
-rw-r--r--drivers/gpu/drm/drm_atomic.c28
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c4
-rw-r--r--drivers/gpu/drm/drm_auth.c3
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h4
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c68
-rw-r--r--drivers/gpu/drm/drm_ioctl.c9
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_modeset.c2
-rw-r--r--drivers/gpu/drm/i915/Kconfig2
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c46
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c229
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c2461
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h68
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c175
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c35
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c165
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.h23
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c215
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c96
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.c25
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c42
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c203
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.c34
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c34
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c40
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.c3
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.c27
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c13
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c7
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c1
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c2
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.c5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c7
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_execlists.c55
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c11
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c1
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h27
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c32
-rw-r--r--drivers/gpu/drm/i915/i915_irq.h1
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c52
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h21
-rw-r--r--drivers/gpu/drm/i915/intel_dram.c3
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c119
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c2
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.c1
-rw-r--r--drivers/gpu/drm/lima/lima_trace.h2
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c129
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c17
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c9
-rw-r--r--drivers/gpu/drm/msm/Makefile3
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx.xml.h60
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx.xml.h40
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx.xml.h38
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx.xml.h95
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c29
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_power.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_preempt.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx.xml.h2141
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c55
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h34
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c460
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h4
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c44
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.c33
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_common.xml.h30
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c14
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c26
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h13
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h119
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c230
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h43
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c22
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c43
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c110
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c58
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c54
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c139
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h22
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c202
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h16
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c1508
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h132
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c70
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c23
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h63
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c14
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h34
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h38
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c35
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h7
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c12
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h5
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c135
-rw-r--r--drivers/gpu/drm/msm/disp/mdp_common.xml.h34
-rw-r--r--drivers/gpu/drm/msm/disp/msm_disp_snapshot.c125
-rw-r--r--drivers/gpu/drm/msm/disp/msm_disp_snapshot.h136
-rw-r--r--drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c187
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.c181
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.h8
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.c18
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.h7
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c87
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.h3
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c121
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.h1
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.c21
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.c4
-rw-r--r--drivers/gpu/drm/msm/dp/dp_power.h4
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c6
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h4
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h1722
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c30
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c12
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_phy_10nm.xml.h228
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_phy_14nm.xml.h310
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_phy_20nm.xml.h238
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_phy_28nm.xml.h385
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_phy_28nm_8960.xml.h287
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_phy_5nm.xml.h480
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_phy_7nm.xml.h482
-rw-r--r--drivers/gpu/drm/msm/dsi/mmss_cc.xml.h34
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c31
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h4
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c8
-rw-r--r--drivers/gpu/drm/msm/dsi/sfpb.xml.h34
-rw-r--r--drivers/gpu/drm/msm/edp/edp.xml.h34
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.xml.h34
-rw-r--r--drivers/gpu/drm/msm/hdmi/qfprom.xml.h34
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c31
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c33
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h24
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c8
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c45
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h1
-rw-r--r--drivers/gpu/drm/msm/msm_gem_shrinker.c18
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c1
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c51
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h29
-rw-r--r--drivers/gpu/drm/msm/msm_gpummu.c5
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c22
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h9
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if000c.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c156
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c1
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35510.c4
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ld9040.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c2
-rw-r--r--drivers/gpu/drm/radeon/cik.c4
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c4
-rw-r--r--drivers/gpu/drm/radeon/ni.c4
-rw-r--r--drivers/gpu/drm/radeon/r100.c4
-rw-r--r--drivers/gpu/drm/radeon/r300.c4
-rw-r--r--drivers/gpu/drm/radeon/r420.c5
-rw-r--r--drivers/gpu/drm/radeon/r520.c4
-rw-r--r--drivers/gpu/drm/radeon/r600.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c16
-rw-r--r--drivers/gpu/drm/radeon/rs400.c4
-rw-r--r--drivers/gpu/drm/radeon/rs600.c4
-rw-r--r--drivers/gpu/drm/radeon/rs690.c4
-rw-r--r--drivers/gpu/drm/radeon/rv515.c4
-rw-r--r--drivers/gpu/drm/radeon/rv770.c4
-rw-r--r--drivers/gpu/drm/radeon/si.c4
-rw-r--r--drivers/gpu/drm/selftests/test-drm_framebuffer.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c31
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h5
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c41
-rw-r--r--drivers/gpu/drm/tegra/dc.c2
-rw-r--r--drivers/gpu/drm/tegra/dc.h26
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c4
-rw-r--r--drivers/gpu/drm/tegra/drm.h2
-rw-r--r--drivers/gpu/drm/tegra/hub.c182
-rw-r--r--drivers/gpu/drm/tegra/plane.c23
-rw-r--r--drivers/gpu/drm/tegra/plane.h3
-rw-r--r--drivers/gpu/drm/tegra/sor.c70
-rw-r--r--drivers/gpu/drm/tegra/vic.c2
-rw-r--r--drivers/gpu/drm/tegra/vic.h1
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c5
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_range_manager.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c44
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c2
-rw-r--r--drivers/gpu/host1x/bus.c30
428 files changed, 13359 insertions, 11103 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index db16b3e83694..cf62f43a03da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -269,7 +269,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
uint64_t *size);
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
- struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, bool *table_freed);
+ struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_sync_memory(
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index fdee98fae2ba..4fb15750b9bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -47,12 +47,6 @@ static struct {
spinlock_t mem_limit_lock;
} kfd_mem_limit;
-/* Struct used for amdgpu_amdkfd_bo_validate */
-struct amdgpu_vm_parser {
- uint32_t domain;
- bool wait;
-};
-
static const char * const domain_bit_to_string[] = {
"CPU",
"GTT",
@@ -348,11 +342,9 @@ validate_fail:
return ret;
}
-static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
+static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
{
- struct amdgpu_vm_parser *p = param;
-
- return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
+ return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
}
/* vm_validate_pt_pd_bos - Validate page table and directory BOs
@@ -364,28 +356,23 @@ static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
*/
static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
{
- struct amdgpu_bo *pd = vm->root.base.bo;
+ struct amdgpu_bo *pd = vm->root.bo;
struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
- struct amdgpu_vm_parser param;
int ret;
- param.domain = AMDGPU_GEM_DOMAIN_VRAM;
- param.wait = false;
-
- ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
- &param);
+ ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
if (ret) {
pr_err("failed to validate PT BOs\n");
return ret;
}
- ret = amdgpu_amdkfd_validate(&param, pd);
+ ret = amdgpu_amdkfd_validate_vm_bo(NULL, pd);
if (ret) {
pr_err("failed to validate PD\n");
return ret;
}
- vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
+ vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo);
if (vm->use_cpu_for_update) {
ret = amdgpu_bo_kmap(pd, NULL);
@@ -400,7 +387,7 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
- struct amdgpu_bo *pd = vm->root.base.bo;
+ struct amdgpu_bo *pd = vm->root.bo;
struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
int ret;
@@ -652,7 +639,7 @@ kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
}
}
- gobj = amdgpu_gem_prime_import(&adev->ddev, mem->dmabuf);
+ gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
if (IS_ERR(gobj))
return PTR_ERR(gobj);
@@ -1070,8 +1057,7 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
static int update_gpuvm_pte(struct kgd_mem *mem,
struct kfd_mem_attachment *entry,
- struct amdgpu_sync *sync,
- bool *table_freed)
+ struct amdgpu_sync *sync)
{
struct amdgpu_bo_va *bo_va = entry->bo_va;
struct amdgpu_device *adev = entry->adev;
@@ -1082,7 +1068,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
return ret;
/* Update the page tables */
- ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed);
+ ret = amdgpu_vm_bo_update(adev, bo_va, false);
if (ret) {
pr_err("amdgpu_vm_bo_update failed\n");
return ret;
@@ -1094,8 +1080,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
static int map_bo_to_gpuvm(struct kgd_mem *mem,
struct kfd_mem_attachment *entry,
struct amdgpu_sync *sync,
- bool no_update_pte,
- bool *table_freed)
+ bool no_update_pte)
{
int ret;
@@ -1112,7 +1097,7 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem,
if (no_update_pte)
return 0;
- ret = update_gpuvm_pte(mem, entry, sync, table_freed);
+ ret = update_gpuvm_pte(mem, entry, sync);
if (ret) {
pr_err("update_gpuvm_pte() failed\n");
goto update_gpuvm_pte_failed;
@@ -1166,7 +1151,7 @@ static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
- struct amdgpu_bo *pd = peer_vm->root.base.bo;
+ struct amdgpu_bo *pd = peer_vm->root.bo;
ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
AMDGPU_SYNC_NE_OWNER,
@@ -1233,7 +1218,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
vm->process_info = *process_info;
/* Validate page directory and attach eviction fence */
- ret = amdgpu_bo_reserve(vm->root.base.bo, true);
+ ret = amdgpu_bo_reserve(vm->root.bo, true);
if (ret)
goto reserve_pd_fail;
ret = vm_validate_pt_pd_bos(vm);
@@ -1241,16 +1226,16 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
pr_err("validate_pt_pd_bos() failed\n");
goto validate_pd_fail;
}
- ret = amdgpu_bo_sync_wait(vm->root.base.bo,
+ ret = amdgpu_bo_sync_wait(vm->root.bo,
AMDGPU_FENCE_OWNER_KFD, false);
if (ret)
goto wait_pd_fail;
- ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
+ ret = dma_resv_reserve_shared(vm->root.bo->tbo.base.resv, 1);
if (ret)
goto reserve_shared_fail;
- amdgpu_bo_fence(vm->root.base.bo,
+ amdgpu_bo_fence(vm->root.bo,
&vm->process_info->eviction_fence->base, true);
- amdgpu_bo_unreserve(vm->root.base.bo);
+ amdgpu_bo_unreserve(vm->root.bo);
/* Update process info */
mutex_lock(&vm->process_info->lock);
@@ -1264,7 +1249,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
reserve_shared_fail:
wait_pd_fail:
validate_pd_fail:
- amdgpu_bo_unreserve(vm->root.base.bo);
+ amdgpu_bo_unreserve(vm->root.bo);
reserve_pd_fail:
vm->process_info = NULL;
if (info) {
@@ -1319,7 +1304,7 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
struct amdkfd_process_info *process_info = vm->process_info;
- struct amdgpu_bo *pd = vm->root.base.bo;
+ struct amdgpu_bo *pd = vm->root.bo;
if (!process_info)
return;
@@ -1375,7 +1360,7 @@ void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv)
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
{
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
- struct amdgpu_bo *pd = avm->root.base.bo;
+ struct amdgpu_bo *pd = avm->root.bo;
struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
if (adev->asic_type < CHIP_VEGA10)
@@ -1406,8 +1391,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
- AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_flags = 0;
@@ -1610,8 +1594,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
}
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
- struct kgd_dev *kgd, struct kgd_mem *mem,
- void *drm_priv, bool *table_freed)
+ struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
@@ -1699,7 +1682,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
entry->va, entry->va + bo_size, entry);
ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
- is_invalid_userptr, table_freed);
+ is_invalid_userptr);
if (ret) {
pr_err("Failed to map bo to gpuvm\n");
goto out_unreserve;
@@ -2149,7 +2132,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
continue;
kfd_mem_dmaunmap_attachment(mem, attachment);
- ret = update_gpuvm_pte(mem, attachment, &sync, NULL);
+ ret = update_gpuvm_pte(mem, attachment, &sync);
if (ret) {
pr_err("%s: update PTE failed\n", __func__);
/* make sure this gets validated again */
@@ -2355,7 +2338,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
continue;
kfd_mem_dmaunmap_attachment(mem, attachment);
- ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL);
+ ret = update_gpuvm_pte(mem, attachment, &sync_obj);
if (ret) {
pr_debug("Memory eviction: update PTE failed. Try again\n");
goto validate_map_fail;
@@ -2402,7 +2385,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
/* Attach eviction fence to PD / PT BOs */
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
- struct amdgpu_bo *bo = peer_vm->root.base.bo;
+ struct amdgpu_bo *bo = peer_vm->root.bo;
amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 25655414e9c0..a152363b0254 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -799,7 +799,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false, NULL);
+ r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
if (r)
return r;
@@ -810,7 +810,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
bo_va = fpriv->csa_va;
BUG_ON(!bo_va);
- r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
@@ -829,7 +829,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (bo_va == NULL)
continue;
- r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
@@ -850,7 +850,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
+ p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
if (amdgpu_vm_debug) {
/* Invalidate all BOs to test for userspace bugs */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index a9bbb0034e1e..536005bff24a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1304,11 +1304,11 @@ static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused)
seq_printf(m, "pid:%d\tProcess:%s ----------\n",
vm->task_info.pid, vm->task_info.process_name);
- r = amdgpu_bo_reserve(vm->root.base.bo, true);
+ r = amdgpu_bo_reserve(vm->root.bo, true);
if (r)
break;
amdgpu_debugfs_vm_bo_info(vm, m);
- amdgpu_bo_unreserve(vm->root.base.bo);
+ amdgpu_bo_unreserve(vm->root.bo);
}
mutex_unlock(&dev->filelist_mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6a242ec3f7ef..d303e88e3c23 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1369,6 +1369,38 @@ def_value:
adev->pm.smu_prv_buffer_size = 0;
}
+static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
+{
+ if (!(adev->flags & AMD_IS_APU) ||
+ adev->asic_type < CHIP_RAVEN)
+ return 0;
+
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+ if (adev->pdev->device == 0x15dd)
+ adev->apu_flags |= AMD_APU_IS_RAVEN;
+ if (adev->pdev->device == 0x15d8)
+ adev->apu_flags |= AMD_APU_IS_PICASSO;
+ break;
+ case CHIP_RENOIR:
+ if ((adev->pdev->device == 0x1636) ||
+ (adev->pdev->device == 0x164c))
+ adev->apu_flags |= AMD_APU_IS_RENOIR;
+ else
+ adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
+ break;
+ case CHIP_VANGOGH:
+ adev->apu_flags |= AMD_APU_IS_VANGOGH;
+ break;
+ case CHIP_YELLOW_CARP:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/**
* amdgpu_device_check_arguments - validate module params
*
@@ -3386,6 +3418,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->psp.mutex);
mutex_init(&adev->notifier_lock);
+ r = amdgpu_device_init_apu_flags(adev);
+ if (r)
+ return r;
+
r = amdgpu_device_check_arguments(adev);
if (r)
return r;
@@ -4124,6 +4160,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
{
struct dma_fence *fence = NULL, *next = NULL;
struct amdgpu_bo *shadow;
+ struct amdgpu_bo_vm *vmbo;
long r = 1, tmo;
if (amdgpu_sriov_runtime(adev))
@@ -4133,8 +4170,8 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
dev_info(adev->dev, "recover vram bo from shadow start\n");
mutex_lock(&adev->shadow_list_lock);
- list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
-
+ list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
+ shadow = &vmbo->bo;
/* No need to recover an evicted BO */
if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
@@ -4303,6 +4340,7 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
+ case CHIP_BEIGE_GOBY:
case CHIP_VANGOGH:
case CHIP_ALDEBARAN:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index e1b6f5891759..43e7b61d1c5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -325,7 +325,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
return 0;
}
-int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
+int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance,
int *major, int *minor, int *revision)
{
struct binary_header *bhdr;
@@ -357,7 +357,7 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
for (j = 0; j < num_ips; j++) {
ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
- if (le16_to_cpu(ip->hw_id) == hw_id) {
+ if ((le16_to_cpu(ip->hw_id) == hw_id) && (ip->number_instance == number_instance)) {
if (major)
*major = ip->major;
if (minor)
@@ -373,6 +373,14 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
return -EINVAL;
}
+
+int amdgpu_discovery_get_vcn_version(struct amdgpu_device *adev, int vcn_instance,
+ int *major, int *minor, int *revision)
+{
+ return amdgpu_discovery_get_ip_version(adev, VCN_HWID,
+ vcn_instance, major, minor, revision);
+}
+
void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
struct binary_header *bhdr;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index 1b1ae21b1037..48e6b88cfdfe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -30,8 +30,11 @@
void amdgpu_discovery_fini(struct amdgpu_device *adev);
int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev);
void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev);
-int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
+int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance,
int *major, int *minor, int *revision);
+
+int amdgpu_discovery_get_vcn_version(struct amdgpu_device *adev, int vcn_instance,
+ int *major, int *minor, int *revision);
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev);
#endif /* __AMDGPU_DISCOVERY__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 2b6a66c849d4..8e5a7ac8c36f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -1046,11 +1046,12 @@ int amdgpu_display_gem_fb_init(struct drm_device *dev,
rfb->base.obj[0] = obj;
drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+
+ ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
if (ret)
goto err;
- ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
if (ret)
goto err;
@@ -1070,9 +1071,6 @@ int amdgpu_display_gem_fb_verify_and_init(
rfb->base.obj[0] = obj;
drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
- if (ret)
- goto err;
/* Verify that the modifier is supported. */
if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
@@ -1088,6 +1086,10 @@ int amdgpu_display_gem_fb_verify_and_init(
if (ret)
goto err;
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+ if (ret)
+ goto err;
+
return 0;
err:
drm_dbg_kms(dev, "Failed to verify and init gem fb: %d\n", ret);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 23219fc3b28c..ae6ab93c868b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -105,9 +105,21 @@ static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = attach->dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ int r;
/* pin buffer into GTT */
- return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+ r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+ if (r)
+ return r;
+
+ if (bo->tbo.moving) {
+ r = dma_fence_wait(bo->tbo.moving, true);
+ if (r) {
+ amdgpu_bo_unpin(bo);
+ return r;
+ }
+ }
+ return 0;
}
/**
@@ -383,7 +395,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
struct amdgpu_vm *vm = bo_base->vm;
- struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
+ struct dma_resv *resv = vm->root.bo->tbo.base.resv;
if (ticket) {
/* When we get an error here it means that somebody
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index d2673119e0d1..a959624d576b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -160,6 +160,7 @@ int amdgpu_smu_pptable_id = -1;
* highest. That helps saving some idle power.
* DISABLE_FRACTIONAL_PWM (bit 2) disabled by default
* PSR (bit 3) disabled by default
+ * EDP NO POWER SEQUENCING (bit 4) disabled by default
*/
uint amdgpu_dc_feature_mask = 2;
uint amdgpu_dc_debug_mask;
@@ -837,8 +838,23 @@ module_param_named(tmz, amdgpu_tmz, int, 0444);
/**
* DOC: freesync_video (uint)
- * Enabled the optimization to adjust front porch timing to achieve seamless mode change experience
- * when setting a freesync supported mode for which full modeset is not needed.
+ * Enable the optimization to adjust front porch timing to achieve seamless
+ * mode change experience when setting a freesync supported mode for which full
+ * modeset is not needed.
+ *
+ * The Display Core will add a set of modes derived from the base FreeSync
+ * video mode into the corresponding connector's mode list based on commonly
+ * used refresh rates and VRR range of the connected display, when users enable
+ * this feature. From the userspace perspective, they can see a seamless mode
+ * change experience when changing between different refresh rates under the
+ * same resolution. Additionally, userspace applications such as Video playback
+ * can read this modeset list and change the refresh rate based on the video
+ * frame rate. Finally, the userspace can also derive an appropriate mode for a
+ * particular refresh rate based on the FreeSync Mode and add it to the
+ * connector's mode list.
+ *
+ * Note: This is an experimental feature.
+ *
* The default value: 0 (off).
*/
MODULE_PARM_DESC(
@@ -1152,6 +1168,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
/* Renoir */
+ {0x1002, 0x15E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
{0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
{0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
@@ -1183,6 +1200,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x73E0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73E2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73E3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
/* Aldebaran */
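A hedged usage sketch for the freesync_video parameter documented in the DOC: freesync_video section above. The parameter name is taken from that section; the amdgpu.<param> boot syntax and the connector name card0-HDMI-A-1 are illustrative assumptions only.

  # Opt in to the experimental FreeSync video optimization at module load time
  modprobe amdgpu freesync_video=1
  # ...or equivalently on the kernel command line:
  #   amdgpu.freesync_video=1
  # The derived FreeSync modes should then appear in the connector's mode list
  cat /sys/class/drm/card0-HDMI-A-1/modes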
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
index dbebbe16e3b3..d94c5419ec25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -69,13 +69,13 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
dev = PCI_SLOT(adev->pdev->devfn);
fn = PCI_FUNC(adev->pdev->devfn);
- ret = amdgpu_bo_reserve(fpriv->vm.root.base.bo, false);
+ ret = amdgpu_bo_reserve(fpriv->vm.root.bo, false);
if (ret) {
DRM_ERROR("Fail to reserve bo\n");
return;
}
amdgpu_vm_get_memory(&fpriv->vm, &vram_mem, &gtt_mem, &cpu_mem);
- amdgpu_bo_unreserve(fpriv->vm.root.base.bo);
+ amdgpu_bo_unreserve(fpriv->vm.root.bo);
seq_printf(m, "pdev:\t%04x:%02x:%02x.%d\npasid:\t%u\n", domain, bus,
dev, fn, fpriv->vm.pasid);
seq_printf(m, "vram mem:\t%llu kB\n", vram_mem/1024UL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 1313784605b0..b36405170ff3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -300,7 +300,6 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
* @adev: amdgpu_device pointer
* @offset: offset into the GPU's gart aperture
* @pages: number of pages to bind
- * @pagelist: pages to bind
* @dma_addr: DMA addresses of pages
* @flags: page table entry flags
*
@@ -309,7 +308,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
* Returns 0 for success, -EINVAL for failure.
*/
int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
- int pages, struct page **pagelist, dma_addr_t *dma_addr,
+ int pages, dma_addr_t *dma_addr,
uint64_t flags)
{
if (!adev->gart.ready) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index f53f6a7b9b60..78895413cf9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -64,7 +64,6 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
int pages, dma_addr_t *dma_addr, uint64_t flags,
void *dst);
int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
- int pages, struct page **pagelist,
- dma_addr_t *dma_addr, uint64_t flags);
+ int pages, dma_addr_t *dma_addr, uint64_t flags);
void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 0780e8d18992..a25160797b61 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -170,7 +170,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
return -EPERM;
if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
- abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
+ abo->tbo.base.resv != vm->root.bo->tbo.base.resv)
return -EPERM;
r = amdgpu_bo_reserve(abo, false);
@@ -320,11 +320,11 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
}
if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
- r = amdgpu_bo_reserve(vm->root.base.bo, false);
+ r = amdgpu_bo_reserve(vm->root.bo, false);
if (r)
return r;
- resv = vm->root.base.bo->tbo.base.resv;
+ resv = vm->root.bo->tbo.base.resv;
}
initial_domain = (u32)(0xffffffff & args->in.domains);
@@ -353,9 +353,9 @@ retry:
if (!r) {
struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
- abo->parent = amdgpu_bo_ref(vm->root.base.bo);
+ abo->parent = amdgpu_bo_ref(vm->root.bo);
}
- amdgpu_bo_unreserve(vm->root.base.bo);
+ amdgpu_bo_unreserve(vm->root.bo);
}
if (r)
return r;
@@ -612,7 +612,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (operation == AMDGPU_VA_OP_MAP ||
operation == AMDGPU_VA_OP_REPLACE) {
- r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
goto error;
}
@@ -842,7 +842,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
}
for (base = robj->vm_bo; base; base = base->next)
if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
- amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) {
+ amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
r = -EINVAL;
amdgpu_bo_unreserve(robj);
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 0174f7817ce2..d0b8d415b63b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -562,6 +562,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
case CHIP_NAVI14:
case CHIP_NAVI12:
case CHIP_VANGOGH:
+ case CHIP_YELLOW_CARP:
/* Don't enable it by default yet.
*/
if (amdgpu_tmz < 1) {
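The hunk above adds Yellow Carp to the ASICs that honour the TMZ (Trusted Memory Zone) setting while still leaving it off by default. A hedged sketch of opting in explicitly via the tmz parameter declared in the amdgpu_drv.c context shown earlier in this diff (module_param_named(tmz, ...)); the -1 auto / 0 off / 1 on convention is assumed from the existing parameter description:

  # Enable TMZ explicitly instead of relying on the 'auto' default
  modprobe amdgpu tmz=1
  # ...or on the kernel command line:
  #   amdgpu.tmz=1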
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 7dad44e73cf6..0d01cfaca77e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -278,6 +278,21 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev)
return true;
}
+static void amdgpu_restore_msix(struct amdgpu_device *adev)
+{
+ u16 ctrl;
+
+ pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+ if (!(ctrl & PCI_MSIX_FLAGS_ENABLE))
+ return;
+
+ /* VF FLR */
+ ctrl &= ~PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
+ ctrl |= PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
+}
+
/**
* amdgpu_irq_init - initialize interrupt handling
*
@@ -569,6 +584,9 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
int i, j, k;
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_restore_msix(adev);
+
for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
if (!adev->irq.client[i].sources)
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 425596c2f6f8..96ef3f1051d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -124,6 +124,22 @@ void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
mutex_unlock(&mgpu_info.mutex);
}
+static void amdgpu_get_audio_func(struct amdgpu_device *adev)
+{
+ struct pci_dev *p = NULL;
+
+ p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
+ adev->pdev->bus->number, 1);
+ if (p) {
+ pm_runtime_get_sync(&p->dev);
+
+ pm_runtime_mark_last_busy(&p->dev);
+ pm_runtime_put_autosuspend(&p->dev);
+
+ pci_dev_put(p);
+ }
+}
+
/**
* amdgpu_driver_load_kms - Main load function for KMS.
*
@@ -213,9 +229,35 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
DPM_FLAG_MAY_SKIP_RESUME);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
+
pm_runtime_allow(dev->dev);
+
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
+
+ /*
+ * For runpm implemented via BACO, PMFW will handle the
+ * timing for BACO in and out:
+ * - put ASIC into BACO state only when both video and
+ * audio functions are in D3 state.
+ * - pull ASIC out of BACO state when either video or
+ * audio function is in D0 state.
+ * Also, at startup, PMFW assumes both functions are in
+ * D0 state.
+ *
+ * So if snd driver was loaded prior to amdgpu driver
+ * and audio function was put into D3 state, there will
+ * be no PMFW-aware D-state transition(D0->D3) on runpm
+	 * suspend. Thus BACO will not be kicked in correctly.
+ *
+ * Via amdgpu_get_audio_func(), the audio dev is put
+ * into D0 state. Then there will be a PMFW-aware D-state
+ * transition(D0->D3) on runpm suspend.
+ */
+ if (amdgpu_device_supports_baco(dev) &&
+ !(adev->flags & AMD_IS_APU) &&
+ (adev->asic_type >= CHIP_NAVI10))
+ amdgpu_get_audio_func(adev);
}
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD))
@@ -1220,7 +1262,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
}
pasid = fpriv->vm.pasid;
- pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);
+ pd = amdgpu_bo_ref(fpriv->vm.root.bo);
amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
amdgpu_vm_fini(adev, &fpriv->vm);
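The comment block above states that PMFW only enters BACO once both the video function (.0) and the audio function (.1) of the dGPU are in D3. A hedged sketch for observing those runtime-PM states from userspace; the 0000:03:00.x addresses are placeholders for the dGPU's actual PCI BDF:

  # Runtime-PM status of the GPU video and HDMI audio functions
  cat /sys/bus/pci/devices/0000:03:00.0/power/runtime_status   # video function
  cat /sys/bus/pci/devices/0000:03:00.1/power/runtime_status   # audio function
  # both should read "suspended" before the ASIC can be put into BACO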
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index d6c54c7f7679..4b153daf283d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -160,7 +160,7 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
struct mm_struct *mm, struct page **pages,
uint64_t start, uint64_t npages,
struct hmm_range **phmm_range, bool readonly,
- bool mmap_locked)
+ bool mmap_locked, void *owner)
{
struct hmm_range *hmm_range;
unsigned long timeout;
@@ -185,6 +185,7 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
hmm_range->hmm_pfns = pfns;
hmm_range->start = start;
hmm_range->end = start + npages * PAGE_SIZE;
+ hmm_range->dev_private_owner = owner;
	/* Assuming 512MB takes maximum 1 second to fault page address */
timeout = max(npages >> 17, 1ULL) * HMM_RANGE_DEFAULT_TIMEOUT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
index 7f7d37a457c3..14a3c1864085 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
@@ -34,7 +34,7 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
struct mm_struct *mm, struct page **pages,
uint64_t start, uint64_t npages,
struct hmm_range **phmm_range, bool readonly,
- bool mmap_locked);
+ bool mmap_locked, void *owner);
int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
#if defined(CONFIG_HMM_MIRROR)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index 25ee53545837..45295dce5c3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -93,6 +93,8 @@ struct amdgpu_nbio_funcs {
void (*enable_aspm)(struct amdgpu_device *adev,
bool enable);
void (*program_aspm)(struct amdgpu_device *adev);
+ void (*apply_lc_spc_mode_wa)(struct amdgpu_device *adev);
+ void (*apply_l1_link_width_reconfig_wa)(struct amdgpu_device *adev);
};
struct amdgpu_nbio {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 22fb4ab43eef..261283f5b424 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -54,29 +54,42 @@
static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
- struct amdgpu_bo_user *ubo;
amdgpu_bo_kunmap(bo);
if (bo->tbo.base.import_attach)
drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
drm_gem_object_release(&bo->tbo.base);
+ amdgpu_bo_unref(&bo->parent);
+ kvfree(bo);
+}
+
+static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
+{
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+ struct amdgpu_bo_user *ubo;
+
+ ubo = to_amdgpu_bo_user(bo);
+ kfree(ubo->metadata);
+ amdgpu_bo_destroy(tbo);
+}
+
+static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+ struct amdgpu_bo_vm *vmbo;
+
+ vmbo = to_amdgpu_bo_vm(bo);
/* in case amdgpu_device_recover_vram got NULL of bo->parent */
- if (!list_empty(&bo->shadow_list)) {
+ if (!list_empty(&vmbo->shadow_list)) {
mutex_lock(&adev->shadow_list_lock);
- list_del_init(&bo->shadow_list);
+ list_del_init(&vmbo->shadow_list);
mutex_unlock(&adev->shadow_list_lock);
}
- amdgpu_bo_unref(&bo->parent);
-
- if (bo->tbo.type != ttm_bo_type_kernel) {
- ubo = to_amdgpu_bo_user(bo);
- kfree(ubo->metadata);
- }
- kvfree(bo);
+ amdgpu_bo_destroy(tbo);
}
/**
@@ -91,8 +104,11 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
*/
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
- if (bo->destroy == &amdgpu_bo_destroy)
+ if (bo->destroy == &amdgpu_bo_destroy ||
+ bo->destroy == &amdgpu_bo_user_destroy ||
+ bo->destroy == &amdgpu_bo_vm_destroy)
return true;
+
return false;
}
@@ -545,7 +561,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (bo == NULL)
return -ENOMEM;
drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
- INIT_LIST_HEAD(&bo->shadow_list);
bo->vm_bo = NULL;
bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
bp->domain;
@@ -568,9 +583,12 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (bp->type == ttm_bo_type_kernel)
bo->tbo.priority = 1;
+ if (!bp->destroy)
+ bp->destroy = &amdgpu_bo_destroy;
+
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
&bo->placement, page_align, &ctx, NULL,
- bp->resv, &amdgpu_bo_destroy);
+ bp->resv, bp->destroy);
if (unlikely(r != 0))
return r;
@@ -634,6 +652,7 @@ int amdgpu_bo_create_user(struct amdgpu_device *adev,
int r;
bp->bo_ptr_size = sizeof(struct amdgpu_bo_user);
+ bp->destroy = &amdgpu_bo_user_destroy;
r = amdgpu_bo_create(adev, bp, &bo_ptr);
if (r)
return r;
@@ -665,11 +684,13 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
* num of amdgpu_vm_pt entries.
*/
BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
+ bp->destroy = &amdgpu_bo_vm_destroy;
r = amdgpu_bo_create(adev, bp, &bo_ptr);
if (r)
return r;
*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
+ INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list);
return r;
}
@@ -714,12 +735,12 @@ retry:
*
* Insert a BO to the shadow list.
*/
-void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo *bo)
+void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(vmbo->bo.tbo.bdev);
mutex_lock(&adev->shadow_list_lock);
- list_add_tail(&bo->shadow_list, &adev->shadow_list);
+ list_add_tail(&vmbo->shadow_list, &adev->shadow_list);
mutex_unlock(&adev->shadow_list_lock);
}
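
Illustrative sketch (not part of the patch): the amdgpu_object.c hunks above split BO destruction into per-type callbacks selected at creation time through bp->destroy, while amdgpu_bo_is_amdgpu_bo() keeps identifying driver-owned BOs by comparing the destroy pointer. A minimal user-space sketch of that pattern, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* simplified stand-in for ttm_buffer_object/amdgpu_bo; names are hypothetical */
struct bo {
	void (*destroy)(struct bo *bo);
};

static void base_destroy(struct bo *bo)
{
	free(bo);				/* common teardown */
}

static void user_destroy(struct bo *bo)
{
	/* would free user metadata first, then fall through to the base */
	base_destroy(bo);
}

static void vm_destroy(struct bo *bo)
{
	/* would unlink from the shadow list first, then fall through */
	base_destroy(bo);
}

/* mirrors amdgpu_bo_is_amdgpu_bo(): the destroy callback identifies our BOs */
static bool is_our_bo(const struct bo *bo)
{
	return bo->destroy == base_destroy ||
	       bo->destroy == user_destroy ||
	       bo->destroy == vm_destroy;
}

int main(void)
{
	struct bo *bo = calloc(1, sizeof(*bo));

	if (!bo)
		return 1;
	bo->destroy = vm_destroy;		/* chosen at creation, like bp->destroy */
	printf("ours: %d\n", is_our_bo(bo));
	bo->destroy(bo);
	return 0;
}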
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index c03dfd298f45..e72f329e7f18 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -55,7 +55,8 @@ struct amdgpu_bo_param {
u64 flags;
enum ttm_bo_type type;
bool no_wait_gpu;
- struct dma_resv *resv;
+ struct dma_resv *resv;
+ void (*destroy)(struct ttm_buffer_object *bo);
};
/* bo virtual addresses in a vm */
@@ -107,9 +108,6 @@ struct amdgpu_bo {
#ifdef CONFIG_MMU_NOTIFIER
struct mmu_interval_notifier notifier;
#endif
-
- struct list_head shadow_list;
-
struct kgd_mem *kfd_bo;
};
@@ -125,7 +123,8 @@ struct amdgpu_bo_user {
struct amdgpu_bo_vm {
struct amdgpu_bo bo;
struct amdgpu_bo *shadow;
- struct amdgpu_vm_pt entries[];
+ struct list_head shadow_list;
+ struct amdgpu_vm_bo_base entries[];
};
static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
@@ -331,7 +330,7 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
uint64_t *gtt_mem, uint64_t *cpu_mem);
-void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo *bo);
+void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
struct dma_fence **fence);
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 6046123d0562..3ec5099ffeb6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -171,11 +171,81 @@ Err_out:
return ret;
}
+/*
+ * Helper function to query a PSP runtime database entry
+ *
+ * @adev: amdgpu_device pointer
+ * @entry_type: the type of PSP runtime database entry
+ * @db_entry: runtime database entry pointer
+ *
+ * Return false if the runtime database doesn't exist or the entry is
+ * invalid, or true if the requested database entry is found and copied
+ * to @db_entry
+ */
+static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
+ enum psp_runtime_entry_type entry_type,
+ void *db_entry)
+{
+ uint64_t db_header_pos, db_dir_pos;
+ struct psp_runtime_data_header db_header = {0};
+ struct psp_runtime_data_directory db_dir = {0};
+ bool ret = false;
+ int i;
+
+ db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
+ db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
+
+ /* read runtime db header from vram */
+ amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
+ sizeof(struct psp_runtime_data_header), false);
+
+ if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
+ /* runtime db doesn't exist, exit */
+ dev_warn(adev->dev, "PSP runtime database doesn't exist\n");
+ return false;
+ }
+
+ /* read runtime database entry from vram */
+ amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
+ sizeof(struct psp_runtime_data_directory), false);
+
+ if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
+ /* invalid db entry count, exit */
+ dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
+ return false;
+ }
+
+ /* look up the requested entry type */
+ for (i = 0; i < db_dir.entry_count && !ret; i++) {
+ if (db_dir.entry_list[i].entry_type == entry_type) {
+ switch (entry_type) {
+ case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
+ if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
+ /* invalid db entry size */
+ dev_warn(adev->dev, "Invalid PSP runtime database entry size\n");
+ return false;
+ }
+ /* read runtime database entry */
+ amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
+ (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
+ ret = true;
+ break;
+ default:
+ ret = false;
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
static int psp_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
int ret;
+ struct psp_runtime_boot_cfg_entry boot_cfg_entry;
+ struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
if (!amdgpu_sriov_vf(adev)) {
ret = psp_init_microcode(psp);
@@ -191,15 +261,39 @@ static int psp_sw_init(void *handle)
}
}
- ret = psp_memory_training_init(psp);
- if (ret) {
- DRM_ERROR("Failed to initialize memory training!\n");
- return ret;
+ memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
+ if (psp_get_runtime_db_entry(adev,
+ PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
+ &boot_cfg_entry)) {
+ psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
+ if ((psp->boot_cfg_bitmask) &
+ BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
+ /* If the PSP runtime database exists, only
+ * enable two-stage memory training when the
+ * TWO_STAGE_DRAM_TRAINING bit is set in the
+ * runtime database */
+ mem_training_ctx->enable_mem_training = true;
+ }
+
+ } else {
+ /* If the PSP runtime database doesn't exist or
+ * is invalid, force-enable two-stage memory
+ * training */
+ mem_training_ctx->enable_mem_training = true;
}
- ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
- if (ret) {
- DRM_ERROR("Failed to process memory training!\n");
- return ret;
+
+ if (mem_training_ctx->enable_mem_training) {
+ ret = psp_memory_training_init(psp);
+ if (ret) {
+ DRM_ERROR("Failed to initialize memory training!\n");
+ return ret;
+ }
+
+ ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
+ if (ret) {
+ DRM_ERROR("Failed to process memory training!\n");
+ return ret;
+ }
}
if (adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_SIENNA_CICHLID) {
@@ -551,7 +645,30 @@ int psp_get_fw_attestation_records_addr(struct psp_context *psp,
return ret;
}
-static int psp_boot_config_set(struct amdgpu_device *adev)
+static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
+{
+ struct psp_context *psp = &adev->psp;
+ struct psp_gfx_cmd_resp *cmd = psp->cmd;
+ int ret;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
+
+ cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
+ cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+ if (!ret) {
+ *boot_cfg =
+ (cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
+ }
+
+ return ret;
+}
+
+static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
{
struct psp_context *psp = &adev->psp;
struct psp_gfx_cmd_resp *cmd = psp->cmd;
@@ -563,8 +680,8 @@ static int psp_boot_config_set(struct amdgpu_device *adev)
cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
- cmd->cmd.boot_cfg.boot_config = BOOT_CONFIG_GECC;
- cmd->cmd.boot_cfg.boot_config_valid = BOOT_CONFIG_GECC;
+ cmd->cmd.boot_cfg.boot_config = boot_cfg;
+ cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
return psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
}
@@ -1212,19 +1329,62 @@ static int psp_ras_terminate(struct psp_context *psp)
static int psp_ras_initialize(struct psp_context *psp)
{
int ret;
+ uint32_t boot_cfg = 0xFF;
+ struct amdgpu_device *adev = psp->adev;
/*
* TODO: bypass the initialize in sriov for now
*/
- if (amdgpu_sriov_vf(psp->adev))
+ if (amdgpu_sriov_vf(adev))
return 0;
- if (!psp->adev->psp.ta_ras_ucode_size ||
- !psp->adev->psp.ta_ras_start_addr) {
- dev_info(psp->adev->dev, "RAS: optional ras ta ucode is not available\n");
+ if (!adev->psp.ta_ras_ucode_size ||
+ !adev->psp.ta_ras_start_addr) {
+ dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
return 0;
}
+ if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
+ /* query GECC enablement status from the boot config:
+ * boot_cfg: 1 = GECC is enabled, 0 = GECC is disabled
+ */
+ ret = psp_boot_config_get(adev, &boot_cfg);
+ if (ret)
+ dev_warn(adev->dev, "PSP get boot config failed\n");
+
+ if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) {
+ if (!boot_cfg) {
+ dev_info(adev->dev, "GECC is disabled\n");
+ } else {
+ /* disable GECC in the next boot cycle if RAS is
+ * disabled by the module parameters amdgpu_ras_enable
+ * and/or amdgpu_ras_mask, or if the boot_config_get
+ * call failed
+ */
+ ret = psp_boot_config_set(adev, 0);
+ if (ret)
+ dev_warn(adev->dev, "PSP set boot config failed\n");
+ else
+ dev_warn(adev->dev, "GECC will be disabled in next boot cycle "
+ "if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
+ }
+ } else {
+ if (boot_cfg == 1) {
+ dev_info(adev->dev, "GECC is enabled\n");
+ } else {
+ /* enable GECC in the next boot cycle if it is disabled
+ * in the boot config, or force-enable GECC if getting
+ * the boot configuration failed
+ */
+ ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
+ if (ret)
+ dev_warn(adev->dev, "PSP set boot config failed\n");
+ else
+ dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
+ }
+ }
+ }
+
if (!psp->ras.ras_initialized) {
ret = psp_ras_init_shared_buf(psp);
if (ret)
@@ -1945,12 +2105,6 @@ static int psp_hw_start(struct psp_context *psp)
return ret;
}
- if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
- ret = psp_boot_config_set(adev);
- if (ret)
- dev_warn(adev->dev, "PSP set boot config failed\n");
- }
-
ret = psp_tmr_init(psp);
if (ret) {
DRM_ERROR("PSP tmr init failed!\n");
@@ -2188,10 +2342,7 @@ static int psp_load_smu_fw(struct psp_context *psp)
if ((amdgpu_in_reset(adev) &&
ras && adev->ras_enabled &&
(adev->asic_type == CHIP_ARCTURUS ||
- adev->asic_type == CHIP_VEGA20)) ||
- (adev->in_runpm &&
- adev->asic_type >= CHIP_NAVI10 &&
- adev->asic_type <= CHIP_NAVI12)) {
+ adev->asic_type == CHIP_VEGA20))) {
ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
if (ret) {
DRM_WARN("Failed to set MP1 state prepare for reload\n");
@@ -2562,10 +2713,12 @@ static int psp_resume(void *handle)
DRM_INFO("PSP is resuming...\n");
- ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
- if (ret) {
- DRM_ERROR("Failed to process memory training!\n");
- return ret;
+ if (psp->mem_train_ctx.enable_mem_training) {
+ ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
+ if (ret) {
+ DRM_ERROR("Failed to process memory training!\n");
+ return ret;
+ }
}
mutex_lock(&adev->firmware.mutex);
@@ -2749,7 +2902,7 @@ int psp_init_asd_microcode(struct psp_context *psp,
asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
- adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
+ adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
@@ -2785,7 +2938,7 @@ int psp_init_toc_microcode(struct psp_context *psp,
toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
adev->psp.toc_fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
- adev->psp.toc_feature_version = le32_to_cpu(toc_hdr->ucode_feature_version);
+ adev->psp.toc_feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
adev->psp.toc_bin_size = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
adev->psp.toc_start_addr = (uint8_t *)toc_hdr +
le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
@@ -2797,6 +2950,50 @@ out:
return err;
}
+static int psp_init_sos_base_fw(struct amdgpu_device *adev)
+{
+ const struct psp_firmware_header_v1_0 *sos_hdr;
+ const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
+ uint8_t *ucode_array_start_addr;
+
+ sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
+ ucode_array_start_addr = (uint8_t *)sos_hdr +
+ le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
+
+ if (adev->gmc.xgmi.connected_to_cpu || (adev->asic_type != CHIP_ALDEBARAN)) {
+ adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
+ adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
+
+ adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos.offset_bytes);
+ adev->psp.sys_start_addr = ucode_array_start_addr;
+
+ adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos.size_bytes);
+ adev->psp.sos_start_addr = ucode_array_start_addr +
+ le32_to_cpu(sos_hdr->sos.offset_bytes);
+ } else {
+ /* Load alternate PSP SOS FW */
+ sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
+
+ adev->psp.sos_fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
+ adev->psp.sos_feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
+
+ adev->psp.sys_bin_size = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
+ adev->psp.sys_start_addr = ucode_array_start_addr +
+ le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
+
+ adev->psp.sos_bin_size = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
+ adev->psp.sos_start_addr = ucode_array_start_addr +
+ le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
+ }
+
+ if ((adev->psp.sys_bin_size == 0) || (adev->psp.sos_bin_size == 0)) {
+ dev_warn(adev->dev, "PSP SOS FW not available");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int psp_init_sos_microcode(struct psp_context *psp,
const char *chip_name)
{
@@ -2807,6 +3004,7 @@ int psp_init_sos_microcode(struct psp_context *psp,
const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
int err = 0;
+ uint8_t *ucode_array_start_addr;
if (!chip_name) {
dev_err(adev->dev, "invalid chip name for sos microcode\n");
@@ -2823,47 +3021,45 @@ int psp_init_sos_microcode(struct psp_context *psp,
goto out;
sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
+ ucode_array_start_addr = (uint8_t *)sos_hdr +
+ le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
switch (sos_hdr->header.header_version_major) {
case 1:
- adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
- adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
- adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
- adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes);
- adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
- le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
- adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
- le32_to_cpu(sos_hdr->sos_offset_bytes);
+ err = psp_init_sos_base_fw(adev);
+ if (err)
+ goto out;
+
if (sos_hdr->header.header_version_minor == 1) {
sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
- adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes);
+ adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
- le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes);
- adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes);
+ le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
+ adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
- le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes);
+ le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
}
if (sos_hdr->header.header_version_minor == 2) {
sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
- adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes);
+ adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
- le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes);
+ le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
}
if (sos_hdr->header.header_version_minor == 3) {
sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
- adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.toc_size_bytes);
- adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
- le32_to_cpu(sos_hdr_v1_3->v1_1.toc_offset_bytes);
- adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb_size_bytes);
- adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
- le32_to_cpu(sos_hdr_v1_3->v1_1.kdb_offset_bytes);
- adev->psp.spl_bin_size = le32_to_cpu(sos_hdr_v1_3->spl_size_bytes);
- adev->psp.spl_start_addr = (uint8_t *)adev->psp.sys_start_addr +
- le32_to_cpu(sos_hdr_v1_3->spl_offset_bytes);
- adev->psp.rl_bin_size = le32_to_cpu(sos_hdr_v1_3->rl_size_bytes);
- adev->psp.rl_start_addr = (uint8_t *)adev->psp.sys_start_addr +
- le32_to_cpu(sos_hdr_v1_3->rl_offset_bytes);
+ adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
+ adev->psp.toc_start_addr = ucode_array_start_addr +
+ le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
+ adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
+ adev->psp.kdb_start_addr = ucode_array_start_addr +
+ le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
+ adev->psp.spl_bin_size = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
+ adev->psp.spl_start_addr = ucode_array_start_addr +
+ le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
+ adev->psp.rl_bin_size = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
+ adev->psp.rl_start_addr = ucode_array_start_addr +
+ le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
}
break;
default:
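
A sketch, not part of the patch: the GECC handling added to psp_ras_initialize() above boils down to a small decision table, reprogramming the boot config for the next boot only when the current GECC state disagrees with whether UMC RAS is enabled. The names below are hypothetical:

#include <stdbool.h>
#include <stdio.h>

enum gecc_action {
	GECC_LEAVE_AS_IS,
	GECC_DISABLE_NEXT_BOOT,	/* issue BOOTCFG_CMD_SET with boot_cfg = 0 */
	GECC_ENABLE_NEXT_BOOT,	/* issue BOOTCFG_CMD_SET with BOOT_CONFIG_GECC */
};

/*
 * umc_ras_supported: UMC RAS left enabled by amdgpu_ras_enable/amdgpu_ras_mask
 * gecc_enabled: current state from BOOTCFG_CMD_GET; a failed query is treated
 * as "unknown" (0xFF) by the driver and therefore still reprogrammed here
 */
static enum gecc_action pick_gecc_action(bool umc_ras_supported, bool gecc_enabled)
{
	if (!umc_ras_supported)
		return gecc_enabled ? GECC_DISABLE_NEXT_BOOT : GECC_LEAVE_AS_IS;

	return gecc_enabled ? GECC_LEAVE_AS_IS : GECC_ENABLE_NEXT_BOOT;
}

int main(void)
{
	printf("%d %d\n",
	       pick_gecc_action(false, true),	/* RAS off, GECC on  -> disable */
	       pick_gecc_action(true, false));	/* RAS on,  GECC off -> enable  */
	return 0;
}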
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index e5023f1de7fd..3030ec24eb3b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -225,6 +225,61 @@ struct psp_memory_training_context {
enum psp_memory_training_init_flag init;
u32 training_cnt;
+ bool enable_mem_training;
+};
+
+/** PSP runtime DB **/
+#define PSP_RUNTIME_DB_SIZE_IN_BYTES 0x10000
+#define PSP_RUNTIME_DB_OFFSET 0x100000
+#define PSP_RUNTIME_DB_COOKIE_ID 0x0ed5
+#define PSP_RUNTIME_DB_VER_1 0x0100
+#define PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT 0x40
+
+enum psp_runtime_entry_type {
+ PSP_RUNTIME_ENTRY_TYPE_INVALID = 0x0,
+ PSP_RUNTIME_ENTRY_TYPE_TEST = 0x1,
+ PSP_RUNTIME_ENTRY_TYPE_MGPU_COMMON = 0x2, /* Common mGPU runtime data */
+ PSP_RUNTIME_ENTRY_TYPE_MGPU_WAFL = 0x3, /* WAFL runtime data */
+ PSP_RUNTIME_ENTRY_TYPE_MGPU_XGMI = 0x4, /* XGMI runtime data */
+ PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG = 0x5, /* Boot Config runtime data */
+};
+
+/* PSP runtime DB header */
+struct psp_runtime_data_header {
+ /* determine the existence of runtime db */
+ uint16_t cookie;
+ /* version of runtime db */
+ uint16_t version;
+};
+
+/* PSP runtime DB entry */
+struct psp_runtime_entry {
+ /* type of runtime db entry */
+ uint32_t entry_type;
+ /* offset of entry in bytes */
+ uint16_t offset;
+ /* size of entry in bytes */
+ uint16_t size;
+};
+
+/* PSP runtime DB directory */
+struct psp_runtime_data_directory {
+ /* number of valid entries */
+ uint16_t entry_count;
+ /* db entries */
+ struct psp_runtime_entry entry_list[PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT];
+};
+
+/* PSP runtime DB boot config feature bitmask */
+enum psp_runtime_boot_cfg_feature {
+ BOOT_CFG_FEATURE_GECC = 0x1,
+ BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING = 0x2,
+};
+
+/* PSP runtime DB boot config entry */
+struct psp_runtime_boot_cfg_entry {
+ uint32_t boot_cfg_bitmask;
+ uint32_t reserved;
};
struct psp_context
@@ -325,6 +380,8 @@ struct psp_context
struct psp_securedisplay_context securedisplay_context;
struct mutex mutex;
struct psp_memory_training_context mem_train_ctx;
+
+ uint32_t boot_cfg_bitmask;
};
struct amdgpu_psp_funcs {
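
For reference only (outside the patch): a compressed user-space sketch of how the structures above are consumed. The byte buffer stands in for the VRAM region at mc_vram_size - PSP_RUNTIME_DB_OFFSET, find_boot_cfg() is a hypothetical helper mirroring psp_get_runtime_db_entry(), and the struct layout is simply what the compiler produces, which is also what the driver assumes when reading straight from VRAM:

#include <stdint.h>
#include <string.h>

#define PSP_RUNTIME_DB_COOKIE_ID		0x0ed5
#define PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT	0x40
#define PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG	0x5

struct psp_runtime_data_header { uint16_t cookie; uint16_t version; };
struct psp_runtime_entry { uint32_t entry_type; uint16_t offset; uint16_t size; };
struct psp_runtime_data_directory {
	uint16_t entry_count;
	struct psp_runtime_entry entry_list[PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT];
};
struct psp_runtime_boot_cfg_entry { uint32_t boot_cfg_bitmask; uint32_t reserved; };

/* db points at the start of the runtime DB region: header, then directory */
static int find_boot_cfg(const uint8_t *db, struct psp_runtime_boot_cfg_entry *out)
{
	struct psp_runtime_data_header hdr;
	struct psp_runtime_data_directory dir;

	memcpy(&hdr, db, sizeof(hdr));
	if (hdr.cookie != PSP_RUNTIME_DB_COOKIE_ID)
		return 0;			/* no runtime DB written */

	memcpy(&dir, db + sizeof(hdr), sizeof(dir));
	if (dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT)
		return 0;			/* corrupt directory */

	for (unsigned i = 0; i < dir.entry_count; i++) {
		const struct psp_runtime_entry *e = &dir.entry_list[i];

		if (e->entry_type != PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG ||
		    e->size < sizeof(*out))
			continue;
		memcpy(out, db + e->offset, sizeof(*out));	/* offset is from the header */
		return 1;
	}
	return 0;
}

int main(void)
{
	uint8_t db[1024] = {0};
	struct psp_runtime_data_header hdr = { .cookie = PSP_RUNTIME_DB_COOKIE_ID, .version = 0x0100 };
	struct psp_runtime_data_directory dir = { .entry_count = 1 };
	struct psp_runtime_boot_cfg_entry cfg = { .boot_cfg_bitmask = 0x2 }, found;

	dir.entry_list[0] = (struct psp_runtime_entry){
		.entry_type = PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
		.offset = 600, .size = sizeof(cfg),
	};
	memcpy(db, &hdr, sizeof(hdr));
	memcpy(db + sizeof(hdr), &dir, sizeof(dir));
	memcpy(db + 600, &cfg, sizeof(cfg));

	return find_boot_cfg(db, &found) && found.boot_cfg_bitmask == 0x2 ? 0 : 1;
}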
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 9dfc1ebb41a9..fc66aca28594 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -809,7 +809,7 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
/* query/inject/cure begin */
int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
- struct ras_query_if *info)
+ struct ras_query_if *info)
{
struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
struct ras_err_data err_data = {0, 0, 0, NULL};
@@ -1043,17 +1043,32 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
return ret;
}
-/* get the total error counts on all IPs */
-void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
- unsigned long *ce_count,
- unsigned long *ue_count)
+/**
+ * amdgpu_ras_query_error_count - Get error counts of all IPs
+ * @adev: pointer to the AMD GPU device
+ * @ce_count: pointer to an integer to be set to the count of correctable errors.
+ * @ue_count: pointer to an integer to be set to the count of uncorrectable
+ * errors.
+ *
+ * If @ce_count or @ue_count is set, count the corresponding errors and
+ * return them in those integer pointers. Return 0 if the device
+ * supports RAS, or -EOPNOTSUPP if it doesn't.
+ */
+int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+ unsigned long *ce_count,
+ unsigned long *ue_count)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj;
unsigned long ce, ue;
if (!adev->ras_enabled || !con)
- return;
+ return -EOPNOTSUPP;
+
+ /* Don't count since no reporting.
+ */
+ if (!ce_count && !ue_count)
+ return 0;
ce = 0;
ue = 0;
@@ -1061,9 +1076,11 @@ void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
struct ras_query_if info = {
.head = obj->head,
};
+ int res;
- if (amdgpu_ras_query_error_status(adev, &info))
- return;
+ res = amdgpu_ras_query_error_status(adev, &info);
+ if (res)
+ return res;
ce += info.ce_count;
ue += info.ue_count;
@@ -1074,6 +1091,8 @@ void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
if (ue_count)
*ue_count = ue;
+
+ return 0;
}
/* query/inject/cure end */
@@ -1984,6 +2003,9 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
ret = amdgpu_ras_load_bad_pages(adev);
if (ret)
goto free;
+
+ if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->send_hbm_bad_pages_num)
+ adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.num_recs);
}
return 0;
@@ -2063,7 +2085,9 @@ static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
return;
if (strnstr(ctx->vbios_version, "D16406",
- sizeof(ctx->vbios_version)))
+ sizeof(ctx->vbios_version)) ||
+ strnstr(ctx->vbios_version, "D36002",
+ sizeof(ctx->vbios_version)))
adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
}
@@ -2122,7 +2146,7 @@ static void amdgpu_ras_counte_dw(struct work_struct *work)
struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
ras_counte_delay_work.work);
struct amdgpu_device *adev = con->adev;
- struct drm_device *dev = &adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
unsigned long ce_count, ue_count;
int res;
@@ -2132,9 +2156,10 @@ static void amdgpu_ras_counte_dw(struct work_struct *work)
/* Cache new values.
*/
- amdgpu_ras_query_error_count(adev, &ce_count, &ue_count);
- atomic_set(&con->ras_ce_count, ce_count);
- atomic_set(&con->ras_ue_count, ue_count);
+ if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
+ atomic_set(&con->ras_ce_count, ce_count);
+ atomic_set(&con->ras_ue_count, ue_count);
+ }
pm_runtime_mark_last_busy(dev->dev);
Out:
@@ -2307,9 +2332,10 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
/* Those are the cached values at init.
*/
- amdgpu_ras_query_error_count(adev, &ce_count, &ue_count);
- atomic_set(&con->ras_ce_count, ce_count);
- atomic_set(&con->ras_ue_count, ue_count);
+ if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
+ atomic_set(&con->ras_ce_count, ce_count);
+ atomic_set(&con->ras_ue_count, ue_count);
+ }
return 0;
cleanup:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 256cea5d34f2..b504ed8c9b50 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -490,9 +490,9 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
void amdgpu_ras_resume(struct amdgpu_device *adev);
void amdgpu_ras_suspend(struct amdgpu_device *adev);
-void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
- unsigned long *ce_count,
- unsigned long *ue_count);
+int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+ unsigned long *ce_count,
+ unsigned long *ue_count);
/* error handling functions */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 0527772fe1b8..d855cb53c7e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -176,10 +176,10 @@ TRACE_EVENT(amdgpu_cs_ioctl,
TP_fast_assign(
__entry->sched_job_id = job->base.id;
- __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
+ __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job));
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
- __assign_str(ring, to_amdgpu_ring(job->base.sched)->name)
+ __assign_str(ring, to_amdgpu_ring(job->base.sched)->name);
__entry->num_ibs = job->num_ibs;
),
TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
@@ -201,10 +201,10 @@ TRACE_EVENT(amdgpu_sched_run_job,
TP_fast_assign(
__entry->sched_job_id = job->base.id;
- __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
+ __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job));
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
- __assign_str(ring, to_amdgpu_ring(job->base.sched)->name)
+ __assign_str(ring, to_amdgpu_ring(job->base.sched)->name);
__entry->num_ibs = job->num_ibs;
),
TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
@@ -229,7 +229,7 @@ TRACE_EVENT(amdgpu_vm_grab_id,
TP_fast_assign(
__entry->pasid = vm->pasid;
- __assign_str(ring, ring->name)
+ __assign_str(ring, ring->name);
__entry->vmid = job->vmid;
__entry->vm_hub = ring->funcs->vmhub,
__entry->pd_addr = job->vm_pd_addr;
@@ -424,7 +424,7 @@ TRACE_EVENT(amdgpu_vm_flush,
),
TP_fast_assign(
- __assign_str(ring, ring->name)
+ __assign_str(ring, ring->name);
__entry->vmid = vmid;
__entry->vm_hub = ring->funcs->vmhub;
__entry->pd_addr = pd_addr;
@@ -525,7 +525,7 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
),
TP_fast_assign(
- __assign_str(ring, sched_job->base.sched->name)
+ __assign_str(ring, sched_job->base.sched->name);
__entry->id = sched_job->base.id;
__entry->fence = fence;
__entry->ctx = fence->context;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index b46726e47bce..acd95d3a4434 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -592,10 +592,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
mem->bus.offset += adev->gmc.aper_base;
mem->bus.is_iomem = true;
- if (adev->gmc.xgmi.connected_to_cpu)
- mem->bus.caching = ttm_cached;
- else
- mem->bus.caching = ttm_write_combined;
break;
default:
return -EINVAL;
@@ -683,23 +679,23 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
return -ESRCH;
mmap_read_lock(mm);
- vma = find_vma(mm, start);
- mmap_read_unlock(mm);
- if (unlikely(!vma || start < vma->vm_start)) {
+ vma = vma_lookup(mm, start);
+ if (unlikely(!vma)) {
r = -EFAULT;
- goto out_putmm;
+ goto out_unlock;
}
if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
vma->vm_file)) {
r = -EPERM;
- goto out_putmm;
+ goto out_unlock;
}
readonly = amdgpu_ttm_tt_is_readonly(ttm);
r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start,
ttm->num_pages, &gtt->range, readonly,
- false);
-out_putmm:
+ true, NULL);
+out_unlock:
+ mmap_read_unlock(mm);
mmput(mm);
return r;
@@ -843,7 +839,7 @@ static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
uint64_t page_idx = 1;
r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
- ttm->pages, gtt->ttm.dma_address, flags);
+ gtt->ttm.dma_address, flags);
if (r)
goto gart_bind_fail;
@@ -857,11 +853,10 @@ static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
r = amdgpu_gart_bind(adev,
gtt->offset + (page_idx << PAGE_SHIFT),
ttm->num_pages - page_idx,
- &ttm->pages[page_idx],
&(gtt->ttm.dma_address[page_idx]), flags);
} else {
r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
- ttm->pages, gtt->ttm.dma_address, flags);
+ gtt->ttm.dma_address, flags);
}
gart_bind_fail:
@@ -926,7 +921,8 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
bo_mem->mem_type == AMDGPU_PL_OA)
return -EINVAL;
- if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
+ if (bo_mem->mem_type != TTM_PL_TT ||
+ !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
gtt->offset = AMDGPU_BO_INVALID_OFFSET;
return 0;
}
@@ -937,7 +933,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
/* bind pages into GART page tables */
gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
- ttm->pages, gtt->ttm.dma_address, flags);
+ gtt->ttm.dma_address, flags);
if (r)
DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
@@ -1131,8 +1127,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!ttm->sg)
return -ENOMEM;
-
- ttm->page_flags |= TTM_PAGE_FLAG_SG;
return 0;
}
@@ -1158,7 +1152,6 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
amdgpu_ttm_tt_set_user_pages(ttm, NULL);
kfree(ttm->sg);
ttm->sg = NULL;
- ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
return;
}
@@ -1192,6 +1185,9 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
return -ENOMEM;
}
+ /* Set TTM_PAGE_FLAG_SG before populate but after create. */
+ bo->ttm->page_flags |= TTM_PAGE_FLAG_SG;
+
gtt = (void *)bo->ttm;
gtt->userptr = addr;
gtt->userflags = flags;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 5eb84de588eb..2834981f8c08 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -257,36 +257,36 @@ void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr)
container_of(hdr, struct psp_firmware_header_v1_0, header);
DRM_DEBUG("ucode_feature_version: %u\n",
- le32_to_cpu(psp_hdr->ucode_feature_version));
+ le32_to_cpu(psp_hdr->sos.fw_version));
DRM_DEBUG("sos_offset_bytes: %u\n",
- le32_to_cpu(psp_hdr->sos_offset_bytes));
+ le32_to_cpu(psp_hdr->sos.offset_bytes));
DRM_DEBUG("sos_size_bytes: %u\n",
- le32_to_cpu(psp_hdr->sos_size_bytes));
+ le32_to_cpu(psp_hdr->sos.size_bytes));
if (version_minor == 1) {
const struct psp_firmware_header_v1_1 *psp_hdr_v1_1 =
container_of(psp_hdr, struct psp_firmware_header_v1_1, v1_0);
DRM_DEBUG("toc_header_version: %u\n",
- le32_to_cpu(psp_hdr_v1_1->toc_header_version));
+ le32_to_cpu(psp_hdr_v1_1->toc.fw_version));
DRM_DEBUG("toc_offset_bytes: %u\n",
- le32_to_cpu(psp_hdr_v1_1->toc_offset_bytes));
+ le32_to_cpu(psp_hdr_v1_1->toc.offset_bytes));
DRM_DEBUG("toc_size_bytes: %u\n",
- le32_to_cpu(psp_hdr_v1_1->toc_size_bytes));
+ le32_to_cpu(psp_hdr_v1_1->toc.size_bytes));
DRM_DEBUG("kdb_header_version: %u\n",
- le32_to_cpu(psp_hdr_v1_1->kdb_header_version));
+ le32_to_cpu(psp_hdr_v1_1->kdb.fw_version));
DRM_DEBUG("kdb_offset_bytes: %u\n",
- le32_to_cpu(psp_hdr_v1_1->kdb_offset_bytes));
+ le32_to_cpu(psp_hdr_v1_1->kdb.offset_bytes));
DRM_DEBUG("kdb_size_bytes: %u\n",
- le32_to_cpu(psp_hdr_v1_1->kdb_size_bytes));
+ le32_to_cpu(psp_hdr_v1_1->kdb.size_bytes));
}
if (version_minor == 2) {
const struct psp_firmware_header_v1_2 *psp_hdr_v1_2 =
container_of(psp_hdr, struct psp_firmware_header_v1_2, v1_0);
DRM_DEBUG("kdb_header_version: %u\n",
- le32_to_cpu(psp_hdr_v1_2->kdb_header_version));
+ le32_to_cpu(psp_hdr_v1_2->kdb.fw_version));
DRM_DEBUG("kdb_offset_bytes: %u\n",
- le32_to_cpu(psp_hdr_v1_2->kdb_offset_bytes));
+ le32_to_cpu(psp_hdr_v1_2->kdb.offset_bytes));
DRM_DEBUG("kdb_size_bytes: %u\n",
- le32_to_cpu(psp_hdr_v1_2->kdb_size_bytes));
+ le32_to_cpu(psp_hdr_v1_2->kdb.size_bytes));
}
if (version_minor == 3) {
const struct psp_firmware_header_v1_1 *psp_hdr_v1_1 =
@@ -294,23 +294,23 @@ void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr)
const struct psp_firmware_header_v1_3 *psp_hdr_v1_3 =
container_of(psp_hdr_v1_1, struct psp_firmware_header_v1_3, v1_1);
DRM_DEBUG("toc_header_version: %u\n",
- le32_to_cpu(psp_hdr_v1_3->v1_1.toc_header_version));
+ le32_to_cpu(psp_hdr_v1_3->v1_1.toc.fw_version));
DRM_DEBUG("toc_offset_bytes: %u\n",
- le32_to_cpu(psp_hdr_v1_3->v1_1.toc_offset_bytes));
+ le32_to_cpu(psp_hdr_v1_3->v1_1.toc.offset_bytes));
DRM_DEBUG("toc_size_bytes: %u\n",
- le32_to_cpu(psp_hdr_v1_3->v1_1.toc_size_bytes));
+ le32_to_cpu(psp_hdr_v1_3->v1_1.toc.size_bytes));
DRM_DEBUG("kdb_header_version: %u\n",
- le32_to_cpu(psp_hdr_v1_3->v1_1.kdb_header_version));
+ le32_to_cpu(psp_hdr_v1_3->v1_1.kdb.fw_version));
DRM_DEBUG("kdb_offset_bytes: %u\n",
- le32_to_cpu(psp_hdr_v1_3->v1_1.kdb_offset_bytes));
+ le32_to_cpu(psp_hdr_v1_3->v1_1.kdb.offset_bytes));
DRM_DEBUG("kdb_size_bytes: %u\n",
- le32_to_cpu(psp_hdr_v1_3->v1_1.kdb_size_bytes));
+ le32_to_cpu(psp_hdr_v1_3->v1_1.kdb.size_bytes));
DRM_DEBUG("spl_header_version: %u\n",
- le32_to_cpu(psp_hdr_v1_3->spl_header_version));
+ le32_to_cpu(psp_hdr_v1_3->spl.fw_version));
DRM_DEBUG("spl_offset_bytes: %u\n",
- le32_to_cpu(psp_hdr_v1_3->spl_offset_bytes));
+ le32_to_cpu(psp_hdr_v1_3->spl.offset_bytes));
DRM_DEBUG("spl_size_bytes: %u\n",
- le32_to_cpu(psp_hdr_v1_3->spl_size_bytes));
+ le32_to_cpu(psp_hdr_v1_3->spl.size_bytes));
}
} else {
DRM_ERROR("Unknown PSP ucode version: %u.%u\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 2c42874f7784..270309e7f5f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -71,43 +71,39 @@ struct smc_firmware_header_v2_1 {
uint32_t pptable_entry_offset;
};
+struct psp_fw_bin_desc {
+ uint32_t fw_version;
+ uint32_t offset_bytes;
+ uint32_t size_bytes;
+};
+
/* version_major=1, version_minor=0 */
struct psp_firmware_header_v1_0 {
struct common_firmware_header header;
- uint32_t ucode_feature_version;
- uint32_t sos_offset_bytes;
- uint32_t sos_size_bytes;
+ struct psp_fw_bin_desc sos;
};
/* version_major=1, version_minor=1 */
struct psp_firmware_header_v1_1 {
struct psp_firmware_header_v1_0 v1_0;
- uint32_t toc_header_version;
- uint32_t toc_offset_bytes;
- uint32_t toc_size_bytes;
- uint32_t kdb_header_version;
- uint32_t kdb_offset_bytes;
- uint32_t kdb_size_bytes;
+ struct psp_fw_bin_desc toc;
+ struct psp_fw_bin_desc kdb;
};
/* version_major=1, version_minor=2 */
struct psp_firmware_header_v1_2 {
struct psp_firmware_header_v1_0 v1_0;
- uint32_t reserve[3];
- uint32_t kdb_header_version;
- uint32_t kdb_offset_bytes;
- uint32_t kdb_size_bytes;
+ struct psp_fw_bin_desc res;
+ struct psp_fw_bin_desc kdb;
};
/* version_major=1, version_minor=3 */
struct psp_firmware_header_v1_3 {
struct psp_firmware_header_v1_1 v1_1;
- uint32_t spl_header_version;
- uint32_t spl_offset_bytes;
- uint32_t spl_size_bytes;
- uint32_t rl_header_version;
- uint32_t rl_offset_bytes;
- uint32_t rl_size_bytes;
+ struct psp_fw_bin_desc spl;
+ struct psp_fw_bin_desc rl;
+ struct psp_fw_bin_desc sys_drv_aux;
+ struct psp_fw_bin_desc sos_aux;
};
/* version_major=1, version_minor=0 */
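
Sketch for illustration (hypothetical old_/new_ struct names, not part of the patch): the rework above is meant to be purely mechanical; since every field is a u32, packing each version/offset/size triplet into psp_fw_bin_desc leaves the firmware header layout untouched. A small stand-alone check of that equivalence for the v1.1 tail:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct psp_fw_bin_desc {
	uint32_t fw_version;
	uint32_t offset_bytes;
	uint32_t size_bytes;
};

/* the former v1.1 tail, spelled out as loose fields */
struct old_v1_1_tail {
	uint32_t toc_header_version;
	uint32_t toc_offset_bytes;
	uint32_t toc_size_bytes;
	uint32_t kdb_header_version;
	uint32_t kdb_offset_bytes;
	uint32_t kdb_size_bytes;
};

/* the same tail expressed with the new descriptor */
struct new_v1_1_tail {
	struct psp_fw_bin_desc toc;
	struct psp_fw_bin_desc kdb;
};

int main(void)
{
	/* every field is a u32, so the firmware image layout is unchanged */
	static_assert(sizeof(struct old_v1_1_tail) == sizeof(struct new_v1_1_tail),
		      "tail size changed");
	static_assert(offsetof(struct new_v1_1_tail, kdb.offset_bytes) ==
		      offsetof(struct old_v1_1_tail, kdb_offset_bytes),
		      "kdb offset moved");
	return 0;
}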
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index ea6f99be070b..f4489773715e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -94,6 +94,7 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
if (adev->umc.ras_funcs &&
@@ -131,6 +132,9 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
err_data->err_addr_cnt);
amdgpu_ras_save_bad_pages(adev);
+
+ if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->send_hbm_bad_pages_num)
+ adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.num_recs);
}
amdgpu_ras_reset_gpu(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index bbcccf53080d..e5a75fb788dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -22,6 +22,11 @@
#define __AMDGPU_UMC_H__
/*
+ * (addr / 256) * 4096, the higher 26 bits in ErrorAddr
+ * are the index of the 4KB block
+ */
+#define ADDR_OF_4KB_BLOCK(addr) (((addr) & ~0xffULL) << 4)
+/*
* (addr / 256) * 8192, the higher 26 bits in ErrorAddr
* is the index of 8KB block
*/
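
A quick stand-alone check (not part of the patch) that the new macro's mask-and-shift really is the "(addr / 256) * 4096" described in the comment above:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ADDR_OF_4KB_BLOCK(addr)	(((addr) & ~0xffULL) << 4)

int main(void)
{
	const uint64_t samples[] = { 0x0, 0xff, 0x100, 0x12345678, 0xdeadbeefULL };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		uint64_t a = samples[i];

		/* clearing the low 8 bits and shifting by 4 is (a / 256) * 4096 */
		assert(ADDR_OF_4KB_BLOCK(a) == (a / 256) * 4096);
		printf("0x%llx -> block base 0x%llx\n",
		       (unsigned long long)a,
		       (unsigned long long)ADDR_OF_4KB_BLOCK(a));
	}
	return 0;
}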
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 647d2c31e8bd..6780df0fb265 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -288,6 +288,29 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
return 0;
}
+bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
+{
+ bool ret = false;
+
+ int major;
+ int minor;
+ int revision;
+
+ /* if the IP data cannot be found, this VCN instance does not exist */
+ if (amdgpu_discovery_get_vcn_version(adev, vcn_instance, &major, &minor, &revision) != 0)
+ return true;
+
+ if ((type == VCN_ENCODE_RING) && (revision & VCN_BLOCK_ENCODE_DISABLE_MASK)) {
+ ret = true;
+ } else if ((type == VCN_DECODE_RING) && (revision & VCN_BLOCK_DECODE_DISABLE_MASK)) {
+ ret = true;
+ } else if ((type == VCN_UNIFIED_RING) && (revision & VCN_BLOCK_QUEUE_DISABLE_MASK)) {
+ ret = true;
+ }
+
+ return ret;
+}
+
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
unsigned size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index bc76cab67697..d74c62b49795 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -280,6 +280,16 @@ struct amdgpu_vcn_decode_buffer {
uint32_t pad[30];
};
+#define VCN_BLOCK_ENCODE_DISABLE_MASK 0x80
+#define VCN_BLOCK_DECODE_DISABLE_MASK 0x40
+#define VCN_BLOCK_QUEUE_DISABLE_MASK 0xC0
+
+enum vcn_ring_type {
+ VCN_ENCODE_RING,
+ VCN_DECODE_RING,
+ VCN_UNIFIED_RING,
+};
+
int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev);
int amdgpu_vcn_suspend(struct amdgpu_device *adev);
@@ -287,6 +297,9 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev);
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring);
+bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev,
+ enum vcn_ring_type type, uint32_t vcn_instance);
+
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring);
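
Illustrative sketch (outside the patch): the three masks above come from the IP-discovery revision byte, and the unified-queue mask is simply the encode and decode bits combined (0x80 | 0x40 = 0xC0). A user-space version of the same check done by amdgpu_vcn_is_disabled_vcn(), treating a set bit as the corresponding block being fused off:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VCN_BLOCK_ENCODE_DISABLE_MASK 0x80
#define VCN_BLOCK_DECODE_DISABLE_MASK 0x40
#define VCN_BLOCK_QUEUE_DISABLE_MASK  0xC0	/* encode | decode */

enum vcn_ring_type { VCN_ENCODE_RING, VCN_DECODE_RING, VCN_UNIFIED_RING };

/* revision comes from IP discovery; a set bit disables the matching ring type */
static bool vcn_ring_disabled(enum vcn_ring_type type, uint32_t revision)
{
	switch (type) {
	case VCN_ENCODE_RING:
		return revision & VCN_BLOCK_ENCODE_DISABLE_MASK;
	case VCN_DECODE_RING:
		return revision & VCN_BLOCK_DECODE_DISABLE_MASK;
	case VCN_UNIFIED_RING:
		return (revision & VCN_BLOCK_QUEUE_DISABLE_MASK) != 0;
	}
	return false;
}

int main(void)
{
	printf("%d %d %d\n",
	       vcn_ring_disabled(VCN_ENCODE_RING, 0xC0),	/* encode fused off */
	       vcn_ring_disabled(VCN_DECODE_RING, 0x40),	/* decode fused off */
	       vcn_ring_disabled(VCN_UNIFIED_RING, 0x00));	/* fully enabled   */
	return 0;
}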
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 18246b5b6ee3..078c068937fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -332,7 +332,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
base->next = bo->vm_bo;
bo->vm_bo = base;
- if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
+ if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
return;
vm->bulk_moveable = false;
@@ -361,14 +361,14 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
* Helper to get the parent entry for the child page table. NULL if we are at
* the root page directory.
*/
-static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
+static struct amdgpu_vm_bo_base *amdgpu_vm_pt_parent(struct amdgpu_vm_bo_base *pt)
{
- struct amdgpu_bo *parent = pt->base.bo->parent;
+ struct amdgpu_bo *parent = pt->bo->parent;
if (!parent)
return NULL;
- return container_of(parent->vm_bo, struct amdgpu_vm_pt, base);
+ return parent->vm_bo;
}
/*
@@ -376,8 +376,8 @@ static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
*/
struct amdgpu_vm_pt_cursor {
uint64_t pfn;
- struct amdgpu_vm_pt *parent;
- struct amdgpu_vm_pt *entry;
+ struct amdgpu_vm_bo_base *parent;
+ struct amdgpu_vm_bo_base *entry;
unsigned level;
};
@@ -416,17 +416,17 @@ static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
{
unsigned mask, shift, idx;
- if (!cursor->entry->entries)
+ if ((cursor->level == AMDGPU_VM_PTB) || !cursor->entry ||
+ !cursor->entry->bo)
return false;
- BUG_ON(!cursor->entry->base.bo);
mask = amdgpu_vm_entries_mask(adev, cursor->level);
shift = amdgpu_vm_level_shift(adev, cursor->level);
++cursor->level;
idx = (cursor->pfn >> shift) & mask;
cursor->parent = cursor->entry;
- cursor->entry = &cursor->entry->entries[idx];
+ cursor->entry = &to_amdgpu_bo_vm(cursor->entry->bo)->entries[idx];
return true;
}
@@ -453,7 +453,7 @@ static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
shift = amdgpu_vm_level_shift(adev, cursor->level - 1);
num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1);
- if (cursor->entry == &cursor->parent->entries[num_entries - 1])
+ if (cursor->entry == &to_amdgpu_bo_vm(cursor->parent->bo)->entries[num_entries - 1])
return false;
cursor->pfn += 1ULL << shift;
@@ -539,7 +539,7 @@ static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
* True when the search should continue, false otherwise.
*/
static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
- struct amdgpu_vm_pt *entry)
+ struct amdgpu_vm_bo_base *entry)
{
return entry && (!start || entry != start->entry);
}
@@ -590,7 +590,7 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct amdgpu_bo_list_entry *entry)
{
entry->priority = 0;
- entry->tv.bo = &vm->root.base.bo->tbo;
+ entry->tv.bo = &vm->root.bo->tbo;
/* Two for VM updates, one for TTM and one for the CS job */
entry->tv.num_shared = 4;
entry->user_pages = NULL;
@@ -622,7 +622,7 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) {
struct amdgpu_vm *vm = bo_base->vm;
- if (abo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
+ if (abo->tbo.base.resv == vm->root.bo->tbo.base.resv)
vm->bulk_moveable = false;
}
@@ -781,11 +781,11 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
entries -= ats_entries;
} else {
- struct amdgpu_vm_pt *pt;
+ struct amdgpu_vm_bo_base *pt;
- pt = container_of(ancestor->vm_bo, struct amdgpu_vm_pt, base);
+ pt = ancestor->vm_bo;
ats_entries = amdgpu_vm_num_ats_entries(adev);
- if ((pt - vm->root.entries) >= ats_entries) {
+ if ((pt - to_amdgpu_bo_vm(vm->root.bo)->entries) >= ats_entries) {
ats_entries = 0;
} else {
ats_entries = entries;
@@ -902,8 +902,8 @@ static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
bp.type = ttm_bo_type_kernel;
bp.no_wait_gpu = immediate;
- if (vm->root.base.bo)
- bp.resv = vm->root.base.bo->tbo.base.resv;
+ if (vm->root.bo)
+ bp.resv = vm->root.bo->tbo.base.resv;
r = amdgpu_bo_create_vm(adev, &bp, vmbo);
if (r)
@@ -938,7 +938,7 @@ static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
}
(*vmbo)->shadow->parent = amdgpu_bo_ref(bo);
- amdgpu_bo_add_to_shadow_list((*vmbo)->shadow);
+ amdgpu_bo_add_to_shadow_list(*vmbo);
return 0;
}
@@ -962,19 +962,13 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
struct amdgpu_vm_pt_cursor *cursor,
bool immediate)
{
- struct amdgpu_vm_pt *entry = cursor->entry;
+ struct amdgpu_vm_bo_base *entry = cursor->entry;
struct amdgpu_bo *pt_bo;
struct amdgpu_bo_vm *pt;
int r;
- if (entry->base.bo) {
- if (cursor->level < AMDGPU_VM_PTB)
- entry->entries =
- to_amdgpu_bo_vm(entry->base.bo)->entries;
- else
- entry->entries = NULL;
+ if (entry->bo)
return 0;
- }
r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt);
if (r)
@@ -984,13 +978,8 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
* freeing them up in the wrong order.
*/
pt_bo = &pt->bo;
- pt_bo->parent = amdgpu_bo_ref(cursor->parent->base.bo);
- amdgpu_vm_bo_base_init(&entry->base, vm, pt_bo);
- if (cursor->level < AMDGPU_VM_PTB)
- entry->entries = pt->entries;
- else
- entry->entries = NULL;
-
+ pt_bo->parent = amdgpu_bo_ref(cursor->parent->bo);
+ amdgpu_vm_bo_base_init(entry, vm, pt_bo);
r = amdgpu_vm_clear_bo(adev, vm, pt, immediate);
if (r)
goto error_free_pt;
@@ -1008,18 +997,17 @@ error_free_pt:
*
* @entry: PDE to free
*/
-static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry)
+static void amdgpu_vm_free_table(struct amdgpu_vm_bo_base *entry)
{
struct amdgpu_bo *shadow;
- if (entry->base.bo) {
- shadow = amdgpu_bo_shadowed(entry->base.bo);
- entry->base.bo->vm_bo = NULL;
- list_del(&entry->base.vm_status);
- amdgpu_bo_unref(&shadow);
- amdgpu_bo_unref(&entry->base.bo);
- }
- entry->entries = NULL;
+ if (!entry->bo)
+ return;
+ shadow = amdgpu_bo_shadowed(entry->bo);
+ entry->bo->vm_bo = NULL;
+ list_del(&entry->vm_status);
+ amdgpu_bo_unref(&shadow);
+ amdgpu_bo_unref(&entry->bo);
}
/**
@@ -1036,7 +1024,7 @@ static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
struct amdgpu_vm_pt_cursor *start)
{
struct amdgpu_vm_pt_cursor cursor;
- struct amdgpu_vm_pt *entry;
+ struct amdgpu_vm_bo_base *entry;
vm->bulk_moveable = false;
@@ -1304,10 +1292,10 @@ uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
*/
static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
struct amdgpu_vm *vm,
- struct amdgpu_vm_pt *entry)
+ struct amdgpu_vm_bo_base *entry)
{
- struct amdgpu_vm_pt *parent = amdgpu_vm_pt_parent(entry);
- struct amdgpu_bo *bo = parent->base.bo, *pbo;
+ struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
+ struct amdgpu_bo *bo = parent->bo, *pbo;
uint64_t pde, pt, flags;
unsigned level;
@@ -1315,8 +1303,8 @@ static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
pbo = pbo->parent;
level += params->adev->vm_manager.root_level;
- amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
- pde = (entry - parent->entries) * 8;
+ amdgpu_gmc_get_pde_for_bo(entry->bo, level, &pt, &flags);
+ pde = (entry - to_amdgpu_bo_vm(parent->bo)->entries) * 8;
return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt,
1, 0, flags);
}
@@ -1333,11 +1321,11 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
struct amdgpu_vm_pt_cursor cursor;
- struct amdgpu_vm_pt *entry;
+ struct amdgpu_vm_bo_base *entry;
for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
- if (entry->base.bo && !entry->base.moved)
- amdgpu_vm_bo_relocated(&entry->base);
+ if (entry->bo && !entry->moved)
+ amdgpu_vm_bo_relocated(entry);
}
/**
@@ -1371,11 +1359,12 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
return r;
while (!list_empty(&vm->relocated)) {
- struct amdgpu_vm_pt *entry;
+ struct amdgpu_vm_bo_base *entry;
- entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt,
- base.vm_status);
- amdgpu_vm_bo_idle(&entry->base);
+ entry = list_first_entry(&vm->relocated,
+ struct amdgpu_vm_bo_base,
+ vm_status);
+ amdgpu_vm_bo_idle(entry);
r = amdgpu_vm_update_pde(&params, vm, entry);
if (r)
@@ -1555,7 +1544,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
continue;
}
- pt = cursor.entry->base.bo;
+ pt = cursor.entry->bo;
if (!pt) {
/* We need all PDs and PTs for mapping something, */
if (flags & AMDGPU_PTE_VALID)
@@ -1567,7 +1556,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
if (!amdgpu_vm_pt_ancestor(&cursor))
return -EINVAL;
- pt = cursor.entry->base.bo;
+ pt = cursor.entry->bo;
shift = parent_shift;
frag_end = max(frag_end, ALIGN(frag_start + 1,
1ULL << shift));
@@ -1622,7 +1611,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
*/
while (cursor.pfn < frag_start) {
/* Make sure previous mapping is freed */
- if (cursor.entry->base.bo) {
+ if (cursor.entry->bo) {
params->table_freed = true;
amdgpu_vm_free_pts(adev, params->vm, &cursor);
}
@@ -1704,7 +1693,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
struct dma_fence *tmp = dma_fence_get_stub();
- amdgpu_bo_fence(vm->root.base.bo, vm->last_unlocked, true);
+ amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
swap(vm->last_unlocked, tmp);
dma_fence_put(tmp);
}
@@ -1769,7 +1758,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
r = vm->update_funcs->commit(&params, fence);
if (table_freed)
- *table_freed = *table_freed || params.table_freed;
+ *table_freed = params.table_freed;
error_unlock:
amdgpu_vm_eviction_unlock(vm);
@@ -1827,7 +1816,6 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
* @adev: amdgpu_device pointer
* @bo_va: requested BO and VM object
* @clear: if true clear the entries
- * @table_freed: return true if page table is freed
*
* Fill in the page table entries for @bo_va.
*
@@ -1835,7 +1823,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
* 0 for success, -EINVAL for failure.
*/
int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
- bool clear, bool *table_freed)
+ bool clear)
{
struct amdgpu_bo *bo = bo_va->base.bo;
struct amdgpu_vm *vm = bo_va->base.vm;
@@ -1850,7 +1838,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
if (clear || !bo) {
mem = NULL;
- resv = vm->root.base.bo->tbo.base.resv;
+ resv = vm->root.bo->tbo.base.resv;
} else {
struct drm_gem_object *obj = &bo->tbo.base;
@@ -1881,7 +1869,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
}
if (clear || (bo && bo->tbo.base.resv ==
- vm->root.base.bo->tbo.base.resv))
+ vm->root.bo->tbo.base.resv))
last_update = &vm->last_update;
else
last_update = &bo_va->last_pt_update;
@@ -1914,7 +1902,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
resv, mapping->start,
mapping->last, update_flags,
mapping->offset, mem,
- pages_addr, last_update, table_freed);
+ pages_addr, last_update, NULL);
if (r)
return r;
}
@@ -1923,7 +1911,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
* the evicted list so that it gets validated again on the
* next command submission.
*/
- if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
+ if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
uint32_t mem_type = bo->tbo.resource->mem_type;
if (!(bo->preferred_domains &
@@ -2060,7 +2048,7 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
*/
static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
- struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
+ struct dma_resv *resv = vm->root.bo->tbo.base.resv;
struct dma_fence *excl, **shared;
unsigned i, shared_count;
int r;
@@ -2106,7 +2094,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence)
{
- struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
+ struct dma_resv *resv = vm->root.bo->tbo.base.resv;
struct amdgpu_bo_va_mapping *mapping;
uint64_t init_pte_value = 0;
struct dma_fence *f = NULL;
@@ -2166,7 +2154,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
 /* Per VM BOs never need to be cleared in the page tables */
- r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
}
@@ -2185,7 +2173,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
else
clear = true;
- r = amdgpu_vm_bo_update(adev, bo_va, clear, NULL);
+ r = amdgpu_vm_bo_update(adev, bo_va, clear);
if (r)
return r;
@@ -2265,7 +2253,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
if (mapping->flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
- if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv &&
+ if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
!bo_va->base.moved) {
list_move(&bo_va->base.vm_status, &vm->moved);
}
@@ -2627,7 +2615,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_vm_bo_base **base;
if (bo) {
- if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
+ if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
vm->bulk_moveable = false;
for (base = &bo_va->base.bo->vm_bo; *base;
@@ -2721,7 +2709,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
struct amdgpu_vm *vm = bo_base->vm;
- if (evicted && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
+ if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
amdgpu_vm_bo_evicted(bo_base);
continue;
}
@@ -2732,7 +2720,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
if (bo->tbo.type == ttm_bo_type_kernel)
amdgpu_vm_bo_relocated(bo_base);
- else if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
+ else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
amdgpu_vm_bo_moved(bo_base);
else
amdgpu_vm_bo_invalidated(bo_base);
@@ -2862,7 +2850,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
*/
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
{
- timeout = dma_resv_wait_timeout(vm->root.base.bo->tbo.base.resv, true,
+ timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, true,
true, timeout);
if (timeout <= 0)
return timeout;
@@ -2948,13 +2936,13 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
if (r)
goto error_unreserve;
- amdgpu_vm_bo_base_init(&vm->root.base, vm, root_bo);
+ amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
r = amdgpu_vm_clear_bo(adev, vm, root, false);
if (r)
goto error_unreserve;
- amdgpu_bo_unreserve(vm->root.base.bo);
+ amdgpu_bo_unreserve(vm->root.bo);
if (pasid) {
unsigned long flags;
@@ -2974,12 +2962,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
return 0;
error_unreserve:
- amdgpu_bo_unreserve(vm->root.base.bo);
+ amdgpu_bo_unreserve(vm->root.bo);
error_free_root:
amdgpu_bo_unref(&root->shadow);
amdgpu_bo_unref(&root_bo);
- vm->root.base.bo = NULL;
+ vm->root.bo = NULL;
error_free_delayed:
dma_fence_put(vm->last_unlocked);
@@ -3005,17 +2993,14 @@ error_free_immediate:
* 0 if this VM is clean
*/
static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
+ struct amdgpu_vm *vm)
{
enum amdgpu_vm_level root = adev->vm_manager.root_level;
unsigned int entries = amdgpu_vm_num_entries(adev, root);
unsigned int i = 0;
- if (!(vm->root.entries))
- return 0;
-
for (i = 0; i < entries; i++) {
- if (vm->root.entries[i].base.bo)
+ if (to_amdgpu_bo_vm(vm->root.bo)->entries[i].bo)
return -EINVAL;
}
@@ -3049,7 +3034,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
int r;
- r = amdgpu_bo_reserve(vm->root.base.bo, true);
+ r = amdgpu_bo_reserve(vm->root.bo, true);
if (r)
return r;
@@ -3077,7 +3062,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (pte_support_ats != vm->pte_support_ats) {
vm->pte_support_ats = pte_support_ats;
r = amdgpu_vm_clear_bo(adev, vm,
- to_amdgpu_bo_vm(vm->root.base.bo),
+ to_amdgpu_bo_vm(vm->root.bo),
false);
if (r)
goto free_idr;
@@ -3094,7 +3079,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (vm->use_cpu_for_update) {
/* Sync with last SDMA update/clear before switching to CPU */
- r = amdgpu_bo_sync_wait(vm->root.base.bo,
+ r = amdgpu_bo_sync_wait(vm->root.bo,
AMDGPU_FENCE_OWNER_UNDEFINED, true);
if (r)
goto free_idr;
@@ -3122,7 +3107,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
/* Free the shadow bo for compute VM */
- amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.base.bo)->shadow);
+ amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);
if (pasid)
vm->pasid = pasid;
@@ -3138,7 +3123,7 @@ free_idr:
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}
unreserve_bo:
- amdgpu_bo_unreserve(vm->root.base.bo);
+ amdgpu_bo_unreserve(vm->root.bo);
return r;
}
@@ -3181,7 +3166,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
- root = amdgpu_bo_ref(vm->root.base.bo);
+ root = amdgpu_bo_ref(vm->root.bo);
amdgpu_bo_reserve(root, true);
if (vm->pasid) {
unsigned long flags;
@@ -3208,7 +3193,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_vm_free_pts(adev, vm, NULL);
amdgpu_bo_unreserve(root);
amdgpu_bo_unref(&root);
- WARN_ON(vm->root.base.bo);
+ WARN_ON(vm->root.bo);
drm_sched_entity_destroy(&vm->immediate);
drm_sched_entity_destroy(&vm->delayed);
@@ -3325,7 +3310,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
/* Wait vm idle to make sure the vmid set in SPM_VMID is
* not referenced anymore.
*/
- r = amdgpu_bo_reserve(fpriv->vm.root.base.bo, true);
+ r = amdgpu_bo_reserve(fpriv->vm.root.bo, true);
if (r)
return r;
@@ -3333,7 +3318,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r < 0)
return r;
- amdgpu_bo_unreserve(fpriv->vm.root.base.bo);
+ amdgpu_bo_unreserve(fpriv->vm.root.bo);
amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
break;
default:
@@ -3399,19 +3384,20 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
{
bool is_compute_context = false;
struct amdgpu_bo *root;
+ unsigned long irqflags;
uint64_t value, flags;
struct amdgpu_vm *vm;
int r;
- spin_lock(&adev->vm_manager.pasid_lock);
+ spin_lock_irqsave(&adev->vm_manager.pasid_lock, irqflags);
vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
if (vm) {
- root = amdgpu_bo_ref(vm->root.base.bo);
+ root = amdgpu_bo_ref(vm->root.bo);
is_compute_context = vm->is_compute_context;
} else {
root = NULL;
}
- spin_unlock(&adev->vm_manager.pasid_lock);
+ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, irqflags);
if (!root)
return false;
@@ -3429,11 +3415,11 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
goto error_unref;
/* Double check that the VM still exists */
- spin_lock(&adev->vm_manager.pasid_lock);
+ spin_lock_irqsave(&adev->vm_manager.pasid_lock, irqflags);
vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
- if (vm && vm->root.base.bo != root)
+ if (vm && vm->root.bo != root)
vm = NULL;
- spin_unlock(&adev->vm_manager.pasid_lock);
+ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, irqflags);
if (!vm)
goto error_unlock;
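The fault-handler hunk above switches the pasid_lock accesses to the irqsave spinlock variants, so the PASID lookup stays safe regardless of the interrupt state of the caller. A minimal kernel-style sketch of that pattern follows; the table and lookup names are stand-ins, only the locking calls mirror the change.

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(example_pasid_lock);
static DEFINE_IDR(example_pasid_idr);

/* irqsave/irqrestore preserve the caller's interrupt state, so the same
 * helper can be used from thread context and from paths that already run
 * with interrupts disabled. */
static void *example_lookup(u32 pasid)
{
	unsigned long irqflags;
	void *entry;

	spin_lock_irqsave(&example_pasid_lock, irqflags);
	entry = idr_find(&example_pasid_idr, pasid);
	spin_unlock_irqrestore(&example_pasid_lock, irqflags);
	return entry;
}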
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 1f089da1e615..f8fa653d4da7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -152,13 +152,6 @@ struct amdgpu_vm_bo_base {
bool moved;
};
-struct amdgpu_vm_pt {
- struct amdgpu_vm_bo_base base;
-
- /* array of page tables, one for each directory entry */
- struct amdgpu_vm_pt *entries;
-};
-
/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
/* number of dw to reserve per operation */
@@ -284,7 +277,7 @@ struct amdgpu_vm {
struct list_head done;
/* contains the page directory */
- struct amdgpu_vm_pt root;
+ struct amdgpu_vm_bo_base root;
struct dma_fence *last_update;
/* Scheduler entities for page table updates */
@@ -413,7 +406,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
struct dma_fence **fence, bool *free_table);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
- bool clear, bool *table_freed);
+ bool clear);
bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo *bo, bool evicted);
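With struct amdgpu_vm_pt gone, vm->root is a plain amdgpu_vm_bo_base and the per-directory-entry state is reached through the BO that backs it, as the to_amdgpu_bo_vm(vm->root.bo)->entries access earlier in this patch shows. Below is a standalone sketch of the container_of idiom behind that accessor; the layout is illustrative only, not the real amdgpu_bo_vm definition.

#include <stddef.h>
#include <stdio.h>

/* Standard container_of idiom: recover the enclosing structure from a
 * pointer to its embedded base object. */
#define example_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct example_bo { int size; };

struct example_bo_vm {
	struct example_bo bo;	/* base object embedded first */
	int num_entries;	/* subclass data keyed off the base BO */
};

static struct example_bo_vm *to_example_bo_vm(struct example_bo *bo)
{
	return example_container_of(bo, struct example_bo_vm, bo);
}

int main(void)
{
	struct example_bo_vm vmbo = { .bo = { .size = 4096 }, .num_entries = 512 };

	printf("%d entries behind the root BO\n", to_example_bo_vm(&vmbo.bo)->num_entries);
	return 0;
}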
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 422958152c2b..dbb551762805 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -112,7 +112,7 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
swap(p->vm->last_unlocked, f);
dma_fence_put(tmp);
} else {
- amdgpu_bo_fence(p->vm->root.base.bo, f, true);
+ amdgpu_bo_fence(p->vm->root.bo, f, true);
}
if (fence && !p->immediate)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 436ec246a7da..2fd77c36a1ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -463,6 +463,11 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
if (i == 1)
node->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
+ if (adev->gmc.xgmi.connected_to_cpu)
+ node->base.bus.caching = ttm_cached;
+ else
+ node->base.bus.caching = ttm_write_combined;
+
atomic64_add(vis_usage, &mgr->vis_usage);
*res = &node->base;
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
index 5b90efd6f6d0..3ac505d954c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
@@ -36,9 +36,12 @@ athub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
{
uint32_t def, data;
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
+ return;
+
def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
+ if (enable)
data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
else
data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
@@ -53,10 +56,13 @@ athub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
{
uint32_t def, data;
+ if (!((adev->cg_flags & AMD_CG_SUPPORT_MC_LS) &&
+ (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)))
+ return;
+
def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) &&
- (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
+ if (enable)
data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
else
data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
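Both athub hunks move the cg_flags check to an early return and keep the usual def/data register update: read once, compute the new value, write back only when something changed. A standalone sketch of that pattern, with the register access faked by a plain variable.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_CG_ENABLE_MASK 0x1u

static uint32_t example_reg;	/* stands in for RREG32/WREG32 */

static void example_update_cg(bool supported, bool enable)
{
	uint32_t def, data;

	if (!supported)		/* early return, as in the new hunks */
		return;

	def = data = example_reg;
	if (enable)
		data |= EXAMPLE_CG_ENABLE_MASK;
	else
		data &= ~EXAMPLE_CG_ENABLE_MASK;

	if (def != data)	/* skip the write when nothing changed */
		example_reg = data;
}

int main(void)
{
	example_update_cg(true, true);
	printf("reg = 0x%x\n", example_reg);
	return 0;
}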
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 33324427b555..7e0d8c092c7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -766,7 +766,7 @@ static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = {
static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VBLANK6 + 1;
+ adev->crtc_irq.num_types = adev->mode_info.num_crtc;
adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 2d56b60bc058..f5e9c022960b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -5086,47 +5086,44 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
4 + /* RMI */
1); /* SQG */
- if (adev->asic_type == CHIP_NAVI10 ||
- adev->asic_type == CHIP_NAVI14 ||
- adev->asic_type == CHIP_NAVI12) {
- mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
- for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
- wgp_active_bitmap = gfx_v10_0_get_wgp_active_bitmap_per_sh(adev);
- /*
- * Set corresponding TCP bits for the inactive WGPs in
- * GCRD_SA_TARGETS_DISABLE
- */
- gcrd_targets_disable_tcp = 0;
- /* Set TCP & SQC bits in UTCL1_UTCL0_INVREQ_DISABLE */
- utcl_invreq_disable = 0;
-
- for (k = 0; k < max_wgp_per_sh; k++) {
- if (!(wgp_active_bitmap & (1 << k))) {
- gcrd_targets_disable_tcp |= 3 << (2 * k);
- utcl_invreq_disable |= (3 << (2 * k)) |
- (3 << (2 * (max_wgp_per_sh + k)));
- }
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
+ wgp_active_bitmap = gfx_v10_0_get_wgp_active_bitmap_per_sh(adev);
+ /*
+ * Set corresponding TCP bits for the inactive WGPs in
+ * GCRD_SA_TARGETS_DISABLE
+ */
+ gcrd_targets_disable_tcp = 0;
+ /* Set TCP & SQC bits in UTCL1_UTCL0_INVREQ_DISABLE */
+ utcl_invreq_disable = 0;
+
+ for (k = 0; k < max_wgp_per_sh; k++) {
+ if (!(wgp_active_bitmap & (1 << k))) {
+ gcrd_targets_disable_tcp |= 3 << (2 * k);
+ gcrd_targets_disable_tcp |= 1 << (k + (max_wgp_per_sh * 2));
+ utcl_invreq_disable |= (3 << (2 * k)) |
+ (3 << (2 * (max_wgp_per_sh + k)));
}
-
- tmp = RREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE);
- /* only override TCP & SQC bits */
- tmp &= 0xffffffff << (4 * max_wgp_per_sh);
- tmp |= (utcl_invreq_disable & utcl_invreq_disable_mask);
- WREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE, tmp);
-
- tmp = RREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE);
- /* only override TCP bits */
- tmp &= 0xffffffff << (2 * max_wgp_per_sh);
- tmp |= (gcrd_targets_disable_tcp & gcrd_targets_disable_mask);
- WREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE, tmp);
}
- }
- gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
+ tmp = RREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE);
+ /* only override TCP & SQC bits */
+ tmp &= (0xffffffffU << (4 * max_wgp_per_sh));
+ tmp |= (utcl_invreq_disable & utcl_invreq_disable_mask);
+ WREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE, tmp);
+
+ tmp = RREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE);
+ /* only override TCP & SQC bits */
+ tmp &= (0xffffffffU << (3 * max_wgp_per_sh));
+ tmp |= (gcrd_targets_disable_tcp & gcrd_targets_disable_mask);
+ WREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE, tmp);
+ }
}
+
+ gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
}
static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)
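The re-indented harvest loop builds two disable masks per shader array: every inactive WGP contributes a pair of TCP bits plus one SQC bit to GCRD_SA_TARGETS_DISABLE, and TCP plus SQC bits to the UTCL0 invalidation-request mask. A standalone sketch of that mask construction, with made-up input values.

#include <stdint.h>
#include <stdio.h>

static void example_build_masks(uint32_t wgp_active_bitmap,
				unsigned int max_wgp_per_sh,
				uint32_t *gcrd_disable,
				uint32_t *invreq_disable)
{
	unsigned int k;

	*gcrd_disable = 0;
	*invreq_disable = 0;
	for (k = 0; k < max_wgp_per_sh; k++) {
		if (wgp_active_bitmap & (1u << k))
			continue;	/* active WGP, nothing to disable */
		*gcrd_disable |= 3u << (2 * k);				/* two TCPs */
		*gcrd_disable |= 1u << (k + max_wgp_per_sh * 2);	/* SQC */
		*invreq_disable |= (3u << (2 * k)) |
				   (3u << (2 * (max_wgp_per_sh + k)));
	}
}

int main(void)
{
	uint32_t gcrd, invreq;

	example_build_masks(0x3e /* WGP0 inactive */, 6, &gcrd, &invreq);
	printf("gcrd=0x%x invreq=0x%x\n", gcrd, invreq);
	return 0;
}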
@@ -7404,7 +7401,10 @@ static int gfx_v10_0_hw_init(void *handle)
* init golden registers and rlc resume may override some registers,
* reconfig them here
*/
- gfx_v10_0_tcp_harvest(adev);
+ if (adev->asic_type == CHIP_NAVI10 ||
+ adev->asic_type == CHIP_NAVI14 ||
+ adev->asic_type == CHIP_NAVI12)
+ gfx_v10_0_tcp_harvest(adev);
r = gfx_v10_0_cp_resume(adev);
if (r)
@@ -7777,6 +7777,9 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
{
uint32_t data, def;
+ if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)))
+ return;
+
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
/* 0 - Disable some blocks' MGCG */
@@ -7791,6 +7794,7 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__ENABLE_CGTS_LEGACY_MASK);
if (def != data)
@@ -7813,13 +7817,15 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
}
}
- } else {
+ } else if (!enable || !(adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
/* 1 - MGCG_OVERRIDE */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
- RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
+ RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__ENABLE_CGTS_LEGACY_MASK);
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
@@ -7845,22 +7851,34 @@ static void gfx_v10_0_update_3d_clock_gating(struct amdgpu_device *adev,
{
uint32_t data, def;
+ if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)))
+ return;
+
/* Enable 3D CGCG/CGLS */
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+ if (enable) {
/* write cmd to clear cgcg/cgls ov */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
+
/* unset CGCG override */
- data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
+ data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
+
/* update CGCG and CGLS override bits */
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
+
/* enable 3Dcgcg FSM(0x0000363f) */
def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
- data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
- RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
+ data = 0;
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
+ data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+ RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
+
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
+
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
@@ -7873,9 +7891,14 @@ static void gfx_v10_0_update_3d_clock_gating(struct amdgpu_device *adev,
} else {
/* Disable CGCG/CGLS */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
+
/* disable cgcg, cgls should be disabled */
- data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
- RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
+ data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
+ data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
+
/* disable cgcg and cgls in FSM */
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
@@ -7887,25 +7910,35 @@ static void gfx_v10_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
{
uint32_t def, data;
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
+ if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)))
+ return;
+
+ if (enable) {
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
+
/* unset CGCG override */
- data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
+ data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
+
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
- else
- data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
+
/* update CGCG and CGLS override bits */
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
/* enable cgcg FSM(0x0000363F) */
def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
- data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
- RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
+ data = 0;
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
+ data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+ RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
+
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
+
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
@@ -7917,8 +7950,14 @@ static void gfx_v10_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
} else {
def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
+
/* reset CGCG/CGLS bits */
- data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
+ data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
+ data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
+
/* disable cgcg and cgls in FSM */
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
@@ -7930,7 +7969,10 @@ static void gfx_v10_0_update_fine_grain_clock_gating(struct amdgpu_device *adev,
{
uint32_t def, data;
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG)) {
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
+ return;
+
+ if (enable) {
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
/* unset FGCG override */
data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
@@ -7961,6 +8003,97 @@ static void gfx_v10_0_update_fine_grain_clock_gating(struct amdgpu_device *adev,
}
}
+static void gfx_v10_0_apply_medium_grain_clock_gating_workaround(struct amdgpu_device *adev)
+{
+ uint32_t reg_data = 0;
+ uint32_t reg_idx = 0;
+ uint32_t i;
+
+ const uint32_t tcp_ctrl_regs[] = {
+ mmCGTS_SA0_WGP00_CU0_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP00_CU1_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP01_CU0_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP01_CU1_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP02_CU0_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP02_CU1_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP10_CU0_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP10_CU1_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP11_CU0_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP11_CU1_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP12_CU0_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP12_CU1_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP00_CU0_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP00_CU1_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP01_CU0_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP01_CU1_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP02_CU0_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP02_CU1_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP10_CU0_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP10_CU1_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP11_CU0_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP11_CU1_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP12_CU0_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP12_CU1_TCP_CTRL_REG
+ };
+
+ const uint32_t tcp_ctrl_regs_nv12[] = {
+ mmCGTS_SA0_WGP00_CU0_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP00_CU1_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP01_CU0_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP01_CU1_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP02_CU0_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP02_CU1_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP10_CU0_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP10_CU1_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP11_CU0_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP11_CU1_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP00_CU0_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP00_CU1_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP01_CU0_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP01_CU1_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP02_CU0_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP02_CU1_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP10_CU0_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP10_CU1_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP11_CU0_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP11_CU1_TCP_CTRL_REG,
+ };
+
+ const uint32_t sm_ctlr_regs[] = {
+ mmCGTS_SA0_QUAD0_SM_CTRL_REG,
+ mmCGTS_SA0_QUAD1_SM_CTRL_REG,
+ mmCGTS_SA1_QUAD0_SM_CTRL_REG,
+ mmCGTS_SA1_QUAD1_SM_CTRL_REG
+ };
+
+ if (adev->asic_type == CHIP_NAVI12) {
+ for (i = 0; i < ARRAY_SIZE(tcp_ctrl_regs_nv12); i++) {
+ reg_idx = adev->reg_offset[GC_HWIP][0][mmCGTS_SA0_WGP00_CU0_TCP_CTRL_REG_BASE_IDX] +
+ tcp_ctrl_regs_nv12[i];
+ reg_data = RREG32(reg_idx);
+ reg_data |= CGTS_SA0_WGP00_CU0_TCP_CTRL_REG__TCPI_LS_OVERRIDE_MASK;
+ WREG32(reg_idx, reg_data);
+ }
+ } else {
+ for (i = 0; i < ARRAY_SIZE(tcp_ctrl_regs); i++) {
+ reg_idx = adev->reg_offset[GC_HWIP][0][mmCGTS_SA0_WGP00_CU0_TCP_CTRL_REG_BASE_IDX] +
+ tcp_ctrl_regs[i];
+ reg_data = RREG32(reg_idx);
+ reg_data |= CGTS_SA0_WGP00_CU0_TCP_CTRL_REG__TCPI_LS_OVERRIDE_MASK;
+ WREG32(reg_idx, reg_data);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sm_ctlr_regs); i++) {
+ reg_idx = adev->reg_offset[GC_HWIP][0][mmCGTS_SA0_QUAD0_SM_CTRL_REG_BASE_IDX] +
+ sm_ctlr_regs[i];
+ reg_data = RREG32(reg_idx);
+ reg_data &= ~CGTS_SA0_QUAD0_SM_CTRL_REG__SM_MODE_MASK;
+ reg_data |= 2 << CGTS_SA0_QUAD0_SM_CTRL_REG__SM_MODE__SHIFT;
+ WREG32(reg_idx, reg_data);
+ }
+}
+
static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
bool enable)
{
@@ -7977,6 +8110,10 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
gfx_v10_0_update_3d_clock_gating(adev, enable);
/* === CGCG + CGLS === */
gfx_v10_0_update_coarse_grain_clock_gating(adev, enable);
+
+ if ((adev->asic_type >= CHIP_NAVI10) &&
+ (adev->asic_type <= CHIP_NAVI12))
+ gfx_v10_0_apply_medium_grain_clock_gating_workaround(adev);
} else {
/* CGCG/CGLS should be disabled before MGCG/MGLS
* === CGCG + CGLS ===
@@ -9324,17 +9461,22 @@ static void gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *
static u32 gfx_v10_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev)
{
- u32 data, wgp_bitmask;
- data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
- data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
+ u32 disabled_mask =
+ ~amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);
+ u32 efuse_setting = 0;
+ u32 vbios_setting = 0;
+
+ efuse_setting = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
+ efuse_setting &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
+ efuse_setting >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
- data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
- data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
+ vbios_setting = RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
+ vbios_setting &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
+ vbios_setting >>= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
- wgp_bitmask =
- amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);
+ disabled_mask |= efuse_setting | vbios_setting;
- return (~data) & wgp_bitmask;
+ return (~disabled_mask);
}
static u32 gfx_v10_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev)
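The rewritten WGP query starts from a disabled mask covering everything outside the valid WGP range, ORs in the efuse and vbios inactive bits, and returns the complement as the active bitmap. A standalone sketch of that computation.

#include <stdint.h>
#include <stdio.h>

/* amdgpu_gfx_create_bitmask(n) is simply the low n bits set; reproduced
 * here so the sketch is self-contained. */
static uint32_t example_create_bitmask(unsigned int bit_width)
{
	return (uint32_t)((1ull << bit_width) - 1);
}

static uint32_t example_wgp_active_bitmap(unsigned int max_wgp_per_sh,
					  uint32_t efuse_inactive,
					  uint32_t vbios_inactive)
{
	/* bits beyond the valid WGP range count as disabled from the start */
	uint32_t disabled_mask = ~example_create_bitmask(max_wgp_per_sh);

	disabled_mask |= efuse_inactive | vbios_inactive;
	return ~disabled_mask;
}

int main(void)
{
	/* 10 WGPs per shader array, WGP3 fused off, none disabled by vbios */
	printf("0x%x\n", example_wgp_active_bitmap(10, 1u << 3, 0));
	return 0;
}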
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index c0352dcc89be..1769c4cba2ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -782,11 +782,6 @@ void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, GC_THROTTLE_CTRL1, PWRBRK_STALL_EN, 1);
WREG32_SOC15(GC, 0, regGC_THROTTLE_CTRL1, tmp);
- WREG32_SOC15(GC, 0, regDIDT_IND_INDEX, ixDIDT_SQ_THROTTLE_CTRL);
- tmp = 0;
- tmp = REG_SET_FIELD(tmp, DIDT_SQ_THROTTLE_CTRL, PWRBRK_STALL_EN, 1);
- WREG32_SOC15(GC, 0, regDIDT_IND_DATA, tmp);
-
WREG32_SOC15(GC, 0, regGC_CAC_IND_INDEX, ixPWRBRK_STALL_PATTERN_CTRL);
tmp = 0;
tmp = REG_SET_FIELD(tmp, PWRBRK_STALL_PATTERN_CTRL, PWRBRK_END_STEP, 0x12);
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
index 7a15e669b68d..5793977953cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
@@ -90,45 +90,56 @@ static void hdp_v5_0_update_mem_power_gating(struct amdgpu_device *adev,
RC_MEM_POWER_SD_EN, 0);
WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
- /* only one clock gating mode (LS/DS/SD) can be enabled */
- if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_LS_EN, enable);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_LS_EN, enable);
- } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_DS_EN, enable);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_DS_EN, enable);
- } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_SD_EN, enable);
- /* RC should not use shut down mode, fallback to ds */
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_DS_EN, enable);
- }
-
- /* confirmed that IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have to
- * be set for SRAM LS/DS/SD */
- if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
- AMD_CG_SUPPORT_HDP_SD)) {
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_CTRL_EN, 1);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_CTRL_EN, 1);
+ /* Already disabled above. The actions below are for "enabled" only */
+ if (enable) {
+ /* only one clock gating mode (LS/DS/SD) can be enabled */
+ if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_LS_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_LS_EN, 1);
+ } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_DS_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, 1);
+ } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_SD_EN, 1);
+ /* RC should not use shut down mode, fallback to ds or ls if allowed */
+ if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS)
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, 1);
+ else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_LS_EN, 1);
+ }
+
+ /* confirmed that IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have to
+ * be set for SRAM LS/DS/SD */
+ if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
+ AMD_CG_SUPPORT_HDP_SD)) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_CTRL_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_CTRL_EN, 1);
+ WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+ }
}
- WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
-
- /* restore IPH & RC clock override after clock/power mode changing */
- WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
+ /* disable IPH & RC clock override after clock/power mode changing */
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ IPH_MEM_CLK_SOFT_OVERRIDE, 0);
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ RC_MEM_CLK_SOFT_OVERRIDE, 0);
+ WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}
static void hdp_v5_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
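The reworked power-gating path programs only one of the LS/DS/SD modes and keeps the RC memory out of shutdown. Below is a simplified standalone sketch of that precedence; the real hunk additionally checks that the DS or LS fallback for RC is itself supported before using it.

#include <stdbool.h>
#include <stdio.h>

enum example_mem_power { EXAMPLE_OFF, EXAMPLE_LS, EXAMPLE_DS, EXAMPLE_SD };

static void example_pick_modes(bool ls, bool ds, bool sd,
			       enum example_mem_power *iph,
			       enum example_mem_power *rc)
{
	*iph = *rc = EXAMPLE_OFF;
	if (ls) {
		*iph = *rc = EXAMPLE_LS;	/* light sleep wins when supported */
	} else if (ds) {
		*iph = *rc = EXAMPLE_DS;
	} else if (sd) {
		*iph = EXAMPLE_SD;
		*rc = EXAMPLE_DS;	/* RC should not use shut down mode */
	}
}

int main(void)
{
	enum example_mem_power iph, rc;

	example_pick_modes(false, false, true, &iph, &rc);
	printf("iph=%d rc=%d\n", iph, rc);
	return 0;
}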
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index f7e93bbc4e15..7ded6b2f058e 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -568,6 +568,9 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
{
uint32_t def, data, def1, data1;
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
+ return;
+
switch (adev->asic_type) {
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
@@ -582,7 +585,7 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
break;
}
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
+ if (enable) {
data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;
data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
@@ -627,6 +630,9 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade
{
uint32_t def, data;
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
+ return;
+
switch (adev->asic_type) {
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
@@ -639,7 +645,7 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade
break;
}
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
+ if (enable)
data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
else
data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 3ee481557fc9..ff2307d7ee0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -252,7 +252,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
* otherwise the mailbox msg will be ruined/reset by
* the VF FLR.
*/
- if (!down_read_trylock(&adev->reset_sem))
+ if (!down_write_trylock(&adev->reset_sem))
return;
amdgpu_virt_fini_data_exchange(adev);
@@ -268,7 +268,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
flr_done:
atomic_set(&adev->in_gpu_reset, 0);
- up_read(&adev->reset_sem);
+ up_write(&adev->reset_sem);
/* Trigger recovery for world switch failure if no TDR */
if (amdgpu_device_should_recover_gpu(adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 48e588d3c409..9f7aac435d69 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -273,7 +273,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
* otherwise the mailbox msg will be ruined/reset by
* the VF FLR.
*/
- if (!down_read_trylock(&adev->reset_sem))
+ if (!down_write_trylock(&adev->reset_sem))
return;
amdgpu_virt_fini_data_exchange(adev);
@@ -289,7 +289,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
flr_done:
atomic_set(&adev->in_gpu_reset, 0);
- up_read(&adev->reset_sem);
+ up_write(&adev->reset_sem);
/* Trigger recovery for world switch failure if no TDR */
if (amdgpu_device_should_recover_gpu(adev)
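Both mailbox FLR handlers now take reset_sem for writing, so the function-level reset excludes every reader of the device instead of running alongside them. A minimal kernel-style sketch of the trylock pattern used in the work handlers.

#include <linux/rwsem.h>

static DECLARE_RWSEM(example_reset_sem);

static void example_flr_work(void)
{
	/* bail out instead of sleeping if another reset already owns it */
	if (!down_write_trylock(&example_reset_sem))
		return;

	/* ... perform the VF FLR and reinitialization steps here ... */

	up_write(&example_reset_sem);
}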
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index 05ddec7ba7e2..7b79eeaa88aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -51,6 +51,8 @@
#define mmBIF_MMSCH1_DOORBELL_RANGE 0x01d8
#define mmBIF_MMSCH1_DOORBELL_RANGE_BASE_IDX 2
+#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
+
static void nbio_v2_3_remap_hdp_registers(struct amdgpu_device *adev)
{
WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
@@ -218,8 +220,11 @@ static void nbio_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *ade
{
uint32_t def, data;
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
+ return;
+
def = data = RREG32_PCIE(smnCPM_CONTROL);
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG)) {
+ if (enable) {
data |= (CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
@@ -244,8 +249,11 @@ static void nbio_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev
{
uint32_t def, data;
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
+ return;
+
def = data = RREG32_PCIE(smnPCIE_CNTL2);
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
+ if (enable) {
data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
PCIE_CNTL2__MST_MEM_LS_EN_MASK |
PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
@@ -463,6 +471,43 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
WREG32_PCIE(smnPCIE_LC_CNTL3, data);
}
+static void nbio_v2_3_apply_lc_spc_mode_wa(struct amdgpu_device *adev)
+{
+ uint32_t reg_data = 0;
+ uint32_t link_width = 0;
+
+ if (!((adev->asic_type >= CHIP_NAVI10) &&
+ (adev->asic_type <= CHIP_NAVI12)))
+ return;
+
+ reg_data = RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL);
+ link_width = (reg_data & PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
+ >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+
+ /*
+ * Program PCIE_LC_CNTL6.LC_SPC_MODE_8GT to 0x2 (4 symbols per clock data)
+ * if link_width is 0x3 (x4)
+ */
+ if (0x3 == link_width) {
+ reg_data = RREG32_PCIE(smnPCIE_LC_CNTL6);
+ reg_data &= ~PCIE_LC_CNTL6__LC_SPC_MODE_8GT_MASK;
+ reg_data |= (0x2 << PCIE_LC_CNTL6__LC_SPC_MODE_8GT__SHIFT);
+ WREG32_PCIE(smnPCIE_LC_CNTL6, reg_data);
+ }
+}
+
+static void nbio_v2_3_apply_l1_link_width_reconfig_wa(struct amdgpu_device *adev)
+{
+ uint32_t reg_data = 0;
+
+ if (adev->asic_type != CHIP_NAVI10)
+ return;
+
+ reg_data = RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL);
+ reg_data |= PCIE_LC_LINK_WIDTH_CNTL__LC_L1_RECONFIG_EN_MASK;
+ WREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL, reg_data);
+}
+
const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
.get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v2_3_get_hdp_flush_done_offset,
@@ -484,4 +529,6 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
.remap_hdp_registers = nbio_v2_3_remap_hdp_registers,
.enable_aspm = nbio_v2_3_enable_aspm,
.program_aspm = nbio_v2_3_program_aspm,
+ .apply_lc_spc_mode_wa = nbio_v2_3_apply_lc_spc_mode_wa,
+ .apply_l1_link_width_reconfig_wa = nbio_v2_3_apply_l1_link_width_reconfig_wa,
};
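nbio_v2_3_apply_lc_spc_mode_wa reads the negotiated link width out of PCIE_LC_LINK_WIDTH_CNTL and, only for an x4 link (encoding 0x3), programs LC_SPC_MODE_8GT to 0x2. The field handling is plain mask-and-shift; a standalone sketch with stand-in mask and shift values follows.

#include <stdint.h>
#include <stdio.h>

/* Stand-in field definitions; the real ones come from the PCIE register
 * headers used by the hunk above. */
#define EXAMPLE_LINK_WIDTH_RD_MASK	0x70u
#define EXAMPLE_LINK_WIDTH_RD_SHIFT	4
#define EXAMPLE_SPC_MODE_8GT_MASK	0x3u
#define EXAMPLE_SPC_MODE_8GT_SHIFT	0

static uint32_t example_extract(uint32_t reg, uint32_t mask, unsigned int shift)
{
	return (reg & mask) >> shift;
}

static uint32_t example_program_spc_mode(uint32_t lc_cntl6)
{
	lc_cntl6 &= ~EXAMPLE_SPC_MODE_8GT_MASK;		/* clear the field */
	lc_cntl6 |= 0x2u << EXAMPLE_SPC_MODE_8GT_SHIFT;	/* 4 symbols per clock */
	return lc_cntl6;
}

int main(void)
{
	uint32_t width_cntl = 0x30;	/* RD field reads back 0x3, an x4 link */

	if (example_extract(width_cntl, EXAMPLE_LINK_WIDTH_RD_MASK,
			    EXAMPLE_LINK_WIDTH_RD_SHIFT) == 0x3)
		printf("new LC_CNTL6 = 0x%x\n", example_program_spc_mode(0));
	return 0;
}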
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 455d0425787c..94a2c0742ee5 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -64,6 +64,13 @@
#include "smuio_v11_0.h"
#include "smuio_v11_0_6.h"
+#define codec_info_build(type, width, height, level) \
+ .codec_type = type,\
+ .max_width = width,\
+ .max_height = height,\
+ .max_pixels_per_frame = height * width,\
+ .max_level = level,
+
static const struct amd_ip_funcs nv_common_ip_funcs;
/* Navi */
@@ -309,6 +316,23 @@ static struct amdgpu_video_codecs sriov_sc_video_codecs_decode =
.codec_array = sriov_sc_video_codecs_decode_array,
};
+/* Beige Goby*/
+static const struct amdgpu_video_codec_info bg_video_codecs_decode_array[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+};
+
+static const struct amdgpu_video_codecs bg_video_codecs_decode = {
+ .codec_count = ARRAY_SIZE(bg_video_codecs_decode_array),
+ .codec_array = bg_video_codecs_decode_array,
+};
+
+static const struct amdgpu_video_codecs bg_video_codecs_encode = {
+ .codec_count = 0,
+ .codec_array = NULL,
+};
+
static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
const struct amdgpu_video_codecs **codecs)
{
@@ -335,6 +359,12 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
else
*codecs = &sc_video_codecs_decode;
return 0;
+ case CHIP_BEIGE_GOBY:
+ if (encode)
+ *codecs = &bg_video_codecs_encode;
+ else
+ *codecs = &bg_video_codecs_decode;
+ return 0;
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
@@ -1275,7 +1305,6 @@ static int nv_common_early_init(void *handle)
break;
case CHIP_VANGOGH:
- adev->apu_flags |= AMD_APU_IS_VANGOGH;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CP_LS |
@@ -1411,6 +1440,12 @@ static int nv_common_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (adev->nbio.funcs->apply_lc_spc_mode_wa)
+ adev->nbio.funcs->apply_lc_spc_mode_wa(adev);
+
+ if (adev->nbio.funcs->apply_l1_link_width_reconfig_wa)
+ adev->nbio.funcs->apply_l1_link_width_reconfig_wa(adev);
+
/* enable pcie gen2/3 link */
nv_pcie_gen3_enable(adev);
/* enable aspm */
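nv_common_hw_init only calls the two new workarounds when the NBIO implementation fills in the corresponding function pointers, so ASICs that do not need them leave the slots NULL. A standalone sketch of that optional-callback dispatch; the structure names are stand-ins.

#include <stdio.h>

struct example_dev;

struct example_nbio_funcs {
	void (*apply_lc_spc_mode_wa)(struct example_dev *dev);
	void (*apply_l1_link_width_reconfig_wa)(struct example_dev *dev);
};

struct example_dev {
	const struct example_nbio_funcs *nbio_funcs;
};

static void example_wa(struct example_dev *dev)
{
	(void)dev;
	puts("workaround applied");
}

static const struct example_nbio_funcs example_funcs = {
	.apply_lc_spc_mode_wa = example_wa,
	/* the second workaround is intentionally left unset */
};

static void example_hw_init(struct example_dev *dev)
{
	/* callbacks are optional: only versions that need a workaround set them */
	if (dev->nbio_funcs->apply_lc_spc_mode_wa)
		dev->nbio_funcs->apply_lc_spc_mode_wa(dev);
	if (dev->nbio_funcs->apply_l1_link_width_reconfig_wa)
		dev->nbio_funcs->apply_l1_link_width_reconfig_wa(dev);
}

int main(void)
{
	struct example_dev dev = { .nbio_funcs = &example_funcs };

	example_hw_init(&dev);
	return 0;
}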
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index f6d3180febc4..dd0dce254901 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -332,11 +332,16 @@ struct psp_gfx_uresp_fwar_db_info
uint32_t fwar_db_addr_hi;
};
+/* Command-specific response for boot config. */
+struct psp_gfx_uresp_bootcfg {
+ uint32_t boot_cfg; /* boot config data */
+};
+
/* Union of command-specific responses for GPCOM ring. */
-union psp_gfx_uresp
-{
- struct psp_gfx_uresp_reserved reserved;
- struct psp_gfx_uresp_fwar_db_info fwar_db_info;
+union psp_gfx_uresp {
+ struct psp_gfx_uresp_reserved reserved;
+ struct psp_gfx_uresp_bootcfg boot_cfg;
+ struct psp_gfx_uresp_fwar_db_info fwar_db_info;
};
/* Structure of GFX Response buffer.
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index fc400d95b83b..bc133db2d538 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -461,6 +461,7 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
struct amdgpu_device *adev = psp->adev;
if (amdgpu_sriov_vf(adev)) {
+ ring->ring_wptr = 0;
ret = psp_v11_0_ring_stop(psp, ring_type);
if (ret) {
DRM_ERROR("psp_v11_0_ring_stop_sriov failed!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index ce7377d2368f..b86dcbabb635 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -227,6 +227,7 @@ static int psp_v3_1_ring_create(struct psp_context *psp,
psp_v3_1_reroute_ih(psp);
if (amdgpu_sriov_vf(adev)) {
+ ring->ring_wptr = 0;
ret = psp_v3_1_ring_stop(psp, ring_type);
if (ret) {
DRM_ERROR("psp_v3_1_ring_stop_sriov failed!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index ae5464e2535a..8931000dcd41 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -144,7 +144,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
- SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003e0),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
};
@@ -288,7 +288,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0x003fff07, 0x40000051),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
- SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003e0),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x03fbe1fe)
};
@@ -1896,8 +1896,11 @@ static int sdma_v4_0_late_init(void *handle)
sdma_v4_0_setup_ulv(adev);
- if (adev->sdma.funcs && adev->sdma.funcs->reset_ras_error_count)
- adev->sdma.funcs->reset_ras_error_count(adev);
+ if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
+ if (adev->sdma.funcs &&
+ adev->sdma.funcs->reset_ras_error_count)
+ adev->sdma.funcs->reset_ras_error_count(adev);
+ }
if (adev->sdma.funcs && adev->sdma.funcs->ras_late_init)
return adev->sdma.funcs->ras_late_init(adev, &ih_info);
diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v11_0.c b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0.c
index e9c474c217ec..b6f1322f908c 100644
--- a/drivers/gpu/drm/amd/amdgpu/smuio_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0.c
@@ -43,9 +43,12 @@ static void smuio_v11_0_update_rom_clock_gating(struct amdgpu_device *adev, bool
if (adev->flags & AMD_IS_APU)
return;
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
+ return;
+
def = data = RREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0);
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
+ if (enable)
data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
else
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index de85577c9cfd..b02436401d46 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -1360,10 +1360,7 @@ static int soc15_common_early_init(void *handle)
break;
case CHIP_RAVEN:
adev->asic_funcs = &soc15_asic_funcs;
- if (adev->pdev->device == 0x15dd)
- adev->apu_flags |= AMD_APU_IS_RAVEN;
- if (adev->pdev->device == 0x15d8)
- adev->apu_flags |= AMD_APU_IS_PICASSO;
+
if (adev->rev_id >= 0x8)
adev->apu_flags |= AMD_APU_IS_RAVEN2;
@@ -1455,11 +1452,6 @@ static int soc15_common_early_init(void *handle)
break;
case CHIP_RENOIR:
adev->asic_funcs = &soc15_asic_funcs;
- if ((adev->pdev->device == 0x1636) ||
- (adev->pdev->device == 0x164c))
- adev->apu_flags |= AMD_APU_IS_RENOIR;
- else
- adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
if (adev->apu_flags & AMD_APU_IS_RENOIR)
adev->external_rev_id = adev->rev_id + 0x91;
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
index 89d20adfa001..af59a35788e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
@@ -234,7 +234,7 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
err_addr &= ~((0x1ULL << lsb) - 1);
/* translate umc channel address to soc pa, 3 parts are included */
- retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
+ retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
ADDR_OF_256B_BLOCK(channel_index) |
OFFSET_IN_256B_BLOCK(err_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 4c36fc5c9738..47d4f04cbd69 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -87,21 +87,18 @@ static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);
static int vcn_v3_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
if (amdgpu_sriov_vf(adev)) {
- adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
+ for (i = 0; i < VCN_INSTANCES_SIENNA_CICHLID; i++)
+ if (amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, i))
+ adev->vcn.num_vcn_inst++;
adev->vcn.harvest_config = 0;
adev->vcn.num_enc_rings = 1;
- if (adev->asic_type == CHIP_BEIGE_GOBY) {
- adev->vcn.num_vcn_inst = 1;
- adev->vcn.num_enc_rings = 0;
- }
-
} else {
if (adev->asic_type == CHIP_SIENNA_CICHLID) {
u32 harvest;
- int i;
adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
@@ -156,7 +153,8 @@ static int vcn_v3_0_sw_init(void *handle)
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
- if (adev->vcn.num_vcn_inst == VCN_INSTANCES_SIENNA_CICHLID) {
+ if ((adev->vcn.num_vcn_inst == VCN_INSTANCES_SIENNA_CICHLID) ||
+ (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)) {
adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1;
adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw;
adev->firmware.fw_size +=
@@ -330,19 +328,17 @@ static int vcn_v3_0_hw_init(void *handle)
continue;
ring = &adev->vcn.inst[i].ring_dec;
- if (ring->sched.ready) {
- ring->wptr = 0;
- ring->wptr_old = 0;
- vcn_v3_0_dec_ring_set_wptr(ring);
- }
+ ring->wptr = 0;
+ ring->wptr_old = 0;
+ vcn_v3_0_dec_ring_set_wptr(ring);
+ ring->sched.ready = true;
for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
ring = &adev->vcn.inst[i].ring_enc[j];
- if (ring->sched.ready) {
- ring->wptr = 0;
- ring->wptr_old = 0;
- vcn_v3_0_enc_ring_set_wptr(ring);
- }
+ ring->wptr = 0;
+ ring->wptr_old = 0;
+ vcn_v3_0_enc_ring_set_wptr(ring);
+ ring->sched.ready = true;
}
}
} else {
@@ -1309,8 +1305,6 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
uint32_t table_size;
uint32_t size, size_dw;
- bool is_vcn_ready;
-
struct mmsch_v3_0_cmd_direct_write
direct_wt = { {0} };
struct mmsch_v3_0_cmd_direct_read_modify_write
@@ -1502,30 +1496,6 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
}
}
- /* 6, check each VCN's init_status
- * if it remains as 0, then this VCN is not assigned to current VF
- * do not start ring for this VCN
- */
- size = sizeof(struct mmsch_v3_0_init_header);
- table_loc = (uint32_t *)table->cpu_addr;
- memcpy(&header, (void *)table_loc, size);
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- is_vcn_ready = (header.inst[i].init_status == 1);
- if (!is_vcn_ready)
- DRM_INFO("VCN(%d) engine is disabled by hypervisor\n", i);
-
- ring = &adev->vcn.inst[i].ring_dec;
- ring->sched.ready = is_vcn_ready;
- for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
- ring = &adev->vcn.inst[i].ring_enc[j];
- ring->sched.ready = is_vcn_ready;
- }
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index cf483c0db687..e48acdd03c1a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1393,7 +1393,6 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
long err = 0;
int i;
uint32_t *devices_arr = NULL;
- bool table_freed = false;
dev = kfd_device_by_id(GET_GPU_ID(args->handle));
if (!dev)
@@ -1451,8 +1450,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
goto get_mem_obj_from_handle_failed;
}
err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
- peer->kgd, (struct kgd_mem *)mem,
- peer_pdd->drm_priv, &table_freed);
+ peer->kgd, (struct kgd_mem *)mem, peer_pdd->drm_priv);
if (err) {
pr_err("Failed to map to gpu %d/%d\n",
i, args->n_devices);
@@ -1470,17 +1468,16 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
}
/* Flush TLBs after waiting for the page table updates to complete */
- if (table_freed) {
- for (i = 0; i < args->n_devices; i++) {
- peer = kfd_device_by_id(devices_arr[i]);
- if (WARN_ON_ONCE(!peer))
- continue;
- peer_pdd = kfd_get_process_device_data(peer, p);
- if (WARN_ON_ONCE(!peer_pdd))
- continue;
- kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
- }
+ for (i = 0; i < args->n_devices; i++) {
+ peer = kfd_device_by_id(devices_arr[i]);
+ if (WARN_ON_ONCE(!peer))
+ continue;
+ peer_pdd = kfd_get_process_device_data(peer, p);
+ if (WARN_ON_ONCE(!peer_pdd))
+ continue;
+ kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
}
+
kfree(devices_arr);
return err;
@@ -1566,7 +1563,6 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
i, args->n_devices);
goto unmap_memory_from_gpu_failed;
}
- kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
args->n_success = i+1;
}
kfree(devices_arr);
@@ -1780,9 +1776,6 @@ static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
struct kfd_ioctl_svm_args *args = data;
int r = 0;
- if (p->svm_disabled)
- return -EPERM;
-
pr_debug("start 0x%llx size 0x%llx op 0x%x nattr 0x%x\n",
args->start_addr, args->size, args->op, args->nattr);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index edb14e4a9f33..16a1713808c2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -486,9 +486,6 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
if (retval == -ETIME)
qpd->reset_wavefronts = true;
-
- mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
-
list_del(&q->list);
if (list_empty(&qpd->queues_list)) {
if (qpd->reset_wavefronts) {
@@ -523,6 +520,8 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
int retval;
uint64_t sdma_val = 0;
struct kfd_process_device *pdd = qpd_to_pdd(qpd);
+ struct mqd_manager *mqd_mgr =
+ dqm->mqd_mgrs[get_mqd_type_from_queue_type(q->properties.type)];
/* Get the SDMA queue stats */
if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
@@ -540,6 +539,8 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
pdd->sdma_past_activity_counter += sdma_val;
dqm_unlock(dqm);
+ mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
+
return retval;
}
@@ -1629,7 +1630,7 @@ out:
static int process_termination_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
- struct queue *q, *next;
+ struct queue *q;
struct device_process_node *cur, *next_dpn;
int retval = 0;
bool found = false;
@@ -1637,12 +1638,19 @@ static int process_termination_nocpsch(struct device_queue_manager *dqm,
dqm_lock(dqm);
/* Clear all user mode queues */
- list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
+ while (!list_empty(&qpd->queues_list)) {
+ struct mqd_manager *mqd_mgr;
int ret;
+ q = list_first_entry(&qpd->queues_list, struct queue, list);
+ mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
+ q->properties.type)];
ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
if (ret)
retval = ret;
+ dqm_unlock(dqm);
+ mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
+ dqm_lock(dqm);
}
/* Unregister process */
@@ -1674,36 +1682,34 @@ static int get_wave_state(struct device_queue_manager *dqm,
u32 *save_area_used_size)
{
struct mqd_manager *mqd_mgr;
- int r;
dqm_lock(dqm);
- if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
- q->properties.is_active || !q->device->cwsr_enabled) {
- r = -EINVAL;
- goto dqm_unlock;
- }
-
mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
- if (!mqd_mgr->get_wave_state) {
- r = -EINVAL;
- goto dqm_unlock;
+ if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
+ q->properties.is_active || !q->device->cwsr_enabled ||
+ !mqd_mgr->get_wave_state) {
+ dqm_unlock(dqm);
+ return -EINVAL;
}
- r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
- ctl_stack_used_size, save_area_used_size);
-
-dqm_unlock:
dqm_unlock(dqm);
- return r;
+
+ /*
+ * get_wave_state is outside the dqm lock to prevent circular locking
+ * and the queue should be protected against destruction by the process
+ * lock.
+ */
+ return mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
+ ctl_stack_used_size, save_area_used_size);
}
static int process_termination_cpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
int retval;
- struct queue *q, *next;
+ struct queue *q;
struct kernel_queue *kq, *kq_next;
struct mqd_manager *mqd_mgr;
struct device_process_node *cur, *next_dpn;
@@ -1760,24 +1766,26 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
qpd->reset_wavefronts = false;
}
- dqm_unlock(dqm);
-
- /* Outside the DQM lock because under the DQM lock we can't do
- * reclaim or take other locks that others hold while reclaiming.
- */
- if (found)
- kfd_dec_compute_active(dqm->dev);
-
/* Lastly, free mqd resources.
* Do free_mqd() after dqm_unlock to avoid circular locking.
*/
- list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
+ while (!list_empty(&qpd->queues_list)) {
+ q = list_first_entry(&qpd->queues_list, struct queue, list);
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
list_del(&q->list);
qpd->queue_count--;
+ dqm_unlock(dqm);
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
+ dqm_lock(dqm);
}
+ dqm_unlock(dqm);
+
+ /* Outside the DQM lock because under the DQM lock we can't do
+ * reclaim or take other locks that others hold while reclaiming.
+ */
+ if (found)
+ kfd_dec_compute_active(dqm->dev);
return retval;
}
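Both termination paths now pop queues off the list one at a time and call free_mqd with the dqm lock dropped, matching the comments above about reclaim and circular locking. A kernel-style sketch of that drop-lock-per-item teardown; the queue type and lock are stand-ins for the dqm structures.

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct example_queue {
	struct list_head list;
	void *mqd;
};

static DEFINE_MUTEX(example_lock);
static LIST_HEAD(example_queues);

static void example_teardown(void)
{
	struct example_queue *q;

	mutex_lock(&example_lock);
	while (!list_empty(&example_queues)) {
		q = list_first_entry(&example_queues, struct example_queue, list);
		list_del(&q->list);		/* unlink while still locked */
		mutex_unlock(&example_lock);
		kfree(q->mqd);			/* may reclaim; not under the lock */
		kfree(q);
		mutex_lock(&example_lock);	/* relock and re-check the list head */
	}
	mutex_unlock(&example_lock);
}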
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 91c50739b756..a9b329f0f862 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -405,10 +405,6 @@ int kfd_init_apertures(struct kfd_process *process)
case CHIP_POLARIS12:
case CHIP_VEGAM:
kfd_init_apertures_vi(pdd, id);
- /* VI GPUs cannot support SVM with only
- * 40 bits of virtual address space.
- */
- process->svm_disabled = true;
break;
case CHIP_VEGA10:
case CHIP_VEGA12:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 2660f03e63a7..dab290a4d19d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -218,7 +218,8 @@ svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
struct page *page;
page = pfn_to_page(pfn);
- page->zone_device_data = prange;
+ svm_range_bo_ref(prange->svm_bo);
+ page->zone_device_data = prange->svm_bo;
get_page(page);
lock_page(page);
}
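Each migrated device page now stores a reference-counted svm_bo in zone_device_data instead of the raw range, and svm_migrate_page_free drops that reference when the page goes away. A minimal kernel-style kref sketch of that per-page ownership rule.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct example_bo {
	struct kref kref;
};

static void example_bo_release(struct kref *kref)
{
	kfree(container_of(kref, struct example_bo, kref));
}

/* one reference per device page: taken when the page starts pointing at the
 * object, dropped from the page_free callback when the page dies */
static struct example_bo *example_bo_ref(struct example_bo *bo)
{
	kref_get(&bo->kref);
	return bo;
}

static void example_bo_unref(struct example_bo *bo)
{
	if (bo)
		kref_put(&bo->kref, example_bo_release);
}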
@@ -293,15 +294,13 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
for (i = j = 0; i < npages; i++) {
struct page *spage;
- dst[i] = cursor.start + (j << PAGE_SHIFT);
- migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
- svm_migrate_get_vram_page(prange, migrate->dst[i]);
-
- migrate->dst[i] = migrate_pfn(migrate->dst[i]);
- migrate->dst[i] |= MIGRATE_PFN_LOCKED;
-
- if (migrate->src[i] & MIGRATE_PFN_VALID) {
- spage = migrate_pfn_to_page(migrate->src[i]);
+ spage = migrate_pfn_to_page(migrate->src[i]);
+ if (spage && !is_zone_device_page(spage)) {
+ dst[i] = cursor.start + (j << PAGE_SHIFT);
+ migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
+ svm_migrate_get_vram_page(prange, migrate->dst[i]);
+ migrate->dst[i] = migrate_pfn(migrate->dst[i]);
+ migrate->dst[i] |= MIGRATE_PFN_LOCKED;
src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
DMA_TO_DEVICE);
r = dma_mapping_error(dev, src[i]);
@@ -355,6 +354,20 @@ out_free_vram_pages:
}
}
+#ifdef DEBUG_FORCE_MIXED_DOMAINS
+ for (i = 0, j = 0; i < npages; i += 4, j++) {
+ if (j & 1)
+ continue;
+ svm_migrate_put_vram_page(adev, dst[i]);
+ migrate->dst[i] = 0;
+ svm_migrate_put_vram_page(adev, dst[i + 1]);
+ migrate->dst[i + 1] = 0;
+ svm_migrate_put_vram_page(adev, dst[i + 2]);
+ migrate->dst[i + 2] = 0;
+ svm_migrate_put_vram_page(adev, dst[i + 3]);
+ migrate->dst[i + 3] = 0;
+ }
+#endif
out:
return r;
}
@@ -365,20 +378,20 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
uint64_t end)
{
uint64_t npages = (end - start) >> PAGE_SHIFT;
+ struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
struct migrate_vma migrate;
dma_addr_t *scratch;
size_t size;
void *buf;
int r = -ENOMEM;
- int retry = 0;
memset(&migrate, 0, sizeof(migrate));
migrate.vma = vma;
migrate.start = start;
migrate.end = end;
migrate.flags = MIGRATE_VMA_SELECT_SYSTEM;
- migrate.pgmap_owner = adev;
+ migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
size = 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t);
size *= npages;
@@ -390,7 +403,6 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
migrate.dst = migrate.src + npages;
scratch = (dma_addr_t *)(migrate.dst + npages);
-retry:
r = migrate_vma_setup(&migrate);
if (r) {
pr_debug("failed %d prepare migrate svms 0x%p [0x%lx 0x%lx]\n",
@@ -398,17 +410,9 @@ retry:
goto out_free;
}
if (migrate.cpages != npages) {
- pr_debug("collect 0x%lx/0x%llx pages, retry\n", migrate.cpages,
+ pr_debug("Partial migration. 0x%lx/0x%llx pages can be migrated\n",
+ migrate.cpages,
npages);
- migrate_vma_finalize(&migrate);
- if (retry++ >= 3) {
- r = -ENOMEM;
- pr_debug("failed %d migrate svms 0x%p [0x%lx 0x%lx]\n",
- r, prange->svms, prange->start, prange->last);
- goto out_free;
- }
-
- goto retry;
}
if (migrate.cpages) {
@@ -425,6 +429,12 @@ retry:
out_free:
kvfree(buf);
out:
+ if (!r) {
+ pdd = svm_range_get_pdd_by_adev(prange, adev);
+ if (pdd)
+ WRITE_ONCE(pdd->page_in, pdd->page_in + migrate.cpages);
+ }
+
return r;
}
@@ -464,7 +474,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
prange->start, prange->last, best_loc);
/* FIXME: workaround for page locking bug with invalid pages */
- svm_range_prefault(prange, mm);
+ svm_range_prefault(prange, mm, SVM_ADEV_PGMAP_OWNER(adev));
start = prange->start << PAGE_SHIFT;
end = (prange->last + 1) << PAGE_SHIFT;
@@ -493,15 +503,19 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
static void svm_migrate_page_free(struct page *page)
{
- /* Keep this function to avoid warning */
+ struct svm_range_bo *svm_bo = page->zone_device_data;
+
+ if (svm_bo) {
+ pr_debug("svm_bo ref left: %d\n", kref_read(&svm_bo->kref));
+ svm_range_bo_unref(svm_bo);
+ }
}
static int
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
- dma_addr_t *scratch)
+ dma_addr_t *scratch, uint64_t npages)
{
- uint64_t npages = migrate->cpages;
struct device *dev = adev->dev;
uint64_t *src;
dma_addr_t *dst;
@@ -518,15 +532,23 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
src = (uint64_t *)(scratch + npages);
dst = scratch;
- for (i = 0, j = 0; i < npages; i++, j++, addr += PAGE_SIZE) {
+ for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
struct page *spage;
spage = migrate_pfn_to_page(migrate->src[i]);
- if (!spage) {
- pr_debug("failed get spage svms 0x%p [0x%lx 0x%lx]\n",
+ if (!spage || !is_zone_device_page(spage)) {
+ pr_debug("invalid page. Could be in CPU already svms 0x%p [0x%lx 0x%lx]\n",
prange->svms, prange->start, prange->last);
- r = -ENOMEM;
- goto out_oom;
+ if (j) {
+ r = svm_migrate_copy_memory_gart(adev, dst + i - j,
+ src + i - j, j,
+ FROM_VRAM_TO_RAM,
+ mfence);
+ if (r)
+ goto out_oom;
+ j = 0;
+ }
+ continue;
}
src[i] = svm_migrate_addr(adev, spage);
if (i > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
@@ -559,6 +581,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
migrate->dst[i] |= MIGRATE_PFN_LOCKED;
+ j++;
}
r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j,
@@ -581,6 +604,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start, uint64_t end)
{
uint64_t npages = (end - start) >> PAGE_SHIFT;
+ struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
struct migrate_vma migrate;
dma_addr_t *scratch;
@@ -593,7 +617,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
migrate.start = start;
migrate.end = end;
migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
- migrate.pgmap_owner = adev;
+ migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
size = 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t);
size *= npages;
@@ -616,7 +640,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
if (migrate.cpages) {
r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
- scratch);
+ scratch, npages);
migrate_vma_pages(&migrate);
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
@@ -630,6 +654,12 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
out_free:
kvfree(buf);
out:
+ if (!r) {
+ pdd = svm_range_get_pdd_by_adev(prange, adev);
+ if (pdd)
+ WRITE_ONCE(pdd->page_out,
+ pdd->page_out + migrate.cpages);
+ }
return r;
}
@@ -859,7 +889,7 @@ int svm_migrate_init(struct amdgpu_device *adev)
pgmap->range.start = res->start;
pgmap->range.end = res->end;
pgmap->ops = &svm_migrate_pgmap_ops;
- pgmap->owner = adev;
+ pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
pgmap->flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
r = devm_memremap_pages(adev->dev, pgmap);
if (IS_ERR(r)) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 329684ee5d6e..3426743ed228 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -730,6 +730,15 @@ struct kfd_process_device {
* number of CU's a device has along with number of other competing processes
*/
struct attribute attr_cu_occupancy;
+
+ /* sysfs counters for GPU retry fault and page migration tracking */
+ struct kobject *kobj_counters;
+ struct attribute attr_faults;
+ struct attribute attr_page_in;
+ struct attribute attr_page_out;
+ uint64_t faults;
+ uint64_t page_in;
+ uint64_t page_out;
};
#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
@@ -743,6 +752,7 @@ struct svm_range_list {
spinlock_t deferred_list_lock;
atomic_t evicted_ranges;
struct delayed_work restore_work;
+ DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE);
};
/* Process data */
@@ -826,7 +836,6 @@ struct kfd_process {
/* shared virtual memory registered by this process */
struct svm_range_list svms;
- bool svm_disabled;
bool xnack_enabled;
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index f1f40bba5c60..8a2c6fc438c0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -416,6 +416,29 @@ static ssize_t kfd_procfs_stats_show(struct kobject *kobj,
return 0;
}
+static ssize_t kfd_sysfs_counters_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct kfd_process_device *pdd;
+
+ if (!strcmp(attr->name, "faults")) {
+ pdd = container_of(attr, struct kfd_process_device,
+ attr_faults);
+ return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->faults));
+ }
+ if (!strcmp(attr->name, "page_in")) {
+ pdd = container_of(attr, struct kfd_process_device,
+ attr_page_in);
+ return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_in));
+ }
+ if (!strcmp(attr->name, "page_out")) {
+ pdd = container_of(attr, struct kfd_process_device,
+ attr_page_out);
+ return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_out));
+ }
+ return 0;
+}
+
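
The new counters are updated with WRITE_ONCE() from the fault and migration paths and read with READ_ONCE() in the show function above, so they are lockless, approximate counters rather than synchronized statistics. A minimal userspace sketch for reading them; the sysfs base path, pid and gpuid below are assumptions for illustration only:

/* Reads the per-process, per-GPU counters exposed by this patch, assuming
 * the layout /sys/class/kfd/kfd/proc/<pid>/counters_<gpuid>/{faults,page_in,page_out}. */
#include <stdio.h>

static long long read_counter(const char *pid, const char *gpuid,
			      const char *name)
{
	char path[256];
	long long val = -1;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/class/kfd/kfd/proc/%s/counters_%s/%s",
		 pid, gpuid, name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%lld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	const char *pid = "1234", *gpuid = "1002";	/* placeholders */

	printf("faults:   %lld\n", read_counter(pid, gpuid, "faults"));
	printf("page_in:  %lld\n", read_counter(pid, gpuid, "page_in"));
	printf("page_out: %lld\n", read_counter(pid, gpuid, "page_out"));
	return 0;
}
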
static struct attribute attr_queue_size = {
.name = "size",
.mode = KFD_SYSFS_FILE_MODE
@@ -451,13 +474,18 @@ static const struct sysfs_ops procfs_stats_ops = {
.show = kfd_procfs_stats_show,
};
-static struct attribute *procfs_stats_attrs[] = {
- NULL
-};
-
static struct kobj_type procfs_stats_type = {
.sysfs_ops = &procfs_stats_ops,
- .default_attrs = procfs_stats_attrs,
+ .release = kfd_procfs_kobj_release,
+};
+
+static const struct sysfs_ops sysfs_counters_ops = {
+ .show = kfd_sysfs_counters_show,
+};
+
+static struct kobj_type sysfs_counters_type = {
+ .sysfs_ops = &sysfs_counters_ops,
+ .release = kfd_procfs_kobj_release,
};
int kfd_procfs_add_queue(struct queue *q)
@@ -484,34 +512,31 @@ int kfd_procfs_add_queue(struct queue *q)
return 0;
}
-static int kfd_sysfs_create_file(struct kfd_process *p, struct attribute *attr,
+static void kfd_sysfs_create_file(struct kobject *kobj, struct attribute *attr,
char *name)
{
- int ret = 0;
+ int ret;
- if (!p || !attr || !name)
- return -EINVAL;
+ if (!kobj || !attr || !name)
+ return;
attr->name = name;
attr->mode = KFD_SYSFS_FILE_MODE;
sysfs_attr_init(attr);
- ret = sysfs_create_file(p->kobj, attr);
-
- return ret;
+ ret = sysfs_create_file(kobj, attr);
+ if (ret)
+ pr_warn("Create sysfs %s/%s failed %d", kobj->name, name, ret);
}
-static int kfd_procfs_add_sysfs_stats(struct kfd_process *p)
+static void kfd_procfs_add_sysfs_stats(struct kfd_process *p)
{
- int ret = 0;
+ int ret;
int i;
char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];
- if (!p)
- return -EINVAL;
-
- if (!p->kobj)
- return -EFAULT;
+ if (!p || !p->kobj)
+ return;
/*
* Create sysfs files for each GPU:
@@ -521,63 +546,87 @@ static int kfd_procfs_add_sysfs_stats(struct kfd_process *p)
*/
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
- struct kobject *kobj_stats;
snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
"stats_%u", pdd->dev->id);
- kobj_stats = kfd_alloc_struct(kobj_stats);
- if (!kobj_stats)
- return -ENOMEM;
+ pdd->kobj_stats = kfd_alloc_struct(pdd->kobj_stats);
+ if (!pdd->kobj_stats)
+ return;
- ret = kobject_init_and_add(kobj_stats,
- &procfs_stats_type,
- p->kobj,
- stats_dir_filename);
+ ret = kobject_init_and_add(pdd->kobj_stats,
+ &procfs_stats_type,
+ p->kobj,
+ stats_dir_filename);
if (ret) {
pr_warn("Creating KFD proc/stats_%s folder failed",
- stats_dir_filename);
- kobject_put(kobj_stats);
- goto err;
+ stats_dir_filename);
+ kobject_put(pdd->kobj_stats);
+ pdd->kobj_stats = NULL;
+ return;
}
- pdd->kobj_stats = kobj_stats;
- pdd->attr_evict.name = "evicted_ms";
- pdd->attr_evict.mode = KFD_SYSFS_FILE_MODE;
- sysfs_attr_init(&pdd->attr_evict);
- ret = sysfs_create_file(kobj_stats, &pdd->attr_evict);
- if (ret)
- pr_warn("Creating eviction stats for gpuid %d failed",
- (int)pdd->dev->id);
-
+ kfd_sysfs_create_file(pdd->kobj_stats, &pdd->attr_evict,
+ "evicted_ms");
/* Add sysfs file to report compute unit occupancy */
- if (pdd->dev->kfd2kgd->get_cu_occupancy != NULL) {
- pdd->attr_cu_occupancy.name = "cu_occupancy";
- pdd->attr_cu_occupancy.mode = KFD_SYSFS_FILE_MODE;
- sysfs_attr_init(&pdd->attr_cu_occupancy);
- ret = sysfs_create_file(kobj_stats,
- &pdd->attr_cu_occupancy);
- if (ret)
- pr_warn("Creating %s failed for gpuid: %d",
- pdd->attr_cu_occupancy.name,
- (int)pdd->dev->id);
- }
+ if (pdd->dev->kfd2kgd->get_cu_occupancy)
+ kfd_sysfs_create_file(pdd->kobj_stats,
+ &pdd->attr_cu_occupancy,
+ "cu_occupancy");
}
-err:
- return ret;
}
-
-static int kfd_procfs_add_sysfs_files(struct kfd_process *p)
+static void kfd_procfs_add_sysfs_counters(struct kfd_process *p)
{
int ret = 0;
int i;
+ char counters_dir_filename[MAX_SYSFS_FILENAME_LEN];
- if (!p)
- return -EINVAL;
+ if (!p || !p->kobj)
+ return;
- if (!p->kobj)
- return -EFAULT;
+ /*
+ * Create sysfs files for each GPU which supports SVM
+ * - proc/<pid>/counters_<gpuid>/
+ * - proc/<pid>/counters_<gpuid>/faults
+ * - proc/<pid>/counters_<gpuid>/page_in
+ * - proc/<pid>/counters_<gpuid>/page_out
+ */
+ for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
+ struct kfd_process_device *pdd = p->pdds[i];
+ struct kobject *kobj_counters;
+
+ snprintf(counters_dir_filename, MAX_SYSFS_FILENAME_LEN,
+ "counters_%u", pdd->dev->id);
+ kobj_counters = kfd_alloc_struct(kobj_counters);
+ if (!kobj_counters)
+ return;
+
+ ret = kobject_init_and_add(kobj_counters, &sysfs_counters_type,
+ p->kobj, counters_dir_filename);
+ if (ret) {
+ pr_warn("Creating KFD proc/%s folder failed",
+ counters_dir_filename);
+ kobject_put(kobj_counters);
+ return;
+ }
+
+ pdd->kobj_counters = kobj_counters;
+ kfd_sysfs_create_file(kobj_counters, &pdd->attr_faults,
+ "faults");
+ kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_in,
+ "page_in");
+ kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_out,
+ "page_out");
+ }
+}
+
+static void kfd_procfs_add_sysfs_files(struct kfd_process *p)
+{
+ int i;
+
+ if (!p || !p->kobj)
+ return;
/*
* Create sysfs files for each GPU:
@@ -589,20 +638,14 @@ static int kfd_procfs_add_sysfs_files(struct kfd_process *p)
snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
pdd->dev->id);
- ret = kfd_sysfs_create_file(p, &pdd->attr_vram, pdd->vram_filename);
- if (ret)
- pr_warn("Creating vram usage for gpu id %d failed",
- (int)pdd->dev->id);
+ kfd_sysfs_create_file(p->kobj, &pdd->attr_vram,
+ pdd->vram_filename);
snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u",
pdd->dev->id);
- ret = kfd_sysfs_create_file(p, &pdd->attr_sdma, pdd->sdma_filename);
- if (ret)
- pr_warn("Creating sdma usage for gpu id %d failed",
- (int)pdd->dev->id);
+ kfd_sysfs_create_file(p->kobj, &pdd->attr_sdma,
+ pdd->sdma_filename);
}
-
- return ret;
}
void kfd_procfs_del_queue(struct queue *q)
@@ -671,8 +714,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
if (err)
goto err_alloc_mem;
- err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem,
- pdd->drm_priv, NULL);
+ err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->drm_priv);
if (err)
goto err_map_mem;
@@ -800,28 +842,17 @@ struct kfd_process *kfd_create_process(struct file *filep)
goto out;
}
- process->attr_pasid.name = "pasid";
- process->attr_pasid.mode = KFD_SYSFS_FILE_MODE;
- sysfs_attr_init(&process->attr_pasid);
- ret = sysfs_create_file(process->kobj, &process->attr_pasid);
- if (ret)
- pr_warn("Creating pasid for pid %d failed",
- (int)process->lead_thread->pid);
+ kfd_sysfs_create_file(process->kobj, &process->attr_pasid,
+ "pasid");
process->kobj_queues = kobject_create_and_add("queues",
process->kobj);
if (!process->kobj_queues)
pr_warn("Creating KFD proc/queues folder failed");
- ret = kfd_procfs_add_sysfs_stats(process);
- if (ret)
- pr_warn("Creating sysfs stats dir for pid %d failed",
- (int)process->lead_thread->pid);
-
- ret = kfd_procfs_add_sysfs_files(process);
- if (ret)
- pr_warn("Creating sysfs usage file for pid %d failed",
- (int)process->lead_thread->pid);
+ kfd_procfs_add_sysfs_stats(process);
+ kfd_procfs_add_sysfs_files(process);
+ kfd_procfs_add_sysfs_counters(process);
}
out:
if (!IS_ERR(process))
@@ -964,6 +995,50 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
p->n_pdds = 0;
}
+static void kfd_process_remove_sysfs(struct kfd_process *p)
+{
+ struct kfd_process_device *pdd;
+ int i;
+
+ if (!p->kobj)
+ return;
+
+ sysfs_remove_file(p->kobj, &p->attr_pasid);
+ kobject_del(p->kobj_queues);
+ kobject_put(p->kobj_queues);
+ p->kobj_queues = NULL;
+
+ for (i = 0; i < p->n_pdds; i++) {
+ pdd = p->pdds[i];
+
+ sysfs_remove_file(p->kobj, &pdd->attr_vram);
+ sysfs_remove_file(p->kobj, &pdd->attr_sdma);
+
+ sysfs_remove_file(pdd->kobj_stats, &pdd->attr_evict);
+ if (pdd->dev->kfd2kgd->get_cu_occupancy)
+ sysfs_remove_file(pdd->kobj_stats,
+ &pdd->attr_cu_occupancy);
+ kobject_del(pdd->kobj_stats);
+ kobject_put(pdd->kobj_stats);
+ pdd->kobj_stats = NULL;
+ }
+
+ for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
+ pdd = p->pdds[i];
+
+ sysfs_remove_file(pdd->kobj_counters, &pdd->attr_faults);
+ sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_in);
+ sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_out);
+ kobject_del(pdd->kobj_counters);
+ kobject_put(pdd->kobj_counters);
+ pdd->kobj_counters = NULL;
+ }
+
+ kobject_del(p->kobj);
+ kobject_put(p->kobj);
+ p->kobj = NULL;
+}
+
/* No process locking is needed in this function, because the process
* is not findable any more. We must assume that no other thread is
* using it any more, otherwise we couldn't safely free the process
@@ -973,33 +1048,7 @@ static void kfd_process_wq_release(struct work_struct *work)
{
struct kfd_process *p = container_of(work, struct kfd_process,
release_work);
- int i;
-
- /* Remove the procfs files */
- if (p->kobj) {
- sysfs_remove_file(p->kobj, &p->attr_pasid);
- kobject_del(p->kobj_queues);
- kobject_put(p->kobj_queues);
- p->kobj_queues = NULL;
-
- for (i = 0; i < p->n_pdds; i++) {
- struct kfd_process_device *pdd = p->pdds[i];
-
- sysfs_remove_file(p->kobj, &pdd->attr_vram);
- sysfs_remove_file(p->kobj, &pdd->attr_sdma);
- sysfs_remove_file(p->kobj, &pdd->attr_evict);
- if (pdd->dev->kfd2kgd->get_cu_occupancy != NULL)
- sysfs_remove_file(p->kobj, &pdd->attr_cu_occupancy);
- kobject_del(pdd->kobj_stats);
- kobject_put(pdd->kobj_stats);
- pdd->kobj_stats = NULL;
- }
-
- kobject_del(p->kobj);
- kobject_put(p->kobj);
- p->kobj = NULL;
- }
-
+ kfd_process_remove_sysfs(p);
kfd_iommu_unbind_process(p);
kfd_process_free_outstanding_kfd_bos(p);
@@ -1260,7 +1309,6 @@ static struct kfd_process *create_process(const struct task_struct *thread)
process->mm = thread->mm;
process->lead_thread = thread->group_leader;
process->n_pdds = 0;
- process->svm_disabled = false;
INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
process->last_restore_timestamp = get_jiffies_64();
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 95a6c36cea4c..243dd1efcdbf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -153,6 +153,7 @@ void pqm_uninit(struct process_queue_manager *pqm)
if (pqn->q && pqn->q->gws)
amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info,
pqn->q->gws);
+ kfd_procfs_del_queue(pqn->q);
uninit_queue(pqn->q);
list_del(&pqn->process_queue_list);
kfree(pqn);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index da569d417d1d..c7b364e4a287 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -119,28 +119,40 @@ static void svm_range_remove_notifier(struct svm_range *prange)
}
static int
-svm_range_dma_map_dev(struct device *dev, dma_addr_t **dma_addr,
- unsigned long *hmm_pfns, uint64_t npages)
+svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
+ unsigned long *hmm_pfns, uint32_t gpuidx)
{
enum dma_data_direction dir = DMA_BIDIRECTIONAL;
- dma_addr_t *addr = *dma_addr;
+ dma_addr_t *addr = prange->dma_addr[gpuidx];
+ struct device *dev = adev->dev;
struct page *page;
int i, r;
if (!addr) {
- addr = kvmalloc_array(npages, sizeof(*addr),
+ addr = kvmalloc_array(prange->npages, sizeof(*addr),
GFP_KERNEL | __GFP_ZERO);
if (!addr)
return -ENOMEM;
- *dma_addr = addr;
+ prange->dma_addr[gpuidx] = addr;
}
- for (i = 0; i < npages; i++) {
+ for (i = 0; i < prange->npages; i++) {
if (WARN_ONCE(addr[i] && !dma_mapping_error(dev, addr[i]),
"leaking dma mapping\n"))
dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
page = hmm_pfn_to_page(hmm_pfns[i]);
+ if (is_zone_device_page(page)) {
+ struct amdgpu_device *bo_adev =
+ amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
+
+ addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
+ bo_adev->vm_manager.vram_base_offset -
+ bo_adev->kfd.dev->pgmap.range.start;
+ addr[i] |= SVM_RANGE_VRAM_DOMAIN;
+ pr_debug("vram address detected: 0x%llx\n", addr[i]);
+ continue;
+ }
addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
r = dma_mapping_error(dev, addr[i]);
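
For device-private (VRAM) pages the patch skips dma_map_page() and derives the GPU-visible address directly from the HMM pfn: the pfn is rebased from the registered pgmap range onto the device's VRAM aperture, and the low bit, free because the address is page aligned, tags the entry as SVM_RANGE_VRAM_DOMAIN. A standalone toy calculation of that formula, with made-up numbers:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT		12
#define SVM_RANGE_VRAM_DOMAIN	(1ULL << 0)

int main(void)
{
	uint64_t hmm_pfn = 0x80001234;			/* pfn inside the pgmap range */
	uint64_t pgmap_range_start = 0x80000000ULL << PAGE_SHIFT;
	uint64_t vram_base_offset = 0x8000000000ULL;	/* GPU address of the VRAM aperture */
	uint64_t addr;

	/* system pfn -> offset within the device-private range -> VRAM address */
	addr = (hmm_pfn << PAGE_SHIFT) + vram_base_offset - pgmap_range_start;
	addr |= SVM_RANGE_VRAM_DOMAIN;	/* low bit marks the entry as VRAM */

	printf("dma_addr entry = 0x%llx\n", (unsigned long long)addr);
	return 0;
}
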
if (r) {
@@ -175,8 +187,7 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
}
adev = (struct amdgpu_device *)pdd->dev->kgd;
- r = svm_range_dma_map_dev(adev->dev, &prange->dma_addr[gpuidx],
- hmm_pfns, prange->npages);
+ r = svm_range_dma_map_dev(adev, prange, hmm_pfns, gpuidx);
if (r)
break;
}
@@ -281,7 +292,8 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
p = container_of(svms, struct kfd_process, svms);
if (p->xnack_enabled)
- bitmap_fill(prange->bitmap_access, MAX_GPU_INSTANCE);
+ bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
+ MAX_GPU_INSTANCE);
svm_range_set_default_attributes(&prange->preferred_loc,
&prange->prefetch_loc,
@@ -300,14 +312,6 @@ static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
return true;
}
-static struct svm_range_bo *svm_range_bo_ref(struct svm_range_bo *svm_bo)
-{
- if (svm_bo)
- kref_get(&svm_bo->kref);
-
- return svm_bo;
-}
-
static void svm_range_bo_release(struct kref *kref)
{
struct svm_range_bo *svm_bo;
@@ -346,7 +350,7 @@ static void svm_range_bo_release(struct kref *kref)
kfree(svm_bo);
}
-static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
+void svm_range_bo_unref(struct svm_range_bo *svm_bo)
{
if (!svm_bo)
return;
@@ -563,6 +567,24 @@ svm_range_get_adev_by_id(struct svm_range *prange, uint32_t gpu_id)
return (struct amdgpu_device *)pdd->dev->kgd;
}
+struct kfd_process_device *
+svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev)
+{
+ struct kfd_process *p;
+ int32_t gpu_idx, gpuid;
+ int r;
+
+ p = container_of(prange->svms, struct kfd_process, svms);
+
+ r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpu_idx);
+ if (r) {
+ pr_debug("failed to get device id by adev %p\n", adev);
+ return NULL;
+ }
+
+ return kfd_process_device_from_gpuidx(p, gpu_idx);
+}
+
static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
{
struct ttm_operation_ctx ctx = { false, false };
@@ -577,36 +599,25 @@ svm_range_check_attr(struct kfd_process *p,
uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
{
uint32_t i;
- int gpuidx;
for (i = 0; i < nattr; i++) {
+ uint32_t val = attrs[i].value;
+ int gpuidx = MAX_GPU_INSTANCE;
+
switch (attrs[i].type) {
case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
- if (attrs[i].value != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
- attrs[i].value != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
- kfd_process_gpuidx_from_gpuid(p,
- attrs[i].value) < 0) {
- pr_debug("no GPU 0x%x found\n", attrs[i].value);
- return -EINVAL;
- }
+ if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
+ val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
+ gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
break;
case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
- if (attrs[i].value != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
- kfd_process_gpuidx_from_gpuid(p,
- attrs[i].value) < 0) {
- pr_debug("no GPU 0x%x found\n", attrs[i].value);
- return -EINVAL;
- }
+ if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
+ gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
break;
case KFD_IOCTL_SVM_ATTR_ACCESS:
case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
- gpuidx = kfd_process_gpuidx_from_gpuid(p,
- attrs[i].value);
- if (gpuidx < 0) {
- pr_debug("no GPU 0x%x found\n", attrs[i].value);
- return -EINVAL;
- }
+ gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
break;
case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
break;
@@ -618,6 +629,15 @@ svm_range_check_attr(struct kfd_process *p,
pr_debug("unknown attr type 0x%x\n", attrs[i].type);
return -EINVAL;
}
+
+ if (gpuidx < 0) {
+ pr_debug("no GPU 0x%x found\n", val);
+ return -EINVAL;
+ } else if (gpuidx < MAX_GPU_INSTANCE &&
+ !test_bit(gpuidx, p->svms.bitmap_supported)) {
+ pr_debug("GPU 0x%x not supported\n", val);
+ return -EINVAL;
+ }
}
return 0;
@@ -1003,21 +1023,22 @@ svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
}
static uint64_t
-svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange)
+svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
+ int domain)
{
struct amdgpu_device *bo_adev;
uint32_t flags = prange->flags;
uint32_t mapping_flags = 0;
uint64_t pte_flags;
- bool snoop = !prange->ttm_res;
+ bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT;
- if (prange->svm_bo && prange->ttm_res)
+ if (domain == SVM_RANGE_VRAM_DOMAIN)
bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
switch (adev->asic_type) {
case CHIP_ARCTURUS:
- if (prange->svm_bo && prange->ttm_res) {
+ if (domain == SVM_RANGE_VRAM_DOMAIN) {
if (bo_adev == adev) {
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
@@ -1033,7 +1054,7 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange)
}
break;
case CHIP_ALDEBARAN:
- if (prange->svm_bo && prange->ttm_res) {
+ if (domain == SVM_RANGE_VRAM_DOMAIN) {
if (bo_adev == adev) {
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
@@ -1063,14 +1084,14 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange)
mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
pte_flags = AMDGPU_PTE_VALID;
- pte_flags |= prange->ttm_res ? 0 : AMDGPU_PTE_SYSTEM;
+ pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
pte_flags |= amdgpu_gem_va_map_flags(adev, mapping_flags);
pr_debug("svms 0x%p [0x%lx 0x%lx] vram %d PTE 0x%llx mapping 0x%x\n",
prange->svms, prange->start, prange->last,
- prange->ttm_res ? 1:0, pte_flags, mapping_flags);
+ (domain == SVM_RANGE_VRAM_DOMAIN) ? 1:0, pte_flags, mapping_flags);
return pte_flags;
}
@@ -1141,31 +1162,41 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_va bo_va;
bool table_freed = false;
uint64_t pte_flags;
+ unsigned long last_start;
+ int last_domain;
int r = 0;
+ int64_t i;
pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
prange->last);
- if (prange->svm_bo && prange->ttm_res) {
+ if (prange->svm_bo && prange->ttm_res)
bo_va.is_xgmi = amdgpu_xgmi_same_hive(adev, bo_adev);
- prange->mapping.bo_va = &bo_va;
- }
- prange->mapping.start = prange->start;
- prange->mapping.last = prange->last;
- prange->mapping.offset = prange->ttm_res ? prange->offset : 0;
- pte_flags = svm_range_get_pte_flags(adev, prange);
+ last_start = prange->start;
+ for (i = 0; i < prange->npages; i++) {
+ last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
+ dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
+ if ((prange->start + i) < prange->last &&
+ last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
+ continue;
- r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false, NULL,
- prange->mapping.start,
- prange->mapping.last, pte_flags,
- prange->mapping.offset,
- prange->ttm_res,
- dma_addr, &vm->last_update,
- &table_freed);
- if (r) {
- pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
- goto out;
+ pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
+ last_start, prange->start + i, last_domain ? "GPU" : "CPU");
+ pte_flags = svm_range_get_pte_flags(adev, prange, last_domain);
+ r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false, NULL,
+ last_start,
+ prange->start + i, pte_flags,
+ last_start - prange->start,
+ NULL,
+ dma_addr,
+ &vm->last_update,
+ &table_freed);
+ if (r) {
+ pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
+ goto out;
+ }
+ last_start = prange->start + i + 1;
}
r = amdgpu_vm_update_pdes(adev, vm, false);
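
svm_range_map_to_gpu() no longer issues a single mapping for the whole range; it walks dma_addr[], strips the SVM_RANGE_VRAM_DOMAIN bit, and emits one amdgpu_vm_bo_update_mapping() call per maximal run of consecutive pages that share the same domain, so mixed VRAM/system ranges are split automatically. A standalone sketch of that run-splitting loop, with toy types and values:

#include <stdint.h>
#include <stdio.h>

#define SVM_RANGE_VRAM_DOMAIN (1ULL << 0)

static void map_run(uint64_t first, uint64_t last, int vram)
{
	printf("map [0x%llx 0x%llx] on %s\n",
	       (unsigned long long)first, (unsigned long long)last,
	       vram ? "GPU" : "CPU");
}

int main(void)
{
	/* bit 0 set = page resides in VRAM */
	uint64_t dma_addr[] = { 0x1000 | 1, 0x2000 | 1, 0x3000, 0x4000, 0x5000 | 1 };
	uint64_t start = 0x100, npages = 5, last_start = start, i;

	for (i = 0; i < npages; i++) {
		int domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;

		dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
		/* keep extending the run while the next page has the same domain */
		if (start + i < start + npages - 1 &&
		    domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
			continue;
		map_run(last_start, start + i, domain);
		last_start = start + i + 1;
	}
	return 0;
}
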
@@ -1186,7 +1217,6 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
p->pasid, TLB_FLUSH_LEGACY);
}
out:
- prange->mapping.bo_va = NULL;
return r;
}
@@ -1274,7 +1304,7 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx)
adev = (struct amdgpu_device *)pdd->dev->kgd;
vm = drm_priv_to_vm(pdd->drm_priv);
- ctx->tv[gpuidx].bo = &vm->root.base.bo->tbo;
+ ctx->tv[gpuidx].bo = &vm->root.bo->tbo;
ctx->tv[gpuidx].num_shared = 4;
list_add(&ctx->tv[gpuidx].head, &ctx->validate_list);
}
@@ -1320,6 +1350,17 @@ static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
}
+static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
+{
+ struct kfd_process_device *pdd;
+ struct amdgpu_device *adev;
+
+ pdd = kfd_process_device_from_gpuidx(p, gpuidx);
+ adev = (struct amdgpu_device *)pdd->dev->kgd;
+
+ return SVM_ADEV_PGMAP_OWNER(adev);
+}
+
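
hmm_range_get_pages() now takes an owner token so device-private pages belonging to that owner are returned instead of being faulted back to system memory. SVM_ADEV_PGMAP_OWNER() picks the XGMI hive when the device belongs to one (so hive peers share ownership) and the adev itself otherwise; if the GPUs selected for validation do not all agree on one owner, the code falls back to NULL. A small standalone illustration of that selection, using toy structures:

#include <stddef.h>
#include <stdio.h>

struct toy_hive { int id; };
struct toy_dev { struct toy_hive *hive; int id; };

static void *pgmap_owner(struct toy_dev *d)
{
	return d->hive ? (void *)d->hive : (void *)d;
}

int main(void)
{
	struct toy_hive hive = { .id = 0 };
	struct toy_dev gpu0 = { .hive = &hive, .id = 0 };
	struct toy_dev gpu1 = { .hive = &hive, .id = 1 };
	struct toy_dev gpu2 = { .hive = NULL,  .id = 2 };
	struct toy_dev *mapped[] = { &gpu0, &gpu1, &gpu2 };
	void *owner = pgmap_owner(mapped[0]);
	size_t i;

	for (i = 1; i < sizeof(mapped) / sizeof(mapped[0]); i++) {
		if (pgmap_owner(mapped[i]) != owner) {
			owner = NULL;	/* mixed owners: no common owner */
			break;
		}
	}
	printf("common owner: %p\n", owner);
	return 0;
}
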
/*
* Validation+GPU mapping with concurrent invalidation (MMU notifiers)
*
@@ -1350,6 +1391,9 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
{
struct svm_validate_context ctx;
struct hmm_range *hmm_range;
+ struct kfd_process *p;
+ void *owner;
+ int32_t idx;
int r = 0;
ctx.process = container_of(prange->svms, struct kfd_process, svms);
@@ -1395,33 +1439,38 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
svm_range_reserve_bos(&ctx);
- if (!prange->actual_loc) {
- r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
- prange->start << PAGE_SHIFT,
- prange->npages, &hmm_range,
- false, true);
- if (r) {
- pr_debug("failed %d to get svm range pages\n", r);
- goto unreserve_out;
- }
-
- r = svm_range_dma_map(prange, ctx.bitmap,
- hmm_range->hmm_pfns);
- if (r) {
- pr_debug("failed %d to dma map range\n", r);
- goto unreserve_out;
+ p = container_of(prange->svms, struct kfd_process, svms);
+ owner = kfd_svm_page_owner(p, find_first_bit(ctx.bitmap,
+ MAX_GPU_INSTANCE));
+ for_each_set_bit(idx, ctx.bitmap, MAX_GPU_INSTANCE) {
+ if (kfd_svm_page_owner(p, idx) != owner) {
+ owner = NULL;
+ break;
}
+ }
+ r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
+ prange->start << PAGE_SHIFT,
+ prange->npages, &hmm_range,
+ false, true, owner);
+ if (r) {
+ pr_debug("failed %d to get svm range pages\n", r);
+ goto unreserve_out;
+ }
- prange->validated_once = true;
+ r = svm_range_dma_map(prange, ctx.bitmap,
+ hmm_range->hmm_pfns);
+ if (r) {
+ pr_debug("failed %d to dma map range\n", r);
+ goto unreserve_out;
}
+ prange->validated_once = true;
+
svm_range_lock(prange);
- if (!prange->actual_loc) {
- if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
- pr_debug("hmm update the range, need validate again\n");
- r = -EAGAIN;
- goto unlock_out;
- }
+ if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
+ pr_debug("hmm update the range, need validate again\n");
+ r = -EAGAIN;
+ goto unlock_out;
}
if (!list_empty(&prange->child_list)) {
pr_debug("range split by unmap in parallel, validate again\n");
@@ -1573,6 +1622,7 @@ svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
unsigned long start, unsigned long last)
{
struct svm_range_list *svms = prange->svms;
+ struct svm_range *pchild;
struct kfd_process *p;
int r = 0;
@@ -1584,7 +1634,19 @@ svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
if (!p->xnack_enabled) {
int evicted_ranges;
- atomic_inc(&prange->invalid);
+ list_for_each_entry(pchild, &prange->child_list, child_list) {
+ mutex_lock_nested(&pchild->lock, 1);
+ if (pchild->start <= last && pchild->last >= start) {
+ pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
+ pchild->start, pchild->last);
+ atomic_inc(&pchild->invalid);
+ }
+ mutex_unlock(&pchild->lock);
+ }
+
+ if (prange->start <= last && prange->last >= start)
+ atomic_inc(&prange->invalid);
+
evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
if (evicted_ranges != 1)
return r;
@@ -1601,7 +1663,6 @@ svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
schedule_delayed_work(&svms->restore_work,
msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
} else {
- struct svm_range *pchild;
unsigned long s, l;
pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
@@ -1855,7 +1916,7 @@ static void svm_range_drain_retry_fault(struct svm_range_list *svms)
p = container_of(svms, struct kfd_process, svms);
- for (i = 0; i < p->n_pdds; i++) {
+ for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
pdd = p->pdds[i];
if (!pdd)
continue;
@@ -2312,6 +2373,33 @@ static bool svm_range_skip_recover(struct svm_range *prange)
return false;
}
+static void
+svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
+ int32_t gpuidx)
+{
+ struct kfd_process_device *pdd;
+
+ /* The fault is on a different page of the same range,
+ * or recovery of the fault was deferred,
+ * or the fault is on an invalid virtual address.
+ */
+ if (gpuidx == MAX_GPU_INSTANCE) {
+ uint32_t gpuid;
+ int r;
+
+ r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx);
+ if (r < 0)
+ return;
+ }
+
+ /* The fault was recovered, or it cannot be recovered because
+ * the GPU has no access to the range.
+ */
+ pdd = kfd_process_device_from_gpuidx(p, gpuidx);
+ if (pdd)
+ WRITE_ONCE(pdd->faults, pdd->faults + 1);
+}
+
int
svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
uint64_t addr)
@@ -2321,10 +2409,16 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
struct svm_range *prange;
struct kfd_process *p;
uint64_t timestamp;
- int32_t best_loc, gpuidx;
+ int32_t best_loc;
+ int32_t gpuidx = MAX_GPU_INSTANCE;
bool write_locked = false;
int r = 0;
+ if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) {
+ pr_debug("device does not support SVM\n");
+ return -EFAULT;
+ }
+
p = kfd_lookup_process_by_pasid(pasid);
if (!p) {
pr_debug("kfd process not founded pasid 0x%x\n", pasid);
@@ -2436,6 +2530,9 @@ out_unlock_range:
out_unlock_svms:
mutex_unlock(&svms->lock);
mmap_read_unlock(mm);
+
+ svm_range_count_fault(adev, p, gpuidx);
+
mmput(mm);
out:
kfd_unref_process(p);
@@ -2472,6 +2569,7 @@ void svm_range_list_fini(struct kfd_process *p)
int svm_range_list_init(struct kfd_process *p)
{
struct svm_range_list *svms = &p->svms;
+ int i;
svms->objects = RB_ROOT_CACHED;
mutex_init(&svms->lock);
@@ -2482,6 +2580,10 @@ int svm_range_list_init(struct kfd_process *p)
INIT_LIST_HEAD(&svms->deferred_range_list);
spin_lock_init(&svms->deferred_list_lock);
+ for (i = 0; i < p->n_pdds; i++)
+ if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev))
+ bitmap_set(svms->bitmap_supported, i, 1);
+
return 0;
}
@@ -2653,7 +2755,8 @@ out:
/* FIXME: This is a workaround for page locking bug when some pages are
* invalid during migration to VRAM
*/
-void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm)
+void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm,
+ void *owner)
{
struct hmm_range *hmm_range;
int r;
@@ -2664,7 +2767,7 @@ void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm)
r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
prange->start << PAGE_SHIFT,
prange->npages, &hmm_range,
- false, true);
+ false, true, owner);
if (!r) {
amdgpu_hmm_range_get_pages_done(hmm_range);
prange->validated_once = true;
@@ -2709,16 +2812,6 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
best_loc == prange->actual_loc)
return 0;
- /*
- * Prefetch to GPU without host access flag, set actual_loc to gpu, then
- * validate on gpu and map to gpus will be handled afterwards.
- */
- if (best_loc && !prange->actual_loc &&
- !(prange->flags & KFD_IOCTL_SVM_FLAG_HOST_ACCESS)) {
- prange->actual_loc = best_loc;
- return 0;
- }
-
if (!best_loc) {
r = svm_migrate_vram_to_ram(prange, mm);
*migrated = !r;
@@ -2978,14 +3071,15 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
svm_range_set_default_attributes(&location, &prefetch_loc,
&granularity, &flags);
if (p->xnack_enabled)
- bitmap_fill(bitmap_access, MAX_GPU_INSTANCE);
+ bitmap_copy(bitmap_access, svms->bitmap_supported,
+ MAX_GPU_INSTANCE);
else
bitmap_zero(bitmap_access, MAX_GPU_INSTANCE);
bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE);
goto fill_values;
}
- bitmap_fill(bitmap_access, MAX_GPU_INSTANCE);
- bitmap_fill(bitmap_aip, MAX_GPU_INSTANCE);
+ bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
+ bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);
while (node) {
struct interval_tree_node *next;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 573f984b81fe..3fc1fd8b4fbc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -35,6 +35,10 @@
#include "amdgpu.h"
#include "kfd_priv.h"
+#define SVM_RANGE_VRAM_DOMAIN (1UL << 0)
+#define SVM_ADEV_PGMAP_OWNER(adev)\
+ ((adev)->hive ? (void *)(adev)->hive : (void *)(adev))
+
struct svm_range_bo {
struct amdgpu_bo *bo;
struct kref kref;
@@ -110,7 +114,6 @@ struct svm_range {
struct list_head update_list;
struct list_head remove_list;
struct list_head insert_list;
- struct amdgpu_bo_va_mapping mapping;
uint64_t npages;
dma_addr_t *dma_addr[MAX_GPU_INSTANCE];
struct ttm_resource *ttm_res;
@@ -147,6 +150,14 @@ static inline void svm_range_unlock(struct svm_range *prange)
mutex_unlock(&prange->lock);
}
+static inline struct svm_range_bo *svm_range_bo_ref(struct svm_range_bo *svm_bo)
+{
+ if (svm_bo)
+ kref_get(&svm_bo->kref);
+
+ return svm_bo;
+}
+
int svm_range_list_init(struct kfd_process *p);
void svm_range_list_fini(struct kfd_process *p);
int svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
@@ -173,8 +184,17 @@ void schedule_deferred_list_work(struct svm_range_list *svms);
void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
unsigned long offset, unsigned long npages);
void svm_range_free_dma_mappings(struct svm_range *prange);
-void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm);
+void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm,
+ void *owner);
+struct kfd_process_device *
+svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev);
+
+/* SVM API and HMM page migration work together; the device memory pgmap
+ * type is set to a non-zero value when page migration registers device memory.
+ */
+#define KFD_IS_SVM_API_SUPPORTED(dev) ((dev)->pgmap.type != 0)
+void svm_range_bo_unref(struct svm_range_bo *svm_bo);
#else
struct kfd_process;
@@ -201,6 +221,8 @@ static inline int svm_range_schedule_evict_svm_bo(
return -EINVAL;
}
+#define KFD_IS_SVM_API_SUPPORTED(dev) false
+
#endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */
#endif /* KFD_SVM_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index f668b8cc2b57..b1ce072aa20b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -36,6 +36,7 @@
#include "kfd_topology.h"
#include "kfd_device_queue_manager.h"
#include "kfd_iommu.h"
+#include "kfd_svm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ras.h"
@@ -1221,6 +1222,28 @@ static void kfd_set_iolink_no_atomics(struct kfd_topology_device *dev,
}
}
+static void kfd_set_iolink_non_coherent(struct kfd_topology_device *to_dev,
+ struct kfd_iolink_properties *outbound_link,
+ struct kfd_iolink_properties *inbound_link)
+{
+ /* CPU -> GPU with PCIe */
+ if (!to_dev->gpu &&
+ inbound_link->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS)
+ inbound_link->flags |= CRAT_IOLINK_FLAGS_NON_COHERENT;
+
+ if (to_dev->gpu) {
+ /* GPU <-> GPU with PCIe and
+ * Vega20 with XGMI
+ */
+ if (inbound_link->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS ||
+ (inbound_link->iolink_type == CRAT_IOLINK_TYPE_XGMI &&
+ to_dev->gpu->device_info->asic_family == CHIP_VEGA20)) {
+ outbound_link->flags |= CRAT_IOLINK_FLAGS_NON_COHERENT;
+ inbound_link->flags |= CRAT_IOLINK_FLAGS_NON_COHERENT;
+ }
+ }
+}
+
static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev)
{
struct kfd_iolink_properties *link, *inbound_link;
@@ -1246,6 +1269,7 @@ static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev)
inbound_link->flags = CRAT_IOLINK_FLAGS_ENABLED;
kfd_set_iolink_no_atomics(peer_dev, dev, inbound_link);
+ kfd_set_iolink_non_coherent(peer_dev, link, inbound_link);
}
}
}
@@ -1441,10 +1465,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.capability |= (adev->ras_enabled != 0) ?
HSA_CAP_RASEVENTNOTIFY : 0;
- /* SVM API and HMM page migration work together, device memory type
- * is initialized to not 0 when page migration register device memory.
- */
- if (adev->kfd.dev->pgmap.type != 0)
+ if (KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev))
dev->node_props.capability |= HSA_CAP_SVMAPI_SUPPORTED;
kfd_debug_print_topology();
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 6bd6380b0ee0..8b48c6692007 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -54,8 +54,8 @@
#define HSA_CAP_ASIC_REVISION_SHIFT 22
#define HSA_CAP_SRAM_EDCSUPPORTED 0x04000000
#define HSA_CAP_SVMAPI_SUPPORTED 0x08000000
-
-#define HSA_CAP_RESERVED 0xf00f8000
+#define HSA_CAP_FLAGS_COHERENTHOSTACCESS 0x10000000
+#define HSA_CAP_RESERVED 0xe00f8000
struct kfd_node_properties {
uint64_t hive_id;
@@ -101,8 +101,7 @@ struct kfd_node_properties {
#define HSA_MEM_FLAGS_HOT_PLUGGABLE 0x00000001
#define HSA_MEM_FLAGS_NON_VOLATILE 0x00000002
-#define HSA_MEM_FLAGS_COHERENTHOSTACCESS 0x00000004
-#define HSA_MEM_FLAGS_RESERVED 0xfffffff8
+#define HSA_MEM_FLAGS_RESERVED 0xfffffffc
struct kfd_mem_properties {
struct list_head list;
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 5b5f36c80efb..7dffc04a557e 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -31,13 +31,6 @@ config DRM_AMD_DC_SI
by default. This includes Tahiti, Pitcairn, Cape Verde, Oland.
Hainan is not supported by AMD DC and it has no physical DCE6.
-config DRM_AMD_DC_DCN3_1
- bool "DCN 3.1 family"
- depends on DRM_AMD_DC_DCN
- help
- Choose this option if you want to have
- DCN3.1 family support for display engine
-
config DEBUG_KERNEL_DC
bool "Enable kgdb break in DC"
depends on DRM_AMD_DC
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index 9a3b7bf8ab0b..91fb72c96545 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -28,7 +28,7 @@
AMDGPUDM = amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o amdgpu_dm_color.o
ifneq ($(CONFIG_DRM_AMD_DC),)
-AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o
+AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o amdgpu_dm_psr.o
endif
ifdef CONFIG_DRM_AMD_DC_HDCP
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 6fda0dfb78f8..d3a2a5ff57e9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -58,6 +58,7 @@
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
+#include "amdgpu_dm_psr.h"
#include "ivsrcid/ivsrcid_vislands30.h"
@@ -109,10 +110,8 @@ MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
-#endif
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
@@ -213,12 +212,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
static void handle_cursor_update(struct drm_plane *plane,
struct drm_plane_state *old_plane_state);
-static void amdgpu_dm_set_psr_caps(struct dc_link *link);
-static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
-static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
-static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
-static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
-
static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
@@ -1150,16 +1143,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
init_data.flags.disable_dmcu = true;
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
case CHIP_VANGOGH:
- init_data.flags.gpu_vm_support = true;
- break;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
case CHIP_YELLOW_CARP:
init_data.flags.gpu_vm_support = true;
break;
-#endif
default:
break;
}
@@ -1173,6 +1160,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
init_data.flags.disable_fractional_pwm = true;
+ if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
+ init_data.flags.edp_no_power_sequencing = true;
+
init_data.flags.power_down_display_on_boot = true;
INIT_LIST_HEAD(&adev->dm.da_list);
@@ -1416,9 +1406,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
case CHIP_VANGOGH:
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
case CHIP_YELLOW_CARP:
-#endif
return 0;
case CHIP_NAVI12:
fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
@@ -1537,12 +1525,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
dmub_asic = DMUB_ASIC_DCN303;
fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
case CHIP_YELLOW_CARP:
dmub_asic = DMUB_ASIC_DCN31;
fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
break;
-#endif
default:
/* ASIC doesn't support DMUB. */
@@ -2237,7 +2223,7 @@ static int dm_resume(void *handle)
= 0xffffffff;
}
}
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
* Resource allocation happens for link encoders for newer ASIC in
* dc_validate_global_state, so we need to revalidate it.
@@ -3791,9 +3777,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
case CHIP_YELLOW_CARP:
-#endif
case CHIP_RENOIR:
if (register_outbox_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
@@ -3898,9 +3882,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
case CHIP_VANGOGH:
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
case CHIP_YELLOW_CARP:
-#endif
if (dcn10_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
@@ -4072,13 +4054,11 @@ static int dm_early_init(void *handle)
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
case CHIP_YELLOW_CARP:
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 4;
adev->mode_info.num_dig = 4;
break;
-#endif
case CHIP_NAVI14:
case CHIP_DIMGREY_CAVEFISH:
adev->mode_info.num_crtc = 5;
@@ -4316,9 +4296,7 @@ fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
adev->asic_type == CHIP_NAVY_FLOUNDER ||
adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
adev->asic_type == CHIP_BEIGE_GOBY ||
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
adev->asic_type == CHIP_YELLOW_CARP ||
-#endif
adev->asic_type == CHIP_VANGOGH)
tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}
@@ -5651,6 +5629,36 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
}
#endif
+/**
+ * DOC: FreeSync Video
+ *
+ * When a userspace application wants to play a video, the content follows a
+ * standard format definition that usually specifies the FPS of that format.
+ * The list below shows some common video formats and the FPS expected for
+ * each of them:
+ *
+ * - TV/NTSC (23.976 FPS)
+ * - Cinema (24 FPS)
+ * - TV/PAL (25 FPS)
+ * - TV/NTSC (29.97 FPS)
+ * - TV/NTSC (30 FPS)
+ * - Cinema HFR (48 FPS)
+ * - TV/PAL (50 FPS)
+ * - Commonly used (60 FPS)
+ * - Multiples of 24 (48,72,96 FPS)
+ *
+ * The list of standard video formats is small and can be added to the
+ * connector mode list beforehand. With that, userspace can leverage
+ * FreeSync to extend the front porch in order to attain the target refresh
+ * rate. Such a switch happens seamlessly, without screen blanking or
+ * reprogramming of the output in any other way. If userspace requests a
+ * modeset that is compatible with a FreeSync mode and only differs in the
+ * refresh rate, DC skips the full update and avoids any blink during the
+ * transition. For example, a video player can switch the mode from 60Hz to
+ * 30Hz for playing TV/NTSC content when it goes full screen, without
+ * causing any display blink. The same concept applies to any other mode
+ * setting change.
+ */
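
As a rough illustration of the mechanism described above (not part of the patch): with the pixel clock and h_total fixed, refresh = pix_clk / (h_total * v_total), so a lower FreeSync refresh rate is reached purely by growing v_total, i.e. the vertical front porch. A back-of-the-envelope calculation with made-up, CEA-861-style timings:

#include <stdio.h>

int main(void)
{
	unsigned long pix_clk_khz = 148500;	/* 148.5 MHz, illustrative */
	unsigned int h_total = 2200, v_total_60 = 1125;
	unsigned int target_hz = 48;

	/* 148500000 / (2200 * 1125) = 60 Hz for the base mode */
	unsigned int v_total_48 = pix_clk_khz * 1000UL / (h_total * target_hz);

	printf("base refresh : %lu Hz\n",
	       pix_clk_khz * 1000UL / (h_total * (unsigned long)v_total_60));
	printf("v_total for %u Hz: %u (extra front porch lines: %u)\n",
	       target_hz, v_total_48, v_total_48 - v_total_60);
	return 0;
}
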
static struct drm_display_mode *
get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
bool use_probed_modes)
@@ -5875,6 +5883,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
stream->use_vsc_sdp_for_colorimetry = true;
}
mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
+ aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
+
}
finish:
dc_sink_release(sink);
@@ -8688,7 +8698,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
!acrtc_state->stream->link->psr_settings.psr_allow_active) {
- amdgpu_dm_psr_enable(acrtc_state->stream);
+ struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
+ acrtc_state->stream->dm_stream_context;
+
+ if (aconn->psr_skip_count > 0)
+ aconn->psr_skip_count--;
+ else
+ amdgpu_dm_psr_enable(acrtc_state->stream);
}
mutex_unlock(&dm->dc_lock);
@@ -9175,7 +9191,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
/* restore the backlight level */
- if (dm->backlight_dev)
+ if (dm->backlight_dev && (amdgpu_dm_backlight_get_level(dm) != dm->brightness[0]))
amdgpu_dm_backlight_set_level(dm, dm->brightness[0]);
#endif
/*
@@ -10105,7 +10121,7 @@ static int validate_overlay(struct drm_atomic_state *state)
int i;
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
- struct drm_plane_state *primary_state, *cursor_state, *overlay_state = NULL;
+ struct drm_plane_state *primary_state, *overlay_state = NULL;
/* Check if primary plane is contained inside overlay */
for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
@@ -10135,14 +10151,6 @@ static int validate_overlay(struct drm_atomic_state *state)
if (!primary_state->crtc)
return 0;
- /* check if cursor plane is enabled */
- cursor_state = drm_atomic_get_plane_state(state, overlay_state->crtc->cursor);
- if (IS_ERR(cursor_state))
- return PTR_ERR(cursor_state);
-
- if (drm_atomic_plane_disabling(plane->state, cursor_state))
- return 0;
-
/* Perform the bounds check to ensure the overlay plane covers the primary */
if (primary_state->crtc_x < overlay_state->crtc_x ||
primary_state->crtc_y < overlay_state->crtc_y ||
@@ -10245,6 +10253,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
dm_old_crtc_state->dsc_force_changed == false)
continue;
+ ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
+ if (ret)
+ goto fail;
+
if (!new_crtc_state->enable)
continue;
@@ -10725,136 +10737,6 @@ update:
freesync_capable);
}
-static void amdgpu_dm_set_psr_caps(struct dc_link *link)
-{
- uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
-
- if (!(link->connector_signal & SIGNAL_TYPE_EDP))
- return;
- if (link->type == dc_connection_none)
- return;
- if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
- dpcd_data, sizeof(dpcd_data))) {
- link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
-
- if (dpcd_data[0] == 0) {
- link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
- link->psr_settings.psr_feature_enabled = false;
- } else {
- link->psr_settings.psr_version = DC_PSR_VERSION_1;
- link->psr_settings.psr_feature_enabled = true;
- }
-
- DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
- }
-}
-
-/*
- * amdgpu_dm_link_setup_psr() - configure psr link
- * @stream: stream state
- *
- * Return: true if success
- */
-static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
-{
- struct dc_link *link = NULL;
- struct psr_config psr_config = {0};
- struct psr_context psr_context = {0};
- bool ret = false;
-
- if (stream == NULL)
- return false;
-
- link = stream->link;
-
- psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
-
- if (psr_config.psr_version > 0) {
- psr_config.psr_exit_link_training_required = 0x1;
- psr_config.psr_frame_capture_indication_req = 0;
- psr_config.psr_rfb_setup_time = 0x37;
- psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
- psr_config.allow_smu_optimizations = 0x0;
-
- ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
-
- }
- DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
-
- return ret;
-}
-
-/*
- * amdgpu_dm_psr_enable() - enable psr f/w
- * @stream: stream state
- *
- * Return: true if success
- */
-bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
-{
- struct dc_link *link = stream->link;
- unsigned int vsync_rate_hz = 0;
- struct dc_static_screen_params params = {0};
- /* Calculate number of static frames before generating interrupt to
- * enter PSR.
- */
- // Init fail safe of 2 frames static
- unsigned int num_frames_static = 2;
-
- DRM_DEBUG_DRIVER("Enabling psr...\n");
-
- vsync_rate_hz = div64_u64(div64_u64((
- stream->timing.pix_clk_100hz * 100),
- stream->timing.v_total),
- stream->timing.h_total);
-
- /* Round up
- * Calculate number of frames such that at least 30 ms of time has
- * passed.
- */
- if (vsync_rate_hz != 0) {
- unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
- num_frames_static = (30000 / frame_time_microsec) + 1;
- }
-
- params.triggers.cursor_update = true;
- params.triggers.overlay_update = true;
- params.triggers.surface_update = true;
- params.num_frames = num_frames_static;
-
- dc_stream_set_static_screen_params(link->ctx->dc,
- &stream, 1,
- &params);
-
- return dc_link_set_psr_allow_active(link, true, false, false);
-}
-
-/*
- * amdgpu_dm_psr_disable() - disable psr f/w
- * @stream: stream state
- *
- * Return: true if success
- */
-static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
-{
-
- DRM_DEBUG_DRIVER("Disabling psr...\n");
-
- return dc_link_set_psr_allow_active(stream->link, false, true, false);
-}
-
-/*
- * amdgpu_dm_psr_disable() - disable psr f/w
- * if psr is enabled on any stream
- *
- * Return: true if success
- */
-static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
-{
- DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
- return dc_set_psr_allow_active(dm->dc, false);
-}
-
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
struct amdgpu_device *adev = drm_to_adev(dev);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 22730e554209..9522d4ca299e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -509,6 +509,8 @@ struct amdgpu_dm_connector {
struct dsc_preferred_settings dsc_settings;
/* Cached display modes */
struct drm_display_mode freesync_vid_base;
+
+ int psr_skip_count;
};
#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
@@ -617,6 +619,7 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev);
#define MAX_COLOR_LEGACY_LUT_ENTRIES 256
void amdgpu_dm_init_color_mod(void);
+int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
struct dc_plane_state *dc_plane_state);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index 157fe4efbb59..a022e5bb30a5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -285,6 +285,37 @@ static int __set_input_tf(struct dc_transfer_func *func,
}
/**
+ * Verifies that the Degamma and Gamma LUTs attached to the |crtc_state| are of
+ * the expected size.
+ * Returns 0 on success.
+ */
+int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state)
+{
+ const struct drm_color_lut *lut = NULL;
+ uint32_t size = 0;
+
+ lut = __extract_blob_lut(crtc_state->degamma_lut, &size);
+ if (lut && size != MAX_COLOR_LUT_ENTRIES) {
+ DRM_DEBUG_DRIVER(
+ "Invalid Degamma LUT size. Should be %u but got %u.\n",
+ MAX_COLOR_LUT_ENTRIES, size);
+ return -EINVAL;
+ }
+
+ lut = __extract_blob_lut(crtc_state->gamma_lut, &size);
+ if (lut && size != MAX_COLOR_LUT_ENTRIES &&
+ size != MAX_COLOR_LEGACY_LUT_ENTRIES) {
+ DRM_DEBUG_DRIVER(
+ "Invalid Gamma LUT size. Should be %u (or %u for legacy) but got %u.\n",
+ MAX_COLOR_LUT_ENTRIES, MAX_COLOR_LEGACY_LUT_ENTRIES,
+ size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
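
A toy standalone version of the size check above; the limits used here assume MAX_COLOR_LUT_ENTRIES = 4096 and take MAX_COLOR_LEGACY_LUT_ENTRIES = 256 from the header context shown later in this diff:

#include <stdint.h>
#include <stdio.h>

#define MAX_COLOR_LUT_ENTRIES		4096	/* assumed value */
#define MAX_COLOR_LEGACY_LUT_ENTRIES	256

static int verify_lut_sizes(uint32_t degamma_size, uint32_t gamma_size)
{
	/* size 0 means "no LUT attached", which is always acceptable */
	if (degamma_size && degamma_size != MAX_COLOR_LUT_ENTRIES)
		return -1;
	if (gamma_size && gamma_size != MAX_COLOR_LUT_ENTRIES &&
	    gamma_size != MAX_COLOR_LEGACY_LUT_ENTRIES)
		return -1;
	return 0;
}

int main(void)
{
	printf("degamma 4096, gamma 256 : %d\n", verify_lut_sizes(4096, 256));
	printf("degamma 1024, gamma 4096: %d\n", verify_lut_sizes(1024, 4096));
	return 0;
}
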
+
+/**
* amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream.
* @crtc: amdgpu_dm crtc state
*
@@ -317,14 +348,12 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
bool is_legacy;
int r;
- degamma_lut = __extract_blob_lut(crtc->base.degamma_lut, &degamma_size);
- if (degamma_lut && degamma_size != MAX_COLOR_LUT_ENTRIES)
- return -EINVAL;
+ r = amdgpu_dm_verify_lut_sizes(&crtc->base);
+ if (r)
+ return r;
+ degamma_lut = __extract_blob_lut(crtc->base.degamma_lut, &degamma_size);
regamma_lut = __extract_blob_lut(crtc->base.gamma_lut, &regamma_size);
- if (regamma_lut && regamma_size != MAX_COLOR_LUT_ENTRIES &&
- regamma_size != MAX_COLOR_LEGACY_LUT_ENTRIES)
- return -EINVAL;
has_degamma =
degamma_lut && !__is_lut_linear(degamma_lut, degamma_size);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 64c6ed50990d..e63c6885c757 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -467,13 +467,11 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
display->dig_fe = config->dig_fe;
link->dig_be = config->dig_be;
link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
display->stream_enc_idx = config->stream_enc_idx;
link->link_enc_idx = config->link_enc_idx;
link->phy_idx = config->phy_idx;
link->hdcp_supported_informational = dc_link_is_hdcp14(aconnector->dc_link,
aconnector->dc_sink->sink_signal) ? 1 : 0;
-#endif
link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
link->dp.assr_enabled = config->assr_enabled;
link->dp.mst_enabled = config->mst_enabled;
@@ -657,12 +655,10 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate);
hdcp_work[i].hdcp.config.psp.handle = &adev->psp;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
if (dc->ctx->dce_version == DCN_VERSION_3_1) {
hdcp_work[i].hdcp.config.psp.caps.dtm_v3_supported = 1;
hdcp_work[i].hdcp.config.psp.caps.opm_state_query_supported = false;
}
-#endif
hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i);
hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c;
hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
new file mode 100644
index 000000000000..70a554f1e725
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "amdgpu_dm_psr.h"
+#include "dc.h"
+#include "dm_helpers.h"
+
+/*
+ * amdgpu_dm_set_psr_caps() - set link psr capabilities
+ * @link: link
+ *
+ */
+void amdgpu_dm_set_psr_caps(struct dc_link *link)
+{
+ uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
+
+ if (!(link->connector_signal & SIGNAL_TYPE_EDP))
+ return;
+ if (link->type == dc_connection_none)
+ return;
+ if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
+ dpcd_data, sizeof(dpcd_data))) {
+ link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
+
+ if (dpcd_data[0] == 0) {
+ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+ link->psr_settings.psr_feature_enabled = false;
+ } else {
+ link->psr_settings.psr_version = DC_PSR_VERSION_1;
+ link->psr_settings.psr_feature_enabled = true;
+ }
+
+ DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
+ }
+}
+
+/*
+ * amdgpu_dm_link_setup_psr() - configure psr link
+ * @stream: stream state
+ *
+ * Return: true if success
+ */
+bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
+{
+ struct dc_link *link = NULL;
+ struct psr_config psr_config = {0};
+ struct psr_context psr_context = {0};
+ bool ret = false;
+
+ if (stream == NULL)
+ return false;
+
+ link = stream->link;
+
+ psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
+
+ if (psr_config.psr_version > 0) {
+ psr_config.psr_exit_link_training_required = 0x1;
+ psr_config.psr_frame_capture_indication_req = 0;
+ psr_config.psr_rfb_setup_time = 0x37;
+ psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
+ psr_config.allow_smu_optimizations = 0x0;
+
+ ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
+
+ }
+ DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
+
+ return ret;
+}
+
+/*
+ * amdgpu_dm_psr_enable() - enable psr f/w
+ * @stream: stream state
+ *
+ * Return: true if success
+ */
+bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
+{
+ struct dc_link *link = stream->link;
+ unsigned int vsync_rate_hz = 0;
+ struct dc_static_screen_params params = {0};
+ /* Calculate number of static frames before generating interrupt to
+ * enter PSR.
+ */
+ // Init fail safe of 2 frames static
+ unsigned int num_frames_static = 2;
+
+ DRM_DEBUG_DRIVER("Enabling psr...\n");
+
+ vsync_rate_hz = div64_u64(div64_u64((
+ stream->timing.pix_clk_100hz * 100),
+ stream->timing.v_total),
+ stream->timing.h_total);
+
+ /* Round up
+ * Calculate number of frames such that at least 30 ms of time has
+ * passed.
+ */
+ if (vsync_rate_hz != 0) {
+ unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
+ num_frames_static = (30000 / frame_time_microsec) + 1;
+ }
+
+ params.triggers.cursor_update = true;
+ params.triggers.overlay_update = true;
+ params.triggers.surface_update = true;
+ params.num_frames = num_frames_static;
+
+ dc_stream_set_static_screen_params(link->ctx->dc,
+ &stream, 1,
+ &params);
+
+ return dc_link_set_psr_allow_active(link, true, false, false);
+}
+
+/*
+ * amdgpu_dm_psr_disable() - disable psr f/w
+ * @stream: stream state
+ *
+ * Return: true if success
+ */
+bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
+{
+
+ DRM_DEBUG_DRIVER("Disabling psr...\n");
+
+ return dc_link_set_psr_allow_active(stream->link, false, true, false);
+}
+
+/*
+ * amdgpu_dm_psr_disable_all() - disable psr f/w
+ * if psr is enabled on any stream
+ *
+ * Return: true if success
+ */
+bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
+{
+ DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
+ return dc_set_psr_allow_active(dm->dc, false);
+}
+
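For reference, the static-frame calculation in amdgpu_dm_psr_enable() above reduces to: derive the refresh rate from pix_clk_100hz / (h_total * v_total), then round up to however many whole frames cover at least 30 ms, falling back to a 2-frame fail-safe when the rate cannot be computed. A minimal standalone sketch under those assumptions (the helper name is illustrative, not part of the patch):

#include <stdint.h>

/* Sketch: number of static frames to wait before requesting PSR entry. */
static unsigned int psr_static_frames(uint64_t pix_clk_100hz,
				      uint32_t h_total, uint32_t v_total)
{
	unsigned int num_frames = 2;	/* fail-safe default */
	uint64_t vsync_rate_hz;

	if (!h_total || !v_total)
		return num_frames;

	/* refresh rate = pixel clock / (h_total * v_total) */
	vsync_rate_hz = (pix_clk_100hz * 100) / v_total / h_total;
	if (vsync_rate_hz) {
		unsigned int frame_time_us = 1000000 / (unsigned int)vsync_rate_hz;

		/* round up so at least 30 ms of static frames elapse */
		num_frames = (30000 / frame_time_us) + 1;
	}
	return num_frames;
}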
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
index b6c976a4d578..6806b3c9c84b 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2020 Advanced Micro Devices, Inc.
+ * Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,39 +19,22 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
+ * Authors: AMD
+ *
*/
-#ifndef __SMU_V13_0_1_H__
-#define __SMU_V13_0_1_H__
-
-#include "amdgpu_smu.h"
-
-#define SMU13_0_1_DRIVER_IF_VERSION_INV 0xFFFFFFFF
-#define SMU13_0_1_DRIVER_IF_VERSION_YELLOW_CARP 0x3
-
-/* MP Apertures */
-#define MP0_Public 0x03800000
-#define MP0_SRAM 0x03900000
-#define MP1_Public 0x03b00000
-#define MP1_SRAM 0x03c00004
-
-/* address block */
-#define smnMP1_FIRMWARE_FLAGS 0x3010024
-
-
-#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3)
-
-int smu_v13_0_1_check_fw_status(struct smu_context *smu);
-
-int smu_v13_0_1_check_fw_version(struct smu_context *smu);
-int smu_v13_0_1_fini_smc_tables(struct smu_context *smu);
+#ifndef AMDGPU_DM_AMDGPU_DM_PSR_H_
+#define AMDGPU_DM_AMDGPU_DM_PSR_H_
-int smu_v13_0_1_get_vbios_bootup_values(struct smu_context *smu);
+#include "amdgpu.h"
-int smu_v13_0_1_set_default_dpm_tables(struct smu_context *smu);
+/* the number of pageflips before enabling psr */
+#define AMDGPU_DM_PSR_ENTRY_DELAY 5
-int smu_v13_0_1_set_driver_table_location(struct smu_context *smu);
+void amdgpu_dm_set_psr_caps(struct dc_link *link);
+bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
+bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
+bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
+bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
-int smu_v13_0_1_gfx_off_control(struct smu_context *smu, bool enable);
-#endif
-#endif
+#endif /* AMDGPU_DM_AMDGPU_DM_PSR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 95aca9b0ef7f..943fcb164876 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -34,10 +34,8 @@ DC_LIBS += dcn30
DC_LIBS += dcn301
DC_LIBS += dcn302
DC_LIBS += dcn303
-ifdef CONFIG_DRM_AMD_DC_DCN3_1
DC_LIBS += dcn31
endif
-endif
DC_LIBS += dce120
@@ -60,7 +58,7 @@ include $(AMD_DC)
DISPLAY_CORE = dc.o dc_stat.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o \
-dc_link_enc_cfg.o
+dc_link_enc_cfg.o dc_link_dpcd.o
ifdef CONFIG_DRM_AMD_DC_DCN
DISPLAY_CORE += dc_vm_helper.o
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 7d1c1b76d8d0..6dbde74c1e06 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -576,13 +576,11 @@ static struct device_id device_type_from_device_id(uint16_t device_id)
result_device_id.device_type = DEVICE_TYPE_LCD;
result_device_id.enum_id = 1;
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
case ATOM_DISPLAY_LCD2_SUPPORT:
result_device_id.device_type = DEVICE_TYPE_LCD;
result_device_id.enum_id = 2;
break;
-#endif
case ATOM_DISPLAY_DFP1_SUPPORT:
result_device_id.device_type = DEVICE_TYPE_DFP;
@@ -2140,7 +2138,7 @@ static enum bp_result get_integrated_info_v2_1(
info_v2_1->edp1_info.edp_pwr_down_bloff_to_vary_bloff;
info->edp1_info.edp_panel_bpc =
info_v2_1->edp1_info.edp_panel_bpc;
- info->edp1_info.edp_bootup_bl_level =
+ info->edp1_info.edp_bootup_bl_level = info_v2_1->edp1_info.edp_bootup_bl_level;
info->edp2_info.edp_backlight_pwm_hz =
le16_to_cpu(info_v2_1->edp2_info.edp_backlight_pwm_hz);
@@ -2162,7 +2160,6 @@ static enum bp_result get_integrated_info_v2_1(
return BP_RESULT_OK;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
static enum bp_result get_integrated_info_v2_2(
struct bios_parser *bp,
struct integrated_info *info)
@@ -2262,7 +2259,7 @@ static enum bp_result get_integrated_info_v2_2(
return BP_RESULT_OK;
}
-#endif
+
/*
* construct_integrated_info
*
@@ -2310,11 +2307,9 @@ static enum bp_result construct_integrated_info(
case 1:
result = get_integrated_info_v2_1(bp, info);
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
case 2:
result = get_integrated_info_v2_2(bp, info);
break;
-#endif
default:
return result;
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index 3ac4dc01f8e1..cb3fd44cb1ed 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -72,13 +72,9 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
case DCN_VERSION_2_1:
case DCN_VERSION_3_0:
case DCN_VERSION_3_01:
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
- case DCN_VERSION_3_1:
- *h = dal_cmd_tbl_helper_dce112_get_table2();
- return true;
-#endif
case DCN_VERSION_3_02:
case DCN_VERSION_3_03:
+ case DCN_VERSION_3_1:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index 713251547d1c..7fa0b007a7ea 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -135,9 +135,7 @@ endif
AMD_DAL_CLK_MGR_DCN301 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn301/,$(CLK_MGR_DCN301))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN301)
-endif
-ifdef CONFIG_DRM_AMD_DC_DCN3_1
###############################################################################
# DCN31
###############################################################################
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index f652f491c419..bb31541f8072 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -41,9 +41,7 @@
#include "dcn21/rn_clk_mgr.h"
#include "dcn30/dcn30_clk_mgr.h"
#include "dcn301/vg_clk_mgr.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
#include "dcn31/dcn31_clk_mgr.h"
-#endif
int clk_mgr_helper_get_active_display_cnt(
@@ -273,9 +271,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
return &clk_mgr->base.base;
}
break;
-#endif
-
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
case FAMILY_YELLOW_CARP: {
struct clk_mgr_dcn31 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
@@ -325,12 +320,10 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
vg_clk_mgr_destroy(clk_mgr);
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
case FAMILY_YELLOW_CARP:
if (ASICREV_IS_YELLOW_CARP(clk_mgr_base->ctx->asic_id.hw_internal_rev))
dcn31_clk_mgr_destroy(clk_mgr);
break;
-#endif
default:
break;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 59d17195bc22..6e0c5c664fdc 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -123,7 +123,7 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
}
}
-void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr)
+void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct dc_state *context)
{
int dpp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
* clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dppclk_khz;
@@ -132,6 +132,68 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr)
uint32_t dppclk_wdivider = dentist_get_did_from_divider(dpp_divider);
uint32_t dispclk_wdivider = dentist_get_did_from_divider(disp_divider);
+ uint32_t current_dispclk_wdivider;
+ uint32_t i;
+
+ REG_GET(DENTIST_DISPCLK_CNTL,
+ DENTIST_DISPCLK_WDIVIDER, &current_dispclk_wdivider);
+
+ /* When changing divider to or from 127, some extra programming is required to prevent corruption */
+ if (current_dispclk_wdivider == 127 && dispclk_wdivider != 127) {
+ for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ uint32_t fifo_level;
+ struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg;
+ struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
+ int32_t N;
+ int32_t j;
+
+ if (!pipe_ctx->stream)
+ continue;
+ /* Virtual encoders don't have this function */
+ if (!stream_enc->funcs->get_fifo_cal_average_level)
+ continue;
+ fifo_level = stream_enc->funcs->get_fifo_cal_average_level(
+ stream_enc);
+ N = fifo_level / 4;
+ dccg->funcs->set_fifo_errdet_ovr_en(
+ dccg,
+ true);
+ for (j = 0; j < N - 4; j++)
+ dccg->funcs->otg_drop_pixel(
+ dccg,
+ pipe_ctx->stream_res.tg->inst);
+ dccg->funcs->set_fifo_errdet_ovr_en(
+ dccg,
+ false);
+ }
+ } else if (dispclk_wdivider == 127 && current_dispclk_wdivider != 127) {
+ REG_UPDATE(DENTIST_DISPCLK_CNTL,
+ DENTIST_DISPCLK_WDIVIDER, 126);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 100);
+ for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg;
+ struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
+ uint32_t fifo_level;
+ int32_t N;
+ int32_t j;
+
+ if (!pipe_ctx->stream)
+ continue;
+ /* Virtual encoders don't have this function */
+ if (!stream_enc->funcs->get_fifo_cal_average_level)
+ continue;
+ fifo_level = stream_enc->funcs->get_fifo_cal_average_level(
+ stream_enc);
+ N = fifo_level / 4;
+ dccg->funcs->set_fifo_errdet_ovr_en(dccg, true);
+ for (j = 0; j < 12 - N; j++)
+ dccg->funcs->otg_add_pixel(dccg,
+ pipe_ctx->stream_res.tg->inst);
+ dccg->funcs->set_fifo_errdet_ovr_en(dccg, false);
+ }
+ }
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider);
@@ -251,11 +313,11 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
if (dpp_clock_lowered) {
// if clock is being lowered, increase DTO before lowering refclk
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
- dcn20_update_clocks_update_dentist(clk_mgr);
+ dcn20_update_clocks_update_dentist(clk_mgr, context);
} else {
// if clock is being raised, increase refclk before lowering DTO
if (update_dppclk || update_dispclk)
- dcn20_update_clocks_update_dentist(clk_mgr);
+ dcn20_update_clocks_update_dentist(clk_mgr, context);
// always update dtos unless clock is lowered and not safe to lower
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
@@ -324,10 +386,8 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,
// Both fclk and ref_dppclk run on the same scemi clock.
clk_mgr_int->dccg->ref_dppclk = clk_mgr->clks.fclk_khz;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
/* TODO: set dtbclk in correct place */
clk_mgr->clks.dtbclk_en = false;
-#endif
dm_set_dcn_clocks(clk_mgr->ctx, &clk_mgr->clks);
}
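The divider-127 workaround added to dcn20_update_clocks_update_dentist() above recenters each stream encoder's pixel FIFO: the calibrated average FIFO level is scaled down by 4, then pixels are dropped when leaving divider 127 or added when entering it. A minimal sketch of just that arithmetic (helper and parameter names are illustrative, not part of the patch); a positive return means calling otg_add_pixel() that many times, a negative one means calling otg_drop_pixel():

static int fifo_recenter_pixels(unsigned int fifo_cal_average_level,
				int entering_divider_127)
{
	int n = fifo_cal_average_level / 4;

	if (entering_divider_127)
		return 12 - n;		/* refill toward the FIFO midpoint */

	return -(n - 4);		/* leaving 127: drain the excess pixels */
}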
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
index 0b9c045b0c8e..d254d0b6fba1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
@@ -50,7 +50,8 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr,
enum dc_clock_type clock_type,
struct dc_clock_config *clock_cfg);
-void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr);
+void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr,
+ struct dc_state *context);
void dcn2_read_clocks_from_hw_dentist(struct clk_mgr *clk_mgr_base);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index 652fa89fae5f..513676a6f52b 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -334,11 +334,11 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
if (dpp_clock_lowered) {
/* if clock is being lowered, increase DTO before lowering refclk */
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
- dcn20_update_clocks_update_dentist(clk_mgr);
+ dcn20_update_clocks_update_dentist(clk_mgr, context);
} else {
/* if clock is being raised, increase refclk before lowering DTO */
if (update_dppclk || update_dispclk)
- dcn20_update_clocks_update_dentist(clk_mgr);
+ dcn20_update_clocks_update_dentist(clk_mgr, context);
/* There is a check inside dcn20_update_clocks_update_dpp_dto which ensures
* that we do not lower dto when it is not safe to lower. We do not need to
* compare the current and new dppclk before calling this function.*/
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
index 66db5e988bc1..dad4a4c18bcf 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
@@ -31,8 +31,8 @@
#include "dcn31_smu.h"
#include "yellow_carp_offset.h"
-#include "mp/mp_13_0_1_offset.h"
-#include "mp/mp_13_0_1_sh_mask.h"
+#include "mp/mp_13_0_2_offset.h"
+#include "mp/mp_13_0_2_sh_mask.h"
#define REG(reg_name) \
(MP0_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index be1dbd3136ec..605e297b7a59 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1524,7 +1524,7 @@ static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
return stream_mask;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
void dc_z10_restore(struct dc *dc)
{
if (dc->hwss.z10_restore)
@@ -1544,9 +1544,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
#if defined(CONFIG_DRM_AMD_DC_DCN)
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
dc_z10_restore(dc);
-#endif
dc_allow_idle_optimizations(dc, false);
#endif
@@ -2626,7 +2624,7 @@ static void commit_planes_for_stream(struct dc *dc,
int i, j;
struct pipe_ctx *top_pipe_to_program = NULL;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
dc_z10_restore(dc);
#endif
@@ -3085,7 +3083,7 @@ void dc_set_power_state(
case DC_ACPI_CM_POWER_STATE_D0:
dc_resource_state_construct(dc, dc->current_state);
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
dc_z10_restore(dc);
#endif
if (dc->ctx->dmub_srv)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 15f987a63025..9039fb134db5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -409,13 +409,13 @@ void get_surface_tile_visual_confirm_color(
struct tg_color *color)
{
uint32_t color_value = MAX_TG_COLOR_VALUE;
- /* Determine the overscan color based on the top-most (desktop) plane's context */
- struct pipe_ctx *top_pipe_ctx = pipe_ctx;
+ /* Determine the overscan color based on the bottom-most plane's context */
+ struct pipe_ctx *bottom_pipe_ctx = pipe_ctx;
- while (top_pipe_ctx->top_pipe != NULL)
- top_pipe_ctx = top_pipe_ctx->top_pipe;
+ while (bottom_pipe_ctx->bottom_pipe != NULL)
+ bottom_pipe_ctx = bottom_pipe_ctx->bottom_pipe;
- switch (top_pipe_ctx->plane_state->tiling_info.gfx9.swizzle) {
+ switch (bottom_pipe_ctx->plane_state->tiling_info.gfx9.swizzle) {
case DC_SW_LINEAR:
/* LINEAR Surface - set border color to red */
color->color_r_cr = color_value;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 0f91280883a6..6132b645bfd1 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -49,6 +49,7 @@
#include "dmub/dmub_srv.h"
#include "inc/hw/panel_cntl.h"
#include "inc/link_enc_cfg.h"
+#include "inc/link_dpcd.h"
#define DC_LOGGER_INIT(logger)
@@ -59,20 +60,6 @@
#define RETIMER_REDRIVER_INFO(...) \
DC_LOG_RETIMER_REDRIVER( \
__VA_ARGS__)
-/*******************************************************************************
- * Private structures
- ******************************************************************************/
-
-enum {
- PEAK_FACTOR_X1000 = 1006,
- /*
- * Some receivers fail to train on first try and are good
- * on subsequent tries. 2 retries should be plenty. If we
- * don't have a successful training then we don't expect to
- * ever get one.
- */
- LINK_TRAINING_MAX_VERIFY_RETRY = 2
-};
/*******************************************************************************
* Private functions
@@ -718,11 +705,9 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
static bool detect_dp(struct dc_link *link,
struct display_sink_capability *sink_caps,
- bool *converter_disable_audio,
- struct audio_support *audio_support,
enum dc_detect_reason reason)
{
- bool boot = false;
+ struct audio_support *audio_support = &link->dc->res_pool->audio_support;
sink_caps->signal = link_detect_sink(link, reason);
sink_caps->transaction_type =
@@ -745,60 +730,12 @@ static bool detect_dp(struct dc_link *link,
* of this function). */
query_hdcp_capability(SIGNAL_TYPE_DISPLAY_PORT_MST, link);
#endif
- /*
- * This call will initiate MST topology discovery. Which
- * will detect MST ports and add new DRM connector DRM
- * framework. Then read EDID via remote i2c over aux. In
- * the end, will notify DRM detect result and save EDID
- * into DRM framework.
- *
- * .detect is called by .fill_modes.
- * .fill_modes is called by user mode ioctl
- * DRM_IOCTL_MODE_GETCONNECTOR.
- *
- * .get_modes is called by .fill_modes.
- *
- * call .get_modes, AMDGPU DM implementation will create
- * new dc_sink and add to dc_link. For long HPD plug
- * in/out, MST has its own handle.
- *
- * Therefore, just after dc_create, link->sink is not
- * created for MST until user mode app calls
- * DRM_IOCTL_MODE_GETCONNECTOR.
- *
- * Need check ->sink usages in case ->sink = NULL
- * TODO: s3 resume check
- */
- if (reason == DETECT_REASON_BOOT)
- boot = true;
-
- dm_helpers_dp_update_branch_info(link->ctx, link);
-
- if (!dm_helpers_dp_mst_start_top_mgr(link->ctx,
- link, boot)) {
- /* MST not supported */
- link->type = dc_connection_single;
- sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
- }
}
if (link->type != dc_connection_mst_branch &&
- is_dp_branch_device(link)) {
+ is_dp_branch_device(link))
/* DP SST branch */
link->type = dc_connection_sst_branch;
- if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
- /*
- * SST branch unplug processing for short irq
- */
- link_disconnect_sink(link);
- return true;
- }
-
- if (is_dp_active_dongle(link) &&
- (link->dpcd_caps.dongle_type !=
- DISPLAY_DONGLE_DP_HDMI_CONVERTER))
- *converter_disable_audio = true;
- }
} else {
/* DP passive dongles */
sink_caps->signal = dp_passive_dongle_detection(link->ddc,
@@ -884,7 +821,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
{
struct dc_sink_init_data sink_init_data = { 0 };
struct display_sink_capability sink_caps = { 0 };
- uint8_t i;
+ uint32_t i;
bool converter_disable_audio = false;
struct audio_support *aud_support = &link->dc->res_pool->audio_support;
bool same_edid = false;
@@ -893,7 +830,6 @@ static bool dc_link_detect_helper(struct dc_link *link,
struct dc_sink *sink = NULL;
struct dc_sink *prev_sink = NULL;
struct dpcd_caps prev_dpcd_caps;
- bool same_dpcd = true;
enum dc_connection_type new_connection_type = dc_connection_none;
enum dc_connection_type pre_connection_type = dc_connection_none;
bool perform_dp_seamless_boot = false;
@@ -904,9 +840,10 @@ static bool dc_link_detect_helper(struct dc_link *link,
if (dc_is_virtual_signal(link->connector_signal))
return false;
- if ((link->connector_signal == SIGNAL_TYPE_LVDS ||
- link->connector_signal == SIGNAL_TYPE_EDP) &&
- link->local_sink) {
+ if (((link->connector_signal == SIGNAL_TYPE_LVDS ||
+ link->connector_signal == SIGNAL_TYPE_EDP) &&
+ (!link->dc->config.allow_edp_hotplug_detection)) &&
+ link->local_sink) {
// need to re-write OUI and brightness in resume case
if (link->connector_signal == SIGNAL_TYPE_EDP) {
dpcd_set_source_specific_data(link);
@@ -983,20 +920,59 @@ static bool dc_link_detect_helper(struct dc_link *link,
return false;
}
- if (!detect_dp(link, &sink_caps,
- &converter_disable_audio,
- aud_support, reason)) {
+ if (!detect_dp(link, &sink_caps, reason)) {
if (prev_sink)
dc_sink_release(prev_sink);
return false;
}
- // Check if dpcp block is the same
- if (prev_sink) {
- if (memcmp(&link->dpcd_caps, &prev_dpcd_caps,
- sizeof(struct dpcd_caps)))
- same_dpcd = false;
+ if (link->type == dc_connection_mst_branch) {
+ LINK_INFO("link=%d, mst branch is now Connected\n",
+ link->link_index);
+ /* Need to setup mst link_cap struct here
+ * otherwise dc_link_detect() will leave mst link_cap
+ * empty which leads to allocate_mst_payload() has "0"
+ * pbn_per_slot value leading to exception on dc_fixpt_div()
+ */
+ dp_verify_mst_link_cap(link);
+
+ /*
+ * This call will initiate MST topology discovery. Which
+ * will detect MST ports and add new DRM connector DRM
+ * framework. Then read EDID via remote i2c over aux. In
+ * the end, will notify DRM detect result and save EDID
+ * into DRM framework.
+ *
+ * .detect is called by .fill_modes.
+ * .fill_modes is called by user mode ioctl
+ * DRM_IOCTL_MODE_GETCONNECTOR.
+ *
+ * .get_modes is called by .fill_modes.
+ *
+ * call .get_modes, AMDGPU DM implementation will create
+ * new dc_sink and add to dc_link. For long HPD plug
+ * in/out, MST has its own handle.
+ *
+ * Therefore, just after dc_create, link->sink is not
+ * created for MST until user mode app calls
+ * DRM_IOCTL_MODE_GETCONNECTOR.
+ *
+ * Need check ->sink usages in case ->sink = NULL
+ * TODO: s3 resume check
+ */
+
+ dm_helpers_dp_update_branch_info(link->ctx, link);
+ if (dm_helpers_dp_mst_start_top_mgr(link->ctx,
+ link, reason == DETECT_REASON_BOOT)) {
+ if (prev_sink)
+ dc_sink_release(prev_sink);
+ return false;
+ } else {
+ link->type = dc_connection_sst_branch;
+ sink_caps.signal = SIGNAL_TYPE_DISPLAY_PORT;
+ }
}
+
/* Active SST downstream branch device unplug*/
if (link->type == dc_connection_sst_branch &&
link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) {
@@ -1006,31 +982,23 @@ static bool dc_link_detect_helper(struct dc_link *link,
return true;
}
+ /* disable audio for non DP to HDMI active sst converter */
+ if (link->type == dc_connection_sst_branch &&
+ is_dp_active_dongle(link) &&
+ (link->dpcd_caps.dongle_type !=
+ DISPLAY_DONGLE_DP_HDMI_CONVERTER))
+ converter_disable_audio = true;
+
// link switch from MST to non-MST stop topology manager
if (pre_connection_type == dc_connection_mst_branch &&
- link->type != dc_connection_mst_branch) {
+ link->type != dc_connection_mst_branch)
dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
- }
- if (link->type == dc_connection_mst_branch) {
- LINK_INFO("link=%d, mst branch is now Connected\n",
- link->link_index);
- /* Need to setup mst link_cap struct here
- * otherwise dc_link_detect() will leave mst link_cap
- * empty which leads to allocate_mst_payload() has "0"
- * pbn_per_slot value leading to exception on dc_fixpt_div()
- */
- dp_verify_mst_link_cap(link);
-
- if (prev_sink)
- dc_sink_release(prev_sink);
- return false;
- }
// For seamless boot, to skip verify link cap, we read UEFI settings and set them as verified.
if (reason == DETECT_REASON_BOOT &&
- !dc_ctx->dc->config.power_down_display_on_boot &&
- link->link_status.link_active)
+ !dc_ctx->dc->config.power_down_display_on_boot &&
+ link->link_status.link_active)
perform_dp_seamless_boot = true;
if (perform_dp_seamless_boot) {
@@ -1213,11 +1181,11 @@ static bool dc_link_detect_helper(struct dc_link *link,
link->dongle_max_pix_clk = 0;
}
- LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p dpcd same=%d edid same=%d\n",
+ LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p edid same=%d\n",
link->link_index, sink,
(sink_caps.signal ==
SIGNAL_TYPE_NONE ? "Disconnected" : "Connected"),
- prev_sink, same_dpcd, same_edid);
+ prev_sink, same_edid);
if (prev_sink)
dc_sink_release(prev_sink);
@@ -1501,7 +1469,8 @@ static bool dc_link_construct(struct dc_link *link,
link->connector_signal = SIGNAL_TYPE_EDP;
if (link->hpd_gpio) {
- link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ if (!link->dc->config.allow_edp_hotplug_detection)
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
link->irq_source_hpd_rx =
dal_irq_get_rx_source(link->hpd_gpio);
}
@@ -2692,7 +2661,7 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active,
return false;
link->psr_settings.psr_allow_active = allow_active;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
if (!allow_active)
dc_z10_restore(dc);
#endif
@@ -2873,7 +2842,7 @@ bool dc_link_setup_psr(struct dc_link *link,
psr_context->psr_level.u32all = 0;
/*skip power down the single pipe since it blocks the cstate*/
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
if (link->ctx->asic_id.chip_family >= FAMILY_RV) {
psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
if (link->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP && !dc->debug.disable_z10)
@@ -3222,7 +3191,7 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
/*stream_enc_inst*/
config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst;
config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA;
config.link_enc_idx = pipe_ctx->stream->link->link_enc->transmitter - TRANSMITTER_UNIPHY_A;
config.phy_idx = pipe_ctx->stream->link->link_enc->transmitter - TRANSMITTER_UNIPHY_A;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 919c94de2a20..6da226bf11d5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -25,6 +25,8 @@ static const uint8_t DP_VGA_LVDS_CONVERTER_ID_3[] = "dnomlA";
link->ctx->logger
#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
+#include "link_dpcd.h"
+
/* maximum pre emphasis level allowed for each voltage swing level*/
static const enum dc_pre_emphasis
voltage_swing_to_pre_emphasis[] = { PRE_EMPHASIS_LEVEL3,
@@ -1663,6 +1665,7 @@ static enum link_training_result dp_perform_8b_10b_link_training(
uint8_t repeater_cnt;
uint8_t repeater_id;
+ uint8_t lane = 0;
if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
start_clock_recovery_pattern_early(link, lt_settings, DPRX);
@@ -1693,6 +1696,9 @@ static enum link_training_result dp_perform_8b_10b_link_training(
repeater_training_done(link, repeater_id);
}
+
+ for (lane = 0; lane < (uint8_t)lt_settings->link_settings.lane_count; lane++)
+ lt_settings->lane_settings[lane].VOLTAGE_SWING = VOLTAGE_SWING_LEVEL0;
}
if (status == LINK_TRAINING_SUCCESS) {
@@ -1755,42 +1761,6 @@ enum link_training_result dc_link_dp_perform_link_training(
return status;
}
-static enum dp_panel_mode try_enable_assr(struct dc_stream_state *stream)
-{
- struct dc_link *link = stream->link;
- enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
-#ifdef CONFIG_DRM_AMD_DC_HDCP
- struct cp_psp *cp_psp = &stream->ctx->cp_psp;
-#endif
-
- /* ASSR must be supported on the panel */
- if (panel_mode == DP_PANEL_MODE_DEFAULT)
- return panel_mode;
-
- /* eDP or internal DP only */
- if (link->connector_signal != SIGNAL_TYPE_EDP &&
- !(link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
- link->is_internal_display))
- return DP_PANEL_MODE_DEFAULT;
-
-#ifdef CONFIG_DRM_AMD_DC_HDCP
- if (cp_psp && cp_psp->funcs.enable_assr) {
- if (!cp_psp->funcs.enable_assr(cp_psp->handle, link)) {
- /* since eDP implies ASSR on, change panel
- * mode to disable ASSR
- */
- panel_mode = DP_PANEL_MODE_DEFAULT;
- }
- } else
- panel_mode = DP_PANEL_MODE_DEFAULT;
-
-#else
- /* turn off ASSR if the implementation is not compiled in */
- panel_mode = DP_PANEL_MODE_DEFAULT;
-#endif
- return panel_mode;
-}
-
bool perform_link_training_with_retries(
const struct dc_link_settings *link_setting,
bool skip_video_pattern,
@@ -1799,14 +1769,14 @@ bool perform_link_training_with_retries(
enum signal_type signal,
bool do_fallback)
{
- uint8_t j;
+ int j;
uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY;
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
- enum dp_panel_mode panel_mode;
+ enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
struct link_encoder *link_enc;
enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0;
- struct dc_link_settings currnet_setting = *link_setting;
+ struct dc_link_settings current_setting = *link_setting;
/* Dynamically assigned link encoders associated with stream rather than
* link.
@@ -1815,7 +1785,6 @@ bool perform_link_training_with_retries(
link_enc = stream->link_enc;
else
link_enc = link->link_enc;
- ASSERT(link_enc);
/* We need to do this before the link training to ensure the idle pattern in SST
* mode will be sent right after the link training
@@ -1832,7 +1801,7 @@ bool perform_link_training_with_retries(
link,
signal,
pipe_ctx->clock_source->id,
- &currnet_setting);
+ &current_setting);
if (stream->sink_patches.dppowerup_delay > 0) {
int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay;
@@ -1840,19 +1809,31 @@ bool perform_link_training_with_retries(
msleep(delay_dp_power_up_in_ms);
}
- panel_mode = try_enable_assr(stream);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (panel_mode == DP_PANEL_MODE_EDP) {
+ struct cp_psp *cp_psp = &stream->ctx->cp_psp;
+
+ if (cp_psp && cp_psp->funcs.enable_assr) {
+ if (!cp_psp->funcs.enable_assr(cp_psp->handle, link)) {
+ /* since eDP implies ASSR on, change panel
+ * mode to disable ASSR
+ */
+ panel_mode = DP_PANEL_MODE_DEFAULT;
+ }
+ } else
+ panel_mode = DP_PANEL_MODE_DEFAULT;
+ }
+#endif
+
dp_set_panel_mode(link, panel_mode);
- DC_LOG_DETECTION_DP_CAPS("Link: %d ASSR enabled: %d\n",
- link->link_index,
- panel_mode != DP_PANEL_MODE_DEFAULT);
if (link->aux_access_disabled) {
- dc_link_dp_perform_link_training_skip_aux(link, &currnet_setting);
+ dc_link_dp_perform_link_training_skip_aux(link, &current_setting);
return true;
} else {
status = dc_link_dp_perform_link_training(
link,
- &currnet_setting,
+ &current_setting,
skip_video_pattern);
if (status == LINK_TRAINING_SUCCESS)
return true;
@@ -1872,12 +1853,12 @@ bool perform_link_training_with_retries(
if (status == LINK_TRAINING_ABORT)
break;
else if (do_fallback) {
- decide_fallback_link_setting(*link_setting, &currnet_setting, status);
+ decide_fallback_link_setting(*link_setting, &current_setting, status);
/* Fail link training if reduced link bandwidth no longer meets
* stream requirements.
*/
if (dc_bandwidth_in_kbps_from_timing(&stream->timing) <
- dc_link_bandwidth_kbps(link, &currnet_setting))
+ dc_link_bandwidth_kbps(link, &current_setting))
break;
}
@@ -2326,7 +2307,7 @@ bool dp_verify_link_cap_with_retries(
struct dc_link_settings *known_limit_link_setting,
int attempts)
{
- uint8_t i = 0;
+ int i = 0;
bool success = false;
for (i = 0; i < attempts; i++) {
@@ -3619,79 +3600,16 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
return true;
}
-static bool retrieve_link_cap(struct dc_link *link)
+bool dp_retrieve_lttpr_cap(struct dc_link *link)
{
- /* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16,
- * which means size 16 will be good for both of those DPCD register block reads
- */
- uint8_t dpcd_data[16];
uint8_t lttpr_dpcd_data[6];
-
- /*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST.
- */
- uint8_t dpcd_dprx_data = '\0';
- uint8_t dpcd_power_state = '\0';
-
- struct dp_device_vendor_id sink_id;
- union down_stream_port_count down_strm_port_count;
- union edp_configuration_cap edp_config_cap;
- union dp_downstream_port_present ds_port = { 0 };
- enum dc_status status = DC_ERROR_UNEXPECTED;
- uint32_t read_dpcd_retry_cnt = 3;
- int i;
- struct dp_sink_hw_fw_revision dp_hw_fw_revision;
- bool is_lttpr_present = false;
- const uint32_t post_oui_delay = 30; // 30ms
bool vbios_lttpr_enable = false;
bool vbios_lttpr_interop = false;
struct dc_bios *bios = link->dc->ctx->dc_bios;
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ bool is_lttpr_present = false;
- memset(dpcd_data, '\0', sizeof(dpcd_data));
memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
- memset(&down_strm_port_count,
- '\0', sizeof(union down_stream_port_count));
- memset(&edp_config_cap, '\0',
- sizeof(union edp_configuration_cap));
-
- /* if extended timeout is supported in hardware,
- * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer
- * CTS 4.2.1.1 regression introduced by CTS specs requirement update.
- */
- dc_link_aux_try_to_configure_timeout(link->ddc,
- LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
-
- status = core_link_read_dpcd(link, DP_SET_POWER,
- &dpcd_power_state, sizeof(dpcd_power_state));
-
- /* Delay 1 ms if AUX CH is in power down state. Based on spec
- * section 2.3.1.2, if AUX CH may be powered down due to
- * write to DPCD 600h = 2. Sink AUX CH is monitoring differential
- * signal and may need up to 1 ms before being able to reply.
- */
- if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3)
- udelay(1000);
-
- dpcd_set_source_specific_data(link);
- /* Sink may need to configure internals based on vendor, so allow some
- * time before proceeding with possibly vendor specific transactions
- */
- msleep(post_oui_delay);
-
- for (i = 0; i < read_dpcd_retry_cnt; i++) {
- status = core_link_read_dpcd(
- link,
- DP_DPCD_REV,
- dpcd_data,
- sizeof(dpcd_data));
- if (status == DC_OK)
- break;
- }
-
- if (status != DC_OK) {
- dm_error("%s: Read dpcd data failed.\n", __func__);
- return false;
- }
-
/* Query BIOS to determine if LTTPR functionality is forced on by system */
if (bios->funcs->get_lttpr_caps) {
enum bp_result bp_query_result;
@@ -3737,6 +3655,10 @@ static bool retrieve_link_cap(struct dc_link *link)
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
lttpr_dpcd_data,
sizeof(lttpr_dpcd_data));
+ if (status != DC_OK) {
+ dm_error("%s: Read LTTPR caps data failed.\n", __func__);
+ return false;
+ }
link->dpcd_caps.lttpr_caps.revision.raw =
lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV -
@@ -3763,21 +3685,91 @@ static bool retrieve_link_cap(struct dc_link *link)
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
/* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
- is_lttpr_present = (link->dpcd_caps.lttpr_caps.phy_repeater_cnt > 0 &&
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt < 0xff &&
+ is_lttpr_present = (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 &&
link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
- if (is_lttpr_present)
+ if (is_lttpr_present) {
CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
- else
+ configure_lttpr_mode_transparent(link);
+ } else
link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
}
+ return is_lttpr_present;
+}
+
+static bool retrieve_link_cap(struct dc_link *link)
+{
+ /* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16,
+ * which means size 16 will be good for both of those DPCD register block reads
+ */
+ uint8_t dpcd_data[16];
+ /*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST.
+ */
+ uint8_t dpcd_dprx_data = '\0';
+ uint8_t dpcd_power_state = '\0';
+
+ struct dp_device_vendor_id sink_id;
+ union down_stream_port_count down_strm_port_count;
+ union edp_configuration_cap edp_config_cap;
+ union dp_downstream_port_present ds_port = { 0 };
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ uint32_t read_dpcd_retry_cnt = 3;
+ int i;
+ struct dp_sink_hw_fw_revision dp_hw_fw_revision;
+ const uint32_t post_oui_delay = 30; // 30ms
+ bool is_lttpr_present = false;
+
+ memset(dpcd_data, '\0', sizeof(dpcd_data));
+ memset(&down_strm_port_count,
+ '\0', sizeof(union down_stream_port_count));
+ memset(&edp_config_cap, '\0',
+ sizeof(union edp_configuration_cap));
+
+ /* if extended timeout is supported in hardware,
+ * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer
+ * CTS 4.2.1.1 regression introduced by CTS specs requirement update.
+ */
+ dc_link_aux_try_to_configure_timeout(link->ddc,
+ LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
+
+ is_lttpr_present = dp_retrieve_lttpr_cap(link);
+
+ status = core_link_read_dpcd(link, DP_SET_POWER,
+ &dpcd_power_state, sizeof(dpcd_power_state));
+
+ /* Delay 1 ms if AUX CH is in power down state. Based on spec
+ * section 2.3.1.2, if AUX CH may be powered down due to
+ * write to DPCD 600h = 2. Sink AUX CH is monitoring differential
+ * signal and may need up to 1 ms before being able to reply.
+ */
+ if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3)
+ udelay(1000);
+
+ dpcd_set_source_specific_data(link);
+ /* Sink may need to configure internals based on vendor, so allow some
+ * time before proceeding with possibly vendor specific transactions
+ */
+ msleep(post_oui_delay);
+
+ for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ status = core_link_read_dpcd(
+ link,
+ DP_DPCD_REV,
+ dpcd_data,
+ sizeof(dpcd_data));
+ if (status == DC_OK)
+ break;
+ }
+
+ if (status != DC_OK) {
+ dm_error("%s: Read receiver caps dpcd data failed.\n", __func__);
+ return false;
+ }
if (!is_lttpr_present)
dc_link_aux_try_to_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
-
{
union training_aux_rd_interval aux_rd_interval;
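The is_lttpr_present check in dp_retrieve_lttpr_cap() above relies on dp_convert_to_count() to turn the DPCD PHY_REPEATER_CNT encoding into a plain repeater count. A hedged sketch of that mapping, assuming the one-hot encoding defined by the DP 1.4 LTTPR spec (0x80 = 1 repeater down to 0x01 = 8 repeaters); the helper name here is illustrative, not the driver's:

static unsigned int lttpr_count_from_dpcd(unsigned char phy_repeater_cnt)
{
	/* Assumed DP 1.4 encoding: 0x80 -> 1 repeater, 0x40 -> 2, ... 0x01 -> 8.
	 * Any other value is treated as "no repeaters present".
	 */
	switch (phy_repeater_cnt) {
	case 0x80: return 1;
	case 0x40: return 2;
	case 0x20: return 3;
	case 0x10: return 4;
	case 0x08: return 5;
	case 0x04: return 6;
	case 0x02: return 7;
	case 0x01: return 8;
	default:   return 0;
	}
}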
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
new file mode 100644
index 000000000000..fe234760a0f5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
@@ -0,0 +1,218 @@
+#include <inc/core_status.h>
+#include <dc_link.h>
+#include <inc/link_hwss.h>
+#include <inc/link_dpcd.h>
+#include "drm/drm_dp_helper.h"
+#include <dc_dp_types.h>
+#include "dm_helpers.h"
+
+#define END_ADDRESS(start, size) (start + size - 1)
+#define ADDRESS_RANGE_SIZE(start, end) (end - start + 1)
+struct dpcd_address_range {
+ uint32_t start;
+ uint32_t end;
+};
+
+static enum dc_status internal_link_read_dpcd(
+ struct dc_link *link,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t size)
+{
+ if (!link->aux_access_disabled &&
+ !dm_helpers_dp_read_dpcd(link->ctx,
+ link, address, data, size)) {
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ return DC_OK;
+}
+
+static enum dc_status internal_link_write_dpcd(
+ struct dc_link *link,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t size)
+{
+ if (!link->aux_access_disabled &&
+ !dm_helpers_dp_write_dpcd(link->ctx,
+ link, address, data, size)) {
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ return DC_OK;
+}
+
+/*
+ * Partition the entire DPCD address space
+ * XXX: This partitioning must cover the entire DPCD address space,
+ * and must contain no gaps or overlapping address ranges.
+ */
+static const struct dpcd_address_range mandatory_dpcd_partitions[] = {
+ { 0, DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR1) - 1},
+ { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR1), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR2) - 1 },
+ { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR2), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR3) - 1 },
+ { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR3), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR4) - 1 },
+ { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR4), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR5) - 1 },
+ { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR5), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR6) - 1 },
+ { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR6), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR7) - 1 },
+ { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR7), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR8) - 1 },
+ { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR8), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR1) - 1 },
+ /*
+ * The FEC registers are contiguous
+ */
+ { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR1), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR1) - 1 },
+ { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR2), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR2) - 1 },
+ { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR3), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR3) - 1 },
+ { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR4), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR4) - 1 },
+ { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR5), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR5) - 1 },
+ { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR6), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR6) - 1 },
+ { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR7), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR7) - 1 },
+ { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR8), DP_LTTPR_MAX_ADD },
+ /* all remaining DPCD addresses */
+ { DP_LTTPR_MAX_ADD + 1, DP_DPCD_MAX_ADD } };
+
+static inline bool do_addresses_intersect_with_range(
+ const struct dpcd_address_range *range,
+ const uint32_t start_address,
+ const uint32_t end_address)
+{
+ return start_address <= range->end && end_address >= range->start;
+}
+
+static uint32_t dpcd_get_next_partition_size(const uint32_t address, const uint32_t size)
+{
+ const uint32_t end_address = END_ADDRESS(address, size);
+ uint32_t partition_iterator = 0;
+
+ /*
+ * find current partition
+ * this loop spins forever if partition map above is not surjective
+ */
+ while (!do_addresses_intersect_with_range(&mandatory_dpcd_partitions[partition_iterator],
+ address, end_address))
+ partition_iterator++;
+ if (end_address < mandatory_dpcd_partitions[partition_iterator].end)
+ return size;
+ return ADDRESS_RANGE_SIZE(address, mandatory_dpcd_partitions[partition_iterator].end);
+}
+
+/*
+ * Ranges of DPCD addresses that must be read in a single transaction
+ * XXX: Do not allow any two address ranges in this array to overlap
+ */
+static const struct dpcd_address_range mandatory_dpcd_blocks[] = {
+ { DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT }};
+
+/*
+ * extend addresses to read all mandatory blocks together
+ */
+static void dpcd_extend_address_range(
+ const uint32_t in_address,
+ uint8_t * const in_data,
+ const uint32_t in_size,
+ uint32_t *out_address,
+ uint8_t **out_data,
+ uint32_t *out_size)
+{
+ const uint32_t end_address = END_ADDRESS(in_address, in_size);
+ const struct dpcd_address_range *addr_range;
+ struct dpcd_address_range new_addr_range;
+ uint32_t i;
+
+ new_addr_range.start = in_address;
+ new_addr_range.end = end_address;
+ for (i = 0; i < ARRAY_SIZE(mandatory_dpcd_blocks); i++) {
+ addr_range = &mandatory_dpcd_blocks[i];
+ if (addr_range->start <= in_address && addr_range->end >= in_address)
+ new_addr_range.start = addr_range->start;
+
+ if (addr_range->start <= end_address && addr_range->end >= end_address)
+ new_addr_range.end = addr_range->end;
+ }
+ *out_address = in_address;
+ *out_size = in_size;
+ *out_data = in_data;
+ if (new_addr_range.start != in_address || new_addr_range.end != end_address) {
+ *out_address = new_addr_range.start;
+ *out_size = ADDRESS_RANGE_SIZE(new_addr_range.start, new_addr_range.end);
+ *out_data = kzalloc(*out_size * sizeof(**out_data), GFP_KERNEL);
+ }
+}
+
+/*
+ * Reduce the AUX reply down to the values the caller requested
+ */
+static void dpcd_reduce_address_range(
+ const uint32_t extended_address,
+ uint8_t * const extended_data,
+ const uint32_t extended_size,
+ const uint32_t reduced_address,
+ uint8_t * const reduced_data,
+ const uint32_t reduced_size)
+{
+ const uint32_t reduced_end_address = END_ADDRESS(reduced_address, reduced_size);
+ const uint32_t extended_end_address = END_ADDRESS(extended_address, extended_size);
+ const uint32_t offset = reduced_address - extended_address;
+
+ if (extended_end_address == reduced_end_address && extended_address == reduced_address)
+ return; /* extended and reduced address ranges point to the same data */
+
+ memcpy(&extended_data[offset], reduced_data, reduced_size);
+ kfree(extended_data);
+}
+
+enum dc_status core_link_read_dpcd(
+ struct dc_link *link,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t size)
+{
+ uint32_t extended_address;
+ uint32_t partitioned_address;
+ uint8_t *extended_data;
+ uint32_t extended_size;
+ /* size of the remaining partitioned address space */
+ uint32_t size_left_to_read;
+ enum dc_status status;
+ /* size of the next partition to be read from */
+ uint32_t partition_size;
+ uint32_t data_index = 0;
+
+ dpcd_extend_address_range(address, data, size, &extended_address, &extended_data, &extended_size);
+ partitioned_address = extended_address;
+ size_left_to_read = extended_size;
+ while (size_left_to_read) {
+ partition_size = dpcd_get_next_partition_size(partitioned_address, size_left_to_read);
+ status = internal_link_read_dpcd(link, partitioned_address, &extended_data[data_index], partition_size);
+ if (status != DC_OK)
+ break;
+ partitioned_address += partition_size;
+ data_index += partition_size;
+ size_left_to_read -= partition_size;
+ }
+ dpcd_reduce_address_range(extended_address, extended_data, extended_size, address, data, size);
+ return status;
+}
+
+enum dc_status core_link_write_dpcd(
+ struct dc_link *link,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t size)
+{
+ uint32_t partition_size;
+ uint32_t data_index = 0;
+ enum dc_status status;
+
+ while (size) {
+ partition_size = dpcd_get_next_partition_size(address, size);
+ status = internal_link_write_dpcd(link, address, &data[data_index], partition_size);
+ if (status != DC_OK)
+ break;
+ address += partition_size;
+ data_index += partition_size;
+ size -= partition_size;
+ }
+ return status;
+}
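To see how the partition map above changes AUX traffic, note that the loop in core_link_read_dpcd() clamps each transaction to the partition containing the current address, so a read that crosses an LTTPR register boundary becomes several back-to-back reads. A self-contained toy reproduction of that splitting (the two partitions and the addresses are made up for illustration, not the real DPCD map):

#include <stdint.h>
#include <stdio.h>

struct range { uint32_t start, end; };

/* toy partition map; the real one is mandatory_dpcd_partitions[] above */
static const struct range parts[] = { { 0x0, 0xefff }, { 0xf000, 0xfffff } };

static uint32_t next_partition_size(uint32_t addr, uint32_t size)
{
	uint32_t end = addr + size - 1;
	unsigned int i = 0;

	/* find the partition containing addr (the map must have no gaps) */
	while (!(addr <= parts[i].end && end >= parts[i].start))
		i++;
	if (end < parts[i].end)
		return size;
	return parts[i].end - addr + 1;
}

int main(void)
{
	uint32_t addr = 0xeffe, size = 8;

	while (size) {
		uint32_t chunk = next_partition_size(addr, size);

		printf("read %u bytes at 0x%x\n",
		       (unsigned int)chunk, (unsigned int)addr);
		addr += chunk;
		size -= chunk;
	}
	return 0;	/* prints a 2-byte read at 0xeffe, then a 6-byte read at 0xf000 */
}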
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index f7dfc8fefdfa..9c51cd09dcf1 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -16,6 +16,7 @@
#include "resource.h"
#include "link_enc_cfg.h"
#include "clk_mgr.h"
+#include "inc/link_dpcd.h"
static uint8_t convert_to_count(uint8_t lttpr_repeater_count)
{
@@ -47,36 +48,6 @@ static inline bool is_immediate_downstream(struct dc_link *link, uint32_t offset
return (convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == offset);
}
-enum dc_status core_link_read_dpcd(
- struct dc_link *link,
- uint32_t address,
- uint8_t *data,
- uint32_t size)
-{
- if (!link->aux_access_disabled &&
- !dm_helpers_dp_read_dpcd(link->ctx,
- link, address, data, size)) {
- return DC_ERROR_UNEXPECTED;
- }
-
- return DC_OK;
-}
-
-enum dc_status core_link_write_dpcd(
- struct dc_link *link,
- uint32_t address,
- const uint8_t *data,
- uint32_t size)
-{
- if (!link->aux_access_disabled &&
- !dm_helpers_dp_write_dpcd(link->ctx,
- link, address, data, size)) {
- return DC_ERROR_UNEXPECTED;
- }
-
- return DC_OK;
-}
-
void dp_receiver_power_ctrl(struct dc_link *link, bool on)
{
uint8_t state;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 5fe4c5f80b54..a6a67244a322 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -58,9 +58,7 @@
#include "dcn301/dcn301_resource.h"
#include "dcn302/dcn302_resource.h"
#include "dcn303/dcn303_resource.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
-#include "../dcn31/dcn31_resource.h"
-#endif
+#include "dcn31/dcn31_resource.h"
#endif
#define DC_LOGGER_INIT(logger)
@@ -141,9 +139,7 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
case FAMILY_VGH:
dc_version = DCN_VERSION_3_01;
break;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
case FAMILY_YELLOW_CARP:
if (ASICREV_IS_YELLOW_CARP(asic_id.hw_internal_rev))
dc_version = DCN_VERSION_3_1;
@@ -233,12 +229,10 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
case DCN_VERSION_3_03:
res_pool = dcn303_create_resource_pool(init_data, dc);
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
case DCN_VERSION_3_1:
res_pool = dcn31_create_resource_pool(init_data, dc);
break;
#endif
-#endif
default:
break;
}
@@ -445,7 +439,7 @@ bool resource_are_vblanks_synchronizable(
{
uint32_t base60_refresh_rates[] = {10, 20, 5};
uint8_t i;
- uint8_t rr_count = sizeof(base60_refresh_rates)/sizeof(base60_refresh_rates[0]);
+ uint8_t rr_count = ARRAY_SIZE(base60_refresh_rates);
uint64_t frame_time_diff;
if (stream1->ctx->dc->config.vblank_alignment_dto_params &&
@@ -2142,7 +2136,7 @@ enum dc_status dc_validate_global_state(
if (!new_ctx)
return DC_ERROR_UNEXPECTED;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
* Update link encoder to stream assignment.
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 5420fda47bb7..45931ee14a6e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -294,9 +294,7 @@ bool dc_stream_set_cursor_attributes(
stream->cursor_attributes = *attributes;
#if defined(CONFIG_DRM_AMD_DC_DCN)
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
dc_z10_restore(dc);
-#endif
/* disable idle optimizations while updating cursor */
if (dc->idle_optimizations_allowed) {
dc_allow_idle_optimizations(dc, false);
@@ -358,9 +356,7 @@ bool dc_stream_set_cursor_position(
dc = stream->ctx->dc;
res_ctx = &dc->current_state->res_ctx;
#if defined(CONFIG_DRM_AMD_DC_DCN)
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
dc_z10_restore(dc);
-#endif
/* disable idle optimizations if enabling cursor */
if (dc->idle_optimizations_allowed && !stream->cursor_position.enable && position->enable) {
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index a70697898025..45640f1c26c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -45,7 +45,7 @@
/* forward declaration */
struct aux_payload;
-#define DC_VER "3.2.139"
+#define DC_VER "3.2.141"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -297,12 +297,14 @@ struct dc_config {
bool allow_seamless_boot_optimization;
bool power_down_display_on_boot;
bool edp_not_connected;
+ bool edp_no_power_sequencing;
bool force_enum_edp;
bool forced_clocks;
bool allow_lttpr_non_transparent_mode;
bool multi_mon_pp_mclk_switch;
bool disable_dmcu;
bool enable_4to1MPC;
+ bool allow_edp_hotplug_detection;
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool clamp_min_dcfclk;
#endif
@@ -351,7 +353,7 @@ enum dcn_pwr_state {
DCN_PWR_STATE_LOW_POWER = 3,
};
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
enum dcn_z9_support_state {
DCN_Z9_SUPPORT_UNKNOWN,
DCN_Z9_SUPPORT_ALLOW,
@@ -375,7 +377,7 @@ struct dc_clocks {
int phyclk_khz;
int dramclk_khz;
bool p_state_change_support;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
enum dcn_z9_support_state z9_support;
bool dtbclk_en;
#endif
@@ -500,7 +502,7 @@ struct dc_debug_options {
bool disable_pplib_clock_request;
bool disable_clock_gate;
bool disable_mem_low_power;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
bool pstate_enabled;
#endif
bool disable_dmcu;
@@ -521,8 +523,6 @@ struct dc_debug_options {
unsigned int force_odm_combine; //bit vector based on otg inst
#if defined(CONFIG_DRM_AMD_DC_DCN)
unsigned int force_odm_combine_4to1; //bit vector based on otg inst
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
bool disable_z9_mpc;
#endif
unsigned int force_fclk_khz;
@@ -566,7 +566,7 @@ struct dc_debug_options {
bool force_enable_edp_fec;
/* FEC/PSR1 sequence enable delay in 100us */
uint8_t fec_enable_delay_in100us;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
bool disable_z10;
bool enable_sw_cntl_psr;
#endif
@@ -594,7 +594,7 @@ struct dc_phy_addr_space_config {
uint64_t page_table_start_addr;
uint64_t page_table_end_addr;
uint64_t page_table_base_addr;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
bool base_addr_is_mc_addr;
#endif
} gart_config;
@@ -1334,7 +1334,7 @@ void dc_hardware_release(struct dc *dc);
#endif
bool dc_set_psr_allow_active(struct dc *dc, bool enable);
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
void dc_z10_restore(struct dc *dc);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 48ca23e1e599..360f3199ea6f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -86,6 +86,7 @@ void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv,
error:
DC_ERROR("Error queuing DMUB command: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
}
void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv)
@@ -95,8 +96,10 @@ void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv)
enum dmub_status status;
status = dmub_srv_cmd_execute(dmub);
- if (status != DMUB_STATUS_OK)
+ if (status != DMUB_STATUS_OK) {
DC_ERROR("Error starting DMUB execution: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ }
}
void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
@@ -106,8 +109,10 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
enum dmub_status status;
status = dmub_srv_wait_for_idle(dmub, 100000);
- if (status != DMUB_STATUS_OK)
+ if (status != DMUB_STATUS_OK) {
DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ }
}
void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
@@ -180,7 +185,7 @@ bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
dmub, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK,
stream_mask, timeout) == DMUB_STATUS_OK;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
+
bool dc_dmub_srv_is_restore_required(struct dc_dmub_srv *dc_dmub_srv)
{
struct dmub_srv *dmub;
@@ -202,7 +207,6 @@ bool dc_dmub_srv_is_restore_required(struct dc_dmub_srv *dc_dmub_srv)
return boot_status.bits.restore_required;
}
-#endif
bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_buf_entry *entry)
{
@@ -214,3 +218,94 @@ void dc_dmub_trace_event_control(struct dc *dc, bool enable)
{
dm_helpers_dmub_outbox_interrupt_control(dc->ctx, enable);
}
+
+bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data)
+{
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub || !diag_data)
+ return false;
+ return dmub_srv_get_diagnostic_data(dc_dmub_srv->dmub, diag_data);
+}
+
+void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
+{
+ struct dmub_diagnostic_data diag_data = {0};
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
+ DC_LOG_ERROR("%s: invalid parameters.", __func__);
+ return;
+ }
+
+ if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv, &diag_data)) {
+ DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__);
+ return;
+ }
+
+ DC_LOG_DEBUG(
+ "DMCUB STATE\n"
+ " dmcub_version : %08x\n"
+ " scratch [0] : %08x\n"
+ " scratch [1] : %08x\n"
+ " scratch [2] : %08x\n"
+ " scratch [3] : %08x\n"
+ " scratch [4] : %08x\n"
+ " scratch [5] : %08x\n"
+ " scratch [6] : %08x\n"
+ " scratch [7] : %08x\n"
+ " scratch [8] : %08x\n"
+ " scratch [9] : %08x\n"
+ " scratch [10] : %08x\n"
+ " scratch [11] : %08x\n"
+ " scratch [12] : %08x\n"
+ " scratch [13] : %08x\n"
+ " scratch [14] : %08x\n"
+ " scratch [15] : %08x\n"
+ " pc : %08x\n"
+ " unk_fault_addr : %08x\n"
+ " inst_fault_addr : %08x\n"
+ " data_fault_addr : %08x\n"
+ " inbox1_rptr : %08x\n"
+ " inbox1_wptr : %08x\n"
+ " inbox1_size : %08x\n"
+ " inbox0_rptr : %08x\n"
+ " inbox0_wptr : %08x\n"
+ " inbox0_size : %08x\n"
+ " is_enabled : %d\n"
+ " is_soft_reset : %d\n"
+ " is_secure_reset : %d\n"
+ " is_traceport_en : %d\n"
+ " is_cw0_en : %d\n"
+ " is_cw6_en : %d\n",
+ diag_data.dmcub_version,
+ diag_data.scratch[0],
+ diag_data.scratch[1],
+ diag_data.scratch[2],
+ diag_data.scratch[3],
+ diag_data.scratch[4],
+ diag_data.scratch[5],
+ diag_data.scratch[6],
+ diag_data.scratch[7],
+ diag_data.scratch[8],
+ diag_data.scratch[9],
+ diag_data.scratch[10],
+ diag_data.scratch[11],
+ diag_data.scratch[12],
+ diag_data.scratch[13],
+ diag_data.scratch[14],
+ diag_data.scratch[15],
+ diag_data.pc,
+ diag_data.undefined_address_fault_addr,
+ diag_data.inst_fetch_fault_addr,
+ diag_data.data_write_fault_addr,
+ diag_data.inbox1_rptr,
+ diag_data.inbox1_wptr,
+ diag_data.inbox1_size,
+ diag_data.inbox0_rptr,
+ diag_data.inbox0_wptr,
+ diag_data.inbox0_size,
+ diag_data.is_dmcub_enabled,
+ diag_data.is_dmcub_soft_reset,
+ diag_data.is_dmcub_secure_reset,
+ diag_data.is_traceport_en,
+ diag_data.is_cw0_enabled,
+ diag_data.is_cw6_enabled);
+}
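
Note on the three dc_dmub_srv.c hunks above: every DMUB entry point that can fail (queue, execute, wait-for-idle) now dumps the firmware's diagnostic state — scratch registers, program counter, fault addresses and inbox pointers — instead of printing only a status code. A minimal, self-contained sketch of that error-path shape (plain C with stand-in names; run_dmub_step() and dump_diag_state() are illustrations, not the driver's real API):

#include <stdio.h>

enum dmub_status { DMUB_STATUS_OK, DMUB_STATUS_TIMEOUT };

/* stand-in for dc_dmub_srv_log_diagnostic_data(): in the driver this prints
 * the scratch/PC/inbox snapshot shown in the hunk above */
static void dump_diag_state(void)
{
        printf("DMCUB STATE ...\n");
}

/* run one DMUB step and, on failure, capture diagnostics as well as the
 * status code -- the same shape the three error paths above now share */
static void run_dmub_step(enum dmub_status (*step)(void), const char *what)
{
        enum dmub_status status = step();

        if (status != DMUB_STATUS_OK) {
                printf("Error %s: status=%d\n", what, status);
                dump_diag_state();
        }
}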
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index f5489c7aa770..3e35eee7188c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -62,13 +62,16 @@ bool dc_dmub_srv_cmd_with_reply_data(struct dc_dmub_srv *dc_dmub_srv, union dmub
bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
unsigned int stream_mask);
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
bool dc_dmub_srv_is_restore_required(struct dc_dmub_srv *dc_dmub_srv);
-#endif
+
bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_buf_entry *entry);
void dc_dmub_trace_event_control(struct dc *dc, bool enable);
void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_data_register data);
+bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *dmub_oca);
+
+void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv);
+
#endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 83d97dfe328f..2fb88e54a4bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -615,7 +615,8 @@ int dce_aux_transfer_dmub_raw(struct ddc_service *ddc,
}
#define AUX_MAX_RETRIES 7
-#define AUX_MAX_DEFER_RETRIES 7
+#define AUX_MIN_DEFER_RETRIES 7
+#define AUX_MAX_DEFER_TIMEOUT_MS 50
#define AUX_MAX_I2C_DEFER_RETRIES 7
#define AUX_MAX_INVALID_REPLY_RETRIES 2
#define AUX_MAX_TIMEOUT_RETRIES 3
@@ -628,6 +629,10 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
bool payload_reply = true;
enum aux_return_code_type operation_result;
bool retry_on_defer = false;
+ struct ddc *ddc_pin = ddc->ddc_pin;
+ struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(aux_engine);
+ uint32_t defer_time_in_ms = 0;
int aux_ack_retries = 0,
aux_defer_retries = 0,
@@ -660,19 +665,27 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
break;
case AUX_TRANSACTION_REPLY_AUX_DEFER:
+ /* polling_timeout_period is in us */
+ defer_time_in_ms += aux110->polling_timeout_period / 1000;
+ ++aux_defer_retries;
+ fallthrough;
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
retry_on_defer = true;
fallthrough;
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
- if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) {
+ if (aux_defer_retries >= AUX_MIN_DEFER_RETRIES
+ && defer_time_in_ms >= AUX_MAX_DEFER_TIMEOUT_MS) {
goto fail;
} else {
if ((*payload->reply == AUX_TRANSACTION_REPLY_AUX_DEFER) ||
(*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)) {
- if (payload->defer_delay > 1)
+ if (payload->defer_delay > 1) {
msleep(payload->defer_delay);
- else if (payload->defer_delay <= 1)
+ defer_time_in_ms += payload->defer_delay;
+ } else if (payload->defer_delay <= 1) {
udelay(payload->defer_delay * 1000);
+ defer_time_in_ms += payload->defer_delay;
+ }
}
}
break;
@@ -701,7 +714,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
// Check whether a DEFER had occurred before the timeout.
// If so, treat timeout as a DEFER.
if (retry_on_defer) {
- if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES)
+ if (++aux_defer_retries >= AUX_MIN_DEFER_RETRIES)
goto fail;
else if (payload->defer_delay > 0)
msleep(payload->defer_delay);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index 8817a834383a..df6539e4c730 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -1106,22 +1106,18 @@ struct dce_hwseq_registers {
type PANEL_DIGON_OVRD;\
type PANEL_PWRSEQ_TARGET_STATE_R;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
#define HWSEQ_DCN31_REG_FIELD_LIST(type) \
type DOMAIN_POWER_FORCEON;\
type DOMAIN_POWER_GATE;\
type DOMAIN_PGFSM_PWR_STATUS;\
type HPO_HDMISTREAMCLK_G_GATE_DIS;
-#endif
struct dce_hwseq_shift {
HWSEQ_REG_FIELD_LIST(uint8_t)
HWSEQ_DCN_REG_FIELD_LIST(uint8_t)
HWSEQ_DCN3_REG_FIELD_LIST(uint8_t)
HWSEQ_DCN301_REG_FIELD_LIST(uint8_t)
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
HWSEQ_DCN31_REG_FIELD_LIST(uint8_t)
-#endif
};
struct dce_hwseq_mask {
@@ -1129,9 +1125,7 @@ struct dce_hwseq_mask {
HWSEQ_DCN_REG_FIELD_LIST(uint32_t)
HWSEQ_DCN3_REG_FIELD_LIST(uint32_t)
HWSEQ_DCN301_REG_FIELD_LIST(uint32_t)
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
HWSEQ_DCN31_REG_FIELD_LIST(uint32_t)
-#endif
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index a524f471e0d7..6d1b01c267b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -264,18 +264,25 @@ static void set_speed(
struct dce_i2c_hw *dce_i2c_hw,
uint32_t speed)
{
- uint32_t xtal_ref_div = 0;
+ uint32_t xtal_ref_div = 0, ref_base_div = 0;
uint32_t prescale = 0;
+ uint32_t i2c_ref_clock = 0;
if (speed == 0)
return;
- REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);
+ REG_GET_2(MICROSECOND_TIME_BASE_DIV, MICROSECOND_TIME_BASE_DIV, &ref_base_div,
+ XTAL_REF_DIV, &xtal_ref_div);
if (xtal_ref_div == 0)
xtal_ref_div = 2;
- prescale = ((dce_i2c_hw->reference_frequency * 2) / xtal_ref_div) / speed;
+ if (ref_base_div == 0)
+ i2c_ref_clock = (dce_i2c_hw->reference_frequency * 2);
+ else
+ i2c_ref_clock = ref_base_div * 1000;
+
+ prescale = (i2c_ref_clock / xtal_ref_div) / speed;
if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL)
REG_UPDATE_N(SPEED, 3,
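
The set_speed() change above reads a second field out of MICROSECOND_TIME_BASE_DIV: when that field is programmed it is taken as the I2C reference clock (scaled by 1000, as in the hunk), and only when it is zero does the code fall back to the old 2x-crystal reference; the prescaler is then derived from whichever reference was chosen. A small sketch of that selection, with made-up helper names and the unit interpretation left to the driver (the scaling factor is copied from the hunk, not independently verified):

#include <stdint.h>

/* illustration of the reference-clock selection added to set_speed();
 * this sketch only shows the branch structure, not real register access */
static uint32_t pick_i2c_ref_clock(uint32_t ref_freq, uint32_t ref_base_div)
{
        if (ref_base_div == 0)
                return ref_freq * 2;        /* legacy path: 2x crystal */

        return ref_base_div * 1000;         /* time-base field, scaled as in the hunk */
}

static uint32_t i2c_prescale(uint32_t ref_clock, uint32_t xtal_ref_div,
                             uint32_t speed)
{
        if (xtal_ref_div == 0)
                xtal_ref_div = 2;           /* same default as the driver */

        return (ref_clock / xtal_ref_div) / speed;
}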
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
index 2309f2bb162c..3f45ecd189a2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
@@ -139,6 +139,7 @@ enum {
I2C_SF(DC_I2C_DATA, DC_I2C_INDEX, mask_sh),\
I2C_SF(DC_I2C_DATA, DC_I2C_INDEX_WRITE, mask_sh),\
I2C_SF(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, mask_sh),\
+ I2C_SF(MICROSECOND_TIME_BASE_DIV, MICROSECOND_TIME_BASE_DIV, mask_sh),\
I2C_SF(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, mask_sh)
#define I2C_COMMON_MASK_SH_LIST_DCE110(mask_sh)\
@@ -182,6 +183,7 @@ struct dce_i2c_shift {
uint8_t DC_I2C_INDEX;
uint8_t DC_I2C_INDEX_WRITE;
uint8_t XTAL_REF_DIV;
+ uint8_t MICROSECOND_TIME_BASE_DIV;
uint8_t DC_I2C_DDC1_SEND_RESET_LENGTH;
uint8_t DC_I2C_REG_RW_CNTL_STATUS;
uint8_t I2C_LIGHT_SLEEP_FORCE;
@@ -225,6 +227,7 @@ struct dce_i2c_mask {
uint32_t DC_I2C_INDEX;
uint32_t DC_I2C_INDEX_WRITE;
uint32_t XTAL_REF_DIV;
+ uint32_t MICROSECOND_TIME_BASE_DIV;
uint32_t DC_I2C_DDC1_SEND_RESET_LENGTH;
uint32_t DC_I2C_REG_RW_CNTL_STATUS;
uint32_t I2C_LIGHT_SLEEP_FORCE;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 53dd305fa6b0..62d595ded866 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -48,7 +48,7 @@
#include "link_encoder.h"
#include "link_hwss.h"
#include "dc_link_dp.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dccg.h"
#endif
#include "clock_source.h"
@@ -65,7 +65,6 @@
#include "atomfirmware.h"
-#include "dce110_hw_sequencer.h"
#include "dcn10/dcn10_hw_sequencer.h"
#define GAMMA_HW_POINTS_NUM 256
@@ -1023,8 +1022,20 @@ void dce110_edp_backlight_control(
/* dc_service_sleep_in_milliseconds(50); */
/*edp 1.2*/
panel_instance = link->panel_cntl->inst;
- if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON)
- edp_receiver_ready_T7(link);
+
+ if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON) {
+ if (!link->dc->config.edp_no_power_sequencing)
+ /*
+ * Sometimes a DP receiver chip that is power-controlled externally by an
+ * Embedded Controller can be treated and used as eDP,
+ * if it drives a mobile display. In this case
+ * we shouldn't be doing power sequencing, so we can skip
+ * waiting for T7-ready.
+ */
+ edp_receiver_ready_T7(link);
+ else
+ DC_LOG_DC("edp_receiver_ready_T7 skipped\n");
+ }
if (ctx->dc->ctx->dmub_srv &&
ctx->dc->debug.dmub_command_table) {
@@ -1049,8 +1060,19 @@ void dce110_edp_backlight_control(
dc_link_backlight_enable_aux(link, enable);
/*edp 1.2*/
- if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_OFF)
- edp_add_delay_for_T9(link);
+ if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_OFF) {
+ if (!link->dc->config.edp_no_power_sequencing)
+ /*
+ * Sometimes a DP receiver chip that is power-controlled externally by an
+ * Embedded Controller can be treated and used as eDP,
+ * if it drives a mobile display. In this case
+ * we shouldn't be doing power sequencing, so we can skip
+ * waiting for T9-ready.
+ */
+ edp_add_delay_for_T9(link);
+ else
+ DC_LOG_DC("edp_receiver_ready_T9 skipped\n");
+ }
if (!enable && link->dpcd_sink_ext_caps.bits.oled)
msleep(OLED_PRE_T11_DELAY);
@@ -2091,7 +2113,7 @@ static void dce110_setup_audio_dto(
build_audio_output(context, pipe_ctx, &audio_output);
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
/* For DCN3.1, audio to HPO FRL encoder is using audio DTBCLK DTO */
if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->set_audio_dtbclk_dto) {
/* disable audio DTBCLK DTO */
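
A note on the two backlight hunks in this file: the new config bit gates the eDP T7/T9 receiver waits, so a DP sink whose power is sequenced by an Embedded Controller (but which is otherwise driven as eDP) no longer pays those delays. A sketch of the gate, with a stubbed helper standing in for edp_receiver_ready_T7()/edp_add_delay_for_T9():

#include <stdbool.h>
#include <stdio.h>

/* stand-in for the driver's T7 wait helper */
static void wait_receiver_ready_t7(void) { /* ... */ }

/* illustration of the gate added around the T7 wait on backlight-on;
 * edp_no_power_sequencing mirrors the new dc config flag */
static void backlight_on_sequence(bool edp_no_power_sequencing)
{
        if (!edp_no_power_sequencing)
                wait_receiver_ready_t7();                    /* normal eDP panel */
        else
                printf("edp_receiver_ready_T7 skipped\n");   /* EC-sequenced receiver */
}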
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index 9a1f40eb5c47..71b3a6949001 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -1497,8 +1497,6 @@ void dpp1_cnv_setup (
enum dc_color_space input_color_space,
struct cnv_alpha_2bit_lut *alpha_2bit_lut);
-void dpp1_full_bypass(struct dpp *dpp_base);
-
void dpp1_dppclk_control(
struct dpp *dpp_base,
bool dppclk_div,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index 83c3f944e9ab..8d7e92d5d3e4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -139,7 +139,6 @@ struct dcn_hubbub_registers {
uint32_t DCHVM_CLK_CTRL;
uint32_t DCHVM_RIOMMU_CTRL0;
uint32_t DCHVM_RIOMMU_STAT0;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
uint32_t DCHUBBUB_DET0_CTRL;
uint32_t DCHUBBUB_DET1_CTRL;
uint32_t DCHUBBUB_DET2_CTRL;
@@ -155,7 +154,6 @@ struct dcn_hubbub_registers {
uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C;
uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D;
uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D;
-#endif
};
/* set field name */
@@ -292,7 +290,7 @@ struct dcn_hubbub_registers {
type HOSTVM_POWERSTATUS; \
type RIOMMU_ACTIVE; \
type HOSTVM_PREFETCH_DONE
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
+
#define HUBBUB_RET_REG_FIELD_LIST(type) \
type DET_DEPTH;\
type DET0_SIZE;\
@@ -315,25 +313,20 @@ struct dcn_hubbub_registers {
type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C;\
type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D;\
type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D
-#endif
struct dcn_hubbub_shift {
DCN_HUBBUB_REG_FIELD_LIST(uint8_t);
HUBBUB_STUTTER_REG_FIELD_LIST(uint8_t);
HUBBUB_HVM_REG_FIELD_LIST(uint8_t);
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
HUBBUB_RET_REG_FIELD_LIST(uint8_t);
-#endif
};
struct dcn_hubbub_mask {
DCN_HUBBUB_REG_FIELD_LIST(uint32_t);
HUBBUB_STUTTER_REG_FIELD_LIST(uint32_t);
HUBBUB_HVM_REG_FIELD_LIST(uint32_t);
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
HUBBUB_RET_REG_FIELD_LIST(uint32_t);
-#endif
};
struct dc;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 3b175af97388..c545eddabdcc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -54,6 +54,8 @@
#include "dce/dmub_hw_lock_mgr.h"
#include "dc_trace.h"
#include "dce/dmub_outbox.h"
+#include "inc/dc_link_dp.h"
+#include "inc/link_dpcd.h"
#define DC_LOGGER_INIT(logger)
@@ -1403,6 +1405,9 @@ void dcn10_init_hw(struct dc *dc)
if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
continue;
+ /* DP 2.0 requires that LTTPR Caps be read first */
+ dp_retrieve_lttpr_cap(dc->links[i]);
+
/*
* If any of the displays are lit up turn them off.
* The reason is that some MST hubs cannot be turned off
@@ -3240,10 +3245,17 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
* about the actual size being incorrect, that's a limitation of
* the hardware.
*/
- x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
- pipe_ctx->plane_state->dst_rect.width;
- y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
- pipe_ctx->plane_state->dst_rect.height;
+ if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
+ x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
+ pipe_ctx->plane_state->dst_rect.width;
+ y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
+ pipe_ctx->plane_state->dst_rect.height;
+ } else {
+ x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
+ pipe_ctx->plane_state->dst_rect.width;
+ y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
+ pipe_ctx->plane_state->dst_rect.height;
+ }
/**
* If the cursor's source viewport is clipped then we need to
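
The cursor hunk above handles 90/270-degree plane rotation: because the source rect is expressed pre-rotation, horizontal cursor travel has to be scaled against the source height (and vertical travel against the source width) when the plane is rotated. A self-contained sketch of the x-axis case (struct and helper are local stand-ins, not the driver's types):

#include <stdbool.h>

struct rect_wh { int width, height; };

/* illustration of the rotated cursor scaling from dcn10_set_cursor_position():
 * under 90/270 rotation the x travel maps onto the source rect's height */
static int scale_cursor_x(int x_rel, struct rect_wh src, struct rect_wh dst,
                          bool rotated_90_or_270)
{
        int src_extent = rotated_90_or_270 ? src.height : src.width;

        return x_rel * src_extent / dst.width;
}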
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index ba47553081a7..d8b22618b79e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -160,14 +160,12 @@ struct dcn10_link_enc_registers {
uint32_t PHYA_LINK_CNTL2;
uint32_t PHYB_LINK_CNTL2;
uint32_t PHYC_LINK_CNTL2;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
uint32_t DIO_LINKA_CNTL;
uint32_t DIO_LINKB_CNTL;
uint32_t DIO_LINKC_CNTL;
uint32_t DIO_LINKD_CNTL;
uint32_t DIO_LINKE_CNTL;
uint32_t DIO_LINKF_CNTL;
-#endif
};
#define LE_SF(reg_name, field_name, post_fix)\
@@ -467,29 +465,24 @@ struct dcn10_link_enc_registers {
type DPCS_TX_DATA_SWAP_10_BIT;\
type DPCS_TX_DATA_ORDER_INVERT_18_BIT;\
type RDPCS_TX_CLK_EN
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
+
#define DCN31_LINK_ENCODER_REG_FIELD_LIST(type) \
type ENC_TYPE_SEL;\
type HPO_DP_ENC_SEL;\
type HPO_HDMI_ENC_SEL
-#endif
struct dcn10_link_enc_shift {
DCN_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
DCN20_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
DCN30_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
DCN31_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
-#endif
};
struct dcn10_link_enc_mask {
DCN_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
DCN20_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
DCN30_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
DCN31_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
-#endif
};
struct dcn10_link_encoder {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index 76b334644f9e..0d86df97878c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -52,6 +52,7 @@
SRI(AFMT_60958_1, DIG, id), \
SRI(AFMT_60958_2, DIG, id), \
SRI(DIG_FE_CNTL, DIG, id), \
+ SRI(DIG_FIFO_STATUS, DIG, id), \
SRI(HDMI_CONTROL, DIG, id), \
SRI(HDMI_DB_CONTROL, DIG, id), \
SRI(HDMI_GC, DIG, id), \
@@ -124,6 +125,7 @@ struct dcn10_stream_enc_registers {
uint32_t AFMT_60958_2;
uint32_t DIG_FE_CNTL;
uint32_t DIG_FE_CNTL2;
+ uint32_t DIG_FIFO_STATUS;
uint32_t DP_MSE_RATE_CNTL;
uint32_t DP_MSE_RATE_UPDATE;
uint32_t DP_PIXEL_FORMAT;
@@ -266,6 +268,17 @@ struct dcn10_stream_enc_registers {
SE_SF(DIG0_DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\
SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\
SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh),\
+ SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_LEVEL_ERROR, mask_sh),\
+ SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_USE_OVERWRITE_LEVEL, mask_sh),\
+ SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_OVERWRITE_LEVEL, mask_sh),\
+ SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_ERROR_ACK, mask_sh),\
+ SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_CAL_AVERAGE_LEVEL, mask_sh),\
+ SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_MAXIMUM_LEVEL, mask_sh),\
+ SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_MINIMUM_LEVEL, mask_sh),\
+ SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_READ_CLOCK_SRC, mask_sh),\
+ SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_CALIBRATED, mask_sh),\
+ SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_FORCE_RECAL_AVERAGE, mask_sh),\
+ SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_FORCE_RECOMP_MINMAX, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, mask_sh),\
@@ -488,6 +501,17 @@ struct dcn10_stream_enc_registers {
type DP_VID_N_MUL;\
type DP_VID_M_DOUBLE_VALUE_EN;\
type DIG_SOURCE_SELECT;\
+ type DIG_FIFO_LEVEL_ERROR;\
+ type DIG_FIFO_USE_OVERWRITE_LEVEL;\
+ type DIG_FIFO_OVERWRITE_LEVEL;\
+ type DIG_FIFO_ERROR_ACK;\
+ type DIG_FIFO_CAL_AVERAGE_LEVEL;\
+ type DIG_FIFO_MAXIMUM_LEVEL;\
+ type DIG_FIFO_MINIMUM_LEVEL;\
+ type DIG_FIFO_READ_CLOCK_SRC;\
+ type DIG_FIFO_CALIBRATED;\
+ type DIG_FIFO_FORCE_RECAL_AVERAGE;\
+ type DIG_FIFO_FORCE_RECOMP_MINMAX;\
type DIG_CLOCK_PATTERN
#define SE_REG_FIELD_LIST_DCN2_0(type) \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
index 62904d7ca100..ede65100a050 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
@@ -140,7 +140,6 @@
type PHYCSYMCLK_FORCE_EN;\
type PHYCSYMCLK_FORCE_SRC_SEL;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
#define DCCG31_REG_FIELD_LIST(type) \
type PHYDSYMCLK_FORCE_EN;\
type PHYDSYMCLK_FORCE_SRC_SEL;\
@@ -171,22 +170,17 @@
type DCCG_AUDIO_DTO_SEL;\
type DCCG_AUDIO_DTO0_SOURCE_SEL;\
type DENTIST_DISPCLK_CHG_MODE;
-#endif
struct dccg_shift {
DCCG_REG_FIELD_LIST(uint8_t)
DCCG3_REG_FIELD_LIST(uint8_t)
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
DCCG31_REG_FIELD_LIST(uint8_t)
-#endif
};
struct dccg_mask {
DCCG_REG_FIELD_LIST(uint32_t)
DCCG3_REG_FIELD_LIST(uint32_t)
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
DCCG31_REG_FIELD_LIST(uint32_t)
-#endif
};
struct dccg_registers {
@@ -199,7 +193,6 @@ struct dccg_registers {
uint32_t PHYASYMCLK_CLOCK_CNTL;
uint32_t PHYBSYMCLK_CLOCK_CNTL;
uint32_t PHYCSYMCLK_CLOCK_CNTL;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
uint32_t PHYDSYMCLK_CLOCK_CNTL;
uint32_t PHYESYMCLK_CLOCK_CNTL;
uint32_t DTBCLK_DTO_MODULO[MAX_PIPES];
@@ -212,7 +205,6 @@ struct dccg_registers {
uint32_t SYMCLK32_SE_CNTL;
uint32_t SYMCLK32_LE_CNTL;
uint32_t DENTIST_DISPCLK_CNTL;
-#endif
};
struct dcn_dccg {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
index a4fc70fa0957..10af257d90ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
@@ -83,7 +83,6 @@ struct dcn20_hubbub {
int num_vmid;
struct dcn20_vmid vmid[16];
unsigned int detile_buf_size;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
unsigned int crb_size_segs;
unsigned int compbuf_size_segments;
unsigned int pixel_chunk_size;
@@ -91,7 +90,6 @@ struct dcn20_hubbub {
unsigned int det1_size;
unsigned int det2_size;
unsigned int det3_size;
-#endif
};
void hubbub2_construct(struct dcn20_hubbub *hubbub,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
index 2652d6abe9e3..eea2254b15e4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
@@ -216,32 +216,22 @@
type ROW_TTU_MODE; \
type NUM_PKRS
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
#define DCN31_HUBP_REG_FIELD_VARIABLE_LIST(type) \
DCN30_HUBP_REG_FIELD_VARIABLE_LIST(type);\
type HUBP_UNBOUNDED_REQ_MODE;\
type CURSOR_REQ_MODE;\
type HUBP_SOFT_RESET
-#endif
struct dcn_hubp2_registers {
DCN30_HUBP_REG_COMMON_VARIABLE_LIST;
};
struct dcn_hubp2_shift {
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
DCN31_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t);
-#else
- DCN30_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t);
-#endif
};
struct dcn_hubp2_mask {
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
DCN31_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t);
-#else
- DCN30_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t);
-#endif
};
struct dcn20_hubp {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index d0d6a94e4992..5c2853654cca 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1270,9 +1270,7 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx
new_pipe->update_flags.bits.gamut_remap = 1;
new_pipe->update_flags.bits.scaler = 1;
new_pipe->update_flags.bits.viewport = 1;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
new_pipe->update_flags.bits.det_size = 1;
-#endif
if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
new_pipe->update_flags.bits.odm = 1;
new_pipe->update_flags.bits.global_sync = 1;
@@ -1307,10 +1305,9 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx
new_pipe->update_flags.bits.global_sync = 1;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
if (old_pipe->det_buffer_size_kb != new_pipe->det_buffer_size_kb)
new_pipe->update_flags.bits.det_size = 1;
-#endif
+
/*
* Detect opp / tg change, only set on change, not on enable
* Assume mpcc inst = pipe index, if not this code needs to be updated
@@ -1426,10 +1423,9 @@ static void dcn20_update_dchubp_dpp(
&pipe_ctx->ttu_regs,
&pipe_ctx->rq_regs,
&pipe_ctx->pipe_dlg_param);
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
+
if (hubp->funcs->set_unbounded_requesting)
hubp->funcs->set_unbounded_requesting(hubp, pipe_ctx->unbounded_req);
-#endif
}
if (pipe_ctx->update_flags.bits.hubp_interdependent)
hubp->funcs->hubp_setup_interdependent(
@@ -1609,11 +1605,9 @@ static void dcn20_program_pipe(
dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub);
}
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
if (dc->res_pool->hubbub->funcs->program_det_size && pipe_ctx->update_flags.bits.det_size)
dc->res_pool->hubbub->funcs->program_det_size(
dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
-#endif
if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw)
dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
@@ -1705,12 +1699,10 @@ void dcn20_program_front_end_for_ctx(
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
|| context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
- #if defined(CONFIG_DRM_AMD_DC_DCN3_1)
struct hubbub *hubbub = dc->res_pool->hubbub;
if (hubbub->funcs->program_det_size && context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
- #endif
hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
}
@@ -1828,11 +1820,9 @@ void dcn20_prepare_bandwidth(
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
false);
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
/* decrease compbuf size */
if (hubbub->funcs->program_compbuf_size)
hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, false);
-#endif
}
void dcn20_optimize_bandwidth(
@@ -1851,11 +1841,9 @@ void dcn20_optimize_bandwidth(
dc->clk_mgr,
context,
true);
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
/* increase compbuf size */
if (hubbub->funcs->program_compbuf_size)
hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
-#endif
}
bool dcn20_update_bandwidth(
@@ -2561,10 +2549,9 @@ void dcn20_fpga_init_hw(struct dc *dc)
tg->funcs->tg_init(tg);
}
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
+
if (dc->res_pool->hubbub->funcs->init_crb)
dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
-#endif
}
#ifndef TRIM_FSFT
bool dcn20_optimize_timing_for_fsft(struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 1b670c5de1aa..1b05a37b674d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -3069,7 +3069,6 @@ static void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
{
int i;
@@ -3079,7 +3078,6 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
}
return false;
}
-#endif
void dcn20_calculate_dlg_params(
struct dc *dc, struct dc_state *context,
@@ -3088,9 +3086,7 @@ void dcn20_calculate_dlg_params(
int vlevel)
{
int i, pipe_idx;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
int plane_count;
-#endif
/* Writeback MCIF_WB arbitration parameters */
dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);
@@ -3105,7 +3101,7 @@ void dcn20_calculate_dlg_params(
context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
!= dm_dram_clock_change_unsupported;
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
+
context->bw_ctx.bw.dcn.clk.z9_support = (context->bw_ctx.dml.vba.StutterPeriod > 5000.0) ?
DCN_Z9_SUPPORT_ALLOW : DCN_Z9_SUPPORT_DISALLOW;
@@ -3119,7 +3115,6 @@ void dcn20_calculate_dlg_params(
context->bw_ctx.bw.dcn.clk.z9_support = DCN_Z9_SUPPORT_ALLOW;
context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context);
-#endif
if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz;
@@ -3131,10 +3126,9 @@ void dcn20_calculate_dlg_params(
pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
- context->res_ctx.pipe_ctx[i].det_buffer_size_kb = context->bw_ctx.dml.ip.det_buffer_size_kbytes;
- context->res_ctx.pipe_ctx[i].unbounded_req = pipes[pipe_idx].pipe.src.unbounded_req_mode;
-#endif
+ context->res_ctx.pipe_ctx[i].det_buffer_size_kb = context->bw_ctx.dml.ip.det_buffer_size_kbytes;
+ context->res_ctx.pipe_ctx[i].unbounded_req = pipes[pipe_idx].pipe.src.unbounded_req_mode;
+
if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
@@ -3148,10 +3142,8 @@ void dcn20_calculate_dlg_params(
context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dppclk_mhz * 1000;
context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz * 1000;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes
- context->bw_ctx.dml.ip.det_buffer_size_kbytes * pipe_idx;
-#endif
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
bool cstate_en = context->bw_ctx.dml.vba.PrefetchMode[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != 2;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index 4075ae111530..e6307397e0d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -552,6 +552,17 @@ void enc2_stream_encoder_dp_set_stream_attribute(
DP_SST_SDP_SPLITTING, enable_sdp_splitting);
}
+uint32_t enc2_get_fifo_cal_average_level(
+ struct stream_encoder *enc)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+ uint32_t fifo_level;
+
+ REG_GET(DIG_FIFO_STATUS,
+ DIG_FIFO_CAL_AVERAGE_LEVEL, &fifo_level);
+ return fifo_level;
+}
+
static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
.dp_set_odm_combine =
enc2_dp_set_odm_combine,
@@ -598,6 +609,7 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
.dp_set_dsc_pps_info_packet = enc2_dp_set_dsc_pps_info_packet,
.set_dynamic_metadata = enc2_set_dynamic_metadata,
.hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,
+ .get_fifo_cal_average_level = enc2_get_fifo_cal_average_level,
};
void dcn20_stream_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
index 9a881e639709..f3d1a0237bda 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
@@ -112,4 +112,7 @@ void enc2_set_dynamic_metadata(struct stream_encoder *enc,
uint32_t hubp_requestor_id,
enum dynamic_metadata_mode dmdata_mode);
+uint32_t enc2_get_fifo_cal_average_level(
+ struct stream_encoder *enc);
+
#endif /* __DC_STREAM_ENCODER_DCN20_H__ */
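
The new get_fifo_cal_average_level hook is wired into the DCN2.0 (and, below, DCN3.0) stream encoder function tables and reads DIG_FIFO_CAL_AVERAGE_LEVEL out of the newly listed DIG_FIFO_STATUS register. A sketch of how a caller would typically consume such an optional hook through the funcs table (types reduced to stubs; the guard-then-call shape is the point, not the surrounding code):

#include <stdint.h>

struct stream_encoder;

struct stream_encoder_funcs {
        uint32_t (*get_fifo_cal_average_level)(struct stream_encoder *enc);
};

struct stream_encoder {
        const struct stream_encoder_funcs *funcs;
};

/* illustration only: optional hooks in DC are normally NULL-checked before
 * use, since older encoder generations may not implement them */
static uint32_t read_fifo_cal_average_level(struct stream_encoder *enc)
{
        if (enc->funcs && enc->funcs->get_fifo_cal_average_level)
                return enc->funcs->get_fifo_cal_average_level(enc);

        return 0;       /* no DIG_FIFO_STATUS support on this encoder */
}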
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
index 72bee637c1e4..8487516819ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
@@ -823,6 +823,8 @@ static const struct stream_encoder_funcs dcn30_str_enc_funcs = {
.dp_set_dsc_pps_info_packet = enc3_dp_set_dsc_pps_info_packet,
.set_dynamic_metadata = enc2_set_dynamic_metadata,
.hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,
+
+ .get_fifo_cal_average_level = enc2_get_fifo_cal_average_level,
};
void dcn30_dio_stream_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h
index 9566b9037458..e2c264ecb20f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h
@@ -106,6 +106,7 @@
SRI(DP_SEC_METADATA_TRANSMISSION, DP, id), \
SRI(HDMI_METADATA_PACKET_CONTROL, DIG, id), \
SRI(DIG_FE_CNTL, DIG, id), \
+ SRI(DIG_FIFO_STATUS, DIG, id), \
SRI(DIG_CLOCK_PATTERN, DIG, id)
@@ -167,6 +168,17 @@
SE_SF(DIG0_DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\
SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\
SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh),\
+ SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_LEVEL_ERROR, mask_sh),\
+ SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_USE_OVERWRITE_LEVEL, mask_sh),\
+ SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_OVERWRITE_LEVEL, mask_sh),\
+ SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_ERROR_ACK, mask_sh),\
+ SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_CAL_AVERAGE_LEVEL, mask_sh),\
+ SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_MAXIMUM_LEVEL, mask_sh),\
+ SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_MINIMUM_LEVEL, mask_sh),\
+ SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_READ_CLOCK_SRC, mask_sh),\
+ SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_CALIBRATED, mask_sh),\
+ SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_FORCE_RECAL_AVERAGE, mask_sh),\
+ SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_FORCE_RECOMP_MINMAX, mask_sh),\
SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP4_ENABLE, mask_sh),\
SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, mask_sh),\
SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP6_ENABLE, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index ef5d0b778a72..c68e3a708a33 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -48,6 +48,8 @@
#include "dc_dmub_srv.h"
#include "link_hwss.h"
#include "dpcd_defs.h"
+#include "inc/dc_link_dp.h"
+#include "inc/link_dpcd.h"
@@ -529,6 +531,8 @@ void dcn30_init_hw(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
continue;
+ /* DP 2.0 states that LTTPR regs must be read first */
+ dp_retrieve_lttpr_cap(dc->links[i]);
/* if any of the displays are lit up turn them off */
status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
@@ -651,10 +655,8 @@ void dcn30_init_hw(struct dc *dc)
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
dc->res_pool->hubbub, false, false);
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
if (dc->res_pool->hubbub->funcs->init_crb)
dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
-#endif
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index b16d19a25d88..16a75ba0ca82 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -164,8 +164,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc = {
.min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
.num_states = 1,
- .sr_exit_time_us = 15.5,
- .sr_enter_plus_exit_time_us = 20,
+ .sr_exit_time_us = 26.5,
+ .sr_enter_plus_exit_time_us = 31,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
@@ -1102,6 +1102,26 @@ static bool init_soc_bounding_box(struct dc *dc, struct resource_pool *pool)
loaded_ip->max_num_dpp = pool->pipe_count;
loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk;
dcn20_patch_bounding_box(dc, loaded_bb);
+
+ if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
+ struct bp_soc_bb_info bb_info = { 0 };
+
+ if (dc->ctx->dc_bios->funcs->get_soc_bb_info(
+ dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
+ if (bb_info.dram_clock_change_latency_100ns > 0)
+ dcn3_02_soc.dram_clock_change_latency_us =
+ bb_info.dram_clock_change_latency_100ns * 10;
+
+ if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
+ dcn3_02_soc.sr_enter_plus_exit_time_us =
+ bb_info.dram_sr_enter_exit_latency_100ns * 10;
+
+ if (bb_info.dram_sr_exit_latency_100ns > 0)
+ dcn3_02_soc.sr_exit_time_us =
+ bb_info.dram_sr_exit_latency_100ns * 10;
+ }
+ }
+
return true;
}
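
Both this dcn302 hunk and the identical dcn303 hunk below add the same VBIOS override on top of the raised defaults: if the video BIOS reports SOC bounding-box latencies, any non-zero value replaces the compiled-in number. A minimal sketch of that override-if-reported pattern (field and function names are shortened stand-ins; the *10 scaling is taken verbatim from the hunk):

#include <stdint.h>

struct soc_bb_latencies {
        double dram_clock_change_latency_us;
        double sr_enter_plus_exit_time_us;
        double sr_exit_time_us;
};

struct bios_bb_info {
        uint32_t dram_clock_change_latency_100ns;     /* 0 == not reported */
        uint32_t dram_sr_enter_exit_latency_100ns;
        uint32_t dram_sr_exit_latency_100ns;
};

/* illustration only: apply each BIOS-reported latency when present,
 * otherwise keep the table default (same *10 scaling as the hunk above) */
static void patch_soc_latencies(struct soc_bb_latencies *soc,
                                const struct bios_bb_info *bb)
{
        if (bb->dram_clock_change_latency_100ns > 0)
                soc->dram_clock_change_latency_us =
                        bb->dram_clock_change_latency_100ns * 10;

        if (bb->dram_sr_enter_exit_latency_100ns > 0)
                soc->sr_enter_plus_exit_time_us =
                        bb->dram_sr_enter_exit_latency_100ns * 10;

        if (bb->dram_sr_exit_latency_100ns > 0)
                soc->sr_exit_time_us =
                        bb->dram_sr_exit_latency_100ns * 10;
}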
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.c
index dc33ec8b7bdb..b48b732aa647 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.c
@@ -38,3 +38,8 @@ void dcn303_dsc_pg_control(struct dce_hwseq *hws, unsigned int dsc_inst, bool po
{
/*DCN303 removes PG registers*/
}
+
+void dcn303_enable_power_gating_plane(struct dce_hwseq *hws, bool enable)
+{
+ /*DCN303 removes PG registers*/
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.h
index fc6cab720b6d..8b69a3b76c11 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.h
@@ -13,5 +13,6 @@
void dcn303_dpp_pg_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on);
void dcn303_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on);
void dcn303_dsc_pg_control(struct dce_hwseq *hws, unsigned int dsc_inst, bool power_on);
+void dcn303_enable_power_gating_plane(struct dce_hwseq *hws, bool enable);
#endif /* __DC_HWSS_DCN303_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c
index 86d4b303d02f..aa5dbbade2bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c
@@ -16,4 +16,5 @@ void dcn303_hw_sequencer_construct(struct dc *dc)
dc->hwseq->funcs.dpp_pg_control = dcn303_dpp_pg_control;
dc->hwseq->funcs.hubp_pg_control = dcn303_hubp_pg_control;
dc->hwseq->funcs.dsc_pg_control = dcn303_dsc_pg_control;
+ dc->hwseq->funcs.enable_power_gating_plane = dcn303_enable_power_gating_plane;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index 758f89ba0192..34b89464ae02 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -146,8 +146,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_03_soc = {
.min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
.num_states = 1,
- .sr_exit_time_us = 15.5,
- .sr_enter_plus_exit_time_us = 20,
+ .sr_exit_time_us = 26.5,
+ .sr_enter_plus_exit_time_us = 31,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
@@ -1028,6 +1028,26 @@ static bool init_soc_bounding_box(struct dc *dc, struct resource_pool *pool)
loaded_ip->max_num_dpp = pool->pipe_count;
loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk;
dcn20_patch_bounding_box(dc, loaded_bb);
+
+ if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
+ struct bp_soc_bb_info bb_info = { 0 };
+
+ if (dc->ctx->dc_bios->funcs->get_soc_bb_info(
+ dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
+ if (bb_info.dram_clock_change_latency_100ns > 0)
+ dcn3_03_soc.dram_clock_change_latency_us =
+ bb_info.dram_clock_change_latency_100ns * 10;
+
+ if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
+ dcn3_03_soc.sr_enter_plus_exit_time_us =
+ bb_info.dram_sr_enter_exit_latency_100ns * 10;
+
+ if (bb_info.dram_sr_exit_latency_100ns > 0)
+ dcn3_03_soc.sr_exit_time_us =
+ bb_info.dram_sr_exit_latency_100ns * 10;
+ }
+ }
+
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
index 5dcdc5a858fe..4bab97acb155 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
@@ -28,6 +28,7 @@ endif
CFLAGS_$(AMDDALPATH)/dc/dcn31/dcn31_resource.o += -mhard-float
endif
+ifdef CONFIG_X86
ifdef IS_OLD_GCC
# Stack alignment mismatch, proceed with caution.
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
@@ -36,6 +37,7 @@ CFLAGS_$(AMDDALPATH)/dc/dcn31/dcn31_resource.o += -mpreferred-stack-boundary=4
else
CFLAGS_$(AMDDALPATH)/dc/dcn31/dcn31_resource.o += -msse2
endif
+endif
AMD_DAL_DCN31 = $(addprefix $(AMDDALPATH)/dc/dcn31/,$(DCN31))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index c0e544d7556f..836864a5a5dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -33,7 +33,6 @@
#include "clk_mgr.h"
#include "reg_helper.h"
#include "abm.h"
-#include "clk_mgr.h"
#include "hubp.h"
#include "dchubbub.h"
#include "timing_generator.h"
@@ -47,6 +46,7 @@
#include "dpcd_defs.h"
#include "dce/dmub_outbox.h"
#include "dc_link_dp.h"
+#include "inc/link_dpcd.h"
#define DC_LOGGER_INIT(logger)
@@ -299,10 +299,8 @@ void dcn31_init_hw(struct dc *dc)
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
dc->res_pool->hubbub, false, false);
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
if (dc->res_pool->hubbub->funcs->init_crb)
dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
-#endif
}
void dcn31_dsc_pg_control(
@@ -392,7 +390,7 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
is_hdmi_tmds = dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal);
is_dp = dc_is_dp_signal(pipe_ctx->stream->signal);
- if (!is_hdmi_tmds)
+ if (!is_hdmi_tmds && !is_dp)
return;
if (is_hdmi_tmds)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 0d6cb6caad81..c67bc9544f5d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -934,7 +934,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.dmub_command_table = true,
.pstate_enabled = true,
.use_max_lb = true,
- .pstate_enabled = true,
.enable_mem_low_power = {
.bits = {
.vga = false,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
index 8381c8f9ddb7..a9170b9f84d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
@@ -32,11 +32,9 @@ struct cp_psp_stream_config {
uint8_t otg_inst;
uint8_t dig_be;
uint8_t dig_fe;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
uint8_t link_enc_idx;
uint8_t stream_enc_idx;
uint8_t phy_idx;
-#endif
uint8_t assr_enabled;
uint8_t mst_enabled;
void *dm_stream_ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 11ab03abc0fe..45862167e6ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -50,6 +50,10 @@ dml_ccflags += -msse2
endif
endif
+ifneq ($(CONFIG_FRAME_WARN),0)
+frame_warn_flag := -Wframe-larger-than=2048
+endif
+
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
ifdef CONFIG_DRM_AMD_DC_DCN
@@ -60,12 +64,10 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) -Wframe-larger-than=2048
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
-ifdef CONFIG_DRM_AMD_DC_DCN3_1
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) -Wframe-larger-than=2048
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)
-endif
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_rcflags)
@@ -76,10 +78,8 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_rcflags)
-ifdef CONFIG_DRM_AMD_DC_DCN3_1
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_rcflags)
-endif
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_rcflags)
endif
CFLAGS_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_ccflags)
@@ -94,10 +94,8 @@ DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_
DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o
DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o
DML += dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o
-ifdef CONFIG_DRM_AMD_DC_DCN3_1
DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o
endif
-endif
AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML))
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index d655655baaba..a9667068c690 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -23,7 +23,6 @@
*
*/
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
#include "dc.h"
#include "dc_link.h"
#include "../display_mode_lib.h"
@@ -2668,6 +2667,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
(double) v->WritebackDelay[v->VoltageLevel][k]
/ (v->HTotal[k] / v->PixelClock[k]),
1));
+ if (v->MaxVStartupLines[k] > 1023)
+ v->MaxVStartupLines[k] = 1023;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d MaxVStartupLines = %d\n", __func__, k, v->MaxVStartupLines[k]);
@@ -3536,7 +3537,7 @@ static bool CalculateBytePerPixelAnd256BBlockSizes(
*BytePerPixelDETC = 0;
*BytePerPixelY = 4;
*BytePerPixelC = 0;
- } else if (SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_16) {
+ } else if (SourcePixelFormat == dm_444_16) {
*BytePerPixelDETY = 2;
*BytePerPixelDETC = 0;
*BytePerPixelY = 2;
@@ -5064,6 +5065,8 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/ (v->HTotal[k]
/ v->PixelClock[k]),
1.0));
+ if (v->MaximumVStartup[i][j][k] > 1023)
+ v->MaximumVStartup[i][j][k] = 1023;
v->MaxMaxVStartup[i][j] = dml_max(v->MaxMaxVStartup[i][j], v->MaximumVStartup[i][j][k]);
}
}
@@ -5674,7 +5677,7 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (k = 0; k < v->NumberOfActivePlanes; k++) {
if (v->ViewportWidth[k] > v->SurfaceWidthY[k] || v->ViewportHeight[k] > v->SurfaceHeightY[k]) {
ViewportExceedsSurface = true;
- if (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32 && v->SourcePixelFormat[k] != dm_444_16
+ if (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32
&& v->SourcePixelFormat[k] != dm_444_16 && v->SourcePixelFormat[k] != dm_444_8
&& v->SourcePixelFormat[k] != dm_rgbe) {
if (v->ViewportWidthChroma[k] > v->SurfaceWidthC[k]
@@ -7505,4 +7508,3 @@ static bool UnboundedRequest(enum unbounded_requesting_policy UseUnboundedReques
return (ret_val);
}
-#endif /* CONFIG_DRM_AMD_DC_DCN3_1 */
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index cb15525ddb49..3def093ef88e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -23,8 +23,6 @@
*
*/
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
-
#include "../display_mode_lib.h"
#include "../display_mode_vba.h"
#include "../dml_inline_defs.h"
@@ -52,7 +50,7 @@ static bool CalculateBytePerPixelAnd256BBlockSizes(
*BytePerPixelDETC = 0;
*BytePerPixelY = 4;
*BytePerPixelC = 0;
- } else if (SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_16) {
+ } else if (SourcePixelFormat == dm_444_16) {
*BytePerPixelDETY = 2;
*BytePerPixelDETC = 0;
*BytePerPixelY = 2;
@@ -1724,4 +1722,3 @@ void dml31_rq_dlg_get_dlg_reg(
dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx);
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index 9bb3b00e8be1..8a5bd919aec8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -33,11 +33,9 @@
#include "dcn21/display_rq_dlg_calc_21.h"
#include "dcn30/display_mode_vba_30.h"
#include "dcn30/display_rq_dlg_calc_30.h"
-#include "dml_logger.h"
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
#include "dcn31/display_mode_vba_31.h"
#include "dcn31/display_rq_dlg_calc_31.h"
-#endif
+#include "dml_logger.h"
const struct dml_funcs dml20_funcs = {
.validate = dml20_ModeSupportAndSystemConfigurationFull,
@@ -66,14 +64,13 @@ const struct dml_funcs dml30_funcs = {
.rq_dlg_get_dlg_reg = dml30_rq_dlg_get_dlg_reg,
.rq_dlg_get_rq_reg = dml30_rq_dlg_get_rq_reg
};
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
+
const struct dml_funcs dml31_funcs = {
.validate = dml31_ModeSupportAndSystemConfigurationFull,
.recalculate = dml31_recalculate,
.rq_dlg_get_dlg_reg = dml31_rq_dlg_get_dlg_reg,
.rq_dlg_get_rq_reg = dml31_rq_dlg_get_rq_reg
};
-#endif
void dml_init_instance(struct display_mode_lib *lib,
const struct _vcs_dpi_soc_bounding_box_st *soc_bb,
@@ -96,13 +93,11 @@ void dml_init_instance(struct display_mode_lib *lib,
case DML_PROJECT_DCN30:
lib->funcs = dml30_funcs;
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
case DML_PROJECT_DCN31:
case DML_PROJECT_DCN31_FPGA:
lib->funcs = dml31_funcs;
break;
-#endif
default:
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index 7b7f1500a91e..d42a0aeca6be 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -38,10 +38,8 @@ enum dml_project {
DML_PROJECT_NAVI10v2,
DML_PROJECT_DCN21,
DML_PROJECT_DCN30,
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
DML_PROJECT_DCN31,
DML_PROJECT_DCN31_FPGA,
-#endif
};
struct display_mode_lib;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index bdd075576573..64daa0507393 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -74,10 +74,8 @@ struct _vcs_dpi_soc_bounding_box_st {
unsigned int num_states;
double sr_exit_time_us;
double sr_enter_plus_exit_time_us;
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
double sr_exit_z8_time_us;
double sr_enter_plus_exit_z8_time_us;
-#endif
double urgent_latency_us;
double urgent_latency_pixel_data_only_us;
double urgent_latency_pixel_mixed_with_vm_data_us;
@@ -213,14 +211,12 @@ struct _vcs_dpi_ip_params_st {
unsigned int is_line_buffer_bpp_fixed;
unsigned int line_buffer_fixed_bpp;
unsigned int dcc_supported;
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
unsigned int config_return_buffer_size_in_kbytes;
unsigned int compressed_buffer_segment_size_in_kbytes;
unsigned int meta_fifo_size_in_kentries;
unsigned int zero_size_buffer_entries;
unsigned int compbuf_reserved_space_64b;
unsigned int compbuf_reserved_space_zs;
-#endif
unsigned int IsLineBufferBppFixed;
unsigned int LineBufferFixedBpp;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index 345d2d409a6e..d3b1b6d4ce2f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -89,12 +89,10 @@ dml_get_attr_func(wm_memory_trip, mode_lib->vba.UrgentLatency);
dml_get_attr_func(wm_writeback_urgent, mode_lib->vba.WritebackUrgentWatermark);
dml_get_attr_func(wm_stutter_exit, mode_lib->vba.StutterExitWatermark);
dml_get_attr_func(wm_stutter_enter_exit, mode_lib->vba.StutterEnterPlusExitWatermark);
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
dml_get_attr_func(wm_z8_stutter_exit, mode_lib->vba.Z8StutterExitWatermark);
dml_get_attr_func(wm_z8_stutter_enter_exit, mode_lib->vba.Z8StutterEnterPlusExitWatermark);
dml_get_attr_func(stutter_efficiency_z8, mode_lib->vba.Z8StutterEfficiency);
dml_get_attr_func(stutter_num_bursts_z8, mode_lib->vba.Z8NumberOfStutterBurstsPerFrame);
-#endif
dml_get_attr_func(wm_dram_clock_change, mode_lib->vba.DRAMClockChangeWatermark);
dml_get_attr_func(wm_writeback_dram_clock_change, mode_lib->vba.WritebackDRAMClockChangeWatermark);
dml_get_attr_func(stutter_efficiency, mode_lib->vba.StutterEfficiency);
@@ -159,15 +157,12 @@ dml_get_pipe_attr_func(refcyc_per_meta_chunk_vblank_l_in_us, mode_lib->vba.TimeP
dml_get_pipe_attr_func(refcyc_per_meta_chunk_vblank_c_in_us, mode_lib->vba.TimePerChromaMetaChunkVBlank);
dml_get_pipe_attr_func(refcyc_per_meta_chunk_flip_l_in_us, mode_lib->vba.TimePerMetaChunkFlip);
dml_get_pipe_attr_func(refcyc_per_meta_chunk_flip_c_in_us, mode_lib->vba.TimePerChromaMetaChunkFlip);
-
dml_get_pipe_attr_func(vstartup, mode_lib->vba.VStartup);
dml_get_pipe_attr_func(vupdate_offset, mode_lib->vba.VUpdateOffsetPix);
dml_get_pipe_attr_func(vupdate_width, mode_lib->vba.VUpdateWidthPix);
dml_get_pipe_attr_func(vready_offset, mode_lib->vba.VReadyOffsetPix);
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
dml_get_pipe_attr_func(vready_at_or_after_vsync, mode_lib->vba.VREADY_AT_OR_AFTER_VSYNC);
dml_get_pipe_attr_func(min_dst_y_next_start, mode_lib->vba.MIN_DST_Y_NEXT_START);
-#endif
double get_total_immediate_flip_bytes(
struct display_mode_lib *mode_lib,
@@ -236,7 +231,6 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)
mode_lib->vba.WritebackLatency = soc->writeback_latency_us;
mode_lib->vba.SRExitTime = soc->sr_exit_time_us;
mode_lib->vba.SREnterPlusExitTime = soc->sr_enter_plus_exit_time_us;
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
mode_lib->vba.PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency = soc->pct_ideal_sdp_bw_after_urgent;
mode_lib->vba.PercentOfIdealDRAMBWReceivedAfterUrgLatencyPixelMixedWithVMData = soc->pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm;
mode_lib->vba.PercentOfIdealDRAMBWReceivedAfterUrgLatencyPixelDataOnly = soc->pct_ideal_dram_sdp_bw_after_urgent_pixel_only;
@@ -245,7 +239,6 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)
soc->max_avg_sdp_bw_use_normal_percent;
mode_lib->vba.SRExitZ8Time = soc->sr_exit_z8_time_us;
mode_lib->vba.SREnterPlusExitZ8Time = soc->sr_enter_plus_exit_z8_time_us;
-#endif
mode_lib->vba.DRAMClockChangeLatency = soc->dram_clock_change_latency_us;
mode_lib->vba.DummyPStateCheck = soc->dram_clock_change_latency_us == soc->dummy_pstate_latency_us;
mode_lib->vba.DRAMClockChangeSupportsVActive = !soc->disable_dram_clock_change_vactive_support ||
@@ -320,7 +313,6 @@ static void fetch_ip_params(struct display_mode_lib *mode_lib)
mode_lib->vba.MaxPSCLToLBThroughput = ip->max_pscl_lb_bw_pix_per_clk;
mode_lib->vba.ROBBufferSizeInKByte = ip->rob_buffer_size_kbytes;
mode_lib->vba.DETBufferSizeInKByte[0] = ip->det_buffer_size_kbytes;
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
mode_lib->vba.ConfigReturnBufferSizeInKByte = ip->config_return_buffer_size_in_kbytes;
mode_lib->vba.CompressedBufferSegmentSizeInkByte = ip->compressed_buffer_segment_size_in_kbytes;
mode_lib->vba.MetaFIFOSizeInKEntries = ip->meta_fifo_size_in_kentries;
@@ -329,7 +321,6 @@ static void fetch_ip_params(struct display_mode_lib *mode_lib)
mode_lib->vba.COMPBUF_RESERVED_SPACE_ZS = ip->compbuf_reserved_space_zs;
mode_lib->vba.MaximumDSCBitsPerComponent = ip->maximum_dsc_bits_per_component;
mode_lib->vba.DSC422NativeSupport = ip->dsc422_native_support;
-#endif
mode_lib->vba.PixelChunkSizeInKByte = ip->pixel_chunk_size_kbytes;
mode_lib->vba.MetaChunkSize = ip->meta_chunk_size_kbytes;
@@ -419,7 +410,6 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
visited[j] = true;
mode_lib->vba.pipe_plane[j] = mode_lib->vba.NumberOfActivePlanes;
-
mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes] = 1;
mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] =
(enum scan_direction_class) (src->source_scan);
@@ -459,11 +449,9 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.VTAPsChroma[mode_lib->vba.NumberOfActivePlanes] = taps->vtaps_c;
mode_lib->vba.HTotal[mode_lib->vba.NumberOfActivePlanes] = dst->htotal;
mode_lib->vba.VTotal[mode_lib->vba.NumberOfActivePlanes] = dst->vtotal;
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
mode_lib->vba.VFrontPorch[mode_lib->vba.NumberOfActivePlanes] = dst->vfront_porch;
mode_lib->vba.DCCFractionOfZeroSizeRequestsLuma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_fraction_of_zs_req_luma;
mode_lib->vba.DCCFractionOfZeroSizeRequestsChroma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_fraction_of_zs_req_chroma;
-#endif
mode_lib->vba.DCCEnable[mode_lib->vba.NumberOfActivePlanes] =
src->dcc_use_global ?
ip->dcc_supported : src->dcc && ip->dcc_supported;
@@ -710,13 +698,11 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
}
}
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
mode_lib->vba.UseUnboundedRequesting = dm_unbounded_requesting;
for (k = 0; k < mode_lib->vba.cache_num_pipes; ++k) {
if (pipes[k].pipe.src.unbounded_req_mode == 0)
mode_lib->vba.UseUnboundedRequesting = dm_unbounded_requesting_disable;
}
-#endif
// TODO: ODMCombineEnabled => 2 * DPPPerPlane...actually maybe not since all pipes are specified
// Do we want the dscclk to automatically be halved? Guess not since the value is specified
mode_lib->vba.SynchronizedVBlank = pipes[0].pipe.dest.synchronized_vblank_all_planes;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index fad7bb57e668..d18a021d4d32 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -39,12 +39,10 @@ dml_get_attr_decl(wm_memory_trip);
dml_get_attr_decl(wm_writeback_urgent);
dml_get_attr_decl(wm_stutter_exit);
dml_get_attr_decl(wm_stutter_enter_exit);
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
dml_get_attr_decl(wm_z8_stutter_exit);
dml_get_attr_decl(wm_z8_stutter_enter_exit);
dml_get_attr_decl(stutter_efficiency_z8);
dml_get_attr_decl(stutter_num_bursts_z8);
-#endif
dml_get_attr_decl(wm_dram_clock_change);
dml_get_attr_decl(wm_writeback_dram_clock_change);
dml_get_attr_decl(stutter_efficiency_no_vblank);
@@ -108,10 +106,8 @@ dml_get_pipe_attr_decl(vstartup);
dml_get_pipe_attr_decl(vupdate_offset);
dml_get_pipe_attr_decl(vupdate_width);
dml_get_pipe_attr_decl(vready_offset);
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
dml_get_pipe_attr_decl(vready_at_or_after_vsync);
dml_get_pipe_attr_decl(min_dst_y_next_start);
-#endif
double get_total_immediate_flip_bytes(
struct display_mode_lib *mode_lib,
@@ -933,7 +929,6 @@ struct vba_vars_st {
bool ClampMinDCFCLK;
bool AllowDramClockChangeOneDisplayVactive;
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
double MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation;
double PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency;
double PercentOfIdealDRAMBWReceivedAfterUrgLatencyPixelMixedWithVMData;
@@ -974,7 +969,6 @@ struct vba_vars_st {
int Z8NumberOfStutterBurstsPerFrame;
unsigned int MaximumDSCBitsPerComponent;
unsigned int NotEnoughUrgentLatencyHidingA[DC__VOLTAGE_STATES][2];
-#endif
};
bool CalculateMinAndMaxPrefetchMode(
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index fbf71ff0a402..c5c840a06050 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -112,9 +112,7 @@ bool dal_hw_factory_init(
case DCN_VERSION_3_01:
case DCN_VERSION_3_02:
case DCN_VERSION_3_03:
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
case DCN_VERSION_3_1:
-#endif
dal_hw_factory_dcn30_init(factory);
return true;
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index 89794687f6a0..4a9848308766 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -107,9 +107,7 @@ bool dal_hw_translate_init(
case DCN_VERSION_3_01:
case DCN_VERSION_3_02:
case DCN_VERSION_3_03:
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
case DCN_VERSION_3_1:
-#endif
dal_hw_translate_dcn30_init(translate);
return true;
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
index 51855a2624cf..4233955e3c47 100644
--- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
@@ -33,6 +33,7 @@
#include "core_types.h"
#include "dc_link_ddc.h"
#include "link_hwss.h"
+#include "inc/link_dpcd.h"
#define DC_LOGGER \
link->ctx->logger
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 0d1c3260b68e..45a6216dfa2a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -338,9 +338,7 @@ union pipe_update_flags {
uint32_t scaler : 1;
uint32_t viewport : 1;
uint32_t plane_changed : 1;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
uint32_t det_size : 1;
-#endif
} bits;
uint32_t raw;
};
@@ -368,11 +366,9 @@ struct pipe_ctx {
struct _vcs_dpi_display_ttu_regs_st ttu_regs;
struct _vcs_dpi_display_rq_regs_st rq_regs;
struct _vcs_dpi_display_pipe_dest_params_st pipe_dlg_param;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
int det_buffer_size_kb;
bool unbounded_req;
#endif
-#endif
union pipe_update_flags update_flags;
struct dwbc *dwbc;
struct mcif_wb *mcif_wb;
@@ -422,9 +418,7 @@ struct dcn_bw_output {
struct dc_clocks clk;
struct dcn_watermark_set watermarks;
struct dcn_bw_writeback bw_writeback;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
int compbuf_size_kb;
-#endif
};
union bw_output {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 883c3af51022..e2b58ec9912d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -42,7 +42,15 @@ enum {
/* to avoid infinite loop where-in the receiver
* switches between different VS
*/
- LINK_TRAINING_MAX_CR_RETRY = 100
+ LINK_TRAINING_MAX_CR_RETRY = 100,
+ /*
+	 * Some receivers fail to train on the first try but succeed on
+	 * subsequent tries, so 2 retries should be plenty. If training
+	 * still has not succeeded after that, we do not expect it ever
+	 * will.
+ */
+ LINK_TRAINING_MAX_VERIFY_RETRY = 2,
+ PEAK_FACTOR_X1000 = 1006,
};
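A bounded-retry loop of the kind LINK_TRAINING_MAX_VERIFY_RETRY suggests might look like the sketch below. This is illustrative only: try_verify_link() is a hypothetical stand-in for the driver's own capability-verification routine, not a DC function.

/* Sketch: retry link verification at most LINK_TRAINING_MAX_VERIFY_RETRY
 * times, per the rationale in the comment above. */
static bool verify_link_with_retries(struct dc_link *link)
{
	int attempt;

	for (attempt = 0; attempt < LINK_TRAINING_MAX_VERIFY_RETRY; attempt++) {
		/* try_verify_link() is hypothetical; the real code would call
		 * its actual verification helper here. */
		if (try_verify_link(link))
			return true;
	}
	return false;
}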
bool dp_verify_link_cap(
@@ -182,4 +190,5 @@ enum dc_status dpcd_configure_lttpr_mode(
struct link_training_settings *lt_settings);
enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings *link_settings);
+bool dp_retrieve_lttpr_cap(struct dc_link *link);
#endif /* __DC_LINK_DP_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 90dbe26bf954..a17e5de3b100 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -91,9 +91,7 @@ struct clk_limit_table_entry {
unsigned int dispclk_mhz;
unsigned int dppclk_mhz;
unsigned int phyclk_mhz;
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
unsigned int wck_ratio;
-#endif
};
/* This table is contiguous */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index fc11f8e1ab59..0afa2364a986 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -29,7 +29,6 @@
#include "dc_types.h"
#include "hw_shared.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
enum phyd32clk_clock_source {
PHYD32CLKA,
PHYD32CLKB,
@@ -55,18 +54,15 @@ enum dentist_dispclk_change_mode {
DISPCLK_CHANGE_MODE_IMMEDIATE,
DISPCLK_CHANGE_MODE_RAMPING,
};
-#endif
struct dccg {
struct dc_context *ctx;
const struct dccg_funcs *funcs;
int pipe_dppclk_khz[MAX_PIPES];
int ref_dppclk;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
int dtbclk_khz[MAX_PIPES];
int audio_dtbclk_khz;
int ref_dtbclk_khz;
-#endif
};
struct dccg_funcs {
@@ -83,7 +79,6 @@ struct dccg_funcs {
void (*otg_drop_pixel)(struct dccg *dccg,
uint32_t otg_inst);
void (*dccg_init)(struct dccg *dccg);
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
void (*set_physymclk)(
struct dccg *dccg,
@@ -105,7 +100,6 @@ struct dccg_funcs {
void (*set_dispclk_change_mode)(
struct dccg *dccg,
enum dentist_dispclk_change_mode change_mode);
-#endif
};
#endif //__DAL_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index d2611b865695..0638b337f143 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -152,11 +152,9 @@ struct hubbub_funcs {
void (*force_pstate_change_control)(struct hubbub *hubbub, bool force, bool allow);
void (*init_watermarks)(struct hubbub *hubbub);
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
void (*program_det_size)(struct hubbub *hubbub, int hubp_inst, unsigned det_buffer_size_in_kbyte);
void (*program_compbuf_size)(struct hubbub *hubbub, unsigned compbuf_size_kb, bool safe_to_increase);
void (*init_crb)(struct hubbub *hubbub);
-#endif
};
struct hubbub {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index f643fe3ed064..9eaf345aa2a1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -127,13 +127,11 @@ struct link_enc_state {
};
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
- enum encoder_type_select {
- ENCODER_TYPE_DIG = 0,
- ENCODER_TYPE_HDMI_FRL = 1,
- ENCODER_TYPE_DP_128B132B = 2
- };
-#endif
+enum encoder_type_select {
+ ENCODER_TYPE_DIG = 0,
+ ENCODER_TYPE_HDMI_FRL = 1,
+ ENCODER_TYPE_DP_128B132B = 2
+};
struct link_encoder_funcs {
void (*read_state)(
@@ -193,12 +191,10 @@ struct link_encoder_funcs {
enum signal_type (*get_dig_mode)(
struct link_encoder *enc);
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
void (*set_dio_phy_mux)(
struct link_encoder *enc,
enum encoder_type_select sel,
uint32_t hpo_inst);
-#endif
};
/*
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index dd562dcd03b2..8798cfa11a4d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -33,10 +33,8 @@
struct dchub_init_data;
struct cstate_pstate_watermarks_st {
uint32_t cstate_exit_ns;
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
uint32_t cstate_exit_z8_ns;
uint32_t cstate_enter_plus_exit_z8_ns;
-#endif
uint32_t cstate_enter_plus_exit_ns;
uint32_t pstate_change_ns;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index 47c7e4c3a51b..564ea6a727b0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -237,6 +237,9 @@ struct stream_encoder_funcs {
void (*dp_set_odm_combine)(
struct stream_encoder *enc,
bool odm_combine);
+
+ uint32_t (*get_fifo_cal_average_level)(
+ struct stream_encoder *enc);
};
#endif /* STREAM_ENCODER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 8f06e273e703..03f47f23fb65 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -173,9 +173,7 @@ struct timing_generator_funcs {
bool (*enable_crtc)(struct timing_generator *tg);
bool (*disable_crtc)(struct timing_generator *tg);
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
bool (*immediate_disable_crtc)(struct timing_generator *tg);
-#endif
bool (*is_counter_moving)(struct timing_generator *tg);
void (*get_position)(struct timing_generator *tg,
struct crtc_position *position);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index e3ab4e43f673..5ab008e62b82 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -236,9 +236,7 @@ struct hw_sequencer_funcs {
const struct tg_color *solid_color,
int width, int height, int offset);
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
void (*z10_restore)(struct dc *dc);
-#endif
void (*update_visual_confirm_color)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h b/drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h
new file mode 100644
index 000000000000..d4d52ef1b165
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h
@@ -0,0 +1,18 @@
+#ifndef __LINK_DPCD_H__
+#define __LINK_DPCD_H__
+#include <inc/core_status.h>
+#include <dc_link.h>
+#include <inc/link_hwss.h>
+
+enum dc_status core_link_read_dpcd(
+ struct dc_link *link,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t size);
+
+enum dc_status core_link_write_dpcd(
+ struct dc_link *link,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t size);
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
index 33590a728fc5..fc1d289bb9fe 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
@@ -26,20 +26,6 @@
#ifndef __DC_LINK_HWSS_H__
#define __DC_LINK_HWSS_H__
-#include "inc/core_status.h"
-
-enum dc_status core_link_read_dpcd(
- struct dc_link *link,
- uint32_t address,
- uint8_t *data,
- uint32_t size);
-
-enum dc_status core_link_write_dpcd(
- struct dc_link *link,
- uint32_t address,
- const uint8_t *data,
- uint32_t size);
-
struct gpio *get_hpd_gpio(struct dc_bios *dcb,
struct graphics_object_id link_id,
struct gpio_service *gpio_service);
diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile
index ea4943c5d552..0d09181227c5 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile
@@ -117,9 +117,7 @@ IRQ_DCN3_03 = irq_service_dcn303.o
AMD_DAL_IRQ_DCN3_03 = $(addprefix $(AMDDALPATH)/dc/irq/dcn303/,$(IRQ_DCN3_03))
AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN3_03)
-endif
-ifdef CONFIG_DRM_AMD_DC_DCN3_1
###############################################################################
# DCN 31
###############################################################################
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.h b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.h
index d6c845f6bfbf..67efbfe9b7aa 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.h
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.h
@@ -23,8 +23,6 @@
*
*/
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
-
#ifndef __DAL_IRQ_SERVICE_DCN31_H__
#define __DAL_IRQ_SERVICE_DCN31_H__
@@ -34,4 +32,3 @@ struct irq_service *dal_irq_service_dcn31_create(
struct irq_service_init_data *init_data);
#endif /* __DAL_IRQ_SERVICE_DCN31_H__ */
-#endif /* CONFIG_DRM_AMD_DC_DCN3_1 */
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index 5f245bde54ff..a2a4fbeb83f8 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -119,7 +119,7 @@ bool dal_irq_service_set(
dal_irq_service_ack(irq_service, source);
- if (info->funcs->set)
+ if (info->funcs && info->funcs->set)
return info->funcs->set(irq_service, info, enable);
dal_irq_service_set_generic(irq_service, info, enable);
@@ -153,7 +153,7 @@ bool dal_irq_service_ack(
return false;
}
- if (info->funcs->ack)
+ if (info->funcs && info->funcs->ack)
return info->funcs->ack(irq_service, info);
dal_irq_service_ack_generic(irq_service, info);
diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h
index 5f9346622301..1139b9eb9f6f 100644
--- a/drivers/gpu/drm/amd/display/dc/irq_types.h
+++ b/drivers/gpu/drm/amd/display/dc/irq_types.h
@@ -165,7 +165,7 @@ enum irq_type
};
#define DAL_VALID_IRQ_SRC_NUM(src) \
- ((src) <= DAL_IRQ_SOURCES_NUMBER && (src) > DC_IRQ_SOURCE_INVALID)
+ ((src) < DAL_IRQ_SOURCES_NUMBER && (src) > DC_IRQ_SOURCE_INVALID)
/* Number of Page Flip IRQ Sources. */
#define DAL_PFLIP_IRQ_SRC_NUM \
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 7634e8d94543..abbf7ae584c9 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -73,9 +73,7 @@ extern "C" {
/* Forward declarations */
struct dmub_srv;
struct dmub_srv_common_regs;
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
struct dmub_srv_dcn31_regs;
-#endif
struct dmcub_trace_buf_entry;
@@ -97,9 +95,7 @@ enum dmub_asic {
DMUB_ASIC_DCN301,
DMUB_ASIC_DCN302,
DMUB_ASIC_DCN303,
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
DMUB_ASIC_DCN31,
-#endif
DMUB_ASIC_MAX,
};
@@ -238,9 +234,32 @@ struct dmub_srv_hw_params {
uint32_t psp_version;
bool load_inst_const;
bool skip_panel_power_sequence;
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
bool disable_z10;
-#endif
+};
+
+/**
+ * struct dmub_diagnostic_data - Diagnostic data retrieved from DMCUB for
+ * debugging purposes, including logging, crash analysis, etc.
+ */
+struct dmub_diagnostic_data {
+ uint32_t dmcub_version;
+ uint32_t scratch[16];
+ uint32_t pc;
+ uint32_t undefined_address_fault_addr;
+ uint32_t inst_fetch_fault_addr;
+ uint32_t data_write_fault_addr;
+ uint32_t inbox1_rptr;
+ uint32_t inbox1_wptr;
+ uint32_t inbox1_size;
+ uint32_t inbox0_rptr;
+ uint32_t inbox0_wptr;
+ uint32_t inbox0_size;
+ uint8_t is_dmcub_enabled : 1;
+ uint8_t is_dmcub_soft_reset : 1;
+ uint8_t is_dmcub_secure_reset : 1;
+ uint8_t is_traceport_en : 1;
+ uint8_t is_cw0_enabled : 1;
+ uint8_t is_cw6_enabled : 1;
};
/**
@@ -335,6 +354,8 @@ struct dmub_srv_hw_funcs {
void (*send_inbox0_cmd)(struct dmub_srv *dmub, union dmub_inbox0_data_register data);
uint32_t (*get_current_time)(struct dmub_srv *dmub);
+
+ void (*get_diagnostic_data)(struct dmub_srv *dmub, struct dmub_diagnostic_data *dmub_oca);
};
/**
@@ -373,9 +394,7 @@ struct dmub_srv {
/* private: internal use only */
const struct dmub_srv_common_regs *regs;
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
const struct dmub_srv_dcn31_regs *regs_dcn31;
-#endif
struct dmub_srv_base_funcs funcs;
struct dmub_srv_hw_funcs hw_funcs;
@@ -685,6 +704,8 @@ enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub,
bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entry *entry);
+bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data);
+
#if defined(__cplusplus)
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index ff1f4ec1531e..7c4734f905d9 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -47,10 +47,10 @@
/* Firmware versioning. */
#ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0xefd666c1
+#define DMUB_FW_VERSION_GIT_HASH 0xf3da2b656
#define DMUB_FW_VERSION_MAJOR 0
#define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 69
+#define DMUB_FW_VERSION_REVISION 71
#define DMUB_FW_VERSION_TEST 0
#define DMUB_FW_VERSION_VBIOS 0
#define DMUB_FW_VERSION_HOTFIX 0
@@ -309,6 +309,7 @@ struct dmcub_trace_buf_entry {
* Current scratch register usage is as follows:
*
* SCRATCH0: FW Boot Status register
+ * SCRATCH5: LVTMA Status Register
* SCRATCH15: FW Boot Options register
*/
@@ -335,6 +336,21 @@ enum dmub_fw_boot_status_bit {
DMUB_FW_BOOT_STATUS_BIT_RESTORE_REQUIRED = (1 << 3), /**< 1 if driver should call restore */
};
+/* Register bit definition for SCRATCH5 */
+union dmub_lvtma_status {
+ struct {
+ uint32_t psp_ok : 1;
+ uint32_t edp_on : 1;
+ uint32_t reserved : 30;
+ } bits;
+ uint32_t all;
+};
+
+enum dmub_lvtma_status_bit {
+ DMUB_LVTMA_STATUS_BIT_PSP_OK = (1 << 0),
+ DMUB_LVTMA_STATUS_BIT_EDP_ON = (1 << 1),
+};
+
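As a rough, non-authoritative illustration, a raw SCRATCH5 value read back from the DMCUB could be decoded through the union above; the register read itself and the pr_info logging are assumed, not part of the patch.

/* Sketch: decode a raw SCRATCH5 value via dmub_lvtma_status. */
static void dump_lvtma_status(uint32_t scratch5)
{
	union dmub_lvtma_status status;

	status.all = scratch5;
	pr_info("LVTMA status: psp_ok=%u edp_on=%u\n",
		status.bits.psp_ok, status.bits.edp_on);
}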
/**
* union dmub_fw_boot_options - Boot option definitions for SCRATCH15
*/
@@ -346,11 +362,7 @@ union dmub_fw_boot_options {
uint32_t skip_phy_access : 1; /**< 1 if PHY access should be skipped */
uint32_t disable_clk_gate: 1; /**< 1 if clock gating should be disabled */
uint32_t skip_phy_init_panel_sequence: 1; /**< 1 to skip panel init seq */
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
uint32_t z10_disable: 1; /**< 1 to disable z10 */
-#else
- uint32_t reserved_unreleased: 1; /**< reserved for an unreleased feature */
-#endif
uint32_t reserved : 25; /**< reserved */
} bits; /**< boot bits */
uint32_t all; /**< 32-bit access to bits */
@@ -615,7 +627,6 @@ enum dmub_cmd_type {
* Command type used for OUTBOX1 notification enable
*/
DMUB_CMD__OUTBOX1_ENABLE = 71,
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
/**
* Command type used for all idle optimization commands.
*/
@@ -628,7 +639,10 @@ enum dmub_cmd_type {
* Command type used for all panel control commands.
*/
DMUB_CMD__PANEL_CNTL = 74,
-#endif
+ /**
+ * Command type used for EDID CEA parsing
+ */
+ DMUB_CMD__EDID_CEA = 79,
/**
* Command type used for all VBIOS interface commands.
*/
@@ -834,8 +848,6 @@ struct dmub_rb_cmd_mall {
uint8_t reserved2; /**< Reserved bits */
};
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
-
/**
* enum dmub_cmd_idle_opt_type - Idle optimization command type.
*/
@@ -880,7 +892,7 @@ struct dmub_rb_cmd_clk_mgr_notify_clocks {
struct dmub_cmd_header header; /**< header */
struct dmub_clocks clocks; /**< clock data */
};
-#endif
+
/**
* struct dmub_cmd_digx_encoder_control_data - Encoder control data.
*/
@@ -2091,7 +2103,6 @@ struct dmub_rb_cmd_drr_update {
struct dmub_optc_state dmub_optc_state_req;
};
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
/**
* enum dmub_cmd_panel_cntl_type - Panel control command.
*/
@@ -2126,7 +2137,6 @@ struct dmub_rb_cmd_panel_cntl {
struct dmub_cmd_header header; /**< header */
struct dmub_cmd_panel_cntl_data data; /**< payload */
};
-#endif
/**
* Data passed from driver to FW in a DMUB_CMD__VBIOS_LVTMA_CONTROL command.
@@ -2153,6 +2163,68 @@ struct dmub_rb_cmd_lvtma_control {
};
/**
+ * Maximum number of bytes in a chunk sent to DMUB for parsing
+ */
+#define DMUB_EDID_CEA_DATA_CHUNK_BYTES 8
+
+/**
+ * Represents a chunk of a CEA block sent to DMUB for parsing
+ */
+struct dmub_cmd_send_edid_cea {
+ uint16_t offset; /**< offset into the CEA block */
+ uint8_t length; /**< number of bytes in payload to copy as part of CEA block */
+ uint16_t total_length; /**< total length of the CEA block */
+ uint8_t payload[DMUB_EDID_CEA_DATA_CHUNK_BYTES]; /**< data chunk of the CEA block */
+ uint8_t pad[3]; /**< padding, reserved for future expansion */
+};
+
+/**
+ * Result of VSDB parsing from CEA block
+ */
+struct dmub_cmd_edid_cea_amd_vsdb {
+ uint8_t vsdb_found; /**< 1 if parsing has found valid AMD VSDB */
+ uint8_t freesync_supported; /**< 1 if Freesync is supported */
+ uint16_t amd_vsdb_version; /**< AMD VSDB version */
+ uint16_t min_frame_rate; /**< Minimum frame rate */
+ uint16_t max_frame_rate; /**< Maximum frame rate */
+};
+
+/**
+ * Result of sending a CEA chunk
+ */
+struct dmub_cmd_edid_cea_ack {
+ uint16_t offset; /**< offset of the chunk into the CEA block */
+ uint8_t success; /**< 1 if sending this chunk succeeded */
+ uint8_t pad; /**< padding, reserved for future expansion */
+};
+
+/**
+ * Specifies whether the result is an ACK/NACK or the parsing has finished
+ */
+enum dmub_cmd_edid_cea_reply_type {
+ DMUB_CMD__EDID_CEA_AMD_VSDB = 1, /**< VSDB parsing has finished */
+ DMUB_CMD__EDID_CEA_ACK = 2, /**< acknowledges whether the CEA chunk was sent successfully */
+};
+
+/**
+ * Definition of a DMUB_CMD__EDID_CEA command.
+ */
+struct dmub_rb_cmd_edid_cea {
+ struct dmub_cmd_header header; /**< Command header */
+ union dmub_cmd_edid_cea_data {
+ struct dmub_cmd_send_edid_cea input; /**< input to send CEA chunks */
+ struct dmub_cmd_edid_cea_output { /**< output with results */
+ uint8_t type; /**< dmub_cmd_edid_cea_reply_type */
+ union {
+ struct dmub_cmd_edid_cea_amd_vsdb amd_vsdb;
+ struct dmub_cmd_edid_cea_ack ack;
+ };
+ } output; /**< output to retrieve ACK/NACK or VSDB parsing results */
+ } data; /**< Command data */
+
+};
+
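As a sketch of how the chunking described above might be driven, the loop below splits a CEA block into DMUB_EDID_CEA_DATA_CHUNK_BYTES-sized pieces; queue_edid_cea_cmd() is a hypothetical stand-in for building and submitting the DMUB_CMD__EDID_CEA command and is not part of the patch.

/* Sketch: send a CEA block to the DMCUB in 8-byte chunks. */
static void send_edid_cea_block(const uint8_t *cea, uint16_t total_length)
{
	uint16_t offset;

	for (offset = 0; offset < total_length;
	     offset += DMUB_EDID_CEA_DATA_CHUNK_BYTES) {
		struct dmub_cmd_send_edid_cea chunk = { 0 };
		uint16_t remaining = total_length - offset;
		uint8_t len = remaining < DMUB_EDID_CEA_DATA_CHUNK_BYTES ?
			      (uint8_t)remaining : DMUB_EDID_CEA_DATA_CHUNK_BYTES;

		chunk.offset = offset;
		chunk.length = len;
		chunk.total_length = total_length;
		memcpy(chunk.payload, cea + offset, len);

		/* Hypothetical transport stub; the real driver queues the
		 * command to the DMCUB inbox. */
		queue_edid_cea_cmd(&chunk);
	}
}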
+/**
* union dmub_rb_cmd - DMUB inbox command.
*/
union dmub_rb_cmd {
@@ -2225,7 +2297,6 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__MALL command.
*/
struct dmub_rb_cmd_mall mall;
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
/**
* Definition of a DMUB_CMD__IDLE_OPT_DCN_RESTORE command.
*/
@@ -2240,7 +2311,6 @@ union dmub_rb_cmd {
* Definition of DMUB_CMD__PANEL_CNTL commands.
*/
struct dmub_rb_cmd_panel_cntl panel_cntl;
-#endif
/**
* Definition of a DMUB_CMD__ABM_SET_PIPE command.
*/
@@ -2290,6 +2360,10 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__VBIOS_LVTMA_CONTROL command.
*/
struct dmub_rb_cmd_lvtma_control lvtma_control;
+ /**
+ * Definition of a DMUB_CMD__EDID_CEA command.
+ */
+ struct dmub_rb_cmd_edid_cea edid_cea;
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/src/Makefile b/drivers/gpu/drm/amd/display/dmub/src/Makefile
index 80b9fe225208..0495bcdd3463 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/Makefile
+++ b/drivers/gpu/drm/amd/display/dmub/src/Makefile
@@ -21,12 +21,8 @@
#
DMUB = dmub_srv.o dmub_srv_stat.o dmub_reg.o dmub_dcn20.o dmub_dcn21.o
-DMUB += dmub_dcn30.o dmub_dcn301.o
-DMUB += dmub_dcn302.o
-DMUB += dmub_dcn303.o
-ifdef CONFIG_DRM_AMD_DC_DCN3_1
+DMUB += dmub_dcn30.o dmub_dcn301.o dmub_dcn302.o dmub_dcn303.o
DMUB += dmub_dcn31.o
-endif
AMD_DAL_DMUB = $(addprefix $(AMDDALPATH)/dmub/src/,$(DMUB))
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index 8cdc1c75394e..a6540e27044d 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -40,7 +40,10 @@
const struct dmub_srv_common_regs dmub_srv_dcn20_regs = {
#define DMUB_SR(reg) REG_OFFSET(reg),
- { DMUB_COMMON_REGS() },
+ {
+ DMUB_COMMON_REGS()
+ DMCUB_INTERNAL_REGS()
+ },
#undef DMUB_SR
#define DMUB_SF(reg, field) FD_MASK(reg, field),
@@ -404,3 +407,63 @@ uint32_t dmub_dcn20_get_current_time(struct dmub_srv *dmub)
{
return REG_READ(DMCUB_TIMER_CURRENT);
}
+
+void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
+{
+ uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
+ uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;
+
+ if (!dmub || !diag_data)
+ return;
+
+ memset(diag_data, 0, sizeof(*diag_data));
+
+ diag_data->dmcub_version = dmub->fw_version;
+
+ diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0);
+ diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1);
+ diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2);
+ diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3);
+ diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4);
+ diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5);
+ diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6);
+ diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7);
+ diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8);
+ diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9);
+ diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10);
+ diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11);
+ diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12);
+ diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13);
+ diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14);
+ diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15);
+
+ diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
+ diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
+ diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
+
+ diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
+ diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
+ diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
+
+ diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
+ diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
+ diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
+
+ REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
+ diag_data->is_dmcub_enabled = is_dmub_enabled;
+
+ REG_GET(DMCUB_CNTL, DMCUB_SOFT_RESET, &is_soft_reset);
+ diag_data->is_dmcub_soft_reset = is_soft_reset;
+
+ REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
+ diag_data->is_dmcub_secure_reset = is_sec_reset;
+
+ REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
+ diag_data->is_traceport_en = is_traceport_enabled;
+
+ REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled);
+ diag_data->is_cw0_enabled = is_cw0_enabled;
+
+ REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
+ diag_data->is_cw6_enabled = is_cw6_enabled;
+}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
index f772f8b348ea..c2e5831ac52c 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
@@ -36,6 +36,9 @@ struct dmub_srv;
DMUB_SR(DMCUB_CNTL) \
DMUB_SR(DMCUB_MEM_CNTL) \
DMUB_SR(DMCUB_SEC_CNTL) \
+ DMUB_SR(DMCUB_INBOX0_SIZE) \
+ DMUB_SR(DMCUB_INBOX0_RPTR) \
+ DMUB_SR(DMCUB_INBOX0_WPTR) \
DMUB_SR(DMCUB_INBOX1_BASE_ADDRESS) \
DMUB_SR(DMCUB_INBOX1_SIZE) \
DMUB_SR(DMCUB_INBOX1_RPTR) \
@@ -108,7 +111,12 @@ struct dmub_srv;
DMUB_SR(DCN_VM_FB_LOCATION_BASE) \
DMUB_SR(DCN_VM_FB_OFFSET) \
DMUB_SR(DMCUB_INTERRUPT_ACK) \
- DMUB_SR(DMCUB_TIMER_CURRENT)
+ DMUB_SR(DMCUB_TIMER_CURRENT) \
+ DMUB_SR(DMCUB_INST_FETCH_FAULT_ADDR) \
+ DMUB_SR(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR) \
+ DMUB_SR(DMCUB_DATA_WRITE_FAULT_ADDR)
+
+#define DMCUB_INTERNAL_REGS()
#define DMUB_COMMON_FIELDS() \
DMUB_SF(DMCUB_CNTL, DMCUB_ENABLE) \
@@ -118,6 +126,7 @@ struct dmub_srv;
DMUB_SF(DMCUB_MEM_CNTL, DMCUB_MEM_WRITE_SPACE) \
DMUB_SF(DMCUB_SEC_CNTL, DMCUB_SEC_RESET) \
DMUB_SF(DMCUB_SEC_CNTL, DMCUB_MEM_UNIT_ID) \
+ DMUB_SF(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS) \
DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_TOP_ADDRESS) \
DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE) \
DMUB_SF(DMCUB_REGION3_CW1_TOP_ADDRESS, DMCUB_REGION3_CW1_TOP_ADDRESS) \
@@ -147,6 +156,7 @@ struct dmub_srv;
struct dmub_srv_common_reg_offset {
#define DMUB_SR(reg) uint32_t reg;
DMUB_COMMON_REGS()
+ DMCUB_INTERNAL_REGS()
#undef DMUB_SR
};
@@ -234,4 +244,6 @@ bool dmub_dcn20_use_cached_trace_buffer(struct dmub_srv *dmub);
uint32_t dmub_dcn20_get_current_time(struct dmub_srv *dmub);
+void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *dmub_oca);
+
#endif /* _DMUB_DCN20_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
index 1cf67b3e4771..51bb9bceb1b1 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
@@ -39,7 +39,10 @@
const struct dmub_srv_common_regs dmub_srv_dcn21_regs = {
#define DMUB_SR(reg) REG_OFFSET(reg),
- { DMUB_COMMON_REGS() },
+ {
+ DMUB_COMMON_REGS()
+ DMCUB_INTERNAL_REGS()
+ },
#undef DMUB_SR
#define DMUB_SF(reg, field) FD_MASK(reg, field),
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
index fb11c8d39208..81dae75e9ff8 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
@@ -40,7 +40,10 @@
const struct dmub_srv_common_regs dmub_srv_dcn30_regs = {
#define DMUB_SR(reg) REG_OFFSET(reg),
- { DMUB_COMMON_REGS() },
+ {
+ DMUB_COMMON_REGS()
+ DMCUB_INTERNAL_REGS()
+ },
#undef DMUB_SR
#define DMUB_SF(reg, field) FD_MASK(reg, field),
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn301.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn301.c
index 197398257692..84c3ab3becc5 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn301.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn301.c
@@ -39,7 +39,10 @@
const struct dmub_srv_common_regs dmub_srv_dcn301_regs = {
#define DMUB_SR(reg) REG_OFFSET(reg),
- { DMUB_COMMON_REGS() },
+ {
+ DMUB_COMMON_REGS()
+ DMCUB_INTERNAL_REGS()
+ },
#undef DMUB_SR
#define DMUB_SF(reg, field) FD_MASK(reg, field),
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn302.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn302.c
index f5db4437a882..8c02575e168d 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn302.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn302.c
@@ -39,7 +39,10 @@
const struct dmub_srv_common_regs dmub_srv_dcn302_regs = {
#define DMUB_SR(reg) REG_OFFSET(reg),
- { DMUB_COMMON_REGS() },
+ {
+ DMUB_COMMON_REGS()
+ DMCUB_INTERNAL_REGS()
+ },
#undef DMUB_SR
#define DMUB_SF(reg, field) FD_MASK(reg, field),
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn303.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn303.c
index 9331e1719901..b42369984473 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn303.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn303.c
@@ -21,7 +21,10 @@
const struct dmub_srv_common_regs dmub_srv_dcn303_regs = {
#define DMUB_SR(reg) REG_OFFSET(reg),
- { DMUB_COMMON_REGS() },
+ {
+ DMUB_COMMON_REGS()
+ DMCUB_INTERNAL_REGS()
+ },
#undef DMUB_SR
#define DMUB_SF(reg, field) FD_MASK(reg, field),
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index 8c886ece71f6..973de346410d 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -352,3 +352,63 @@ uint32_t dmub_dcn31_get_current_time(struct dmub_srv *dmub)
{
return REG_READ(DMCUB_TIMER_CURRENT);
}
+
+void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
+{
+ uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
+ uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;
+
+ if (!dmub || !diag_data)
+ return;
+
+ memset(diag_data, 0, sizeof(*diag_data));
+
+ diag_data->dmcub_version = dmub->fw_version;
+
+ diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0);
+ diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1);
+ diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2);
+ diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3);
+ diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4);
+ diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5);
+ diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6);
+ diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7);
+ diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8);
+ diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9);
+ diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10);
+ diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11);
+ diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12);
+ diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13);
+ diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14);
+ diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15);
+
+ diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
+ diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
+ diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
+
+ diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
+ diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
+ diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
+
+ diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
+ diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
+ diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
+
+ REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
+ diag_data->is_dmcub_enabled = is_dmub_enabled;
+
+ REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
+ diag_data->is_dmcub_soft_reset = is_soft_reset;
+
+ REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
+ diag_data->is_dmcub_secure_reset = is_sec_reset;
+
+ REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
+ diag_data->is_traceport_en = is_traceport_enabled;
+
+ REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled);
+ diag_data->is_cw0_enabled = is_cw0_enabled;
+
+ REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
+ diag_data->is_cw6_enabled = is_cw6_enabled;
+}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
index 2829c3e9a310..9456a6a2d518 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
@@ -36,6 +36,9 @@ struct dmub_srv;
DMUB_SR(DMCUB_CNTL) \
DMUB_SR(DMCUB_CNTL2) \
DMUB_SR(DMCUB_SEC_CNTL) \
+ DMUB_SR(DMCUB_INBOX0_SIZE) \
+ DMUB_SR(DMCUB_INBOX0_RPTR) \
+ DMUB_SR(DMCUB_INBOX0_WPTR) \
DMUB_SR(DMCUB_INBOX1_BASE_ADDRESS) \
DMUB_SR(DMCUB_INBOX1_SIZE) \
DMUB_SR(DMCUB_INBOX1_RPTR) \
@@ -103,11 +106,15 @@ struct dmub_srv;
DMUB_SR(DMCUB_SCRATCH14) \
DMUB_SR(DMCUB_SCRATCH15) \
DMUB_SR(DMCUB_GPINT_DATAIN1) \
+ DMUB_SR(DMCUB_GPINT_DATAOUT) \
DMUB_SR(CC_DC_PIPE_DIS) \
DMUB_SR(MMHUBBUB_SOFT_RESET) \
DMUB_SR(DCN_VM_FB_LOCATION_BASE) \
DMUB_SR(DCN_VM_FB_OFFSET) \
- DMUB_SR(DMCUB_TIMER_CURRENT)
+ DMUB_SR(DMCUB_TIMER_CURRENT) \
+ DMUB_SR(DMCUB_INST_FETCH_FAULT_ADDR) \
+ DMUB_SR(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR) \
+ DMUB_SR(DMCUB_DATA_WRITE_FAULT_ADDR)
#define DMUB_DCN31_FIELDS() \
DMUB_SF(DMCUB_CNTL, DMCUB_ENABLE) \
@@ -115,6 +122,7 @@ struct dmub_srv;
DMUB_SF(DMCUB_CNTL2, DMCUB_SOFT_RESET) \
DMUB_SF(DMCUB_SEC_CNTL, DMCUB_SEC_RESET) \
DMUB_SF(DMCUB_SEC_CNTL, DMCUB_MEM_UNIT_ID) \
+ DMUB_SF(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS) \
DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_TOP_ADDRESS) \
DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE) \
DMUB_SF(DMCUB_REGION3_CW1_TOP_ADDRESS, DMCUB_REGION3_CW1_TOP_ADDRESS) \
@@ -138,11 +146,13 @@ struct dmub_srv;
DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \
DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \
DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \
- DMUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET)
+ DMUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET) \
+ DMUB_SF(DMCUB_INBOX0_WPTR, DMCUB_INBOX0_WPTR)
struct dmub_srv_dcn31_reg_offset {
#define DMUB_SR(reg) uint32_t reg;
DMUB_DCN31_REGS()
+ DMCUB_INTERNAL_REGS()
#undef DMUB_SR
};
@@ -227,4 +237,6 @@ void dmub_dcn31_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset);
uint32_t dmub_dcn31_get_current_time(struct dmub_srv *dmub);
+void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data);
+
#endif /* _DMUB_DCN31_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 681500f42c91..2bdbd7406f56 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -31,9 +31,7 @@
#include "dmub_dcn301.h"
#include "dmub_dcn302.h"
#include "dmub_dcn303.h"
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
#include "dmub_dcn31.h"
-#endif
#include "os_types.h"
/*
* Note: the DMUB service is standalone. No additional headers should be
@@ -176,6 +174,8 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->get_outbox0_wptr = dmub_dcn20_get_outbox0_wptr;
funcs->set_outbox0_rptr = dmub_dcn20_set_outbox0_rptr;
+ funcs->get_diagnostic_data = dmub_dcn20_get_diagnostic_data;
+
if (asic == DMUB_ASIC_DCN21) {
dmub->regs = &dmub_srv_dcn21_regs;
@@ -206,9 +206,9 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->setup_windows = dmub_dcn30_setup_windows;
}
break;
-#ifdef CONFIG_DRM_AMD_DC_DCN3_1
case DMUB_ASIC_DCN31:
+ dmub->regs_dcn31 = &dmub_srv_dcn31_regs;
funcs->reset = dmub_dcn31_reset;
funcs->reset_release = dmub_dcn31_reset_release;
funcs->backdoor_load = dmub_dcn31_backdoor_load;
@@ -232,14 +232,11 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->get_outbox0_wptr = dmub_dcn31_get_outbox0_wptr;
funcs->set_outbox0_rptr = dmub_dcn31_set_outbox0_rptr;
- if (asic == DMUB_ASIC_DCN31) {
- dmub->regs_dcn31 = &dmub_srv_dcn31_regs;
- }
+ funcs->get_diagnostic_data = dmub_dcn31_get_diagnostic_data;
funcs->get_current_time = dmub_dcn31_get_current_time;
break;
-#endif
default:
return false;
@@ -794,3 +791,11 @@ bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entr
return dmub_rb_out_trace_buffer_front(&dmub->outbox0_rb, (void *)entry);
}
+
+bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
+{
+ if (!dmub || !dmub->hw_funcs.get_diagnostic_data || !diag_data)
+ return false;
+ dmub->hw_funcs.get_diagnostic_data(dmub, diag_data);
+ return true;
+}
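A caller-side sketch (assumed usage, not taken from the patch) of how the new query could feed debug logging; the pr_* macros and the error handling here are illustrative.

/* Sketch: fetch DMCUB diagnostics and log a few fields. */
static void log_dmcub_diagnostics(struct dmub_srv *dmub)
{
	struct dmub_diagnostic_data diag;

	if (!dmub_srv_get_diagnostic_data(dmub, &diag)) {
		pr_warn("DMCUB diagnostic data unavailable\n");
		return;
	}

	pr_info("DMCUB fw 0x%08x pc 0x%08x inbox1 rptr/wptr %u/%u\n",
		diag.dmcub_version, diag.pc,
		diag.inbox1_rptr, diag.inbox1_wptr);
}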
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index d615a8e00f8c..381c17caace1 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -224,7 +224,6 @@ enum {
#define ASICREV_IS_GREEN_SARDINE(eChipRev) ((eChipRev >= GREEN_SARDINE_A0) && (eChipRev < 0xFF))
#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
#define FAMILY_YELLOW_CARP 146
#define YELLOW_CARP_A0 0x01
@@ -234,7 +233,6 @@ enum {
#ifndef ASICREV_IS_YELLOW_CARP
#define ASICREV_IS_YELLOW_CARP(eChipRev) ((eChipRev >= YELLOW_CARP_A0) && (eChipRev < YELLOW_CARP_UNKNOWN))
#endif
-#endif
/*
diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
index 59453ced9ece..fe75ec834892 100644
--- a/drivers/gpu/drm/amd/display/include/dal_types.h
+++ b/drivers/gpu/drm/amd/display/include/dal_types.h
@@ -55,9 +55,7 @@ enum dce_version {
DCN_VERSION_3_01,
DCN_VERSION_3_02,
DCN_VERSION_3_03,
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
DCN_VERSION_3_1,
-#endif
DCN_VERSION_MAX
};
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
index 62dd89cf70bb..1a0f7c3dc964 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
@@ -172,10 +172,8 @@ char *mod_hdcp_status_to_str(int32_t status)
return "MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE";
case MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE:
return "MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE";
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
case MOD_HDCP_STATUS_UNSUPPORTED_PSP_VER_FAILURE:
return "MOD_HDCP_STATUS_UNSUPPORTED_PSP_VER_FAILURE";
-#endif
default:
return "MOD_HDCP_STATUS_UNKNOWN";
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index fc88fe249a50..1b02056bc8bd 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -44,13 +44,9 @@ static void hdcp2_message_init(struct mod_hdcp *hdcp,
in->process.msg3_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
in->process.msg3_desc.msg_size = 0;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
+
static enum mod_hdcp_status mod_hdcp_remove_display_from_topology_v2(
struct mod_hdcp *hdcp, uint8_t index)
-#else
-enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
- struct mod_hdcp *hdcp, uint8_t index)
-#endif
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_dtm_shared_memory *dtm_cmd;
@@ -84,7 +80,7 @@ enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
mutex_unlock(&psp->dtm_context.mutex);
return status;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
+
static enum mod_hdcp_status mod_hdcp_remove_display_from_topology_v3(
struct mod_hdcp *hdcp, uint8_t index)
{
@@ -136,14 +132,9 @@ enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
return status;
}
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
+
static enum mod_hdcp_status mod_hdcp_add_display_to_topology_v2(
struct mod_hdcp *hdcp, struct mod_hdcp_display *display)
-#else
-enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
- struct mod_hdcp_display *display)
-#endif
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_dtm_shared_memory *dtm_cmd;
@@ -189,7 +180,6 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
return status;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
static enum mod_hdcp_status mod_hdcp_add_display_to_topology_v3(
struct mod_hdcp *hdcp, struct mod_hdcp_display *display)
{
@@ -254,7 +244,7 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
return status;
}
-#endif
+
enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
{
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
index b0b2544eaa9d..2937b4b61461 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
@@ -44,12 +44,8 @@ enum bgd_security_hdcp2_content_type {
enum ta_dtm_command {
TA_DTM_COMMAND__UNUSED_1 = 1,
TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2,
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE,
TA_DTM_COMMAND__TOPOLOGY_UPDATE_V3
-#else
- TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE
-#endif
};
/* DTM related enumerations */
@@ -91,7 +87,6 @@ struct ta_dtm_topology_update_input_v2 {
uint32_t max_hdcp_supported_version;
};
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
/* For security reason/HW may change value, these encoder type enum values are not HW register values */
/* Security code will check real HW register values and these SW enum values */
enum ta_dtm_encoder_type {
@@ -117,7 +112,7 @@ struct ta_dtm_topology_update_input_v3 {
uint32_t phy_id;
uint32_t link_hdcp_cap;
};
-#endif
+
struct ta_dtm_topology_assr_enable {
uint32_t display_topology_dig_be_index;
};
@@ -131,9 +126,7 @@ struct ta_dtm_topology_assr_enable {
union ta_dtm_cmd_input {
struct ta_dtm_topology_update_input_v2 topology_update_v2;
struct ta_dtm_topology_assr_enable topology_assr_enable;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
struct ta_dtm_topology_update_input_v3 topology_update_v3;
-#endif
};
union ta_dtm_cmd_output {
@@ -313,10 +306,8 @@ enum ta_hdcp2_version {
TA_HDCP2_VERSION_UNKNOWN = 0,
TA_HDCP2_VERSION_2_0 = 20,
TA_HDCP2_VERSION_2_1 = 21,
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
+ TA_HDCP2_VERSION_2_2 = 22,
TA_HDCP2_VERSION_2_3 = 23,
-#endif
- TA_HDCP2_VERSION_2_2 = 22
};
/* input/output structures for HDCP commands */
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index 2197c269e0a7..c590493fd293 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -97,9 +97,7 @@ enum mod_hdcp_status {
MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST,
MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE,
MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE,
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
MOD_HDCP_STATUS_UNSUPPORTED_PSP_VER_FAILURE,
-#endif
};
struct mod_hdcp_displayport {
@@ -123,13 +121,11 @@ enum mod_hdcp_display_state {
MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED
};
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
struct mod_hdcp_psp_caps {
uint8_t dtm_v3_supported;
uint8_t opm_state_query_supported;
};
-#endif
enum mod_hdcp_display_disable_option {
MOD_HDCP_DISPLAY_NOT_DISABLE = 0,
MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION,
@@ -162,9 +158,7 @@ struct mod_hdcp_ddc {
struct mod_hdcp_psp {
void *handle;
void *funcs;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
struct mod_hdcp_psp_caps caps;
-#endif
};
struct mod_hdcp_display_adjustment {
@@ -240,9 +234,7 @@ struct mod_hdcp_display {
uint8_t index;
uint8_t controller;
uint8_t dig_fe;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
uint8_t stream_enc_idx;
-#endif
union {
uint8_t vc_id;
};
@@ -255,11 +247,9 @@ struct mod_hdcp_link {
enum mod_hdcp_operation_mode mode;
uint8_t dig_be;
uint8_t ddc_line;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
uint8_t link_enc_idx;
uint8_t phy_idx;
uint8_t hdcp_supported_informational;
-#endif
union {
struct mod_hdcp_displayport dp;
struct mod_hdcp_hdmi hdmi;
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 5e7331be1c0d..2b00f334e93d 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -87,19 +87,19 @@ struct abm_parameters {
};
static const struct abm_parameters abm_settings_config0[abm_defines_max_level] = {
-// min_red max_red bright_pos dark_pos bright_gain contrast dev min_knee max_knee blStart blRed
- {0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xe0, 0xCCCC, 0xCCCC},
- {0xde, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xdf, 0xCCCC, 0xCCCC},
- {0xb0, 0x50, 0x20, 0x00, 0xc0, 0x88, 0x78, 0x70, 0xa0, 0xCCCC, 0xCCCC},
- {0x82, 0x40, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC},
+// min_red max_red bright_pos dark_pos bright_gain contrast dev min_knee max_knee blRed blStart
+ {0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xe0, 0xf777, 0xcccc},
+ {0xde, 0x85, 0x20, 0x00, 0xe0, 0x90, 0xa8, 0x40, 0xc8, 0xf777, 0xcccc},
+ {0xb0, 0x50, 0x20, 0x00, 0xc0, 0x88, 0x78, 0x70, 0xa0, 0xeeee, 0x9999},
+ {0x82, 0x40, 0x20, 0x00, 0x00, 0xb8, 0xb3, 0x70, 0x70, 0xe333, 0xb333},
};
static const struct abm_parameters abm_settings_config1[abm_defines_max_level] = {
-// min_red max_red bright_pos dark_pos bright_gain contrast dev min_knee max_knee blStart blRed
- {0xf0, 0xd9, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC},
- {0xcd, 0xa5, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC},
- {0x99, 0x65, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC},
- {0x82, 0x4d, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC},
+// min_red max_red bright_pos dark_pos bright_gain contrast dev min_knee max_knee blRed blStart
+ {0xf0, 0xd9, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xcccc, 0xcccc},
+ {0xcd, 0xa5, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xcccc, 0xcccc},
+ {0x99, 0x65, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xcccc, 0xcccc},
+ {0x82, 0x4d, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xcccc, 0xcccc},
};
static const struct abm_parameters * const abm_settings[] = {
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 332b0df53e52..ff1d3d4a6488 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -223,10 +223,12 @@ enum amd_harvest_ip_mask {
};
enum DC_FEATURE_MASK {
- DC_FBC_MASK = 0x1,
- DC_MULTI_MON_PP_MCLK_SWITCH_MASK = 0x2,
- DC_DISABLE_FRACTIONAL_PWM_MASK = 0x4,
- DC_PSR_MASK = 0x8,
+ //Default values can be found in "uint amdgpu_dc_feature_mask"
+ DC_FBC_MASK = (1 << 0), //0x1, disabled by default
+ DC_MULTI_MON_PP_MCLK_SWITCH_MASK = (1 << 1), //0x2, enabled by default
+ DC_DISABLE_FRACTIONAL_PWM_MASK = (1 << 2), //0x4, disabled by default
+ DC_PSR_MASK = (1 << 3), //0x8, disabled by default
+ DC_EDP_NO_POWER_SEQUENCING = (1 << 4), //0x10, disabled by default
};
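For illustration, a feature-mask value such as the amdgpu_dc_feature_mask module parameter would be tested bit by bit; the helper below is a sketch, not existing driver code.

/* Sketch: test an individual DC_FEATURE_MASK bit in a feature-mask value. */
static bool dc_feature_enabled(uint32_t feature_mask, enum DC_FEATURE_MASK bit)
{
	return (feature_mask & bit) != 0;
}

/* e.g. PSR stays disabled unless DC_PSR_MASK (bit 3) is set:
 *	bool psr_on = dc_feature_enabled(amdgpu_dc_feature_mask, DC_PSR_MASK);
 */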
enum DC_DEBUG_MASK {
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_1_sh_mask.h
index 2a622c13817b..59155007c186 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_1_sh_mask.h
@@ -29292,6 +29292,7 @@
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK_MASK 0x00000100L
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC_MASK 0x04000000L
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED_MASK 0x20000000L
@@ -34431,6 +34432,7 @@
#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK__SHIFT 0x8
#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x1a
#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED__SHIFT 0x1d
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h
index 7b2e5833e345..e5fd0121ceff 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h
@@ -33869,6 +33869,7 @@
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK__SHIFT 0x8
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x1a
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED__SHIFT 0x1d
@@ -33879,6 +33880,7 @@
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK_MASK 0x00000100L
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC_MASK 0x04000000L
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED_MASK 0x20000000L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_offset.h
deleted file mode 100644
index dfacc6b5d89d..000000000000
--- a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_offset.h
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * Copyright 2020 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- */
-#ifndef _mp_13_0_1_OFFSET_HEADER
-#define _mp_13_0_1_OFFSET_HEADER
-
-
-
-// addressBlock: mp_SmuMp0_SmnDec
-// base address: 0x0
-#define regMP0_SMN_C2PMSG_32 0x0060
-#define regMP0_SMN_C2PMSG_32_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_33 0x0061
-#define regMP0_SMN_C2PMSG_33_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_34 0x0062
-#define regMP0_SMN_C2PMSG_34_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_35 0x0063
-#define regMP0_SMN_C2PMSG_35_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_36 0x0064
-#define regMP0_SMN_C2PMSG_36_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_37 0x0065
-#define regMP0_SMN_C2PMSG_37_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_38 0x0066
-#define regMP0_SMN_C2PMSG_38_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_39 0x0067
-#define regMP0_SMN_C2PMSG_39_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_40 0x0068
-#define regMP0_SMN_C2PMSG_40_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_41 0x0069
-#define regMP0_SMN_C2PMSG_41_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_42 0x006a
-#define regMP0_SMN_C2PMSG_42_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_43 0x006b
-#define regMP0_SMN_C2PMSG_43_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_44 0x006c
-#define regMP0_SMN_C2PMSG_44_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_45 0x006d
-#define regMP0_SMN_C2PMSG_45_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_46 0x006e
-#define regMP0_SMN_C2PMSG_46_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_47 0x006f
-#define regMP0_SMN_C2PMSG_47_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_48 0x0070
-#define regMP0_SMN_C2PMSG_48_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_49 0x0071
-#define regMP0_SMN_C2PMSG_49_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_50 0x0072
-#define regMP0_SMN_C2PMSG_50_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_51 0x0073
-#define regMP0_SMN_C2PMSG_51_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_52 0x0074
-#define regMP0_SMN_C2PMSG_52_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_53 0x0075
-#define regMP0_SMN_C2PMSG_53_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_54 0x0076
-#define regMP0_SMN_C2PMSG_54_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_55 0x0077
-#define regMP0_SMN_C2PMSG_55_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_56 0x0078
-#define regMP0_SMN_C2PMSG_56_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_57 0x0079
-#define regMP0_SMN_C2PMSG_57_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_58 0x007a
-#define regMP0_SMN_C2PMSG_58_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_59 0x007b
-#define regMP0_SMN_C2PMSG_59_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_60 0x007c
-#define regMP0_SMN_C2PMSG_60_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_61 0x007d
-#define regMP0_SMN_C2PMSG_61_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_62 0x007e
-#define regMP0_SMN_C2PMSG_62_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_63 0x007f
-#define regMP0_SMN_C2PMSG_63_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_64 0x0080
-#define regMP0_SMN_C2PMSG_64_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_65 0x0081
-#define regMP0_SMN_C2PMSG_65_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_66 0x0082
-#define regMP0_SMN_C2PMSG_66_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_67 0x0083
-#define regMP0_SMN_C2PMSG_67_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_68 0x0084
-#define regMP0_SMN_C2PMSG_68_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_69 0x0085
-#define regMP0_SMN_C2PMSG_69_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_70 0x0086
-#define regMP0_SMN_C2PMSG_70_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_71 0x0087
-#define regMP0_SMN_C2PMSG_71_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_72 0x0088
-#define regMP0_SMN_C2PMSG_72_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_73 0x0089
-#define regMP0_SMN_C2PMSG_73_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_74 0x008a
-#define regMP0_SMN_C2PMSG_74_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_75 0x008b
-#define regMP0_SMN_C2PMSG_75_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_76 0x008c
-#define regMP0_SMN_C2PMSG_76_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_77 0x008d
-#define regMP0_SMN_C2PMSG_77_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_78 0x008e
-#define regMP0_SMN_C2PMSG_78_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_79 0x008f
-#define regMP0_SMN_C2PMSG_79_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_80 0x0090
-#define regMP0_SMN_C2PMSG_80_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_81 0x0091
-#define regMP0_SMN_C2PMSG_81_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_82 0x0092
-#define regMP0_SMN_C2PMSG_82_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_83 0x0093
-#define regMP0_SMN_C2PMSG_83_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_84 0x0094
-#define regMP0_SMN_C2PMSG_84_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_85 0x0095
-#define regMP0_SMN_C2PMSG_85_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_86 0x0096
-#define regMP0_SMN_C2PMSG_86_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_87 0x0097
-#define regMP0_SMN_C2PMSG_87_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_88 0x0098
-#define regMP0_SMN_C2PMSG_88_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_89 0x0099
-#define regMP0_SMN_C2PMSG_89_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_90 0x009a
-#define regMP0_SMN_C2PMSG_90_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_91 0x009b
-#define regMP0_SMN_C2PMSG_91_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_92 0x009c
-#define regMP0_SMN_C2PMSG_92_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_93 0x009d
-#define regMP0_SMN_C2PMSG_93_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_94 0x009e
-#define regMP0_SMN_C2PMSG_94_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_95 0x009f
-#define regMP0_SMN_C2PMSG_95_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_96 0x00a0
-#define regMP0_SMN_C2PMSG_96_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_97 0x00a1
-#define regMP0_SMN_C2PMSG_97_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_98 0x00a2
-#define regMP0_SMN_C2PMSG_98_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_99 0x00a3
-#define regMP0_SMN_C2PMSG_99_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_100 0x00a4
-#define regMP0_SMN_C2PMSG_100_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_101 0x00a5
-#define regMP0_SMN_C2PMSG_101_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_102 0x00a6
-#define regMP0_SMN_C2PMSG_102_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_103 0x00a7
-#define regMP0_SMN_C2PMSG_103_BASE_IDX 0
-#define regMP0_SMN_IH_CREDIT 0x00c1
-#define regMP0_SMN_IH_CREDIT_BASE_IDX 0
-#define regMP0_SMN_IH_SW_INT 0x00c2
-#define regMP0_SMN_IH_SW_INT_BASE_IDX 0
-#define regMP0_SMN_IH_SW_INT_CTRL 0x00c3
-#define regMP0_SMN_IH_SW_INT_CTRL_BASE_IDX 0
-
-
-// addressBlock: mp_SmuMp1_SmnDec
-// base address: 0x0
-#define regMP1_SMN_C2PMSG_32 0x0260
-#define regMP1_SMN_C2PMSG_32_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_33 0x0261
-#define regMP1_SMN_C2PMSG_33_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_34 0x0262
-#define regMP1_SMN_C2PMSG_34_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_35 0x0263
-#define regMP1_SMN_C2PMSG_35_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_36 0x0264
-#define regMP1_SMN_C2PMSG_36_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_37 0x0265
-#define regMP1_SMN_C2PMSG_37_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_38 0x0266
-#define regMP1_SMN_C2PMSG_38_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_39 0x0267
-#define regMP1_SMN_C2PMSG_39_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_40 0x0268
-#define regMP1_SMN_C2PMSG_40_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_41 0x0269
-#define regMP1_SMN_C2PMSG_41_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_42 0x026a
-#define regMP1_SMN_C2PMSG_42_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_43 0x026b
-#define regMP1_SMN_C2PMSG_43_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_44 0x026c
-#define regMP1_SMN_C2PMSG_44_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_45 0x026d
-#define regMP1_SMN_C2PMSG_45_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_46 0x026e
-#define regMP1_SMN_C2PMSG_46_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_47 0x026f
-#define regMP1_SMN_C2PMSG_47_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_48 0x0270
-#define regMP1_SMN_C2PMSG_48_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_49 0x0271
-#define regMP1_SMN_C2PMSG_49_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_50 0x0272
-#define regMP1_SMN_C2PMSG_50_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_51 0x0273
-#define regMP1_SMN_C2PMSG_51_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_52 0x0274
-#define regMP1_SMN_C2PMSG_52_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_53 0x0275
-#define regMP1_SMN_C2PMSG_53_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_54 0x0276
-#define regMP1_SMN_C2PMSG_54_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_55 0x0277
-#define regMP1_SMN_C2PMSG_55_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_56 0x0278
-#define regMP1_SMN_C2PMSG_56_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_57 0x0279
-#define regMP1_SMN_C2PMSG_57_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_58 0x027a
-#define regMP1_SMN_C2PMSG_58_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_59 0x027b
-#define regMP1_SMN_C2PMSG_59_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_60 0x027c
-#define regMP1_SMN_C2PMSG_60_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_61 0x027d
-#define regMP1_SMN_C2PMSG_61_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_62 0x027e
-#define regMP1_SMN_C2PMSG_62_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_63 0x027f
-#define regMP1_SMN_C2PMSG_63_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_64 0x0280
-#define regMP1_SMN_C2PMSG_64_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_65 0x0281
-#define regMP1_SMN_C2PMSG_65_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_66 0x0282
-#define regMP1_SMN_C2PMSG_66_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_67 0x0283
-#define regMP1_SMN_C2PMSG_67_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_68 0x0284
-#define regMP1_SMN_C2PMSG_68_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_69 0x0285
-#define regMP1_SMN_C2PMSG_69_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_70 0x0286
-#define regMP1_SMN_C2PMSG_70_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_71 0x0287
-#define regMP1_SMN_C2PMSG_71_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_72 0x0288
-#define regMP1_SMN_C2PMSG_72_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_73 0x0289
-#define regMP1_SMN_C2PMSG_73_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_74 0x028a
-#define regMP1_SMN_C2PMSG_74_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_75 0x028b
-#define regMP1_SMN_C2PMSG_75_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_76 0x028c
-#define regMP1_SMN_C2PMSG_76_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_77 0x028d
-#define regMP1_SMN_C2PMSG_77_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_78 0x028e
-#define regMP1_SMN_C2PMSG_78_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_79 0x028f
-#define regMP1_SMN_C2PMSG_79_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_80 0x0290
-#define regMP1_SMN_C2PMSG_80_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_81 0x0291
-#define regMP1_SMN_C2PMSG_81_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_82 0x0292
-#define regMP1_SMN_C2PMSG_82_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_83 0x0293
-#define regMP1_SMN_C2PMSG_83_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_84 0x0294
-#define regMP1_SMN_C2PMSG_84_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_85 0x0295
-#define regMP1_SMN_C2PMSG_85_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_86 0x0296
-#define regMP1_SMN_C2PMSG_86_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_87 0x0297
-#define regMP1_SMN_C2PMSG_87_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_88 0x0298
-#define regMP1_SMN_C2PMSG_88_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_89 0x0299
-#define regMP1_SMN_C2PMSG_89_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_90 0x029a
-#define regMP1_SMN_C2PMSG_90_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_91 0x029b
-#define regMP1_SMN_C2PMSG_91_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_92 0x029c
-#define regMP1_SMN_C2PMSG_92_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_93 0x029d
-#define regMP1_SMN_C2PMSG_93_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_94 0x029e
-#define regMP1_SMN_C2PMSG_94_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_95 0x029f
-#define regMP1_SMN_C2PMSG_95_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_96 0x02a0
-#define regMP1_SMN_C2PMSG_96_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_97 0x02a1
-#define regMP1_SMN_C2PMSG_97_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_98 0x02a2
-#define regMP1_SMN_C2PMSG_98_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_99 0x02a3
-#define regMP1_SMN_C2PMSG_99_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_100 0x02a4
-#define regMP1_SMN_C2PMSG_100_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_101 0x02a5
-#define regMP1_SMN_C2PMSG_101_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_102 0x02a6
-#define regMP1_SMN_C2PMSG_102_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_103 0x02a7
-#define regMP1_SMN_C2PMSG_103_BASE_IDX 0
-#define regMP1_SMN_IH_CREDIT 0x02c1
-#define regMP1_SMN_IH_CREDIT_BASE_IDX 0
-#define regMP1_SMN_IH_SW_INT 0x02c2
-#define regMP1_SMN_IH_SW_INT_BASE_IDX 0
-#define regMP1_SMN_IH_SW_INT_CTRL 0x02c3
-#define regMP1_SMN_IH_SW_INT_CTRL_BASE_IDX 0
-#define regMP1_SMN_FPS_CNT 0x02c4
-#define regMP1_SMN_FPS_CNT_BASE_IDX 0
-#define regMP1_SMN_EXT_SCRATCH0 0x0340
-#define regMP1_SMN_EXT_SCRATCH0_BASE_IDX 0
-#define regMP1_SMN_EXT_SCRATCH1 0x0341
-#define regMP1_SMN_EXT_SCRATCH1_BASE_IDX 0
-#define regMP1_SMN_EXT_SCRATCH2 0x0342
-#define regMP1_SMN_EXT_SCRATCH2_BASE_IDX 0
-#define regMP1_SMN_EXT_SCRATCH3 0x0343
-#define regMP1_SMN_EXT_SCRATCH3_BASE_IDX 0
-#define regMP1_SMN_EXT_SCRATCH4 0x0344
-#define regMP1_SMN_EXT_SCRATCH4_BASE_IDX 0
-#define regMP1_SMN_EXT_SCRATCH5 0x0345
-#define regMP1_SMN_EXT_SCRATCH5_BASE_IDX 0
-#define regMP1_SMN_EXT_SCRATCH6 0x0346
-#define regMP1_SMN_EXT_SCRATCH6_BASE_IDX 0
-#define regMP1_SMN_EXT_SCRATCH7 0x0347
-#define regMP1_SMN_EXT_SCRATCH7_BASE_IDX 0
-
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_sh_mask.h
deleted file mode 100644
index 2d5e8b58e693..000000000000
--- a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_sh_mask.h
+++ /dev/null
@@ -1,531 +0,0 @@
-/*
- * Copyright 2020 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- */
-#ifndef _mp_13_0_1_SH_MASK_HEADER
-#define _mp_13_0_1_SH_MASK_HEADER
-
-
-// addressBlock: mp_SmuMp0_SmnDec
-//MP0_SMN_C2PMSG_32
-#define MP0_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_33
-#define MP0_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_34
-#define MP0_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_35
-#define MP0_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_36
-#define MP0_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_37
-#define MP0_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_38
-#define MP0_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_39
-#define MP0_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_40
-#define MP0_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_41
-#define MP0_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_42
-#define MP0_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_43
-#define MP0_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_44
-#define MP0_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_45
-#define MP0_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_46
-#define MP0_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_47
-#define MP0_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_48
-#define MP0_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_49
-#define MP0_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_50
-#define MP0_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_51
-#define MP0_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_52
-#define MP0_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_53
-#define MP0_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_54
-#define MP0_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_55
-#define MP0_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_56
-#define MP0_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_57
-#define MP0_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_58
-#define MP0_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_59
-#define MP0_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_60
-#define MP0_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_61
-#define MP0_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_62
-#define MP0_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_63
-#define MP0_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_64
-#define MP0_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_65
-#define MP0_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_66
-#define MP0_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_67
-#define MP0_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_68
-#define MP0_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_69
-#define MP0_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_70
-#define MP0_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_71
-#define MP0_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_72
-#define MP0_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_73
-#define MP0_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_74
-#define MP0_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_75
-#define MP0_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_76
-#define MP0_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_77
-#define MP0_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_78
-#define MP0_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_79
-#define MP0_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_80
-#define MP0_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_81
-#define MP0_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_82
-#define MP0_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_83
-#define MP0_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_84
-#define MP0_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_85
-#define MP0_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_86
-#define MP0_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_87
-#define MP0_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_88
-#define MP0_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_89
-#define MP0_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_90
-#define MP0_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_91
-#define MP0_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_92
-#define MP0_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_93
-#define MP0_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_94
-#define MP0_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_95
-#define MP0_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_96
-#define MP0_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_97
-#define MP0_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_98
-#define MP0_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_99
-#define MP0_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_100
-#define MP0_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_101
-#define MP0_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_102
-#define MP0_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_103
-#define MP0_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_IH_CREDIT
-#define MP0_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
-#define MP0_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
-#define MP0_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
-#define MP0_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
-//MP0_SMN_IH_SW_INT
-#define MP0_SMN_IH_SW_INT__ID__SHIFT 0x0
-#define MP0_SMN_IH_SW_INT__VALID__SHIFT 0x8
-#define MP0_SMN_IH_SW_INT__ID_MASK 0x000000FFL
-#define MP0_SMN_IH_SW_INT__VALID_MASK 0x00000100L
-//MP0_SMN_IH_SW_INT_CTRL
-#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
-#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
-#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
-#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
-
-
-// addressBlock: mp_SmuMp1Pub_CruDec
-//MP1_FIRMWARE_FLAGS
-#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0
-#define MP1_FIRMWARE_FLAGS__RESERVED__SHIFT 0x1
-#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x00000001L
-#define MP1_FIRMWARE_FLAGS__RESERVED_MASK 0xFFFFFFFEL
-
-
-// addressBlock: mp_SmuMp1_SmnDec
-//MP1_SMN_C2PMSG_32
-#define MP1_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_33
-#define MP1_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_34
-#define MP1_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_35
-#define MP1_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_36
-#define MP1_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_37
-#define MP1_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_38
-#define MP1_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_39
-#define MP1_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_40
-#define MP1_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_41
-#define MP1_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_42
-#define MP1_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_43
-#define MP1_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_44
-#define MP1_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_45
-#define MP1_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_46
-#define MP1_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_47
-#define MP1_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_48
-#define MP1_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_49
-#define MP1_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_50
-#define MP1_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_51
-#define MP1_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_52
-#define MP1_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_53
-#define MP1_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_54
-#define MP1_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_55
-#define MP1_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_56
-#define MP1_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_57
-#define MP1_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_58
-#define MP1_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_59
-#define MP1_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_60
-#define MP1_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_61
-#define MP1_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_62
-#define MP1_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_63
-#define MP1_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_64
-#define MP1_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_65
-#define MP1_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_66
-#define MP1_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_67
-#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_68
-#define MP1_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_69
-#define MP1_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_70
-#define MP1_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_71
-#define MP1_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_72
-#define MP1_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_73
-#define MP1_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_74
-#define MP1_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_75
-#define MP1_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_76
-#define MP1_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_77
-#define MP1_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_78
-#define MP1_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_79
-#define MP1_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_80
-#define MP1_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_81
-#define MP1_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_82
-#define MP1_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_83
-#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_84
-#define MP1_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_85
-#define MP1_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_86
-#define MP1_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_87
-#define MP1_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_88
-#define MP1_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_89
-#define MP1_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_90
-#define MP1_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_91
-#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_92
-#define MP1_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_93
-#define MP1_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_94
-#define MP1_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_95
-#define MP1_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_96
-#define MP1_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_97
-#define MP1_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_98
-#define MP1_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_99
-#define MP1_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_100
-#define MP1_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_101
-#define MP1_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_102
-#define MP1_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_103
-#define MP1_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_IH_CREDIT
-#define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
-#define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
-#define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
-#define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
-//MP1_SMN_IH_SW_INT
-#define MP1_SMN_IH_SW_INT__ID__SHIFT 0x0
-#define MP1_SMN_IH_SW_INT__VALID__SHIFT 0x8
-#define MP1_SMN_IH_SW_INT__ID_MASK 0x000000FFL
-#define MP1_SMN_IH_SW_INT__VALID_MASK 0x00000100L
-//MP1_SMN_IH_SW_INT_CTRL
-#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
-#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
-#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
-#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
-//MP1_SMN_FPS_CNT
-#define MP1_SMN_FPS_CNT__COUNT__SHIFT 0x0
-#define MP1_SMN_FPS_CNT__COUNT_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH0
-#define MP1_SMN_EXT_SCRATCH0__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH0__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH1
-#define MP1_SMN_EXT_SCRATCH1__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH1__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH2
-#define MP1_SMN_EXT_SCRATCH2__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH2__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH3
-#define MP1_SMN_EXT_SCRATCH3__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH3__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH4
-#define MP1_SMN_EXT_SCRATCH4__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH4__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH5
-#define MP1_SMN_EXT_SCRATCH5__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH5__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH6
-#define MP1_SMN_EXT_SCRATCH6__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH6__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH7
-#define MP1_SMN_EXT_SCRATCH7__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL
-
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 9f73a2f586d8..e38b191c7b7c 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -560,7 +560,7 @@ struct gpu_metrics_v1_2 {
uint16_t current_vclk1;
uint16_t current_dclk1;
- /* Throttle status */
+ /* Throttle status (ASIC dependent) */
uint32_t throttle_status;
/* Fans */
@@ -648,6 +648,9 @@ struct gpu_metrics_v1_3 {
uint16_t voltage_mem;
uint16_t padding1;
+
+ /* Throttle status (ASIC independent) */
+ uint64_t indep_throttle_status;
};
/*
@@ -754,4 +757,57 @@ struct gpu_metrics_v2_1 {
uint16_t padding[3];
};
+struct gpu_metrics_v2_2 {
+ struct metrics_table_header common_header;
+
+ /* Temperature */
+ uint16_t temperature_gfx; // gfx temperature on APUs
+ uint16_t temperature_soc; // soc temperature on APUs
+ uint16_t temperature_core[8]; // CPU core temperature on APUs
+ uint16_t temperature_l3[2];
+
+ /* Utilization */
+ uint16_t average_gfx_activity;
+ uint16_t average_mm_activity; // UVD or VCN
+
+ /* Driver attached timestamp (in ns) */
+ uint64_t system_clock_counter;
+
+ /* Power/Energy */
+ uint16_t average_socket_power; // dGPU + APU power on A + A platform
+ uint16_t average_cpu_power;
+ uint16_t average_soc_power;
+ uint16_t average_gfx_power;
+ uint16_t average_core_power[8]; // CPU core power on APUs
+
+ /* Average clocks */
+ uint16_t average_gfxclk_frequency;
+ uint16_t average_socclk_frequency;
+ uint16_t average_uclk_frequency;
+ uint16_t average_fclk_frequency;
+ uint16_t average_vclk_frequency;
+ uint16_t average_dclk_frequency;
+
+ /* Current clocks */
+ uint16_t current_gfxclk;
+ uint16_t current_socclk;
+ uint16_t current_uclk;
+ uint16_t current_fclk;
+ uint16_t current_vclk;
+ uint16_t current_dclk;
+ uint16_t current_coreclk[8]; // CPU core clocks
+ uint16_t current_l3clk[2];
+
+ /* Throttle status (ASIC dependent) */
+ uint32_t throttle_status;
+
+ /* Fans */
+ uint16_t fan_pwm;
+
+ uint16_t padding[3];
+
+ /* Throttle status (ASIC independent) */
+ uint64_t indep_throttle_status;
+};
+
#endif
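The gpu_metrics_v2_2 block added above is versioned through the common metrics_table_header, so a reader must check the format/content revision before interpreting the blob as the new layout (which is the only one carrying indep_throttle_status). A minimal user-space sketch of that check follows; the sysfs path and the 4-byte header layout (structure_size, format_revision, content_revision) are assumptions for illustration, not taken from this diff.

/* Hedged sketch: validate the gpu_metrics header before treating the
 * payload as gpu_metrics_v2_2. Path and header layout are assumed. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct metrics_header {
	uint16_t structure_size;
	uint8_t  format_revision;
	uint8_t  content_revision;
};

int main(void)
{
	uint8_t blob[4096];
	struct metrics_header hdr;
	size_t n;
	FILE *f = fopen("/sys/class/drm/card0/device/gpu_metrics", "rb");

	if (!f)
		return 1;
	n = fread(blob, 1, sizeof(blob), f);
	fclose(f);
	if (n < sizeof(hdr))
		return 1;
	memcpy(&hdr, blob, sizeof(hdr));
	/* indep_throttle_status only exists from format 2, content 2 onwards */
	if (hdr.format_revision == 2 && hdr.content_revision >= 2)
		printf("gpu_metrics v2_2+ blob, %u bytes\n", hdr.structure_size);
	return 0;
}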
diff --git a/drivers/gpu/drm/amd/include/navi10_enum.h b/drivers/gpu/drm/amd/include/navi10_enum.h
index d5ead9680c6e..84bcb96f76ea 100644
--- a/drivers/gpu/drm/amd/include/navi10_enum.h
+++ b/drivers/gpu/drm/amd/include/navi10_enum.h
@@ -430,7 +430,7 @@ ARRAY_2D_DEPTH = 0x00000001,
*/
typedef enum ENUM_NUM_SIMD_PER_CU {
-NUM_SIMD_PER_CU = 0x00000004,
+NUM_SIMD_PER_CU = 0x00000002,
} ENUM_NUM_SIMD_PER_CU;
/*
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index b2335a1d3f98..769f58d5ae1a 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -411,7 +411,8 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
struct pp_states_info data;
- int i, buf_len, ret;
+ uint32_t i;
+ int buf_len, ret;
if (amdgpu_in_reset(adev))
return -EPERM;
@@ -433,9 +434,9 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
- buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
+ buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
for (i = 0; i < data.nums; i++)
- buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
+ buf_len += sysfs_emit_at(buf, buf_len, "%d %s\n", i,
(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
@@ -923,7 +924,7 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf+size);
} else {
- size = snprintf(buf, PAGE_SIZE, "\n");
+ size = sysfs_emit(buf, "\n");
}
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -1009,7 +1010,7 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
if (adev->powerplay.pp_funcs->get_ppfeature_status)
size = amdgpu_dpm_get_ppfeature_status(adev, buf);
else
- size = snprintf(buf, PAGE_SIZE, "\n");
+ size = sysfs_emit(buf, "\n");
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -1070,7 +1071,7 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
if (adev->powerplay.pp_funcs->print_clock_levels)
size = amdgpu_dpm_print_clock_levels(adev, type, buf);
else
- size = snprintf(buf, PAGE_SIZE, "\n");
+ size = sysfs_emit(buf, "\n");
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -1469,7 +1470,7 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
if (adev->powerplay.pp_funcs->get_power_profile_mode)
size = amdgpu_dpm_get_power_profile_mode(adev, buf);
else
- size = snprintf(buf, PAGE_SIZE, "\n");
+ size = sysfs_emit(buf, "\n");
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -2901,14 +2902,15 @@ static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
return sprintf(buf, "%i\n", 0);
}
-static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+
+static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
+ struct device_attribute *attr,
+ char *buf,
+ enum pp_power_limit_level pp_limit_level)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
- enum pp_power_limit_level pp_limit_level = PP_PWR_LIMIT_MAX;
uint32_t limit;
ssize_t size;
int r;
@@ -2918,22 +2920,22 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
+ if (!(pp_funcs && pp_funcs->get_power_limit))
+ return -ENODATA;
+
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
- if (pp_funcs && pp_funcs->get_power_limit)
- r = pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit,
- pp_limit_level, power_type);
- else
- r = -ENODATA;
+ r = pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit,
+ pp_limit_level, power_type);
if (!r)
- size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+ size = sysfs_emit(buf, "%u\n", limit * 1000000);
else
- size = snprintf(buf, PAGE_SIZE, "\n");
+ size = sysfs_emit(buf, "\n");
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -2941,85 +2943,31 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
return size;
}
-static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
+
+static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
- enum pp_power_limit_level pp_limit_level = PP_PWR_LIMIT_CURRENT;
- uint32_t limit;
- ssize_t size;
- int r;
+ return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MAX);
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return r;
- }
-
- if (pp_funcs && pp_funcs->get_power_limit)
- r = pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit,
- pp_limit_level, power_type);
- else
- r = -ENODATA;
-
- if (!r)
- size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
- else
- size = snprintf(buf, PAGE_SIZE, "\n");
+}
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_CURRENT);
- return size;
}
static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
- enum pp_power_limit_level pp_limit_level = PP_PWR_LIMIT_DEFAULT;
- uint32_t limit;
- ssize_t size;
- int r;
-
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
+ return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_DEFAULT);
- r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return r;
- }
-
- if (pp_funcs && pp_funcs->get_power_limit)
- r = pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit,
- pp_limit_level, power_type);
- else
- r = -ENODATA;
-
- if (!r)
- size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
- else
- size = snprintf(buf, PAGE_SIZE, "\n");
-
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-
- return size;
}
+
static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
struct device_attribute *attr,
char *buf)
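The amdgpu_pm.c hunks above replace open-coded snprintf(buf, PAGE_SIZE, ...) with sysfs_emit()/sysfs_emit_at(), which bound the output to PAGE_SIZE internally and warn if the buffer is not the page-aligned one sysfs hands to show() callbacks, so callers no longer carry the size argument. A minimal sketch of the resulting pattern, with made-up attribute data purely for illustration:

/* Hedged sketch of a sysfs show() callback using the sysfs_emit() helpers;
 * the values printed here are placeholders, not real driver state. */
#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_states_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	int i, len;

	len = sysfs_emit(buf, "states: %d\n", 3);
	for (i = 0; i < 3; i++)
		len += sysfs_emit_at(buf, len, "%d placeholder\n", i);

	return len;
}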
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
index 2f7f688424aa..3e89852e4820 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
@@ -35,6 +35,48 @@
#define SMU_DPM_USER_PROFILE_RESTORE (1 << 0)
+// Power Throttlers
+#define SMU_THROTTLER_PPT0_BIT 0
+#define SMU_THROTTLER_PPT1_BIT 1
+#define SMU_THROTTLER_PPT2_BIT 2
+#define SMU_THROTTLER_PPT3_BIT 3
+#define SMU_THROTTLER_SPL_BIT 4
+#define SMU_THROTTLER_FPPT_BIT 5
+#define SMU_THROTTLER_SPPT_BIT 6
+#define SMU_THROTTLER_SPPT_APU_BIT 7
+
+// Current Throttlers
+#define SMU_THROTTLER_TDC_GFX_BIT 16
+#define SMU_THROTTLER_TDC_SOC_BIT 17
+#define SMU_THROTTLER_TDC_MEM_BIT 18
+#define SMU_THROTTLER_TDC_VDD_BIT 19
+#define SMU_THROTTLER_TDC_CVIP_BIT 20
+#define SMU_THROTTLER_EDC_CPU_BIT 21
+#define SMU_THROTTLER_EDC_GFX_BIT 22
+#define SMU_THROTTLER_APCC_BIT 23
+
+// Temperature
+#define SMU_THROTTLER_TEMP_GPU_BIT 32
+#define SMU_THROTTLER_TEMP_CORE_BIT 33
+#define SMU_THROTTLER_TEMP_MEM_BIT 34
+#define SMU_THROTTLER_TEMP_EDGE_BIT 35
+#define SMU_THROTTLER_TEMP_HOTSPOT_BIT 36
+#define SMU_THROTTLER_TEMP_SOC_BIT 37
+#define SMU_THROTTLER_TEMP_VR_GFX_BIT 38
+#define SMU_THROTTLER_TEMP_VR_SOC_BIT 39
+#define SMU_THROTTLER_TEMP_VR_MEM0_BIT 40
+#define SMU_THROTTLER_TEMP_VR_MEM1_BIT 41
+#define SMU_THROTTLER_TEMP_LIQUID0_BIT 42
+#define SMU_THROTTLER_TEMP_LIQUID1_BIT 43
+#define SMU_THROTTLER_VRHOT0_BIT 44
+#define SMU_THROTTLER_VRHOT1_BIT 45
+#define SMU_THROTTLER_PROCHOT_CPU_BIT 46
+#define SMU_THROTTLER_PROCHOT_GFX_BIT 47
+
+// Other
+#define SMU_THROTTLER_PPM_BIT 56
+#define SMU_THROTTLER_FIT_BIT 57
+
struct smu_hw_power_state {
unsigned int magic;
};
@@ -723,7 +765,10 @@ struct pptable_funcs {
/**
* @get_power_limit: Get the device's power limits.
*/
- int (*get_power_limit)(struct smu_context *smu);
+ int (*get_power_limit)(struct smu_context *smu,
+ uint32_t *current_power_limit,
+ uint32_t *default_power_limit,
+ uint32_t *max_power_limit);
/**
* @get_ppt_limit: Get the device's ppt limits.
@@ -932,7 +977,9 @@ struct pptable_funcs {
* @disable_all_features_with_exception: Disable all features with
* exception to those in &mask.
*/
- int (*disable_all_features_with_exception)(struct smu_context *smu, enum smu_feature_mask mask);
+ int (*disable_all_features_with_exception)(struct smu_context *smu,
+ bool no_hw_disablement,
+ enum smu_feature_mask mask);
/**
* @notify_display_change: Enable fast memory clock switching.
@@ -1185,6 +1232,12 @@ struct pptable_funcs {
*/
int (*wait_for_event)(struct smu_context *smu,
enum smu_event_type event, uint64_t event_arg);
+
+ /**
+ * @send_hbm_bad_pages_num: message the SMU to update the bad page
+ * count in the SMUBUS table.
+ */
+ int (*send_hbm_bad_pages_num)(struct smu_context *smu, uint32_t size);
};
typedef enum {
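The SMU_THROTTLER_*_BIT values introduced above define a single ASIC-independent 64-bit throttle layout; each ASIC then contributes a small lookup table (arcturus_throttler_map and navi1x_throttler_map later in this series) that remaps its native 32-bit ThrottlerStatus into that layout. The diff routes this through smu_cmn_get_indep_throttler_status(); the sketch below only illustrates the remapping idea and is not that helper's source.

/* Hedged sketch: translate a native throttler-status word into the
 * ASIC-independent 64-bit layout using a per-ASIC bit-position map.
 * A complete implementation would also skip native bits that have no
 * entry in the map rather than defaulting them to bit 0. */
#include <stdint.h>
#include <stddef.h>

static uint64_t map_throttler_status(uint32_t native_status,
				     const uint8_t *map, size_t map_len)
{
	uint64_t indep_status = 0;
	size_t bit;

	for (bit = 0; bit < map_len && bit < 32; bit++) {
		if (native_status & (1U << bit))
			indep_status |= 1ULL << map[bit];
	}
	return indep_status;
}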
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
index a3b28979bc82..1962a5877191 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
@@ -244,6 +244,9 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
int smu_v11_0_baco_enter(struct smu_context *smu);
int smu_v11_0_baco_exit(struct smu_context *smu);
+int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu,
+ enum smu_v11_0_baco_seq baco_seq);
+
int smu_v11_0_mode1_reset(struct smu_context *smu);
int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
index 6119a36b2cba..3fea2430dec0 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
@@ -26,6 +26,7 @@
#include "amdgpu_smu.h"
#define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
+#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x03
#define SMU13_DRIVER_IF_VERSION_ALDE 0x07
/* MP Apertures */
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index b4ea8b233240..ebe672142808 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -688,7 +688,10 @@ static int smu_late_init(void *handle)
return ret;
}
- ret = smu_get_asic_power_limits(smu);
+ ret = smu_get_asic_power_limits(smu,
+ &smu->current_power_limit,
+ &smu->default_power_limit,
+ &smu->max_power_limit);
if (ret) {
dev_err(adev->dev, "Failed to get asic power limits!\n");
return ret;
@@ -1379,15 +1382,20 @@ static int smu_disable_dpms(struct smu_context *smu)
if (smu->uploading_custom_pp_table &&
(adev->asic_type >= CHIP_NAVI10) &&
(adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
- return 0;
+ return smu_disable_all_features_with_exception(smu,
+ true,
+ SMU_FEATURE_COUNT);
/*
* For Sienna_Cichlid, PMFW will handle the features disablement properly
* on BACO in. Driver involvement is unnecessary.
*/
- if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+ if (((adev->asic_type == CHIP_SIENNA_CICHLID) ||
+ ((adev->asic_type >= CHIP_NAVI10) && (adev->asic_type <= CHIP_NAVI12))) &&
use_baco)
- return 0;
+ return smu_disable_all_features_with_exception(smu,
+ true,
+ SMU_FEATURE_BACO_BIT);
/*
* For gpu reset, runpm and hibernation through BACO,
@@ -1395,6 +1403,7 @@ static int smu_disable_dpms(struct smu_context *smu)
*/
if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
ret = smu_disable_all_features_with_exception(smu,
+ false,
SMU_FEATURE_BACO_BIT);
if (ret)
dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
@@ -1444,10 +1453,14 @@ static int smu_hw_fini(void *handle)
if (smu->is_apu) {
smu_powergate_sdma(&adev->smu, true);
- smu_dpm_set_vcn_enable(smu, false);
- smu_dpm_set_jpeg_enable(smu, false);
}
+ smu_dpm_set_vcn_enable(smu, false);
+ smu_dpm_set_jpeg_enable(smu, false);
+
+ adev->vcn.cur_state = AMD_PG_STATE_GATE;
+ adev->jpeg.cur_state = AMD_PG_STATE_GATE;
+
if (!smu->pm_enabled)
return 0;
@@ -2232,6 +2245,15 @@ int smu_get_power_limit(void *handle,
} else {
switch (limit_level) {
case SMU_PPT_LIMIT_CURRENT:
+ if ((smu->adev->asic_type == CHIP_ALDEBARAN) ||
+ (smu->adev->asic_type == CHIP_SIENNA_CICHLID) ||
+ (smu->adev->asic_type == CHIP_NAVY_FLOUNDER) ||
+ (smu->adev->asic_type == CHIP_DIMGREY_CAVEFISH) ||
+ (smu->adev->asic_type == CHIP_BEIGE_GOBY))
+ ret = smu_get_asic_power_limits(smu,
+ &smu->current_power_limit,
+ NULL,
+ NULL);
*limit = smu->current_power_limit;
break;
case SMU_PPT_LIMIT_DEFAULT:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 5959019f51ad..094df6f87cfc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -211,6 +211,26 @@ static const struct cmn2asic_mapping arcturus_workload_map[PP_SMC_POWER_PROFILE_
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};
+static const uint8_t arcturus_throttler_map[] = {
+ [THROTTLER_TEMP_EDGE_BIT] = (SMU_THROTTLER_TEMP_EDGE_BIT),
+ [THROTTLER_TEMP_HOTSPOT_BIT] = (SMU_THROTTLER_TEMP_HOTSPOT_BIT),
+ [THROTTLER_TEMP_MEM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT),
+ [THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT),
+ [THROTTLER_TEMP_VR_MEM_BIT] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT),
+ [THROTTLER_TEMP_VR_SOC_BIT] = (SMU_THROTTLER_TEMP_VR_SOC_BIT),
+ [THROTTLER_TDC_GFX_BIT] = (SMU_THROTTLER_TDC_GFX_BIT),
+ [THROTTLER_TDC_SOC_BIT] = (SMU_THROTTLER_TDC_SOC_BIT),
+ [THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT),
+ [THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT),
+ [THROTTLER_PPT2_BIT] = (SMU_THROTTLER_PPT2_BIT),
+ [THROTTLER_PPT3_BIT] = (SMU_THROTTLER_PPT3_BIT),
+ [THROTTLER_PPM_BIT] = (SMU_THROTTLER_PPM_BIT),
+ [THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT),
+ [THROTTLER_APCC_BIT] = (SMU_THROTTLER_APCC_BIT),
+ [THROTTLER_VRHOT0_BIT] = (SMU_THROTTLER_VRHOT0_BIT),
+ [THROTTLER_VRHOT1_BIT] = (SMU_THROTTLER_VRHOT1_BIT),
+};
+
static int arcturus_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
@@ -237,7 +257,7 @@ static int arcturus_tables_init(struct smu_context *smu)
return -ENOMEM;
smu_table->metrics_time = 0;
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_1);
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table) {
kfree(smu_table->metrics_table);
@@ -1174,7 +1194,10 @@ static int arcturus_get_fan_parameters(struct smu_context *smu)
return 0;
}
-static int arcturus_get_power_limit(struct smu_context *smu)
+static int arcturus_get_power_limit(struct smu_context *smu,
+ uint32_t *current_power_limit,
+ uint32_t *default_power_limit,
+ uint32_t *max_power_limit)
{
struct smu_11_0_powerplay_table *powerplay_table =
(struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
@@ -1190,17 +1213,24 @@ static int arcturus_get_power_limit(struct smu_context *smu)
power_limit =
pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
}
- smu->current_power_limit = smu->default_power_limit = power_limit;
- if (smu->od_enabled) {
- od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ if (current_power_limit)
+ *current_power_limit = power_limit;
+ if (default_power_limit)
+ *default_power_limit = power_limit;
+
+ if (max_power_limit) {
+ if (smu->od_enabled) {
+ od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);
+ dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);
+
+ power_limit *= (100 + od_percent);
+ power_limit /= 100;
+ }
- power_limit *= (100 + od_percent);
- power_limit /= 100;
+ *max_power_limit = power_limit;
}
- smu->max_power_limit = power_limit;
return 0;
}
@@ -2278,8 +2308,8 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu,
void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_1 *gpu_metrics =
- (struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+ struct gpu_metrics_v1_3 *gpu_metrics =
+ (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
SmuMetrics_t metrics;
int ret = 0;
@@ -2289,7 +2319,7 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu,
if (ret)
return ret;
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
gpu_metrics->temperature_edge = metrics.TemperatureEdge;
gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2318,6 +2348,9 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+ gpu_metrics->indep_throttle_status =
+ smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
+ arcturus_throttler_map);
gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
@@ -2330,7 +2363,7 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu,
*table = (void *)gpu_metrics;
- return sizeof(struct gpu_metrics_v1_1);
+ return sizeof(struct gpu_metrics_v1_3);
}
static const struct pptable_funcs arcturus_ppt_funcs = {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 74a8c676e22c..1ba42b69ce74 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -80,10 +80,10 @@ static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 0),
MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0),
MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0),
- MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow, 1),
- MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh, 1),
- MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow, 1),
- MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 1),
+ MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow, 0),
+ MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh, 0),
+ MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow, 0),
+ MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 0),
MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1),
MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1),
MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1),
@@ -238,6 +238,28 @@ static struct cmn2asic_mapping navi10_workload_map[PP_SMC_POWER_PROFILE_COUNT] =
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};
+static const uint8_t navi1x_throttler_map[] = {
+ [THROTTLER_TEMP_EDGE_BIT] = (SMU_THROTTLER_TEMP_EDGE_BIT),
+ [THROTTLER_TEMP_HOTSPOT_BIT] = (SMU_THROTTLER_TEMP_HOTSPOT_BIT),
+ [THROTTLER_TEMP_MEM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT),
+ [THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT),
+ [THROTTLER_TEMP_VR_MEM0_BIT] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT),
+ [THROTTLER_TEMP_VR_MEM1_BIT] = (SMU_THROTTLER_TEMP_VR_MEM1_BIT),
+ [THROTTLER_TEMP_VR_SOC_BIT] = (SMU_THROTTLER_TEMP_VR_SOC_BIT),
+ [THROTTLER_TEMP_LIQUID0_BIT] = (SMU_THROTTLER_TEMP_LIQUID0_BIT),
+ [THROTTLER_TEMP_LIQUID1_BIT] = (SMU_THROTTLER_TEMP_LIQUID1_BIT),
+ [THROTTLER_TDC_GFX_BIT] = (SMU_THROTTLER_TDC_GFX_BIT),
+ [THROTTLER_TDC_SOC_BIT] = (SMU_THROTTLER_TDC_SOC_BIT),
+ [THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT),
+ [THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT),
+ [THROTTLER_PPT2_BIT] = (SMU_THROTTLER_PPT2_BIT),
+ [THROTTLER_PPT3_BIT] = (SMU_THROTTLER_PPT3_BIT),
+ [THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT),
+ [THROTTLER_PPM_BIT] = (SMU_THROTTLER_PPM_BIT),
+ [THROTTLER_APCC_BIT] = (SMU_THROTTLER_APCC_BIT),
+};
+
+
static bool is_asic_secure(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -446,30 +468,6 @@ static int navi10_store_powerplay_table(struct smu_context *smu)
return 0;
}
-static int navi10_set_mp1_state(struct smu_context *smu,
- enum pp_mp1_state mp1_state)
-{
- struct amdgpu_device *adev = smu->adev;
- uint32_t mp1_fw_flags;
- int ret = 0;
-
- ret = smu_cmn_set_mp1_state(smu, mp1_state);
- if (ret)
- return ret;
-
- if (mp1_state == PP_MP1_STATE_UNLOAD) {
- mp1_fw_flags = RREG32_PCIE(MP1_Public |
- (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
-
- mp1_fw_flags &= ~MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK;
-
- WREG32_PCIE(MP1_Public |
- (smnMP1_FIRMWARE_FLAGS & 0xffffffff), mp1_fw_flags);
- }
-
- return 0;
-}
-
static int navi10_setup_pptable(struct smu_context *smu)
{
int ret = 0;
@@ -2138,7 +2136,10 @@ static int navi10_display_disable_memory_clock_switch(struct smu_context *smu,
return ret;
}
-static int navi10_get_power_limit(struct smu_context *smu)
+static int navi10_get_power_limit(struct smu_context *smu,
+ uint32_t *current_power_limit,
+ uint32_t *default_power_limit,
+ uint32_t *max_power_limit)
{
struct smu_11_0_powerplay_table *powerplay_table =
(struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
@@ -2155,18 +2156,25 @@ static int navi10_get_power_limit(struct smu_context *smu)
power_limit =
pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
}
- smu->current_power_limit = smu->default_power_limit = power_limit;
- if (smu->od_enabled &&
- navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
- od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ if (current_power_limit)
+ *current_power_limit = power_limit;
+ if (default_power_limit)
+ *default_power_limit = power_limit;
- dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);
+ if (max_power_limit) {
+ if (smu->od_enabled &&
+ navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
+ od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- power_limit *= (100 + od_percent);
- power_limit /= 100;
+ dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);
+
+ power_limit *= (100 + od_percent);
+ power_limit /= 100;
+ }
+
+ *max_power_limit = power_limit;
}
- smu->max_power_limit = power_limit;
return 0;
}
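The reworked ->get_power_limit() no longer writes smu->current_power_limit and friends itself; callers hand in out-pointers and may pass NULL for any value they do not need (see the smu_get_asic_power_limits() macro change near the end of this patch). A minimal sketch of a caller of the new signature; the wrapper name and locals below are illustrative, not taken from this patch:

/* Sketch only: consuming the reworked hook from a hypothetical common-layer helper. */
static int example_refresh_power_limits(struct smu_context *smu)
{
	uint32_t current_limit = 0, default_limit = 0, max_limit = 0;
	int ret;

	if (!smu->ppt_funcs->get_power_limit)
		return -EOPNOTSUPP;

	/* Any of the three out-pointers may be NULL if the value is not wanted. */
	ret = smu->ppt_funcs->get_power_limit(smu, &current_limit,
					      &default_limit, &max_limit);
	if (ret)
		return ret;

	smu->current_power_limit = current_limit;
	smu->default_power_limit = default_limit;
	smu->max_power_limit = max_limit;

	return 0;
}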
@@ -2257,6 +2265,29 @@ static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
return 0;
}
+static int navi10_baco_enter(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->in_runpm)
+ return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
+ else
+ return smu_v11_0_baco_enter(smu);
+}
+
+static int navi10_baco_exit(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->in_runpm) {
+ /* Wait for the PMFW to handle the Dstate change */

+ msleep(10);
+ return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+ } else {
+ return smu_v11_0_baco_exit(smu);
+ }
+}
+
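With these wrappers, runtime power management no longer performs a full BACO cycle: when adev->in_runpm is set, the driver only arms the PMFW D3 sequence (BACO_SEQ_BACO on the way down, BACO_SEQ_ULPS on the way back up) and lets the platform drive the actual state change. A condensed sketch of that decision; the helper name is illustrative:

/* Sketch only: the runtime-PM vs. full-BACO decision made by the wrappers above. */
static int example_baco_transition(struct smu_context *smu, bool enter)
{
	struct amdgpu_device *adev = smu->adev;

	if (adev->in_runpm) {
		if (enter)
			return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);

		/* Give the PMFW time to process the Dstate change before re-arming. */
		msleep(10);
		return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
	}

	return enter ? smu_v11_0_baco_enter(smu) : smu_v11_0_baco_exit(smu);
}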
static int navi10_set_default_od_settings(struct smu_context *smu)
{
OverDriveTable_t *od_table =
@@ -2676,6 +2707,9 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+ gpu_metrics->indep_throttle_status =
+ smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
+ navi1x_throttler_map);
gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
@@ -2753,6 +2787,9 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+ gpu_metrics->indep_throttle_status =
+ smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
+ navi1x_throttler_map);
gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
@@ -2829,6 +2866,9 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+ gpu_metrics->indep_throttle_status =
+ smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
+ navi1x_throttler_map);
gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
@@ -2911,6 +2951,9 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+ gpu_metrics->indep_throttle_status =
+ smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
+ navi1x_throttler_map);
gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
@@ -3095,8 +3138,8 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.baco_is_support = smu_v11_0_baco_is_support,
.baco_get_state = smu_v11_0_baco_get_state,
.baco_set_state = smu_v11_0_baco_set_state,
- .baco_enter = smu_v11_0_baco_enter,
- .baco_exit = smu_v11_0_baco_exit,
+ .baco_enter = navi10_baco_enter,
+ .baco_exit = navi10_baco_exit,
.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
.set_default_od_settings = navi10_set_default_od_settings,
@@ -3112,7 +3155,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.get_fan_parameters = navi10_get_fan_parameters,
.post_init = navi10_post_smu_init,
.interrupt_work = smu_v11_0_interrupt_work,
- .set_mp1_state = navi10_set_mp1_state,
+ .set_mp1_state = smu_cmn_set_mp1_state,
};
void navi10_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index f01e919e1f89..c751f717a0da 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -239,6 +239,27 @@ static struct cmn2asic_mapping sienna_cichlid_workload_map[PP_SMC_POWER_PROFILE_
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};
+static const uint8_t sienna_cichlid_throttler_map[] = {
+ [THROTTLER_TEMP_EDGE_BIT] = (SMU_THROTTLER_TEMP_EDGE_BIT),
+ [THROTTLER_TEMP_HOTSPOT_BIT] = (SMU_THROTTLER_TEMP_HOTSPOT_BIT),
+ [THROTTLER_TEMP_MEM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT),
+ [THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT),
+ [THROTTLER_TEMP_VR_MEM0_BIT] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT),
+ [THROTTLER_TEMP_VR_MEM1_BIT] = (SMU_THROTTLER_TEMP_VR_MEM1_BIT),
+ [THROTTLER_TEMP_VR_SOC_BIT] = (SMU_THROTTLER_TEMP_VR_SOC_BIT),
+ [THROTTLER_TEMP_LIQUID0_BIT] = (SMU_THROTTLER_TEMP_LIQUID0_BIT),
+ [THROTTLER_TEMP_LIQUID1_BIT] = (SMU_THROTTLER_TEMP_LIQUID1_BIT),
+ [THROTTLER_TDC_GFX_BIT] = (SMU_THROTTLER_TDC_GFX_BIT),
+ [THROTTLER_TDC_SOC_BIT] = (SMU_THROTTLER_TDC_SOC_BIT),
+ [THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT),
+ [THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT),
+ [THROTTLER_PPT2_BIT] = (SMU_THROTTLER_PPT2_BIT),
+ [THROTTLER_PPT3_BIT] = (SMU_THROTTLER_PPT3_BIT),
+ [THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT),
+ [THROTTLER_PPM_BIT] = (SMU_THROTTLER_PPM_BIT),
+ [THROTTLER_APCC_BIT] = (SMU_THROTTLER_APCC_BIT),
+};
+
static int
sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu,
uint32_t *feature_mask, uint32_t num)
@@ -449,7 +470,7 @@ static int sienna_cichlid_tables_init(struct smu_context *smu)
goto err0_out;
smu_table->metrics_time = 0;
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_1);
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table)
goto err1_out;
@@ -1770,7 +1791,10 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
return ret;
}
-static int sienna_cichlid_get_power_limit(struct smu_context *smu)
+static int sienna_cichlid_get_power_limit(struct smu_context *smu,
+ uint32_t *current_power_limit,
+ uint32_t *default_power_limit,
+ uint32_t *max_power_limit)
{
struct smu_11_0_7_powerplay_table *powerplay_table =
(struct smu_11_0_7_powerplay_table *)smu->smu_table.power_play_table;
@@ -1783,17 +1807,23 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu)
power_limit =
table_member[PPT_THROTTLER_PPT0];
}
- smu->current_power_limit = smu->default_power_limit = power_limit;
- if (smu->od_enabled) {
- od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+ if (current_power_limit)
+ *current_power_limit = power_limit;
+ if (default_power_limit)
+ *default_power_limit = power_limit;
+
+ if (max_power_limit) {
+ if (smu->od_enabled) {
+ od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
- dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);
+ dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);
- power_limit *= (100 + od_percent);
- power_limit /= 100;
+ power_limit *= (100 + od_percent);
+ power_limit /= 100;
+ }
+ *max_power_limit = power_limit;
}
- smu->max_power_limit = power_limit;
return 0;
}
@@ -2100,6 +2130,29 @@ static int sienna_cichlid_run_btc(struct smu_context *smu)
return smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
}
+static int sienna_cichlid_baco_enter(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->in_runpm)
+ return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
+ else
+ return smu_v11_0_baco_enter(smu);
+}
+
+static int sienna_cichlid_baco_exit(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->in_runpm) {
+ /* Wait for the PMFW to handle the Dstate change */
+ msleep(10);
+ return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+ } else {
+ return smu_v11_0_baco_exit(smu);
+ }
+}
+
static bool sienna_cichlid_is_mode1_reset_supported(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -3620,8 +3673,8 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_1 *gpu_metrics =
- (struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+ struct gpu_metrics_v1_3 *gpu_metrics =
+ (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
SmuMetricsExternal_t metrics_external;
SmuMetrics_t *metrics =
&(metrics_external.SmuMetrics);
@@ -3635,7 +3688,7 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
if (ret)
return ret;
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
gpu_metrics->temperature_edge = metrics->TemperatureEdge;
gpu_metrics->temperature_hotspot = metrics->TemperatureHotspot;
@@ -3670,6 +3723,9 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_1];
gpu_metrics->throttle_status = metrics->ThrottlerStatus;
+ gpu_metrics->indep_throttle_status =
+ smu_cmn_get_indep_throttler_status(metrics->ThrottlerStatus,
+ sienna_cichlid_throttler_map);
gpu_metrics->current_fan_speed = metrics->CurrFanSpeed;
@@ -3692,7 +3748,7 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
*table = (void *)gpu_metrics;
- return sizeof(struct gpu_metrics_v1_1);
+ return sizeof(struct gpu_metrics_v1_3);
}
static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu)
@@ -3875,8 +3931,8 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.baco_is_support = smu_v11_0_baco_is_support,
.baco_get_state = smu_v11_0_baco_get_state,
.baco_set_state = smu_v11_0_baco_set_state,
- .baco_enter = smu_v11_0_baco_enter,
- .baco_exit = smu_v11_0_baco_exit,
+ .baco_enter = sienna_cichlid_baco_enter,
+ .baco_exit = sienna_cichlid_baco_exit,
.mode1_reset_is_support = sienna_cichlid_is_mode1_reset_supported,
.mode1_reset = smu_v11_0_mode1_reset,
.get_dpm_ultimate_freq = sienna_cichlid_get_dpm_ultimate_freq,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index e1e1c268f661..0a5d46ac9ccd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -1474,7 +1474,8 @@ int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
return smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
}
-static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
+int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu,
+ enum smu_v11_0_baco_seq baco_seq)
{
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
}
@@ -1527,6 +1528,7 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
+ case CHIP_BEIGE_GOBY:
if (amdgpu_runtime_pm == 2)
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_EnterBaco,
@@ -1578,16 +1580,8 @@ out:
int smu_v11_0_baco_enter(struct smu_context *smu)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- /* Arcturus does not need this audio workaround */
- if (adev->asic_type != CHIP_ARCTURUS) {
- ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
- if (ret)
- return ret;
- }
-
ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 77f532a49e37..18681dc458da 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -190,6 +190,20 @@ static struct cmn2asic_mapping vangogh_workload_map[PP_SMC_POWER_PROFILE_COUNT]
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};
+static const uint8_t vangogh_throttler_map[] = {
+ [THROTTLER_STATUS_BIT_SPL] = (SMU_THROTTLER_SPL_BIT),
+ [THROTTLER_STATUS_BIT_FPPT] = (SMU_THROTTLER_FPPT_BIT),
+ [THROTTLER_STATUS_BIT_SPPT] = (SMU_THROTTLER_SPPT_BIT),
+ [THROTTLER_STATUS_BIT_SPPT_APU] = (SMU_THROTTLER_SPPT_APU_BIT),
+ [THROTTLER_STATUS_BIT_THM_CORE] = (SMU_THROTTLER_TEMP_CORE_BIT),
+ [THROTTLER_STATUS_BIT_THM_GFX] = (SMU_THROTTLER_TEMP_GPU_BIT),
+ [THROTTLER_STATUS_BIT_THM_SOC] = (SMU_THROTTLER_TEMP_SOC_BIT),
+ [THROTTLER_STATUS_BIT_TDC_VDD] = (SMU_THROTTLER_TDC_VDD_BIT),
+ [THROTTLER_STATUS_BIT_TDC_SOC] = (SMU_THROTTLER_TDC_SOC_BIT),
+ [THROTTLER_STATUS_BIT_TDC_GFX] = (SMU_THROTTLER_TDC_GFX_BIT),
+ [THROTTLER_STATUS_BIT_TDC_CVIP] = (SMU_THROTTLER_TDC_CVIP_BIT),
+};
+
static int vangogh_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
@@ -226,7 +240,7 @@ static int vangogh_tables_init(struct smu_context *smu)
goto err0_out;
smu_table->metrics_time = 0;
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_1);
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table)
goto err1_out;
@@ -1632,8 +1646,8 @@ static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu,
void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v2_1 *gpu_metrics =
- (struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table;
+ struct gpu_metrics_v2_2 *gpu_metrics =
+ (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
SmuMetrics_legacy_t metrics;
int ret = 0;
@@ -1641,7 +1655,7 @@ static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu,
if (ret)
return ret;
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);
gpu_metrics->temperature_gfx = metrics.GfxTemperature;
gpu_metrics->temperature_soc = metrics.SocTemperature;
@@ -1674,20 +1688,23 @@ static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu,
gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0];
gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+ gpu_metrics->indep_throttle_status =
+ smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
+ vangogh_throttler_map);
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
*table = (void *)gpu_metrics;
- return sizeof(struct gpu_metrics_v2_1);
+ return sizeof(struct gpu_metrics_v2_2);
}
static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v2_1 *gpu_metrics =
- (struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table;
+ struct gpu_metrics_v2_2 *gpu_metrics =
+ (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
SmuMetrics_t metrics;
int ret = 0;
@@ -1695,7 +1712,7 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
if (ret)
return ret;
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);
gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
@@ -1735,12 +1752,15 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0];
gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
+ gpu_metrics->indep_throttle_status =
+ smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus,
+ vangogh_throttler_map);
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
*table = (void *)gpu_metrics;
- return sizeof(struct gpu_metrics_v2_1);
+ return sizeof(struct gpu_metrics_v2_2);
}
static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu,
@@ -2051,7 +2071,10 @@ static int vangogh_mode2_reset(struct smu_context *smu)
return vangogh_mode_reset(smu, SMU_RESET_MODE_2);
}
-static int vangogh_get_power_limit(struct smu_context *smu)
+static int vangogh_get_power_limit(struct smu_context *smu,
+ uint32_t *current_power_limit,
+ uint32_t *default_power_limit,
+ uint32_t *max_power_limit)
{
struct smu_11_5_power_context *power_context =
smu->smu_power.power_context;
@@ -2067,8 +2090,12 @@ static int vangogh_get_power_limit(struct smu_context *smu)
return ret;
}
/* convert from milliwatt to watt */
- smu->current_power_limit = smu->default_power_limit = ppt_limit / 1000;
- smu->max_power_limit = 29;
+ if (current_power_limit)
+ *current_power_limit = ppt_limit / 1000;
+ if (default_power_limit)
+ *default_power_limit = ppt_limit / 1000;
+ if (max_power_limit)
+ *max_power_limit = 29;
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPPTLimit, &ppt_limit);
if (ret) {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 1c399c4ab4dc..9a9c24a6ec35 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -128,6 +128,22 @@ static struct cmn2asic_mapping renoir_workload_map[PP_SMC_POWER_PROFILE_COUNT] =
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};
+static const uint8_t renoir_throttler_map[] = {
+ [THROTTLER_STATUS_BIT_SPL] = (SMU_THROTTLER_SPL_BIT),
+ [THROTTLER_STATUS_BIT_FPPT] = (SMU_THROTTLER_FPPT_BIT),
+ [THROTTLER_STATUS_BIT_SPPT] = (SMU_THROTTLER_SPPT_BIT),
+ [THROTTLER_STATUS_BIT_SPPT_APU] = (SMU_THROTTLER_SPPT_APU_BIT),
+ [THROTTLER_STATUS_BIT_THM_CORE] = (SMU_THROTTLER_TEMP_CORE_BIT),
+ [THROTTLER_STATUS_BIT_THM_GFX] = (SMU_THROTTLER_TEMP_GPU_BIT),
+ [THROTTLER_STATUS_BIT_THM_SOC] = (SMU_THROTTLER_TEMP_SOC_BIT),
+ [THROTTLER_STATUS_BIT_TDC_VDD] = (SMU_THROTTLER_TDC_VDD_BIT),
+ [THROTTLER_STATUS_BIT_TDC_SOC] = (SMU_THROTTLER_TDC_SOC_BIT),
+ [THROTTLER_STATUS_BIT_PROCHOT_CPU] = (SMU_THROTTLER_PROCHOT_CPU_BIT),
+ [THROTTLER_STATUS_BIT_PROCHOT_GFX] = (SMU_THROTTLER_PROCHOT_GFX_BIT),
+ [THROTTLER_STATUS_BIT_EDC_CPU] = (SMU_THROTTLER_EDC_CPU_BIT),
+ [THROTTLER_STATUS_BIT_EDC_GFX] = (SMU_THROTTLER_EDC_GFX_BIT),
+};
+
static int renoir_init_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
@@ -153,7 +169,7 @@ static int renoir_init_smc_tables(struct smu_context *smu)
if (!smu_table->watermarks_table)
goto err2_out;
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_1);
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table)
goto err3_out;
@@ -1164,6 +1180,28 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
case METRICS_VOLTAGE_VDDSOC:
*value = metrics->Voltage[1];
break;
+ case METRICS_SS_APU_SHARE:
+ /* return the percentage of APU power with respect to the APU's power limit.
+ * A percentage is reported, not a boost value; Smartshift power
+ * boost/shift happens only when the percentage exceeds 100.
+ */
+ if (metrics->StapmOriginalLimit > 0)
+ *value = (metrics->ApuPower * 100) / metrics->StapmOriginalLimit;
+ else
+ *value = 0;
+ break;
+ case METRICS_SS_DGPU_SHARE:
+ /* return the percentage of dGPU power with respect to the dGPU's power limit.
+ * A percentage is reported, not a boost value; Smartshift power
+ * boost/shift happens only when the percentage exceeds 100.
+ */
+ if ((metrics->dGpuPower > 0) &&
+ (metrics->StapmCurrentLimit > metrics->StapmOriginalLimit))
+ *value = (metrics->dGpuPower * 100) /
+ (metrics->StapmCurrentLimit - metrics->StapmOriginalLimit);
+ else
+ *value = 0;
+ break;
default:
*value = UINT_MAX;
break;
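The two new METRICS_SS_* cases report Smartshift shares as plain percentages: the APU share is ApuPower relative to StapmOriginalLimit, and the dGPU share is dGpuPower relative to the headroom between StapmCurrentLimit and StapmOriginalLimit, so a value above 100 means power is actively being shifted. A small worked example with made-up numbers (same units as the metrics table):

/* Sketch only: the share arithmetic with illustrative values. */
static void example_smartshift_shares(void)
{
	uint32_t apu_power = 18000, stapm_original = 15000;
	uint32_t dgpu_power = 5000, stapm_current = 19000;

	uint32_t apu_share = (apu_power * 100) / stapm_original;	/* 120 -> APU is boosting */
	uint32_t dgpu_share = (dgpu_power * 100) /
			      (stapm_current - stapm_original);		/* 125% of the shifted budget */

	(void)apu_share;
	(void)dgpu_share;
}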
@@ -1235,6 +1273,18 @@ static int renoir_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_SS_APU_SHARE:
+ ret = renoir_get_smu_metrics_data(smu,
+ METRICS_SS_APU_SHARE,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_SS_DGPU_SHARE:
+ ret = renoir_get_smu_metrics_data(smu,
+ METRICS_SS_DGPU_SHARE,
+ (uint32_t *)data);
+ *size = 4;
+ break;
default:
ret = -EOPNOTSUPP;
break;
@@ -1264,8 +1314,8 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,
void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v2_1 *gpu_metrics =
- (struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table;
+ struct gpu_metrics_v2_2 *gpu_metrics =
+ (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
SmuMetrics_t metrics;
int ret = 0;
@@ -1273,7 +1323,7 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,
if (ret)
return ret;
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);
gpu_metrics->temperature_gfx = metrics.GfxTemperature;
gpu_metrics->temperature_soc = metrics.SocTemperature;
@@ -1311,6 +1361,9 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->current_l3clk[1] = metrics.L3Frequency[1];
gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+ gpu_metrics->indep_throttle_status =
+ smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
+ renoir_throttler_map);
gpu_metrics->fan_pwm = metrics.FanPwm;
@@ -1318,7 +1371,7 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,
*table = (void *)gpu_metrics;
- return sizeof(struct gpu_metrics_v2_1);
+ return sizeof(struct gpu_metrics_v2_2);
}
static int renoir_gfx_state_change_set(struct smu_context *smu, uint32_t state)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
index 9b3a8503f5cd..d4c4c495762c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'smu manager' sub-component of powerplay.
# It provides the smu management services for the driver.
-SMU13_MGR = smu_v13_0.o aldebaran_ppt.o smu_v13_0_1.o yellow_carp_ppt.o
+SMU13_MGR = smu_v13_0.o aldebaran_ppt.o yellow_carp_ppt.o
AMD_SWSMU_SMU13MGR = $(addprefix $(AMD_SWSMU_PATH)/smu13/,$(SMU13_MGR))
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 6ee9c4186f02..9316a726195c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -191,6 +191,20 @@ static const struct cmn2asic_mapping aldebaran_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(I2C_COMMANDS),
};
+static const uint8_t aldebaran_throttler_map[] = {
+ [THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT),
+ [THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT),
+ [THROTTLER_TDC_GFX_BIT] = (SMU_THROTTLER_TDC_GFX_BIT),
+ [THROTTLER_TDC_SOC_BIT] = (SMU_THROTTLER_TDC_SOC_BIT),
+ [THROTTLER_TDC_HBM_BIT] = (SMU_THROTTLER_TDC_MEM_BIT),
+ [THROTTLER_TEMP_GPU_BIT] = (SMU_THROTTLER_TEMP_GPU_BIT),
+ [THROTTLER_TEMP_MEM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT),
+ [THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT),
+ [THROTTLER_TEMP_VR_SOC_BIT] = (SMU_THROTTLER_TEMP_VR_SOC_BIT),
+ [THROTTLER_TEMP_VR_MEM_BIT] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT),
+ [THROTTLER_APCC_BIT] = (SMU_THROTTLER_APCC_BIT),
+};
+
static int aldebaran_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
@@ -213,7 +227,7 @@ static int aldebaran_tables_init(struct smu_context *smu)
return -ENOMEM;
smu_table->metrics_time = 0;
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_2);
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table) {
kfree(smu_table->metrics_table);
@@ -510,6 +524,16 @@ static int aldebaran_freqs_in_same_level(int32_t frequency1,
return (abs(frequency1 - frequency2) <= EPSILON);
}
+static bool aldebaran_is_primary(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->smuio.funcs && adev->smuio.funcs->get_die_id)
+ return adev->smuio.funcs->get_die_id(adev) == 0;
+
+ return true;
+}
+
static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member,
uint32_t *value)
@@ -563,7 +587,10 @@ static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->AverageUclkActivity;
break;
case METRICS_AVERAGE_SOCKETPOWER:
- *value = metrics->AverageSocketPower << 8;
+ /* Valid power data is available only from primary die */
+ *value = aldebaran_is_primary(smu) ?
+ metrics->AverageSocketPower << 8 :
+ 0;
break;
case METRICS_TEMPERATURE_EDGE:
*value = metrics->TemperatureEdge *
@@ -1132,7 +1159,10 @@ static int aldebaran_read_sensor(struct smu_context *smu,
return ret;
}
-static int aldebaran_get_power_limit(struct smu_context *smu)
+static int aldebaran_get_power_limit(struct smu_context *smu,
+ uint32_t *current_power_limit,
+ uint32_t *default_power_limit,
+ uint32_t *max_power_limit)
{
PPTable_t *pptable = smu->smu_table.driver_pptable;
uint32_t power_limit = 0;
@@ -1141,24 +1171,46 @@ static int aldebaran_get_power_limit(struct smu_context *smu)
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
return -EINVAL;
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit, &power_limit);
+ /* Valid power data is available only from the primary die.
+ * For the secondary die, report the value as 0.
+ */
+ if (aldebaran_is_primary(smu)) {
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit,
+ &power_limit);
- if (ret) {
- /* the last hope to figure out the ppt limit */
- if (!pptable) {
- dev_err(smu->adev->dev, "Cannot get PPT limit due to pptable missing!");
- return -EINVAL;
+ if (ret) {
+ /* the last hope to figure out the ppt limit */
+ if (!pptable) {
+ dev_err(smu->adev->dev,
+ "Cannot get PPT limit due to pptable missing!");
+ return -EINVAL;
+ }
+ power_limit = pptable->PptLimit;
}
- power_limit = pptable->PptLimit;
}
- smu->current_power_limit = smu->default_power_limit = power_limit;
- if (pptable)
- smu->max_power_limit = pptable->PptLimit;
+ if (current_power_limit)
+ *current_power_limit = power_limit;
+ if (default_power_limit)
+ *default_power_limit = power_limit;
+
+ if (max_power_limit) {
+ if (pptable)
+ *max_power_limit = pptable->PptLimit;
+ }
return 0;
}
+static int aldebaran_set_power_limit(struct smu_context *smu, uint32_t n)
+{
+ /* Power limit can be set only through primary die */
+ if (aldebaran_is_primary(smu))
+ return smu_v13_0_set_power_limit(smu, n);
+
+ return -EINVAL;
+}
+
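The primary-die rule introduced above shows up in three places in this file: socket-power metrics and the PPT limit read back as 0 on secondary dies, while an attempt to program the limit from a secondary die is rejected. A condensed sketch of the two behaviours; the helper names are illustrative:

/* Sketch only: primary-die gating on a multi-die package. */
static bool example_is_primary_die(struct amdgpu_device *adev)
{
	if (adev->smuio.funcs && adev->smuio.funcs->get_die_id)
		return adev->smuio.funcs->get_die_id(adev) == 0;

	return true;	/* single-die parts are always "primary" */
}

static uint32_t example_read_socket_power(struct smu_context *smu, uint32_t raw)
{
	/* Reads: secondary dies report 0 instead of bogus data. */
	return example_is_primary_die(smu->adev) ? raw : 0;
}

static int example_write_ppt_limit(struct smu_context *smu, uint32_t limit)
{
	/* Writes: only the primary die may program the package limit. */
	if (!example_is_primary_die(smu->adev))
		return -EINVAL;

	return smu_v13_0_set_power_limit(smu, limit);
}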
static int aldebaran_system_features_control(struct smu_context *smu, bool enable)
{
int ret;
@@ -1706,8 +1758,8 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_2 *gpu_metrics =
- (struct gpu_metrics_v1_2 *)smu_table->gpu_metrics_table;
+ struct gpu_metrics_v1_3 *gpu_metrics =
+ (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
SmuMetrics_t metrics;
int i, ret = 0;
@@ -1717,7 +1769,7 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
if (ret)
return ret;
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 2);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
gpu_metrics->temperature_edge = metrics.TemperatureEdge;
gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -1730,10 +1782,16 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
gpu_metrics->average_mm_activity = 0;
- gpu_metrics->average_socket_power = metrics.AverageSocketPower;
- gpu_metrics->energy_accumulator =
+ /* Valid power data is available only from primary die */
+ if (aldebaran_is_primary(smu)) {
+ gpu_metrics->average_socket_power = metrics.AverageSocketPower;
+ gpu_metrics->energy_accumulator =
(uint64_t)metrics.EnergyAcc64bitHigh << 32 |
metrics.EnergyAcc64bitLow;
+ } else {
+ gpu_metrics->average_socket_power = 0;
+ gpu_metrics->energy_accumulator = 0;
+ }
gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
@@ -1748,6 +1806,9 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+ gpu_metrics->indep_throttle_status =
+ smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
+ aldebaran_throttler_map);
gpu_metrics->current_fan_speed = 0;
@@ -1769,7 +1830,7 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
*table = (void *)gpu_metrics;
- return sizeof(struct gpu_metrics_v1_2);
+ return sizeof(struct gpu_metrics_v1_3);
}
static int aldebaran_mode2_reset(struct smu_context *smu)
@@ -1862,6 +1923,20 @@ static int aldebaran_set_mp1_state(struct smu_context *smu,
}
}
+static int aldebaran_smu_send_hbm_bad_page_num(struct smu_context *smu,
+ uint32_t size)
+{
+ int ret = 0;
+
+ /* message SMU to update the bad page number on SMUBUS */
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetNumBadHbmPagesRetired, size, NULL);
+ if (ret)
+ dev_err(smu->adev->dev, "[%s] failed to message SMU to update HBM bad pages number\n",
+ __func__);
+
+ return ret;
+}
+
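The new ->send_hbm_bad_pages_num hook gives the RAS/UMC code a way to tell the PMFW how many HBM pages have been retired, which the firmware then reflects on the SMBUS side per the comment above. A hedged sketch of a caller; the wrapper below is illustrative and not the patch's actual call site:

/* Sketch only: forwarding a retired-page count to the PMFW through the new hook. */
static int example_report_bad_pages(struct smu_context *smu, uint32_t count)
{
	if (!smu->ppt_funcs || !smu->ppt_funcs->send_hbm_bad_pages_num)
		return -EOPNOTSUPP;

	return smu->ppt_funcs->send_hbm_bad_pages_num(smu, count);
}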
static const struct pptable_funcs aldebaran_ppt_funcs = {
/* init dpm */
.get_allowed_feature_mask = aldebaran_get_allowed_feature_mask,
@@ -1898,7 +1973,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
.get_enabled_mask = smu_cmn_get_enabled_mask,
.feature_is_enabled = smu_cmn_feature_is_enabled,
.disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception,
- .set_power_limit = smu_v13_0_set_power_limit,
+ .set_power_limit = aldebaran_set_power_limit,
.init_max_sustainable_clocks = smu_v13_0_init_max_sustainable_clocks,
.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
.disable_thermal_alert = smu_v13_0_disable_thermal_alert,
@@ -1924,6 +1999,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
.wait_for_event = smu_v13_0_wait_for_event,
.i2c_init = aldebaran_i2c_control_init,
.i2c_fini = aldebaran_i2c_control_fini,
+ .send_hbm_bad_pages_num = aldebaran_smu_send_hbm_bad_page_num,
};
void aldebaran_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index a3dc7194aaf8..a421ba85bd6d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -210,6 +210,9 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
case CHIP_ALDEBARAN:
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE;
break;
+ case CHIP_YELLOW_CARP:
+ smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_YELLOW_CARP;
+ break;
default:
dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type);
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_INV;
@@ -694,6 +697,27 @@ failed:
return ret;
}
+int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
+{
+ int ret = 0;
+ struct amdgpu_device *adev = smu->adev;
+
+ switch (adev->asic_type) {
+ case CHIP_YELLOW_CARP:
+ if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
+ return 0;
+ if (enable)
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
+ else
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
int smu_v13_0_system_features_control(struct smu_context *smu,
bool en)
{
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_1.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_1.c
deleted file mode 100644
index 61917b49f2bf..000000000000
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_1.c
+++ /dev/null
@@ -1,311 +0,0 @@
-/*
- * Copyright 2020 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-//#include <linux/reboot.h>
-
-#define SWSMU_CODE_LAYER_L3
-
-#include "amdgpu.h"
-#include "amdgpu_smu.h"
-#include "smu_v13_0_1.h"
-#include "soc15_common.h"
-#include "smu_cmn.h"
-#include "atomfirmware.h"
-#include "amdgpu_atomfirmware.h"
-#include "amdgpu_atombios.h"
-#include "atom.h"
-
-#include "asic_reg/mp/mp_13_0_1_offset.h"
-#include "asic_reg/mp/mp_13_0_1_sh_mask.h"
-
-/*
- * DO NOT use these for err/warn/info/debug messages.
- * Use dev_err, dev_warn, dev_info and dev_dbg instead.
- * They are more MGPU friendly.
- */
-#undef pr_err
-#undef pr_warn
-#undef pr_info
-#undef pr_debug
-
-int smu_v13_0_1_check_fw_status(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- uint32_t mp1_fw_flags;
-
- mp1_fw_flags = RREG32_PCIE(MP1_Public |
- (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
-
- if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
- MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
- return 0;
-
- return -EIO;
-}
-
-int smu_v13_0_1_check_fw_version(struct smu_context *smu)
-{
- uint32_t if_version = 0xff, smu_version = 0xff;
- uint16_t smu_major;
- uint8_t smu_minor, smu_debug;
- int ret = 0;
-
- ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
- if (ret)
- return ret;
-
- smu_major = (smu_version >> 16) & 0xffff;
- smu_minor = (smu_version >> 8) & 0xff;
- smu_debug = (smu_version >> 0) & 0xff;
-
- switch (smu->adev->asic_type) {
- case CHIP_YELLOW_CARP:
- smu->smc_driver_if_version = SMU13_0_1_DRIVER_IF_VERSION_YELLOW_CARP;
- break;
-
- default:
- dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type);
- smu->smc_driver_if_version = SMU13_0_1_DRIVER_IF_VERSION_INV;
- break;
- }
-
- dev_info(smu->adev->dev, "smu fw reported version = 0x%08x (%d.%d.%d)\n",
- smu_version, smu_major, smu_minor, smu_debug);
-
- /*
- * 1. if_version mismatch is not critical as our fw is designed
- * to be backward compatible.
- * 2. New fw usually brings some optimizations. But that's visible
- * only on the paired driver.
- * Considering above, we just leave user a warning message instead
- * of halt driver loading.
- */
- if (if_version != smu->smc_driver_if_version) {
- dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
- "smu fw version = 0x%08x (%d.%d.%d)\n",
- smu->smc_driver_if_version, if_version,
- smu_version, smu_major, smu_minor, smu_debug);
- dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
- }
-
- return ret;
-}
-
-int smu_v13_0_1_fini_smc_tables(struct smu_context *smu)
-{
- struct smu_table_context *smu_table = &smu->smu_table;
-
- kfree(smu_table->clocks_table);
- smu_table->clocks_table = NULL;
-
- kfree(smu_table->metrics_table);
- smu_table->metrics_table = NULL;
-
- kfree(smu_table->watermarks_table);
- smu_table->watermarks_table = NULL;
-
- return 0;
-}
-
-static int smu_v13_0_1_atom_get_smu_clockinfo(struct amdgpu_device *adev,
- uint8_t clk_id,
- uint8_t syspll_id,
- uint32_t *clk_freq)
-{
- struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
- struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
- int ret, index;
-
- input.clk_id = clk_id;
- input.syspll_id = syspll_id;
- input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
- index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
- getsmuclockinfo);
-
- ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
- (uint32_t *)&input);
- if (ret)
- return -EINVAL;
-
- output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
- *clk_freq = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
-
- return 0;
-}
-
-int smu_v13_0_1_get_vbios_bootup_values(struct smu_context *smu)
-{
- int ret, index;
- uint16_t size;
- uint8_t frev, crev;
- struct atom_common_table_header *header;
- struct atom_firmware_info_v3_4 *v_3_4;
- struct atom_firmware_info_v3_3 *v_3_3;
- struct atom_firmware_info_v3_1 *v_3_1;
-
- index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
- firmwareinfo);
-
- ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
- (uint8_t **)&header);
- if (ret)
- return ret;
-
- if (header->format_revision != 3) {
- dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu13\n");
- return -EINVAL;
- }
-
- switch (header->content_revision) {
- case 0:
- case 1:
- case 2:
- v_3_1 = (struct atom_firmware_info_v3_1 *)header;
- smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
- smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
- smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
- smu->smu_table.boot_values.socclk = 0;
- smu->smu_table.boot_values.dcefclk = 0;
- smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
- smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
- smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
- smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
- smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
- break;
- case 3:
- v_3_3 = (struct atom_firmware_info_v3_3 *)header;
- smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
- smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
- smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
- smu->smu_table.boot_values.socclk = 0;
- smu->smu_table.boot_values.dcefclk = 0;
- smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
- smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
- smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
- smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
- smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
- break;
- case 4:
- default:
- v_3_4 = (struct atom_firmware_info_v3_4 *)header;
- smu->smu_table.boot_values.revision = v_3_4->firmware_revision;
- smu->smu_table.boot_values.gfxclk = v_3_4->bootup_sclk_in10khz;
- smu->smu_table.boot_values.uclk = v_3_4->bootup_mclk_in10khz;
- smu->smu_table.boot_values.socclk = 0;
- smu->smu_table.boot_values.dcefclk = 0;
- smu->smu_table.boot_values.vddc = v_3_4->bootup_vddc_mv;
- smu->smu_table.boot_values.vddci = v_3_4->bootup_vddci_mv;
- smu->smu_table.boot_values.mvddc = v_3_4->bootup_mvddc_mv;
- smu->smu_table.boot_values.vdd_gfx = v_3_4->bootup_vddgfx_mv;
- smu->smu_table.boot_values.cooling_id = v_3_4->coolingsolution_id;
- break;
- }
-
- smu->smu_table.boot_values.format_revision = header->format_revision;
- smu->smu_table.boot_values.content_revision = header->content_revision;
-
- smu_v13_0_1_atom_get_smu_clockinfo(smu->adev,
- (uint8_t)SMU11_SYSPLL0_SOCCLK_ID,
- (uint8_t)0,
- &smu->smu_table.boot_values.socclk);
-
- smu_v13_0_1_atom_get_smu_clockinfo(smu->adev,
- (uint8_t)SMU11_SYSPLL0_DCEFCLK_ID,
- (uint8_t)0,
- &smu->smu_table.boot_values.dcefclk);
-
- smu_v13_0_1_atom_get_smu_clockinfo(smu->adev,
- (uint8_t)SMU11_SYSPLL0_ECLK_ID,
- (uint8_t)0,
- &smu->smu_table.boot_values.eclk);
-
- smu_v13_0_1_atom_get_smu_clockinfo(smu->adev,
- (uint8_t)SMU11_SYSPLL0_VCLK_ID,
- (uint8_t)0,
- &smu->smu_table.boot_values.vclk);
-
- smu_v13_0_1_atom_get_smu_clockinfo(smu->adev,
- (uint8_t)SMU11_SYSPLL0_DCLK_ID,
- (uint8_t)0,
- &smu->smu_table.boot_values.dclk);
-
- if ((smu->smu_table.boot_values.format_revision == 3) &&
- (smu->smu_table.boot_values.content_revision >= 2))
- smu_v13_0_1_atom_get_smu_clockinfo(smu->adev,
- (uint8_t)SMU11_SYSPLL1_0_FCLK_ID,
- (uint8_t)SMU11_SYSPLL1_2_ID,
- &smu->smu_table.boot_values.fclk);
-
- return 0;
-}
-
-int smu_v13_0_1_set_default_dpm_tables(struct smu_context *smu)
-{
- struct smu_table_context *smu_table = &smu->smu_table;
-
- return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
-}
-
-int smu_v13_0_1_set_driver_table_location(struct smu_context *smu)
-{
- struct smu_table *driver_table = &smu->smu_table.driver_table;
- int ret = 0;
-
- if (!driver_table->mc_address)
- return 0;
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetDriverDramAddrHigh,
- upper_32_bits(driver_table->mc_address),
- NULL);
-
- if (ret)
- return ret;
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetDriverDramAddrLow,
- lower_32_bits(driver_table->mc_address),
- NULL);
-
- return ret;
-}
-
-int smu_v13_0_1_gfx_off_control(struct smu_context *smu, bool enable)
-{
- int ret = 0;
- struct amdgpu_device *adev = smu->adev;
-
- switch (adev->asic_type) {
- case CHIP_YELLOW_CARP:
- if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
- return 0;
- if (enable)
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
- else
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
- break;
- default:
- break;
- }
-
- return ret;
-}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 0cd7902d5172..0cfeb9fc7c03 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -25,7 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_smu.h"
-#include "smu_v13_0_1.h"
+#include "smu_v13_0.h"
#include "smu13_driver_if_yellow_carp.h"
#include "yellow_carp_ppt.h"
#include "smu_v13_0_1_ppsmc.h"
@@ -186,13 +186,30 @@ err0_out:
return -ENOMEM;
}
+static int yellow_carp_fini_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ kfree(smu_table->clocks_table);
+ smu_table->clocks_table = NULL;
+
+ kfree(smu_table->metrics_table);
+ smu_table->metrics_table = NULL;
+
+ kfree(smu_table->watermarks_table);
+ smu_table->watermarks_table = NULL;
+
+ return 0;
+}
+
static int yellow_carp_system_features_control(struct smu_context *smu, bool en)
{
struct smu_feature *feature = &smu->smu_feature;
+ struct amdgpu_device *adev = smu->adev;
uint32_t feature_mask[2];
int ret = 0;
- if (!en)
+ if (!en && !adev->in_s0ix)
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
bitmap_zero(feature->enabled, feature->feature_num);
@@ -281,13 +298,9 @@ static int yellow_carp_mode_reset(struct smu_context *smu, int type)
if (index < 0)
return index == -EACCES ? 0 : index;
- mutex_lock(&smu->message_lock);
-
- ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, type);
-
- mutex_unlock(&smu->message_lock);
-
- mdelay(10);
+ ret = smu_cmn_send_smc_msg_with_param(smu, (uint16_t)index, type, NULL);
+ if (ret)
+ dev_err(smu->adev->dev, "Failed to mode reset!\n");
return ret;
}
@@ -356,6 +369,28 @@ static int yellow_carp_get_smu_metrics_data(struct smu_context *smu,
case METRICS_VOLTAGE_VDDSOC:
*value = metrics->Voltage[1];
break;
+ case METRICS_SS_APU_SHARE:
+ /* return the percentage of APU power with respect to the APU's power limit.
+ * A percentage is reported, not a boost value; Smartshift power
+ * boost/shift happens only when the percentage exceeds 100.
+ */
+ if (metrics->StapmOpnLimit > 0)
+ *value = (metrics->ApuPower * 100) / metrics->StapmOpnLimit;
+ else
+ *value = 0;
+ break;
+ case METRICS_SS_DGPU_SHARE:
+ /* return the percentage of dGPU power with respect to the dGPU's power limit.
+ * A percentage is reported, not a boost value; Smartshift power
+ * boost/shift happens only when the percentage exceeds 100.
+ */
+ if ((metrics->dGpuPower > 0) &&
+ (metrics->StapmCurrentLimit > metrics->StapmOpnLimit))
+ *value = (metrics->dGpuPower * 100) /
+ (metrics->StapmCurrentLimit - metrics->StapmOpnLimit);
+ else
+ *value = 0;
+ break;
default:
*value = UINT_MAX;
break;
@@ -427,6 +462,18 @@ static int yellow_carp_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_SS_APU_SHARE:
+ ret = yellow_carp_get_smu_metrics_data(smu,
+ METRICS_SS_APU_SHARE,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_SS_DGPU_SHARE:
+ ret = yellow_carp_get_smu_metrics_data(smu,
+ METRICS_SS_DGPU_SHARE,
+ (uint32_t *)data);
+ *size = 4;
+ break;
default:
ret = -EOPNOTSUPP;
break;
@@ -624,6 +671,13 @@ static ssize_t yellow_carp_get_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v2_1);
}
+static int yellow_carp_set_default_dpm_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
+}
+
static int yellow_carp_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
long input[], uint32_t size)
{
@@ -1168,17 +1222,17 @@ static int yellow_carp_set_fine_grain_gfx_freq_parameters(struct smu_context *sm
}
static const struct pptable_funcs yellow_carp_ppt_funcs = {
- .check_fw_status = smu_v13_0_1_check_fw_status,
- .check_fw_version = smu_v13_0_1_check_fw_version,
+ .check_fw_status = smu_v13_0_check_fw_status,
+ .check_fw_version = smu_v13_0_check_fw_version,
.init_smc_tables = yellow_carp_init_smc_tables,
- .fini_smc_tables = smu_v13_0_1_fini_smc_tables,
- .get_vbios_bootup_values = smu_v13_0_1_get_vbios_bootup_values,
+ .fini_smc_tables = yellow_carp_fini_smc_tables,
+ .get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
.system_features_control = yellow_carp_system_features_control,
.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
.send_smc_msg = smu_cmn_send_smc_msg,
.dpm_set_vcn_enable = yellow_carp_dpm_set_vcn_enable,
.dpm_set_jpeg_enable = yellow_carp_dpm_set_jpeg_enable,
- .set_default_dpm_table = smu_v13_0_1_set_default_dpm_tables,
+ .set_default_dpm_table = yellow_carp_set_default_dpm_tables,
.read_sensor = yellow_carp_read_sensor,
.is_dpm_running = yellow_carp_is_dpm_running,
.set_watermarks_table = yellow_carp_set_watermarks_table,
@@ -1187,8 +1241,8 @@ static const struct pptable_funcs yellow_carp_ppt_funcs = {
.get_gpu_metrics = yellow_carp_get_gpu_metrics,
.get_enabled_mask = smu_cmn_get_enabled_32_bits_mask,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
- .set_driver_table_location = smu_v13_0_1_set_driver_table_location,
- .gfx_off_control = smu_v13_0_1_gfx_off_control,
+ .set_driver_table_location = smu_v13_0_set_driver_table_location,
+ .gfx_off_control = smu_v13_0_gfx_off_control,
.post_init = yellow_carp_post_smu_init,
.mode2_reset = yellow_carp_mode2_reset,
.get_dpm_ultimate_freq = yellow_carp_get_dpm_ultimate_freq,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 0882a1dd6797..e802f9a95f08 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -398,6 +398,19 @@ int smu_cmn_get_enabled_32_bits_mask(struct smu_context *smu,
}
+uint64_t smu_cmn_get_indep_throttler_status(
+ const unsigned long dep_status,
+ const uint8_t *throttler_map)
+{
+ uint64_t indep_status = 0;
+ uint8_t dep_bit = 0;
+
+ for_each_set_bit(dep_bit, &dep_status, 32)
+ indep_status |= 1ULL << throttler_map[dep_bit];
+
+ return indep_status;
+}
+
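smu_cmn_get_indep_throttler_status() walks the set bits of the firmware's ThrottlerStatus word and re-expresses them in the ASIC-independent SMU_THROTTLER_* bit space using the per-ASIC lookup tables added earlier in this patch. A hedged usage sketch with a made-up two-entry map:

/* Sketch only: translating a raw status word through a small, illustrative map. */
static const uint8_t example_throttler_map[] = {
	[0] = SMU_THROTTLER_PPT0_BIT,		/* ASIC bit 0 -> generic PPT0 */
	[3] = SMU_THROTTLER_TEMP_EDGE_BIT,	/* ASIC bit 3 -> generic edge temp */
};

static uint64_t example_translate_status(uint32_t raw_status)
{
	/* e.g. raw_status = BIT(0) | BIT(3), as read from SmuMetrics.ThrottlerStatus */
	return smu_cmn_get_indep_throttler_status(raw_status, example_throttler_map);
}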
int smu_cmn_feature_update_enable_state(struct smu_context *smu,
uint64_t feature_mask,
bool enabled)
@@ -575,23 +588,52 @@ int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
return ret;
}
+/**
+ * smu_cmn_disable_all_features_with_exception - disable all dpm features
+ *                                               except the one specified by
+ *                                               @mask
+ *
+ * @smu: smu_context pointer
+ * @no_hw_disablement: whether real dpm disablement should be performed
+ *                     true: update the cache (of the dpm enablement state) only
+ * false: real dpm disablement plus cache update
+ * @mask: the dpm feature which should not be disabled
+ * SMU_FEATURE_COUNT: no exception, all dpm features
+ *        are disabled
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
+ bool no_hw_disablement,
enum smu_feature_mask mask)
{
+ struct smu_feature *feature = &smu->smu_feature;
uint64_t features_to_disable = U64_MAX;
int skipped_feature_id;
- skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_FEATURE,
- mask);
- if (skipped_feature_id < 0)
- return -EINVAL;
+ if (mask != SMU_FEATURE_COUNT) {
+ skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_FEATURE,
+ mask);
+ if (skipped_feature_id < 0)
+ return -EINVAL;
- features_to_disable &= ~(1ULL << skipped_feature_id);
+ features_to_disable &= ~(1ULL << skipped_feature_id);
+ }
- return smu_cmn_feature_update_enable_state(smu,
- features_to_disable,
- 0);
+ if (no_hw_disablement) {
+ mutex_lock(&feature->mutex);
+ bitmap_andnot(feature->enabled, feature->enabled,
+ (unsigned long *)(&features_to_disable), SMU_FEATURE_MAX);
+ mutex_unlock(&feature->mutex);
+
+ return 0;
+ } else {
+ return smu_cmn_feature_update_enable_state(smu,
+ features_to_disable,
+ 0);
+ }
}
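The new no_hw_disablement flag separates a pure bookkeeping update of the enabled-feature bitmap from an actual disable request to the PMFW; suspend-style paths want the former, a real teardown the latter. A hedged sketch of the two call styles, using SMU_FEATURE_BACO_BIT as the example exception:

/* Sketch only: the two modes of the reworked helper. */
static int example_disable_features(struct smu_context *smu, bool cache_only)
{
	if (cache_only)
		/* Bookkeeping only: mark every feature disabled, no PMFW traffic. */
		return smu_cmn_disable_all_features_with_exception(smu, true,
								   SMU_FEATURE_COUNT);

	/* Real teardown: ask the PMFW to disable everything except BACO. */
	return smu_cmn_disable_all_features_with_exception(smu, false,
							   SMU_FEATURE_BACO_BIT);
}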
int smu_cmn_get_smc_version(struct smu_context *smu,
@@ -773,6 +815,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
case METRICS_VERSION(2, 1):
structure_size = sizeof(struct gpu_metrics_v2_1);
break;
+ case METRICS_VERSION(2, 2):
+ structure_size = sizeof(struct gpu_metrics_v2_2);
+ break;
default:
return;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index da6ff6f024f9..9add5f16ff56 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -60,6 +60,10 @@ int smu_cmn_get_enabled_32_bits_mask(struct smu_context *smu,
uint32_t *feature_mask,
uint32_t num);
+uint64_t smu_cmn_get_indep_throttler_status(
+ const unsigned long dep_status,
+ const uint8_t *throttler_map);
+
int smu_cmn_feature_update_enable_state(struct smu_context *smu,
uint64_t feature_mask,
bool enabled);
@@ -75,6 +79,7 @@ int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
uint64_t new_mask);
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
+ bool no_hw_disablement,
enum smu_feature_mask mask);
int smu_cmn_get_smc_version(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
index 68d9464ababc..59f9cfff3d61 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
@@ -57,7 +57,7 @@
#define smu_feature_set_allowed_mask(smu) smu_ppt_funcs(set_allowed_mask, 0, smu)
#define smu_feature_get_enabled_mask(smu, mask, num) smu_ppt_funcs(get_enabled_mask, 0, smu, mask, num)
#define smu_feature_is_enabled(smu, mask) smu_ppt_funcs(feature_is_enabled, 0, smu, mask)
-#define smu_disable_all_features_with_exception(smu, mask) smu_ppt_funcs(disable_all_features_with_exception, 0, smu, mask)
+#define smu_disable_all_features_with_exception(smu, no_hw_disablement, mask) smu_ppt_funcs(disable_all_features_with_exception, 0, smu, no_hw_disablement, mask)
#define smu_is_dpm_running(smu) smu_ppt_funcs(is_dpm_running, 0 , smu)
#define smu_notify_display_change(smu) smu_ppt_funcs(notify_display_change, 0, smu)
#define smu_populate_umd_state_clk(smu) smu_ppt_funcs(populate_umd_state_clk, 0, smu)
@@ -82,7 +82,7 @@
#define smu_i2c_fini(smu, control) smu_ppt_funcs(i2c_fini, 0, smu, control)
#define smu_get_unique_id(smu) smu_ppt_funcs(get_unique_id, 0, smu)
#define smu_log_thermal_throttling(smu) smu_ppt_funcs(log_thermal_throttling_event, 0, smu)
-#define smu_get_asic_power_limits(smu) smu_ppt_funcs(get_power_limit, 0, smu)
+#define smu_get_asic_power_limits(smu, current, default, max) smu_ppt_funcs(get_power_limit, 0, smu, current, default, max)
#define smu_get_pp_feature_mask(smu, buf) smu_ppt_funcs(get_pp_feature_mask, 0, smu, buf)
#define smu_set_pp_feature_mask(smu, new_mask) smu_ppt_funcs(set_pp_feature_mask, 0, smu, new_mask)
#define smu_gfx_ulv_control(smu, enablement) smu_ppt_funcs(gfx_ulv_control, 0, smu, enablement)
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 05ad75d155e8..cfe4fc69277e 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -232,7 +232,6 @@ static void atmel_hlcdc_crtc_atomic_enable(struct drm_crtc *c,
pm_runtime_put_sync(dev->dev);
- drm_crtc_vblank_on(c);
}
#define ATMEL_HLCDC_RGB444_OUTPUT BIT(0)
@@ -344,7 +343,16 @@ static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c,
static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c,
struct drm_atomic_state *state)
{
+ drm_crtc_vblank_on(c);
+}
+
+static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *c,
+ struct drm_atomic_state *state)
+{
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->dev->event_lock, flags);
if (c->state->event) {
c->state->event->pipe = drm_crtc_index(c);
@@ -354,12 +362,7 @@ static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c,
crtc->event = c->state->event;
c->state->event = NULL;
}
-}
-
-static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
- /* TODO: write common plane control register if available */
+ spin_unlock_irqrestore(&c->dev->event_lock, flags);
}
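atmel-hlcdc stashes the pending pageflip event in crtc->event under dev->event_lock and completes it later from its vblank interrupt handler. Many atomic drivers do the equivalent directly with the generic vblank helpers; a hedged sketch of that generic pattern, for comparison (not verbatim from this driver):

/* Sketch only: the usual "arm for the next vblank or send now" pattern. */
static void example_latch_flip_event(struct drm_crtc *crtc,
				     struct drm_crtc_state *new_state)
{
	struct drm_pending_vblank_event *event = new_state->event;
	unsigned long flags;

	if (!event)
		return;

	new_state->event = NULL;

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (drm_crtc_vblank_get(crtc) == 0)
		drm_crtc_arm_vblank_event(crtc, event);		/* completed at next vblank */
	else
		drm_crtc_send_vblank_event(crtc, event);	/* complete immediately */
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}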
static const struct drm_crtc_helper_funcs lcdc_crtc_helper_funcs = {
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 65af56e47129..f09b6dd8754c 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -593,6 +593,7 @@ static int atmel_hlcdc_dc_modeset_init(struct drm_device *dev)
dev->mode_config.max_width = dc->desc->max_width;
dev->mode_config.max_height = dc->desc->max_height;
dev->mode_config.funcs = &mode_config_funcs;
+ dev->mode_config.async_page_flip = true;
return 0;
}
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index a8bbb021684b..d820423fac32 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1,6 +1,7 @@
/*
* Copyright (C) 2014 Red Hat
* Copyright (C) 2014 Intel Corp.
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -1609,9 +1610,20 @@ commit:
}
EXPORT_SYMBOL(__drm_atomic_helper_set_config);
-void drm_atomic_print_state(const struct drm_atomic_state *state)
+/**
+ * drm_atomic_print_new_state - prints drm atomic state
+ * @state: atomic configuration to check
+ * @p: drm printer
+ *
+ * This function prints the drm atomic state snapshot using the drm printer
+ * which is passed to it. This snapshot can be used for debugging purposes.
+ *
+ * Note that this function looks into the new state objects and hence it's not
+ * safe to be used after the call to drm_atomic_helper_commit_hw_done().
+ */
+void drm_atomic_print_new_state(const struct drm_atomic_state *state,
+ struct drm_printer *p)
{
- struct drm_printer p = drm_info_printer(state->dev->dev);
struct drm_plane *plane;
struct drm_plane_state *plane_state;
struct drm_crtc *crtc;
@@ -1620,17 +1632,23 @@ void drm_atomic_print_state(const struct drm_atomic_state *state)
struct drm_connector_state *connector_state;
int i;
+ if (!p) {
+ DRM_ERROR("invalid drm printer\n");
+ return;
+ }
+
DRM_DEBUG_ATOMIC("checking %p\n", state);
for_each_new_plane_in_state(state, plane, plane_state, i)
- drm_atomic_plane_print_state(&p, plane_state);
+ drm_atomic_plane_print_state(p, plane_state);
for_each_new_crtc_in_state(state, crtc, crtc_state, i)
- drm_atomic_crtc_print_state(&p, crtc_state);
+ drm_atomic_crtc_print_state(p, crtc_state);
for_each_new_connector_in_state(state, connector, connector_state, i)
- drm_atomic_connector_print_state(&p, connector_state);
+ drm_atomic_connector_print_state(p, connector_state);
}
+EXPORT_SYMBOL(drm_atomic_print_new_state);
static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
bool take_locks)
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 438e9585b225..7e48d40600ff 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -2,6 +2,7 @@
* Copyright (C) 2014 Red Hat
* Copyright (C) 2014 Intel Corp.
* Copyright (C) 2018 Intel Corp.
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -1321,6 +1322,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
struct drm_out_fence_state *fence_state;
int ret = 0;
unsigned int i, j, num_fences;
+ struct drm_printer p = drm_info_printer(dev->dev);
/* disallow for drivers not supporting atomic: */
if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
@@ -1453,7 +1455,7 @@ retry:
ret = drm_atomic_nonblocking_commit(state);
} else {
if (drm_debug_enabled(DRM_UT_STATE))
- drm_atomic_print_state(state);
+ drm_atomic_print_new_state(state, &p);
ret = drm_atomic_commit(state);
}
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index f00e5abdbbf4..b59b26a71ad5 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -315,9 +315,10 @@ int drm_master_open(struct drm_file *file_priv)
void drm_master_release(struct drm_file *file_priv)
{
struct drm_device *dev = file_priv->minor->dev;
- struct drm_master *master = file_priv->master;
+ struct drm_master *master;
mutex_lock(&dev->master_mutex);
+ master = file_priv->master;
if (file_priv->magic)
idr_remove(&file_priv->master->magic_map, file_priv->magic);
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 54d4cf1233e9..1ca51addb589 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -5,6 +5,7 @@
* Jesse Barnes <jesse.barnes@intel.com>
* Copyright © 2014 Intel Corporation
* Daniel Vetter <daniel.vetter@ffwll.ch>
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -236,7 +237,8 @@ int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
int __drm_atomic_helper_set_config(struct drm_mode_set *set,
struct drm_atomic_state *state);
-void drm_atomic_print_state(const struct drm_atomic_state *state);
+void drm_atomic_print_new_state(const struct drm_atomic_state *state,
+ struct drm_printer *p);
/* drm_atomic_uapi.c */
int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 32b7f8983b94..ad0795afc21c 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -94,6 +94,9 @@ static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
+static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
+ struct drm_dp_mst_branch *branch);
+
#define DBG_PREFIX "[dp_mst]"
#define DP_STR(x) [DP_ ## x] = #x
@@ -2501,7 +2504,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
{
struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port;
- int old_ddps, old_input, ret, i;
+ int old_ddps, ret;
u8 new_pdt;
bool new_mcs;
bool dowork = false, create_connector = false;
@@ -2533,7 +2536,6 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
}
old_ddps = port->ddps;
- old_input = port->input;
port->input = conn_stat->input_port;
port->ldps = conn_stat->legacy_device_plug_status;
port->ddps = conn_stat->displayport_device_plug_status;
@@ -2555,28 +2557,6 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
dowork = false;
}
- if (!old_input && old_ddps != port->ddps && !port->ddps) {
- for (i = 0; i < mgr->max_payloads; i++) {
- struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
- struct drm_dp_mst_port *port_validated;
-
- if (!vcpi)
- continue;
-
- port_validated =
- container_of(vcpi, struct drm_dp_mst_port, vcpi);
- port_validated =
- drm_dp_mst_topology_get_port_validated(mgr, port_validated);
- if (!port_validated) {
- mutex_lock(&mgr->payload_lock);
- vcpi->num_slots = 0;
- mutex_unlock(&mgr->payload_lock);
- } else {
- drm_dp_mst_topology_put_port(port_validated);
- }
- }
- }
-
if (port->connector)
drm_modeset_unlock(&mgr->base.lock);
else if (create_connector)
@@ -3389,6 +3369,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
struct drm_dp_mst_port *port;
int i, j;
int cur_slots = 1;
+ bool skip;
mutex_lock(&mgr->payload_lock);
for (i = 0; i < mgr->max_payloads; i++) {
@@ -3403,6 +3384,16 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
port = container_of(vcpi, struct drm_dp_mst_port,
vcpi);
+ mutex_lock(&mgr->lock);
+ skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
+ mutex_unlock(&mgr->lock);
+
+ if (skip) {
+ drm_dbg_kms(mgr->dev,
+ "Virtual channel %d is not in current topology\n",
+ i);
+ continue;
+ }
/* Validated ports don't matter if we're releasing
* VCPI
*/
@@ -3410,8 +3401,16 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
port = drm_dp_mst_topology_get_port_validated(
mgr, port);
if (!port) {
- mutex_unlock(&mgr->payload_lock);
- return -EINVAL;
+ if (vcpi->num_slots == payload->num_slots) {
+ cur_slots += vcpi->num_slots;
+ payload->start_slot = req_payload.start_slot;
+ continue;
+ } else {
+ drm_dbg_kms(mgr->dev,
+ "Failed to set payload to invalid sink\n");
+ mutex_unlock(&mgr->payload_lock);
+ return -EINVAL;
+ }
}
put_port = true;
}
@@ -3495,6 +3494,7 @@ int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
struct drm_dp_mst_port *port;
int i;
int ret = 0;
+ bool skip;
mutex_lock(&mgr->payload_lock);
for (i = 0; i < mgr->max_payloads; i++) {
@@ -3504,6 +3504,13 @@ int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+ mutex_lock(&mgr->lock);
+ skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
+ mutex_unlock(&mgr->lock);
+
+ if (skip)
+ continue;
+
drm_dbg_kms(mgr->dev, "payload %d %d\n", i, mgr->payloads[i].payload_state);
if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
@@ -4590,9 +4597,18 @@ EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port)
{
+ bool skip;
+
if (!port->vcpi.vcpi)
return;
+ mutex_lock(&mgr->lock);
+ skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
+ mutex_unlock(&mgr->lock);
+
+ if (skip)
+ return;
+
drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
port->vcpi.num_slots = 0;
port->vcpi.pbn = 0;
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 53d314103a37..98ae00661656 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -117,17 +117,18 @@ int drm_getunique(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_unique *u = data;
- struct drm_master *master = file_priv->master;
+ struct drm_master *master;
- mutex_lock(&master->dev->master_mutex);
+ mutex_lock(&dev->master_mutex);
+ master = file_priv->master;
if (u->unique_len >= master->unique_len) {
if (copy_to_user(u->unique, master->unique, master->unique_len)) {
- mutex_unlock(&master->dev->master_mutex);
+ mutex_unlock(&dev->master_mutex);
return -EFAULT;
}
}
u->unique_len = master->unique_len;
- mutex_unlock(&master->dev->master_mutex);
+ mutex_unlock(&dev->master_mutex);
return 0;
}
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
index 02718e3e859e..3aaee4730ec6 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
@@ -163,7 +163,7 @@ static inline int hyperv_pipe_init(struct hyperv_drm_device *hv)
&hyperv_pipe_funcs,
hyperv_formats,
ARRAY_SIZE(hyperv_formats),
- NULL,
+ hyperv_modifiers,
&hv->connector);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index b63d374dff23..f960f5d7664e 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -125,7 +125,7 @@ config DRM_I915_GVT_KVMGT
tristate "Enable KVM/VFIO support for Intel GVT-g"
depends on DRM_I915_GVT
depends on KVM
- depends on VFIO_MDEV && VFIO_MDEV_DEVICE
+ depends on VFIO_MDEV
default n
help
Choose this option if you want to enable KVMGT support for
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 16812488c5dd..43ec7fcd3f5d 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -729,8 +729,8 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ enum pipe pipe = crtc->pipe;
u32 tmp;
enum port port;
enum transcoder dsi_trans;
@@ -1253,15 +1253,36 @@ static void gen11_dsi_pre_enable(struct intel_atomic_state *state,
gen11_dsi_set_transcoder_timings(encoder, pipe_config);
}
+/*
+ * Wa_1409054076:icl,jsl,ehl
+ * When pipe A is disabled and MIPI DSI is enabled on pipe B,
+ * the AMT KVMR feature will incorrectly see pipe A as enabled.
+ * Set 0x42080 bit 23=1 before enabling DSI on pipe B and leave
+ * it set while DSI is enabled on pipe B
+ */
+static void icl_apply_kvmr_pipe_a_wa(struct intel_encoder *encoder,
+ enum pipe pipe, bool enable)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (DISPLAY_VER(dev_priv) == 11 && pipe == PIPE_B)
+ intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
+ IGNORE_KVMR_PIPE_A,
+ enable ? IGNORE_KVMR_PIPE_A : 0);
+}
static void gen11_dsi_enable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc);
drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
+ /* Wa_1409054076:icl,jsl,ehl */
+ icl_apply_kvmr_pipe_a_wa(encoder, crtc->pipe, true);
+
/* step6d: enable dsi transcoder */
gen11_dsi_enable_transcoder(encoder);
@@ -1415,6 +1436,7 @@ static void gen11_dsi_disable(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(old_conn_state->crtc);
/* step1: turn off backlight */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
@@ -1423,6 +1445,9 @@ static void gen11_dsi_disable(struct intel_atomic_state *state,
/* step2d,e: disable transcoder and wait */
gen11_dsi_disable_transcoder(encoder);
+ /* Wa_1409054076:icl,jsl,ehl */
+ icl_apply_kvmr_pipe_a_wa(encoder, crtc->pipe, false);
+
/* step2f,g: powerdown panel */
gen11_dsi_powerdown_panel(encoder);
@@ -1548,6 +1573,22 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
pipe_config->mode_flags |= I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE;
}
+static void gen11_dsi_sync_state(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum pipe pipe = intel_crtc->pipe;
+
+ /* wa verify 1409054076:icl,jsl,ehl */
+ if (DISPLAY_VER(dev_priv) == 11 && pipe == PIPE_B &&
+ !(intel_de_read(dev_priv, CHICKEN_PAR1_1) & IGNORE_KVMR_PIPE_A))
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] BIOS left IGNORE_KVMR_PIPE_A cleared with pipe B enabled\n",
+ encoder->base.base.id,
+ encoder->base.name);
+}
+
static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
@@ -1966,6 +2007,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
encoder->post_disable = gen11_dsi_post_disable;
encoder->port = port;
encoder->get_config = gen11_dsi_get_config;
+ encoder->sync_state = gen11_dsi_sync_state;
encoder->update_pipe = intel_panel_update_backlight;
encoder->compute_config = gen11_dsi_compute_config;
encoder->get_hw_state = gen11_dsi_get_hw_state;
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 648f1c0d3d39..408f82b0dc7d 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -38,6 +38,7 @@
#include "intel_crt.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
+#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
@@ -1081,6 +1082,8 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
crt->base.enable_clock = hsw_ddi_enable_clock;
crt->base.disable_clock = hsw_ddi_disable_clock;
crt->base.is_clock_enabled = hsw_ddi_is_clock_enabled;
+
+ intel_ddi_buf_trans_init(&crt->base);
} else {
if (HAS_PCH_SPLIT(dev_priv)) {
crt->base.compute_config = pch_crt_compute_config;
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 95ff1707b4bd..448c4d99ac35 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -163,12 +163,12 @@ static void intel_crtc_free(struct intel_crtc *crtc)
kfree(crtc);
}
-static void intel_crtc_destroy(struct drm_crtc *crtc)
+static void intel_crtc_destroy(struct drm_crtc *_crtc)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = to_intel_crtc(_crtc);
- drm_crtc_cleanup(crtc);
- kfree(intel_crtc);
+ drm_crtc_cleanup(&crtc->base);
+ kfree(crtc);
}
static int intel_crtc_late_register(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 390869bd6b63..26a3aa73fcc4 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -95,24 +95,18 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder,
* values in advance. This function programs the correct values for
* DP/eDP/FDI use cases.
*/
-void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
int i, n_entries;
enum port port = encoder->port;
- const struct ddi_buf_trans *ddi_translations;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
- ddi_translations = intel_ddi_get_buf_trans_fdi(dev_priv,
- &n_entries);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- ddi_translations = intel_ddi_get_buf_trans_edp(encoder,
- &n_entries);
- else
- ddi_translations = intel_ddi_get_buf_trans_dp(encoder,
- &n_entries);
+ const struct intel_ddi_buf_trans *ddi_translations;
+
+ ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+ return;
/* If we're boosting the current, set bit 31 of trans1 */
if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
@@ -121,9 +115,9 @@ void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
for (i = 0; i < n_entries; i++) {
intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, i),
- ddi_translations[i].trans1 | iboost_bit);
+ ddi_translations->entries[i].hsw.trans1 | iboost_bit);
intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, i),
- ddi_translations[i].trans2);
+ ddi_translations->entries[i].hsw.trans2);
}
}
@@ -132,17 +126,17 @@ void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
* values in advance. This function programs the correct values for
* HDMI/DVI use cases.
*/
-static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
- int level)
+static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int level)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
int n_entries;
enum port port = encoder->port;
- const struct ddi_buf_trans *ddi_translations;
-
- ddi_translations = intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
+ const struct intel_ddi_buf_trans *ddi_translations;
+ ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
@@ -155,9 +149,9 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
/* Entry 9 is for HDMI: */
intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, 9),
- ddi_translations[level].trans1 | iboost_bit);
+ ddi_translations->entries[level].hsw.trans1 | iboost_bit);
intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, 9),
- ddi_translations[level].trans2);
+ ddi_translations->entries[level].hsw.trans2);
}
void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
@@ -948,22 +942,16 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
iboost = intel_bios_encoder_dp_boost_level(encoder->devdata);
if (iboost == 0) {
- const struct ddi_buf_trans *ddi_translations;
+ const struct intel_ddi_buf_trans *ddi_translations;
int n_entries;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- ddi_translations = intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- ddi_translations = intel_ddi_get_buf_trans_edp(encoder, &n_entries);
- else
- ddi_translations = intel_ddi_get_buf_trans_dp(encoder, &n_entries);
-
+ ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
- iboost = ddi_translations[level].i_boost;
+ iboost = ddi_translations->entries[level].hsw.i_boost;
}
/* Make sure that the requested I_boost is valid */
@@ -983,21 +971,21 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
int level)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct bxt_ddi_buf_trans *ddi_translations;
+ const struct intel_ddi_buf_trans *ddi_translations;
enum port port = encoder->port;
int n_entries;
- ddi_translations = bxt_get_buf_trans(encoder, crtc_state, &n_entries);
+ ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
bxt_ddi_phy_set_signal_level(dev_priv, port,
- ddi_translations[level].margin,
- ddi_translations[level].scale,
- ddi_translations[level].enable,
- ddi_translations[level].deemphasis);
+ ddi_translations->entries[level].bxt.margin,
+ ddi_translations->entries[level].bxt.scale,
+ ddi_translations->entries[level].bxt.enable,
+ ddi_translations->entries[level].bxt.deemphasis);
}
static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp,
@@ -1005,36 +993,9 @@ static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp,
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
- enum phy phy = intel_port_to_phy(dev_priv, port);
int n_entries;
- if (DISPLAY_VER(dev_priv) >= 12) {
- if (intel_phy_is_combo(dev_priv, phy))
- tgl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
- else if (IS_ALDERLAKE_P(dev_priv))
- adlp_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
- else
- tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
- } else if (DISPLAY_VER(dev_priv) == 11) {
- if (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE))
- jsl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
- else if (IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE))
- ehl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
- else if (intel_phy_is_combo(dev_priv, phy))
- icl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
- else
- icl_get_mg_buf_trans(encoder, crtc_state, &n_entries);
- } else if (IS_CANNONLAKE(dev_priv)) {
- cnl_get_buf_trans(encoder, crtc_state, &n_entries);
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
- bxt_get_buf_trans(encoder, crtc_state, &n_entries);
- } else {
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- intel_ddi_get_buf_trans_edp(encoder, &n_entries);
- else
- intel_ddi_get_buf_trans_dp(encoder, &n_entries);
- }
+ encoder->get_buf_trans(encoder, crtc_state, &n_entries);
if (drm_WARN_ON(&dev_priv->drm, n_entries < 1))
n_entries = 1;
@@ -1061,13 +1022,12 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
int level)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct cnl_ddi_buf_trans *ddi_translations;
+ const struct intel_ddi_buf_trans *ddi_translations;
enum port port = encoder->port;
int n_entries, ln;
u32 val;
- ddi_translations = cnl_get_buf_trans(encoder, crtc_state, &n_entries);
-
+ ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
@@ -1083,8 +1043,8 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
val = intel_de_read(dev_priv, CNL_PORT_TX_DW2_LN0(port));
val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
- val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
- val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
+ val |= SWING_SEL_UPPER(ddi_translations->entries[level].cnl.dw2_swing_sel);
+ val |= SWING_SEL_LOWER(ddi_translations->entries[level].cnl.dw2_swing_sel);
/* Rcomp scalar is fixed as 0x98 for every table entry */
val |= RCOMP_SCALAR(0x98);
intel_de_write(dev_priv, CNL_PORT_TX_DW2_GRP(port), val);
@@ -1095,9 +1055,9 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
val = intel_de_read(dev_priv, CNL_PORT_TX_DW4_LN(ln, port));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
- val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
- val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
- val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
+ val |= POST_CURSOR_1(ddi_translations->entries[level].cnl.dw4_post_cursor_1);
+ val |= POST_CURSOR_2(ddi_translations->entries[level].cnl.dw4_post_cursor_2);
+ val |= CURSOR_COEFF(ddi_translations->entries[level].cnl.dw4_cursor_coeff);
intel_de_write(dev_priv, CNL_PORT_TX_DW4_LN(ln, port), val);
}
@@ -1112,7 +1072,7 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
/* Program PORT_TX_DW7 */
val = intel_de_read(dev_priv, CNL_PORT_TX_DW7_LN0(port));
val &= ~N_SCALAR_MASK;
- val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
+ val |= N_SCALAR(ddi_translations->entries[level].cnl.dw7_n_scalar);
intel_de_write(dev_priv, CNL_PORT_TX_DW7_GRP(port), val);
}
@@ -1182,20 +1142,12 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
int level)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct cnl_ddi_buf_trans *ddi_translations;
+ const struct intel_ddi_buf_trans *ddi_translations;
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
int n_entries, ln;
u32 val;
- if (DISPLAY_VER(dev_priv) >= 12)
- ddi_translations = tgl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
- else if (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE))
- ddi_translations = jsl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
- else if (IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE))
- ddi_translations = ehl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
- else
- ddi_translations = icl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
-
+ ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
@@ -1223,8 +1175,8 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
val = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy));
val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
- val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
- val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
+ val |= SWING_SEL_UPPER(ddi_translations->entries[level].cnl.dw2_swing_sel);
+ val |= SWING_SEL_LOWER(ddi_translations->entries[level].cnl.dw2_swing_sel);
/* Program Rcomp scalar for every table entry */
val |= RCOMP_SCALAR(0x98);
intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), val);
@@ -1235,16 +1187,16 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
- val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
- val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
- val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
+ val |= POST_CURSOR_1(ddi_translations->entries[level].cnl.dw4_post_cursor_1);
+ val |= POST_CURSOR_2(ddi_translations->entries[level].cnl.dw4_post_cursor_2);
+ val |= CURSOR_COEFF(ddi_translations->entries[level].cnl.dw4_cursor_coeff);
intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val);
}
/* Program PORT_TX_DW7 */
val = intel_de_read(dev_priv, ICL_PORT_TX_DW7_LN0(phy));
val &= ~N_SCALAR_MASK;
- val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
+ val |= N_SCALAR(ddi_translations->entries[level].cnl.dw7_n_scalar);
intel_de_write(dev_priv, ICL_PORT_TX_DW7_GRP(phy), val);
}
@@ -1315,15 +1267,14 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
- const struct icl_mg_phy_ddi_buf_trans *ddi_translations;
+ const struct intel_ddi_buf_trans *ddi_translations;
int n_entries, ln;
u32 val;
if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
return;
- ddi_translations = icl_get_mg_buf_trans(encoder, crtc_state, &n_entries);
-
+ ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
@@ -1345,13 +1296,13 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val = intel_de_read(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
- ddi_translations[level].cri_txdeemph_override_17_12);
+ ddi_translations->entries[level].mg.cri_txdeemph_override_17_12);
intel_de_write(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port), val);
val = intel_de_read(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
- ddi_translations[level].cri_txdeemph_override_17_12);
+ ddi_translations->entries[level].mg.cri_txdeemph_override_17_12);
intel_de_write(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port), val);
}
@@ -1361,9 +1312,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
- ddi_translations[level].cri_txdeemph_override_5_0) |
+ ddi_translations->entries[level].mg.cri_txdeemph_override_5_0) |
CRI_TXDEEMPH_OVERRIDE_11_6(
- ddi_translations[level].cri_txdeemph_override_11_6) |
+ ddi_translations->entries[level].mg.cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
intel_de_write(dev_priv, MG_TX1_DRVCTRL(ln, tc_port), val);
@@ -1371,9 +1322,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
- ddi_translations[level].cri_txdeemph_override_5_0) |
+ ddi_translations->entries[level].mg.cri_txdeemph_override_5_0) |
CRI_TXDEEMPH_OVERRIDE_11_6(
- ddi_translations[level].cri_txdeemph_override_11_6) |
+ ddi_translations->entries[level].mg.cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
intel_de_write(dev_priv, MG_TX2_DRVCTRL(ln, tc_port), val);
@@ -1453,18 +1404,14 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
- const struct tgl_dkl_phy_ddi_buf_trans *ddi_translations;
+ const struct intel_ddi_buf_trans *ddi_translations;
u32 val, dpcnt_mask, dpcnt_val;
int n_entries, ln;
if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
return;
- if (IS_ALDERLAKE_P(dev_priv))
- ddi_translations = adlp_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
- else
- ddi_translations = tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
-
+ ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
@@ -1473,9 +1420,9 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
dpcnt_mask = (DKL_TX_PRESHOOT_COEFF_MASK |
DKL_TX_DE_EMPAHSIS_COEFF_MASK |
DKL_TX_VSWING_CONTROL_MASK);
- dpcnt_val = DKL_TX_VSWING_CONTROL(ddi_translations[level].dkl_vswing_control);
- dpcnt_val |= DKL_TX_DE_EMPHASIS_COEFF(ddi_translations[level].dkl_de_emphasis_control);
- dpcnt_val |= DKL_TX_PRESHOOT_COEFF(ddi_translations[level].dkl_preshoot_control);
+ dpcnt_val = DKL_TX_VSWING_CONTROL(ddi_translations->entries[level].dkl.dkl_vswing_control);
+ dpcnt_val |= DKL_TX_DE_EMPHASIS_COEFF(ddi_translations->entries[level].dkl.dkl_de_emphasis_control);
+ dpcnt_val |= DKL_TX_PRESHOOT_COEFF(ddi_translations->entries[level].dkl.dkl_preshoot_control);
for (ln = 0; ln < 2; ln++) {
intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
@@ -1791,10 +1738,23 @@ static struct intel_shared_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum intel_dpll_id id;
+ u32 val;
+
+ val = intel_de_read(i915, DG1_DPCLKA_CFGCR0(phy));
+ val &= DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
+ val >>= DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
+ id = val;
+
+ /*
+ * _DG1_DPCLKA0_CFGCR0 maps between DPLL 0 and 1 with one bit for phy A
+ * and B while _DG1_DPCLKA1_CFGCR0 maps between DPLL 2 and 3 with one
+ * bit for phy C and D.
+ */
+ if (phy >= PHY_C)
+ id += DPLL_ID_DG1_DPLL2;
- return _cnl_ddi_get_pll(i915, DG1_DPCLKA_CFGCR0(phy),
- DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
- DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy));
+ return intel_get_shared_dpll_by_id(i915, id);
}
static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
@@ -2702,7 +2662,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
bxt_ddi_vswing_sequence(encoder, crtc_state, level);
else
- intel_prepare_dp_ddi_buffers(encoder, crtc_state);
+ hsw_prepare_dp_ddi_buffers(encoder, crtc_state);
intel_ddi_power_up_lanes(encoder, crtc_state);
@@ -2810,6 +2770,7 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state,
conn_state);
/* FIXME precompute everything properly */
+ /* FIXME how do we turn infoframes off again? */
if (dig_port->lspcon.active && dig_port->dp.has_hdmi_sink)
dig_port->set_infoframes(encoder,
crtc_state->has_infoframe,
@@ -3149,7 +3110,7 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
bxt_ddi_vswing_sequence(encoder, crtc_state, level);
else
- intel_prepare_hdmi_ddi_buffers(encoder, level);
+ hsw_prepare_hdmi_ddi_buffers(encoder, crtc_state, level);
if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
skl_ddi_set_iboost(encoder, crtc_state, level);
@@ -3577,7 +3538,7 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
u32 temp, flags = 0;
@@ -3640,7 +3601,7 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
pipe_config->lane_count =
((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
- intel_dp_get_m_n(intel_crtc, pipe_config);
+ intel_dp_get_m_n(crtc, pipe_config);
if (DISPLAY_VER(dev_priv) >= 11) {
i915_reg_t dp_tp_ctl = dp_tp_ctl_reg(encoder, pipe_config);
@@ -3670,7 +3631,7 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
pipe_config->mst_master_transcoder =
REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, temp);
- intel_dp_get_m_n(intel_crtc, pipe_config);
+ intel_dp_get_m_n(crtc, pipe_config);
pipe_config->infoframes.enable |=
intel_hdmi_infoframes_enabled(encoder, pipe_config);
@@ -4496,6 +4457,36 @@ static bool intel_ddi_is_tc(struct drm_i915_private *i915, enum port port)
return false;
}
+static void intel_ddi_encoder_suspend(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ intel_dp_encoder_suspend(encoder);
+
+ if (!intel_phy_is_tc(i915, phy))
+ return;
+
+ intel_tc_port_disconnect_phy(dig_port);
+}
+
+static void intel_ddi_encoder_shutdown(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ intel_dp_encoder_shutdown(encoder);
+
+ if (!intel_phy_is_tc(i915, phy))
+ return;
+
+ intel_tc_port_disconnect_phy(dig_port);
+}
+
#define port_tc_name(port) ((port) - PORT_TC1 + '1')
#define tc_port_name(tc_port) ((tc_port) - TC_PORT_1 + '1')
@@ -4605,8 +4596,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->get_hw_state = intel_ddi_get_hw_state;
encoder->sync_state = intel_ddi_sync_state;
encoder->initial_fastset_check = intel_ddi_initial_fastset_check;
- encoder->suspend = intel_dp_encoder_suspend;
- encoder->shutdown = intel_dp_encoder_shutdown;
+ encoder->suspend = intel_ddi_encoder_suspend;
+ encoder->shutdown = intel_ddi_encoder_shutdown;
encoder->get_power_domains = intel_ddi_get_power_domains;
encoder->type = INTEL_OUTPUT_DDI;
@@ -4674,6 +4665,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->get_config = hsw_ddi_get_config;
}
+ intel_ddi_buf_trans_init(encoder);
+
if (DISPLAY_VER(dev_priv) >= 13)
encoder->hpd_pin = xelpd_hpd_pin(dev_priv, port);
else if (IS_DG1(dev_priv))
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 59c6b01d4199..7d448485d887 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -40,8 +40,8 @@ bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder);
void hsw_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder);
-void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
+void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
enum port port);
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
index 8bfd00f49f2a..63b1ae830d9a 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -13,858 +13,1129 @@
* them for both DP and FDI transports, allowing those ports to
* automatically adapt to HDMI connections as well
*/
-static const struct ddi_buf_trans hsw_ddi_translations_dp[] = {
- { 0x00FFFFFF, 0x0006000E, 0x0 },
- { 0x00D75FFF, 0x0005000A, 0x0 },
- { 0x00C30FFF, 0x00040006, 0x0 },
- { 0x80AAAFFF, 0x000B0000, 0x0 },
- { 0x00FFFFFF, 0x0005000A, 0x0 },
- { 0x00D75FFF, 0x000C0004, 0x0 },
- { 0x80C30FFF, 0x000B0000, 0x0 },
- { 0x00FFFFFF, 0x00040006, 0x0 },
- { 0x80D75FFF, 0x000B0000, 0x0 },
-};
-
-static const struct ddi_buf_trans hsw_ddi_translations_fdi[] = {
- { 0x00FFFFFF, 0x0007000E, 0x0 },
- { 0x00D75FFF, 0x000F000A, 0x0 },
- { 0x00C30FFF, 0x00060006, 0x0 },
- { 0x00AAAFFF, 0x001E0000, 0x0 },
- { 0x00FFFFFF, 0x000F000A, 0x0 },
- { 0x00D75FFF, 0x00160004, 0x0 },
- { 0x00C30FFF, 0x001E0000, 0x0 },
- { 0x00FFFFFF, 0x00060006, 0x0 },
- { 0x00D75FFF, 0x001E0000, 0x0 },
-};
-
-static const struct ddi_buf_trans hsw_ddi_translations_hdmi[] = {
- /* Idx NT mV d T mV d db */
- { 0x00FFFFFF, 0x0006000E, 0x0 },/* 0: 400 400 0 */
- { 0x00E79FFF, 0x000E000C, 0x0 },/* 1: 400 500 2 */
- { 0x00D75FFF, 0x0005000A, 0x0 },/* 2: 400 600 3.5 */
- { 0x00FFFFFF, 0x0005000A, 0x0 },/* 3: 600 600 0 */
- { 0x00E79FFF, 0x001D0007, 0x0 },/* 4: 600 750 2 */
- { 0x00D75FFF, 0x000C0004, 0x0 },/* 5: 600 900 3.5 */
- { 0x00FFFFFF, 0x00040006, 0x0 },/* 6: 800 800 0 */
- { 0x80E79FFF, 0x00030002, 0x0 },/* 7: 800 1000 2 */
- { 0x00FFFFFF, 0x00140005, 0x0 },/* 8: 850 850 0 */
- { 0x00FFFFFF, 0x000C0004, 0x0 },/* 9: 900 900 0 */
- { 0x00FFFFFF, 0x001C0003, 0x0 },/* 10: 950 950 0 */
- { 0x80FFFFFF, 0x00030002, 0x0 },/* 11: 1000 1000 0 */
-};
-
-static const struct ddi_buf_trans bdw_ddi_translations_edp[] = {
- { 0x00FFFFFF, 0x00000012, 0x0 },
- { 0x00EBAFFF, 0x00020011, 0x0 },
- { 0x00C71FFF, 0x0006000F, 0x0 },
- { 0x00AAAFFF, 0x000E000A, 0x0 },
- { 0x00FFFFFF, 0x00020011, 0x0 },
- { 0x00DB6FFF, 0x0005000F, 0x0 },
- { 0x00BEEFFF, 0x000A000C, 0x0 },
- { 0x00FFFFFF, 0x0005000F, 0x0 },
- { 0x00DB6FFF, 0x000A000C, 0x0 },
-};
-
-static const struct ddi_buf_trans bdw_ddi_translations_dp[] = {
- { 0x00FFFFFF, 0x0007000E, 0x0 },
- { 0x00D75FFF, 0x000E000A, 0x0 },
- { 0x00BEFFFF, 0x00140006, 0x0 },
- { 0x80B2CFFF, 0x001B0002, 0x0 },
- { 0x00FFFFFF, 0x000E000A, 0x0 },
- { 0x00DB6FFF, 0x00160005, 0x0 },
- { 0x80C71FFF, 0x001A0002, 0x0 },
- { 0x00F7DFFF, 0x00180004, 0x0 },
- { 0x80D75FFF, 0x001B0002, 0x0 },
-};
-
-static const struct ddi_buf_trans bdw_ddi_translations_fdi[] = {
- { 0x00FFFFFF, 0x0001000E, 0x0 },
- { 0x00D75FFF, 0x0004000A, 0x0 },
- { 0x00C30FFF, 0x00070006, 0x0 },
- { 0x00AAAFFF, 0x000C0000, 0x0 },
- { 0x00FFFFFF, 0x0004000A, 0x0 },
- { 0x00D75FFF, 0x00090004, 0x0 },
- { 0x00C30FFF, 0x000C0000, 0x0 },
- { 0x00FFFFFF, 0x00070006, 0x0 },
- { 0x00D75FFF, 0x000C0000, 0x0 },
-};
-
-static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
- /* Idx NT mV d T mV df db */
- { 0x00FFFFFF, 0x0007000E, 0x0 },/* 0: 400 400 0 */
- { 0x00D75FFF, 0x000E000A, 0x0 },/* 1: 400 600 3.5 */
- { 0x00BEFFFF, 0x00140006, 0x0 },/* 2: 400 800 6 */
- { 0x00FFFFFF, 0x0009000D, 0x0 },/* 3: 450 450 0 */
- { 0x00FFFFFF, 0x000E000A, 0x0 },/* 4: 600 600 0 */
- { 0x00D7FFFF, 0x00140006, 0x0 },/* 5: 600 800 2.5 */
- { 0x80CB2FFF, 0x001B0002, 0x0 },/* 6: 600 1000 4.5 */
- { 0x00FFFFFF, 0x00140006, 0x0 },/* 7: 800 800 0 */
- { 0x80E79FFF, 0x001B0002, 0x0 },/* 8: 800 1000 2 */
- { 0x80FFFFFF, 0x001B0002, 0x0 },/* 9: 1000 1000 0 */
+static const union intel_ddi_buf_trans_entry _hsw_ddi_translations_dp[] = {
+ { .hsw = { 0x00FFFFFF, 0x0006000E, 0x0 } },
+ { .hsw = { 0x00D75FFF, 0x0005000A, 0x0 } },
+ { .hsw = { 0x00C30FFF, 0x00040006, 0x0 } },
+ { .hsw = { 0x80AAAFFF, 0x000B0000, 0x0 } },
+ { .hsw = { 0x00FFFFFF, 0x0005000A, 0x0 } },
+ { .hsw = { 0x00D75FFF, 0x000C0004, 0x0 } },
+ { .hsw = { 0x80C30FFF, 0x000B0000, 0x0 } },
+ { .hsw = { 0x00FFFFFF, 0x00040006, 0x0 } },
+ { .hsw = { 0x80D75FFF, 0x000B0000, 0x0 } },
+};
+
+static const struct intel_ddi_buf_trans hsw_ddi_translations_dp = {
+ .entries = _hsw_ddi_translations_dp,
+ .num_entries = ARRAY_SIZE(_hsw_ddi_translations_dp),
+};
+
+static const union intel_ddi_buf_trans_entry _hsw_ddi_translations_fdi[] = {
+ { .hsw = { 0x00FFFFFF, 0x0007000E, 0x0 } },
+ { .hsw = { 0x00D75FFF, 0x000F000A, 0x0 } },
+ { .hsw = { 0x00C30FFF, 0x00060006, 0x0 } },
+ { .hsw = { 0x00AAAFFF, 0x001E0000, 0x0 } },
+ { .hsw = { 0x00FFFFFF, 0x000F000A, 0x0 } },
+ { .hsw = { 0x00D75FFF, 0x00160004, 0x0 } },
+ { .hsw = { 0x00C30FFF, 0x001E0000, 0x0 } },
+ { .hsw = { 0x00FFFFFF, 0x00060006, 0x0 } },
+ { .hsw = { 0x00D75FFF, 0x001E0000, 0x0 } },
+};
+
+static const struct intel_ddi_buf_trans hsw_ddi_translations_fdi = {
+ .entries = _hsw_ddi_translations_fdi,
+ .num_entries = ARRAY_SIZE(_hsw_ddi_translations_fdi),
+};
+
+static const union intel_ddi_buf_trans_entry _hsw_ddi_translations_hdmi[] = {
+ /* Idx NT mV d T mV d db */
+ { .hsw = { 0x00FFFFFF, 0x0006000E, 0x0 } }, /* 0: 400 400 0 */
+ { .hsw = { 0x00E79FFF, 0x000E000C, 0x0 } }, /* 1: 400 500 2 */
+ { .hsw = { 0x00D75FFF, 0x0005000A, 0x0 } }, /* 2: 400 600 3.5 */
+ { .hsw = { 0x00FFFFFF, 0x0005000A, 0x0 } }, /* 3: 600 600 0 */
+ { .hsw = { 0x00E79FFF, 0x001D0007, 0x0 } }, /* 4: 600 750 2 */
+ { .hsw = { 0x00D75FFF, 0x000C0004, 0x0 } }, /* 5: 600 900 3.5 */
+ { .hsw = { 0x00FFFFFF, 0x00040006, 0x0 } }, /* 6: 800 800 0 */
+ { .hsw = { 0x80E79FFF, 0x00030002, 0x0 } }, /* 7: 800 1000 2 */
+ { .hsw = { 0x00FFFFFF, 0x00140005, 0x0 } }, /* 8: 850 850 0 */
+ { .hsw = { 0x00FFFFFF, 0x000C0004, 0x0 } }, /* 9: 900 900 0 */
+ { .hsw = { 0x00FFFFFF, 0x001C0003, 0x0 } }, /* 10: 950 950 0 */
+ { .hsw = { 0x80FFFFFF, 0x00030002, 0x0 } }, /* 11: 1000 1000 0 */
+};
+
+static const struct intel_ddi_buf_trans hsw_ddi_translations_hdmi = {
+ .entries = _hsw_ddi_translations_hdmi,
+ .num_entries = ARRAY_SIZE(_hsw_ddi_translations_hdmi),
+ .hdmi_default_entry = 6,
+};
+
+static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_edp[] = {
+ { .hsw = { 0x00FFFFFF, 0x00000012, 0x0 } },
+ { .hsw = { 0x00EBAFFF, 0x00020011, 0x0 } },
+ { .hsw = { 0x00C71FFF, 0x0006000F, 0x0 } },
+ { .hsw = { 0x00AAAFFF, 0x000E000A, 0x0 } },
+ { .hsw = { 0x00FFFFFF, 0x00020011, 0x0 } },
+ { .hsw = { 0x00DB6FFF, 0x0005000F, 0x0 } },
+ { .hsw = { 0x00BEEFFF, 0x000A000C, 0x0 } },
+ { .hsw = { 0x00FFFFFF, 0x0005000F, 0x0 } },
+ { .hsw = { 0x00DB6FFF, 0x000A000C, 0x0 } },
+};
+
+static const struct intel_ddi_buf_trans bdw_ddi_translations_edp = {
+ .entries = _bdw_ddi_translations_edp,
+ .num_entries = ARRAY_SIZE(_bdw_ddi_translations_edp),
+};
+
+static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_dp[] = {
+ { .hsw = { 0x00FFFFFF, 0x0007000E, 0x0 } },
+ { .hsw = { 0x00D75FFF, 0x000E000A, 0x0 } },
+ { .hsw = { 0x00BEFFFF, 0x00140006, 0x0 } },
+ { .hsw = { 0x80B2CFFF, 0x001B0002, 0x0 } },
+ { .hsw = { 0x00FFFFFF, 0x000E000A, 0x0 } },
+ { .hsw = { 0x00DB6FFF, 0x00160005, 0x0 } },
+ { .hsw = { 0x80C71FFF, 0x001A0002, 0x0 } },
+ { .hsw = { 0x00F7DFFF, 0x00180004, 0x0 } },
+ { .hsw = { 0x80D75FFF, 0x001B0002, 0x0 } },
+};
+
+static const struct intel_ddi_buf_trans bdw_ddi_translations_dp = {
+ .entries = _bdw_ddi_translations_dp,
+ .num_entries = ARRAY_SIZE(_bdw_ddi_translations_dp),
+};
+
+static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_fdi[] = {
+ { .hsw = { 0x00FFFFFF, 0x0001000E, 0x0 } },
+ { .hsw = { 0x00D75FFF, 0x0004000A, 0x0 } },
+ { .hsw = { 0x00C30FFF, 0x00070006, 0x0 } },
+ { .hsw = { 0x00AAAFFF, 0x000C0000, 0x0 } },
+ { .hsw = { 0x00FFFFFF, 0x0004000A, 0x0 } },
+ { .hsw = { 0x00D75FFF, 0x00090004, 0x0 } },
+ { .hsw = { 0x00C30FFF, 0x000C0000, 0x0 } },
+ { .hsw = { 0x00FFFFFF, 0x00070006, 0x0 } },
+ { .hsw = { 0x00D75FFF, 0x000C0000, 0x0 } },
+};
+
+static const struct intel_ddi_buf_trans bdw_ddi_translations_fdi = {
+ .entries = _bdw_ddi_translations_fdi,
+ .num_entries = ARRAY_SIZE(_bdw_ddi_translations_fdi),
+};
+
+static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_hdmi[] = {
+ /* Idx NT mV d T mV df db */
+ { .hsw = { 0x00FFFFFF, 0x0007000E, 0x0 } }, /* 0: 400 400 0 */
+ { .hsw = { 0x00D75FFF, 0x000E000A, 0x0 } }, /* 1: 400 600 3.5 */
+ { .hsw = { 0x00BEFFFF, 0x00140006, 0x0 } }, /* 2: 400 800 6 */
+ { .hsw = { 0x00FFFFFF, 0x0009000D, 0x0 } }, /* 3: 450 450 0 */
+ { .hsw = { 0x00FFFFFF, 0x000E000A, 0x0 } }, /* 4: 600 600 0 */
+ { .hsw = { 0x00D7FFFF, 0x00140006, 0x0 } }, /* 5: 600 800 2.5 */
+ { .hsw = { 0x80CB2FFF, 0x001B0002, 0x0 } }, /* 6: 600 1000 4.5 */
+ { .hsw = { 0x00FFFFFF, 0x00140006, 0x0 } }, /* 7: 800 800 0 */
+ { .hsw = { 0x80E79FFF, 0x001B0002, 0x0 } }, /* 8: 800 1000 2 */
+ { .hsw = { 0x80FFFFFF, 0x001B0002, 0x0 } }, /* 9: 1000 1000 0 */
+};
+
+static const struct intel_ddi_buf_trans bdw_ddi_translations_hdmi = {
+ .entries = _bdw_ddi_translations_hdmi,
+ .num_entries = ARRAY_SIZE(_bdw_ddi_translations_hdmi),
+ .hdmi_default_entry = 7,
};
/* Skylake H and S */
-static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
- { 0x00002016, 0x000000A0, 0x0 },
- { 0x00005012, 0x0000009B, 0x0 },
- { 0x00007011, 0x00000088, 0x0 },
- { 0x80009010, 0x000000C0, 0x1 },
- { 0x00002016, 0x0000009B, 0x0 },
- { 0x00005012, 0x00000088, 0x0 },
- { 0x80007011, 0x000000C0, 0x1 },
- { 0x00002016, 0x000000DF, 0x0 },
- { 0x80005012, 0x000000C0, 0x1 },
+static const union intel_ddi_buf_trans_entry _skl_ddi_translations_dp[] = {
+ { .hsw = { 0x00002016, 0x000000A0, 0x0 } },
+ { .hsw = { 0x00005012, 0x0000009B, 0x0 } },
+ { .hsw = { 0x00007011, 0x00000088, 0x0 } },
+ { .hsw = { 0x80009010, 0x000000C0, 0x1 } },
+ { .hsw = { 0x00002016, 0x0000009B, 0x0 } },
+ { .hsw = { 0x00005012, 0x00000088, 0x0 } },
+ { .hsw = { 0x80007011, 0x000000C0, 0x1 } },
+ { .hsw = { 0x00002016, 0x000000DF, 0x0 } },
+ { .hsw = { 0x80005012, 0x000000C0, 0x1 } },
+};
+
+static const struct intel_ddi_buf_trans skl_ddi_translations_dp = {
+ .entries = _skl_ddi_translations_dp,
+ .num_entries = ARRAY_SIZE(_skl_ddi_translations_dp),
};
/* Skylake U */
-static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
- { 0x0000201B, 0x000000A2, 0x0 },
- { 0x00005012, 0x00000088, 0x0 },
- { 0x80007011, 0x000000CD, 0x1 },
- { 0x80009010, 0x000000C0, 0x1 },
- { 0x0000201B, 0x0000009D, 0x0 },
- { 0x80005012, 0x000000C0, 0x1 },
- { 0x80007011, 0x000000C0, 0x1 },
- { 0x00002016, 0x00000088, 0x0 },
- { 0x80005012, 0x000000C0, 0x1 },
+static const union intel_ddi_buf_trans_entry _skl_u_ddi_translations_dp[] = {
+ { .hsw = { 0x0000201B, 0x000000A2, 0x0 } },
+ { .hsw = { 0x00005012, 0x00000088, 0x0 } },
+ { .hsw = { 0x80007011, 0x000000CD, 0x1 } },
+ { .hsw = { 0x80009010, 0x000000C0, 0x1 } },
+ { .hsw = { 0x0000201B, 0x0000009D, 0x0 } },
+ { .hsw = { 0x80005012, 0x000000C0, 0x1 } },
+ { .hsw = { 0x80007011, 0x000000C0, 0x1 } },
+ { .hsw = { 0x00002016, 0x00000088, 0x0 } },
+ { .hsw = { 0x80005012, 0x000000C0, 0x1 } },
+};
+
+static const struct intel_ddi_buf_trans skl_u_ddi_translations_dp = {
+ .entries = _skl_u_ddi_translations_dp,
+ .num_entries = ARRAY_SIZE(_skl_u_ddi_translations_dp),
};
/* Skylake Y */
-static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
- { 0x00000018, 0x000000A2, 0x0 },
- { 0x00005012, 0x00000088, 0x0 },
- { 0x80007011, 0x000000CD, 0x3 },
- { 0x80009010, 0x000000C0, 0x3 },
- { 0x00000018, 0x0000009D, 0x0 },
- { 0x80005012, 0x000000C0, 0x3 },
- { 0x80007011, 0x000000C0, 0x3 },
- { 0x00000018, 0x00000088, 0x0 },
- { 0x80005012, 0x000000C0, 0x3 },
+static const union intel_ddi_buf_trans_entry _skl_y_ddi_translations_dp[] = {
+ { .hsw = { 0x00000018, 0x000000A2, 0x0 } },
+ { .hsw = { 0x00005012, 0x00000088, 0x0 } },
+ { .hsw = { 0x80007011, 0x000000CD, 0x3 } },
+ { .hsw = { 0x80009010, 0x000000C0, 0x3 } },
+ { .hsw = { 0x00000018, 0x0000009D, 0x0 } },
+ { .hsw = { 0x80005012, 0x000000C0, 0x3 } },
+ { .hsw = { 0x80007011, 0x000000C0, 0x3 } },
+ { .hsw = { 0x00000018, 0x00000088, 0x0 } },
+ { .hsw = { 0x80005012, 0x000000C0, 0x3 } },
+};
+
+static const struct intel_ddi_buf_trans skl_y_ddi_translations_dp = {
+ .entries = _skl_y_ddi_translations_dp,
+ .num_entries = ARRAY_SIZE(_skl_y_ddi_translations_dp),
};
/* Kabylake H and S */
-static const struct ddi_buf_trans kbl_ddi_translations_dp[] = {
- { 0x00002016, 0x000000A0, 0x0 },
- { 0x00005012, 0x0000009B, 0x0 },
- { 0x00007011, 0x00000088, 0x0 },
- { 0x80009010, 0x000000C0, 0x1 },
- { 0x00002016, 0x0000009B, 0x0 },
- { 0x00005012, 0x00000088, 0x0 },
- { 0x80007011, 0x000000C0, 0x1 },
- { 0x00002016, 0x00000097, 0x0 },
- { 0x80005012, 0x000000C0, 0x1 },
+static const union intel_ddi_buf_trans_entry _kbl_ddi_translations_dp[] = {
+ { .hsw = { 0x00002016, 0x000000A0, 0x0 } },
+ { .hsw = { 0x00005012, 0x0000009B, 0x0 } },
+ { .hsw = { 0x00007011, 0x00000088, 0x0 } },
+ { .hsw = { 0x80009010, 0x000000C0, 0x1 } },
+ { .hsw = { 0x00002016, 0x0000009B, 0x0 } },
+ { .hsw = { 0x00005012, 0x00000088, 0x0 } },
+ { .hsw = { 0x80007011, 0x000000C0, 0x1 } },
+ { .hsw = { 0x00002016, 0x00000097, 0x0 } },
+ { .hsw = { 0x80005012, 0x000000C0, 0x1 } },
+};
+
+static const struct intel_ddi_buf_trans kbl_ddi_translations_dp = {
+ .entries = _kbl_ddi_translations_dp,
+ .num_entries = ARRAY_SIZE(_kbl_ddi_translations_dp),
};
/* Kabylake U */
-static const struct ddi_buf_trans kbl_u_ddi_translations_dp[] = {
- { 0x0000201B, 0x000000A1, 0x0 },
- { 0x00005012, 0x00000088, 0x0 },
- { 0x80007011, 0x000000CD, 0x3 },
- { 0x80009010, 0x000000C0, 0x3 },
- { 0x0000201B, 0x0000009D, 0x0 },
- { 0x80005012, 0x000000C0, 0x3 },
- { 0x80007011, 0x000000C0, 0x3 },
- { 0x00002016, 0x0000004F, 0x0 },
- { 0x80005012, 0x000000C0, 0x3 },
+static const union intel_ddi_buf_trans_entry _kbl_u_ddi_translations_dp[] = {
+ { .hsw = { 0x0000201B, 0x000000A1, 0x0 } },
+ { .hsw = { 0x00005012, 0x00000088, 0x0 } },
+ { .hsw = { 0x80007011, 0x000000CD, 0x3 } },
+ { .hsw = { 0x80009010, 0x000000C0, 0x3 } },
+ { .hsw = { 0x0000201B, 0x0000009D, 0x0 } },
+ { .hsw = { 0x80005012, 0x000000C0, 0x3 } },
+ { .hsw = { 0x80007011, 0x000000C0, 0x3 } },
+ { .hsw = { 0x00002016, 0x0000004F, 0x0 } },
+ { .hsw = { 0x80005012, 0x000000C0, 0x3 } },
+};
+
+static const struct intel_ddi_buf_trans kbl_u_ddi_translations_dp = {
+ .entries = _kbl_u_ddi_translations_dp,
+ .num_entries = ARRAY_SIZE(_kbl_u_ddi_translations_dp),
};
/* Kabylake Y */
-static const struct ddi_buf_trans kbl_y_ddi_translations_dp[] = {
- { 0x00001017, 0x000000A1, 0x0 },
- { 0x00005012, 0x00000088, 0x0 },
- { 0x80007011, 0x000000CD, 0x3 },
- { 0x8000800F, 0x000000C0, 0x3 },
- { 0x00001017, 0x0000009D, 0x0 },
- { 0x80005012, 0x000000C0, 0x3 },
- { 0x80007011, 0x000000C0, 0x3 },
- { 0x00001017, 0x0000004C, 0x0 },
- { 0x80005012, 0x000000C0, 0x3 },
+static const union intel_ddi_buf_trans_entry _kbl_y_ddi_translations_dp[] = {
+ { .hsw = { 0x00001017, 0x000000A1, 0x0 } },
+ { .hsw = { 0x00005012, 0x00000088, 0x0 } },
+ { .hsw = { 0x80007011, 0x000000CD, 0x3 } },
+ { .hsw = { 0x8000800F, 0x000000C0, 0x3 } },
+ { .hsw = { 0x00001017, 0x0000009D, 0x0 } },
+ { .hsw = { 0x80005012, 0x000000C0, 0x3 } },
+ { .hsw = { 0x80007011, 0x000000C0, 0x3 } },
+ { .hsw = { 0x00001017, 0x0000004C, 0x0 } },
+ { .hsw = { 0x80005012, 0x000000C0, 0x3 } },
+};
+
+static const struct intel_ddi_buf_trans kbl_y_ddi_translations_dp = {
+ .entries = _kbl_y_ddi_translations_dp,
+ .num_entries = ARRAY_SIZE(_kbl_y_ddi_translations_dp),
};
/*
* Skylake/Kabylake H and S
* eDP 1.4 low vswing translation parameters
*/
-static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
- { 0x00000018, 0x000000A8, 0x0 },
- { 0x00004013, 0x000000A9, 0x0 },
- { 0x00007011, 0x000000A2, 0x0 },
- { 0x00009010, 0x0000009C, 0x0 },
- { 0x00000018, 0x000000A9, 0x0 },
- { 0x00006013, 0x000000A2, 0x0 },
- { 0x00007011, 0x000000A6, 0x0 },
- { 0x00000018, 0x000000AB, 0x0 },
- { 0x00007013, 0x0000009F, 0x0 },
- { 0x00000018, 0x000000DF, 0x0 },
+static const union intel_ddi_buf_trans_entry _skl_ddi_translations_edp[] = {
+ { .hsw = { 0x00000018, 0x000000A8, 0x0 } },
+ { .hsw = { 0x00004013, 0x000000A9, 0x0 } },
+ { .hsw = { 0x00007011, 0x000000A2, 0x0 } },
+ { .hsw = { 0x00009010, 0x0000009C, 0x0 } },
+ { .hsw = { 0x00000018, 0x000000A9, 0x0 } },
+ { .hsw = { 0x00006013, 0x000000A2, 0x0 } },
+ { .hsw = { 0x00007011, 0x000000A6, 0x0 } },
+ { .hsw = { 0x00000018, 0x000000AB, 0x0 } },
+ { .hsw = { 0x00007013, 0x0000009F, 0x0 } },
+ { .hsw = { 0x00000018, 0x000000DF, 0x0 } },
+};
+
+static const struct intel_ddi_buf_trans skl_ddi_translations_edp = {
+ .entries = _skl_ddi_translations_edp,
+ .num_entries = ARRAY_SIZE(_skl_ddi_translations_edp),
};
/*
* Skylake/Kabylake U
* eDP 1.4 low vswing translation parameters
*/
-static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = {
- { 0x00000018, 0x000000A8, 0x0 },
- { 0x00004013, 0x000000A9, 0x0 },
- { 0x00007011, 0x000000A2, 0x0 },
- { 0x00009010, 0x0000009C, 0x0 },
- { 0x00000018, 0x000000A9, 0x0 },
- { 0x00006013, 0x000000A2, 0x0 },
- { 0x00007011, 0x000000A6, 0x0 },
- { 0x00002016, 0x000000AB, 0x0 },
- { 0x00005013, 0x0000009F, 0x0 },
- { 0x00000018, 0x000000DF, 0x0 },
+static const union intel_ddi_buf_trans_entry _skl_u_ddi_translations_edp[] = {
+ { .hsw = { 0x00000018, 0x000000A8, 0x0 } },
+ { .hsw = { 0x00004013, 0x000000A9, 0x0 } },
+ { .hsw = { 0x00007011, 0x000000A2, 0x0 } },
+ { .hsw = { 0x00009010, 0x0000009C, 0x0 } },
+ { .hsw = { 0x00000018, 0x000000A9, 0x0 } },
+ { .hsw = { 0x00006013, 0x000000A2, 0x0 } },
+ { .hsw = { 0x00007011, 0x000000A6, 0x0 } },
+ { .hsw = { 0x00002016, 0x000000AB, 0x0 } },
+ { .hsw = { 0x00005013, 0x0000009F, 0x0 } },
+ { .hsw = { 0x00000018, 0x000000DF, 0x0 } },
+};
+
+static const struct intel_ddi_buf_trans skl_u_ddi_translations_edp = {
+ .entries = _skl_u_ddi_translations_edp,
+ .num_entries = ARRAY_SIZE(_skl_u_ddi_translations_edp),
};
/*
* Skylake/Kabylake Y
* eDP 1.4 low vswing translation parameters
*/
-static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = {
- { 0x00000018, 0x000000A8, 0x0 },
- { 0x00004013, 0x000000AB, 0x0 },
- { 0x00007011, 0x000000A4, 0x0 },
- { 0x00009010, 0x000000DF, 0x0 },
- { 0x00000018, 0x000000AA, 0x0 },
- { 0x00006013, 0x000000A4, 0x0 },
- { 0x00007011, 0x0000009D, 0x0 },
- { 0x00000018, 0x000000A0, 0x0 },
- { 0x00006012, 0x000000DF, 0x0 },
- { 0x00000018, 0x0000008A, 0x0 },
+static const union intel_ddi_buf_trans_entry _skl_y_ddi_translations_edp[] = {
+ { .hsw = { 0x00000018, 0x000000A8, 0x0 } },
+ { .hsw = { 0x00004013, 0x000000AB, 0x0 } },
+ { .hsw = { 0x00007011, 0x000000A4, 0x0 } },
+ { .hsw = { 0x00009010, 0x000000DF, 0x0 } },
+ { .hsw = { 0x00000018, 0x000000AA, 0x0 } },
+ { .hsw = { 0x00006013, 0x000000A4, 0x0 } },
+ { .hsw = { 0x00007011, 0x0000009D, 0x0 } },
+ { .hsw = { 0x00000018, 0x000000A0, 0x0 } },
+ { .hsw = { 0x00006012, 0x000000DF, 0x0 } },
+ { .hsw = { 0x00000018, 0x0000008A, 0x0 } },
+};
+
+static const struct intel_ddi_buf_trans skl_y_ddi_translations_edp = {
+ .entries = _skl_y_ddi_translations_edp,
+ .num_entries = ARRAY_SIZE(_skl_y_ddi_translations_edp),
};
/* Skylake/Kabylake U, H and S */
-static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
- { 0x00000018, 0x000000AC, 0x0 },
- { 0x00005012, 0x0000009D, 0x0 },
- { 0x00007011, 0x00000088, 0x0 },
- { 0x00000018, 0x000000A1, 0x0 },
- { 0x00000018, 0x00000098, 0x0 },
- { 0x00004013, 0x00000088, 0x0 },
- { 0x80006012, 0x000000CD, 0x1 },
- { 0x00000018, 0x000000DF, 0x0 },
- { 0x80003015, 0x000000CD, 0x1 }, /* Default */
- { 0x80003015, 0x000000C0, 0x1 },
- { 0x80000018, 0x000000C0, 0x1 },
+static const union intel_ddi_buf_trans_entry _skl_ddi_translations_hdmi[] = {
+ { .hsw = { 0x00000018, 0x000000AC, 0x0 } },
+ { .hsw = { 0x00005012, 0x0000009D, 0x0 } },
+ { .hsw = { 0x00007011, 0x00000088, 0x0 } },
+ { .hsw = { 0x00000018, 0x000000A1, 0x0 } },
+ { .hsw = { 0x00000018, 0x00000098, 0x0 } },
+ { .hsw = { 0x00004013, 0x00000088, 0x0 } },
+ { .hsw = { 0x80006012, 0x000000CD, 0x1 } },
+ { .hsw = { 0x00000018, 0x000000DF, 0x0 } },
+ { .hsw = { 0x80003015, 0x000000CD, 0x1 } }, /* Default */
+ { .hsw = { 0x80003015, 0x000000C0, 0x1 } },
+ { .hsw = { 0x80000018, 0x000000C0, 0x1 } },
+};
+
+static const struct intel_ddi_buf_trans skl_ddi_translations_hdmi = {
+ .entries = _skl_ddi_translations_hdmi,
+ .num_entries = ARRAY_SIZE(_skl_ddi_translations_hdmi),
+ .hdmi_default_entry = 8,
};
/* Skylake/Kabylake Y */
-static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
- { 0x00000018, 0x000000A1, 0x0 },
- { 0x00005012, 0x000000DF, 0x0 },
- { 0x80007011, 0x000000CB, 0x3 },
- { 0x00000018, 0x000000A4, 0x0 },
- { 0x00000018, 0x0000009D, 0x0 },
- { 0x00004013, 0x00000080, 0x0 },
- { 0x80006013, 0x000000C0, 0x3 },
- { 0x00000018, 0x0000008A, 0x0 },
- { 0x80003015, 0x000000C0, 0x3 }, /* Default */
- { 0x80003015, 0x000000C0, 0x3 },
- { 0x80000018, 0x000000C0, 0x3 },
-};
-
-
-static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = {
- /* Idx NT mV diff db */
- { 52, 0x9A, 0, 128, }, /* 0: 400 0 */
- { 78, 0x9A, 0, 85, }, /* 1: 400 3.5 */
- { 104, 0x9A, 0, 64, }, /* 2: 400 6 */
- { 154, 0x9A, 0, 43, }, /* 3: 400 9.5 */
- { 77, 0x9A, 0, 128, }, /* 4: 600 0 */
- { 116, 0x9A, 0, 85, }, /* 5: 600 3.5 */
- { 154, 0x9A, 0, 64, }, /* 6: 600 6 */
- { 102, 0x9A, 0, 128, }, /* 7: 800 0 */
- { 154, 0x9A, 0, 85, }, /* 8: 800 3.5 */
- { 154, 0x9A, 1, 128, }, /* 9: 1200 0 */
-};
-
-static const struct bxt_ddi_buf_trans bxt_ddi_translations_edp[] = {
+static const union intel_ddi_buf_trans_entry _skl_y_ddi_translations_hdmi[] = {
+ { .hsw = { 0x00000018, 0x000000A1, 0x0 } },
+ { .hsw = { 0x00005012, 0x000000DF, 0x0 } },
+ { .hsw = { 0x80007011, 0x000000CB, 0x3 } },
+ { .hsw = { 0x00000018, 0x000000A4, 0x0 } },
+ { .hsw = { 0x00000018, 0x0000009D, 0x0 } },
+ { .hsw = { 0x00004013, 0x00000080, 0x0 } },
+ { .hsw = { 0x80006013, 0x000000C0, 0x3 } },
+ { .hsw = { 0x00000018, 0x0000008A, 0x0 } },
+ { .hsw = { 0x80003015, 0x000000C0, 0x3 } }, /* Default */
+ { .hsw = { 0x80003015, 0x000000C0, 0x3 } },
+ { .hsw = { 0x80000018, 0x000000C0, 0x3 } },
+};
+
+static const struct intel_ddi_buf_trans skl_y_ddi_translations_hdmi = {
+ .entries = _skl_y_ddi_translations_hdmi,
+ .num_entries = ARRAY_SIZE(_skl_y_ddi_translations_hdmi),
+ .hdmi_default_entry = 8,
+};
+
+static const union intel_ddi_buf_trans_entry _bxt_ddi_translations_dp[] = {
+ /* Idx NT mV diff db */
+ { .bxt = { 52, 0x9A, 0, 128, } }, /* 0: 400 0 */
+ { .bxt = { 78, 0x9A, 0, 85, } }, /* 1: 400 3.5 */
+ { .bxt = { 104, 0x9A, 0, 64, } }, /* 2: 400 6 */
+ { .bxt = { 154, 0x9A, 0, 43, } }, /* 3: 400 9.5 */
+ { .bxt = { 77, 0x9A, 0, 128, } }, /* 4: 600 0 */
+ { .bxt = { 116, 0x9A, 0, 85, } }, /* 5: 600 3.5 */
+ { .bxt = { 154, 0x9A, 0, 64, } }, /* 6: 600 6 */
+ { .bxt = { 102, 0x9A, 0, 128, } }, /* 7: 800 0 */
+ { .bxt = { 154, 0x9A, 0, 85, } }, /* 8: 800 3.5 */
+ { .bxt = { 154, 0x9A, 1, 128, } }, /* 9: 1200 0 */
+};
+
+static const struct intel_ddi_buf_trans bxt_ddi_translations_dp = {
+ .entries = _bxt_ddi_translations_dp,
+ .num_entries = ARRAY_SIZE(_bxt_ddi_translations_dp),
+};
+
+static const union intel_ddi_buf_trans_entry _bxt_ddi_translations_edp[] = {
/* Idx NT mV diff db */
- { 26, 0, 0, 128, }, /* 0: 200 0 */
- { 38, 0, 0, 112, }, /* 1: 200 1.5 */
- { 48, 0, 0, 96, }, /* 2: 200 4 */
- { 54, 0, 0, 69, }, /* 3: 200 6 */
- { 32, 0, 0, 128, }, /* 4: 250 0 */
- { 48, 0, 0, 104, }, /* 5: 250 1.5 */
- { 54, 0, 0, 85, }, /* 6: 250 4 */
- { 43, 0, 0, 128, }, /* 7: 300 0 */
- { 54, 0, 0, 101, }, /* 8: 300 1.5 */
- { 48, 0, 0, 128, }, /* 9: 300 0 */
+ { .bxt = { 26, 0, 0, 128, } }, /* 0: 200 0 */
+ { .bxt = { 38, 0, 0, 112, } }, /* 1: 200 1.5 */
+ { .bxt = { 48, 0, 0, 96, } }, /* 2: 200 4 */
+ { .bxt = { 54, 0, 0, 69, } }, /* 3: 200 6 */
+ { .bxt = { 32, 0, 0, 128, } }, /* 4: 250 0 */
+ { .bxt = { 48, 0, 0, 104, } }, /* 5: 250 1.5 */
+ { .bxt = { 54, 0, 0, 85, } }, /* 6: 250 4 */
+ { .bxt = { 43, 0, 0, 128, } }, /* 7: 300 0 */
+ { .bxt = { 54, 0, 0, 101, } }, /* 8: 300 1.5 */
+ { .bxt = { 48, 0, 0, 128, } }, /* 9: 300 0 */
+};
+
+static const struct intel_ddi_buf_trans bxt_ddi_translations_edp = {
+ .entries = _bxt_ddi_translations_edp,
+ .num_entries = ARRAY_SIZE(_bxt_ddi_translations_edp),
};
/* BSpec has 2 recommended values - entries 0 and 8.
* Using the entry with higher vswing.
*/
-static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
- /* Idx NT mV diff db */
- { 52, 0x9A, 0, 128, }, /* 0: 400 0 */
- { 52, 0x9A, 0, 85, }, /* 1: 400 3.5 */
- { 52, 0x9A, 0, 64, }, /* 2: 400 6 */
- { 42, 0x9A, 0, 43, }, /* 3: 400 9.5 */
- { 77, 0x9A, 0, 128, }, /* 4: 600 0 */
- { 77, 0x9A, 0, 85, }, /* 5: 600 3.5 */
- { 77, 0x9A, 0, 64, }, /* 6: 600 6 */
- { 102, 0x9A, 0, 128, }, /* 7: 800 0 */
- { 102, 0x9A, 0, 85, }, /* 8: 800 3.5 */
- { 154, 0x9A, 1, 128, }, /* 9: 1200 0 */
+static const union intel_ddi_buf_trans_entry _bxt_ddi_translations_hdmi[] = {
+ /* Idx NT mV diff db */
+ { .bxt = { 52, 0x9A, 0, 128, } }, /* 0: 400 0 */
+ { .bxt = { 52, 0x9A, 0, 85, } }, /* 1: 400 3.5 */
+ { .bxt = { 52, 0x9A, 0, 64, } }, /* 2: 400 6 */
+ { .bxt = { 42, 0x9A, 0, 43, } }, /* 3: 400 9.5 */
+ { .bxt = { 77, 0x9A, 0, 128, } }, /* 4: 600 0 */
+ { .bxt = { 77, 0x9A, 0, 85, } }, /* 5: 600 3.5 */
+ { .bxt = { 77, 0x9A, 0, 64, } }, /* 6: 600 6 */
+ { .bxt = { 102, 0x9A, 0, 128, } }, /* 7: 800 0 */
+ { .bxt = { 102, 0x9A, 0, 85, } }, /* 8: 800 3.5 */
+ { .bxt = { 154, 0x9A, 1, 128, } }, /* 9: 1200 0 */
+};
+
+static const struct intel_ddi_buf_trans bxt_ddi_translations_hdmi = {
+ .entries = _bxt_ddi_translations_hdmi,
+ .num_entries = ARRAY_SIZE(_bxt_ddi_translations_hdmi),
+ .hdmi_default_entry = ARRAY_SIZE(_bxt_ddi_translations_hdmi) - 1,
};
/* Voltage Swing Programming for VccIO 0.85V for DP */
-static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_0_85V[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x5D, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x6A, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */
- { 0xB, 0x7A, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */
- { 0x6, 0x7C, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */
- { 0xA, 0x69, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xB, 0x7A, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
- { 0x6, 0x7C, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
- { 0xB, 0x7D, 0x3C, 0x00, 0x03 }, /* 650 725 0.9 */
- { 0x6, 0x7C, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */
- { 0x6, 0x7B, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+static const union intel_ddi_buf_trans_entry _cnl_ddi_translations_dp_0_85V[] = {
+ /* NT mV Trans mV db */
+ { .cnl = { 0xA, 0x5D, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+ { .cnl = { 0xA, 0x6A, 0x38, 0x00, 0x07 } }, /* 350 500 3.1 */
+ { .cnl = { 0xB, 0x7A, 0x32, 0x00, 0x0D } }, /* 350 700 6.0 */
+ { .cnl = { 0x6, 0x7C, 0x2D, 0x00, 0x12 } }, /* 350 900 8.2 */
+ { .cnl = { 0xA, 0x69, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .cnl = { 0xB, 0x7A, 0x36, 0x00, 0x09 } }, /* 500 700 2.9 */
+ { .cnl = { 0x6, 0x7C, 0x30, 0x00, 0x0F } }, /* 500 900 5.1 */
+ { .cnl = { 0xB, 0x7D, 0x3C, 0x00, 0x03 } }, /* 650 725 0.9 */
+ { .cnl = { 0x6, 0x7C, 0x34, 0x00, 0x0B } }, /* 600 900 3.5 */
+ { .cnl = { 0x6, 0x7B, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};
+
+static const struct intel_ddi_buf_trans cnl_ddi_translations_dp_0_85V = {
+ .entries = _cnl_ddi_translations_dp_0_85V,
+ .num_entries = ARRAY_SIZE(_cnl_ddi_translations_dp_0_85V),
};
/* Voltage Swing Programming for VccIO 0.85V for HDMI */
-static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_0_85V[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
- { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */
- { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */
- { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 */
- { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */
- { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
+static const union intel_ddi_buf_trans_entry _cnl_ddi_translations_hdmi_0_85V[] = {
+ /* NT mV Trans mV db */
+ { .cnl = { 0xA, 0x60, 0x3F, 0x00, 0x00 } }, /* 450 450 0.0 */
+ { .cnl = { 0xB, 0x73, 0x36, 0x00, 0x09 } }, /* 450 650 3.2 */
+ { .cnl = { 0x6, 0x7F, 0x31, 0x00, 0x0E } }, /* 450 850 5.5 */
+ { .cnl = { 0xB, 0x73, 0x3F, 0x00, 0x00 } }, /* 650 650 0.0 */
+ { .cnl = { 0x6, 0x7F, 0x37, 0x00, 0x08 } }, /* 650 850 2.3 */
+ { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 850 850 0.0 */
+ { .cnl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 850 3.0 */
+};
+
+static const struct intel_ddi_buf_trans cnl_ddi_translations_hdmi_0_85V = {
+ .entries = _cnl_ddi_translations_hdmi_0_85V,
+ .num_entries = ARRAY_SIZE(_cnl_ddi_translations_hdmi_0_85V),
+ .hdmi_default_entry = ARRAY_SIZE(_cnl_ddi_translations_hdmi_0_85V) - 1,
};
/* Voltage Swing Programming for VccIO 0.85V for eDP */
-static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_0_85V[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x66, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */
- { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */
- { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */
- { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */
- { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */
- { 0xA, 0x66, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */
- { 0xB, 0x70, 0x3C, 0x00, 0x03 }, /* 460 600 2.3 */
- { 0xC, 0x75, 0x3C, 0x00, 0x03 }, /* 537 700 2.3 */
- { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
+static const union intel_ddi_buf_trans_entry _cnl_ddi_translations_edp_0_85V[] = {
+ /* NT mV Trans mV db */
+ { .cnl = { 0xA, 0x66, 0x3A, 0x00, 0x05 } }, /* 384 500 2.3 */
+ { .cnl = { 0x0, 0x7F, 0x38, 0x00, 0x07 } }, /* 153 200 2.3 */
+ { .cnl = { 0x8, 0x7F, 0x38, 0x00, 0x07 } }, /* 192 250 2.3 */
+ { .cnl = { 0x1, 0x7F, 0x38, 0x00, 0x07 } }, /* 230 300 2.3 */
+ { .cnl = { 0x9, 0x7F, 0x38, 0x00, 0x07 } }, /* 269 350 2.3 */
+ { .cnl = { 0xA, 0x66, 0x3C, 0x00, 0x03 } }, /* 446 500 1.0 */
+ { .cnl = { 0xB, 0x70, 0x3C, 0x00, 0x03 } }, /* 460 600 2.3 */
+ { .cnl = { 0xC, 0x75, 0x3C, 0x00, 0x03 } }, /* 537 700 2.3 */
+ { .cnl = { 0x2, 0x7F, 0x3F, 0x00, 0x00 } }, /* 400 400 0.0 */
+};
+
+static const struct intel_ddi_buf_trans cnl_ddi_translations_edp_0_85V = {
+ .entries = _cnl_ddi_translations_edp_0_85V,
+ .num_entries = ARRAY_SIZE(_cnl_ddi_translations_edp_0_85V),
};
/* Voltage Swing Programming for VccIO 0.95V for DP */
-static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_0_95V[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x5D, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x6A, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */
- { 0xB, 0x7A, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */
- { 0x6, 0x7C, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */
- { 0xA, 0x69, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xB, 0x7A, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
- { 0x6, 0x7C, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
- { 0xB, 0x7D, 0x3C, 0x00, 0x03 }, /* 650 725 0.9 */
- { 0x6, 0x7C, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */
- { 0x6, 0x7B, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+static const union intel_ddi_buf_trans_entry _cnl_ddi_translations_dp_0_95V[] = {
+ /* NT mV Trans mV db */
+ { .cnl = { 0xA, 0x5D, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+ { .cnl = { 0xA, 0x6A, 0x38, 0x00, 0x07 } }, /* 350 500 3.1 */
+ { .cnl = { 0xB, 0x7A, 0x32, 0x00, 0x0D } }, /* 350 700 6.0 */
+ { .cnl = { 0x6, 0x7C, 0x2D, 0x00, 0x12 } }, /* 350 900 8.2 */
+ { .cnl = { 0xA, 0x69, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .cnl = { 0xB, 0x7A, 0x36, 0x00, 0x09 } }, /* 500 700 2.9 */
+ { .cnl = { 0x6, 0x7C, 0x30, 0x00, 0x0F } }, /* 500 900 5.1 */
+ { .cnl = { 0xB, 0x7D, 0x3C, 0x00, 0x03 } }, /* 650 725 0.9 */
+ { .cnl = { 0x6, 0x7C, 0x34, 0x00, 0x0B } }, /* 600 900 3.5 */
+ { .cnl = { 0x6, 0x7B, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};
+
+static const struct intel_ddi_buf_trans cnl_ddi_translations_dp_0_95V = {
+ .entries = _cnl_ddi_translations_dp_0_95V,
+ .num_entries = ARRAY_SIZE(_cnl_ddi_translations_dp_0_95V),
};
/* Voltage Swing Programming for VccIO 0.95V for HDMI */
-static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_0_95V[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x5C, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
- { 0xB, 0x69, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */
- { 0x5, 0x76, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */
- { 0xA, 0x5E, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
- { 0xB, 0x69, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */
- { 0xB, 0x79, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
- { 0x6, 0x7D, 0x32, 0x00, 0x0D }, /* 600 1000 4.4 */
- { 0x5, 0x76, 0x3F, 0x00, 0x00 }, /* 800 800 0.0 */
- { 0x6, 0x7D, 0x39, 0x00, 0x06 }, /* 800 1000 1.9 */
- { 0x6, 0x7F, 0x39, 0x00, 0x06 }, /* 850 1050 1.8 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */
+static const union intel_ddi_buf_trans_entry _cnl_ddi_translations_hdmi_0_95V[] = {
+ /* NT mV Trans mV db */
+ { .cnl = { 0xA, 0x5C, 0x3F, 0x00, 0x00 } }, /* 400 400 0.0 */
+ { .cnl = { 0xB, 0x69, 0x37, 0x00, 0x08 } }, /* 400 600 3.5 */
+ { .cnl = { 0x5, 0x76, 0x31, 0x00, 0x0E } }, /* 400 800 6.0 */
+ { .cnl = { 0xA, 0x5E, 0x3F, 0x00, 0x00 } }, /* 450 450 0.0 */
+ { .cnl = { 0xB, 0x69, 0x3F, 0x00, 0x00 } }, /* 600 600 0.0 */
+ { .cnl = { 0xB, 0x79, 0x35, 0x00, 0x0A } }, /* 600 850 3.0 */
+ { .cnl = { 0x6, 0x7D, 0x32, 0x00, 0x0D } }, /* 600 1000 4.4 */
+ { .cnl = { 0x5, 0x76, 0x3F, 0x00, 0x00 } }, /* 800 800 0.0 */
+ { .cnl = { 0x6, 0x7D, 0x39, 0x00, 0x06 } }, /* 800 1000 1.9 */
+ { .cnl = { 0x6, 0x7F, 0x39, 0x00, 0x06 } }, /* 850 1050 1.8 */
+ { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 1050 1050 0.0 */
+};
+
+static const struct intel_ddi_buf_trans cnl_ddi_translations_hdmi_0_95V = {
+ .entries = _cnl_ddi_translations_hdmi_0_95V,
+ .num_entries = ARRAY_SIZE(_cnl_ddi_translations_hdmi_0_95V),
+ .hdmi_default_entry = ARRAY_SIZE(_cnl_ddi_translations_hdmi_0_95V) - 1,
};
/* Voltage Swing Programming for VccIO 0.95V for eDP */
-static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_0_95V[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x61, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */
- { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */
- { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */
- { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */
- { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */
- { 0xA, 0x61, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */
- { 0xB, 0x68, 0x39, 0x00, 0x06 }, /* 460 600 2.3 */
- { 0xC, 0x6E, 0x39, 0x00, 0x06 }, /* 537 700 2.3 */
- { 0x4, 0x7F, 0x3A, 0x00, 0x05 }, /* 460 600 2.3 */
- { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
+static const union intel_ddi_buf_trans_entry _cnl_ddi_translations_edp_0_95V[] = {
+ /* NT mV Trans mV db */
+ { .cnl = { 0xA, 0x61, 0x3A, 0x00, 0x05 } }, /* 384 500 2.3 */
+ { .cnl = { 0x0, 0x7F, 0x38, 0x00, 0x07 } }, /* 153 200 2.3 */
+ { .cnl = { 0x8, 0x7F, 0x38, 0x00, 0x07 } }, /* 192 250 2.3 */
+ { .cnl = { 0x1, 0x7F, 0x38, 0x00, 0x07 } }, /* 230 300 2.3 */
+ { .cnl = { 0x9, 0x7F, 0x38, 0x00, 0x07 } }, /* 269 350 2.3 */
+ { .cnl = { 0xA, 0x61, 0x3C, 0x00, 0x03 } }, /* 446 500 1.0 */
+ { .cnl = { 0xB, 0x68, 0x39, 0x00, 0x06 } }, /* 460 600 2.3 */
+ { .cnl = { 0xC, 0x6E, 0x39, 0x00, 0x06 } }, /* 537 700 2.3 */
+ { .cnl = { 0x4, 0x7F, 0x3A, 0x00, 0x05 } }, /* 460 600 2.3 */
+ { .cnl = { 0x2, 0x7F, 0x3F, 0x00, 0x00 } }, /* 400 400 0.0 */
+};
+
+static const struct intel_ddi_buf_trans cnl_ddi_translations_edp_0_95V = {
+ .entries = _cnl_ddi_translations_edp_0_95V,
+ .num_entries = ARRAY_SIZE(_cnl_ddi_translations_edp_0_95V),
};
/* Voltage Swing Programming for VccIO 1.05V for DP */
-static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_1_05V[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x58, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
- { 0xB, 0x64, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */
- { 0x5, 0x70, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */
- { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 400 1050 8.4 */
- { 0xB, 0x64, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */
- { 0x5, 0x73, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
- { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 550 1050 5.6 */
- { 0x5, 0x76, 0x3E, 0x00, 0x01 }, /* 850 900 0.5 */
- { 0x6, 0x7F, 0x36, 0x00, 0x09 }, /* 750 1050 2.9 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */
+static const union intel_ddi_buf_trans_entry _cnl_ddi_translations_dp_1_05V[] = {
+ /* NT mV Trans mV db */
+ { .cnl = { 0xA, 0x58, 0x3F, 0x00, 0x00 } }, /* 400 400 0.0 */
+ { .cnl = { 0xB, 0x64, 0x37, 0x00, 0x08 } }, /* 400 600 3.5 */
+ { .cnl = { 0x5, 0x70, 0x31, 0x00, 0x0E } }, /* 400 800 6.0 */
+ { .cnl = { 0x6, 0x7F, 0x2C, 0x00, 0x13 } }, /* 400 1050 8.4 */
+ { .cnl = { 0xB, 0x64, 0x3F, 0x00, 0x00 } }, /* 600 600 0.0 */
+ { .cnl = { 0x5, 0x73, 0x35, 0x00, 0x0A } }, /* 600 850 3.0 */
+ { .cnl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } }, /* 550 1050 5.6 */
+ { .cnl = { 0x5, 0x76, 0x3E, 0x00, 0x01 } }, /* 850 900 0.5 */
+ { .cnl = { 0x6, 0x7F, 0x36, 0x00, 0x09 } }, /* 750 1050 2.9 */
+ { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 1050 1050 0.0 */
+};
+
+static const struct intel_ddi_buf_trans cnl_ddi_translations_dp_1_05V = {
+ .entries = _cnl_ddi_translations_dp_1_05V,
+ .num_entries = ARRAY_SIZE(_cnl_ddi_translations_dp_1_05V),
};
/* Voltage Swing Programming for VccIO 1.05V for HDMI */
-static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_1_05V[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x58, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
- { 0xB, 0x64, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */
- { 0x5, 0x70, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */
- { 0xA, 0x5B, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
- { 0xB, 0x64, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */
- { 0x5, 0x73, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
- { 0x6, 0x7C, 0x32, 0x00, 0x0D }, /* 600 1000 4.4 */
- { 0x5, 0x70, 0x3F, 0x00, 0x00 }, /* 800 800 0.0 */
- { 0x6, 0x7C, 0x39, 0x00, 0x06 }, /* 800 1000 1.9 */
- { 0x6, 0x7F, 0x39, 0x00, 0x06 }, /* 850 1050 1.8 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */
+static const union intel_ddi_buf_trans_entry _cnl_ddi_translations_hdmi_1_05V[] = {
+ /* NT mV Trans mV db */
+ { .cnl = { 0xA, 0x58, 0x3F, 0x00, 0x00 } }, /* 400 400 0.0 */
+ { .cnl = { 0xB, 0x64, 0x37, 0x00, 0x08 } }, /* 400 600 3.5 */
+ { .cnl = { 0x5, 0x70, 0x31, 0x00, 0x0E } }, /* 400 800 6.0 */
+ { .cnl = { 0xA, 0x5B, 0x3F, 0x00, 0x00 } }, /* 450 450 0.0 */
+ { .cnl = { 0xB, 0x64, 0x3F, 0x00, 0x00 } }, /* 600 600 0.0 */
+ { .cnl = { 0x5, 0x73, 0x35, 0x00, 0x0A } }, /* 600 850 3.0 */
+ { .cnl = { 0x6, 0x7C, 0x32, 0x00, 0x0D } }, /* 600 1000 4.4 */
+ { .cnl = { 0x5, 0x70, 0x3F, 0x00, 0x00 } }, /* 800 800 0.0 */
+ { .cnl = { 0x6, 0x7C, 0x39, 0x00, 0x06 } }, /* 800 1000 1.9 */
+ { .cnl = { 0x6, 0x7F, 0x39, 0x00, 0x06 } }, /* 850 1050 1.8 */
+ { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 1050 1050 0.0 */
+};
+
+static const struct intel_ddi_buf_trans cnl_ddi_translations_hdmi_1_05V = {
+ .entries = _cnl_ddi_translations_hdmi_1_05V,
+ .num_entries = ARRAY_SIZE(_cnl_ddi_translations_hdmi_1_05V),
+ .hdmi_default_entry = ARRAY_SIZE(_cnl_ddi_translations_hdmi_1_05V) - 1,
};
/* Voltage Swing Programming for VccIO 1.05V for eDP */
-static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x5E, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */
- { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */
- { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */
- { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */
- { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */
- { 0xA, 0x5E, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */
- { 0xB, 0x64, 0x39, 0x00, 0x06 }, /* 460 600 2.3 */
- { 0xE, 0x6A, 0x39, 0x00, 0x06 }, /* 537 700 2.3 */
- { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
+static const union intel_ddi_buf_trans_entry _cnl_ddi_translations_edp_1_05V[] = {
+ /* NT mV Trans mV db */
+ { .cnl = { 0xA, 0x5E, 0x3A, 0x00, 0x05 } }, /* 384 500 2.3 */
+ { .cnl = { 0x0, 0x7F, 0x38, 0x00, 0x07 } }, /* 153 200 2.3 */
+ { .cnl = { 0x8, 0x7F, 0x38, 0x00, 0x07 } }, /* 192 250 2.3 */
+ { .cnl = { 0x1, 0x7F, 0x38, 0x00, 0x07 } }, /* 230 300 2.3 */
+ { .cnl = { 0x9, 0x7F, 0x38, 0x00, 0x07 } }, /* 269 350 2.3 */
+ { .cnl = { 0xA, 0x5E, 0x3C, 0x00, 0x03 } }, /* 446 500 1.0 */
+ { .cnl = { 0xB, 0x64, 0x39, 0x00, 0x06 } }, /* 460 600 2.3 */
+ { .cnl = { 0xE, 0x6A, 0x39, 0x00, 0x06 } }, /* 537 700 2.3 */
+ { .cnl = { 0x2, 0x7F, 0x3F, 0x00, 0x00 } }, /* 400 400 0.0 */
+};
+
+static const struct intel_ddi_buf_trans cnl_ddi_translations_edp_1_05V = {
+ .entries = _cnl_ddi_translations_edp_1_05V,
+ .num_entries = ARRAY_SIZE(_cnl_ddi_translations_edp_1_05V),
};
/* icl_combo_phy_ddi_translations */
-static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
- { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
- { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
- { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
- { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
- { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2[] = {
- /* NT mV Trans mV db */
- { 0x0, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */
- { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */
- { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */
- { 0x9, 0x7F, 0x31, 0x00, 0x0E }, /* 200 350 4.9 */
- { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */
- { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */
- { 0x9, 0x7F, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */
- { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */
- { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */
- { 0x9, 0x7F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr3[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
- { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
- { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
- { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
- { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
- { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
- { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */
- { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */
- { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 ALS */
- { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */
- { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
-};
-
-static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_dp[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x33, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x47, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */
- { 0xC, 0x64, 0x34, 0x00, 0x0B }, /* 350 700 6.0 */
- { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 350 900 8.2 */
- { 0xA, 0x46, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x64, 0x38, 0x00, 0x07 }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x32, 0x00, 0x0D }, /* 500 900 5.1 */
- { 0xC, 0x61, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
- { 0x6, 0x7F, 0x38, 0x00, 0x07 }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr[] = {
- /* NT mV Trans mV db */
- { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */
- { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */
- { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */
- { 0xA, 0x35, 0x36, 0x00, 0x09 }, /* 200 350 4.9 */
- { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */
- { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */
- { 0xA, 0x35, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */
- { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */
- { 0xA, 0x35, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */
- { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr2[] = {
- /* NT mV Trans mV db */
- { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */
- { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 250 1.9 */
- { 0x1, 0x7F, 0x3D, 0x00, 0x02 }, /* 200 300 3.5 */
- { 0xA, 0x35, 0x38, 0x00, 0x07 }, /* 200 350 4.9 */
- { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */
- { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 300 1.6 */
- { 0xA, 0x35, 0x3A, 0x00, 0x05 }, /* 250 350 2.9 */
- { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */
- { 0xA, 0x35, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */
- { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_rbr_hbr[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */
- { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
- { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */
- { 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
- { 0xC, 0x60, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
- { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */
- { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
- { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */
- { 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
- { 0xC, 0x58, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
- { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_rbr_hbr[] = {
- /* Voltage swing pre-emphasis */
- { 0x18, 0x00, 0x00 }, /* 0 0 */
- { 0x1D, 0x00, 0x05 }, /* 0 1 */
- { 0x24, 0x00, 0x0C }, /* 0 2 */
- { 0x2B, 0x00, 0x14 }, /* 0 3 */
- { 0x21, 0x00, 0x00 }, /* 1 0 */
- { 0x2B, 0x00, 0x08 }, /* 1 1 */
- { 0x30, 0x00, 0x0F }, /* 1 2 */
- { 0x31, 0x00, 0x03 }, /* 2 0 */
- { 0x34, 0x00, 0x0B }, /* 2 1 */
- { 0x3F, 0x00, 0x00 }, /* 3 0 */
-};
-
-static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hbr2_hbr3[] = {
- /* Voltage swing pre-emphasis */
- { 0x18, 0x00, 0x00 }, /* 0 0 */
- { 0x1D, 0x00, 0x05 }, /* 0 1 */
- { 0x24, 0x00, 0x0C }, /* 0 2 */
- { 0x2B, 0x00, 0x14 }, /* 0 3 */
- { 0x26, 0x00, 0x00 }, /* 1 0 */
- { 0x2C, 0x00, 0x07 }, /* 1 1 */
- { 0x33, 0x00, 0x0C }, /* 1 2 */
- { 0x2E, 0x00, 0x00 }, /* 2 0 */
- { 0x36, 0x00, 0x09 }, /* 2 1 */
- { 0x3F, 0x00, 0x00 }, /* 3 0 */
-};
-
-static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hdmi[] = {
- /* HDMI Preset VS Pre-emph */
- { 0x1A, 0x0, 0x0 }, /* 1 400mV 0dB */
- { 0x20, 0x0, 0x0 }, /* 2 500mV 0dB */
- { 0x29, 0x0, 0x0 }, /* 3 650mV 0dB */
- { 0x32, 0x0, 0x0 }, /* 4 800mV 0dB */
- { 0x3F, 0x0, 0x0 }, /* 5 1000mV 0dB */
- { 0x3A, 0x0, 0x5 }, /* 6 Full -1.5 dB */
- { 0x39, 0x0, 0x6 }, /* 7 Full -1.8 dB */
- { 0x38, 0x0, 0x7 }, /* 8 Full -2 dB */
- { 0x37, 0x0, 0x8 }, /* 9 Full -2.5 dB */
- { 0x36, 0x0, 0x9 }, /* 10 Full -3 dB */
-};
-
-static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans[] = {
- /* VS pre-emp Non-trans mV Pre-emph dB */
- { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */
- { 0x5, 0x0, 0x05 }, /* 0 1 400mV 3.5 dB */
- { 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */
- { 0x0, 0x0, 0x18 }, /* 0 3 400mV 9.5 dB */
- { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */
- { 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */
- { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */
- { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */
- { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */
- { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */
-};
-
-static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans_hbr2[] = {
- /* VS pre-emp Non-trans mV Pre-emph dB */
- { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */
- { 0x5, 0x0, 0x05 }, /* 0 1 400mV 3.5 dB */
- { 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */
- { 0x0, 0x0, 0x19 }, /* 0 3 400mV 9.5 dB */
- { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */
- { 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */
- { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */
- { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */
- { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */
- { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */
-};
-
-static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_hdmi_ddi_trans[] = {
- /* HDMI Preset VS Pre-emph */
- { 0x7, 0x0, 0x0 }, /* 1 400mV 0dB */
- { 0x6, 0x0, 0x0 }, /* 2 500mV 0dB */
- { 0x4, 0x0, 0x0 }, /* 3 650mV 0dB */
- { 0x2, 0x0, 0x0 }, /* 4 800mV 0dB */
- { 0x0, 0x0, 0x0 }, /* 5 1000mV 0dB */
- { 0x0, 0x0, 0x5 }, /* 6 Full -1.5 dB */
- { 0x0, 0x0, 0x6 }, /* 7 Full -1.8 dB */
- { 0x0, 0x0, 0x7 }, /* 8 Full -2 dB */
- { 0x0, 0x0, 0x8 }, /* 9 Full -2.5 dB */
- { 0x0, 0x0, 0xA }, /* 10 Full -3 dB */
-};
-
-static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
- { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
- { 0x6, 0x7D, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
- { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
- { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
- { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
- { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
- { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
- { 0xA, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x63, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
- { 0xC, 0x61, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
- { 0x6, 0x7B, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans tgl_uy_combo_phy_ddi_translations_dp_hbr2[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x4F, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */
- { 0xC, 0x60, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */
- { 0xC, 0x7F, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */
- { 0xC, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x6F, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
- { 0x6, 0x7D, 0x32, 0x00, 0x0D }, /* 500 900 5.1 */
- { 0x6, 0x60, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
- { 0x6, 0x7F, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+static const union intel_ddi_buf_trans_entry _icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3[] = {
+ /* NT mV Trans mV db */
+ { .cnl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+ { .cnl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
+ { .cnl = { 0xC, 0x71, 0x2F, 0x00, 0x10 } }, /* 350 700 6.0 */
+ { .cnl = { 0x6, 0x7F, 0x2B, 0x00, 0x14 } }, /* 350 900 8.2 */
+ { .cnl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .cnl = { 0xC, 0x73, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */
+ { .cnl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 500 900 5.1 */
+ { .cnl = { 0xC, 0x6C, 0x3C, 0x00, 0x03 } }, /* 650 700 0.6 */
+ { .cnl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */
+ { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};
+
+static const struct intel_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3 = {
+ .entries = _icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3,
+ .num_entries = ARRAY_SIZE(_icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3),
+};
+
+static const union intel_ddi_buf_trans_entry _icl_combo_phy_ddi_translations_edp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { .cnl = { 0x0, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */
+ { .cnl = { 0x8, 0x7F, 0x38, 0x00, 0x07 } }, /* 200 250 1.9 */
+ { .cnl = { 0x1, 0x7F, 0x33, 0x00, 0x0C } }, /* 200 300 3.5 */
+ { .cnl = { 0x9, 0x7F, 0x31, 0x00, 0x0E } }, /* 200 350 4.9 */
+ { .cnl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 250 250 0.0 */
+ { .cnl = { 0x1, 0x7F, 0x38, 0x00, 0x07 } }, /* 250 300 1.6 */
+ { .cnl = { 0x9, 0x7F, 0x35, 0x00, 0x0A } }, /* 250 350 2.9 */
+ { .cnl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } }, /* 300 300 0.0 */
+ { .cnl = { 0x9, 0x7F, 0x38, 0x00, 0x07 } }, /* 300 350 1.3 */
+ { .cnl = { 0x9, 0x7F, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+};
+
+static const struct intel_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2 = {
+ .entries = _icl_combo_phy_ddi_translations_edp_hbr2,
+ .num_entries = ARRAY_SIZE(_icl_combo_phy_ddi_translations_edp_hbr2),
+};
+
+static const union intel_ddi_buf_trans_entry _icl_combo_phy_ddi_translations_hdmi[] = {
+ /* NT mV Trans mV db */
+ { .cnl = { 0xA, 0x60, 0x3F, 0x00, 0x00 } }, /* 450 450 0.0 */
+ { .cnl = { 0xB, 0x73, 0x36, 0x00, 0x09 } }, /* 450 650 3.2 */
+ { .cnl = { 0x6, 0x7F, 0x31, 0x00, 0x0E } }, /* 450 850 5.5 */
+ { .cnl = { 0xB, 0x73, 0x3F, 0x00, 0x00 } }, /* 650 650 0.0 ALS */
+ { .cnl = { 0x6, 0x7F, 0x37, 0x00, 0x08 } }, /* 650 850 2.3 */
+ { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 850 850 0.0 */
+ { .cnl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 850 3.0 */
+};
+
+static const struct intel_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi = {
+ .entries = _icl_combo_phy_ddi_translations_hdmi,
+ .num_entries = ARRAY_SIZE(_icl_combo_phy_ddi_translations_hdmi),
+ .hdmi_default_entry = ARRAY_SIZE(_icl_combo_phy_ddi_translations_hdmi) - 1,
+};
+
+static const union intel_ddi_buf_trans_entry _ehl_combo_phy_ddi_translations_dp[] = {
+ /* NT mV Trans mV db */
+ { .cnl = { 0xA, 0x33, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+ { .cnl = { 0xA, 0x47, 0x36, 0x00, 0x09 } }, /* 350 500 3.1 */
+ { .cnl = { 0xC, 0x64, 0x34, 0x00, 0x0B } }, /* 350 700 6.0 */
+ { .cnl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } }, /* 350 900 8.2 */
+ { .cnl = { 0xA, 0x46, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .cnl = { 0xC, 0x64, 0x38, 0x00, 0x07 } }, /* 500 700 2.9 */
+ { .cnl = { 0x6, 0x7F, 0x32, 0x00, 0x0D } }, /* 500 900 5.1 */
+ { .cnl = { 0xC, 0x61, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */
+ { .cnl = { 0x6, 0x7F, 0x38, 0x00, 0x07 } }, /* 600 900 3.5 */
+ { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};
+
+static const struct intel_ddi_buf_trans ehl_combo_phy_ddi_translations_dp = {
+ .entries = _ehl_combo_phy_ddi_translations_dp,
+ .num_entries = ARRAY_SIZE(_ehl_combo_phy_ddi_translations_dp),
+};
+
+static const union intel_ddi_buf_trans_entry _ehl_combo_phy_ddi_translations_edp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { .cnl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */
+ { .cnl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 250 1.9 */
+ { .cnl = { 0x1, 0x7F, 0x3D, 0x00, 0x02 } }, /* 200 300 3.5 */
+ { .cnl = { 0xA, 0x35, 0x39, 0x00, 0x06 } }, /* 200 350 4.9 */
+ { .cnl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 250 250 0.0 */
+ { .cnl = { 0x1, 0x7F, 0x3C, 0x00, 0x03 } }, /* 250 300 1.6 */
+ { .cnl = { 0xA, 0x35, 0x39, 0x00, 0x06 } }, /* 250 350 2.9 */
+ { .cnl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } }, /* 300 300 0.0 */
+ { .cnl = { 0xA, 0x35, 0x38, 0x00, 0x07 } }, /* 300 350 1.3 */
+ { .cnl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+};
+
+static const struct intel_ddi_buf_trans ehl_combo_phy_ddi_translations_edp_hbr2 = {
+ .entries = _ehl_combo_phy_ddi_translations_edp_hbr2,
+ .num_entries = ARRAY_SIZE(_ehl_combo_phy_ddi_translations_edp_hbr2),
+};
+
+static const union intel_ddi_buf_trans_entry _jsl_combo_phy_ddi_translations_edp_hbr[] = {
+ /* NT mV Trans mV db */
+ { .cnl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */
+ { .cnl = { 0x8, 0x7F, 0x38, 0x00, 0x07 } }, /* 200 250 1.9 */
+ { .cnl = { 0x1, 0x7F, 0x33, 0x00, 0x0C } }, /* 200 300 3.5 */
+ { .cnl = { 0xA, 0x35, 0x36, 0x00, 0x09 } }, /* 200 350 4.9 */
+ { .cnl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 250 250 0.0 */
+ { .cnl = { 0x1, 0x7F, 0x38, 0x00, 0x07 } }, /* 250 300 1.6 */
+ { .cnl = { 0xA, 0x35, 0x35, 0x00, 0x0A } }, /* 250 350 2.9 */
+ { .cnl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } }, /* 300 300 0.0 */
+ { .cnl = { 0xA, 0x35, 0x38, 0x00, 0x07 } }, /* 300 350 1.3 */
+ { .cnl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+};
+
+static const struct intel_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr = {
+ .entries = _jsl_combo_phy_ddi_translations_edp_hbr,
+ .num_entries = ARRAY_SIZE(_jsl_combo_phy_ddi_translations_edp_hbr),
+};
+
+static const union intel_ddi_buf_trans_entry _jsl_combo_phy_ddi_translations_edp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { .cnl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */
+ { .cnl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 250 1.9 */
+ { .cnl = { 0x1, 0x7F, 0x3D, 0x00, 0x02 } }, /* 200 300 3.5 */
+ { .cnl = { 0xA, 0x35, 0x38, 0x00, 0x07 } }, /* 200 350 4.9 */
+ { .cnl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 250 250 0.0 */
+ { .cnl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } }, /* 250 300 1.6 */
+ { .cnl = { 0xA, 0x35, 0x3A, 0x00, 0x05 } }, /* 250 350 2.9 */
+ { .cnl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } }, /* 300 300 0.0 */
+ { .cnl = { 0xA, 0x35, 0x38, 0x00, 0x07 } }, /* 300 350 1.3 */
+ { .cnl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+};
+
+static const struct intel_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr2 = {
+ .entries = _jsl_combo_phy_ddi_translations_edp_hbr2,
+ .num_entries = ARRAY_SIZE(_jsl_combo_phy_ddi_translations_edp_hbr2),
+};
+
+static const union intel_ddi_buf_trans_entry _dg1_combo_phy_ddi_translations_dp_rbr_hbr[] = {
+ /* NT mV Trans mV db */
+ { .cnl = { 0xA, 0x32, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+ { .cnl = { 0xA, 0x48, 0x35, 0x00, 0x0A } }, /* 350 500 3.1 */
+ { .cnl = { 0xC, 0x63, 0x2F, 0x00, 0x10 } }, /* 350 700 6.0 */
+ { .cnl = { 0x6, 0x7F, 0x2C, 0x00, 0x13 } }, /* 350 900 8.2 */
+ { .cnl = { 0xA, 0x43, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .cnl = { 0xC, 0x60, 0x36, 0x00, 0x09 } }, /* 500 700 2.9 */
+ { .cnl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } }, /* 500 900 5.1 */
+ { .cnl = { 0xC, 0x60, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */
+ { .cnl = { 0x6, 0x7F, 0x37, 0x00, 0x08 } }, /* 600 900 3.5 */
+ { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};
+
+static const struct intel_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_rbr_hbr = {
+ .entries = _dg1_combo_phy_ddi_translations_dp_rbr_hbr,
+ .num_entries = ARRAY_SIZE(_dg1_combo_phy_ddi_translations_dp_rbr_hbr),
+};
+
+static const union intel_ddi_buf_trans_entry _dg1_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
+ /* NT mV Trans mV db */
+ { .cnl = { 0xA, 0x32, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+ { .cnl = { 0xA, 0x48, 0x35, 0x00, 0x0A } }, /* 350 500 3.1 */
+ { .cnl = { 0xC, 0x63, 0x2F, 0x00, 0x10 } }, /* 350 700 6.0 */
+ { .cnl = { 0x6, 0x7F, 0x2C, 0x00, 0x13 } }, /* 350 900 8.2 */
+ { .cnl = { 0xA, 0x43, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .cnl = { 0xC, 0x60, 0x36, 0x00, 0x09 } }, /* 500 700 2.9 */
+ { .cnl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } }, /* 500 900 5.1 */
+ { .cnl = { 0xC, 0x58, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */
+ { .cnl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */
+ { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};
+
+static const struct intel_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_hbr2_hbr3 = {
+ .entries = _dg1_combo_phy_ddi_translations_dp_hbr2_hbr3,
+ .num_entries = ARRAY_SIZE(_dg1_combo_phy_ddi_translations_dp_hbr2_hbr3),
+};
+
+static const union intel_ddi_buf_trans_entry _icl_mg_phy_ddi_translations_rbr_hbr[] = {
+ /* Voltage swing pre-emphasis */
+ { .mg = { 0x18, 0x00, 0x00 } }, /* 0 0 */
+ { .mg = { 0x1D, 0x00, 0x05 } }, /* 0 1 */
+ { .mg = { 0x24, 0x00, 0x0C } }, /* 0 2 */
+ { .mg = { 0x2B, 0x00, 0x14 } }, /* 0 3 */
+ { .mg = { 0x21, 0x00, 0x00 } }, /* 1 0 */
+ { .mg = { 0x2B, 0x00, 0x08 } }, /* 1 1 */
+ { .mg = { 0x30, 0x00, 0x0F } }, /* 1 2 */
+ { .mg = { 0x31, 0x00, 0x03 } }, /* 2 0 */
+ { .mg = { 0x34, 0x00, 0x0B } }, /* 2 1 */
+ { .mg = { 0x3F, 0x00, 0x00 } }, /* 3 0 */
+};
+
+static const struct intel_ddi_buf_trans icl_mg_phy_ddi_translations_rbr_hbr = {
+ .entries = _icl_mg_phy_ddi_translations_rbr_hbr,
+ .num_entries = ARRAY_SIZE(_icl_mg_phy_ddi_translations_rbr_hbr),
+};
+
+static const union intel_ddi_buf_trans_entry _icl_mg_phy_ddi_translations_hbr2_hbr3[] = {
+ /* Voltage swing pre-emphasis */
+ { .mg = { 0x18, 0x00, 0x00 } }, /* 0 0 */
+ { .mg = { 0x1D, 0x00, 0x05 } }, /* 0 1 */
+ { .mg = { 0x24, 0x00, 0x0C } }, /* 0 2 */
+ { .mg = { 0x2B, 0x00, 0x14 } }, /* 0 3 */
+ { .mg = { 0x26, 0x00, 0x00 } }, /* 1 0 */
+ { .mg = { 0x2C, 0x00, 0x07 } }, /* 1 1 */
+ { .mg = { 0x33, 0x00, 0x0C } }, /* 1 2 */
+ { .mg = { 0x2E, 0x00, 0x00 } }, /* 2 0 */
+ { .mg = { 0x36, 0x00, 0x09 } }, /* 2 1 */
+ { .mg = { 0x3F, 0x00, 0x00 } }, /* 3 0 */
+};
+
+static const struct intel_ddi_buf_trans icl_mg_phy_ddi_translations_hbr2_hbr3 = {
+ .entries = _icl_mg_phy_ddi_translations_hbr2_hbr3,
+ .num_entries = ARRAY_SIZE(_icl_mg_phy_ddi_translations_hbr2_hbr3),
+};
+
+static const union intel_ddi_buf_trans_entry _icl_mg_phy_ddi_translations_hdmi[] = {
+ /* HDMI Preset VS Pre-emph */
+ { .mg = { 0x1A, 0x0, 0x0 } }, /* 1 400mV 0dB */
+ { .mg = { 0x20, 0x0, 0x0 } }, /* 2 500mV 0dB */
+ { .mg = { 0x29, 0x0, 0x0 } }, /* 3 650mV 0dB */
+ { .mg = { 0x32, 0x0, 0x0 } }, /* 4 800mV 0dB */
+ { .mg = { 0x3F, 0x0, 0x0 } }, /* 5 1000mV 0dB */
+ { .mg = { 0x3A, 0x0, 0x5 } }, /* 6 Full -1.5 dB */
+ { .mg = { 0x39, 0x0, 0x6 } }, /* 7 Full -1.8 dB */
+ { .mg = { 0x38, 0x0, 0x7 } }, /* 8 Full -2 dB */
+ { .mg = { 0x37, 0x0, 0x8 } }, /* 9 Full -2.5 dB */
+ { .mg = { 0x36, 0x0, 0x9 } }, /* 10 Full -3 dB */
+};
+
+static const struct intel_ddi_buf_trans icl_mg_phy_ddi_translations_hdmi = {
+ .entries = _icl_mg_phy_ddi_translations_hdmi,
+ .num_entries = ARRAY_SIZE(_icl_mg_phy_ddi_translations_hdmi),
+ .hdmi_default_entry = ARRAY_SIZE(_icl_mg_phy_ddi_translations_hdmi) - 1,
+};
+
+static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_ddi_translations_dp_hbr[] = {
+ /* VS pre-emp Non-trans mV Pre-emph dB */
+ { .dkl = { 0x7, 0x0, 0x00 } }, /* 0 0 400mV 0 dB */
+ { .dkl = { 0x5, 0x0, 0x05 } }, /* 0 1 400mV 3.5 dB */
+ { .dkl = { 0x2, 0x0, 0x0B } }, /* 0 2 400mV 6 dB */
+ { .dkl = { 0x0, 0x0, 0x18 } }, /* 0 3 400mV 9.5 dB */
+ { .dkl = { 0x5, 0x0, 0x00 } }, /* 1 0 600mV 0 dB */
+ { .dkl = { 0x2, 0x0, 0x08 } }, /* 1 1 600mV 3.5 dB */
+ { .dkl = { 0x0, 0x0, 0x14 } }, /* 1 2 600mV 6 dB */
+ { .dkl = { 0x2, 0x0, 0x00 } }, /* 2 0 800mV 0 dB */
+ { .dkl = { 0x0, 0x0, 0x0B } }, /* 2 1 800mV 3.5 dB */
+ { .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB HDMI default */
+};
+
+static const struct intel_ddi_buf_trans tgl_dkl_phy_ddi_translations_dp_hbr = {
+ .entries = _tgl_dkl_phy_ddi_translations_dp_hbr,
+ .num_entries = ARRAY_SIZE(_tgl_dkl_phy_ddi_translations_dp_hbr),
+};
+
+static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_ddi_translations_dp_hbr2[] = {
+ /* VS pre-emp Non-trans mV Pre-emph dB */
+ { .dkl = { 0x7, 0x0, 0x00 } }, /* 0 0 400mV 0 dB */
+ { .dkl = { 0x5, 0x0, 0x05 } }, /* 0 1 400mV 3.5 dB */
+ { .dkl = { 0x2, 0x0, 0x0B } }, /* 0 2 400mV 6 dB */
+ { .dkl = { 0x0, 0x0, 0x19 } }, /* 0 3 400mV 9.5 dB */
+ { .dkl = { 0x5, 0x0, 0x00 } }, /* 1 0 600mV 0 dB */
+ { .dkl = { 0x2, 0x0, 0x08 } }, /* 1 1 600mV 3.5 dB */
+ { .dkl = { 0x0, 0x0, 0x14 } }, /* 1 2 600mV 6 dB */
+ { .dkl = { 0x2, 0x0, 0x00 } }, /* 2 0 800mV 0 dB */
+ { .dkl = { 0x0, 0x0, 0x0B } }, /* 2 1 800mV 3.5 dB */
+ { .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB HDMI default */
+};
+
+static const struct intel_ddi_buf_trans tgl_dkl_phy_ddi_translations_dp_hbr2 = {
+ .entries = _tgl_dkl_phy_ddi_translations_dp_hbr2,
+ .num_entries = ARRAY_SIZE(_tgl_dkl_phy_ddi_translations_dp_hbr2),
+};
+
+static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_ddi_translations_hdmi[] = {
+ /* HDMI Preset VS Pre-emph */
+ { .dkl = { 0x7, 0x0, 0x0 } }, /* 1 400mV 0dB */
+ { .dkl = { 0x6, 0x0, 0x0 } }, /* 2 500mV 0dB */
+ { .dkl = { 0x4, 0x0, 0x0 } }, /* 3 650mV 0dB */
+ { .dkl = { 0x2, 0x0, 0x0 } }, /* 4 800mV 0dB */
+ { .dkl = { 0x0, 0x0, 0x0 } }, /* 5 1000mV 0dB */
+ { .dkl = { 0x0, 0x0, 0x5 } }, /* 6 Full -1.5 dB */
+ { .dkl = { 0x0, 0x0, 0x6 } }, /* 7 Full -1.8 dB */
+ { .dkl = { 0x0, 0x0, 0x7 } }, /* 8 Full -2 dB */
+ { .dkl = { 0x0, 0x0, 0x8 } }, /* 9 Full -2.5 dB */
+ { .dkl = { 0x0, 0x0, 0xA } }, /* 10 Full -3 dB */
+};
+
+static const struct intel_ddi_buf_trans tgl_dkl_phy_ddi_translations_hdmi = {
+ .entries = _tgl_dkl_phy_ddi_translations_hdmi,
+ .num_entries = ARRAY_SIZE(_tgl_dkl_phy_ddi_translations_hdmi),
+ .hdmi_default_entry = ARRAY_SIZE(_tgl_dkl_phy_ddi_translations_hdmi) - 1,
+};
+
+static const union intel_ddi_buf_trans_entry _tgl_combo_phy_ddi_translations_dp_hbr[] = {
+ /* NT mV Trans mV db */
+ { .cnl = { 0xA, 0x32, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+ { .cnl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
+ { .cnl = { 0xC, 0x71, 0x2F, 0x00, 0x10 } }, /* 350 700 6.0 */
+ { .cnl = { 0x6, 0x7D, 0x2B, 0x00, 0x14 } }, /* 350 900 8.2 */
+ { .cnl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .cnl = { 0xC, 0x73, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */
+ { .cnl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 500 900 5.1 */
+ { .cnl = { 0xC, 0x6C, 0x3C, 0x00, 0x03 } }, /* 650 700 0.6 */
+ { .cnl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */
+ { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};
+
+static const struct intel_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr = {
+ .entries = _tgl_combo_phy_ddi_translations_dp_hbr,
+ .num_entries = ARRAY_SIZE(_tgl_combo_phy_ddi_translations_dp_hbr),
+};
+
+static const union intel_ddi_buf_trans_entry _tgl_combo_phy_ddi_translations_dp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { .cnl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+ { .cnl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
+ { .cnl = { 0xC, 0x63, 0x2F, 0x00, 0x10 } }, /* 350 700 6.0 */
+ { .cnl = { 0x6, 0x7F, 0x2B, 0x00, 0x14 } }, /* 350 900 8.2 */
+ { .cnl = { 0xA, 0x47, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .cnl = { 0xC, 0x63, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */
+ { .cnl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 500 900 5.1 */
+ { .cnl = { 0xC, 0x61, 0x3C, 0x00, 0x03 } }, /* 650 700 0.6 */
+ { .cnl = { 0x6, 0x7B, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */
+ { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};
+
+static const struct intel_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2 = {
+ .entries = _tgl_combo_phy_ddi_translations_dp_hbr2,
+ .num_entries = ARRAY_SIZE(_tgl_combo_phy_ddi_translations_dp_hbr2),
+};
+
+static const union intel_ddi_buf_trans_entry _tgl_uy_combo_phy_ddi_translations_dp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { .cnl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+ { .cnl = { 0xA, 0x4F, 0x36, 0x00, 0x09 } }, /* 350 500 3.1 */
+ { .cnl = { 0xC, 0x60, 0x32, 0x00, 0x0D } }, /* 350 700 6.0 */
+ { .cnl = { 0xC, 0x7F, 0x2D, 0x00, 0x12 } }, /* 350 900 8.2 */
+ { .cnl = { 0xC, 0x47, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .cnl = { 0xC, 0x6F, 0x36, 0x00, 0x09 } }, /* 500 700 2.9 */
+ { .cnl = { 0x6, 0x7D, 0x32, 0x00, 0x0D } }, /* 500 900 5.1 */
+ { .cnl = { 0x6, 0x60, 0x3C, 0x00, 0x03 } }, /* 650 700 0.6 */
+ { .cnl = { 0x6, 0x7F, 0x34, 0x00, 0x0B } }, /* 600 900 3.5 */
+ { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};
+
+static const struct intel_ddi_buf_trans tgl_uy_combo_phy_ddi_translations_dp_hbr2 = {
+ .entries = _tgl_uy_combo_phy_ddi_translations_dp_hbr2,
+ .num_entries = ARRAY_SIZE(_tgl_uy_combo_phy_ddi_translations_dp_hbr2),
};
/*
* Cloned the HOBL entry to comply with the voltage and pre-emphasis entries
 * that the DisplayPort specification requires
*/
-static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_edp_hbr2_hobl[] = {
- /* VS pre-emp */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 0 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 1 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 2 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 3 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 0 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 1 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 2 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 0 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 1 */
-};
-
-static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x2F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
- { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
- { 0x6, 0x7D, 0x2A, 0x00, 0x15 }, /* 350 900 8.2 */
- { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
- { 0xC, 0x6E, 0x3E, 0x00, 0x01 }, /* 650 700 0.6 */
- { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x50, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */
- { 0xC, 0x61, 0x33, 0x00, 0x0C }, /* 350 700 6.0 */
- { 0x6, 0x7F, 0x2E, 0x00, 0x11 }, /* 350 900 8.2 */
- { 0xA, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x5F, 0x38, 0x00, 0x07 }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
- { 0xC, 0x5F, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
- { 0x6, 0x7E, 0x36, 0x00, 0x09 }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-static const struct tgl_dkl_phy_ddi_buf_trans adlp_dkl_phy_dp_ddi_trans_hbr[] = {
- /* VS pre-emp Non-trans mV Pre-emph dB */
- { 0x7, 0x0, 0x01 }, /* 0 0 400mV 0 dB */
- { 0x5, 0x0, 0x06 }, /* 0 1 400mV 3.5 dB */
- { 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */
- { 0x0, 0x0, 0x17 }, /* 0 3 400mV 9.5 dB */
- { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */
- { 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */
- { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */
- { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */
- { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */
- { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB */
-};
-
-static const struct tgl_dkl_phy_ddi_buf_trans adlp_dkl_phy_dp_ddi_trans_hbr2_hbr3[] = {
- /* VS pre-emp Non-trans mV Pre-emph dB */
- { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */
- { 0x5, 0x0, 0x04 }, /* 0 1 400mV 3.5 dB */
- { 0x2, 0x0, 0x0A }, /* 0 2 400mV 6 dB */
- { 0x0, 0x0, 0x18 }, /* 0 3 400mV 9.5 dB */
- { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */
- { 0x2, 0x0, 0x06 }, /* 1 1 600mV 3.5 dB */
- { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */
- { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */
- { 0x0, 0x0, 0x09 }, /* 2 1 800mV 3.5 dB */
- { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB */
-};
-
-bool is_hobl_buf_trans(const struct cnl_ddi_buf_trans *table)
-{
- return table == tgl_combo_phy_ddi_translations_edp_hbr2_hobl;
-}
+static const union intel_ddi_buf_trans_entry _tgl_combo_phy_ddi_translations_edp_hbr2_hobl[] = {
+ /* VS pre-emp */
+ { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 0 0 */
+ { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 0 1 */
+ { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 0 2 */
+ { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 0 3 */
+ { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 1 0 */
+ { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 1 1 */
+ { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 1 2 */
+ { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 2 0 */
+ { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 2 1 */
+};

-static const struct ddi_buf_trans *
-bdw_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+static const struct intel_ddi_buf_trans tgl_combo_phy_ddi_translations_edp_hbr2_hobl = {
+ .entries = _tgl_combo_phy_ddi_translations_edp_hbr2_hobl,
+ .num_entries = ARRAY_SIZE(_tgl_combo_phy_ddi_translations_edp_hbr2_hobl),
+};

- if (dev_priv->vbt.edp.low_vswing) {
- *n_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
- return bdw_ddi_translations_edp;
- } else {
- *n_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
- return bdw_ddi_translations_dp;
- }
-}
+static const union intel_ddi_buf_trans_entry _rkl_combo_phy_ddi_translations_dp_hbr[] = {
+ /* NT mV Trans mV db */
+ { .cnl = { 0xA, 0x2F, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+ { .cnl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
+ { .cnl = { 0xC, 0x63, 0x2F, 0x00, 0x10 } }, /* 350 700 6.0 */
+ { .cnl = { 0x6, 0x7D, 0x2A, 0x00, 0x15 } }, /* 350 900 8.2 */
+ { .cnl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .cnl = { 0xC, 0x73, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */
+ { .cnl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 500 900 5.1 */
+ { .cnl = { 0xC, 0x6E, 0x3E, 0x00, 0x01 } }, /* 650 700 0.6 */
+ { .cnl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */
+ { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};

-static const struct ddi_buf_trans *
-skl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+static const struct intel_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr = {
+ .entries = _rkl_combo_phy_ddi_translations_dp_hbr,
+ .num_entries = ARRAY_SIZE(_rkl_combo_phy_ddi_translations_dp_hbr),
+};

- if (IS_SKL_ULX(dev_priv)) {
- *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
- return skl_y_ddi_translations_dp;
- } else if (IS_SKL_ULT(dev_priv)) {
- *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
- return skl_u_ddi_translations_dp;
- } else {
- *n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
- return skl_ddi_translations_dp;
- }
-}
+static const union intel_ddi_buf_trans_entry _rkl_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
+ /* NT mV Trans mV db */
+ { .cnl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+ { .cnl = { 0xA, 0x50, 0x38, 0x00, 0x07 } }, /* 350 500 3.1 */
+ { .cnl = { 0xC, 0x61, 0x33, 0x00, 0x0C } }, /* 350 700 6.0 */
+ { .cnl = { 0x6, 0x7F, 0x2E, 0x00, 0x11 } }, /* 350 900 8.2 */
+ { .cnl = { 0xA, 0x47, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .cnl = { 0xC, 0x5F, 0x38, 0x00, 0x07 } }, /* 500 700 2.9 */
+ { .cnl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 500 900 5.1 */
+ { .cnl = { 0xC, 0x5F, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */
+ { .cnl = { 0x6, 0x7E, 0x36, 0x00, 0x09 } }, /* 600 900 3.5 */
+ { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};

-static const struct ddi_buf_trans *
-kbl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+static const struct intel_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr2_hbr3 = {
+ .entries = _rkl_combo_phy_ddi_translations_dp_hbr2_hbr3,
+ .num_entries = ARRAY_SIZE(_rkl_combo_phy_ddi_translations_dp_hbr2_hbr3),
+};

- if (IS_KBL_ULX(dev_priv) ||
- IS_CFL_ULX(dev_priv) ||
- IS_CML_ULX(dev_priv)) {
- *n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
- return kbl_y_ddi_translations_dp;
- } else if (IS_KBL_ULT(dev_priv) ||
- IS_CFL_ULT(dev_priv) ||
- IS_CML_ULT(dev_priv)) {
- *n_entries = ARRAY_SIZE(kbl_u_ddi_translations_dp);
- return kbl_u_ddi_translations_dp;
- } else {
- *n_entries = ARRAY_SIZE(kbl_ddi_translations_dp);
- return kbl_ddi_translations_dp;
- }
-}
+static const union intel_ddi_buf_trans_entry _adls_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
+ /* NT mV Trans mV db */
+ { .cnl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+ { .cnl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
+ { .cnl = { 0xC, 0x63, 0x30, 0x00, 0x0F } }, /* 350 700 6.0 */
+ { .cnl = { 0x6, 0x7F, 0x2B, 0x00, 0x14 } }, /* 350 900 8.2 */
+ { .cnl = { 0xA, 0x47, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .cnl = { 0xC, 0x63, 0x37, 0x00, 0x08 } }, /* 500 700 2.9 */
+ { .cnl = { 0x6, 0x7F, 0x31, 0x00, 0x0E } }, /* 500 900 5.1 */
+ { .cnl = { 0xC, 0x61, 0x3C, 0x00, 0x03 } }, /* 650 700 0.6 */
+ { .cnl = { 0x6, 0x7B, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */
+ { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};
+
+static const struct intel_ddi_buf_trans adls_combo_phy_ddi_translations_dp_hbr2_hbr3 = {
+ .entries = _adls_combo_phy_ddi_translations_dp_hbr2_hbr3,
+ .num_entries = ARRAY_SIZE(_adls_combo_phy_ddi_translations_dp_hbr2_hbr3),
+};
+
+static const union intel_ddi_buf_trans_entry _adls_combo_phy_ddi_translations_edp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { .cnl = { 0x9, 0x70, 0x3C, 0x00, 0x03 } }, /* 200 200 0.0 */
+ { .cnl = { 0x9, 0x6D, 0x3A, 0x00, 0x05 } }, /* 200 250 1.9 */
+ { .cnl = { 0x9, 0x7F, 0x36, 0x00, 0x09 } }, /* 200 300 3.5 */
+ { .cnl = { 0x4, 0x59, 0x32, 0x00, 0x0D } }, /* 200 350 4.9 */
+ { .cnl = { 0x2, 0x77, 0x3A, 0x00, 0x05 } }, /* 250 250 0.0 */
+ { .cnl = { 0x2, 0x7F, 0x38, 0x00, 0x07 } }, /* 250 300 1.6 */
+ { .cnl = { 0x4, 0x5A, 0x36, 0x00, 0x09 } }, /* 250 350 2.9 */
+ { .cnl = { 0x4, 0x5E, 0x3D, 0x00, 0x04 } }, /* 300 300 0.0 */
+ { .cnl = { 0x4, 0x65, 0x38, 0x00, 0x07 } }, /* 300 350 1.3 */
+ { .cnl = { 0x4, 0x6F, 0x3A, 0x00, 0x05 } }, /* 350 350 0.0 */
+};
+
+static const struct intel_ddi_buf_trans adls_combo_phy_ddi_translations_edp_hbr2 = {
+ .entries = _adls_combo_phy_ddi_translations_edp_hbr2,
+ .num_entries = ARRAY_SIZE(_adls_combo_phy_ddi_translations_edp_hbr2),
+};
+
+static const union intel_ddi_buf_trans_entry _adls_combo_phy_ddi_translations_edp_hbr3[] = {
+ /* NT mV Trans mV db */
+ { .cnl = { 0xA, 0x5E, 0x34, 0x00, 0x0B } }, /* 350 350 0.0 */
+ { .cnl = { 0xA, 0x69, 0x32, 0x00, 0x0D } }, /* 350 500 3.1 */
+ { .cnl = { 0xC, 0x74, 0x31, 0x00, 0x0E } }, /* 350 700 6.0 */
+ { .cnl = { 0x6, 0x7F, 0x2E, 0x00, 0x11 } }, /* 350 900 8.2 */
+ { .cnl = { 0xA, 0x5C, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .cnl = { 0xC, 0x7F, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */
+ { .cnl = { 0x6, 0x7F, 0x33, 0x00, 0x0C } }, /* 500 900 5.1 */
+ { .cnl = { 0xC, 0x7F, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */
+ { .cnl = { 0x6, 0x7F, 0x3C, 0x00, 0x03 } }, /* 600 900 3.5 */
+ { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};
+
+static const struct intel_ddi_buf_trans adls_combo_phy_ddi_translations_edp_hbr3 = {
+ .entries = _adls_combo_phy_ddi_translations_edp_hbr3,
+ .num_entries = ARRAY_SIZE(_adls_combo_phy_ddi_translations_edp_hbr3),
+};
-static const struct ddi_buf_trans *
-skl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
+static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_ddi_translations_dp_hbr[] = {
+ /* VS pre-emp Non-trans mV Pre-emph dB */
+ { .dkl = { 0x7, 0x0, 0x01 } }, /* 0 0 400mV 0 dB */
+ { .dkl = { 0x5, 0x0, 0x06 } }, /* 0 1 400mV 3.5 dB */
+ { .dkl = { 0x2, 0x0, 0x0B } }, /* 0 2 400mV 6 dB */
+ { .dkl = { 0x0, 0x0, 0x17 } }, /* 0 3 400mV 9.5 dB */
+ { .dkl = { 0x5, 0x0, 0x00 } }, /* 1 0 600mV 0 dB */
+ { .dkl = { 0x2, 0x0, 0x08 } }, /* 1 1 600mV 3.5 dB */
+ { .dkl = { 0x0, 0x0, 0x14 } }, /* 1 2 600mV 6 dB */
+ { .dkl = { 0x2, 0x0, 0x00 } }, /* 2 0 800mV 0 dB */
+ { .dkl = { 0x0, 0x0, 0x0B } }, /* 2 1 800mV 3.5 dB */
+ { .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB */
+};
+
+static const struct intel_ddi_buf_trans adlp_dkl_phy_ddi_translations_dp_hbr = {
+ .entries = _adlp_dkl_phy_ddi_translations_dp_hbr,
+ .num_entries = ARRAY_SIZE(_adlp_dkl_phy_ddi_translations_dp_hbr),
+};
+
+static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_ddi_translations_dp_hbr2_hbr3[] = {
+ /* VS pre-emp Non-trans mV Pre-emph dB */
+ { .dkl = { 0x7, 0x0, 0x00 } }, /* 0 0 400mV 0 dB */
+ { .dkl = { 0x5, 0x0, 0x04 } }, /* 0 1 400mV 3.5 dB */
+ { .dkl = { 0x2, 0x0, 0x0A } }, /* 0 2 400mV 6 dB */
+ { .dkl = { 0x0, 0x0, 0x18 } }, /* 0 3 400mV 9.5 dB */
+ { .dkl = { 0x5, 0x0, 0x00 } }, /* 1 0 600mV 0 dB */
+ { .dkl = { 0x2, 0x0, 0x06 } }, /* 1 1 600mV 3.5 dB */
+ { .dkl = { 0x0, 0x0, 0x14 } }, /* 1 2 600mV 6 dB */
+ { .dkl = { 0x2, 0x0, 0x00 } }, /* 2 0 800mV 0 dB */
+ { .dkl = { 0x0, 0x0, 0x09 } }, /* 2 1 800mV 3.5 dB */
+ { .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB */
+};
+
+static const struct intel_ddi_buf_trans adlp_dkl_phy_ddi_translations_dp_hbr2_hbr3 = {
+ .entries = _adlp_dkl_phy_ddi_translations_dp_hbr2_hbr3,
+ .num_entries = ARRAY_SIZE(_adlp_dkl_phy_ddi_translations_dp_hbr2_hbr3),
+};
+
+bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ return table == &tgl_combo_phy_ddi_translations_edp_hbr2_hobl;
+}
- if (dev_priv->vbt.edp.low_vswing) {
- if (IS_SKL_ULX(dev_priv) ||
- IS_KBL_ULX(dev_priv) ||
- IS_CFL_ULX(dev_priv) ||
- IS_CML_ULX(dev_priv)) {
- *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
- return skl_y_ddi_translations_edp;
- } else if (IS_SKL_ULT(dev_priv) ||
- IS_KBL_ULT(dev_priv) ||
- IS_CFL_ULT(dev_priv) ||
- IS_CML_ULT(dev_priv)) {
- *n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
- return skl_u_ddi_translations_edp;
- } else {
- *n_entries = ARRAY_SIZE(skl_ddi_translations_edp);
- return skl_ddi_translations_edp;
- }
- }
+static const struct intel_ddi_buf_trans *
+intel_get_buf_trans(const struct intel_ddi_buf_trans *ddi_translations, int *num_entries)
+{
+ *num_entries = ddi_translations->num_entries;
+ return ddi_translations;
+}
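
Editorial sketch, not part of the patch: the refactor replaces per-platform "return array and set *n_entries" helpers with tables that carry their own entry count, fetched through one common helper. A minimal standalone version of that pattern, with invented names and values, looks like this:

/*
 * Illustrative only; mirrors the intel_ddi_buf_trans pattern above.
 * All identifiers and values here are made up for the example.
 */
#include <stddef.h>

struct example_entry {
	unsigned int vswing_mv;		/* voltage swing, mV */
	unsigned int preemph_db_x10;	/* pre-emphasis, dB * 10 */
};

struct example_buf_trans {
	const struct example_entry *entries;
	unsigned char num_entries;
};

static const struct example_entry _example_dp_entries[] = {
	{ 400, 0 },	/* 400 mV, 0.0 dB (made-up values) */
	{ 600, 35 },	/* 600 mV, 3.5 dB (made-up values) */
	{ 800, 60 },	/* 800 mV, 6.0 dB (made-up values) */
};

static const struct example_buf_trans example_dp_table = {
	.entries = _example_dp_entries,
	.num_entries = sizeof(_example_dp_entries) / sizeof(_example_dp_entries[0]),
};

/* Same shape as intel_get_buf_trans() above: hand back the table and report its size. */
static const struct example_buf_trans *
example_get_buf_trans(const struct example_buf_trans *table, int *num_entries)
{
	*num_entries = table->num_entries;
	return table;
}
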
- if (IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv) ||
- IS_COMETLAKE(dev_priv))
- return kbl_get_buf_trans_dp(encoder, n_entries);
+static const struct intel_ddi_buf_trans *
+hsw_get_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+ return intel_get_buf_trans(&hsw_ddi_translations_fdi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&hsw_ddi_translations_hdmi, n_entries);
else
- return skl_get_buf_trans_dp(encoder, n_entries);
+ return intel_get_buf_trans(&hsw_ddi_translations_dp, n_entries);
}
-static const struct ddi_buf_trans *
-skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
+static const struct intel_ddi_buf_trans *
+bdw_get_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
{
- if (IS_SKL_ULX(dev_priv) ||
- IS_KBL_ULX(dev_priv) ||
- IS_CFL_ULX(dev_priv) ||
- IS_CML_ULX(dev_priv)) {
- *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
- return skl_y_ddi_translations_hdmi;
- } else {
- *n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
- return skl_ddi_translations_hdmi;
- }
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+ return intel_get_buf_trans(&bdw_ddi_translations_fdi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&bdw_ddi_translations_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
+ i915->vbt.edp.low_vswing)
+ return intel_get_buf_trans(&bdw_ddi_translations_edp, n_entries);
+ else
+ return intel_get_buf_trans(&bdw_ddi_translations_dp, n_entries);
}
static int skl_buf_trans_num_entries(enum port port, int n_entries)
@@ -876,146 +1147,143 @@ static int skl_buf_trans_num_entries(enum port port, int n_entries)
return min(n_entries, 9);
}
-const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
+static const struct intel_ddi_buf_trans *
+_skl_get_buf_trans_dp(struct intel_encoder *encoder,
+ const struct intel_ddi_buf_trans *ddi_translations,
+ int *n_entries)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv) ||
- IS_COMETLAKE(dev_priv)) {
- const struct ddi_buf_trans *ddi_translations =
- kbl_get_buf_trans_dp(encoder, n_entries);
- *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
- return ddi_translations;
- } else if (IS_SKYLAKE(dev_priv)) {
- const struct ddi_buf_trans *ddi_translations =
- skl_get_buf_trans_dp(encoder, n_entries);
- *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
- return ddi_translations;
- } else if (IS_BROADWELL(dev_priv)) {
- *n_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
- return bdw_ddi_translations_dp;
- } else if (IS_HASWELL(dev_priv)) {
- *n_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
- return hsw_ddi_translations_dp;
- }
-
- *n_entries = 0;
- return NULL;
+ ddi_translations = intel_get_buf_trans(ddi_translations, n_entries);
+ *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
+ return ddi_translations;
}
-const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
+static const struct intel_ddi_buf_trans *
+skl_y_get_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
- const struct ddi_buf_trans *ddi_translations =
- skl_get_buf_trans_edp(encoder, n_entries);
- *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
- return ddi_translations;
- } else if (IS_BROADWELL(dev_priv)) {
- return bdw_get_buf_trans_edp(encoder, n_entries);
- } else if (IS_HASWELL(dev_priv)) {
- *n_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
- return hsw_ddi_translations_dp;
- }
-
- *n_entries = 0;
- return NULL;
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&skl_y_ddi_translations_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
+ i915->vbt.edp.low_vswing)
+ return _skl_get_buf_trans_dp(encoder, &skl_y_ddi_translations_edp, n_entries);
+ else
+ return _skl_get_buf_trans_dp(encoder, &skl_y_ddi_translations_dp, n_entries);
}
-const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
- int *n_entries)
+static const struct intel_ddi_buf_trans *
+skl_u_get_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
{
- if (IS_BROADWELL(dev_priv)) {
- *n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi);
- return bdw_ddi_translations_fdi;
- } else if (IS_HASWELL(dev_priv)) {
- *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
- return hsw_ddi_translations_fdi;
- }
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- *n_entries = 0;
- return NULL;
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&skl_ddi_translations_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
+ i915->vbt.edp.low_vswing)
+ return _skl_get_buf_trans_dp(encoder, &skl_u_ddi_translations_edp, n_entries);
+ else
+ return _skl_get_buf_trans_dp(encoder, &skl_u_ddi_translations_dp, n_entries);
}
-const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_hdmi(struct intel_encoder *encoder,
- int *n_entries)
+static const struct intel_ddi_buf_trans *
+skl_get_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
- return skl_get_buf_trans_hdmi(dev_priv, n_entries);
- } else if (IS_BROADWELL(dev_priv)) {
- *n_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
- return bdw_ddi_translations_hdmi;
- } else if (IS_HASWELL(dev_priv)) {
- *n_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
- return hsw_ddi_translations_hdmi;
- }
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- *n_entries = 0;
- return NULL;
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&skl_ddi_translations_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
+ i915->vbt.edp.low_vswing)
+ return _skl_get_buf_trans_dp(encoder, &skl_ddi_translations_edp, n_entries);
+ else
+ return _skl_get_buf_trans_dp(encoder, &skl_ddi_translations_dp, n_entries);
}
-static const struct bxt_ddi_buf_trans *
-bxt_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
+static const struct intel_ddi_buf_trans *
+kbl_y_get_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
{
- *n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
- return bxt_ddi_translations_dp;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&skl_y_ddi_translations_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
+ i915->vbt.edp.low_vswing)
+ return _skl_get_buf_trans_dp(encoder, &skl_y_ddi_translations_edp, n_entries);
+ else
+ return _skl_get_buf_trans_dp(encoder, &kbl_y_ddi_translations_dp, n_entries);
}
-static const struct bxt_ddi_buf_trans *
-bxt_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
+static const struct intel_ddi_buf_trans *
+kbl_u_get_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (dev_priv->vbt.edp.low_vswing) {
- *n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
- return bxt_ddi_translations_edp;
- }
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- return bxt_get_buf_trans_dp(encoder, n_entries);
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&skl_ddi_translations_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
+ i915->vbt.edp.low_vswing)
+ return _skl_get_buf_trans_dp(encoder, &skl_u_ddi_translations_edp, n_entries);
+ else
+ return _skl_get_buf_trans_dp(encoder, &kbl_u_ddi_translations_dp, n_entries);
}
-static const struct bxt_ddi_buf_trans *
-bxt_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries)
+static const struct intel_ddi_buf_trans *
+kbl_get_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
{
- *n_entries = ARRAY_SIZE(bxt_ddi_translations_hdmi);
- return bxt_ddi_translations_hdmi;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&skl_ddi_translations_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
+ i915->vbt.edp.low_vswing)
+ return _skl_get_buf_trans_dp(encoder, &skl_ddi_translations_edp, n_entries);
+ else
+ return _skl_get_buf_trans_dp(encoder, &kbl_ddi_translations_dp, n_entries);
}
-const struct bxt_ddi_buf_trans *
+static const struct intel_ddi_buf_trans *
bxt_get_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return bxt_get_buf_trans_hdmi(encoder, n_entries);
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- return bxt_get_buf_trans_edp(encoder, n_entries);
- return bxt_get_buf_trans_dp(encoder, n_entries);
+ return intel_get_buf_trans(&bxt_ddi_translations_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
+ i915->vbt.edp.low_vswing)
+ return intel_get_buf_trans(&bxt_ddi_translations_edp, n_entries);
+ else
+ return intel_get_buf_trans(&bxt_ddi_translations_dp, n_entries);
}
-static const struct cnl_ddi_buf_trans *
+static const struct intel_ddi_buf_trans *
cnl_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
if (voltage == VOLTAGE_INFO_0_85V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_85V);
- return cnl_ddi_translations_hdmi_0_85V;
+ return intel_get_buf_trans(&cnl_ddi_translations_hdmi_0_85V,
+ n_entries);
} else if (voltage == VOLTAGE_INFO_0_95V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_95V);
- return cnl_ddi_translations_hdmi_0_95V;
+ return intel_get_buf_trans(&cnl_ddi_translations_hdmi_0_95V,
+ n_entries);
} else if (voltage == VOLTAGE_INFO_1_05V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_1_05V);
- return cnl_ddi_translations_hdmi_1_05V;
+ return intel_get_buf_trans(&cnl_ddi_translations_hdmi_1_05V,
+ n_entries);
} else {
*n_entries = 1; /* shut up gcc */
MISSING_CASE(voltage);
@@ -1023,21 +1291,21 @@ cnl_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries)
return NULL;
}
-static const struct cnl_ddi_buf_trans *
+static const struct intel_ddi_buf_trans *
cnl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
if (voltage == VOLTAGE_INFO_0_85V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_85V);
- return cnl_ddi_translations_dp_0_85V;
+ return intel_get_buf_trans(&cnl_ddi_translations_dp_0_85V,
+ n_entries);
} else if (voltage == VOLTAGE_INFO_0_95V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_95V);
- return cnl_ddi_translations_dp_0_95V;
+ return intel_get_buf_trans(&cnl_ddi_translations_dp_0_95V,
+ n_entries);
} else if (voltage == VOLTAGE_INFO_1_05V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_1_05V);
- return cnl_ddi_translations_dp_1_05V;
+ return intel_get_buf_trans(&cnl_ddi_translations_dp_1_05V,
+ n_entries);
} else {
*n_entries = 1; /* shut up gcc */
MISSING_CASE(voltage);
@@ -1045,7 +1313,7 @@ cnl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
return NULL;
}
-static const struct cnl_ddi_buf_trans *
+static const struct intel_ddi_buf_trans *
cnl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -1053,14 +1321,14 @@ cnl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
if (dev_priv->vbt.edp.low_vswing) {
if (voltage == VOLTAGE_INFO_0_85V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V);
- return cnl_ddi_translations_edp_0_85V;
+ return intel_get_buf_trans(&cnl_ddi_translations_edp_0_85V,
+ n_entries);
} else if (voltage == VOLTAGE_INFO_0_95V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V);
- return cnl_ddi_translations_edp_0_95V;
+ return intel_get_buf_trans(&cnl_ddi_translations_edp_0_95V,
+ n_entries);
} else if (voltage == VOLTAGE_INFO_1_05V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_1_05V);
- return cnl_ddi_translations_edp_1_05V;
+ return intel_get_buf_trans(&cnl_ddi_translations_edp_1_05V,
+ n_entries);
} else {
*n_entries = 1; /* shut up gcc */
MISSING_CASE(voltage);
@@ -1071,7 +1339,7 @@ cnl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
}
}
-const struct cnl_ddi_buf_trans *
+static const struct intel_ddi_buf_trans *
cnl_get_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
@@ -1083,25 +1351,16 @@ cnl_get_buf_trans(struct intel_encoder *encoder,
return cnl_get_buf_trans_dp(encoder, n_entries);
}
-static const struct cnl_ddi_buf_trans *
-icl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
- return icl_combo_phy_ddi_translations_hdmi;
-}
-
-static const struct cnl_ddi_buf_trans *
+static const struct intel_ddi_buf_trans *
icl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2);
- return icl_combo_phy_ddi_translations_dp_hbr2;
+ return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3,
+ n_entries);
}
-static const struct cnl_ddi_buf_trans *
+static const struct intel_ddi_buf_trans *
icl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
@@ -1109,176 +1368,109 @@ icl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (crtc_state->port_clock > 540000) {
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3);
- return icl_combo_phy_ddi_translations_edp_hbr3;
+ return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3,
+ n_entries);
} else if (dev_priv->vbt.edp.low_vswing) {
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
- return icl_combo_phy_ddi_translations_edp_hbr2;
- } else if (IS_DG1(dev_priv) && crtc_state->port_clock > 270000) {
- *n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_hbr2_hbr3);
- return dg1_combo_phy_ddi_translations_dp_hbr2_hbr3;
- } else if (IS_DG1(dev_priv)) {
- *n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_rbr_hbr);
- return dg1_combo_phy_ddi_translations_dp_rbr_hbr;
+ return intel_get_buf_trans(&icl_combo_phy_ddi_translations_edp_hbr2,
+ n_entries);
}
return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
}
-const struct cnl_ddi_buf_trans *
+static const struct intel_ddi_buf_trans *
icl_get_combo_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return icl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries);
+ return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
return icl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
else
return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
}
-static const struct icl_mg_phy_ddi_buf_trans *
-icl_get_mg_buf_trans_hdmi(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hdmi);
- return icl_mg_phy_ddi_translations_hdmi;
-}
-
-static const struct icl_mg_phy_ddi_buf_trans *
+static const struct intel_ddi_buf_trans *
icl_get_mg_buf_trans_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
if (crtc_state->port_clock > 270000) {
- *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hbr2_hbr3);
- return icl_mg_phy_ddi_translations_hbr2_hbr3;
+ return intel_get_buf_trans(&icl_mg_phy_ddi_translations_hbr2_hbr3,
+ n_entries);
} else {
- *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_rbr_hbr);
- return icl_mg_phy_ddi_translations_rbr_hbr;
+ return intel_get_buf_trans(&icl_mg_phy_ddi_translations_rbr_hbr,
+ n_entries);
}
}
-const struct icl_mg_phy_ddi_buf_trans *
+static const struct intel_ddi_buf_trans *
icl_get_mg_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return icl_get_mg_buf_trans_hdmi(encoder, crtc_state, n_entries);
+ return intel_get_buf_trans(&icl_mg_phy_ddi_translations_hdmi, n_entries);
else
return icl_get_mg_buf_trans_dp(encoder, crtc_state, n_entries);
}
-static const struct cnl_ddi_buf_trans *
-ehl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
- return icl_combo_phy_ddi_translations_hdmi;
-}
-
-static const struct cnl_ddi_buf_trans *
-ehl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- *n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_dp);
- return ehl_combo_phy_ddi_translations_dp;
-}
-
-static const struct cnl_ddi_buf_trans *
+static const struct intel_ddi_buf_trans *
ehl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (dev_priv->vbt.edp.low_vswing) {
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
- return icl_combo_phy_ddi_translations_edp_hbr2;
- }
-
- return ehl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+ if (crtc_state->port_clock > 270000)
+ return intel_get_buf_trans(&ehl_combo_phy_ddi_translations_edp_hbr2, n_entries);
+ else
+ return intel_get_buf_trans(&icl_combo_phy_ddi_translations_edp_hbr2, n_entries);
}
-const struct cnl_ddi_buf_trans *
+static const struct intel_ddi_buf_trans *
ehl_get_combo_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return ehl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
+ dev_priv->vbt.edp.low_vswing)
return ehl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
else
- return ehl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
-}
-
-static const struct cnl_ddi_buf_trans *
-jsl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
- return icl_combo_phy_ddi_translations_hdmi;
-}
-
-static const struct cnl_ddi_buf_trans *
-jsl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2);
- return icl_combo_phy_ddi_translations_dp_hbr2;
+ return intel_get_buf_trans(&ehl_combo_phy_ddi_translations_dp, n_entries);
}
-static const struct cnl_ddi_buf_trans *
+static const struct intel_ddi_buf_trans *
jsl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (dev_priv->vbt.edp.low_vswing) {
- if (crtc_state->port_clock > 270000) {
- *n_entries = ARRAY_SIZE(jsl_combo_phy_ddi_translations_edp_hbr2);
- return jsl_combo_phy_ddi_translations_edp_hbr2;
- } else {
- *n_entries = ARRAY_SIZE(jsl_combo_phy_ddi_translations_edp_hbr);
- return jsl_combo_phy_ddi_translations_edp_hbr;
- }
- }
-
- return jsl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+ if (crtc_state->port_clock > 270000)
+ return intel_get_buf_trans(&jsl_combo_phy_ddi_translations_edp_hbr2, n_entries);
+ else
+ return intel_get_buf_trans(&jsl_combo_phy_ddi_translations_edp_hbr, n_entries);
}
-const struct cnl_ddi_buf_trans *
+static const struct intel_ddi_buf_trans *
jsl_get_combo_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return jsl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
+ dev_priv->vbt.edp.low_vswing)
return jsl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
else
- return jsl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+ return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3, n_entries);
}
-static const struct cnl_ddi_buf_trans *
-tgl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
- return icl_combo_phy_ddi_translations_hdmi;
-}
-
-static const struct cnl_ddi_buf_trans *
+static const struct intel_ddi_buf_trans *
tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
@@ -1286,28 +1478,20 @@ tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (crtc_state->port_clock > 270000) {
- if (IS_ROCKETLAKE(dev_priv)) {
- *n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr2_hbr3);
- return rkl_combo_phy_ddi_translations_dp_hbr2_hbr3;
- } else if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
- *n_entries = ARRAY_SIZE(tgl_uy_combo_phy_ddi_translations_dp_hbr2);
- return tgl_uy_combo_phy_ddi_translations_dp_hbr2;
+ if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
+ return intel_get_buf_trans(&tgl_uy_combo_phy_ddi_translations_dp_hbr2,
+ n_entries);
} else {
- *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2);
- return tgl_combo_phy_ddi_translations_dp_hbr2;
+ return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_dp_hbr2,
+ n_entries);
}
} else {
- if (IS_ROCKETLAKE(dev_priv)) {
- *n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr);
- return rkl_combo_phy_ddi_translations_dp_hbr;
- } else {
- *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr);
- return tgl_combo_phy_ddi_translations_dp_hbr;
- }
+ return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_dp_hbr,
+ n_entries);
}
}
-static const struct cnl_ddi_buf_trans *
+static const struct intel_ddi_buf_trans *
tgl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
@@ -1316,87 +1500,213 @@ tgl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (crtc_state->port_clock > 540000) {
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3);
- return icl_combo_phy_ddi_translations_edp_hbr3;
+ return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3,
+ n_entries);
} else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) {
- *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_edp_hbr2_hobl);
- return tgl_combo_phy_ddi_translations_edp_hbr2_hobl;
+ return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_edp_hbr2_hobl,
+ n_entries);
} else if (dev_priv->vbt.edp.low_vswing) {
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
- return icl_combo_phy_ddi_translations_edp_hbr2;
+ return intel_get_buf_trans(&icl_combo_phy_ddi_translations_edp_hbr2,
+ n_entries);
}
return tgl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
}
-const struct cnl_ddi_buf_trans *
+static const struct intel_ddi_buf_trans *
tgl_get_combo_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return tgl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries);
+ return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
return tgl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
else
return tgl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
}
-static const struct tgl_dkl_phy_ddi_buf_trans *
-tgl_get_dkl_buf_trans_hdmi(struct intel_encoder *encoder,
+static const struct intel_ddi_buf_trans *
+dg1_get_combo_buf_trans_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- *n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans);
- return tgl_dkl_phy_hdmi_ddi_trans;
+ if (crtc_state->port_clock > 270000)
+ return intel_get_buf_trans(&dg1_combo_phy_ddi_translations_dp_hbr2_hbr3,
+ n_entries);
+ else
+ return intel_get_buf_trans(&dg1_combo_phy_ddi_translations_dp_rbr_hbr,
+ n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+dg1_get_combo_buf_trans_edp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (crtc_state->port_clock > 540000)
+ return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3,
+ n_entries);
+ else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed)
+ return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_edp_hbr2_hobl,
+ n_entries);
+ else if (dev_priv->vbt.edp.low_vswing)
+ return intel_get_buf_trans(&icl_combo_phy_ddi_translations_edp_hbr2,
+ n_entries);
+ else
+ return dg1_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
}
-static const struct tgl_dkl_phy_ddi_buf_trans *
+static const struct intel_ddi_buf_trans *
+dg1_get_combo_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return dg1_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
+ else
+ return dg1_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+rkl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (crtc_state->port_clock > 270000)
+ return intel_get_buf_trans(&rkl_combo_phy_ddi_translations_dp_hbr2_hbr3, n_entries);
+ else
+ return intel_get_buf_trans(&rkl_combo_phy_ddi_translations_dp_hbr, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+rkl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (crtc_state->port_clock > 540000) {
+ return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3,
+ n_entries);
+ } else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) {
+ return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_edp_hbr2_hobl,
+ n_entries);
+ } else if (dev_priv->vbt.edp.low_vswing) {
+ return intel_get_buf_trans(&icl_combo_phy_ddi_translations_edp_hbr2,
+ n_entries);
+ }
+
+ return rkl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+rkl_get_combo_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return rkl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
+ else
+ return rkl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+adls_get_combo_buf_trans_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (crtc_state->port_clock > 270000)
+ return intel_get_buf_trans(&adls_combo_phy_ddi_translations_dp_hbr2_hbr3, n_entries);
+ else
+ return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_dp_hbr, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+adls_get_combo_buf_trans_edp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (crtc_state->port_clock > 540000)
+ return intel_get_buf_trans(&adls_combo_phy_ddi_translations_edp_hbr3, n_entries);
+ else if (i915->vbt.edp.hobl && !intel_dp->hobl_failed)
+ return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_edp_hbr2_hobl, n_entries);
+ else if (i915->vbt.edp.low_vswing)
+ return intel_get_buf_trans(&adls_combo_phy_ddi_translations_edp_hbr2, n_entries);
+ else
+ return adls_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+adls_get_combo_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return adls_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
+ else
+ return adls_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
tgl_get_dkl_buf_trans_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
if (crtc_state->port_clock > 270000) {
- *n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans_hbr2);
- return tgl_dkl_phy_dp_ddi_trans_hbr2;
+ return intel_get_buf_trans(&tgl_dkl_phy_ddi_translations_dp_hbr2,
+ n_entries);
} else {
- *n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans);
- return tgl_dkl_phy_dp_ddi_trans;
+ return intel_get_buf_trans(&tgl_dkl_phy_ddi_translations_dp_hbr,
+ n_entries);
}
}
-const struct tgl_dkl_phy_ddi_buf_trans *
+static const struct intel_ddi_buf_trans *
tgl_get_dkl_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return tgl_get_dkl_buf_trans_hdmi(encoder, crtc_state, n_entries);
+ return intel_get_buf_trans(&tgl_dkl_phy_ddi_translations_hdmi, n_entries);
else
return tgl_get_dkl_buf_trans_dp(encoder, crtc_state, n_entries);
}
-static const struct tgl_dkl_phy_ddi_buf_trans *
+static const struct intel_ddi_buf_trans *
adlp_get_dkl_buf_trans_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
if (crtc_state->port_clock > 270000) {
- *n_entries = ARRAY_SIZE(adlp_dkl_phy_dp_ddi_trans_hbr2_hbr3);
- return adlp_dkl_phy_dp_ddi_trans_hbr2_hbr3;
+ return intel_get_buf_trans(&adlp_dkl_phy_ddi_translations_dp_hbr2_hbr3,
+ n_entries);
+ } else {
+ return intel_get_buf_trans(&adlp_dkl_phy_ddi_translations_dp_hbr,
+ n_entries);
}
-
- *n_entries = ARRAY_SIZE(adlp_dkl_phy_dp_ddi_trans_hbr);
- return adlp_dkl_phy_dp_ddi_trans_hbr;
}
-const struct tgl_dkl_phy_ddi_buf_trans *
+static const struct intel_ddi_buf_trans *
adlp_get_dkl_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return tgl_get_dkl_buf_trans_hdmi(encoder, crtc_state, n_entries);
+ return intel_get_buf_trans(&tgl_dkl_phy_ddi_translations_hdmi, n_entries);
else
return adlp_get_dkl_buf_trans_dp(encoder, crtc_state, n_entries);
}
@@ -1406,43 +1716,70 @@ int intel_ddi_hdmi_num_entries(struct intel_encoder *encoder,
int *default_entry)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ const struct intel_ddi_buf_trans *ddi_translations;
int n_entries;
- if (DISPLAY_VER(dev_priv) >= 12) {
- if (intel_phy_is_combo(dev_priv, phy))
- tgl_get_combo_buf_trans_hdmi(encoder, crtc_state, &n_entries);
- else
- tgl_get_dkl_buf_trans_hdmi(encoder, crtc_state, &n_entries);
- *default_entry = n_entries - 1;
- } else if (DISPLAY_VER(dev_priv) == 11) {
- if (intel_phy_is_combo(dev_priv, phy))
- icl_get_combo_buf_trans_hdmi(encoder, crtc_state, &n_entries);
- else
- icl_get_mg_buf_trans_hdmi(encoder, crtc_state, &n_entries);
- *default_entry = n_entries - 1;
- } else if (IS_CANNONLAKE(dev_priv)) {
- cnl_get_buf_trans_hdmi(encoder, &n_entries);
- *default_entry = n_entries - 1;
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
- bxt_get_buf_trans_hdmi(encoder, &n_entries);
- *default_entry = n_entries - 1;
- } else if (DISPLAY_VER(dev_priv) == 9) {
- intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
- *default_entry = 8;
- } else if (IS_BROADWELL(dev_priv)) {
- intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
- *default_entry = 7;
- } else if (IS_HASWELL(dev_priv)) {
- intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
- *default_entry = 6;
- } else {
- drm_WARN(&dev_priv->drm, 1, "ddi translation table missing\n");
+ ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+
+ if (drm_WARN_ON(&dev_priv->drm, !ddi_translations)) {
+ *default_entry = 0;
return 0;
}
- if (drm_WARN_ON_ONCE(&dev_priv->drm, n_entries == 0))
- return 0;
+ *default_entry = ddi_translations->hdmi_default_entry;
return n_entries;
}
+
+void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (IS_ALDERLAKE_P(i915)) {
+ if (intel_phy_is_combo(i915, phy))
+ encoder->get_buf_trans = tgl_get_combo_buf_trans;
+ else
+ encoder->get_buf_trans = adlp_get_dkl_buf_trans;
+ } else if (IS_ALDERLAKE_S(i915)) {
+ encoder->get_buf_trans = adls_get_combo_buf_trans;
+ } else if (IS_ROCKETLAKE(i915)) {
+ encoder->get_buf_trans = rkl_get_combo_buf_trans;
+ } else if (IS_DG1(i915)) {
+ encoder->get_buf_trans = dg1_get_combo_buf_trans;
+ } else if (DISPLAY_VER(i915) >= 12) {
+ if (intel_phy_is_combo(i915, phy))
+ encoder->get_buf_trans = tgl_get_combo_buf_trans;
+ else
+ encoder->get_buf_trans = tgl_get_dkl_buf_trans;
+ } else if (DISPLAY_VER(i915) == 11) {
+ if (IS_PLATFORM(i915, INTEL_JASPERLAKE))
+ encoder->get_buf_trans = jsl_get_combo_buf_trans;
+ else if (IS_PLATFORM(i915, INTEL_ELKHARTLAKE))
+ encoder->get_buf_trans = ehl_get_combo_buf_trans;
+ else if (intel_phy_is_combo(i915, phy))
+ encoder->get_buf_trans = icl_get_combo_buf_trans;
+ else
+ encoder->get_buf_trans = icl_get_mg_buf_trans;
+ } else if (IS_CANNONLAKE(i915)) {
+ encoder->get_buf_trans = cnl_get_buf_trans;
+ } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
+ encoder->get_buf_trans = bxt_get_buf_trans;
+ } else if (IS_CML_ULX(i915) || IS_CFL_ULX(i915) || IS_KBL_ULX(i915)) {
+ encoder->get_buf_trans = kbl_y_get_buf_trans;
+ } else if (IS_CML_ULT(i915) || IS_CFL_ULT(i915) || IS_KBL_ULT(i915)) {
+ encoder->get_buf_trans = kbl_u_get_buf_trans;
+ } else if (IS_COMETLAKE(i915) || IS_COFFEELAKE(i915) || IS_KABYLAKE(i915)) {
+ encoder->get_buf_trans = kbl_get_buf_trans;
+ } else if (IS_SKL_ULX(i915)) {
+ encoder->get_buf_trans = skl_y_get_buf_trans;
+ } else if (IS_SKL_ULT(i915)) {
+ encoder->get_buf_trans = skl_u_get_buf_trans;
+ } else if (IS_SKYLAKE(i915)) {
+ encoder->get_buf_trans = skl_get_buf_trans;
+ } else if (IS_BROADWELL(i915)) {
+ encoder->get_buf_trans = bdw_get_buf_trans;
+ } else {
+ encoder->get_buf_trans = hsw_get_buf_trans;
+ }
+}
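
Editorial sketch, not part of the patch: once intel_ddi_buf_trans_init() has installed encoder->get_buf_trans, callers look up translation tables through the vfunc instead of per-platform branches, as intel_ddi_hdmi_num_entries() does earlier in this file. A hedged caller-side example (hypothetical helper name, types as declared by this patch):

/* Sketch only; assumes the encoder was initialized via intel_ddi_buf_trans_init(). */
static int example_query_buf_trans(struct intel_encoder *encoder,
				   const struct intel_crtc_state *crtc_state)
{
	const struct intel_ddi_buf_trans *ddi_translations;
	int n_entries = 0;

	ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
	if (!ddi_translations)
		return 0;

	/* n_entries now reflects the selected table's size. */
	return n_entries;
}
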
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
index 4c2efab38642..05226eb46cd6 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
@@ -12,7 +12,7 @@ struct drm_i915_private;
struct intel_encoder;
struct intel_crtc_state;
-struct ddi_buf_trans {
+struct hsw_ddi_buf_trans {
u32 trans1; /* balance leg enable, de-emph level */
u32 trans2; /* vref sel, vswing */
u8 i_boost; /* SKL: I_boost; valid: 0x0, 0x1, 0x3, 0x7 */
@@ -45,60 +45,26 @@ struct tgl_dkl_phy_ddi_buf_trans {
u32 dkl_de_emphasis_control;
};
-bool is_hobl_buf_trans(const struct cnl_ddi_buf_trans *table);
+union intel_ddi_buf_trans_entry {
+ struct hsw_ddi_buf_trans hsw;
+ struct bxt_ddi_buf_trans bxt;
+ struct cnl_ddi_buf_trans cnl;
+ struct icl_mg_phy_ddi_buf_trans mg;
+ struct tgl_dkl_phy_ddi_buf_trans dkl;
+};
+
+struct intel_ddi_buf_trans {
+ const union intel_ddi_buf_trans_entry *entries;
+ u8 num_entries;
+ u8 hdmi_default_entry;
+};
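
Editorial sketch, not part of the patch: a platform table wraps its per-PHY entry array in the struct declared above (cnl-style union member assumed; entry values invented):

static const union intel_ddi_buf_trans_entry _example_phy_translations[] = {
	{ .cnl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } },	/* hypothetical 350 mV / 0 dB */
	{ .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },	/* hypothetical 900 mV / 0 dB */
};

static const struct intel_ddi_buf_trans example_phy_translations = {
	.entries = _example_phy_translations,
	.num_entries = ARRAY_SIZE(_example_phy_translations),
	.hdmi_default_entry = 0,
};
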
+
+bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table);
int intel_ddi_hdmi_num_entries(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *default_entry);
-const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries);
-const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
- int *n_entries);
-const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_hdmi(struct intel_encoder *encoder,
- int *n_entries);
-const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries);
-
-const struct bxt_ddi_buf_trans *
-bxt_get_buf_trans(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries);
-
-const struct tgl_dkl_phy_ddi_buf_trans *
-adlp_get_dkl_buf_trans(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries);
-const struct cnl_ddi_buf_trans *
-tgl_get_combo_buf_trans(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries);
-const struct tgl_dkl_phy_ddi_buf_trans *
-tgl_get_dkl_buf_trans(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries);
-const struct cnl_ddi_buf_trans *
-jsl_get_combo_buf_trans(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries);
-const struct cnl_ddi_buf_trans *
-ehl_get_combo_buf_trans(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries);
-const struct cnl_ddi_buf_trans *
-icl_get_combo_buf_trans(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries);
-const struct icl_mg_phy_ddi_buf_trans *
-icl_get_mg_buf_trans(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries);
-
-const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries);
+void intel_ddi_buf_trans_init(struct intel_encoder *encoder);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 6be1b31af07b..eec6c9e9cda7 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -1914,20 +1914,50 @@ static void intel_dpt_unpin(struct i915_address_space *vm)
i915_vma_put(dpt->vma);
}
+static bool
+intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
+ const struct intel_initial_plane_config *plane_config,
+ struct drm_framebuffer **fb,
+ struct i915_vma **vma)
+{
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane *plane =
+ to_intel_plane(crtc->base.primary);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+
+ if (!crtc_state->uapi.active)
+ continue;
+
+ if (!plane_state->ggtt_vma)
+ continue;
+
+ if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
+ *fb = plane_state->hw.fb;
+ *vma = plane_state->ggtt_vma;
+ return true;
+ }
+ }
+
+ return false;
+}
+
static void
-intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
+intel_find_initial_plane_obj(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
- struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *c;
- struct drm_plane *primary = intel_crtc->base.primary;
- struct drm_plane_state *plane_state = primary->state;
- struct intel_plane *intel_plane = to_intel_plane(primary);
- struct intel_plane_state *intel_state =
- to_intel_plane_state(plane_state);
struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(intel_crtc->base.state);
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane *plane =
+ to_intel_plane(crtc->base.primary);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
struct drm_framebuffer *fb;
struct i915_vma *vma;
@@ -1939,7 +1969,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
if (!plane_config->fb)
return;
- if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
+ if (intel_alloc_initial_plane_obj(crtc, plane_config)) {
fb = &plane_config->fb->base;
vma = plane_config->vma;
goto valid_fb;
@@ -1949,25 +1979,8 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
* Failed to alloc the obj, check to see if we should share
* an fb with another CRTC instead
*/
- for_each_crtc(dev, c) {
- struct intel_plane_state *state;
-
- if (c == &intel_crtc->base)
- continue;
-
- if (!to_intel_crtc_state(c->state)->uapi.active)
- continue;
-
- state = to_intel_plane_state(c->primary->state);
- if (!state->ggtt_vma)
- continue;
-
- if (intel_plane_ggtt_offset(state) == plane_config->base) {
- fb = state->hw.fb;
- vma = state->ggtt_vma;
- goto valid_fb;
- }
- }
+ if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma))
+ goto valid_fb;
/*
* We've failed to reconstruct the BIOS FB. Current display state
@@ -1976,7 +1989,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
* simplest solution is to just disable the primary plane now and
* pretend the BIOS never had it enabled.
*/
- intel_plane_disable_noatomic(intel_crtc, intel_plane);
+ intel_plane_disable_noatomic(crtc, plane);
if (crtc_state->bigjoiner) {
struct intel_crtc *slave =
crtc_state->bigjoiner_linked_crtc;
@@ -1986,40 +1999,38 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
return;
valid_fb:
- plane_state->rotation = plane_config->rotation;
- intel_fb_fill_view(to_intel_framebuffer(fb), plane_state->rotation,
- &intel_state->view);
+ plane_state->uapi.rotation = plane_config->rotation;
+ intel_fb_fill_view(to_intel_framebuffer(fb),
+ plane_state->uapi.rotation, &plane_state->view);
__i915_vma_pin(vma);
- intel_state->ggtt_vma = i915_vma_get(vma);
- if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
- if (vma->fence)
- intel_state->flags |= PLANE_HAS_FENCE;
+ plane_state->ggtt_vma = i915_vma_get(vma);
+ if (intel_plane_uses_fence(plane_state) &&
+ i915_vma_pin_fence(vma) == 0 && vma->fence)
+ plane_state->flags |= PLANE_HAS_FENCE;
- plane_state->src_x = 0;
- plane_state->src_y = 0;
- plane_state->src_w = fb->width << 16;
- plane_state->src_h = fb->height << 16;
+ plane_state->uapi.src_x = 0;
+ plane_state->uapi.src_y = 0;
+ plane_state->uapi.src_w = fb->width << 16;
+ plane_state->uapi.src_h = fb->height << 16;
- plane_state->crtc_x = 0;
- plane_state->crtc_y = 0;
- plane_state->crtc_w = fb->width;
- plane_state->crtc_h = fb->height;
+ plane_state->uapi.crtc_x = 0;
+ plane_state->uapi.crtc_y = 0;
+ plane_state->uapi.crtc_w = fb->width;
+ plane_state->uapi.crtc_h = fb->height;
if (plane_config->tiling)
dev_priv->preserve_bios_swizzle = true;
- plane_state->fb = fb;
+ plane_state->uapi.fb = fb;
drm_framebuffer_get(fb);
- plane_state->crtc = &intel_crtc->base;
- intel_plane_copy_uapi_to_hw_state(intel_state, intel_state,
- intel_crtc);
+ plane_state->uapi.crtc = &crtc->base;
+ intel_plane_copy_uapi_to_hw_state(plane_state, plane_state, crtc);
intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
- atomic_or(to_intel_plane(primary)->frontbuffer_bit,
- &to_intel_frontbuffer(fb)->bits);
+ atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
}
unsigned int
@@ -2706,10 +2717,10 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
intel_wait_for_vblank(dev_priv, crtc->pipe);
}
-static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
+static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
{
- if (intel_crtc->overlay)
- (void) intel_overlay_switch_off(intel_crtc->overlay);
+ if (crtc->overlay)
+ (void) intel_overlay_switch_off(crtc->overlay);
/* Let userspace switch the overlay on again. In most cases userspace
* has to recompute where to put it anyway.
@@ -6473,23 +6484,21 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx)
{
- struct intel_crtc *intel_crtc;
- struct intel_encoder *intel_encoder =
+ struct intel_encoder *encoder =
intel_attached_encoder(to_intel_connector(connector));
- struct drm_crtc *possible_crtc;
- struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_crtc *crtc = NULL;
- struct drm_device *dev = encoder->dev;
+ struct intel_crtc *possible_crtc;
+ struct intel_crtc *crtc = NULL;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_mode_config *config = &dev->mode_config;
struct drm_atomic_state *state = NULL, *restore_state = NULL;
struct drm_connector_state *connector_state;
struct intel_crtc_state *crtc_state;
- int ret, i = -1;
+ int ret;
drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, connector->name,
- encoder->base.id, encoder->name);
+ encoder->base.base.id, encoder->base.name);
old->restore_state = NULL;
@@ -6507,9 +6516,9 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
/* See if we already have a CRTC for this connector */
if (connector->state->crtc) {
- crtc = connector->state->crtc;
+ crtc = to_intel_crtc(connector->state->crtc);
- ret = drm_modeset_lock(&crtc->mutex, ctx);
+ ret = drm_modeset_lock(&crtc->base.mutex, ctx);
if (ret)
goto fail;
@@ -6518,17 +6527,17 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
}
/* Find an unused one (if possible) */
- for_each_crtc(dev, possible_crtc) {
- i++;
- if (!(encoder->possible_crtcs & (1 << i)))
+ for_each_intel_crtc(dev, possible_crtc) {
+ if (!(encoder->base.possible_crtcs &
+ drm_crtc_mask(&possible_crtc->base)))
continue;
- ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
+ ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);
if (ret)
goto fail;
- if (possible_crtc->state->enable) {
- drm_modeset_unlock(&possible_crtc->mutex);
+ if (possible_crtc->base.state->enable) {
+ drm_modeset_unlock(&possible_crtc->base.mutex);
continue;
}
@@ -6547,8 +6556,6 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
}
found:
- intel_crtc = to_intel_crtc(crtc);
-
state = drm_atomic_state_alloc(dev);
restore_state = drm_atomic_state_alloc(dev);
if (!state || !restore_state) {
@@ -6565,11 +6572,11 @@ found:
goto fail;
}
- ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
+ ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);
if (ret)
goto fail;
- crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
+ crtc_state = intel_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
ret = PTR_ERR(crtc_state);
goto fail;
@@ -6582,15 +6589,15 @@ found:
if (ret)
goto fail;
- ret = intel_modeset_disable_planes(state, crtc);
+ ret = intel_modeset_disable_planes(state, &crtc->base);
if (ret)
goto fail;
ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
if (!ret)
- ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
+ ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
if (!ret)
- ret = drm_atomic_add_affected_planes(restore_state, crtc);
+ ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
if (ret) {
drm_dbg_kms(&dev_priv->drm,
"Failed to create a copy of old state to restore: %i\n",
@@ -6609,7 +6616,7 @@ found:
drm_atomic_state_put(state);
/* let the connector get through one full cycle before testing */
- intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
return true;
fail:
@@ -7281,12 +7288,13 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
}
if (dev_priv->display.compute_pipe_wm) {
- ret = dev_priv->display.compute_pipe_wm(crtc_state);
+ ret = dev_priv->display.compute_pipe_wm(state, crtc);
if (ret) {
drm_dbg_kms(&dev_priv->drm,
"Target pipe watermarks are invalid\n");
return ret;
}
+
}
if (dev_priv->display.compute_intermediate_wm) {
@@ -7299,7 +7307,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
* old state and the new state. We can program these
* immediately.
*/
- ret = dev_priv->display.compute_intermediate_wm(crtc_state);
+ ret = dev_priv->display.compute_intermediate_wm(state, crtc);
if (ret) {
drm_dbg_kms(&dev_priv->drm,
"No valid intermediate pipe watermarks are possible\n");
@@ -9618,7 +9626,6 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
struct intel_crtc *slave, *master;
@@ -9634,15 +9641,15 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
if (!new_crtc_state->bigjoiner)
return 0;
- if (1 + crtc->pipe >= INTEL_NUM_PIPES(dev_priv)) {
+ slave = intel_dsc_get_bigjoiner_secondary(crtc);
+ if (!slave) {
DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
"CRTC + 1 to be used, doesn't exist\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
- slave = new_crtc_state->bigjoiner_linked_crtc =
- intel_get_crtc_for_pipe(dev_priv, crtc->pipe + 1);
+ new_crtc_state->bigjoiner_linked_crtc = slave;
slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave);
master = crtc;
if (IS_ERR(slave_crtc_state))
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 88bb05d5c483..d5af5708c9da 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -544,6 +544,11 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
seq_printf(m, "fw loaded: %s\n", yesno(intel_dmc_has_payload(dev_priv)));
seq_printf(m, "path: %s\n", dmc->fw_path);
+ seq_printf(m, "Pipe A fw support: %s\n",
+ yesno(GRAPHICS_VER(dev_priv) >= 12));
+ seq_printf(m, "Pipe A fw loaded: %s\n", yesno(dmc->dmc_info[DMC_FW_PIPEA].payload));
+ seq_printf(m, "Pipe B fw support: %s\n", yesno(IS_ALDERLAKE_P(dev_priv)));
+ seq_printf(m, "Pipe B fw loaded: %s\n", yesno(dmc->dmc_info[DMC_FW_PIPEB].payload));
if (!intel_dmc_has_payload(dev_priv))
goto out;
@@ -582,7 +587,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
out:
seq_printf(m, "program base: 0x%08x\n",
- intel_de_read(dev_priv, DMC_PROGRAM(0)));
+ intel_de_read(dev_priv, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)));
seq_printf(m, "ssp base: 0x%08x\n",
intel_de_read(dev_priv, DMC_SSP_BASE));
seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, DMC_HTP_SKL));
@@ -1225,7 +1230,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
static void drrs_status_per_crtc(struct seq_file *m,
struct drm_device *dev,
- struct intel_crtc *intel_crtc)
+ struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_drrs *drrs = &dev_priv->drrs;
@@ -1237,7 +1242,7 @@ static void drrs_status_per_crtc(struct seq_file *m,
drm_for_each_connector_iter(connector, &conn_iter) {
bool supported = false;
- if (connector->state->crtc != &intel_crtc->base)
+ if (connector->state->crtc != &crtc->base)
continue;
seq_printf(m, "%s:\n", connector->name);
@@ -1252,7 +1257,7 @@ static void drrs_status_per_crtc(struct seq_file *m,
seq_puts(m, "\n");
- if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
+ if (to_intel_crtc_state(crtc->base.state)->has_drrs) {
struct intel_panel *panel;
mutex_lock(&drrs->mutex);
@@ -1298,16 +1303,16 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *intel_crtc;
+ struct intel_crtc *crtc;
int active_crtc_cnt = 0;
drm_modeset_lock_all(dev);
- for_each_intel_crtc(dev, intel_crtc) {
- if (intel_crtc->base.state->active) {
+ for_each_intel_crtc(dev, crtc) {
+ if (crtc->base.state->active) {
active_crtc_cnt++;
seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
- drrs_status_per_crtc(m, dev, intel_crtc);
+ drrs_status_per_crtc(m, dev, crtc);
}
}
drm_modeset_unlock_all(dev);
@@ -2064,7 +2069,7 @@ i915_fifo_underrun_reset_write(struct file *filp,
size_t cnt, loff_t *ppos)
{
struct drm_i915_private *dev_priv = filp->private_data;
- struct intel_crtc *intel_crtc;
+ struct intel_crtc *crtc;
struct drm_device *dev = &dev_priv->drm;
int ret;
bool reset;
@@ -2076,15 +2081,15 @@ i915_fifo_underrun_reset_write(struct file *filp,
if (!reset)
return cnt;
- for_each_intel_crtc(dev, intel_crtc) {
+ for_each_intel_crtc(dev, crtc) {
struct drm_crtc_commit *commit;
struct intel_crtc_state *crtc_state;
- ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
+ ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
if (ret)
return ret;
- crtc_state = to_intel_crtc_state(intel_crtc->base.state);
+ crtc_state = to_intel_crtc_state(crtc->base.state);
commit = crtc_state->uapi.commit;
if (commit) {
ret = wait_for_completion_interruptible(&commit->hw_done);
@@ -2095,12 +2100,12 @@ i915_fifo_underrun_reset_write(struct file *filp,
if (!ret && crtc_state->hw.active) {
drm_dbg_kms(&dev_priv->drm,
"Re-arming FIFO underruns on pipe %c\n",
- pipe_name(intel_crtc->pipe));
+ pipe_name(crtc->pipe));
- intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
+ intel_crtc_arm_fifo_underrun(crtc, crtc_state);
}
- drm_modeset_unlock(&intel_crtc->base.mutex);
+ drm_modeset_unlock(&crtc->base.mutex);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 4298ae684d7d..285380079aab 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -961,8 +961,9 @@ static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
static void assert_dmc_loaded(struct drm_i915_private *dev_priv)
{
drm_WARN_ONCE(&dev_priv->drm,
- !intel_de_read(dev_priv, DMC_PROGRAM(0)),
- "DMC program storage start is NULL\n");
+ !intel_de_read(dev_priv,
+ DMC_PROGRAM(dev_priv->dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
+ "DMC program storage start is NULL\n");
drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_SSP_BASE),
"DMC SSP Base Not fine\n");
drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_HTP_SKL),
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 15e91a99c8b9..d94f361b548b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -48,6 +48,7 @@
struct drm_printer;
struct __intel_global_objs_state;
+struct intel_ddi_buf_trans;
/*
* Display related stuff
@@ -263,6 +264,9 @@ struct intel_encoder {
* Returns whether the port clock is enabled or not.
*/
bool (*is_clock_enabled)(struct intel_encoder *encoder);
+ const struct intel_ddi_buf_trans *(*get_buf_trans)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries);
enum hpd_pin hpd_pin;
enum intel_display_power_domain power_domain;
/* for communication with audio component; protected by av_mutex */
@@ -1040,7 +1044,9 @@ struct intel_crtc_state {
bool has_psr;
bool has_psr2;
bool enable_psr2_sel_fetch;
+ bool req_psr2_sdp_prior_scanline;
u32 dc3co_exitline;
+ u16 su_y_granularity;
/*
* Frequency the dpll for the port should run at. Differs from the
@@ -1493,12 +1499,14 @@ struct intel_psr {
bool colorimetry_support;
bool psr2_enabled;
bool psr2_sel_fetch_enabled;
+ bool req_psr2_sdp_prior_scanline;
u8 sink_sync_latency;
ktime_t last_entry_attempt;
ktime_t last_exit;
bool sink_not_reliable;
bool irq_aux_error;
- u16 su_x_granularity;
+ u16 su_w_granularity;
+ u16 su_y_granularity;
u32 dc3co_exitline;
u32 dc3co_exit_delay;
struct delayed_work dc3co_work;
@@ -1723,6 +1731,14 @@ vlv_pipe_to_channel(enum pipe pipe)
}
}
+static inline bool intel_pipe_valid(struct drm_i915_private *i915, enum pipe pipe)
+{
+ return (pipe >= 0 &&
+ pipe < ARRAY_SIZE(i915->pipe_to_crtc_mapping) &&
+ INTEL_INFO(i915)->pipe_mask & BIT(pipe) &&
+ i915->pipe_to_crtc_mapping[pipe]);
+}
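
For context, the new helper simply combines a range check, a fused-pipe-mask check and a CRTC-mapping check. A minimal standalone sketch of the same logic, with a made-up pipe mask and mapping table rather than the driver's actual structures:

#include <stdbool.h>
#include <stdio.h>

#define MAX_PIPES 4

static int pipe_has_crtc[MAX_PIPES] = { 1, 1, 0, 0 };	/* hypothetical mapping */
static unsigned int pipe_mask = 0x3;			/* pipes A and B present */

static bool pipe_valid(int pipe)
{
	return pipe >= 0 && pipe < MAX_PIPES &&
	       (pipe_mask & (1u << pipe)) &&
	       pipe_has_crtc[pipe];
}

int main(void)
{
	for (int pipe = 0; pipe < MAX_PIPES; pipe++)
		printf("pipe %d: %s\n", pipe, pipe_valid(pipe) ? "valid" : "invalid");
	return 0;
}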
+
static inline struct intel_crtc *
intel_get_first_crtc(struct drm_i915_private *dev_priv)
{
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 97308da28059..f8789d4543bf 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -45,6 +45,10 @@
#define GEN12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
+#define ADLP_DMC_PATH DMC_PATH(adlp, 2, 10)
+#define ADLP_DMC_VERSION_REQUIRED DMC_VERSION(2, 10)
+MODULE_FIRMWARE(ADLP_DMC_PATH);
+
#define ADLS_DMC_PATH DMC_PATH(adls, 2, 01)
#define ADLS_DMC_VERSION_REQUIRED DMC_VERSION(2, 1)
MODULE_FIRMWARE(ADLS_DMC_PATH);
@@ -96,6 +100,7 @@ MODULE_FIRMWARE(BXT_DMC_PATH);
#define PACKAGE_V2_MAX_FW_INFO_ENTRIES 32
#define DMC_V1_MAX_MMIO_COUNT 8
#define DMC_V3_MAX_MMIO_COUNT 20
+#define DMC_V1_MMIO_START_RANGE 0x80000
struct intel_css_header {
/* 0x09 for DMC */
@@ -239,7 +244,7 @@ struct stepping_info {
bool intel_dmc_has_payload(struct drm_i915_private *i915)
{
- return i915->dmc.dmc_payload;
+ return i915->dmc.dmc_info[DMC_FW_MAIN].payload;
}
static const struct stepping_info skl_stepping_info[] = {
@@ -316,8 +321,8 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
*/
void intel_dmc_load_program(struct drm_i915_private *dev_priv)
{
- u32 *payload = dev_priv->dmc.dmc_payload;
- u32 i, fw_size;
+ struct intel_dmc *dmc = &dev_priv->dmc;
+ u32 id, i;
if (!HAS_DMC(dev_priv)) {
drm_err(&dev_priv->drm,
@@ -325,26 +330,31 @@ void intel_dmc_load_program(struct drm_i915_private *dev_priv)
return;
}
- if (!intel_dmc_has_payload(dev_priv)) {
+ if (!dev_priv->dmc.dmc_info[DMC_FW_MAIN].payload) {
drm_err(&dev_priv->drm,
"Tried to program CSR with empty payload\n");
return;
}
- fw_size = dev_priv->dmc.dmc_fw_size;
assert_rpm_wakelock_held(&dev_priv->runtime_pm);
preempt_disable();
- for (i = 0; i < fw_size; i++)
- intel_uncore_write_fw(&dev_priv->uncore, DMC_PROGRAM(i),
- payload[i]);
+ for (id = 0; id < DMC_FW_MAX; id++) {
+ for (i = 0; i < dmc->dmc_info[id].dmc_fw_size; i++) {
+ intel_uncore_write_fw(&dev_priv->uncore,
+ DMC_PROGRAM(dmc->dmc_info[id].start_mmioaddr, i),
+ dmc->dmc_info[id].payload[i]);
+ }
+ }
preempt_enable();
- for (i = 0; i < dev_priv->dmc.mmio_count; i++) {
- intel_de_write(dev_priv, dev_priv->dmc.mmioaddr[i],
- dev_priv->dmc.mmiodata[i]);
+ for (id = 0; id < DMC_FW_MAX; id++) {
+ for (i = 0; i < dmc->dmc_info[id].mmio_count; i++) {
+ intel_de_write(dev_priv, dmc->dmc_info[id].mmioaddr[i],
+ dmc->dmc_info[id].mmiodata[i]);
+ }
}
dev_priv->dmc.dc_state = 0;
@@ -352,62 +362,72 @@ void intel_dmc_load_program(struct drm_i915_private *dev_priv)
gen9_set_dc_state_debugmask(dev_priv);
}
+static bool fw_info_matches_stepping(const struct intel_fw_info *fw_info,
+ const struct stepping_info *si)
+{
+ if ((fw_info->substepping == '*' && si->stepping == fw_info->stepping) ||
+ (si->stepping == fw_info->stepping && si->substepping == fw_info->substepping) ||
+ /*
+ * If neither of the two checks above finds a more specific match, we
+ * fall back to the generic entry so this keeps working even with
+ * "broken" firmware.
+ */
+ (si->stepping == '*' && si->substepping == fw_info->substepping) ||
+ (fw_info->stepping == '*' && fw_info->substepping == '*'))
+ return true;
+
+ return false;
+}
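
As a rough illustration of the matching rules above, a self-contained userspace sketch; the entry layout and sample table are invented for the example and are not the real firmware package format:

#include <stdbool.h>
#include <stdio.h>

struct fw_entry { char stepping, substepping; unsigned int offset; };

/* Same precedence as fw_info_matches_stepping(): exact match, partial
 * wildcard, or the fully generic entry all count as a match. */
static bool matches(const struct fw_entry *e, char step, char substep)
{
	return (e->substepping == '*' && step == e->stepping) ||
	       (step == e->stepping && substep == e->substepping) ||
	       (step == '*' && substep == e->substepping) ||
	       (e->stepping == '*' && e->substepping == '*');
}

int main(void)
{
	const struct fw_entry table[] = {
		{ 'B', '0', 0x400 },	/* most specific entries first */
		{ 'B', '*', 0x200 },
		{ '*', '*', 0x000 },	/* generic fallback */
	};

	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (matches(&table[i], 'B', '1')) {
			printf("picked offset 0x%x\n", table[i].offset);
			break;	/* first (most specific) hit wins */
		}
	}
	return 0;
}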
+
/*
* Search fw_info table for dmc_offset to find firmware binary: num_entries is
* already sanitized.
*/
-static u32 find_dmc_fw_offset(const struct intel_fw_info *fw_info,
+static void dmc_set_fw_offset(struct intel_dmc *dmc,
+ const struct intel_fw_info *fw_info,
unsigned int num_entries,
const struct stepping_info *si,
u8 package_ver)
{
- u32 dmc_offset = DMC_DEFAULT_FW_OFFSET;
- unsigned int i;
+ unsigned int i, id;
+
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
for (i = 0; i < num_entries; i++) {
- if (package_ver > 1 && fw_info[i].dmc_id != 0)
- continue;
+ id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id;
- if (fw_info[i].substepping == '*' &&
- si->stepping == fw_info[i].stepping) {
- dmc_offset = fw_info[i].offset;
- break;
+ if (id >= DMC_FW_MAX) {
+ drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", id);
+ continue;
}
- if (si->stepping == fw_info[i].stepping &&
- si->substepping == fw_info[i].substepping) {
- dmc_offset = fw_info[i].offset;
- break;
- }
+ /* More specific versions come first, so we don't even have to
+ * check for the stepping since we already found a previous FW
+ * for this id.
+ */
+ if (dmc->dmc_info[id].present)
+ continue;
- if (fw_info[i].stepping == '*' &&
- fw_info[i].substepping == '*') {
- /*
- * In theory we should stop the search as generic
- * entries should always come after the more specific
- * ones, but let's continue to make sure to work even
- * with "broken" firmwares. If we don't find a more
- * specific one, then we use this entry
- */
- dmc_offset = fw_info[i].offset;
+ if (fw_info_matches_stepping(&fw_info[i], si)) {
+ dmc->dmc_info[id].present = true;
+ dmc->dmc_info[id].dmc_offset = fw_info[i].offset;
}
}
-
- return dmc_offset;
}
static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
const struct intel_dmc_header_base *dmc_header,
- size_t rem_size)
+ size_t rem_size, u8 dmc_id)
{
struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
+ struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id];
unsigned int header_len_bytes, dmc_header_size, payload_size, i;
const u32 *mmioaddr, *mmiodata;
- u32 mmio_count, mmio_count_max;
+ u32 mmio_count, mmio_count_max, start_mmioaddr;
u8 *payload;
- BUILD_BUG_ON(ARRAY_SIZE(dmc->mmioaddr) < DMC_V3_MAX_MMIO_COUNT ||
- ARRAY_SIZE(dmc->mmioaddr) < DMC_V1_MAX_MMIO_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(dmc_info->mmioaddr) < DMC_V3_MAX_MMIO_COUNT ||
+ ARRAY_SIZE(dmc_info->mmioaddr) < DMC_V1_MAX_MMIO_COUNT);
/*
* Check if we can access common fields; we will check again below
@@ -430,6 +450,7 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
mmio_count_max = DMC_V3_MAX_MMIO_COUNT;
/* header_len is in dwords */
header_len_bytes = dmc_header->header_len * 4;
+ start_mmioaddr = v3->start_mmioaddr;
dmc_header_size = sizeof(*v3);
} else if (dmc_header->header_ver == 1) {
const struct intel_dmc_header_v1 *v1 =
@@ -443,6 +464,7 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
mmio_count = v1->mmio_count;
mmio_count_max = DMC_V1_MAX_MMIO_COUNT;
header_len_bytes = dmc_header->header_len;
+ start_mmioaddr = DMC_V1_MMIO_START_RANGE;
dmc_header_size = sizeof(*v1);
} else {
drm_err(&i915->drm, "Unknown DMC fw header version: %u\n",
@@ -463,16 +485,11 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
}
for (i = 0; i < mmio_count; i++) {
- if (mmioaddr[i] < DMC_MMIO_START_RANGE ||
- mmioaddr[i] > DMC_MMIO_END_RANGE) {
- drm_err(&i915->drm, "DMC firmware has wrong mmio address 0x%x\n",
- mmioaddr[i]);
- return 0;
- }
- dmc->mmioaddr[i] = _MMIO(mmioaddr[i]);
- dmc->mmiodata[i] = mmiodata[i];
+ dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
+ dmc_info->mmiodata[i] = mmiodata[i];
}
- dmc->mmio_count = mmio_count;
+ dmc_info->mmio_count = mmio_count;
+ dmc_info->start_mmioaddr = start_mmioaddr;
rem_size -= header_len_bytes;
@@ -485,14 +502,14 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
drm_err(&i915->drm, "DMC FW too big (%u bytes)\n", payload_size);
return 0;
}
- dmc->dmc_fw_size = dmc_header->fw_size;
+ dmc_info->dmc_fw_size = dmc_header->fw_size;
- dmc->dmc_payload = kmalloc(payload_size, GFP_KERNEL);
- if (!dmc->dmc_payload)
+ dmc_info->payload = kmalloc(payload_size, GFP_KERNEL);
+ if (!dmc_info->payload)
return 0;
payload = (u8 *)(dmc_header) + header_len_bytes;
- memcpy(dmc->dmc_payload, payload, payload_size);
+ memcpy(dmc_info->payload, payload, payload_size);
return header_len_bytes + payload_size;
@@ -509,7 +526,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc,
{
struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
u32 package_size = sizeof(struct intel_package_header);
- u32 num_entries, max_entries, dmc_offset;
+ u32 num_entries, max_entries;
const struct intel_fw_info *fw_info;
if (rem_size < package_size)
@@ -545,16 +562,11 @@ parse_dmc_fw_package(struct intel_dmc *dmc,
fw_info = (const struct intel_fw_info *)
((u8 *)package_header + sizeof(*package_header));
- dmc_offset = find_dmc_fw_offset(fw_info, num_entries, si,
- package_header->header_ver);
- if (dmc_offset == DMC_DEFAULT_FW_OFFSET) {
- drm_err(&i915->drm, "DMC firmware not supported for %c stepping\n",
- si->stepping);
- return 0;
- }
+ dmc_set_fw_offset(dmc, fw_info, num_entries, si,
+ package_header->header_ver);
/* dmc_offset is in dwords */
- return package_size + dmc_offset * 4;
+ return package_size;
error_truncated:
drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
@@ -606,7 +618,8 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv,
struct intel_dmc *dmc = &dev_priv->dmc;
const struct stepping_info *si = intel_get_stepping_info(dev_priv);
u32 readcount = 0;
- u32 r;
+ u32 r, offset;
+ int id;
if (!fw)
return;
@@ -627,9 +640,19 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv,
readcount += r;
- /* Extract dmc_header information */
- dmc_header = (struct intel_dmc_header_base *)&fw->data[readcount];
- parse_dmc_fw_header(dmc, dmc_header, fw->size - readcount);
+ for (id = 0; id < DMC_FW_MAX; id++) {
+ if (!dev_priv->dmc.dmc_info[id].present)
+ continue;
+
+ offset = readcount + dmc->dmc_info[id].dmc_offset * 4;
+ if (fw->size - offset < 0) {
+ drm_err(&dev_priv->drm, "Reading beyond the fw_size\n");
+ continue;
+ }
+
+ dmc_header = (struct intel_dmc_header_base *)&fw->data[offset];
+ parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, id);
+ }
}
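
A rough sketch of the per-firmware offset walk that the loop above performs, with invented sizes and a plain bounds check standing in for the driver's expression:

#include <stdio.h>

#define DMC_FW_MAX 3

struct info { int present; unsigned int dmc_offset; };	/* offset in dwords */

int main(void)
{
	struct info dmc_info[DMC_FW_MAX] = {
		{ 1, 0x000 },	/* main blob at the start of the package */
		{ 1, 0x400 },	/* hypothetical pipe A blob */
		{ 0, 0x000 },	/* pipe B blob absent */
	};
	unsigned int readcount = 128, fw_size = 4096;	/* made-up image size */

	for (int id = 0; id < DMC_FW_MAX; id++) {
		unsigned int offset;

		if (!dmc_info[id].present)
			continue;

		offset = readcount + dmc_info[id].dmc_offset * 4;
		if (offset >= fw_size) {
			printf("id %d: header beyond the firmware image\n", id);
			continue;
		}
		printf("id %d: header at byte offset %u\n", id, offset);
	}
	return 0;
}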
static void intel_dmc_runtime_pm_get(struct drm_i915_private *dev_priv)
@@ -705,7 +728,11 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
*/
intel_dmc_runtime_pm_get(dev_priv);
- if (IS_ALDERLAKE_S(dev_priv)) {
+ if (IS_ALDERLAKE_P(dev_priv)) {
+ dmc->fw_path = ADLP_DMC_PATH;
+ dmc->required_version = ADLP_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = GEN12_DMC_MAX_FW_SIZE;
+ } else if (IS_ALDERLAKE_S(dev_priv)) {
dmc->fw_path = ADLS_DMC_PATH;
dmc->required_version = ADLS_DMC_VERSION_REQUIRED;
dmc->max_fw_size = GEN12_DMC_MAX_FW_SIZE;
@@ -827,5 +854,5 @@ void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv)
intel_dmc_ucode_suspend(dev_priv);
drm_WARN_ON(&dev_priv->drm, dev_priv->dmc.wakeref);
- kfree(dev_priv->dmc.dmc_payload);
+ kfree(dev_priv->dmc.dmc_info[DMC_FW_MAIN].payload);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h
index 4c22f567b61b..c3c00ff03869 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc.h
@@ -16,17 +16,30 @@ struct drm_i915_private;
#define DMC_VERSION_MAJOR(version) ((version) >> 16)
#define DMC_VERSION_MINOR(version) ((version) & 0xffff)
+enum {
+ DMC_FW_MAIN = 0,
+ DMC_FW_PIPEA,
+ DMC_FW_PIPEB,
+ DMC_FW_MAX
+};
+
struct intel_dmc {
struct work_struct work;
const char *fw_path;
u32 required_version;
u32 max_fw_size; /* bytes */
- u32 *dmc_payload;
- u32 dmc_fw_size; /* dwords */
u32 version;
- u32 mmio_count;
- i915_reg_t mmioaddr[20];
- u32 mmiodata[20];
+ struct dmc_fw_info {
+ u32 mmio_count;
+ i915_reg_t mmioaddr[20];
+ u32 mmiodata[20];
+ u32 dmc_offset;
+ u32 start_mmioaddr;
+ u32 dmc_fw_size; /* dwords */
+ u32 *payload;
+ bool present;
+ } dmc_info[DMC_FW_MAX];
+
u32 dc_state;
u32 target_dc_state;
u32 allowed_dc_mask;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 5c9222283044..5b52beaddada 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -2868,7 +2868,7 @@ static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
if (size < sizeof(struct dp_sdp))
return -EINVAL;
- memset(vsc, 0, size);
+ memset(vsc, 0, sizeof(*vsc));
if (sdp->sdp_header.HB0 != 0)
return -EINVAL;
@@ -3031,9 +3031,6 @@ void intel_read_dp_sdp(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
unsigned int type)
{
- if (encoder->type != INTEL_OUTPUT_DDI)
- return;
-
switch (type) {
case DP_SDP_VSC:
intel_read_dp_vsc_sdp(encoder, crtc_state,
@@ -4741,7 +4738,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
int refresh_rate)
{
struct intel_dp *intel_dp = dev_priv->drrs.dp;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
if (refresh_rate <= 0) {
@@ -4755,7 +4752,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
return;
}
- if (!intel_crtc) {
+ if (!crtc) {
drm_dbg_kms(&dev_priv->drm,
"DRRS: intel_crtc not initialized\n");
return;
@@ -5238,7 +5235,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
- intel_connector->panel.backlight.power = intel_pps_backlight_power;
+ if (!(dev_priv->quirks & QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
+ intel_connector->panel.backlight.power = intel_pps_backlight_power;
intel_panel_setup_backlight(connector, pipe);
if (fixed_mode) {
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index b170e272bdee..3661cd19ce48 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -308,9 +308,9 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
* connector
*/
if (new_crtc) {
- struct intel_crtc *intel_crtc = to_intel_crtc(new_crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc);
struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, intel_crtc);
+ intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state ||
!drm_atomic_crtc_needs_modeset(&crtc_state->uapi) ||
@@ -835,13 +835,10 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
- if (DISPLAY_VER(dev_priv) <= 12) {
- ret = intel_dp_hdcp_init(dig_port, intel_connector);
- if (ret)
- drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP MST init failed, skipping.\n",
- connector->name, connector->base.id);
- }
-
+ ret = intel_dp_hdcp_init(dig_port, intel_connector);
+ if (ret)
+ drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP MST init failed, skipping.\n",
+ connector->name, connector->base.id);
/*
* Reuse the prop from the SST connector because we're
* not allowed to create new props after device registration.
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 1847a161cb37..82effb64a3b9 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -104,7 +104,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
int i;
u32 fbc_ctl;
- /* Note: fbc.threshold == 1 for i8xx */
+ /* Note: fbc.limit == 1 for i8xx */
cfb_pitch = params->cfb_size / FBC_LL_SIZE;
if (params->fb.stride < cfb_pitch)
cfb_pitch = params->fb.stride;
@@ -148,16 +148,35 @@ static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
return intel_de_read(dev_priv, FBC_CONTROL) & FBC_CTL_EN;
}
+static u32 g4x_dpfc_ctl_limit(struct drm_i915_private *i915)
+{
+ const struct intel_fbc_reg_params *params = &i915->fbc.params;
+ int limit = i915->fbc.limit;
+
+ if (params->fb.format->cpp[0] == 2)
+ limit <<= 1;
+
+ switch (limit) {
+ default:
+ MISSING_CASE(limit);
+ fallthrough;
+ case 1:
+ return DPFC_CTL_LIMIT_1X;
+ case 2:
+ return DPFC_CTL_LIMIT_2X;
+ case 4:
+ return DPFC_CTL_LIMIT_4X;
+ }
+}
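
A quick sanity check of the new limit encoding, as a standalone sketch; the DPFC_CTL_LIMIT_* values here are placeholders rather than the real register bits:

#include <stdio.h>

#define DPFC_CTL_LIMIT_1X 0u	/* placeholder encodings */
#define DPFC_CTL_LIMIT_2X 1u
#define DPFC_CTL_LIMIT_4X 2u

/* cpp == 2 doubles the limit before encoding, mirroring g4x_dpfc_ctl_limit() */
static unsigned int encode_limit(int limit, int cpp)
{
	if (cpp == 2)
		limit <<= 1;

	switch (limit) {
	case 1:
		return DPFC_CTL_LIMIT_1X;
	case 2:
		return DPFC_CTL_LIMIT_2X;
	default:
		return DPFC_CTL_LIMIT_4X;
	}
}

int main(void)
{
	printf("limit=2 cpp=4 -> %u\n", encode_limit(2, 4));	/* 2X */
	printf("limit=2 cpp=2 -> %u\n", encode_limit(2, 2));	/* 4X */
	return 0;
}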
+
static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
{
struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
u32 dpfc_ctl;
dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN;
- if (params->fb.format->cpp[0] == 2)
- dpfc_ctl |= DPFC_CTL_LIMIT_2X;
- else
- dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+
+ dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv);
if (params->fence_id >= 0) {
dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id;
@@ -235,24 +254,10 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
{
struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
u32 dpfc_ctl;
- int threshold = dev_priv->fbc.threshold;
dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane);
- if (params->fb.format->cpp[0] == 2)
- threshold++;
- switch (threshold) {
- case 4:
- case 3:
- dpfc_ctl |= DPFC_CTL_LIMIT_4X;
- break;
- case 2:
- dpfc_ctl |= DPFC_CTL_LIMIT_2X;
- break;
- case 1:
- dpfc_ctl |= DPFC_CTL_LIMIT_1X;
- break;
- }
+ dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv);
if (params->fence_id >= 0) {
dpfc_ctl |= DPFC_CTL_FENCE_EN;
@@ -300,7 +305,6 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
{
struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
u32 dpfc_ctl;
- int threshold = dev_priv->fbc.threshold;
/* Display WA #0529: skl, kbl, bxt. */
if (DISPLAY_VER(dev_priv) == 9) {
@@ -318,21 +322,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
if (IS_IVYBRIDGE(dev_priv))
dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane);
- if (params->fb.format->cpp[0] == 2)
- threshold++;
-
- switch (threshold) {
- case 4:
- case 3:
- dpfc_ctl |= DPFC_CTL_LIMIT_4X;
- break;
- case 2:
- dpfc_ctl |= DPFC_CTL_LIMIT_2X;
- break;
- case 1:
- dpfc_ctl |= DPFC_CTL_LIMIT_1X;
- break;
- }
+ dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv);
if (params->fence_id >= 0) {
dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
@@ -433,13 +423,8 @@ static u64 intel_fbc_cfb_base_max(struct drm_i915_private *i915)
return BIT_ULL(32);
}
-static int find_compression_threshold(struct drm_i915_private *dev_priv,
- struct drm_mm_node *node,
- unsigned int size,
- unsigned int fb_cpp)
+static u64 intel_fbc_stolen_end(struct drm_i915_private *dev_priv)
{
- int compression_threshold = 1;
- int ret;
u64 end;
/* The FBC hardware for BDW/SKL doesn't have access to the stolen
@@ -452,51 +437,69 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
else
end = U64_MAX;
- end = min(end, intel_fbc_cfb_base_max(dev_priv));
+ return min(end, intel_fbc_cfb_base_max(dev_priv));
+}
- /* HACK: This code depends on what we will do in *_enable_fbc. If that
- * code changes, this code needs to change as well.
- *
- * The enable_fbc code will attempt to use one of our 2 compression
- * thresholds, therefore, in that case, we only have 1 resort.
+static int intel_fbc_max_limit(struct drm_i915_private *dev_priv, int fb_cpp)
+{
+ /*
+ * FIXME: FBC1 can have arbitrary cfb stride,
+ * so we could support different compression ratios.
*/
+ if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
+ return 1;
+
+ /* WaFbcOnly1to1Ratio:ctg */
+ if (IS_G4X(dev_priv))
+ return 1;
+
+ /* FBC2 can only do 1:1, 1:2, 1:4 */
+ return fb_cpp == 2 ? 2 : 4;
+}
+
+static int find_compression_limit(struct drm_i915_private *dev_priv,
+ unsigned int size,
+ unsigned int fb_cpp)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+ u64 end = intel_fbc_stolen_end(dev_priv);
+ int ret, limit = 1;
/* Try to over-allocate to reduce reallocations and fragmentation. */
- ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
- 4096, 0, end);
+ ret = i915_gem_stolen_insert_node_in_range(dev_priv, &fbc->compressed_fb,
+ size <<= 1, 4096, 0, end);
if (ret == 0)
- return compression_threshold;
+ return limit;
-again:
- /* HW's ability to limit the CFB is 1:4 */
- if (compression_threshold > 4 ||
- (fb_cpp == 2 && compression_threshold == 2))
- return 0;
-
- ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
- 4096, 0, end);
- if (ret && DISPLAY_VER(dev_priv) <= 4) {
- return 0;
- } else if (ret) {
- compression_threshold <<= 1;
- goto again;
- } else {
- return compression_threshold;
+ for (; limit <= intel_fbc_max_limit(dev_priv, fb_cpp); limit <<= 1) {
+ ret = i915_gem_stolen_insert_node_in_range(dev_priv, &fbc->compressed_fb,
+ size >>= 1, 4096, 0, end);
+ if (ret == 0)
+ return limit;
}
+
+ return 0;
}
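
The allocation strategy above is: try to over-allocate at twice the requested size once, then keep halving the size while raising the compression limit until something fits. A compact sketch under that reading, with a toy allocator standing in for the stolen-memory allocator:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for i915_gem_stolen_insert_node_in_range(): pretend only
 * allocations of at most 'avail' bytes can succeed. */
static bool try_alloc(unsigned int size, unsigned int avail)
{
	return size <= avail;
}

static int find_limit(unsigned int size, unsigned int avail, int max_limit)
{
	int limit = 1;

	if (try_alloc(size * 2, avail))		/* over-allocate first */
		return limit;

	for (; limit <= max_limit; limit <<= 1, size /= 2)
		if (try_alloc(size, avail))
			return limit;

	return 0;	/* no fit even at the highest compression limit */
}

int main(void)
{
	/* 8 MiB cfb wanted, 3 MiB of stolen left, FBC2 allows up to 1:4 */
	printf("limit=%d\n", find_limit(8u << 20, 3u << 20, 4));
	return 0;
}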
static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
unsigned int size, unsigned int fb_cpp)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- struct drm_mm_node *compressed_llb;
int ret;
drm_WARN_ON(&dev_priv->drm,
drm_mm_node_allocated(&fbc->compressed_fb));
+ drm_WARN_ON(&dev_priv->drm,
+ drm_mm_node_allocated(&fbc->compressed_llb));
- ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
- size, fb_cpp);
+ if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
+ ret = i915_gem_stolen_insert_node(dev_priv, &fbc->compressed_llb,
+ 4096, 4096);
+ if (ret)
+ goto err;
+ }
+
+ ret = find_compression_limit(dev_priv, size, fb_cpp);
if (!ret)
goto err_llb;
else if (ret > 1) {
@@ -504,51 +507,46 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
"Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
}
- fbc->threshold = ret;
+ fbc->limit = ret;
- if (DISPLAY_VER(dev_priv) >= 5)
+ drm_dbg_kms(&dev_priv->drm,
+ "reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n",
+ fbc->compressed_fb.size, fbc->limit);
+
+ return 0;
+
+err_llb:
+ if (drm_mm_node_allocated(&fbc->compressed_llb))
+ i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_llb);
+err:
+ if (drm_mm_initialized(&dev_priv->mm.stolen))
+ drm_info_once(&dev_priv->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
+ return -ENOSPC;
+}
+
+static void intel_fbc_program_cfb(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ if (DISPLAY_VER(dev_priv) >= 5) {
intel_de_write(dev_priv, ILK_DPFC_CB_BASE,
fbc->compressed_fb.start);
- else if (IS_GM45(dev_priv)) {
+ } else if (IS_GM45(dev_priv)) {
intel_de_write(dev_priv, DPFC_CB_BASE,
fbc->compressed_fb.start);
} else {
- compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
- if (!compressed_llb)
- goto err_fb;
-
- ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
- 4096, 4096);
- if (ret)
- goto err_fb;
-
- fbc->compressed_llb = compressed_llb;
-
GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start,
fbc->compressed_fb.start,
U32_MAX));
GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start,
- fbc->compressed_llb->start,
+ fbc->compressed_llb.start,
U32_MAX));
+
intel_de_write(dev_priv, FBC_CFB_BASE,
dev_priv->dsm.start + fbc->compressed_fb.start);
intel_de_write(dev_priv, FBC_LL_BASE,
- dev_priv->dsm.start + compressed_llb->start);
+ dev_priv->dsm.start + fbc->compressed_llb.start);
}
-
- drm_dbg_kms(&dev_priv->drm,
- "reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
- fbc->compressed_fb.size, fbc->threshold);
-
- return 0;
-
-err_fb:
- kfree(compressed_llb);
- i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
-err_llb:
- if (drm_mm_initialized(&dev_priv->mm.stolen))
- drm_info_once(&dev_priv->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
- return -ENOSPC;
}
static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
@@ -558,15 +556,10 @@ static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
if (WARN_ON(intel_fbc_hw_is_active(dev_priv)))
return;
- if (!drm_mm_node_allocated(&fbc->compressed_fb))
- return;
-
- if (fbc->compressed_llb) {
- i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
- kfree(fbc->compressed_llb);
- }
-
- i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
+ if (drm_mm_node_allocated(&fbc->compressed_llb))
+ i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_llb);
+ if (drm_mm_node_allocated(&fbc->compressed_fb))
+ i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
}
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
@@ -753,7 +746,7 @@ static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
struct intel_fbc *fbc = &dev_priv->fbc;
return intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
- fbc->compressed_fb.size * fbc->threshold;
+ fbc->compressed_fb.size * fbc->limit;
}
static u16 intel_fbc_gen9_wa_cfb_stride(struct drm_i915_private *dev_priv)
@@ -763,7 +756,7 @@ static u16 intel_fbc_gen9_wa_cfb_stride(struct drm_i915_private *dev_priv)
if ((DISPLAY_VER(dev_priv) == 9) &&
cache->fb.modifier != I915_FORMAT_MOD_X_TILED)
- return DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8;
+ return DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->limit) * 8;
else
return 0;
}
@@ -1302,6 +1295,8 @@ void intel_fbc_enable(struct intel_atomic_state *state,
fbc->no_fbc_reason = "FBC enabled but not active yet\n";
fbc->crtc = crtc;
+
+ intel_fbc_program_cfb(dev_priv);
out:
mutex_unlock(&fbc->lock);
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 4af40229f5ec..df05d285f0bd 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -335,32 +335,43 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
* fbcon), so we just find the biggest and use that.
*/
static bool intel_fbdev_init_bios(struct drm_device *dev,
- struct intel_fbdev *ifbdev)
+ struct intel_fbdev *ifbdev)
{
struct drm_i915_private *i915 = to_i915(dev);
struct intel_framebuffer *fb = NULL;
- struct drm_crtc *crtc;
- struct intel_crtc *intel_crtc;
+ struct intel_crtc *crtc;
unsigned int max_size = 0;
/* Find the largest fb */
- for_each_crtc(dev, crtc) {
+ for_each_intel_crtc(dev, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane *plane =
+ to_intel_plane(crtc->base.primary);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
struct drm_i915_gem_object *obj =
- intel_fb_obj(crtc->primary->state->fb);
- intel_crtc = to_intel_crtc(crtc);
+ intel_fb_obj(plane_state->uapi.fb);
- if (!crtc->state->active || !obj) {
+ if (!crtc_state->uapi.active) {
drm_dbg_kms(&i915->drm,
- "pipe %c not active or no fb, skipping\n",
- pipe_name(intel_crtc->pipe));
+ "[CRTC:%d:%s] not active, skipping\n",
+ crtc->base.base.id, crtc->base.name);
+ continue;
+ }
+
+ if (!obj) {
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] no fb, skipping\n",
+ plane->base.base.id, plane->base.name);
continue;
}
if (obj->base.size > max_size) {
drm_dbg_kms(&i915->drm,
- "found possible fb from plane %c\n",
- pipe_name(intel_crtc->pipe));
- fb = to_intel_framebuffer(crtc->primary->state->fb);
+ "found possible fb from [PLANE:%d:%s]\n",
+ plane->base.base.id, plane->base.name);
+ fb = to_intel_framebuffer(plane_state->uapi.fb);
max_size = obj->base.size;
}
}
@@ -372,60 +383,62 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
}
/* Now make sure all the pipes will fit into it */
- for_each_crtc(dev, crtc) {
+ for_each_intel_crtc(dev, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane *plane =
+ to_intel_plane(crtc->base.primary);
unsigned int cur_size;
- intel_crtc = to_intel_crtc(crtc);
-
- if (!crtc->state->active) {
+ if (!crtc_state->uapi.active) {
drm_dbg_kms(&i915->drm,
- "pipe %c not active, skipping\n",
- pipe_name(intel_crtc->pipe));
+ "[CRTC:%d:%s] not active, skipping\n",
+ crtc->base.base.id, crtc->base.name);
continue;
}
- drm_dbg_kms(&i915->drm, "checking plane %c for BIOS fb\n",
- pipe_name(intel_crtc->pipe));
+ drm_dbg_kms(&i915->drm, "checking [PLANE:%d:%s] for BIOS fb\n",
+ plane->base.base.id, plane->base.name);
/*
* See if the plane fb we found above will fit on this
* pipe. Note we need to use the selected fb's pitch and bpp
* rather than the current pipe's, since they differ.
*/
- cur_size = crtc->state->adjusted_mode.crtc_hdisplay;
+ cur_size = crtc_state->uapi.adjusted_mode.crtc_hdisplay;
cur_size = cur_size * fb->base.format->cpp[0];
if (fb->base.pitches[0] < cur_size) {
drm_dbg_kms(&i915->drm,
- "fb not wide enough for plane %c (%d vs %d)\n",
- pipe_name(intel_crtc->pipe),
+ "fb not wide enough for [PLANE:%d:%s] (%d vs %d)\n",
+ plane->base.base.id, plane->base.name,
cur_size, fb->base.pitches[0]);
fb = NULL;
break;
}
- cur_size = crtc->state->adjusted_mode.crtc_vdisplay;
+ cur_size = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
cur_size *= fb->base.pitches[0];
drm_dbg_kms(&i915->drm,
- "pipe %c area: %dx%d, bpp: %d, size: %d\n",
- pipe_name(intel_crtc->pipe),
- crtc->state->adjusted_mode.crtc_hdisplay,
- crtc->state->adjusted_mode.crtc_vdisplay,
+ "[CRTC:%d:%s] area: %dx%d, bpp: %d, size: %d\n",
+ crtc->base.base.id, crtc->base.name,
+ crtc_state->uapi.adjusted_mode.crtc_hdisplay,
+ crtc_state->uapi.adjusted_mode.crtc_vdisplay,
fb->base.format->cpp[0] * 8,
cur_size);
if (cur_size > max_size) {
drm_dbg_kms(&i915->drm,
- "fb not big enough for plane %c (%d vs %d)\n",
- pipe_name(intel_crtc->pipe),
+ "fb not big enough for [PLANE:%d:%s] (%d vs %d)\n",
+ plane->base.base.id, plane->base.name,
cur_size, max_size);
fb = NULL;
break;
}
drm_dbg_kms(&i915->drm,
- "fb big enough for plane %c (%d >= %d)\n",
- pipe_name(intel_crtc->pipe),
+ "fb big enough [PLANE:%d:%s] (%d >= %d)\n",
+ plane->base.base.id, plane->base.name,
max_size, cur_size);
}
@@ -441,15 +454,20 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
drm_framebuffer_get(&ifbdev->fb->base);
/* Final pass to check if any active pipes don't have fbs */
- for_each_crtc(dev, crtc) {
- intel_crtc = to_intel_crtc(crtc);
-
- if (!crtc->state->active)
+ for_each_intel_crtc(dev, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane *plane =
+ to_intel_plane(crtc->base.primary);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+
+ if (!crtc_state->uapi.active)
continue;
- drm_WARN(dev, !crtc->primary->state->fb,
- "re-used BIOS config but lost an fb on crtc %d\n",
- crtc->base.id);
+ drm_WARN(dev, !plane_state->uapi.fb,
+ "re-used BIOS config but lost an fb on [PLANE:%d:%s]\n",
+ plane->base.base.id, plane->base.name);
}
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index cef1061fd6cb..e10b9cd8e86e 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -4,7 +4,6 @@
*/
#include "intel_atomic.h"
#include "intel_ddi.h"
-#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
@@ -96,10 +95,10 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
}
}
-int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *pipe_config)
+int ilk_fdi_compute_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *i915 = to_i915(dev);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int lane, link_bw, fdi_dotclock, ret;
@@ -125,7 +124,7 @@ retry:
intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
link_bw, &pipe_config->fdi_m_n, false, false);
- ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
+ ret = ilk_check_fdi_lanes(dev, crtc->pipe, pipe_config);
if (ret == -EDEADLK)
return ret;
@@ -569,9 +568,9 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
u32 temp, i, rx_ctl_val;
int n_entries;
- intel_ddi_get_buf_trans_fdi(dev_priv, &n_entries);
+ encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- intel_prepare_dp_ddi_buffers(encoder, crtc_state);
+ hsw_prepare_dp_ddi_buffers(encoder, crtc_state);
/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
* mode set "sequence for CRT port" document:
@@ -691,9 +690,9 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -726,11 +725,11 @@ void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
}
}
-void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
+void ilk_fdi_pll_disable(struct intel_crtc *crtc)
{
- struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = intel_crtc->pipe;
+ enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp;
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 7e51c98c475e..852af2b23540 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -270,8 +270,8 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
{
const u32 *data = frame;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
- i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe);
u32 val = intel_de_read(dev_priv, reg);
int i;
@@ -286,13 +286,13 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
intel_de_write(dev_priv, reg, val);
for (i = 0; i < len; i += 4) {
- intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe),
+ intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe),
*data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
- intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+ intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe), 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
@@ -349,8 +349,8 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
{
const u32 *data = frame;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
- i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe);
u32 val = intel_de_read(dev_priv, reg);
int i;
@@ -368,13 +368,13 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
intel_de_write(dev_priv, reg, val);
for (i = 0; i < len; i += 4) {
- intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe),
+ intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe),
*data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
- intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+ intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe), 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
@@ -427,8 +427,8 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
{
const u32 *data = frame;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
- i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ i915_reg_t reg = VLV_TVIDEO_DIP_CTL(crtc->pipe);
u32 val = intel_de_read(dev_priv, reg);
int i;
@@ -444,13 +444,13 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
for (i = 0; i < len; i += 4) {
intel_de_write(dev_priv,
- VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ VLV_TVIDEO_DIP_DATA(crtc->pipe), *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
intel_de_write(dev_priv,
- VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+ VLV_TVIDEO_DIP_DATA(crtc->pipe), 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
@@ -1040,10 +1040,10 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
- i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe);
u32 val = intel_de_read(dev_priv, reg);
u32 port = VIDEO_DIP_PORT(encoder->port);
@@ -1099,9 +1099,9 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe);
u32 val = intel_de_read(dev_priv, reg);
assert_hdmi_port_disabled(intel_hdmi);
@@ -1148,9 +1148,9 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ i915_reg_t reg = VLV_TVIDEO_DIP_CTL(crtc->pipe);
u32 val = intel_de_read(dev_priv, reg);
u32 port = VIDEO_DIP_PORT(encoder->port);
@@ -1465,14 +1465,12 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector,
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct drm_crtc *crtc = connector->base.state->crtc;
- struct intel_crtc *intel_crtc = container_of(crtc,
- struct intel_crtc, base);
+ struct intel_crtc *crtc = to_intel_crtc(connector->base.state->crtc);
u32 scanline;
int ret;
for (;;) {
- scanline = intel_de_read(dev_priv, PIPEDSL(intel_crtc->pipe));
+ scanline = intel_de_read(dev_priv, PIPEDSL(crtc->pipe));
if (scanline > 100 && scanline < 200)
break;
usleep_range(25, 50);
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 7f40e9f60bc2..e0381b0fce91 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -411,12 +411,12 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct intel_connector *intel_connector =
lvds_encoder->attached_connector;
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
unsigned int lvds_bpp;
int ret;
/* Should never happen!! */
- if (DISPLAY_VER(dev_priv) < 4 && intel_crtc->pipe == 0) {
+ if (DISPLAY_VER(dev_priv) < 4 && crtc->pipe == 0) {
drm_err(&dev_priv->drm, "Can't support LVDS on pipe A\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 77865cf6641f..9643624fe160 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -265,32 +265,44 @@ static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
return val;
}
-static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
+static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- u16 val;
ssize_t r;
+ u16 w;
+ u8 y;
+
+ /* If the sink doesn't have specific granularity requirements, set legacy ones */
+ if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
+ /* As PSR2 HW sends full lines, we do not care about x granularity */
+ w = 4;
+ y = 4;
+ goto exit;
+ }
- /*
- * Returning the default X granularity if granularity not required or
- * if DPCD read fails
- */
- if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
- return 4;
-
- r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
+ r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
if (r != 2)
drm_dbg_kms(&i915->drm,
"Unable to read DP_PSR2_SU_X_GRANULARITY\n");
-
/*
* Spec says that if the value read is 0 the default granularity should
* be used instead.
*/
- if (r != 2 || val == 0)
- val = 4;
+ if (r != 2 || w == 0)
+ w = 4;
- return val;
+ r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
+ if (r != 1) {
+ drm_dbg_kms(&i915->drm,
+ "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
+ y = 4;
+ }
+ if (y == 0)
+ y = 1;
+
+exit:
+ intel_dp->psr.su_w_granularity = w;
+ intel_dp->psr.su_y_granularity = y;
}
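
A rough sketch of the fallback rules above, with the DPCD reads simulated; the defaults (4 lines, and 0 treated as 1 for Y) mirror what the function does, nothing more:

#include <stdio.h>

/* Simulated DPCD reads: return the number of bytes "read". */
static int read_w(unsigned short *w) { *w = 0; return 2; }	/* sink reports 0 */
static int read_y(unsigned char *y)  { *y = 0; return 0; }	/* read fails */

int main(void)
{
	unsigned short w;
	unsigned char y;

	if (read_w(&w) != 2 || w == 0)
		w = 4;		/* default width granularity */
	if (read_y(&y) != 1)
		y = 4;		/* default on a failed read */
	if (y == 0)
		y = 1;		/* 0 from the sink is treated as 1 */

	printf("su_w=%u su_y=%u\n", w, y);
	return 0;
}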
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
@@ -346,8 +358,7 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
if (intel_dp->psr.sink_psr2_support) {
intel_dp->psr.colorimetry_support =
intel_dp_get_colorimetry_status(intel_dp);
- intel_dp->psr.su_x_granularity =
- intel_dp_get_su_x_granulartiy(intel_dp);
+ intel_dp_get_su_granularity(intel_dp);
}
}
}
@@ -407,6 +418,9 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
dpcd_val |= DP_PSR_CRC_VERIFICATION;
}
+ if (intel_dp->psr.req_psr2_sdp_prior_scanline)
+ dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
+
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
@@ -531,7 +545,34 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
val |= EDP_PSR2_FRAME_BEFORE_SU(intel_dp->psr.sink_sync_latency + 1);
val |= intel_psr2_get_tp_time(intel_dp);
- if (DISPLAY_VER(dev_priv) >= 12) {
+ /* Wa_22012278275:adlp */
+ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_D1)) {
+ static const u8 map[] = {
+ 2, /* 5 lines */
+ 1, /* 6 lines */
+ 0, /* 7 lines */
+ 3, /* 8 lines */
+ 6, /* 9 lines */
+ 5, /* 10 lines */
+ 4, /* 11 lines */
+ 7, /* 12 lines */
+ };
+ /*
+ * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
+ * comments bellow for more information
+ */
+ u32 tmp, lines = 7;
+
+ val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
+
+ tmp = map[lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
+ tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT;
+ val |= tmp;
+
+ tmp = map[lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
+ tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT;
+ val |= tmp;
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
/*
* TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are default
* values from BSpec. In order to set an optimal power
@@ -547,6 +588,9 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
val |= EDP_PSR2_FAST_WAKE(7);
}
+ if (intel_dp->psr.req_psr2_sdp_prior_scanline)
+ val |= EDP_PSR2_SU_SDP_SCANLINE;
+
if (intel_dp->psr.psr2_sel_fetch_enabled) {
/* WA 1408330847 */
if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0) ||
@@ -689,6 +733,10 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
return;
+ /* Wa_16011303918:adlp */
+ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0))
+ return;
+
/*
* DC3CO Exit time 200us B.Spec 49196
* PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
@@ -742,6 +790,63 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
return crtc_state->enable_psr2_sel_fetch = true;
}
+static bool psr2_granularity_check(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
+ const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
+ u16 y_granularity = 0;
+
+ /* PSR2 HW only sends full lines, so we only need to validate the width */
+ if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
+ return false;
+
+ if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
+ return false;
+
+ /* HW tracking is only aligned to 4 lines */
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return intel_dp->psr.su_y_granularity == 4;
+
+ /*
+ * For SW tracking we can adjust the y granularity to match the sink
+ * requirement if it is a multiple of 4
+ */
+ if (intel_dp->psr.su_y_granularity <= 2)
+ y_granularity = 4;
+ else if ((intel_dp->psr.su_y_granularity % 4) == 0)
+ y_granularity = intel_dp->psr.su_y_granularity;
+
+ if (y_granularity == 0 || crtc_vdisplay % y_granularity)
+ return false;
+
+ crtc_state->su_y_granularity = y_granularity;
+ return true;
+}
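
A small worked example of the y-granularity adjustment above, outside the driver; the sink values are made up:

#include <stdio.h>

/* Mirrors the SW-tracking branch of psr2_granularity_check(): sinks asking
 * for 1 or 2 lines are bumped to 4, exact multiples of 4 are kept as-is. */
static int pick_y_granularity(int sink_y, int vdisplay)
{
	int y = 0;

	if (sink_y <= 2)
		y = 4;
	else if (sink_y % 4 == 0)
		y = sink_y;

	if (y == 0 || vdisplay % y)
		return 0;	/* not usable, PSR2 would be rejected */
	return y;
}

int main(void)
{
	printf("sink=2 vdisp=2160 -> %d\n", pick_y_granularity(2, 2160));	/* 4 */
	printf("sink=8 vdisp=2160 -> %d\n", pick_y_granularity(8, 2160));	/* 8 */
	printf("sink=6 vdisp=2160 -> %d\n", pick_y_granularity(6, 2160));	/* 0 */
	return 0;
}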
+
+static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 hblank_total, hblank_ns, req_ns;
+
+ hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
+ hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
+
+ /* From spec: (72 / number of lanes) * 1000 / symbol clock frequency MHz */
+ req_ns = (72 / crtc_state->lane_count) * 1000 / (crtc_state->port_clock / 1000);
+
+ if ((hblank_ns - req_ns) > 100)
+ return true;
+
+ if (DISPLAY_VER(dev_priv) < 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
+ return false;
+
+ crtc_state->req_psr2_sdp_prior_scanline = true;
+ return true;
+}
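
For a feel of the numbers in the check above, a standalone computation with a hypothetical 4-lane eDP mode; the clock values are purely illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical mode: hblank span in pixels, pixel clock in kHz */
	uint64_t hblank_total = 160;
	uint64_t crtc_clock_khz = 297000;
	unsigned int lane_count = 4, port_clock_khz = 540000;

	/* Same arithmetic as above: pixels -> ns, then the SDP cost in ns */
	uint64_t hblank_ns = (1000000ULL * hblank_total) / crtc_clock_khz;
	unsigned int req_ns = (72 / lane_count) * 1000 / (port_clock_khz / 1000);

	printf("hblank=%lluns sdp=%uns margin=%lldns\n",
	       (unsigned long long)hblank_ns, req_ns,
	       (long long)(hblank_ns - req_ns));	/* > 100ns means it fits */
	return 0;
}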
+
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
@@ -824,19 +929,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
- /*
- * HW sends SU blocks of size four scan lines, which means the starting
- * X coordinate and Y granularity requirements will always be met. We
- * only need to validate the SU block width is a multiple of
- * x granularity.
- */
- if (crtc_hdisplay % intel_dp->psr.su_x_granularity) {
- drm_dbg_kms(&dev_priv->drm,
- "PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
- crtc_hdisplay, intel_dp->psr.su_x_granularity);
- return false;
- }
-
if (HAS_PSR2_SEL_FETCH(dev_priv)) {
if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
!HAS_PSR_HW_TRACKING(dev_priv)) {
@@ -853,6 +945,11 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
+ if (!psr2_granularity_check(intel_dp, crtc_state)) {
+ drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
+ return false;
+ }
+
if (!crtc_state->enable_psr2_sel_fetch &&
(crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
drm_dbg_kms(&dev_priv->drm,
@@ -862,6 +959,20 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
+ if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
+ return false;
+ }
+
+ /* Wa_16011303918:adlp */
+ if (crtc_state->vrr.enable &&
+ IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, not compatible with HW stepping + VRR\n");
+ return false;
+ }
+
tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
return true;
}
@@ -1048,6 +1159,14 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp)
intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
intel_dp->psr.psr2_sel_fetch_enabled ?
IGNORE_PSR2_HW_TRACKING : 0);
+
+ /* Wa_16011168373:adlp */
+ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0) &&
+ intel_dp->psr.psr2_enabled)
+ intel_de_rmw(dev_priv,
+ TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
+ TRANS_SET_CONTEXT_LATENCY_MASK,
+ TRANS_SET_CONTEXT_LATENCY_VALUE(1));
}
static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
@@ -1101,6 +1220,8 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_dp->psr.dc3co_exit_delay = val;
intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
+ intel_dp->psr.req_psr2_sdp_prior_scanline =
+ crtc_state->req_psr2_sdp_prior_scanline;
if (!psr_interrupt_error_check(intel_dp))
return;
@@ -1225,6 +1346,13 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);
+ /* Wa_16011168373:adlp */
+ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0) &&
+ intel_dp->psr.psr2_enabled)
+ intel_de_rmw(dev_priv,
+ TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
+ TRANS_SET_CONTEXT_LATENCY_MASK, 0);
+
/* Disable PSR on Sink */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
@@ -1432,6 +1560,16 @@ static void clip_area_update(struct drm_rect *overlap_damage_area,
overlap_damage_area->y2 = damage_area->y2;
}
+static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
+ struct drm_rect *pipe_clip)
+{
+ const u16 y_alignment = crtc_state->su_y_granularity;
+
+ pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
+ if (pipe_clip->y2 % y_alignment)
+ pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
+}
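
The helper above is just align-down / align-up to the selection-update granularity; a tiny sketch with made-up clip coordinates:

#include <stdio.h>

static void align_clip(int *y1, int *y2, int gran)
{
	*y1 -= *y1 % gran;			/* round the start down */
	if (*y2 % gran)
		*y2 = (*y2 / gran + 1) * gran;	/* round the end up */
}

int main(void)
{
	int y1 = 101, y2 = 259;

	align_clip(&y1, &y2, 4);
	printf("aligned clip: %d..%d\n", y1, y2);	/* 100..260 */
	return 0;
}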
+
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -1540,10 +1678,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (full_update)
goto skip_sel_fetch_set_loop;
- /* It must be aligned to 4 lines */
- pipe_clip.y1 -= pipe_clip.y1 % 4;
- if (pipe_clip.y2 % 4)
- pipe_clip.y2 = ((pipe_clip.y2 / 4) + 1) * 4;
+ intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
/*
* Now that we have the pipe damaged area check if it intersect with
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 98dd787b00e3..8a52b7a16774 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -53,6 +53,12 @@ static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915)
drm_info(&i915->drm, "Applying Increase DDI Disabled quirk\n");
}
+static void quirk_no_pps_backlight_power_hook(struct drm_i915_private *i915)
+{
+ i915->quirks |= QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK;
+ drm_info(&i915->drm, "Applying no pps backlight power quirk\n");
+}
+
struct intel_quirk {
int device;
int subsystem_vendor;
@@ -72,6 +78,12 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
return 1;
}
+static int intel_dmi_no_pps_backlight(const struct dmi_system_id *id)
+{
+ DRM_INFO("No pps backlight support on %s\n", id->ident);
+ return 1;
+}
+
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
{
.dmi_id_list = &(const struct dmi_system_id[]) {
@@ -96,6 +108,28 @@ static const struct intel_dmi_quirk intel_dmi_quirks[] = {
},
.hook = quirk_invert_brightness,
},
+ {
+ .dmi_id_list = &(const struct dmi_system_id[]) {
+ {
+ .callback = intel_dmi_no_pps_backlight,
+ .ident = "Google Lillipup sku524294",
+ .matches = {DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Google"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Lindar"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "sku524294"),
+ },
+ },
+ {
+ .callback = intel_dmi_no_pps_backlight,
+ .ident = "Google Lillipup sku524295",
+ .matches = {DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Google"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Lindar"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "sku524295"),
+ },
+ },
+ { }
+ },
+ .hook = quirk_no_pps_backlight_power_hook,
+ },
};
static struct intel_quirk intel_quirks[] = {
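Note: quirk_no_pps_backlight_power_hook() only sets a bit in i915->quirks; the DMI table above decides on which boards it fires. A hedged sketch of the consuming side, where the function and stub type are assumptions rather than part of this patch:

    /* Hypothetical consumer: skip the PPS-driven panel power hook when quirked. */
    #include <stdbool.h>
    #include <stdio.h>

    #define QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK (1 << 8)

    struct i915_stub { unsigned int quirks; };            /* stand-in for drm_i915_private */

    static void backlight_power(struct i915_stub *i915, bool enable)
    {
        if (i915->quirks & QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK) {
            printf("quirk set, leaving panel power alone\n");
            return;
        }
        printf("toggling panel power via PPS: %d\n", enable);
    }

    int main(void)
    {
        struct i915_stub i915 = { .quirks = QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK };

        backlight_power(&i915, true);
        return 0;
    }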
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index e4f91d7a5c60..6cb27599ea03 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -1824,7 +1824,7 @@ static void intel_enable_sdvo(struct intel_atomic_state *state,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
u32 temp;
bool input1, input2;
int i;
@@ -1835,7 +1835,7 @@ static void intel_enable_sdvo(struct intel_atomic_state *state,
intel_sdvo_write_sdvox(intel_sdvo, temp);
for (i = 0; i < 2; i++)
- intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
/*
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index c23c210a55f5..3ffece568ed9 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -556,7 +556,7 @@ intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
}
static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
- int required_lanes)
+ int required_lanes, bool force_disconnect)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum tc_port_mode old_tc_mode = dig_port->tc_mode;
@@ -572,7 +572,8 @@ static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
}
icl_tc_phy_disconnect(dig_port);
- icl_tc_phy_connect(dig_port, required_lanes);
+ if (!force_disconnect)
+ icl_tc_phy_connect(dig_port, required_lanes);
drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
dig_port->tc_port_name,
@@ -662,7 +663,7 @@ bool intel_tc_port_connected(struct intel_encoder *encoder)
}
static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
- int required_lanes)
+ int required_lanes, bool force_disconnect)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
intel_wakeref_t wakeref;
@@ -676,8 +677,9 @@ static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
tc_cold_wref = tc_cold_block(dig_port);
- if (intel_tc_port_needs_reset(dig_port))
- intel_tc_port_reset_mode(dig_port, required_lanes);
+ if (force_disconnect || intel_tc_port_needs_reset(dig_port))
+ intel_tc_port_reset_mode(dig_port, required_lanes,
+ force_disconnect);
tc_cold_unblock(dig_port, tc_cold_wref);
}
@@ -688,7 +690,7 @@ static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
- __intel_tc_port_lock(dig_port, 1);
+ __intel_tc_port_lock(dig_port, 1, false);
}
void intel_tc_port_unlock(struct intel_digital_port *dig_port)
@@ -702,6 +704,24 @@ void intel_tc_port_unlock(struct intel_digital_port *dig_port)
wakeref);
}
+/**
+ * intel_tc_port_disconnect_phy: disconnect TypeC PHY from display port
+ * @dig_port: digital port
+ *
+ * Disconnect the given digital port from its TypeC PHY (handing back the
+ * control of the PHY to the TypeC subsystem). The only purpose of this
+ * function is to force the disconnect even with a TypeC display output still
+ * plugged into the TypeC connector, which the TypeC firmware requires
+ * during system suspend and shutdown. Otherwise - during unplug event
+ * handling - PHY ownership is released automatically by
+ * intel_tc_port_reset_mode(), in which case calling this function is not
+ * required.
+ */
+void intel_tc_port_disconnect_phy(struct intel_digital_port *dig_port)
+{
+ __intel_tc_port_lock(dig_port, 1, true);
+ intel_tc_port_unlock(dig_port);
+}
+
bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
{
return mutex_is_locked(&dig_port->tc_lock) ||
@@ -711,7 +731,7 @@ bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
int required_lanes)
{
- __intel_tc_port_lock(dig_port, required_lanes);
+ __intel_tc_port_lock(dig_port, required_lanes, false);
dig_port->tc_link_refcount++;
intel_tc_port_unlock(dig_port);
}
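Note: intel_tc_port_disconnect_phy() exists only so the suspend/shutdown paths can force the PHY hand-over while a sink is still plugged in, as the kernel-doc above spells out. A hedged sketch of a caller; the hook below is an assumption, not part of this patch:

    /* Hypothetical encoder suspend hook forcing the TypeC PHY hand-over. */
    static void encoder_suspend(struct intel_digital_port *dig_port)
    {
        /* quiesce the link/sink first, then hand the PHY back */
        intel_tc_port_disconnect_phy(dig_port);
    }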
diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
index 0eacbd76ec15..0c881f645e27 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.h
+++ b/drivers/gpu/drm/i915/display/intel_tc.h
@@ -13,6 +13,8 @@ struct intel_digital_port;
struct intel_encoder;
bool intel_tc_port_connected(struct intel_encoder *encoder);
+void intel_tc_port_disconnect_phy(struct intel_digital_port *dig_port);
+
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port);
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port);
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port);
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index aa52af7891f0..d02f09f7e750 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -1420,7 +1420,7 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_tv *intel_tv = enc_to_tv(encoder);
const struct intel_tv_connector_state *tv_conn_state =
to_intel_tv_connector_state(conn_state);
@@ -1466,7 +1466,7 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state,
break;
}
- tv_ctl |= TV_ENC_PIPE_SEL(intel_crtc->pipe);
+ tv_ctl |= TV_ENC_PIPE_SEL(crtc->pipe);
switch (tv_mode->oversample) {
case 8:
@@ -1571,8 +1571,7 @@ static int
intel_tv_detect_type(struct intel_tv *intel_tv,
struct drm_connector *connector)
{
- struct drm_crtc *crtc = connector->state->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = to_intel_crtc(connector->state->crtc);
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
u32 tv_ctl, save_tv_ctl;
@@ -1594,7 +1593,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
/* Poll for TV detection */
tv_ctl &= ~(TV_ENC_ENABLE | TV_ENC_PIPE_SEL_MASK | TV_TEST_MODE_MASK);
tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
- tv_ctl |= TV_ENC_PIPE_SEL(intel_crtc->pipe);
+ tv_ctl |= TV_ENC_PIPE_SEL(crtc->pipe);
tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
tv_dac |= (TVDAC_STATE_CHG_EN |
@@ -1619,7 +1618,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
intel_de_write(dev_priv, TV_DAC, tv_dac);
intel_de_posting_read(dev_priv, TV_DAC);
- intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
type = -1;
tv_dac = intel_de_read(dev_priv, TV_DAC);
@@ -1652,7 +1651,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
intel_de_posting_read(dev_priv, TV_CTL);
/* For unknown reasons the hw barfs if we don't do this vblank wait. */
- intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
/* Restore interrupt config */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 7121b66bf96d..85749370508c 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -1106,6 +1106,27 @@ static i915_reg_t dss_ctl2_reg(const struct intel_crtc_state *crtc_state)
return is_pipe_dsc(crtc_state) ? ICL_PIPE_DSS_CTL2(pipe) : DSS_CTL2;
}
+static struct intel_crtc *
+_get_crtc_for_pipe(struct drm_i915_private *i915, enum pipe pipe)
+{
+ if (!intel_pipe_valid(i915, pipe))
+ return NULL;
+
+ return intel_get_crtc_for_pipe(i915, pipe);
+}
+
+struct intel_crtc *
+intel_dsc_get_bigjoiner_secondary(const struct intel_crtc *primary_crtc)
+{
+ return _get_crtc_for_pipe(to_i915(primary_crtc->base.dev), primary_crtc->pipe + 1);
+}
+
+static struct intel_crtc *
+intel_dsc_get_bigjoiner_primary(const struct intel_crtc *secondary_crtc)
+{
+ return _get_crtc_for_pipe(to_i915(secondary_crtc->base.dev), secondary_crtc->pipe - 1);
+}
+
void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -1178,15 +1199,13 @@ void intel_uncompressed_joiner_get_config(struct intel_crtc_state *crtc_state)
dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg(crtc_state));
if (dss_ctl1 & UNCOMPRESSED_JOINER_MASTER) {
crtc_state->bigjoiner = true;
- if (!WARN_ON(INTEL_NUM_PIPES(dev_priv) == crtc->pipe + 1))
- crtc_state->bigjoiner_linked_crtc =
- intel_get_crtc_for_pipe(dev_priv, crtc->pipe + 1);
+ crtc_state->bigjoiner_linked_crtc = intel_dsc_get_bigjoiner_secondary(crtc);
+ drm_WARN_ON(&dev_priv->drm, !crtc_state->bigjoiner_linked_crtc);
} else if (dss_ctl1 & UNCOMPRESSED_JOINER_SLAVE) {
crtc_state->bigjoiner = true;
crtc_state->bigjoiner_slave = true;
- if (!WARN_ON(crtc->pipe == PIPE_A))
- crtc_state->bigjoiner_linked_crtc =
- intel_get_crtc_for_pipe(dev_priv, crtc->pipe - 1);
+ crtc_state->bigjoiner_linked_crtc = intel_dsc_get_bigjoiner_primary(crtc);
+ drm_WARN_ON(&dev_priv->drm, !crtc_state->bigjoiner_linked_crtc);
}
}
@@ -1224,14 +1243,11 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
if (!(dss_ctl1 & MASTER_BIG_JOINER_ENABLE)) {
crtc_state->bigjoiner_slave = true;
- if (!WARN_ON(crtc->pipe == PIPE_A))
- crtc_state->bigjoiner_linked_crtc =
- intel_get_crtc_for_pipe(dev_priv, crtc->pipe - 1);
+ crtc_state->bigjoiner_linked_crtc = intel_dsc_get_bigjoiner_primary(crtc);
} else {
- if (!WARN_ON(INTEL_NUM_PIPES(dev_priv) == crtc->pipe + 1))
- crtc_state->bigjoiner_linked_crtc =
- intel_get_crtc_for_pipe(dev_priv, crtc->pipe + 1);
+ crtc_state->bigjoiner_linked_crtc = intel_dsc_get_bigjoiner_secondary(crtc);
}
+ drm_WARN_ON(&dev_priv->drm, !crtc_state->bigjoiner_linked_crtc);
}
/* FIXME: add more state readout as needed */
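Note: the new helpers encode the bigjoiner pairing rule that the secondary pipe is always primary + 1 (and the primary is secondary - 1), and they return NULL for an out-of-range pipe so the callers above can simply drm_WARN_ON() a missing link. A standalone sketch of the mapping, with illustrative names not taken from the patch:

    /* Bigjoiner pipe pairing: secondary = primary + 1, bounds-checked. */
    #include <stdio.h>

    #define NUM_PIPES 4

    static int get_secondary_pipe(int primary)
    {
        int pipe = primary + 1;

        return (pipe >= 0 && pipe < NUM_PIPES) ? pipe : -1;  /* -1 plays the role of NULL */
    }

    int main(void)
    {
        printf("secondary of pipe 0: %d\n", get_secondary_pipe(0));  /* 1 */
        printf("secondary of pipe 3: %d\n", get_secondary_pipe(3));  /* -1: no such pipe */
        return 0;
    }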
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h
index fe4d45561253..dfb1fd38deb4 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
@@ -22,5 +22,6 @@ void intel_uncompressed_joiner_get_config(struct intel_crtc_state *crtc_state);
void intel_dsc_get_config(struct intel_crtc_state *crtc_state);
enum intel_display_power_domain
intel_dsc_power_domain(const struct intel_crtc_state *crtc_state);
+struct intel_crtc *intel_dsc_get_bigjoiner_secondary(const struct intel_crtc *primary_crtc);
#endif /* __INTEL_VDSC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c
index f002b82ba9c0..3385b45ecd4b 100644
--- a/drivers/gpu/drm/i915/display/intel_vga.c
+++ b/drivers/gpu/drm/i915/display/intel_vga.c
@@ -29,6 +29,9 @@ void intel_vga_disable(struct drm_i915_private *dev_priv)
i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv);
u8 sr1;
+ if (intel_de_read(dev_priv, vga_reg) & VGA_DISP_DISABLE)
+ return;
+
/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
outb(SR01, VGA_SR_INDEX);
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index 394b7bbf48d8..911a113ee006 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -96,9 +96,8 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
{
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
@@ -141,7 +140,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
drm_dbg_kms(&dev_priv->drm,
"scaler_user index %u.%u: "
"Staged freeing scaler id %d scaler_users = 0x%x\n",
- intel_crtc->pipe, scaler_user, *scaler_id,
+ crtc->pipe, scaler_user, *scaler_id,
scaler_state->scaler_users);
*scaler_id = -1;
}
@@ -167,7 +166,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
drm_dbg_kms(&dev_priv->drm,
"scaler_user index %u.%u: src %ux%u dst %ux%u "
"size is out of scaler range\n",
- intel_crtc->pipe, scaler_user, src_w, src_h,
+ crtc->pipe, scaler_user, src_w, src_h,
dst_w, dst_h);
return -EINVAL;
}
@@ -176,7 +175,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
scaler_state->scaler_users |= (1 << scaler_user);
drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
- intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
+ crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
scaler_state->scaler_users);
return 0;
@@ -515,17 +514,17 @@ skl_program_plane_scaler(struct intel_plane *plane,
(crtc_w << 16) | crtc_h);
}
-static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
+static void skl_detach_scaler(struct intel_crtc *crtc, int id)
{
- struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
- intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
- intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
+ intel_de_write_fw(dev_priv, SKL_PS_CTRL(crtc->pipe, id), 0);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(crtc->pipe, id), 0);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, id), 0);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -535,15 +534,15 @@ static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
*/
void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
int i;
/* loop through and disable scalers that aren't in use */
- for (i = 0; i < intel_crtc->num_scalers; i++) {
+ for (i = 0; i < crtc->num_scalers; i++) {
if (!scaler_state->scalers[i].in_use)
- skl_detach_scaler(intel_crtc, i);
+ skl_detach_scaler(crtc, i);
}
}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 084c9c43b2ed..0ee4ff341e25 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -780,10 +780,9 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- struct drm_crtc *crtc = pipe_config->uapi.crtc;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
enum port port;
u32 val;
bool glk_cold_boot = false;
@@ -1389,7 +1388,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
enum port port;
@@ -1397,7 +1396,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
u32 val, tmp;
u16 mode_hdisplay;
- drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(intel_crtc->pipe));
+ drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(crtc->pipe));
mode_hdisplay = adjusted_mode->crtc_hdisplay;
@@ -1424,7 +1423,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
intel_de_write(dev_priv, MIPI_CTRL(port),
tmp | READ_REQUEST_PRIORITY_HIGH);
} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
- enum pipe pipe = intel_crtc->pipe;
+ enum pipe pipe = crtc->pipe;
tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
tmp &= ~BXT_PIPE_SELECT_MASK;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 2fd155742bd2..4f50a508c7a0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -62,10 +62,11 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
unsigned long addr;
- /* mmap ioctl is disallowed for all platforms after TGL-LP. This also
- * covers all platforms with local memory.
+ /*
+ * mmap ioctl is disallowed for all discrete platforms,
+ * and for all platforms with GRAPHICS_VER > 12.
*/
- if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
+ if (IS_DGFX(i915) || GRAPHICS_VER(i915) > 12)
return -EOPNOTSUPP;
if (args->flags & ~(I915_MMAP_WC))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index f4fb68e8955a..e382b7f2353b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -62,6 +62,7 @@ static void try_to_writeback(struct drm_i915_gem_object *obj,
switch (obj->mm.madv) {
case I915_MADV_DONTNEED:
i915_gem_object_truncate(obj);
+ return;
case __I915_MADV_PURGED:
return;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 44b5de06ce64..bc32622a8c5f 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -890,7 +890,7 @@ static int __igt_mmap(struct drm_i915_private *i915,
pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);
- area = find_vma(current->mm, addr);
+ area = vma_lookup(current->mm, addr);
if (!area) {
pr_err("%s: Did not create a vm_area_struct for the mmap\n",
obj->mm.region->name);
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 21c8b7350b7a..da4f5eb43ac2 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -303,10 +303,7 @@ static void __gen8_ppgtt_alloc(struct i915_address_space * const vm,
__i915_gem_object_pin_pages(pt->base);
i915_gem_object_make_unshrinkable(pt->base);
- if (lvl ||
- gen8_pt_count(*start, end) < I915_PDES ||
- intel_vgpu_active(vm->i915))
- fill_px(pt, vm->scratch[lvl]->encode);
+ fill_px(pt, vm->scratch[lvl]->encode);
spin_lock(&pd->lock);
if (likely(!pd->entry[idx])) {
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 9ceddfbb1687..7f03df236613 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1279,7 +1279,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
return true;
/* Waiting to drain ELSP? */
- synchronize_hardirq(to_pci_dev(engine->i915->drm.dev)->irq);
+ intel_synchronize_hardirq(engine->i915);
intel_engine_flush_submission(engine);
/* ELSP is empty, but there are ready requests? E.g. after reset */
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index cac7f3f44642..f8948de72036 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -348,7 +348,7 @@ static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
if (intel_has_pending_fb_unpin(ggtt->vm.i915))
return ERR_PTR(-EAGAIN);
- return ERR_PTR(-EDEADLK);
+ return ERR_PTR(-ENOBUFS);
}
int __i915_vma_pin_fence(struct i915_vma *vma)
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 0c423f096e2b..37d74d4ed59b 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -184,8 +184,11 @@ static int xcs_resume(struct intel_engine_cs *engine)
ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
ring->head, ring->tail);
- /* Double check the ring is empty & disabled before we resume */
- synchronize_hardirq(engine->i915->drm.irq);
+ /*
+ * Double check the ring is empty & disabled before we resume. Called
+ * from atomic context during PCI probe, so _hardirq().
+ */
+ intel_synchronize_hardirq(engine->i915);
if (!stop_ring(engine))
goto err;
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index ea2203af0764..1c8108d30b85 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -551,6 +551,32 @@ static int live_pin_rewind(void *arg)
return err;
}
+static int engine_lock_reset_tasklet(struct intel_engine_cs *engine)
+{
+ tasklet_disable(&engine->execlists.tasklet);
+ local_bh_disable();
+
+ if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &engine->gt->reset.flags)) {
+ local_bh_enable();
+ tasklet_enable(&engine->execlists.tasklet);
+
+ intel_gt_set_wedged(engine->gt);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void engine_unlock_reset_tasklet(struct intel_engine_cs *engine)
+{
+ clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
+ &engine->gt->reset.flags);
+
+ local_bh_enable();
+ tasklet_enable(&engine->execlists.tasklet);
+}
+
static int live_hold_reset(void *arg)
{
struct intel_gt *gt = arg;
@@ -598,15 +624,9 @@ static int live_hold_reset(void *arg)
/* We have our request executing, now remove it and reset */
- local_bh_disable();
- if (test_and_set_bit(I915_RESET_ENGINE + id,
- &gt->reset.flags)) {
- local_bh_enable();
- intel_gt_set_wedged(gt);
- err = -EBUSY;
+ err = engine_lock_reset_tasklet(engine);
+ if (err)
goto out;
- }
- tasklet_disable(&engine->execlists.tasklet);
engine->execlists.tasklet.callback(&engine->execlists.tasklet);
GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
@@ -618,10 +638,7 @@ static int live_hold_reset(void *arg)
__intel_engine_reset_bh(engine, NULL);
GEM_BUG_ON(rq->fence.error != -EIO);
- tasklet_enable(&engine->execlists.tasklet);
- clear_and_wake_up_bit(I915_RESET_ENGINE + id,
- &gt->reset.flags);
- local_bh_enable();
+ engine_unlock_reset_tasklet(engine);
/* Check that we do not resubmit the held request */
if (!i915_request_wait(rq, 0, HZ / 5)) {
@@ -4585,15 +4602,9 @@ static int reset_virtual_engine(struct intel_gt *gt,
GEM_BUG_ON(engine == ve->engine);
/* Take ownership of the reset and tasklet */
- local_bh_disable();
- if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
- &gt->reset.flags)) {
- local_bh_enable();
- intel_gt_set_wedged(gt);
- err = -EBUSY;
+ err = engine_lock_reset_tasklet(engine);
+ if (err)
goto out_heartbeat;
- }
- tasklet_disable(&engine->execlists.tasklet);
engine->execlists.tasklet.callback(&engine->execlists.tasklet);
GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
@@ -4612,9 +4623,7 @@ static int reset_virtual_engine(struct intel_gt *gt,
GEM_BUG_ON(rq->fence.error != -EIO);
/* Release our grasp on the engine, letting CS flow again */
- tasklet_enable(&engine->execlists.tasklet);
- clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags);
- local_bh_enable();
+ engine_unlock_reset_tasklet(engine);
/* Check that we do not resubmit the held request */
i915_request_get(rq);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 48b4d4cf805d..1ac98f8aba31 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -88,6 +88,7 @@ struct kvmgt_pgfn {
struct hlist_node hnode;
};
+#define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries"
struct kvmgt_guest_info {
struct kvm *kvm;
struct intel_vgpu *vgpu;
@@ -95,7 +96,6 @@ struct kvmgt_guest_info {
#define NR_BKT (1 << 18)
struct hlist_head ptable[NR_BKT];
#undef NR_BKT
- struct dentry *debugfs_cache_entries;
};
struct gvt_dma {
@@ -1947,16 +1947,15 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
kvm_page_track_register_notifier(kvm, &info->track_node);
- info->debugfs_cache_entries = debugfs_create_ulong(
- "kvmgt_nr_cache_entries",
- 0444, vgpu->debugfs,
- &vdev->nr_cache_entries);
+ debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs,
+ &vdev->nr_cache_entries);
return 0;
}
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
{
- debugfs_remove(info->debugfs_cache_entries);
+ debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME,
+ info->vgpu->debugfs));
kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
kvm_put_kvm(info->kvm);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index cc745751ac53..0529576f069c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -636,7 +636,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
intel_uncore_read16(uncore, C0DRB3_BW));
seq_printf(m, "C1DRB3 = 0x%04x\n",
intel_uncore_read16(uncore, C1DRB3_BW));
- } else if (INTEL_GEN(dev_priv) >= 6) {
+ } else if (GRAPHICS_VER(dev_priv) >= 6) {
seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
intel_uncore_read(uncore, MAD_DIMM_C0));
seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 62327c15f457..30d8cd8c69b1 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -42,7 +42,6 @@
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
-#include <drm/drm_irq.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index aa109c7e06b5..fd9db6de5113 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -270,8 +270,10 @@ struct drm_i915_display_funcs {
int (*bw_calc_min_cdclk)(struct intel_atomic_state *state);
int (*get_fifo_size)(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane);
- int (*compute_pipe_wm)(struct intel_crtc_state *crtc_state);
- int (*compute_intermediate_wm)(struct intel_crtc_state *crtc_state);
+ int (*compute_pipe_wm)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ int (*compute_intermediate_wm)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
void (*initial_watermarks)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void (*atomic_update_watermarks)(struct intel_atomic_state *state,
@@ -346,13 +348,14 @@ struct intel_fbc {
/* This is always the inner lock when overlapping with struct_mutex and
* it's the outer lock when overlapping with stolen_lock. */
struct mutex lock;
- unsigned threshold;
unsigned int possible_framebuffer_bits;
unsigned int busy_bits;
struct intel_crtc *crtc;
struct drm_mm_node compressed_fb;
- struct drm_mm_node *compressed_llb;
+ struct drm_mm_node compressed_llb;
+
+ u8 limit;
bool false_color;
@@ -467,6 +470,7 @@ struct i915_drrs {
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
#define QUIRK_INCREASE_T12_DELAY (1<<6)
#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
+#define QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK (1<<8)
struct intel_fbdev;
struct intel_fbc_work;
@@ -1239,21 +1243,6 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
#define INTEL_DEVID(dev_priv) (RUNTIME_INFO(dev_priv)->device_id)
-/*
- * Deprecated: this will be replaced by individual IP checks:
- * GRAPHICS_VER(), MEDIA_VER() and DISPLAY_VER()
- */
-#define INTEL_GEN(dev_priv) GRAPHICS_VER(dev_priv)
-/*
- * Deprecated: use IS_GRAPHICS_VER(), IS_MEDIA_VER() and IS_DISPLAY_VER() as
- * appropriate.
- */
-#define IS_GEN_RANGE(dev_priv, s, e) IS_GRAPHICS_VER(dev_priv, (s), (e))
-/*
- * Deprecated: use GRAPHICS_VER(), MEDIA_VER() and DISPLAY_VER() as appropriate.
- */
-#define IS_GEN(dev_priv, n) (GRAPHICS_VER(dev_priv) == (n))
-
#define GRAPHICS_VER(i915) (INTEL_INFO(i915)->graphics_ver)
#define IS_GRAPHICS_VER(i915, from, until) \
(GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until))
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 987211f21761..1d4c683c9de9 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -33,7 +33,6 @@
#include <linux/sysrq.h>
#include <drm/drm_drv.h>
-#include <drm/drm_irq.h>
#include "display/intel_de.h"
#include "display/intel_display_types.h"
@@ -2881,14 +2880,14 @@ static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
return true;
}
-int bdw_enable_vblank(struct drm_crtc *crtc)
+int bdw_enable_vblank(struct drm_crtc *_crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(_crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
unsigned long irqflags;
- if (gen11_dsi_configure_te(intel_crtc, true))
+ if (gen11_dsi_configure_te(crtc, true))
return 0;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2899,7 +2898,7 @@ int bdw_enable_vblank(struct drm_crtc *crtc)
* PSR is active as no frames are generated, so check only for PSR.
*/
if (HAS_PSR(dev_priv))
- drm_crtc_vblank_restore(crtc);
+ drm_crtc_vblank_restore(&crtc->base);
return 0;
}
@@ -2953,14 +2952,14 @@ void ilk_disable_vblank(struct drm_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-void bdw_disable_vblank(struct drm_crtc *crtc)
+void bdw_disable_vblank(struct drm_crtc *_crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(_crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
unsigned long irqflags;
- if (gen11_dsi_configure_te(intel_crtc, false))
+ if (gen11_dsi_configure_te(crtc, false))
return;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -4564,10 +4563,6 @@ void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
- /*
- * We only use drm_irq_uninstall() at unload and VT switch, so
- * this is the only thing we need to check.
- */
return dev_priv->runtime_pm.irqs_enabled;
}
@@ -4575,3 +4570,8 @@ void intel_synchronize_irq(struct drm_i915_private *i915)
{
synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
}
+
+void intel_synchronize_hardirq(struct drm_i915_private *i915)
+{
+ synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
+}
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index db34d5dbe402..e43b6734f21b 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -94,6 +94,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
bool intel_irqs_enabled(struct drm_i915_private *dev_priv);
void intel_synchronize_irq(struct drm_i915_private *i915);
+void intel_synchronize_hardirq(struct drm_i915_private *i915);
int intel_get_crtc_scanline(struct intel_crtc *crtc);
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 83b500bb170c..7030e563985c 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -845,7 +845,6 @@ static const struct intel_device_info icl_info = {
static const struct intel_device_info ehl_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ELKHARTLAKE),
- .require_force_probe = 1,
.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
.ppgtt_size = 36,
};
@@ -853,7 +852,6 @@ static const struct intel_device_info ehl_info = {
static const struct intel_device_info jsl_info = {
GEN11_FEATURES,
PLATFORM(INTEL_JASPERLAKE),
- .require_force_probe = 1,
.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
.ppgtt_size = 36,
};
@@ -939,15 +937,48 @@ static const struct intel_device_info adl_s_info = {
.dma_mask_size = 46,
};
+#define XE_LPD_CURSOR_OFFSETS \
+ .cursor_offsets = { \
+ [PIPE_A] = CURSOR_A_OFFSET, \
+ [PIPE_B] = IVB_CURSOR_B_OFFSET, \
+ [PIPE_C] = IVB_CURSOR_C_OFFSET, \
+ [PIPE_D] = TGL_CURSOR_D_OFFSET, \
+ }
+
#define XE_LPD_FEATURES \
- .display.ver = 13, \
- .display.has_psr_hw_tracking = 0, \
- .abox_mask = GENMASK(1, 0), \
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
- BIT(TRANSCODER_C) | BIT(TRANSCODER_D), \
- .dbuf.size = 4096, \
- .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4)
+ .abox_mask = GENMASK(1, 0), \
+ .color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 }, \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_D), \
+ .dbuf.size = 4096, \
+ .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | \
+ BIT(DBUF_S4), \
+ .display.has_ddi = 1, \
+ .display.has_dmc = 1, \
+ .display.has_dp_mst = 1, \
+ .display.has_dsb = 1, \
+ .display.has_dsc = 1, \
+ .display.has_fbc = 1, \
+ .display.has_fpga_dbg = 1, \
+ .display.has_hdcp = 1, \
+ .display.has_hotplug = 1, \
+ .display.has_ipc = 1, \
+ .display.has_psr = 1, \
+ .display.ver = 13, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
+ .pipe_offsets = { \
+ [TRANSCODER_A] = PIPE_A_OFFSET, \
+ [TRANSCODER_B] = PIPE_B_OFFSET, \
+ [TRANSCODER_C] = PIPE_C_OFFSET, \
+ [TRANSCODER_D] = PIPE_D_OFFSET, \
+ }, \
+ .trans_offsets = { \
+ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+ [TRANSCODER_C] = TRANSCODER_C_OFFSET, \
+ [TRANSCODER_D] = TRANSCODER_D_OFFSET, \
+ }, \
+ XE_LPD_CURSOR_OFFSETS
static const struct intel_device_info adl_p_info = {
GEN12_FEATURES,
@@ -956,6 +987,7 @@ static const struct intel_device_info adl_p_info = {
.has_cdclk_crawl = 1,
.require_force_probe = 1,
.display.has_modular_fia = 1,
+ .display.has_psr_hw_tracking = 0,
.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
.ppgtt_size = 48,
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e915ec034c98..16a19239d86d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -4590,19 +4590,22 @@ enum {
#define TGL_EDP_PSR2_BLOCK_COUNT_NUM_2 (0 << 28)
#define TGL_EDP_PSR2_BLOCK_COUNT_NUM_3 (1 << 28)
#define EDP_Y_COORDINATE_ENABLE REG_BIT(25) /* display 10, 11 and 12 */
+#define EDP_PSR2_SU_SDP_SCANLINE REG_BIT(25) /* display 13+ */
#define EDP_MAX_SU_DISABLE_TIME(t) ((t) << 20)
#define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f << 20)
#define EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES 8
#define EDP_PSR2_IO_BUFFER_WAKE(lines) ((EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES - (lines)) << 13)
#define EDP_PSR2_IO_BUFFER_WAKE_MASK (3 << 13)
#define TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES 5
-#define TGL_EDP_PSR2_IO_BUFFER_WAKE(lines) (((lines) - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES) << 13)
+#define TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT 13
+#define TGL_EDP_PSR2_IO_BUFFER_WAKE(lines) (((lines) - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES) << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT)
#define TGL_EDP_PSR2_IO_BUFFER_WAKE_MASK (7 << 13)
#define EDP_PSR2_FAST_WAKE_MAX_LINES 8
#define EDP_PSR2_FAST_WAKE(lines) ((EDP_PSR2_FAST_WAKE_MAX_LINES - (lines)) << 11)
#define EDP_PSR2_FAST_WAKE_MASK (3 << 11)
#define TGL_EDP_PSR2_FAST_WAKE_MIN_LINES 5
-#define TGL_EDP_PSR2_FAST_WAKE(lines) (((lines) - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES) << 10)
+#define TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT 10
+#define TGL_EDP_PSR2_FAST_WAKE(lines) (((lines) - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES) << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT)
#define TGL_EDP_PSR2_FAST_WAKE_MASK (7 << 10)
#define EDP_PSR2_TP2_TIME_500us (0 << 8)
#define EDP_PSR2_TP2_TIME_100us (1 << 8)
@@ -7751,7 +7754,7 @@ enum {
#define GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED (3 << 0) /* icl + */
/* DMC */
-#define DMC_PROGRAM(i) _MMIO(0x80000 + (i) * 4)
+#define DMC_PROGRAM(addr, i) _MMIO((addr) + (i) * 4)
#define DMC_SSP_BASE_ADDR_GEN9 0x00002FC0
#define DMC_HTP_ADDR_SKL 0x00500034
#define DMC_SSP_BASE _MMIO(0x8F074)
@@ -8107,6 +8110,7 @@ enum {
# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
#define CHICKEN_PAR1_1 _MMIO(0x42080)
+#define IGNORE_KVMR_PIPE_A REG_BIT(23)
#define KBL_ARB_FILL_SPARE_22 REG_BIT(22)
#define DIS_RAM_BYPASS_PSR2_MAN_TRACK (1 << 16)
#define SKL_DE_COMPRESSED_HASH_MODE (1 << 15)
@@ -10365,6 +10369,14 @@ enum skl_power_gate {
#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC)
/* See DP_MSA_MISC_* for the bit definitions */
+#define _TRANS_A_SET_CONTEXT_LATENCY 0x6007C
+#define _TRANS_B_SET_CONTEXT_LATENCY 0x6107C
+#define _TRANS_C_SET_CONTEXT_LATENCY 0x6207C
+#define _TRANS_D_SET_CONTEXT_LATENCY 0x6307C
+#define TRANS_SET_CONTEXT_LATENCY(tran) _MMIO_TRANS2(tran, _TRANS_A_SET_CONTEXT_LATENCY)
+#define TRANS_SET_CONTEXT_LATENCY_MASK REG_GENMASK(15, 0)
+#define TRANS_SET_CONTEXT_LATENCY_VALUE(x) REG_FIELD_PREP(TRANS_SET_CONTEXT_LATENCY_MASK, (x))
+
/* LCPLL Control */
#define LCPLL_CTL _MMIO(0x130040)
#define LCPLL_PLL_DISABLE (1 << 31)
@@ -10513,7 +10525,6 @@ enum skl_power_gate {
#define _DG1_DPCLKA1_CFGCR0 0x16C280
#define _DG1_DPCLKA_PHY_IDX(phy) ((phy) % 2)
#define _DG1_DPCLKA_PLL_IDX(pll) ((pll) % 2)
-#define _DG1_PHY_DPLL_MAP(phy) ((phy) >= PHY_C ? DPLL_ID_DG1_DPLL2 : DPLL_ID_DG1_DPLL0)
#define DG1_DPCLKA_CFGCR0(phy) _MMIO_PHY((phy) / 2, \
_DG1_DPCLKA_CFGCR0, \
_DG1_DPCLKA1_CFGCR0)
@@ -10521,8 +10532,6 @@ enum skl_power_gate {
#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) (_DG1_DPCLKA_PHY_IDX(phy) * 2)
#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) (_DG1_DPCLKA_PLL_IDX(pll) << DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (0x3 << DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
-#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_DPLL_MAP(clk_sel, phy) \
- (((clk_sel) >> DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)) + _DG1_PHY_DPLL_MAP(phy))
/* ADLS Clocks */
#define _ADLS_DPCLKA_CFGCR0 0x164280
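Note: TRANS_SET_CONTEXT_LATENCY_VALUE() packs its argument into the low 16-bit field with REG_FIELD_PREP(), and the Wa_16011168373 hunks in intel_psr.c then write it with intel_de_rmw(reg, mask, value), i.e. clear the masked bits and OR in the new field. A standalone sketch of that bit arithmetic; the macros below only mimic the kernel's REG_GENMASK()/REG_FIELD_PREP():

    #include <stdio.h>

    /* simplified stand-ins for REG_GENMASK()/REG_FIELD_PREP() */
    #define GENMASK32(h, l)     ((~0u >> (31 - (h))) & (~0u << (l)))
    #define FIELD_PREP32(m, v)  (((v) << __builtin_ctz(m)) & (m))

    #define LATENCY_MASK        GENMASK32(15, 0)

    int main(void)
    {
        unsigned int old = 0xdead0005u;                     /* arbitrary register value */
        unsigned int val = FIELD_PREP32(LATENCY_MASK, 1);   /* latency = 1 line */
        unsigned int upd = (old & ~LATENCY_MASK) | val;     /* what the rmw ends up writing */

        printf("mask=0x%08x val=0x%08x new=0x%08x\n", LATENCY_MASK, val, upd);
        return 0;
    }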
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
index 50fdea84ba70..879b0f007be3 100644
--- a/drivers/gpu/drm/i915/intel_dram.c
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -484,8 +484,7 @@ static int gen11_get_dram_info(struct drm_i915_private *i915)
static int gen12_get_dram_info(struct drm_i915_private *i915)
{
- /* Always needed for GEN12+ */
- i915->dram_info.wm_lv_0_adjust_needed = true;
+ i915->dram_info.wm_lv_0_adjust_needed = false;
return icl_pcode_read_mem_global_info(i915);
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 45fefa0ed160..74a8863b94c2 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1370,11 +1370,11 @@ static bool g4x_compute_fbc_en(const struct g4x_wm_state *wm_state,
return true;
}
-static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
+static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->uapi.state);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
int num_active_planes = hweight8(crtc_state->active_planes &
~BIT(PLANE_CURSOR));
@@ -1451,20 +1451,21 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
return 0;
}
-static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
+static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
- struct intel_atomic_state *intel_state =
- to_intel_atomic_state(new_crtc_state->uapi.state);
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(intel_state, crtc);
const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
enum plane_id plane_id;
- if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
+ if (!new_crtc_state->hw.active ||
+ drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
*intermediate = *optimal;
intermediate->cxsr = false;
@@ -1890,12 +1891,12 @@ static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}
-static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
+static int vlv_compute_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->uapi.state);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
const struct vlv_fifo_state *fifo_state =
&crtc_state->wm.vlv.fifo_state;
@@ -2095,19 +2096,20 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
#undef VLV_FIFO
-static int vlv_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
+static int vlv_compute_intermediate_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
- struct intel_atomic_state *intel_state =
- to_intel_atomic_state(new_crtc_state->uapi.state);
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(intel_state, crtc);
const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
int level;
- if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
+ if (!new_crtc_state->hw.active ||
+ drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
*intermediate = *optimal;
intermediate->cxsr = false;
@@ -2906,24 +2908,25 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
if (wm[level] == 0) {
for (i = level + 1; i <= max_level; i++)
wm[i] = 0;
+
+ max_level = level - 1;
+
break;
}
}
/*
- * WaWmMemoryReadLatency:skl+,glk
+ * WaWmMemoryReadLatency
*
* punit doesn't take into account the read latency so we need
- * to add 2us to the various latency levels we retrieve from the
- * punit when level 0 response data us 0us.
+ * to add proper adjustment to each valid level we retrieve
+ * from the punit when level 0 response data is 0us.
*/
if (wm[0] == 0) {
- wm[0] += 2;
- for (level = 1; level <= max_level; level++) {
- if (wm[level] == 0)
- break;
- wm[level] += 2;
- }
+ u8 adjust = DISPLAY_VER(dev_priv) >= 12 ? 3 : 2;
+
+ for (level = 0; level <= max_level; level++)
+ wm[level] += adjust;
}
/*
@@ -2934,7 +2937,6 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
*/
if (dev_priv->dram_info.wm_lv_0_adjust_needed)
wm[0] += 1;
-
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD);
@@ -3144,10 +3146,12 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
}
/* Compute new watermarks for the pipe */
-static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
+static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct intel_pipe_wm *pipe_wm;
struct intel_plane *plane;
const struct intel_plane_state *plane_state;
@@ -3220,16 +3224,16 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
* state and the new state. These can be programmed to the hardware
* immediately.
*/
-static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(newstate->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
- struct intel_atomic_state *intel_state =
- to_intel_atomic_state(newstate->uapi.state);
- const struct intel_crtc_state *oldstate =
- intel_atomic_get_old_crtc_state(intel_state, intel_crtc);
- const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal;
+static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_pipe_wm *a = &new_crtc_state->wm.ilk.intermediate;
+ const struct intel_pipe_wm *b = &old_crtc_state->wm.ilk.optimal;
int level, max_level = ilk_wm_max_level(dev_priv);
/*
@@ -3237,9 +3241,10 @@ static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
* currently active watermarks to get values that are safe both before
* and after the vblank.
*/
- *a = newstate->wm.ilk.optimal;
- if (!newstate->hw.active || drm_atomic_crtc_needs_modeset(&newstate->uapi) ||
- intel_state->skip_intermediate_wm)
+ *a = new_crtc_state->wm.ilk.optimal;
+ if (!new_crtc_state->hw.active ||
+ drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) ||
+ state->skip_intermediate_wm)
return 0;
a->pipe_enabled |= b->pipe_enabled;
@@ -3270,8 +3275,8 @@ static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
* If our intermediate WM are identical to the final WM, then we can
* omit the post-vblank programming; only update if it's different.
*/
- if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) != 0)
- newstate->wm.need_postvbl_update = true;
+ if (memcmp(a, &new_crtc_state->wm.ilk.optimal, sizeof(*a)) != 0)
+ new_crtc_state->wm.need_postvbl_update = true;
return 0;
}
@@ -3283,12 +3288,12 @@ static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
int level,
struct intel_wm_level *ret_wm)
{
- const struct intel_crtc *intel_crtc;
+ const struct intel_crtc *crtc;
ret_wm->enable = true;
- for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
- const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ const struct intel_pipe_wm *active = &crtc->wm.active.ilk;
const struct intel_wm_level *wm = &active->wm[level];
if (!active->pipe_enabled)
@@ -3388,7 +3393,7 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
enum intel_ddb_partitioning partitioning,
struct ilk_wm_values *results)
{
- struct intel_crtc *intel_crtc;
+ struct intel_crtc *crtc;
int level, wm_lp;
results->enable_fbc_wm = merged->fbc_wm_enabled;
@@ -3433,9 +3438,9 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
}
/* LP0 register values */
- for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
- enum pipe pipe = intel_crtc->pipe;
- const struct intel_pipe_wm *pipe_wm = &intel_crtc->wm.active.ilk;
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ enum pipe pipe = crtc->pipe;
+ const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk;
const struct intel_wm_level *r = &pipe_wm->wm[0];
if (drm_WARN_ON(&dev_priv->drm, !r->enable))
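Note: compute_pipe_wm()/compute_intermediate_wm() now take (state, crtc) and look up their own crtc states via intel_atomic_get_new/old_crtc_state(), matching the other display vfuncs in i915_drv.h above. A hedged sketch of the adjusted call shape; the caller shown is an assumption, not part of these hunks:

    /* Hypothetical call site after the vfunc signature change. */
    ret = dev_priv->display.compute_pipe_wm(state, crtc);
    if (ret)
        return ret;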
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 1bed8f666048..7178bc6f8556 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1929,7 +1929,7 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
return -ENODEV;
}
- if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
+ if (GRAPHICS_VER(i915) > 5 && !intel_vgpu_active(i915))
uncore->flags |= UNCORE_HAS_FORCEWAKE;
if (!intel_uncore_has_forcewake(uncore)) {
diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
index f64e06e1067d..96ea1a2c11dd 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.c
+++ b/drivers/gpu/drm/kmb/kmb_drv.c
@@ -137,6 +137,7 @@ static int kmb_hw_init(struct drm_device *drm, unsigned long flags)
/* Allocate LCD interrupt resources */
irq_lcd = platform_get_irq(pdev, 0);
if (irq_lcd < 0) {
+ ret = irq_lcd;
drm_err(&kmb->drm, "irq_lcd not found");
goto setup_fail;
}
diff --git a/drivers/gpu/drm/lima/lima_trace.h b/drivers/gpu/drm/lima/lima_trace.h
index 3a430e93d384..494b9790b1da 100644
--- a/drivers/gpu/drm/lima/lima_trace.h
+++ b/drivers/gpu/drm/lima/lima_trace.h
@@ -24,7 +24,7 @@ DECLARE_EVENT_CLASS(lima_task,
__entry->task_id = task->base.id;
__entry->context = task->base.s_fence->finished.context;
__entry->seqno = task->base.s_fence->finished.seqno;
- __assign_str(pipe, task->base.sched->name)
+ __assign_str(pipe, task->base.sched->name);
),
TP_printk("task=%llu, context=%u seqno=%u pipe=%s",
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index 5aade1d13961..34a00d7e9c38 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -577,7 +577,7 @@ static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
* porches and sync.
*/
/* (ps/s) / (pixels/s) = ps/pixels */
- pclk = DIV_ROUND_UP_ULL(1000000000000, mode->clock);
+ pclk = DIV_ROUND_UP_ULL(1000000000000, (mode->clock * 1000));
dev_dbg(d->dev, "picoseconds between two pixels: %llu\n",
pclk);
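Note: mode->clock is in kHz, so the divisor has to be scaled to pixels per second before converting to picoseconds per pixel; that missing factor of 1000 is the whole fix here. A standalone worked example (the 148500 kHz figure is only an illustration):

    /* picoseconds per pixel = (ps per second) / (pixels per second), rounded up */
    #include <stdio.h>

    int main(void)
    {
        unsigned long long ps_per_s = 1000000000000ULL;
        unsigned long clock_khz = 148500;                  /* e.g. a 1080p60 pixel clock */
        unsigned long long pix_per_s = clock_khz * 1000ULL;
        unsigned long long pclk = (ps_per_s + pix_per_s - 1) / pix_per_s;

        printf("%llu ps per pixel\n", pclk);               /* 6735 ps (~6.7 ns) */
        return 0;
    }

Without the * 1000 the same division yields roughly 6.7 us per pixel, three orders of magnitude off.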
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index bea91c81626e..bced555648b0 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -83,6 +83,7 @@ struct mtk_dpi {
struct pinctrl *pinctrl;
struct pinctrl_state *pins_gpio;
struct pinctrl_state *pins_dpi;
+ u32 output_fmt;
int refcount;
};
@@ -122,6 +123,8 @@ struct mtk_dpi_conf {
u32 reg_h_fre_con;
u32 max_clock_khz;
bool edge_sel_en;
+ const u32 *output_fmts;
+ u32 num_output_fmts;
};
static void mtk_dpi_mask(struct mtk_dpi *dpi, u32 offset, u32 val, u32 mask)
@@ -381,6 +384,20 @@ static void mtk_dpi_config_color_format(struct mtk_dpi *dpi,
}
}
+static void mtk_dpi_dual_edge(struct mtk_dpi *dpi)
+{
+ if ((dpi->output_fmt == MEDIA_BUS_FMT_RGB888_2X12_LE) ||
+ (dpi->output_fmt == MEDIA_BUS_FMT_RGB888_2X12_BE)) {
+ mtk_dpi_mask(dpi, DPI_DDR_SETTING, DDR_EN | DDR_4PHASE,
+ DDR_EN | DDR_4PHASE);
+ mtk_dpi_mask(dpi, DPI_OUTPUT_SETTING,
+ dpi->output_fmt == MEDIA_BUS_FMT_RGB888_2X12_LE ?
+ EDGE_SEL : 0, EDGE_SEL);
+ } else {
+ mtk_dpi_mask(dpi, DPI_DDR_SETTING, DDR_EN | DDR_4PHASE, 0);
+ }
+}
+
static void mtk_dpi_power_off(struct mtk_dpi *dpi)
{
if (WARN_ON(dpi->refcount == 0))
@@ -455,7 +472,13 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
pll_rate = clk_get_rate(dpi->tvd_clk);
vm.pixelclock = pll_rate / factor;
- clk_set_rate(dpi->pixel_clk, vm.pixelclock);
+ if ((dpi->output_fmt == MEDIA_BUS_FMT_RGB888_2X12_LE) ||
+ (dpi->output_fmt == MEDIA_BUS_FMT_RGB888_2X12_BE))
+ clk_set_rate(dpi->pixel_clk, vm.pixelclock * 2);
+ else
+ clk_set_rate(dpi->pixel_clk, vm.pixelclock);
+
+
vm.pixelclock = clk_get_rate(dpi->pixel_clk);
dev_dbg(dpi->dev, "Got PLL %lu Hz, pixel clock %lu Hz\n",
@@ -519,12 +542,87 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
mtk_dpi_config_yc_map(dpi, dpi->yc_map);
mtk_dpi_config_color_format(dpi, dpi->color_format);
mtk_dpi_config_2n_h_fre(dpi);
+ mtk_dpi_dual_edge(dpi);
mtk_dpi_config_disable_edge(dpi);
mtk_dpi_sw_reset(dpi, false);
return 0;
}
+static u32 *mtk_dpi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ unsigned int *num_output_fmts)
+{
+ struct mtk_dpi *dpi = bridge_to_dpi(bridge);
+ u32 *output_fmts;
+
+ *num_output_fmts = 0;
+
+ if (!dpi->conf->output_fmts) {
+ dev_err(dpi->dev, "output_fmts should not be null\n");
+ return NULL;
+ }
+
+ output_fmts = kcalloc(dpi->conf->num_output_fmts, sizeof(*output_fmts),
+ GFP_KERNEL);
+ if (!output_fmts)
+ return NULL;
+
+ *num_output_fmts = dpi->conf->num_output_fmts;
+
+ memcpy(output_fmts, dpi->conf->output_fmts,
+ sizeof(*output_fmts) * dpi->conf->num_output_fmts);
+
+ return output_fmts;
+}
+
+static u32 *mtk_dpi_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ u32 *input_fmts;
+
+ *num_input_fmts = 0;
+
+ input_fmts = kcalloc(1, sizeof(*input_fmts),
+ GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ *num_input_fmts = 1;
+ input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
+
+ return input_fmts;
+}
+
+static int mtk_dpi_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct mtk_dpi *dpi = bridge->driver_private;
+ unsigned int out_bus_format;
+
+ out_bus_format = bridge_state->output_bus_cfg.format;
+
+ dev_dbg(dpi->dev, "input format 0x%04x, output format 0x%04x\n",
+ bridge_state->input_bus_cfg.format,
+ bridge_state->output_bus_cfg.format);
+
+ dpi->output_fmt = out_bus_format;
+ dpi->bit_num = MTK_DPI_OUT_BIT_NUM_8BITS;
+ dpi->channel_swap = MTK_DPI_OUT_CHANNEL_SWAP_RGB;
+ dpi->yc_map = MTK_DPI_OUT_YC_MAP_RGB;
+ dpi->color_format = MTK_DPI_COLOR_FORMAT_RGB;
+
+ return 0;
+}
+
static int mtk_dpi_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
@@ -577,6 +675,12 @@ static const struct drm_bridge_funcs mtk_dpi_bridge_funcs = {
.mode_valid = mtk_dpi_bridge_mode_valid,
.disable = mtk_dpi_bridge_disable,
.enable = mtk_dpi_bridge_enable,
+ .atomic_check = mtk_dpi_bridge_atomic_check,
+ .atomic_get_output_bus_fmts = mtk_dpi_bridge_atomic_get_output_bus_fmts,
+ .atomic_get_input_bus_fmts = mtk_dpi_bridge_atomic_get_input_bus_fmts,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
};
void mtk_dpi_start(struct device *dev)
@@ -623,11 +727,6 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
}
drm_connector_attach_encoder(dpi->connector, &dpi->encoder);
- dpi->bit_num = MTK_DPI_OUT_BIT_NUM_8BITS;
- dpi->channel_swap = MTK_DPI_OUT_CHANNEL_SWAP_RGB;
- dpi->yc_map = MTK_DPI_OUT_YC_MAP_RGB;
- dpi->color_format = MTK_DPI_COLOR_FORMAT_RGB;
-
return 0;
err_cleanup:
@@ -680,10 +779,21 @@ static unsigned int mt8183_calculate_factor(int clock)
return 2;
}
+static const u32 mt8173_output_fmts[] = {
+ MEDIA_BUS_FMT_RGB888_1X24,
+};
+
+static const u32 mt8183_output_fmts[] = {
+ MEDIA_BUS_FMT_RGB888_2X12_LE,
+ MEDIA_BUS_FMT_RGB888_2X12_BE,
+};
+
static const struct mtk_dpi_conf mt8173_conf = {
.cal_factor = mt8173_calculate_factor,
.reg_h_fre_con = 0xe0,
.max_clock_khz = 300000,
+ .output_fmts = mt8173_output_fmts,
+ .num_output_fmts = ARRAY_SIZE(mt8173_output_fmts),
};
static const struct mtk_dpi_conf mt2701_conf = {
@@ -691,18 +801,24 @@ static const struct mtk_dpi_conf mt2701_conf = {
.reg_h_fre_con = 0xb0,
.edge_sel_en = true,
.max_clock_khz = 150000,
+ .output_fmts = mt8173_output_fmts,
+ .num_output_fmts = ARRAY_SIZE(mt8173_output_fmts),
};
static const struct mtk_dpi_conf mt8183_conf = {
.cal_factor = mt8183_calculate_factor,
.reg_h_fre_con = 0xe0,
.max_clock_khz = 100000,
+ .output_fmts = mt8183_output_fmts,
+ .num_output_fmts = ARRAY_SIZE(mt8183_output_fmts),
};
static const struct mtk_dpi_conf mt8192_conf = {
.cal_factor = mt8183_calculate_factor,
.reg_h_fre_con = 0xe0,
.max_clock_khz = 150000,
+ .output_fmts = mt8173_output_fmts,
+ .num_output_fmts = ARRAY_SIZE(mt8173_output_fmts),
};
static int mtk_dpi_probe(struct platform_device *pdev)
@@ -718,6 +834,7 @@ static int mtk_dpi_probe(struct platform_device *pdev)
dpi->dev = dev;
dpi->conf = (struct mtk_dpi_conf *)of_device_get_match_data(dev);
+ dpi->output_fmt = MEDIA_BUS_FMT_RGB888_1X24;
dpi->pinctrl = devm_pinctrl_get(&pdev->dev);
if (IS_ERR(dpi->pinctrl)) {
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 40df2c823187..474efb844249 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -260,7 +260,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
drm_connector_list_iter_end(&conn_iter);
}
- ret = pm_runtime_get_sync(crtc->dev->dev);
+ ret = pm_runtime_resume_and_get(crtc->dev->dev);
if (ret < 0) {
DRM_ERROR("Failed to enable power domain: %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index dea46d66e712..c1651a83700d 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -148,6 +148,8 @@ struct hdmi_audio_param {
struct mtk_hdmi_conf {
bool tz_disabled;
+ bool cea_modes_only;
+ unsigned long max_mode_clock;
};
struct mtk_hdmi {
@@ -1222,6 +1224,13 @@ static int mtk_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_BAD;
}
+ if (hdmi->conf->cea_modes_only && !drm_match_cea_mode(mode))
+ return MODE_BAD;
+
+ if (hdmi->conf->max_mode_clock &&
+ mode->clock > hdmi->conf->max_mode_clock)
+ return MODE_CLOCK_HIGH;
+
if (mode->clock < 27000)
return MODE_CLOCK_LOW;
if (mode->clock > 297000)
@@ -1778,10 +1787,18 @@ static const struct mtk_hdmi_conf mtk_hdmi_conf_mt2701 = {
.tz_disabled = true,
};
+static const struct mtk_hdmi_conf mtk_hdmi_conf_mt8167 = {
+ .max_mode_clock = 148500,
+ .cea_modes_only = true,
+};
+
static const struct of_device_id mtk_drm_hdmi_of_ids[] = {
{ .compatible = "mediatek,mt2701-hdmi",
.data = &mtk_hdmi_conf_mt2701,
},
+ { .compatible = "mediatek,mt8167-hdmi",
+ .data = &mtk_hdmi_conf_mt8167,
+ },
{ .compatible = "mediatek,mt8173-hdmi",
},
{}
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index dc46339c0678..bc0d60df04ae 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -470,11 +470,12 @@ static int meson_probe_remote(struct platform_device *pdev,
static void meson_drv_shutdown(struct platform_device *pdev)
{
struct meson_drm *priv = dev_get_drvdata(&pdev->dev);
- struct drm_device *drm = priv->drm;
- DRM_DEBUG_DRIVER("\n");
- drm_kms_helper_poll_fini(drm);
- drm_atomic_helper_shutdown(drm);
+ if (!priv)
+ return;
+
+ drm_kms_helper_poll_fini(priv->drm);
+ drm_atomic_helper_shutdown(priv->drm);
}
static int meson_drv_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 610d630326bb..2c00aa70b708 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -58,7 +58,6 @@ msm-y := \
disp/dpu1/dpu_encoder_phys_cmd.o \
disp/dpu1/dpu_encoder_phys_vid.o \
disp/dpu1/dpu_formats.o \
- disp/dpu1/dpu_hw_blk.o \
disp/dpu1/dpu_hw_catalog.o \
disp/dpu1/dpu_hw_ctl.o \
disp/dpu1/dpu_hw_interrupts.o \
@@ -77,6 +76,8 @@ msm-y := \
disp/dpu1/dpu_plane.o \
disp/dpu1/dpu_rm.o \
disp/dpu1/dpu_vbif.o \
+ disp/msm_disp_snapshot.o \
+ disp/msm_disp_snapshot_util.o \
msm_atomic.o \
msm_atomic_tracepoints.o \
msm_debugfs.o \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 54e1b2aa57d5..4ff529576093 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -8,21 +8,21 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/adreno.xml ( 594 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 90159 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14386 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 65048 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 84226 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112556 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 149461 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 184695 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 11218 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_control_regs.xml ( 4559 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pipe_regs.xml ( 2872 bytes, from 2020-07-23 21:58:14)
-
-Copyright (C) 2013-2020 by the following authors:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14386 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 67699 bytes, from 2021-05-31 20:21:57)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84226 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 112551 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 150713 bytes, from 2021-06-10 22:34:02)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 180049 bytes, from 2021-06-02 21:44:19)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-05-27 20:22:36)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-05-27 20:18:13)
+
+Copyright (C) 2013-2021 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -1258,11 +1258,17 @@ static inline uint32_t A2XX_MH_MMU_VA_RANGE_VA_BASE(uint32_t val)
#define REG_A2XX_NQWAIT_UNTIL 0x00000394
-#define REG_A2XX_RBBM_PERFCOUNTER1_SELECT 0x00000395
+#define REG_A2XX_RBBM_PERFCOUNTER0_SELECT 0x00000395
-#define REG_A2XX_RBBM_PERFCOUNTER1_LO 0x00000397
+#define REG_A2XX_RBBM_PERFCOUNTER1_SELECT 0x00000396
-#define REG_A2XX_RBBM_PERFCOUNTER1_HI 0x00000398
+#define REG_A2XX_RBBM_PERFCOUNTER0_LO 0x00000397
+
+#define REG_A2XX_RBBM_PERFCOUNTER0_HI 0x00000398
+
+#define REG_A2XX_RBBM_PERFCOUNTER1_LO 0x00000399
+
+#define REG_A2XX_RBBM_PERFCOUNTER1_HI 0x0000039a
#define REG_A2XX_RBBM_DEBUG 0x0000039b
@@ -2922,10 +2928,28 @@ static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_Y(uint32_t val)
#define REG_A2XX_RB_PERFCOUNTER0_SELECT 0x00000f04
+#define REG_A2XX_RB_PERFCOUNTER1_SELECT 0x00000f05
+
+#define REG_A2XX_RB_PERFCOUNTER2_SELECT 0x00000f06
+
+#define REG_A2XX_RB_PERFCOUNTER3_SELECT 0x00000f07
+
#define REG_A2XX_RB_PERFCOUNTER0_LOW 0x00000f08
#define REG_A2XX_RB_PERFCOUNTER0_HI 0x00000f09
+#define REG_A2XX_RB_PERFCOUNTER1_LOW 0x00000f0a
+
+#define REG_A2XX_RB_PERFCOUNTER1_HI 0x00000f0b
+
+#define REG_A2XX_RB_PERFCOUNTER2_LOW 0x00000f0c
+
+#define REG_A2XX_RB_PERFCOUNTER2_HI 0x00000f0d
+
+#define REG_A2XX_RB_PERFCOUNTER3_LOW 0x00000f0e
+
+#define REG_A2XX_RB_PERFCOUNTER3_HI 0x00000f0f
+
#define REG_A2XX_SQ_TEX_0 0x00000000
#define A2XX_SQ_TEX_0_TYPE__MASK 0x00000003
#define A2XX_SQ_TEX_0_TYPE__SHIFT 0
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 16f9ef453bf8..e106b65abe26 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -8,21 +8,21 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/adreno.xml ( 594 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 90159 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14386 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 65048 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 84226 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112556 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 149461 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 184695 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 11218 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_control_regs.xml ( 4559 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pipe_regs.xml ( 2872 bytes, from 2020-07-23 21:58:14)
-
-Copyright (C) 2013-2020 by the following authors:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14386 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 67699 bytes, from 2021-05-31 20:21:57)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84226 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 112551 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 150713 bytes, from 2021-06-10 22:34:02)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 180049 bytes, from 2021-06-02 21:44:19)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-05-27 20:22:36)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-05-27 20:18:13)
+
+Copyright (C) 2013-2021 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -1215,7 +1215,7 @@ static inline uint32_t A3XX_RB_ALPHA_REF_UINT(uint32_t val)
#define A3XX_RB_ALPHA_REF_FLOAT__SHIFT 16
static inline uint32_t A3XX_RB_ALPHA_REF_FLOAT(float val)
{
- return ((util_float_to_half(val)) << A3XX_RB_ALPHA_REF_FLOAT__SHIFT) & A3XX_RB_ALPHA_REF_FLOAT__MASK;
+ return ((_mesa_float_to_half(val)) << A3XX_RB_ALPHA_REF_FLOAT__SHIFT) & A3XX_RB_ALPHA_REF_FLOAT__MASK;
}
static inline uint32_t REG_A3XX_RB_MRT(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
@@ -1328,7 +1328,7 @@ static inline uint32_t A3XX_RB_BLEND_RED_UINT(uint32_t val)
#define A3XX_RB_BLEND_RED_FLOAT__SHIFT 16
static inline uint32_t A3XX_RB_BLEND_RED_FLOAT(float val)
{
- return ((util_float_to_half(val)) << A3XX_RB_BLEND_RED_FLOAT__SHIFT) & A3XX_RB_BLEND_RED_FLOAT__MASK;
+ return ((_mesa_float_to_half(val)) << A3XX_RB_BLEND_RED_FLOAT__SHIFT) & A3XX_RB_BLEND_RED_FLOAT__MASK;
}
#define REG_A3XX_RB_BLEND_GREEN 0x000020e5
@@ -1342,7 +1342,7 @@ static inline uint32_t A3XX_RB_BLEND_GREEN_UINT(uint32_t val)
#define A3XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
static inline uint32_t A3XX_RB_BLEND_GREEN_FLOAT(float val)
{
- return ((util_float_to_half(val)) << A3XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A3XX_RB_BLEND_GREEN_FLOAT__MASK;
+ return ((_mesa_float_to_half(val)) << A3XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A3XX_RB_BLEND_GREEN_FLOAT__MASK;
}
#define REG_A3XX_RB_BLEND_BLUE 0x000020e6
@@ -1356,7 +1356,7 @@ static inline uint32_t A3XX_RB_BLEND_BLUE_UINT(uint32_t val)
#define A3XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
static inline uint32_t A3XX_RB_BLEND_BLUE_FLOAT(float val)
{
- return ((util_float_to_half(val)) << A3XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A3XX_RB_BLEND_BLUE_FLOAT__MASK;
+ return ((_mesa_float_to_half(val)) << A3XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A3XX_RB_BLEND_BLUE_FLOAT__MASK;
}
#define REG_A3XX_RB_BLEND_ALPHA 0x000020e7
@@ -1370,7 +1370,7 @@ static inline uint32_t A3XX_RB_BLEND_ALPHA_UINT(uint32_t val)
#define A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
static inline uint32_t A3XX_RB_BLEND_ALPHA_FLOAT(float val)
{
- return ((util_float_to_half(val)) << A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A3XX_RB_BLEND_ALPHA_FLOAT__MASK;
+ return ((_mesa_float_to_half(val)) << A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A3XX_RB_BLEND_ALPHA_FLOAT__MASK;
}
#define REG_A3XX_RB_CLEAR_COLOR_DW0 0x000020e8
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index a7eaf2c83fe2..b26ede96ae2a 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -8,21 +8,21 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/adreno.xml ( 594 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 90159 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14386 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 65048 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 84226 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112556 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 149461 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 184695 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 11218 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_control_regs.xml ( 4559 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pipe_regs.xml ( 2872 bytes, from 2020-07-23 21:58:14)
-
-Copyright (C) 2013-2020 by the following authors:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14386 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 67699 bytes, from 2021-05-31 20:21:57)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84226 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 112551 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 150713 bytes, from 2021-06-10 22:34:02)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 180049 bytes, from 2021-06-02 21:44:19)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-05-27 20:22:36)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-05-27 20:18:13)
+
+Copyright (C) 2013-2021 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -1085,7 +1085,7 @@ static inline uint32_t A4XX_RB_BLEND_RED_SINT(uint32_t val)
#define A4XX_RB_BLEND_RED_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_RED_FLOAT(float val)
{
- return ((util_float_to_half(val)) << A4XX_RB_BLEND_RED_FLOAT__SHIFT) & A4XX_RB_BLEND_RED_FLOAT__MASK;
+ return ((_mesa_float_to_half(val)) << A4XX_RB_BLEND_RED_FLOAT__SHIFT) & A4XX_RB_BLEND_RED_FLOAT__MASK;
}
#define REG_A4XX_RB_BLEND_RED_F32 0x000020f1
@@ -1113,7 +1113,7 @@ static inline uint32_t A4XX_RB_BLEND_GREEN_SINT(uint32_t val)
#define A4XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_GREEN_FLOAT(float val)
{
- return ((util_float_to_half(val)) << A4XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A4XX_RB_BLEND_GREEN_FLOAT__MASK;
+ return ((_mesa_float_to_half(val)) << A4XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A4XX_RB_BLEND_GREEN_FLOAT__MASK;
}
#define REG_A4XX_RB_BLEND_GREEN_F32 0x000020f3
@@ -1141,7 +1141,7 @@ static inline uint32_t A4XX_RB_BLEND_BLUE_SINT(uint32_t val)
#define A4XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_BLUE_FLOAT(float val)
{
- return ((util_float_to_half(val)) << A4XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A4XX_RB_BLEND_BLUE_FLOAT__MASK;
+ return ((_mesa_float_to_half(val)) << A4XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A4XX_RB_BLEND_BLUE_FLOAT__MASK;
}
#define REG_A4XX_RB_BLEND_BLUE_F32 0x000020f5
@@ -1169,7 +1169,7 @@ static inline uint32_t A4XX_RB_BLEND_ALPHA_SINT(uint32_t val)
#define A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_ALPHA_FLOAT(float val)
{
- return ((util_float_to_half(val)) << A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A4XX_RB_BLEND_ALPHA_FLOAT__MASK;
+ return ((_mesa_float_to_half(val)) << A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A4XX_RB_BLEND_ALPHA_FLOAT__MASK;
}
#define REG_A4XX_RB_BLEND_ALPHA_F32 0x000020f7
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
index 7b9fcfe95c04..1e575ca1f270 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -8,21 +8,21 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/adreno.xml ( 594 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 90159 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14386 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 65048 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 84226 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112556 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 149461 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 184695 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 11218 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_control_regs.xml ( 4559 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pipe_regs.xml ( 2872 bytes, from 2020-07-23 21:58:14)
-
-Copyright (C) 2013-2020 by the following authors:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14386 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 67699 bytes, from 2021-05-31 20:21:57)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84226 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 112551 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 150713 bytes, from 2021-06-10 22:34:02)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 180049 bytes, from 2021-06-02 21:44:19)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-05-27 20:22:36)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-05-27 20:18:13)
+
+Copyright (C) 2013-2021 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -2021,6 +2021,7 @@ static inline uint32_t A5XX_RBBM_STATUS_CP_ME_BUSY(uint32_t val)
#define A5XX_RBBM_STATUS_HI_BUSY 0x00000001
#define REG_A5XX_RBBM_STATUS3 0x00000530
+#define A5XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT 0x01000000
#define REG_A5XX_RBBM_INT_0_STATUS 0x000004e1
@@ -2351,6 +2352,7 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
#define REG_A5XX_VFD_PERFCTR_VFD_SEL_7 0x00000e57
#define REG_A5XX_VPC_DBG_ECO_CNTL 0x00000e60
+#define A5XX_VPC_DBG_ECO_CNTL_ALLFLATOPTDIS 0x00000400
#define REG_A5XX_VPC_ADDR_MODE_CNTL 0x00000e61
@@ -2808,7 +2810,19 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
#define REG_A5XX_GRAS_CL_CNTL 0x0000e000
#define A5XX_GRAS_CL_CNTL_ZERO_GB_SCALE_Z 0x00000040
-#define REG_A5XX_UNKNOWN_E001 0x0000e001
+#define REG_A5XX_GRAS_VS_CL_CNTL 0x0000e001
+#define A5XX_GRAS_VS_CL_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A5XX_GRAS_VS_CL_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A5XX_GRAS_VS_CL_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_VS_CL_CNTL_CLIP_MASK__SHIFT) & A5XX_GRAS_VS_CL_CNTL_CLIP_MASK__MASK;
+}
+#define A5XX_GRAS_VS_CL_CNTL_CULL_MASK__MASK 0x0000ff00
+#define A5XX_GRAS_VS_CL_CNTL_CULL_MASK__SHIFT 8
+static inline uint32_t A5XX_GRAS_VS_CL_CNTL_CULL_MASK(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_VS_CL_CNTL_CULL_MASK__SHIFT) & A5XX_GRAS_VS_CL_CNTL_CULL_MASK__MASK;
+}
#define REG_A5XX_UNKNOWN_E004 0x0000e004
@@ -3345,7 +3359,7 @@ static inline uint32_t A5XX_RB_BLEND_RED_SINT(uint32_t val)
#define A5XX_RB_BLEND_RED_FLOAT__SHIFT 16
static inline uint32_t A5XX_RB_BLEND_RED_FLOAT(float val)
{
- return ((util_float_to_half(val)) << A5XX_RB_BLEND_RED_FLOAT__SHIFT) & A5XX_RB_BLEND_RED_FLOAT__MASK;
+ return ((_mesa_float_to_half(val)) << A5XX_RB_BLEND_RED_FLOAT__SHIFT) & A5XX_RB_BLEND_RED_FLOAT__MASK;
}
#define REG_A5XX_RB_BLEND_RED_F32 0x0000e1a1
@@ -3373,7 +3387,7 @@ static inline uint32_t A5XX_RB_BLEND_GREEN_SINT(uint32_t val)
#define A5XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
static inline uint32_t A5XX_RB_BLEND_GREEN_FLOAT(float val)
{
- return ((util_float_to_half(val)) << A5XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A5XX_RB_BLEND_GREEN_FLOAT__MASK;
+ return ((_mesa_float_to_half(val)) << A5XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A5XX_RB_BLEND_GREEN_FLOAT__MASK;
}
#define REG_A5XX_RB_BLEND_GREEN_F32 0x0000e1a3
@@ -3401,7 +3415,7 @@ static inline uint32_t A5XX_RB_BLEND_BLUE_SINT(uint32_t val)
#define A5XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
static inline uint32_t A5XX_RB_BLEND_BLUE_FLOAT(float val)
{
- return ((util_float_to_half(val)) << A5XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A5XX_RB_BLEND_BLUE_FLOAT__MASK;
+ return ((_mesa_float_to_half(val)) << A5XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A5XX_RB_BLEND_BLUE_FLOAT__MASK;
}
#define REG_A5XX_RB_BLEND_BLUE_F32 0x0000e1a5
@@ -3429,7 +3443,7 @@ static inline uint32_t A5XX_RB_BLEND_ALPHA_SINT(uint32_t val)
#define A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
static inline uint32_t A5XX_RB_BLEND_ALPHA_FLOAT(float val)
{
- return ((util_float_to_half(val)) << A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A5XX_RB_BLEND_ALPHA_FLOAT__MASK;
+ return ((_mesa_float_to_half(val)) << A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A5XX_RB_BLEND_ALPHA_FLOAT__MASK;
}
#define REG_A5XX_RB_BLEND_ALPHA_F32 0x0000e1a7
@@ -3806,7 +3820,25 @@ static inline uint32_t REG_A5XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x0000e294
#define REG_A5XX_VPC_GS_SIV_CNTL 0x0000e298
-#define REG_A5XX_UNKNOWN_E29A 0x0000e29a
+#define REG_A5XX_VPC_CLIP_CNTL 0x0000e29a
+#define A5XX_VPC_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A5XX_VPC_CLIP_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A5XX_VPC_CLIP_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A5XX_VPC_CLIP_CNTL_CLIP_MASK__SHIFT) & A5XX_VPC_CLIP_CNTL_CLIP_MASK__MASK;
+}
+#define A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC__MASK 0x0000ff00
+#define A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT 8
+static inline uint32_t A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val)
+{
+ return ((val) << A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) & A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC__MASK;
+}
+#define A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC__MASK 0x00ff0000
+#define A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT 16
+static inline uint32_t A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val)
+{
+ return ((val) << A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC__MASK;
+}
#define REG_A5XX_VPC_PACK 0x0000e29d
#define A5XX_VPC_PACK_NUMNONPOSVAR__MASK 0x000000ff
@@ -3910,7 +3942,13 @@ static inline uint32_t A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_su
}
#define A5XX_PC_RASTER_CNTL_POLYMODE_ENABLE 0x00000040
-#define REG_A5XX_UNKNOWN_E389 0x0000e389
+#define REG_A5XX_PC_CLIP_CNTL 0x0000e389
+#define A5XX_PC_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A5XX_PC_CLIP_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A5XX_PC_CLIP_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A5XX_PC_CLIP_CNTL_CLIP_MASK__SHIFT) & A5XX_PC_CLIP_CNTL_CLIP_MASK__MASK;
+}
#define REG_A5XX_PC_RESTART_INDEX 0x0000e38c
@@ -4302,7 +4340,12 @@ static inline uint32_t A5XX_SP_FS_CTRL_REG0_BRANCHSTACK(uint32_t val)
#define REG_A5XX_SP_FS_OBJ_START_HI 0x0000e5c4
#define REG_A5XX_SP_BLEND_CNTL 0x0000e5c9
-#define A5XX_SP_BLEND_CNTL_ENABLED 0x00000001
+#define A5XX_SP_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
+#define A5XX_SP_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
+static inline uint32_t A5XX_SP_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
+{
+ return ((val) << A5XX_SP_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A5XX_SP_BLEND_CNTL_ENABLE_BLEND__MASK;
+}
#define A5XX_SP_BLEND_CNTL_UNK8 0x00000100
#define A5XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
@@ -5192,8 +5235,8 @@ static inline uint32_t A5XX_TEX_SAMP_1_MIN_LOD(float val)
}
#define REG_A5XX_TEX_SAMP_2 0x00000002
-#define A5XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK 0xfffffff0
-#define A5XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT 4
+#define A5XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK 0xffffff80
+#define A5XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT 7
static inline uint32_t A5XX_TEX_SAMP_2_BCOLOR_OFFSET(uint32_t val)
{
return ((val) << A5XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT) & A5XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK;
@@ -5273,6 +5316,7 @@ static inline uint32_t A5XX_TEX_CONST_1_HEIGHT(uint32_t val)
}
#define REG_A5XX_TEX_CONST_2 0x00000002
+#define A5XX_TEX_CONST_2_UNK4 0x00000010
#define A5XX_TEX_CONST_2_PITCHALIGN__MASK 0x0000000f
#define A5XX_TEX_CONST_2_PITCHALIGN__SHIFT 0
static inline uint32_t A5XX_TEX_CONST_2_PITCHALIGN(uint32_t val)
@@ -5291,6 +5335,7 @@ static inline uint32_t A5XX_TEX_CONST_2_TYPE(enum a5xx_tex_type val)
{
return ((val) << A5XX_TEX_CONST_2_TYPE__SHIFT) & A5XX_TEX_CONST_2_TYPE__MASK;
}
+#define A5XX_TEX_CONST_2_UNK31 0x80000000
#define REG_A5XX_TEX_CONST_3 0x00000003
#define A5XX_TEX_CONST_3_ARRAY_PITCH__MASK 0x00003fff
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index ce13d49e615b..7a271de9a212 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -902,7 +902,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
if (!a5xx_gpu->shadow_bo) {
a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
sizeof(u32) * gpu->nr_rings,
- MSM_BO_UNCACHED | MSM_BO_MAP_PRIV,
+ MSM_BO_WC | MSM_BO_MAP_PRIV,
gpu->aspace, &a5xx_gpu->shadow_bo,
&a5xx_gpu->shadow_iova);
@@ -1075,7 +1075,7 @@ bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
return true;
}
-static int a5xx_fault_handler(void *arg, unsigned long iova, int flags)
+static int a5xx_fault_handler(void *arg, unsigned long iova, int flags, void *data)
{
struct msm_gpu *gpu = arg;
pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n",
@@ -1085,7 +1085,7 @@ static int a5xx_fault_handler(void *arg, unsigned long iova, int flags)
gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(6)),
gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(7)));
- return -EFAULT;
+ return 0;
}
static void a5xx_cp_err_irq(struct msm_gpu *gpu)
@@ -1200,6 +1200,15 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
struct drm_device *dev = gpu->dev;
struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
+ /*
+ * If stalled on SMMU fault, we could trip the GPU's hang detection,
+ * but the fault handler will trigger the devcore dump, and we want
+ * to otherwise resume normally rather than killing the submit, so
+ * just bail.
+ */
+ if (gpu_read(gpu, REG_A5XX_RBBM_STATUS3) & BIT(24))
+ return;
+
DRM_DEV_ERROR(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
ring ? ring->id : -1, ring ? ring->seqno : 0,
gpu_read(gpu, REG_A5XX_RBBM_STATUS),
@@ -1407,7 +1416,7 @@ static int a5xx_crashdumper_init(struct msm_gpu *gpu,
struct a5xx_crashdumper *dumper)
{
dumper->ptr = msm_gem_kernel_new_locked(gpu->dev,
- SZ_1M, MSM_BO_UNCACHED, gpu->aspace,
+ SZ_1M, MSM_BO_WC, gpu->aspace,
&dumper->bo, &dumper->iova);
if (!IS_ERR(dumper->ptr))
@@ -1523,6 +1532,7 @@ static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
{
struct a5xx_gpu_state *a5xx_state = kzalloc(sizeof(*a5xx_state),
GFP_KERNEL);
+ bool stalled = !!(gpu_read(gpu, REG_A5XX_RBBM_STATUS3) & BIT(24));
if (!a5xx_state)
return ERR_PTR(-ENOMEM);
@@ -1535,8 +1545,13 @@ static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
a5xx_state->base.rbbm_status = gpu_read(gpu, REG_A5XX_RBBM_STATUS);
- /* Get the HLSQ regs with the help of the crashdumper */
- a5xx_gpu_state_get_hlsq_regs(gpu, a5xx_state);
+ /*
+ * Get the HLSQ regs with the help of the crashdumper, but only if
+ * we are not stalled in an iommu fault (in which case the crashdumper
+ * would not have access to memory)
+ */
+ if (!stalled)
+ a5xx_gpu_state_get_hlsq_regs(gpu, a5xx_state);
a5xx_set_hwcg(gpu, true);
@@ -1705,7 +1720,7 @@ static void check_speed_bin(struct device *dev)
nvmem_cell_put(cell);
}
- dev_pm_opp_set_supported_hw(dev, &val, 1);
+ devm_pm_opp_set_supported_hw(dev, &val, 1);
}
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index c35b06b46fcc..cdb165236a88 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -363,7 +363,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
ptr = msm_gem_kernel_new_locked(drm, bosize,
- MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace,
+ MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace,
&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
if (IS_ERR(ptr))
return;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index 42eaef7ad7c7..ee72510ff8ce 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -230,7 +230,7 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
ptr = msm_gem_kernel_new(gpu->dev,
A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
- MSM_BO_UNCACHED | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova);
+ MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
@@ -238,7 +238,7 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
/* The buffer to store counters needs to be unprivileged */
counters = msm_gem_kernel_new(gpu->dev,
A5XX_PREEMPT_COUNTER_SIZE,
- MSM_BO_UNCACHED, gpu->aspace, &counters_bo, &counters_iova);
+ MSM_BO_WC, gpu->aspace, &counters_bo, &counters_iova);
if (IS_ERR(counters)) {
msm_gem_kernel_put(bo, gpu->aspace, true);
return PTR_ERR(counters);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
index 920c5e6b8e96..a3cb3d988ba4 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -8,21 +8,21 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/adreno.xml ( 594 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 90159 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14386 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 65048 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 84226 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112556 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 149461 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 184695 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 11218 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_control_regs.xml ( 4559 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pipe_regs.xml ( 2872 bytes, from 2020-07-23 21:58:14)
-
-Copyright (C) 2013-2020 by the following authors:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14386 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 67699 bytes, from 2021-05-31 20:21:57)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84226 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 112551 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 150713 bytes, from 2021-06-10 22:34:02)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 180049 bytes, from 2021-06-02 21:44:19)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-05-27 20:22:36)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-05-27 20:18:13)
+
+Copyright (C) 2013-2021 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -168,7 +168,7 @@ enum a6xx_format {
FMT6_ASTC_10x10 = 204,
FMT6_ASTC_12x10 = 205,
FMT6_ASTC_12x12 = 206,
- FMT6_S8Z24_UINT = 234,
+ FMT6_Z24_UINT_S8_UINT = 234,
FMT6_NONE = 255,
};
@@ -907,6 +907,11 @@ enum a6xx_tess_output {
TESS_CCW_TRIS = 3,
};
+enum a6xx_threadsize {
+ THREAD64 = 0,
+ THREAD128 = 1,
+};
+
enum a6xx_tex_filter {
A6XX_TEX_NEAREST = 0,
A6XX_TEX_LINEAR = 1,
@@ -1007,9 +1012,7 @@ enum a6xx_tex_type {
#define REG_A6XX_CP_PROTECT_STATUS 0x00000824
-#define REG_A6XX_CP_SQE_INSTR_BASE_LO 0x00000830
-
-#define REG_A6XX_CP_SQE_INSTR_BASE_HI 0x00000831
+#define REG_A6XX_CP_SQE_INSTR_BASE 0x00000830
#define REG_A6XX_CP_MISC_CNTL 0x00000840
@@ -1104,33 +1107,7 @@ static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
#define REG_A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI 0x000008a8
-#define REG_A6XX_CP_PERFCTR_CP_SEL_0 0x000008d0
-
-#define REG_A6XX_CP_PERFCTR_CP_SEL_1 0x000008d1
-
-#define REG_A6XX_CP_PERFCTR_CP_SEL_2 0x000008d2
-
-#define REG_A6XX_CP_PERFCTR_CP_SEL_3 0x000008d3
-
-#define REG_A6XX_CP_PERFCTR_CP_SEL_4 0x000008d4
-
-#define REG_A6XX_CP_PERFCTR_CP_SEL_5 0x000008d5
-
-#define REG_A6XX_CP_PERFCTR_CP_SEL_6 0x000008d6
-
-#define REG_A6XX_CP_PERFCTR_CP_SEL_7 0x000008d7
-
-#define REG_A6XX_CP_PERFCTR_CP_SEL_8 0x000008d8
-
-#define REG_A6XX_CP_PERFCTR_CP_SEL_9 0x000008d9
-
-#define REG_A6XX_CP_PERFCTR_CP_SEL_10 0x000008da
-
-#define REG_A6XX_CP_PERFCTR_CP_SEL_11 0x000008db
-
-#define REG_A6XX_CP_PERFCTR_CP_SEL_12 0x000008dc
-
-#define REG_A6XX_CP_PERFCTR_CP_SEL_13 0x000008dd
+static inline uint32_t REG_A6XX_CP_PERFCTR_CP_SEL(uint32_t i0) { return 0x000008d0 + 0x1*i0; }
#define REG_A6XX_CP_CRASH_SCRIPT_BASE_LO 0x00000900
@@ -1176,15 +1153,21 @@ static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
#define REG_A6XX_CP_SDS_BASE_HI 0x0000092f
-#define REG_A6XX_CP_SDS_REM_SIZE 0x0000092e
+#define REG_A6XX_CP_SDS_REM_SIZE 0x00000930
+
+#define REG_A6XX_CP_MRB_BASE 0x00000931
-#define REG_A6XX_CP_BIN_SIZE_ADDRESS 0x00000931
+#define REG_A6XX_CP_MRB_BASE_HI 0x00000932
-#define REG_A6XX_CP_BIN_SIZE_ADDRESS_HI 0x00000932
+#define REG_A6XX_CP_MRB_REM_SIZE 0x00000933
-#define REG_A6XX_CP_BIN_DATA_ADDR 0x00000934
+#define REG_A6XX_CP_VSD_BASE 0x00000934
-#define REG_A6XX_CP_BIN_DATA_ADDR_HI 0x00000935
+#define REG_A6XX_CP_VSD_BASE_HI 0x00000935
+
+#define REG_A6XX_CP_MRB_DWORDS 0x00000946
+
+#define REG_A6XX_CP_VSD_DWORDS 0x00000947
#define REG_A6XX_CP_CSQ_IB1_STAT 0x00000949
#define A6XX_CP_CSQ_IB1_STAT_REM__MASK 0xffff0000
@@ -1202,6 +1185,14 @@ static inline uint32_t A6XX_CP_CSQ_IB2_STAT_REM(uint32_t val)
return ((val) << A6XX_CP_CSQ_IB2_STAT_REM__SHIFT) & A6XX_CP_CSQ_IB2_STAT_REM__MASK;
}
+#define REG_A6XX_CP_MRQ_MRB_STAT 0x0000094c
+#define A6XX_CP_MRQ_MRB_STAT_REM__MASK 0xffff0000
+#define A6XX_CP_MRQ_MRB_STAT_REM__SHIFT 16
+static inline uint32_t A6XX_CP_MRQ_MRB_STAT_REM(uint32_t val)
+{
+ return ((val) << A6XX_CP_MRQ_MRB_STAT_REM__SHIFT) & A6XX_CP_MRQ_MRB_STAT_REM__MASK;
+}
+
#define REG_A6XX_CP_ALWAYS_ON_COUNTER_LO 0x00000980
#define REG_A6XX_CP_ALWAYS_ON_COUNTER_HI 0x00000981
@@ -1212,6 +1203,10 @@ static inline uint32_t A6XX_CP_CSQ_IB2_STAT_REM(uint32_t val)
#define REG_A6XX_CP_APERTURE_CNTL_CD 0x00000a03
+#define REG_A6XX_CP_LPAC_PROG_FIFO_SIZE 0x00000b34
+
+#define REG_A6XX_CP_LPAC_SQE_INSTR_BASE 0x00000b82
+
#define REG_A6XX_VSC_ADDR_MODE_CNTL 0x00000c01
#define REG_A6XX_RBBM_INT_0_STATUS 0x00000201
@@ -1247,505 +1242,37 @@ static inline uint32_t A6XX_CP_CSQ_IB2_STAT_REM(uint32_t val)
#define REG_A6XX_RBBM_VBIF_GX_RESET_STATUS 0x00000215
-#define REG_A6XX_RBBM_PERFCTR_CP_0_LO 0x00000400
-
-#define REG_A6XX_RBBM_PERFCTR_CP_0_HI 0x00000401
-
-#define REG_A6XX_RBBM_PERFCTR_CP_1_LO 0x00000402
-
-#define REG_A6XX_RBBM_PERFCTR_CP_1_HI 0x00000403
-
-#define REG_A6XX_RBBM_PERFCTR_CP_2_LO 0x00000404
-
-#define REG_A6XX_RBBM_PERFCTR_CP_2_HI 0x00000405
-
-#define REG_A6XX_RBBM_PERFCTR_CP_3_LO 0x00000406
-
-#define REG_A6XX_RBBM_PERFCTR_CP_3_HI 0x00000407
-
-#define REG_A6XX_RBBM_PERFCTR_CP_4_LO 0x00000408
-
-#define REG_A6XX_RBBM_PERFCTR_CP_4_HI 0x00000409
-
-#define REG_A6XX_RBBM_PERFCTR_CP_5_LO 0x0000040a
-
-#define REG_A6XX_RBBM_PERFCTR_CP_5_HI 0x0000040b
-
-#define REG_A6XX_RBBM_PERFCTR_CP_6_LO 0x0000040c
-
-#define REG_A6XX_RBBM_PERFCTR_CP_6_HI 0x0000040d
-
-#define REG_A6XX_RBBM_PERFCTR_CP_7_LO 0x0000040e
-
-#define REG_A6XX_RBBM_PERFCTR_CP_7_HI 0x0000040f
-
-#define REG_A6XX_RBBM_PERFCTR_CP_8_LO 0x00000410
-
-#define REG_A6XX_RBBM_PERFCTR_CP_8_HI 0x00000411
-
-#define REG_A6XX_RBBM_PERFCTR_CP_9_LO 0x00000412
-
-#define REG_A6XX_RBBM_PERFCTR_CP_9_HI 0x00000413
-
-#define REG_A6XX_RBBM_PERFCTR_CP_10_LO 0x00000414
-
-#define REG_A6XX_RBBM_PERFCTR_CP_10_HI 0x00000415
-
-#define REG_A6XX_RBBM_PERFCTR_CP_11_LO 0x00000416
-
-#define REG_A6XX_RBBM_PERFCTR_CP_11_HI 0x00000417
-
-#define REG_A6XX_RBBM_PERFCTR_CP_12_LO 0x00000418
-
-#define REG_A6XX_RBBM_PERFCTR_CP_12_HI 0x00000419
-
-#define REG_A6XX_RBBM_PERFCTR_CP_13_LO 0x0000041a
-
-#define REG_A6XX_RBBM_PERFCTR_CP_13_HI 0x0000041b
-
-#define REG_A6XX_RBBM_PERFCTR_RBBM_0_LO 0x0000041c
-
-#define REG_A6XX_RBBM_PERFCTR_RBBM_0_HI 0x0000041d
-
-#define REG_A6XX_RBBM_PERFCTR_RBBM_1_LO 0x0000041e
-
-#define REG_A6XX_RBBM_PERFCTR_RBBM_1_HI 0x0000041f
-
-#define REG_A6XX_RBBM_PERFCTR_RBBM_2_LO 0x00000420
-
-#define REG_A6XX_RBBM_PERFCTR_RBBM_2_HI 0x00000421
-
-#define REG_A6XX_RBBM_PERFCTR_RBBM_3_LO 0x00000422
-
-#define REG_A6XX_RBBM_PERFCTR_RBBM_3_HI 0x00000423
-
-#define REG_A6XX_RBBM_PERFCTR_PC_0_LO 0x00000424
-
-#define REG_A6XX_RBBM_PERFCTR_PC_0_HI 0x00000425
-
-#define REG_A6XX_RBBM_PERFCTR_PC_1_LO 0x00000426
-
-#define REG_A6XX_RBBM_PERFCTR_PC_1_HI 0x00000427
-
-#define REG_A6XX_RBBM_PERFCTR_PC_2_LO 0x00000428
-
-#define REG_A6XX_RBBM_PERFCTR_PC_2_HI 0x00000429
-
-#define REG_A6XX_RBBM_PERFCTR_PC_3_LO 0x0000042a
-
-#define REG_A6XX_RBBM_PERFCTR_PC_3_HI 0x0000042b
-
-#define REG_A6XX_RBBM_PERFCTR_PC_4_LO 0x0000042c
-
-#define REG_A6XX_RBBM_PERFCTR_PC_4_HI 0x0000042d
-
-#define REG_A6XX_RBBM_PERFCTR_PC_5_LO 0x0000042e
-
-#define REG_A6XX_RBBM_PERFCTR_PC_5_HI 0x0000042f
-
-#define REG_A6XX_RBBM_PERFCTR_PC_6_LO 0x00000430
-
-#define REG_A6XX_RBBM_PERFCTR_PC_6_HI 0x00000431
-
-#define REG_A6XX_RBBM_PERFCTR_PC_7_LO 0x00000432
-
-#define REG_A6XX_RBBM_PERFCTR_PC_7_HI 0x00000433
-
-#define REG_A6XX_RBBM_PERFCTR_VFD_0_LO 0x00000434
-
-#define REG_A6XX_RBBM_PERFCTR_VFD_0_HI 0x00000435
-
-#define REG_A6XX_RBBM_PERFCTR_VFD_1_LO 0x00000436
-
-#define REG_A6XX_RBBM_PERFCTR_VFD_1_HI 0x00000437
-
-#define REG_A6XX_RBBM_PERFCTR_VFD_2_LO 0x00000438
-
-#define REG_A6XX_RBBM_PERFCTR_VFD_2_HI 0x00000439
-
-#define REG_A6XX_RBBM_PERFCTR_VFD_3_LO 0x0000043a
-
-#define REG_A6XX_RBBM_PERFCTR_VFD_3_HI 0x0000043b
-
-#define REG_A6XX_RBBM_PERFCTR_VFD_4_LO 0x0000043c
-
-#define REG_A6XX_RBBM_PERFCTR_VFD_4_HI 0x0000043d
-
-#define REG_A6XX_RBBM_PERFCTR_VFD_5_LO 0x0000043e
-
-#define REG_A6XX_RBBM_PERFCTR_VFD_5_HI 0x0000043f
-
-#define REG_A6XX_RBBM_PERFCTR_VFD_6_LO 0x00000440
-
-#define REG_A6XX_RBBM_PERFCTR_VFD_6_HI 0x00000441
-
-#define REG_A6XX_RBBM_PERFCTR_VFD_7_LO 0x00000442
-
-#define REG_A6XX_RBBM_PERFCTR_VFD_7_HI 0x00000443
-
-#define REG_A6XX_RBBM_PERFCTR_HLSQ_0_LO 0x00000444
-
-#define REG_A6XX_RBBM_PERFCTR_HLSQ_0_HI 0x00000445
-
-#define REG_A6XX_RBBM_PERFCTR_HLSQ_1_LO 0x00000446
-
-#define REG_A6XX_RBBM_PERFCTR_HLSQ_1_HI 0x00000447
-
-#define REG_A6XX_RBBM_PERFCTR_HLSQ_2_LO 0x00000448
-
-#define REG_A6XX_RBBM_PERFCTR_HLSQ_2_HI 0x00000449
-
-#define REG_A6XX_RBBM_PERFCTR_HLSQ_3_LO 0x0000044a
-
-#define REG_A6XX_RBBM_PERFCTR_HLSQ_3_HI 0x0000044b
-
-#define REG_A6XX_RBBM_PERFCTR_HLSQ_4_LO 0x0000044c
-
-#define REG_A6XX_RBBM_PERFCTR_HLSQ_4_HI 0x0000044d
-
-#define REG_A6XX_RBBM_PERFCTR_HLSQ_5_LO 0x0000044e
-
-#define REG_A6XX_RBBM_PERFCTR_HLSQ_5_HI 0x0000044f
-
-#define REG_A6XX_RBBM_PERFCTR_VPC_0_LO 0x00000450
-
-#define REG_A6XX_RBBM_PERFCTR_VPC_0_HI 0x00000451
-
-#define REG_A6XX_RBBM_PERFCTR_VPC_1_LO 0x00000452
-
-#define REG_A6XX_RBBM_PERFCTR_VPC_1_HI 0x00000453
-
-#define REG_A6XX_RBBM_PERFCTR_VPC_2_LO 0x00000454
-
-#define REG_A6XX_RBBM_PERFCTR_VPC_2_HI 0x00000455
-
-#define REG_A6XX_RBBM_PERFCTR_VPC_3_LO 0x00000456
-
-#define REG_A6XX_RBBM_PERFCTR_VPC_3_HI 0x00000457
-
-#define REG_A6XX_RBBM_PERFCTR_VPC_4_LO 0x00000458
-
-#define REG_A6XX_RBBM_PERFCTR_VPC_4_HI 0x00000459
-
-#define REG_A6XX_RBBM_PERFCTR_VPC_5_LO 0x0000045a
-
-#define REG_A6XX_RBBM_PERFCTR_VPC_5_HI 0x0000045b
-
-#define REG_A6XX_RBBM_PERFCTR_CCU_0_LO 0x0000045c
-
-#define REG_A6XX_RBBM_PERFCTR_CCU_0_HI 0x0000045d
-
-#define REG_A6XX_RBBM_PERFCTR_CCU_1_LO 0x0000045e
-
-#define REG_A6XX_RBBM_PERFCTR_CCU_1_HI 0x0000045f
-
-#define REG_A6XX_RBBM_PERFCTR_CCU_2_LO 0x00000460
-
-#define REG_A6XX_RBBM_PERFCTR_CCU_2_HI 0x00000461
-
-#define REG_A6XX_RBBM_PERFCTR_CCU_3_LO 0x00000462
-
-#define REG_A6XX_RBBM_PERFCTR_CCU_3_HI 0x00000463
-
-#define REG_A6XX_RBBM_PERFCTR_CCU_4_LO 0x00000464
-
-#define REG_A6XX_RBBM_PERFCTR_CCU_4_HI 0x00000465
-
-#define REG_A6XX_RBBM_PERFCTR_TSE_0_LO 0x00000466
-
-#define REG_A6XX_RBBM_PERFCTR_TSE_0_HI 0x00000467
-
-#define REG_A6XX_RBBM_PERFCTR_TSE_1_LO 0x00000468
-
-#define REG_A6XX_RBBM_PERFCTR_TSE_1_HI 0x00000469
-
-#define REG_A6XX_RBBM_PERFCTR_TSE_2_LO 0x0000046a
-
-#define REG_A6XX_RBBM_PERFCTR_TSE_2_HI 0x0000046b
-
-#define REG_A6XX_RBBM_PERFCTR_TSE_3_LO 0x0000046c
-
-#define REG_A6XX_RBBM_PERFCTR_TSE_3_HI 0x0000046d
-
-#define REG_A6XX_RBBM_PERFCTR_RAS_0_LO 0x0000046e
-
-#define REG_A6XX_RBBM_PERFCTR_RAS_0_HI 0x0000046f
-
-#define REG_A6XX_RBBM_PERFCTR_RAS_1_LO 0x00000470
-
-#define REG_A6XX_RBBM_PERFCTR_RAS_1_HI 0x00000471
-
-#define REG_A6XX_RBBM_PERFCTR_RAS_2_LO 0x00000472
-
-#define REG_A6XX_RBBM_PERFCTR_RAS_2_HI 0x00000473
-
-#define REG_A6XX_RBBM_PERFCTR_RAS_3_LO 0x00000474
-
-#define REG_A6XX_RBBM_PERFCTR_RAS_3_HI 0x00000475
-
-#define REG_A6XX_RBBM_PERFCTR_UCHE_0_LO 0x00000476
-
-#define REG_A6XX_RBBM_PERFCTR_UCHE_0_HI 0x00000477
-
-#define REG_A6XX_RBBM_PERFCTR_UCHE_1_LO 0x00000478
-
-#define REG_A6XX_RBBM_PERFCTR_UCHE_1_HI 0x00000479
-
-#define REG_A6XX_RBBM_PERFCTR_UCHE_2_LO 0x0000047a
-
-#define REG_A6XX_RBBM_PERFCTR_UCHE_2_HI 0x0000047b
-
-#define REG_A6XX_RBBM_PERFCTR_UCHE_3_LO 0x0000047c
-
-#define REG_A6XX_RBBM_PERFCTR_UCHE_3_HI 0x0000047d
-
-#define REG_A6XX_RBBM_PERFCTR_UCHE_4_LO 0x0000047e
-
-#define REG_A6XX_RBBM_PERFCTR_UCHE_4_HI 0x0000047f
-
-#define REG_A6XX_RBBM_PERFCTR_UCHE_5_LO 0x00000480
-
-#define REG_A6XX_RBBM_PERFCTR_UCHE_5_HI 0x00000481
-
-#define REG_A6XX_RBBM_PERFCTR_UCHE_6_LO 0x00000482
-
-#define REG_A6XX_RBBM_PERFCTR_UCHE_6_HI 0x00000483
-
-#define REG_A6XX_RBBM_PERFCTR_UCHE_7_LO 0x00000484
-
-#define REG_A6XX_RBBM_PERFCTR_UCHE_7_HI 0x00000485
-
-#define REG_A6XX_RBBM_PERFCTR_UCHE_8_LO 0x00000486
-
-#define REG_A6XX_RBBM_PERFCTR_UCHE_8_HI 0x00000487
-
-#define REG_A6XX_RBBM_PERFCTR_UCHE_9_LO 0x00000488
-
-#define REG_A6XX_RBBM_PERFCTR_UCHE_9_HI 0x00000489
-
-#define REG_A6XX_RBBM_PERFCTR_UCHE_10_LO 0x0000048a
-
-#define REG_A6XX_RBBM_PERFCTR_UCHE_10_HI 0x0000048b
-
-#define REG_A6XX_RBBM_PERFCTR_UCHE_11_LO 0x0000048c
-
-#define REG_A6XX_RBBM_PERFCTR_UCHE_11_HI 0x0000048d
-
-#define REG_A6XX_RBBM_PERFCTR_TP_0_LO 0x0000048e
-
-#define REG_A6XX_RBBM_PERFCTR_TP_0_HI 0x0000048f
-
-#define REG_A6XX_RBBM_PERFCTR_TP_1_LO 0x00000490
-
-#define REG_A6XX_RBBM_PERFCTR_TP_1_HI 0x00000491
-
-#define REG_A6XX_RBBM_PERFCTR_TP_2_LO 0x00000492
-
-#define REG_A6XX_RBBM_PERFCTR_TP_2_HI 0x00000493
-
-#define REG_A6XX_RBBM_PERFCTR_TP_3_LO 0x00000494
-
-#define REG_A6XX_RBBM_PERFCTR_TP_3_HI 0x00000495
-
-#define REG_A6XX_RBBM_PERFCTR_TP_4_LO 0x00000496
-
-#define REG_A6XX_RBBM_PERFCTR_TP_4_HI 0x00000497
-
-#define REG_A6XX_RBBM_PERFCTR_TP_5_LO 0x00000498
-
-#define REG_A6XX_RBBM_PERFCTR_TP_5_HI 0x00000499
-
-#define REG_A6XX_RBBM_PERFCTR_TP_6_LO 0x0000049a
-
-#define REG_A6XX_RBBM_PERFCTR_TP_6_HI 0x0000049b
-
-#define REG_A6XX_RBBM_PERFCTR_TP_7_LO 0x0000049c
-
-#define REG_A6XX_RBBM_PERFCTR_TP_7_HI 0x0000049d
-
-#define REG_A6XX_RBBM_PERFCTR_TP_8_LO 0x0000049e
-
-#define REG_A6XX_RBBM_PERFCTR_TP_8_HI 0x0000049f
-
-#define REG_A6XX_RBBM_PERFCTR_TP_9_LO 0x000004a0
-
-#define REG_A6XX_RBBM_PERFCTR_TP_9_HI 0x000004a1
-
-#define REG_A6XX_RBBM_PERFCTR_TP_10_LO 0x000004a2
-
-#define REG_A6XX_RBBM_PERFCTR_TP_10_HI 0x000004a3
-
-#define REG_A6XX_RBBM_PERFCTR_TP_11_LO 0x000004a4
-
-#define REG_A6XX_RBBM_PERFCTR_TP_11_HI 0x000004a5
-
-#define REG_A6XX_RBBM_PERFCTR_SP_0_LO 0x000004a6
-
-#define REG_A6XX_RBBM_PERFCTR_SP_0_HI 0x000004a7
-
-#define REG_A6XX_RBBM_PERFCTR_SP_1_LO 0x000004a8
-
-#define REG_A6XX_RBBM_PERFCTR_SP_1_HI 0x000004a9
-
-#define REG_A6XX_RBBM_PERFCTR_SP_2_LO 0x000004aa
-
-#define REG_A6XX_RBBM_PERFCTR_SP_2_HI 0x000004ab
-
-#define REG_A6XX_RBBM_PERFCTR_SP_3_LO 0x000004ac
-
-#define REG_A6XX_RBBM_PERFCTR_SP_3_HI 0x000004ad
-
-#define REG_A6XX_RBBM_PERFCTR_SP_4_LO 0x000004ae
-
-#define REG_A6XX_RBBM_PERFCTR_SP_4_HI 0x000004af
-
-#define REG_A6XX_RBBM_PERFCTR_SP_5_LO 0x000004b0
-
-#define REG_A6XX_RBBM_PERFCTR_SP_5_HI 0x000004b1
-
-#define REG_A6XX_RBBM_PERFCTR_SP_6_LO 0x000004b2
-
-#define REG_A6XX_RBBM_PERFCTR_SP_6_HI 0x000004b3
-
-#define REG_A6XX_RBBM_PERFCTR_SP_7_LO 0x000004b4
-
-#define REG_A6XX_RBBM_PERFCTR_SP_7_HI 0x000004b5
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_CP(uint32_t i0) { return 0x00000400 + 0x2*i0; }
-#define REG_A6XX_RBBM_PERFCTR_SP_8_LO 0x000004b6
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM(uint32_t i0) { return 0x0000041c + 0x2*i0; }
-#define REG_A6XX_RBBM_PERFCTR_SP_8_HI 0x000004b7
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_PC(uint32_t i0) { return 0x00000424 + 0x2*i0; }
-#define REG_A6XX_RBBM_PERFCTR_SP_9_LO 0x000004b8
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_VFD(uint32_t i0) { return 0x00000434 + 0x2*i0; }
-#define REG_A6XX_RBBM_PERFCTR_SP_9_HI 0x000004b9
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_HLSQ(uint32_t i0) { return 0x00000444 + 0x2*i0; }
-#define REG_A6XX_RBBM_PERFCTR_SP_10_LO 0x000004ba
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_VPC(uint32_t i0) { return 0x00000450 + 0x2*i0; }
-#define REG_A6XX_RBBM_PERFCTR_SP_10_HI 0x000004bb
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_CCU(uint32_t i0) { return 0x0000045c + 0x2*i0; }
-#define REG_A6XX_RBBM_PERFCTR_SP_11_LO 0x000004bc
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_TSE(uint32_t i0) { return 0x00000466 + 0x2*i0; }
-#define REG_A6XX_RBBM_PERFCTR_SP_11_HI 0x000004bd
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_RAS(uint32_t i0) { return 0x0000046e + 0x2*i0; }
-#define REG_A6XX_RBBM_PERFCTR_SP_12_LO 0x000004be
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_UCHE(uint32_t i0) { return 0x00000476 + 0x2*i0; }
-#define REG_A6XX_RBBM_PERFCTR_SP_12_HI 0x000004bf
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_TP(uint32_t i0) { return 0x0000048e + 0x2*i0; }
-#define REG_A6XX_RBBM_PERFCTR_SP_13_LO 0x000004c0
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_SP(uint32_t i0) { return 0x000004a6 + 0x2*i0; }
-#define REG_A6XX_RBBM_PERFCTR_SP_13_HI 0x000004c1
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_RB(uint32_t i0) { return 0x000004d6 + 0x2*i0; }
-#define REG_A6XX_RBBM_PERFCTR_SP_14_LO 0x000004c2
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_VSC(uint32_t i0) { return 0x000004e6 + 0x2*i0; }
-#define REG_A6XX_RBBM_PERFCTR_SP_14_HI 0x000004c3
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_LRZ(uint32_t i0) { return 0x000004ea + 0x2*i0; }
-#define REG_A6XX_RBBM_PERFCTR_SP_15_LO 0x000004c4
-
-#define REG_A6XX_RBBM_PERFCTR_SP_15_HI 0x000004c5
-
-#define REG_A6XX_RBBM_PERFCTR_SP_16_LO 0x000004c6
-
-#define REG_A6XX_RBBM_PERFCTR_SP_16_HI 0x000004c7
-
-#define REG_A6XX_RBBM_PERFCTR_SP_17_LO 0x000004c8
-
-#define REG_A6XX_RBBM_PERFCTR_SP_17_HI 0x000004c9
-
-#define REG_A6XX_RBBM_PERFCTR_SP_18_LO 0x000004ca
-
-#define REG_A6XX_RBBM_PERFCTR_SP_18_HI 0x000004cb
-
-#define REG_A6XX_RBBM_PERFCTR_SP_19_LO 0x000004cc
-
-#define REG_A6XX_RBBM_PERFCTR_SP_19_HI 0x000004cd
-
-#define REG_A6XX_RBBM_PERFCTR_SP_20_LO 0x000004ce
-
-#define REG_A6XX_RBBM_PERFCTR_SP_20_HI 0x000004cf
-
-#define REG_A6XX_RBBM_PERFCTR_SP_21_LO 0x000004d0
-
-#define REG_A6XX_RBBM_PERFCTR_SP_21_HI 0x000004d1
-
-#define REG_A6XX_RBBM_PERFCTR_SP_22_LO 0x000004d2
-
-#define REG_A6XX_RBBM_PERFCTR_SP_22_HI 0x000004d3
-
-#define REG_A6XX_RBBM_PERFCTR_SP_23_LO 0x000004d4
-
-#define REG_A6XX_RBBM_PERFCTR_SP_23_HI 0x000004d5
-
-#define REG_A6XX_RBBM_PERFCTR_RB_0_LO 0x000004d6
-
-#define REG_A6XX_RBBM_PERFCTR_RB_0_HI 0x000004d7
-
-#define REG_A6XX_RBBM_PERFCTR_RB_1_LO 0x000004d8
-
-#define REG_A6XX_RBBM_PERFCTR_RB_1_HI 0x000004d9
-
-#define REG_A6XX_RBBM_PERFCTR_RB_2_LO 0x000004da
-
-#define REG_A6XX_RBBM_PERFCTR_RB_2_HI 0x000004db
-
-#define REG_A6XX_RBBM_PERFCTR_RB_3_LO 0x000004dc
-
-#define REG_A6XX_RBBM_PERFCTR_RB_3_HI 0x000004dd
-
-#define REG_A6XX_RBBM_PERFCTR_RB_4_LO 0x000004de
-
-#define REG_A6XX_RBBM_PERFCTR_RB_4_HI 0x000004df
-
-#define REG_A6XX_RBBM_PERFCTR_RB_5_LO 0x000004e0
-
-#define REG_A6XX_RBBM_PERFCTR_RB_5_HI 0x000004e1
-
-#define REG_A6XX_RBBM_PERFCTR_RB_6_LO 0x000004e2
-
-#define REG_A6XX_RBBM_PERFCTR_RB_6_HI 0x000004e3
-
-#define REG_A6XX_RBBM_PERFCTR_RB_7_LO 0x000004e4
-
-#define REG_A6XX_RBBM_PERFCTR_RB_7_HI 0x000004e5
-
-#define REG_A6XX_RBBM_PERFCTR_VSC_0_LO 0x000004e6
-
-#define REG_A6XX_RBBM_PERFCTR_VSC_0_HI 0x000004e7
-
-#define REG_A6XX_RBBM_PERFCTR_VSC_1_LO 0x000004e8
-
-#define REG_A6XX_RBBM_PERFCTR_VSC_1_HI 0x000004e9
-
-#define REG_A6XX_RBBM_PERFCTR_LRZ_0_LO 0x000004ea
-
-#define REG_A6XX_RBBM_PERFCTR_LRZ_0_HI 0x000004eb
-
-#define REG_A6XX_RBBM_PERFCTR_LRZ_1_LO 0x000004ec
-
-#define REG_A6XX_RBBM_PERFCTR_LRZ_1_HI 0x000004ed
-
-#define REG_A6XX_RBBM_PERFCTR_LRZ_2_LO 0x000004ee
-
-#define REG_A6XX_RBBM_PERFCTR_LRZ_2_HI 0x000004ef
-
-#define REG_A6XX_RBBM_PERFCTR_LRZ_3_LO 0x000004f0
-
-#define REG_A6XX_RBBM_PERFCTR_LRZ_3_HI 0x000004f1
-
-#define REG_A6XX_RBBM_PERFCTR_CMP_0_LO 0x000004f2
-
-#define REG_A6XX_RBBM_PERFCTR_CMP_0_HI 0x000004f3
-
-#define REG_A6XX_RBBM_PERFCTR_CMP_1_LO 0x000004f4
-
-#define REG_A6XX_RBBM_PERFCTR_CMP_1_HI 0x000004f5
-
-#define REG_A6XX_RBBM_PERFCTR_CMP_2_LO 0x000004f6
-
-#define REG_A6XX_RBBM_PERFCTR_CMP_2_HI 0x000004f7
-
-#define REG_A6XX_RBBM_PERFCTR_CMP_3_LO 0x000004f8
-
-#define REG_A6XX_RBBM_PERFCTR_CMP_3_HI 0x000004f9
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_CMP(uint32_t i0) { return 0x000004f2 + 0x2*i0; }
#define REG_A6XX_RBBM_PERFCTR_CNTL 0x00000500
@@ -1761,13 +1288,7 @@ static inline uint32_t A6XX_CP_CSQ_IB2_STAT_REM(uint32_t val)
#define REG_A6XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000506
-#define REG_A6XX_RBBM_PERFCTR_RBBM_SEL_0 0x00000507
-
-#define REG_A6XX_RBBM_PERFCTR_RBBM_SEL_1 0x00000508
-
-#define REG_A6XX_RBBM_PERFCTR_RBBM_SEL_2 0x00000509
-
-#define REG_A6XX_RBBM_PERFCTR_RBBM_SEL_3 0x0000050a
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM_SEL(uint32_t i0) { return 0x00000507 + 0x1*i0; }
#define REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED 0x0000050b
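
A minimal sketch (not part of the patch): the hunks above collapse the numbered per-counter #defines into indexed inline helpers, where the SEL registers step by one dword and the 64-bit counter pairs by two. The helpers below are copied verbatim from the new header; the assert/main harness is added purely for illustration.

#include <assert.h>
#include <stdint.h>

/* copied from the hunks above */
static inline uint32_t REG_A6XX_RBBM_PERFCTR_SP(uint32_t i0) { return 0x000004a6 + 0x2*i0; }
static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM_SEL(uint32_t i0) { return 0x00000507 + 0x1*i0; }

int main(void)
{
	/* matches the removed REG_A6XX_RBBM_PERFCTR_SP_12_LO (0x000004be) */
	assert(REG_A6XX_RBBM_PERFCTR_SP(12) == 0x4be);
	/* matches the removed REG_A6XX_RBBM_PERFCTR_RBBM_SEL_2 (0x00000509) */
	assert(REG_A6XX_RBBM_PERFCTR_RBBM_SEL(2) == 0x509);
	return 0;
}
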
@@ -2240,46 +1761,12 @@ static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
#define REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00000630
-#define REG_A6XX_VSC_PERFCTR_VSC_SEL_0 0x00000cd8
-
-#define REG_A6XX_VSC_PERFCTR_VSC_SEL_1 0x00000cd9
-
-#define REG_A6XX_HLSQ_ADDR_MODE_CNTL 0x0000be05
-
-#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x0000be10
-
-#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x0000be11
-
-#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x0000be12
-
-#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x0000be13
-
-#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x0000be14
-
-#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x0000be15
+static inline uint32_t REG_A6XX_VSC_PERFCTR_VSC_SEL(uint32_t i0) { return 0x00000cd8 + 0x1*i0; }
#define REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE 0x0000c800
#define REG_A6XX_HLSQ_DBG_READ_SEL 0x0000d000
-#define REG_A6XX_VFD_ADDR_MODE_CNTL 0x0000a601
-
-#define REG_A6XX_VFD_PERFCTR_VFD_SEL_0 0x0000a610
-
-#define REG_A6XX_VFD_PERFCTR_VFD_SEL_1 0x0000a611
-
-#define REG_A6XX_VFD_PERFCTR_VFD_SEL_2 0x0000a612
-
-#define REG_A6XX_VFD_PERFCTR_VFD_SEL_3 0x0000a613
-
-#define REG_A6XX_VFD_PERFCTR_VFD_SEL_4 0x0000a614
-
-#define REG_A6XX_VFD_PERFCTR_VFD_SEL_5 0x0000a615
-
-#define REG_A6XX_VFD_PERFCTR_VFD_SEL_6 0x0000a616
-
-#define REG_A6XX_VFD_PERFCTR_VFD_SEL_7 0x0000a617
-
#define REG_A6XX_UCHE_ADDR_MODE_CNTL 0x00000e00
#define REG_A6XX_UCHE_MODE_CNTL 0x00000e01
@@ -2316,119 +1803,9 @@ static inline uint32_t A6XX_UCHE_CLIENT_PF_PERFSEL(uint32_t val)
return ((val) << A6XX_UCHE_CLIENT_PF_PERFSEL__SHIFT) & A6XX_UCHE_CLIENT_PF_PERFSEL__MASK;
}
-#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000e1c
-
-#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000e1d
-
-#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000e1e
-
-#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000e1f
-
-#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000e20
-
-#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000e21
-
-#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000e22
-
-#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000e23
-
-#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_8 0x00000e24
-
-#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_9 0x00000e25
-
-#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_10 0x00000e26
-
-#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_11 0x00000e27
-
-#define REG_A6XX_SP_ADDR_MODE_CNTL 0x0000ae01
-
-#define REG_A6XX_SP_NC_MODE_CNTL 0x0000ae02
-
-#define REG_A6XX_SP_PERFCTR_SP_SEL_0 0x0000ae10
-
-#define REG_A6XX_SP_PERFCTR_SP_SEL_1 0x0000ae11
-
-#define REG_A6XX_SP_PERFCTR_SP_SEL_2 0x0000ae12
-
-#define REG_A6XX_SP_PERFCTR_SP_SEL_3 0x0000ae13
-
-#define REG_A6XX_SP_PERFCTR_SP_SEL_4 0x0000ae14
-
-#define REG_A6XX_SP_PERFCTR_SP_SEL_5 0x0000ae15
-
-#define REG_A6XX_SP_PERFCTR_SP_SEL_6 0x0000ae16
-
-#define REG_A6XX_SP_PERFCTR_SP_SEL_7 0x0000ae17
-
-#define REG_A6XX_SP_PERFCTR_SP_SEL_8 0x0000ae18
-
-#define REG_A6XX_SP_PERFCTR_SP_SEL_9 0x0000ae19
-
-#define REG_A6XX_SP_PERFCTR_SP_SEL_10 0x0000ae1a
-
-#define REG_A6XX_SP_PERFCTR_SP_SEL_11 0x0000ae1b
+static inline uint32_t REG_A6XX_UCHE_PERFCTR_UCHE_SEL(uint32_t i0) { return 0x00000e1c + 0x1*i0; }
-#define REG_A6XX_SP_PERFCTR_SP_SEL_12 0x0000ae1c
-
-#define REG_A6XX_SP_PERFCTR_SP_SEL_13 0x0000ae1d
-
-#define REG_A6XX_SP_PERFCTR_SP_SEL_14 0x0000ae1e
-
-#define REG_A6XX_SP_PERFCTR_SP_SEL_15 0x0000ae1f
-
-#define REG_A6XX_SP_PERFCTR_SP_SEL_16 0x0000ae20
-
-#define REG_A6XX_SP_PERFCTR_SP_SEL_17 0x0000ae21
-
-#define REG_A6XX_SP_PERFCTR_SP_SEL_18 0x0000ae22
-
-#define REG_A6XX_SP_PERFCTR_SP_SEL_19 0x0000ae23
-
-#define REG_A6XX_SP_PERFCTR_SP_SEL_20 0x0000ae24
-
-#define REG_A6XX_SP_PERFCTR_SP_SEL_21 0x0000ae25
-
-#define REG_A6XX_SP_PERFCTR_SP_SEL_22 0x0000ae26
-
-#define REG_A6XX_SP_PERFCTR_SP_SEL_23 0x0000ae27
-
-#define REG_A6XX_TPL1_ADDR_MODE_CNTL 0x0000b601
-
-#define REG_A6XX_TPL1_NC_MODE_CNTL 0x0000b604
-
-#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0 0x0000b608
-
-#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1 0x0000b609
-
-#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2 0x0000b60a
-
-#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3 0x0000b60b
-
-#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4 0x0000b60c
-
-#define REG_A6XX_TPL1_PERFCTR_TP_SEL_0 0x0000b610
-
-#define REG_A6XX_TPL1_PERFCTR_TP_SEL_1 0x0000b611
-
-#define REG_A6XX_TPL1_PERFCTR_TP_SEL_2 0x0000b612
-
-#define REG_A6XX_TPL1_PERFCTR_TP_SEL_3 0x0000b613
-
-#define REG_A6XX_TPL1_PERFCTR_TP_SEL_4 0x0000b614
-
-#define REG_A6XX_TPL1_PERFCTR_TP_SEL_5 0x0000b615
-
-#define REG_A6XX_TPL1_PERFCTR_TP_SEL_6 0x0000b616
-
-#define REG_A6XX_TPL1_PERFCTR_TP_SEL_7 0x0000b617
-
-#define REG_A6XX_TPL1_PERFCTR_TP_SEL_8 0x0000b618
-
-#define REG_A6XX_TPL1_PERFCTR_TP_SEL_9 0x0000b619
-
-#define REG_A6XX_TPL1_PERFCTR_TP_SEL_10 0x0000b61a
-
-#define REG_A6XX_TPL1_PERFCTR_TP_SEL_11 0x0000b61b
+#define REG_A6XX_UCHE_CMDQ_CONFIG 0x00000e3c
#define REG_A6XX_VBIF_VERSION 0x00003000
@@ -2507,6 +1884,8 @@ static inline uint32_t A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL(uint32_t val)
#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
+#define REG_A6XX_GBIF_SCACHE_CNTL0 0x00003c01
+
#define REG_A6XX_GBIF_SCACHE_CNTL1 0x00003c02
#define REG_A6XX_GBIF_QSB_SIDE0 0x00003c03
@@ -2555,36 +1934,6 @@ static inline uint32_t A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL(uint32_t val)
#define REG_A6XX_GBIF_PWR_CNT_HIGH2 0x00003cd1
-#define REG_A6XX_SP_WINDOW_OFFSET 0x0000b4d1
-#define A6XX_SP_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
-#define A6XX_SP_WINDOW_OFFSET_X__MASK 0x00007fff
-#define A6XX_SP_WINDOW_OFFSET_X__SHIFT 0
-static inline uint32_t A6XX_SP_WINDOW_OFFSET_X(uint32_t val)
-{
- return ((val) << A6XX_SP_WINDOW_OFFSET_X__SHIFT) & A6XX_SP_WINDOW_OFFSET_X__MASK;
-}
-#define A6XX_SP_WINDOW_OFFSET_Y__MASK 0x7fff0000
-#define A6XX_SP_WINDOW_OFFSET_Y__SHIFT 16
-static inline uint32_t A6XX_SP_WINDOW_OFFSET_Y(uint32_t val)
-{
- return ((val) << A6XX_SP_WINDOW_OFFSET_Y__SHIFT) & A6XX_SP_WINDOW_OFFSET_Y__MASK;
-}
-
-#define REG_A6XX_SP_TP_WINDOW_OFFSET 0x0000b307
-#define A6XX_SP_TP_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
-#define A6XX_SP_TP_WINDOW_OFFSET_X__MASK 0x00007fff
-#define A6XX_SP_TP_WINDOW_OFFSET_X__SHIFT 0
-static inline uint32_t A6XX_SP_TP_WINDOW_OFFSET_X(uint32_t val)
-{
- return ((val) << A6XX_SP_TP_WINDOW_OFFSET_X__SHIFT) & A6XX_SP_TP_WINDOW_OFFSET_X__MASK;
-}
-#define A6XX_SP_TP_WINDOW_OFFSET_Y__MASK 0x7fff0000
-#define A6XX_SP_TP_WINDOW_OFFSET_Y__SHIFT 16
-static inline uint32_t A6XX_SP_TP_WINDOW_OFFSET_Y(uint32_t val)
-{
- return ((val) << A6XX_SP_TP_WINDOW_OFFSET_Y__SHIFT) & A6XX_SP_TP_WINDOW_OFFSET_Y__MASK;
-}
-
#define REG_A6XX_VSC_BIN_SIZE 0x00000c02
#define A6XX_VSC_BIN_SIZE_WIDTH__MASK 0x000000ff
#define A6XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
@@ -2599,10 +1948,6 @@ static inline uint32_t A6XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
return ((val >> 4) << A6XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A6XX_VSC_BIN_SIZE_HEIGHT__MASK;
}
-#define REG_A6XX_VSC_DRAW_STRM_SIZE_ADDRESS_LO 0x00000c03
-
-#define REG_A6XX_VSC_DRAW_STRM_SIZE_ADDRESS_HI 0x00000c04
-
#define REG_A6XX_VSC_DRAW_STRM_SIZE_ADDRESS 0x00000c03
#define REG_A6XX_VSC_BIN_COUNT 0x00000c06
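
A minimal sketch of the addressing convention implied above: the removed REG_A6XX_VSC_DRAW_STRM_SIZE_ADDRESS_LO/_HI pair sat at 0x0c03/0x0c04, so the single ADDRESS name now appears to stand for a 64-bit value spanning those two consecutive dwords. The struct and split_reg64() helper below are hypothetical, for illustration only.

#include <stdint.h>

#define REG_A6XX_VSC_DRAW_STRM_SIZE_ADDRESS 0x00000c03

struct reg_write { uint32_t reg; uint32_t val; };

/* split a 64-bit GPU address across the low and high dwords of a 64-bit register */
static void split_reg64(struct reg_write w[2], uint32_t reg, uint64_t iova)
{
	w[0].reg = reg;     w[0].val = (uint32_t)iova;         /* low dword  */
	w[1].reg = reg + 1; w[1].val = (uint32_t)(iova >> 32); /* high dword */
}
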
@@ -2647,20 +1992,12 @@ static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
return ((val) << A6XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_H__MASK;
}
-#define REG_A6XX_VSC_PRIM_STRM_ADDRESS_LO 0x00000c30
-
-#define REG_A6XX_VSC_PRIM_STRM_ADDRESS_HI 0x00000c31
-
#define REG_A6XX_VSC_PRIM_STRM_ADDRESS 0x00000c30
#define REG_A6XX_VSC_PRIM_STRM_PITCH 0x00000c32
#define REG_A6XX_VSC_PRIM_STRM_LIMIT 0x00000c33
-#define REG_A6XX_VSC_DRAW_STRM_ADDRESS_LO 0x00000c34
-
-#define REG_A6XX_VSC_DRAW_STRM_ADDRESS_HI 0x00000c35
-
#define REG_A6XX_VSC_DRAW_STRM_ADDRESS 0x00000c34
#define REG_A6XX_VSC_DRAW_STRM_PITCH 0x00000c36
@@ -2849,12 +2186,20 @@ static inline uint32_t A6XX_GRAS_SU_CNTL_UNK12(uint32_t val)
return ((val) << A6XX_GRAS_SU_CNTL_UNK12__SHIFT) & A6XX_GRAS_SU_CNTL_UNK12__MASK;
}
#define A6XX_GRAS_SU_CNTL_MSAA_ENABLE 0x00002000
-#define A6XX_GRAS_SU_CNTL_UNK15__MASK 0x007f8000
+#define A6XX_GRAS_SU_CNTL_UNK15__MASK 0x00018000
#define A6XX_GRAS_SU_CNTL_UNK15__SHIFT 15
static inline uint32_t A6XX_GRAS_SU_CNTL_UNK15(uint32_t val)
{
return ((val) << A6XX_GRAS_SU_CNTL_UNK15__SHIFT) & A6XX_GRAS_SU_CNTL_UNK15__MASK;
}
+#define A6XX_GRAS_SU_CNTL_UNK17 0x00020000
+#define A6XX_GRAS_SU_CNTL_MULTIVIEW_ENABLE 0x00040000
+#define A6XX_GRAS_SU_CNTL_UNK19__MASK 0x00780000
+#define A6XX_GRAS_SU_CNTL_UNK19__SHIFT 19
+static inline uint32_t A6XX_GRAS_SU_CNTL_UNK19(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SU_CNTL_UNK19__SHIFT) & A6XX_GRAS_SU_CNTL_UNK19__MASK;
+}
#define REG_A6XX_GRAS_SU_POINT_MINMAX 0x00008091
#define A6XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
@@ -3205,11 +2550,12 @@ static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
#define A6XX_GRAS_LRZ_CNTL_GREATER 0x00000004
#define A6XX_GRAS_LRZ_CNTL_FC_ENABLE 0x00000008
#define A6XX_GRAS_LRZ_CNTL_Z_TEST_ENABLE 0x00000010
-#define A6XX_GRAS_LRZ_CNTL_UNK5__MASK 0x000003e0
-#define A6XX_GRAS_LRZ_CNTL_UNK5__SHIFT 5
-static inline uint32_t A6XX_GRAS_LRZ_CNTL_UNK5(uint32_t val)
+#define A6XX_GRAS_LRZ_CNTL_Z_BOUNDS_ENABLE 0x00000020
+#define A6XX_GRAS_LRZ_CNTL_UNK6__MASK 0x000003c0
+#define A6XX_GRAS_LRZ_CNTL_UNK6__SHIFT 6
+static inline uint32_t A6XX_GRAS_LRZ_CNTL_UNK6(uint32_t val)
{
- return ((val) << A6XX_GRAS_LRZ_CNTL_UNK5__SHIFT) & A6XX_GRAS_LRZ_CNTL_UNK5__MASK;
+ return ((val) << A6XX_GRAS_LRZ_CNTL_UNK6__SHIFT) & A6XX_GRAS_LRZ_CNTL_UNK6__MASK;
}
#define REG_A6XX_GRAS_UNKNOWN_8101 0x00008101
@@ -3222,10 +2568,6 @@ static inline uint32_t A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT(enum a6xx_format val)
return ((val) << A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__SHIFT) & A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__MASK;
}
-#define REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO 0x00008103
-
-#define REG_A6XX_GRAS_LRZ_BUFFER_BASE_HI 0x00008104
-
#define REG_A6XX_GRAS_LRZ_BUFFER_BASE 0x00008103
#define A6XX_GRAS_LRZ_BUFFER_BASE__MASK 0xffffffff
#define A6XX_GRAS_LRZ_BUFFER_BASE__SHIFT 0
@@ -3248,10 +2590,6 @@ static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
return ((val >> 4) << A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK;
}
-#define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO 0x00008106
-
-#define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI 0x00008107
-
#define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE 0x00008106
#define A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE__MASK 0xffffffff
#define A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE__SHIFT 0
@@ -3406,29 +2744,11 @@ static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_2_Y(uint32_t val)
#define REG_A6XX_GRAS_ADDR_MODE_CNTL 0x00008601
-#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_0 0x00008610
-
-#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_1 0x00008611
-
-#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_2 0x00008612
-
-#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_3 0x00008613
+static inline uint32_t REG_A6XX_GRAS_PERFCTR_TSE_SEL(uint32_t i0) { return 0x00008610 + 0x1*i0; }
-#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_0 0x00008614
+static inline uint32_t REG_A6XX_GRAS_PERFCTR_RAS_SEL(uint32_t i0) { return 0x00008614 + 0x1*i0; }
-#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_1 0x00008615
-
-#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_2 0x00008616
-
-#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_3 0x00008617
-
-#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_0 0x00008618
-
-#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_1 0x00008619
-
-#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_2 0x0000861a
-
-#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_3 0x0000861b
+static inline uint32_t REG_A6XX_GRAS_PERFCTR_LRZ_SEL(uint32_t i0) { return 0x00008618 + 0x1*i0; }
#define REG_A6XX_RB_BIN_CONTROL 0x00008800
#define A6XX_RB_BIN_CONTROL_BINW__MASK 0x0000003f
@@ -3889,10 +3209,6 @@ static inline uint32_t A6XX_RB_MRT_ARRAY_PITCH(uint32_t val)
return ((val >> 6) << A6XX_RB_MRT_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_ARRAY_PITCH__MASK;
}
-static inline uint32_t REG_A6XX_RB_MRT_BASE_LO(uint32_t i0) { return 0x00008825 + 0x8*i0; }
-
-static inline uint32_t REG_A6XX_RB_MRT_BASE_HI(uint32_t i0) { return 0x00008826 + 0x8*i0; }
-
static inline uint32_t REG_A6XX_RB_MRT_BASE(uint32_t i0) { return 0x00008825 + 0x8*i0; }
#define A6XX_RB_MRT_BASE__MASK 0xffffffff
#define A6XX_RB_MRT_BASE__SHIFT 0
@@ -4025,10 +3341,6 @@ static inline uint32_t A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
return ((val >> 6) << A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK;
}
-#define REG_A6XX_RB_DEPTH_BUFFER_BASE_LO 0x00008875
-
-#define REG_A6XX_RB_DEPTH_BUFFER_BASE_HI 0x00008876
-
#define REG_A6XX_RB_DEPTH_BUFFER_BASE 0x00008875
#define A6XX_RB_DEPTH_BUFFER_BASE__MASK 0xffffffff
#define A6XX_RB_DEPTH_BUFFER_BASE__SHIFT 0
@@ -4134,10 +3446,6 @@ static inline uint32_t A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH(uint32_t val)
return ((val >> 6) << A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__MASK;
}
-#define REG_A6XX_RB_STENCIL_BUFFER_BASE_LO 0x00008884
-
-#define REG_A6XX_RB_STENCIL_BUFFER_BASE_HI 0x00008885
-
#define REG_A6XX_RB_STENCIL_BUFFER_BASE 0x00008884
#define A6XX_RB_STENCIL_BUFFER_BASE__MASK 0xffffffff
#define A6XX_RB_STENCIL_BUFFER_BASE__SHIFT 0
@@ -4355,10 +3663,6 @@ static inline uint32_t A6XX_RB_BLIT_DST(uint32_t val)
return ((val) << A6XX_RB_BLIT_DST__SHIFT) & A6XX_RB_BLIT_DST__MASK;
}
-#define REG_A6XX_RB_BLIT_DST_LO 0x000088d8
-
-#define REG_A6XX_RB_BLIT_DST_HI 0x000088d9
-
#define REG_A6XX_RB_BLIT_DST_PITCH 0x000088da
#define A6XX_RB_BLIT_DST_PITCH__MASK 0x0000ffff
#define A6XX_RB_BLIT_DST_PITCH__SHIFT 0
@@ -4383,10 +3687,6 @@ static inline uint32_t A6XX_RB_BLIT_FLAG_DST(uint32_t val)
return ((val) << A6XX_RB_BLIT_FLAG_DST__SHIFT) & A6XX_RB_BLIT_FLAG_DST__MASK;
}
-#define REG_A6XX_RB_BLIT_FLAG_DST_LO 0x000088dc
-
-#define REG_A6XX_RB_BLIT_FLAG_DST_HI 0x000088dd
-
#define REG_A6XX_RB_BLIT_FLAG_DST_PITCH 0x000088de
#define A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__MASK 0x000007ff
#define A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__SHIFT 0
@@ -4412,7 +3712,7 @@ static inline uint32_t A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH(uint32_t val)
#define REG_A6XX_RB_BLIT_INFO 0x000088e3
#define A6XX_RB_BLIT_INFO_UNK0 0x00000001
#define A6XX_RB_BLIT_INFO_GMEM 0x00000002
-#define A6XX_RB_BLIT_INFO_INTEGER 0x00000004
+#define A6XX_RB_BLIT_INFO_SAMPLE_0 0x00000004
#define A6XX_RB_BLIT_INFO_DEPTH 0x00000008
#define A6XX_RB_BLIT_INFO_CLEAR_MASK__MASK 0x000000f0
#define A6XX_RB_BLIT_INFO_CLEAR_MASK__SHIFT 4
@@ -4459,10 +3759,6 @@ static inline uint32_t A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
#define REG_A6XX_RB_UNKNOWN_88F4 0x000088f4
-#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO 0x00008900
-
-#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_HI 0x00008901
-
#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE 0x00008900
#define A6XX_RB_DEPTH_FLAG_BUFFER_BASE__MASK 0xffffffff
#define A6XX_RB_DEPTH_FLAG_BUFFER_BASE__SHIFT 0
@@ -4493,10 +3789,6 @@ static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER(uint32_t i0) { return 0x00008903 + 0x3*i0; }
-static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR_LO(uint32_t i0) { return 0x00008903 + 0x3*i0; }
-
-static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR_HI(uint32_t i0) { return 0x00008904 + 0x3*i0; }
-
static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR(uint32_t i0) { return 0x00008903 + 0x3*i0; }
#define A6XX_RB_MRT_FLAG_BUFFER_ADDR__MASK 0xffffffff
#define A6XX_RB_MRT_FLAG_BUFFER_ADDR__SHIFT 0
@@ -4519,10 +3811,6 @@ static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
return ((val >> 7) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
}
-#define REG_A6XX_RB_SAMPLE_COUNT_ADDR_LO 0x00008927
-
-#define REG_A6XX_RB_SAMPLE_COUNT_ADDR_HI 0x00008928
-
#define REG_A6XX_RB_SAMPLE_COUNT_ADDR 0x00008927
#define A6XX_RB_SAMPLE_COUNT_ADDR__MASK 0xffffffff
#define A6XX_RB_SAMPLE_COUNT_ADDR__SHIFT 0
@@ -4608,13 +3896,19 @@ static inline uint32_t A6XX_RB_2D_DST_INFO_SAMPLES(enum a3xx_msaa_samples val)
return ((val) << A6XX_RB_2D_DST_INFO_SAMPLES__SHIFT) & A6XX_RB_2D_DST_INFO_SAMPLES__MASK;
}
#define A6XX_RB_2D_DST_INFO_FILTER 0x00010000
+#define A6XX_RB_2D_DST_INFO_UNK17 0x00020000
#define A6XX_RB_2D_DST_INFO_SAMPLES_AVERAGE 0x00040000
+#define A6XX_RB_2D_DST_INFO_UNK19 0x00080000
#define A6XX_RB_2D_DST_INFO_UNK20 0x00100000
+#define A6XX_RB_2D_DST_INFO_UNK21 0x00200000
#define A6XX_RB_2D_DST_INFO_UNK22 0x00400000
-
-#define REG_A6XX_RB_2D_DST_LO 0x00008c18
-
-#define REG_A6XX_RB_2D_DST_HI 0x00008c19
+#define A6XX_RB_2D_DST_INFO_UNK23__MASK 0x07800000
+#define A6XX_RB_2D_DST_INFO_UNK23__SHIFT 23
+static inline uint32_t A6XX_RB_2D_DST_INFO_UNK23(uint32_t val)
+{
+ return ((val) << A6XX_RB_2D_DST_INFO_UNK23__SHIFT) & A6XX_RB_2D_DST_INFO_UNK23__MASK;
+}
+#define A6XX_RB_2D_DST_INFO_UNK28 0x10000000
#define REG_A6XX_RB_2D_DST 0x00008c18
#define A6XX_RB_2D_DST__MASK 0xffffffff
@@ -4656,10 +3950,6 @@ static inline uint32_t A6XX_RB_2D_DST_PLANE2(uint32_t val)
return ((val) << A6XX_RB_2D_DST_PLANE2__SHIFT) & A6XX_RB_2D_DST_PLANE2__MASK;
}
-#define REG_A6XX_RB_2D_DST_FLAGS_LO 0x00008c20
-
-#define REG_A6XX_RB_2D_DST_FLAGS_HI 0x00008c21
-
#define REG_A6XX_RB_2D_DST_FLAGS 0x00008c20
#define A6XX_RB_2D_DST_FLAGS__MASK 0xffffffff
#define A6XX_RB_2D_DST_FLAGS__SHIFT 0
@@ -4740,41 +4030,13 @@ static inline uint32_t A6XX_RB_NC_MODE_CNTL_UNK12(uint32_t val)
return ((val) << A6XX_RB_NC_MODE_CNTL_UNK12__SHIFT) & A6XX_RB_NC_MODE_CNTL_UNK12__MASK;
}
-#define REG_A6XX_RB_PERFCTR_RB_SEL_0 0x00008e10
-
-#define REG_A6XX_RB_PERFCTR_RB_SEL_1 0x00008e11
-
-#define REG_A6XX_RB_PERFCTR_RB_SEL_2 0x00008e12
-
-#define REG_A6XX_RB_PERFCTR_RB_SEL_3 0x00008e13
-
-#define REG_A6XX_RB_PERFCTR_RB_SEL_4 0x00008e14
-
-#define REG_A6XX_RB_PERFCTR_RB_SEL_5 0x00008e15
-
-#define REG_A6XX_RB_PERFCTR_RB_SEL_6 0x00008e16
-
-#define REG_A6XX_RB_PERFCTR_RB_SEL_7 0x00008e17
-
-#define REG_A6XX_RB_PERFCTR_CCU_SEL_0 0x00008e18
-
-#define REG_A6XX_RB_PERFCTR_CCU_SEL_1 0x00008e19
-
-#define REG_A6XX_RB_PERFCTR_CCU_SEL_2 0x00008e1a
+static inline uint32_t REG_A6XX_RB_PERFCTR_RB_SEL(uint32_t i0) { return 0x00008e10 + 0x1*i0; }
-#define REG_A6XX_RB_PERFCTR_CCU_SEL_3 0x00008e1b
-
-#define REG_A6XX_RB_PERFCTR_CCU_SEL_4 0x00008e1c
+static inline uint32_t REG_A6XX_RB_PERFCTR_CCU_SEL(uint32_t i0) { return 0x00008e18 + 0x1*i0; }
#define REG_A6XX_RB_UNKNOWN_8E28 0x00008e28
-#define REG_A6XX_RB_PERFCTR_CMP_SEL_0 0x00008e2c
-
-#define REG_A6XX_RB_PERFCTR_CMP_SEL_1 0x00008e2d
-
-#define REG_A6XX_RB_PERFCTR_CMP_SEL_2 0x00008e2e
-
-#define REG_A6XX_RB_PERFCTR_CMP_SEL_3 0x00008e2f
+static inline uint32_t REG_A6XX_RB_PERFCTR_CMP_SEL(uint32_t i0) { return 0x00008e2c + 0x1*i0; }
#define REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST 0x00008e3b
@@ -4895,6 +4157,8 @@ static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_VIEWLOC(uint32_t val)
}
#define REG_A6XX_VPC_UNKNOWN_9107 0x00009107
+#define A6XX_VPC_UNKNOWN_9107_RASTER_DISCARD 0x00000001
+#define A6XX_VPC_UNKNOWN_9107_UNK2 0x00000004
#define REG_A6XX_VPC_POLYGON_MODE 0x00009108
#define A6XX_VPC_POLYGON_MODE_MODE__MASK 0x00000003
@@ -4921,13 +4185,13 @@ static inline uint32_t REG_A6XX_VPC_VAR(uint32_t i0) { return 0x00009212 + 0x1*i
static inline uint32_t REG_A6XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x00009212 + 0x1*i0; }
#define REG_A6XX_VPC_SO_CNTL 0x00009216
-#define A6XX_VPC_SO_CNTL_UNK0__MASK 0x000000ff
-#define A6XX_VPC_SO_CNTL_UNK0__SHIFT 0
-static inline uint32_t A6XX_VPC_SO_CNTL_UNK0(uint32_t val)
+#define A6XX_VPC_SO_CNTL_ADDR__MASK 0x000000ff
+#define A6XX_VPC_SO_CNTL_ADDR__SHIFT 0
+static inline uint32_t A6XX_VPC_SO_CNTL_ADDR(uint32_t val)
{
- return ((val) << A6XX_VPC_SO_CNTL_UNK0__SHIFT) & A6XX_VPC_SO_CNTL_UNK0__MASK;
+ return ((val) << A6XX_VPC_SO_CNTL_ADDR__SHIFT) & A6XX_VPC_SO_CNTL_ADDR__MASK;
}
-#define A6XX_VPC_SO_CNTL_ENABLE 0x00010000
+#define A6XX_VPC_SO_CNTL_RESET 0x00010000
#define REG_A6XX_VPC_SO_PROG 0x00009217
#define A6XX_VPC_SO_PROG_A_BUF__MASK 0x00000003
@@ -4957,10 +4221,6 @@ static inline uint32_t A6XX_VPC_SO_PROG_B_OFF(uint32_t val)
}
#define A6XX_VPC_SO_PROG_B_EN 0x00800000
-#define REG_A6XX_VPC_SO_STREAM_COUNTS_LO 0x00009218
-
-#define REG_A6XX_VPC_SO_STREAM_COUNTS_HI 0x00009219
-
#define REG_A6XX_VPC_SO_STREAM_COUNTS 0x00009218
#define A6XX_VPC_SO_STREAM_COUNTS__MASK 0xffffffff
#define A6XX_VPC_SO_STREAM_COUNTS__SHIFT 0
@@ -4979,10 +4239,6 @@ static inline uint32_t A6XX_VPC_SO_BUFFER_BASE(uint32_t val)
return ((val) << A6XX_VPC_SO_BUFFER_BASE__SHIFT) & A6XX_VPC_SO_BUFFER_BASE__MASK;
}
-static inline uint32_t REG_A6XX_VPC_SO_BUFFER_BASE_LO(uint32_t i0) { return 0x0000921a + 0x7*i0; }
-
-static inline uint32_t REG_A6XX_VPC_SO_BUFFER_BASE_HI(uint32_t i0) { return 0x0000921b + 0x7*i0; }
-
static inline uint32_t REG_A6XX_VPC_SO_BUFFER_SIZE(uint32_t i0) { return 0x0000921c + 0x7*i0; }
#define A6XX_VPC_SO_BUFFER_SIZE__MASK 0xfffffffc
#define A6XX_VPC_SO_BUFFER_SIZE__SHIFT 2
@@ -5009,10 +4265,6 @@ static inline uint32_t A6XX_VPC_SO_FLUSH_BASE(uint32_t val)
return ((val) << A6XX_VPC_SO_FLUSH_BASE__SHIFT) & A6XX_VPC_SO_FLUSH_BASE__MASK;
}
-static inline uint32_t REG_A6XX_VPC_SO_FLUSH_BASE_LO(uint32_t i0) { return 0x0000921f + 0x7*i0; }
-
-static inline uint32_t REG_A6XX_VPC_SO_FLUSH_BASE_HI(uint32_t i0) { return 0x00009220 + 0x7*i0; }
-
#define REG_A6XX_VPC_POINT_COORD_INVERT 0x00009236
#define A6XX_VPC_POINT_COORD_INVERT_INVERT 0x00000001
@@ -5037,11 +4289,11 @@ static inline uint32_t A6XX_VPC_VS_PACK_PSIZELOC(uint32_t val)
{
return ((val) << A6XX_VPC_VS_PACK_PSIZELOC__SHIFT) & A6XX_VPC_VS_PACK_PSIZELOC__MASK;
}
-#define A6XX_VPC_VS_PACK_UNK24__MASK 0x0f000000
-#define A6XX_VPC_VS_PACK_UNK24__SHIFT 24
-static inline uint32_t A6XX_VPC_VS_PACK_UNK24(uint32_t val)
+#define A6XX_VPC_VS_PACK_EXTRAPOS__MASK 0x0f000000
+#define A6XX_VPC_VS_PACK_EXTRAPOS__SHIFT 24
+static inline uint32_t A6XX_VPC_VS_PACK_EXTRAPOS(uint32_t val)
{
- return ((val) << A6XX_VPC_VS_PACK_UNK24__SHIFT) & A6XX_VPC_VS_PACK_UNK24__MASK;
+ return ((val) << A6XX_VPC_VS_PACK_EXTRAPOS__SHIFT) & A6XX_VPC_VS_PACK_EXTRAPOS__MASK;
}
#define REG_A6XX_VPC_GS_PACK 0x00009302
@@ -5063,11 +4315,11 @@ static inline uint32_t A6XX_VPC_GS_PACK_PSIZELOC(uint32_t val)
{
return ((val) << A6XX_VPC_GS_PACK_PSIZELOC__SHIFT) & A6XX_VPC_GS_PACK_PSIZELOC__MASK;
}
-#define A6XX_VPC_GS_PACK_UNK24__MASK 0x0f000000
-#define A6XX_VPC_GS_PACK_UNK24__SHIFT 24
-static inline uint32_t A6XX_VPC_GS_PACK_UNK24(uint32_t val)
+#define A6XX_VPC_GS_PACK_EXTRAPOS__MASK 0x0f000000
+#define A6XX_VPC_GS_PACK_EXTRAPOS__SHIFT 24
+static inline uint32_t A6XX_VPC_GS_PACK_EXTRAPOS(uint32_t val)
{
- return ((val) << A6XX_VPC_GS_PACK_UNK24__SHIFT) & A6XX_VPC_GS_PACK_UNK24__MASK;
+ return ((val) << A6XX_VPC_GS_PACK_EXTRAPOS__SHIFT) & A6XX_VPC_GS_PACK_EXTRAPOS__MASK;
}
#define REG_A6XX_VPC_DS_PACK 0x00009303
@@ -5089,11 +4341,11 @@ static inline uint32_t A6XX_VPC_DS_PACK_PSIZELOC(uint32_t val)
{
return ((val) << A6XX_VPC_DS_PACK_PSIZELOC__SHIFT) & A6XX_VPC_DS_PACK_PSIZELOC__MASK;
}
-#define A6XX_VPC_DS_PACK_UNK24__MASK 0x0f000000
-#define A6XX_VPC_DS_PACK_UNK24__SHIFT 24
-static inline uint32_t A6XX_VPC_DS_PACK_UNK24(uint32_t val)
+#define A6XX_VPC_DS_PACK_EXTRAPOS__MASK 0x0f000000
+#define A6XX_VPC_DS_PACK_EXTRAPOS__SHIFT 24
+static inline uint32_t A6XX_VPC_DS_PACK_EXTRAPOS(uint32_t val)
{
- return ((val) << A6XX_VPC_DS_PACK_UNK24__SHIFT) & A6XX_VPC_DS_PACK_UNK24__MASK;
+ return ((val) << A6XX_VPC_DS_PACK_EXTRAPOS__SHIFT) & A6XX_VPC_DS_PACK_EXTRAPOS__MASK;
}
#define REG_A6XX_VPC_CNTL_0 0x00009304
@@ -5110,24 +4362,43 @@ static inline uint32_t A6XX_VPC_CNTL_0_PRIMIDLOC(uint32_t val)
return ((val) << A6XX_VPC_CNTL_0_PRIMIDLOC__SHIFT) & A6XX_VPC_CNTL_0_PRIMIDLOC__MASK;
}
#define A6XX_VPC_CNTL_0_VARYING 0x00010000
-#define A6XX_VPC_CNTL_0_UNKLOC__MASK 0xff000000
-#define A6XX_VPC_CNTL_0_UNKLOC__SHIFT 24
-static inline uint32_t A6XX_VPC_CNTL_0_UNKLOC(uint32_t val)
+#define A6XX_VPC_CNTL_0_VIEWIDLOC__MASK 0xff000000
+#define A6XX_VPC_CNTL_0_VIEWIDLOC__SHIFT 24
+static inline uint32_t A6XX_VPC_CNTL_0_VIEWIDLOC(uint32_t val)
{
- return ((val) << A6XX_VPC_CNTL_0_UNKLOC__SHIFT) & A6XX_VPC_CNTL_0_UNKLOC__MASK;
+ return ((val) << A6XX_VPC_CNTL_0_VIEWIDLOC__SHIFT) & A6XX_VPC_CNTL_0_VIEWIDLOC__MASK;
}
-#define REG_A6XX_VPC_SO_BUF_CNTL 0x00009305
-#define A6XX_VPC_SO_BUF_CNTL_BUF0 0x00000001
-#define A6XX_VPC_SO_BUF_CNTL_BUF1 0x00000008
-#define A6XX_VPC_SO_BUF_CNTL_BUF2 0x00000040
-#define A6XX_VPC_SO_BUF_CNTL_BUF3 0x00000200
-#define A6XX_VPC_SO_BUF_CNTL_ENABLE 0x00008000
-#define A6XX_VPC_SO_BUF_CNTL_UNK16__MASK 0x000f0000
-#define A6XX_VPC_SO_BUF_CNTL_UNK16__SHIFT 16
-static inline uint32_t A6XX_VPC_SO_BUF_CNTL_UNK16(uint32_t val)
+#define REG_A6XX_VPC_SO_STREAM_CNTL 0x00009305
+#define A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM__MASK 0x00000007
+#define A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM__SHIFT 0
+static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM__MASK;
+}
+#define A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM__MASK 0x00000038
+#define A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM__SHIFT 3
+static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM__MASK;
+}
+#define A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM__MASK 0x000001c0
+#define A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM__SHIFT 6
+static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM__MASK;
+}
+#define A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM__MASK 0x00000e00
+#define A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM__SHIFT 9
+static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM(uint32_t val)
{
- return ((val) << A6XX_VPC_SO_BUF_CNTL_UNK16__SHIFT) & A6XX_VPC_SO_BUF_CNTL_UNK16__MASK;
+ return ((val) << A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM__MASK;
+}
+#define A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE__MASK 0x00078000
+#define A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE__SHIFT 15
+static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE__MASK;
}
#define REG_A6XX_VPC_SO_DISABLE 0x00009306
@@ -5141,32 +4412,22 @@ static inline uint32_t A6XX_VPC_SO_BUF_CNTL_UNK16(uint32_t val)
#define REG_A6XX_VPC_UNKNOWN_9603 0x00009603
-#define REG_A6XX_VPC_PERFCTR_VPC_SEL_0 0x00009604
-
-#define REG_A6XX_VPC_PERFCTR_VPC_SEL_1 0x00009605
-
-#define REG_A6XX_VPC_PERFCTR_VPC_SEL_2 0x00009606
-
-#define REG_A6XX_VPC_PERFCTR_VPC_SEL_3 0x00009607
-
-#define REG_A6XX_VPC_PERFCTR_VPC_SEL_4 0x00009608
-
-#define REG_A6XX_VPC_PERFCTR_VPC_SEL_5 0x00009609
+static inline uint32_t REG_A6XX_VPC_PERFCTR_VPC_SEL(uint32_t i0) { return 0x00009604 + 0x1*i0; }
#define REG_A6XX_PC_TESS_NUM_VERTEX 0x00009800
-#define REG_A6XX_PC_UNKNOWN_9801 0x00009801
-#define A6XX_PC_UNKNOWN_9801_UNK0__MASK 0x000007ff
-#define A6XX_PC_UNKNOWN_9801_UNK0__SHIFT 0
-static inline uint32_t A6XX_PC_UNKNOWN_9801_UNK0(uint32_t val)
+#define REG_A6XX_PC_HS_INPUT_SIZE 0x00009801
+#define A6XX_PC_HS_INPUT_SIZE_SIZE__MASK 0x000007ff
+#define A6XX_PC_HS_INPUT_SIZE_SIZE__SHIFT 0
+static inline uint32_t A6XX_PC_HS_INPUT_SIZE_SIZE(uint32_t val)
{
- return ((val) << A6XX_PC_UNKNOWN_9801_UNK0__SHIFT) & A6XX_PC_UNKNOWN_9801_UNK0__MASK;
+ return ((val) << A6XX_PC_HS_INPUT_SIZE_SIZE__SHIFT) & A6XX_PC_HS_INPUT_SIZE_SIZE__MASK;
}
-#define A6XX_PC_UNKNOWN_9801_UNK13__MASK 0x00002000
-#define A6XX_PC_UNKNOWN_9801_UNK13__SHIFT 13
-static inline uint32_t A6XX_PC_UNKNOWN_9801_UNK13(uint32_t val)
+#define A6XX_PC_HS_INPUT_SIZE_UNK13__MASK 0x00002000
+#define A6XX_PC_HS_INPUT_SIZE_UNK13__SHIFT 13
+static inline uint32_t A6XX_PC_HS_INPUT_SIZE_UNK13(uint32_t val)
{
- return ((val) << A6XX_PC_UNKNOWN_9801_UNK13__SHIFT) & A6XX_PC_UNKNOWN_9801_UNK13__MASK;
+ return ((val) << A6XX_PC_HS_INPUT_SIZE_UNK13__SHIFT) & A6XX_PC_HS_INPUT_SIZE_UNK13__MASK;
}
#define REG_A6XX_PC_TESS_CNTL 0x00009802
@@ -5221,6 +4482,8 @@ static inline uint32_t A6XX_PC_EVENT_CMD_EVENT(enum vgt_event_type val)
return ((val) << A6XX_PC_EVENT_CMD_EVENT__SHIFT) & A6XX_PC_EVENT_CMD_EVENT__MASK;
}
+#define REG_A6XX_PC_MARKER 0x00009880
+
#define REG_A6XX_PC_POLYGON_MODE 0x00009981
#define A6XX_PC_POLYGON_MODE_MODE__MASK 0x00000003
#define A6XX_PC_POLYGON_MODE_MODE__SHIFT 0
@@ -5229,7 +4492,14 @@ static inline uint32_t A6XX_PC_POLYGON_MODE_MODE(enum a6xx_polygon_mode val)
return ((val) << A6XX_PC_POLYGON_MODE_MODE__SHIFT) & A6XX_PC_POLYGON_MODE_MODE__MASK;
}
-#define REG_A6XX_PC_UNKNOWN_9980 0x00009980
+#define REG_A6XX_PC_RASTER_CNTL 0x00009980
+#define A6XX_PC_RASTER_CNTL_STREAM__MASK 0x00000003
+#define A6XX_PC_RASTER_CNTL_STREAM__SHIFT 0
+static inline uint32_t A6XX_PC_RASTER_CNTL_STREAM(uint32_t val)
+{
+ return ((val) << A6XX_PC_RASTER_CNTL_STREAM__SHIFT) & A6XX_PC_RASTER_CNTL_STREAM__MASK;
+}
+#define A6XX_PC_RASTER_CNTL_DISCARD 0x00000004
#define REG_A6XX_PC_PRIMITIVE_CNTL_0 0x00009b00
#define A6XX_PC_PRIMITIVE_CNTL_0_PRIMITIVE_RESTART 0x00000001
@@ -5327,9 +4597,17 @@ static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC(uint32_t val)
return ((val) << A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__MASK;
}
-#define REG_A6XX_PC_UNKNOWN_9B07 0x00009b07
+#define REG_A6XX_PC_MULTIVIEW_CNTL 0x00009b07
+#define A6XX_PC_MULTIVIEW_CNTL_ENABLE 0x00000001
+#define A6XX_PC_MULTIVIEW_CNTL_DISABLEMULTIPOS 0x00000002
+#define A6XX_PC_MULTIVIEW_CNTL_VIEWS__MASK 0x0000007c
+#define A6XX_PC_MULTIVIEW_CNTL_VIEWS__SHIFT 2
+static inline uint32_t A6XX_PC_MULTIVIEW_CNTL_VIEWS(uint32_t val)
+{
+ return ((val) << A6XX_PC_MULTIVIEW_CNTL_VIEWS__SHIFT) & A6XX_PC_MULTIVIEW_CNTL_VIEWS__MASK;
+}
-#define REG_A6XX_PC_UNKNOWN_9B08 0x00009b08
+#define REG_A6XX_PC_MULTIVIEW_MASK 0x00009b08
#define REG_A6XX_PC_2D_EVENT_CMD 0x00009c00
#define A6XX_PC_2D_EVENT_CMD_EVENT__MASK 0x0000007f
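
The renamed REG_A6XX_PC_MULTIVIEW_CNTL above gains an ENABLE flag and a VIEWS field; a minimal sketch of composing the register value with the mask/shift helper follows (defines copied from the hunk, the view count of 2 is an arbitrary example).

#include <stdint.h>
#include <stdio.h>

/* copied from the hunk above */
#define A6XX_PC_MULTIVIEW_CNTL_ENABLE       0x00000001
#define A6XX_PC_MULTIVIEW_CNTL_VIEWS__MASK  0x0000007c
#define A6XX_PC_MULTIVIEW_CNTL_VIEWS__SHIFT 2
static inline uint32_t A6XX_PC_MULTIVIEW_CNTL_VIEWS(uint32_t val)
{
	return ((val) << A6XX_PC_MULTIVIEW_CNTL_VIEWS__SHIFT) & A6XX_PC_MULTIVIEW_CNTL_VIEWS__MASK;
}

int main(void)
{
	/* enable multiview with an arbitrary view count of 2 */
	uint32_t cntl = A6XX_PC_MULTIVIEW_CNTL_ENABLE | A6XX_PC_MULTIVIEW_CNTL_VIEWS(2);
	printf("PC_MULTIVIEW_CNTL = 0x%08x\n", cntl); /* prints 0x00000009 */
	return 0;
}
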
@@ -5349,9 +4627,11 @@ static inline uint32_t A6XX_PC_2D_EVENT_CMD_STATE_ID(uint32_t val)
#define REG_A6XX_PC_ADDR_MODE_CNTL 0x00009e01
-#define REG_A6XX_PC_TESSFACTOR_ADDR_LO 0x00009e08
+#define REG_A6XX_PC_DRAW_INDX_BASE 0x00009e04
-#define REG_A6XX_PC_TESSFACTOR_ADDR_HI 0x00009e09
+#define REG_A6XX_PC_DRAW_FIRST_INDX 0x00009e06
+
+#define REG_A6XX_PC_DRAW_MAX_INDICES 0x00009e07
#define REG_A6XX_PC_TESSFACTOR_ADDR 0x00009e08
#define A6XX_PC_TESSFACTOR_ADDR__MASK 0xffffffff
@@ -5361,6 +4641,44 @@ static inline uint32_t A6XX_PC_TESSFACTOR_ADDR(uint32_t val)
return ((val) << A6XX_PC_TESSFACTOR_ADDR__SHIFT) & A6XX_PC_TESSFACTOR_ADDR__MASK;
}
+#define REG_A6XX_PC_DRAW_INITIATOR 0x00009e0b
+#define A6XX_PC_DRAW_INITIATOR_PRIM_TYPE__MASK 0x0000003f
+#define A6XX_PC_DRAW_INITIATOR_PRIM_TYPE__SHIFT 0
+static inline uint32_t A6XX_PC_DRAW_INITIATOR_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A6XX_PC_DRAW_INITIATOR_PRIM_TYPE__SHIFT) & A6XX_PC_DRAW_INITIATOR_PRIM_TYPE__MASK;
+}
+#define A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT__MASK 0x000000c0
+#define A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT__SHIFT) & A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT__MASK;
+}
+#define A6XX_PC_DRAW_INITIATOR_VIS_CULL__MASK 0x00000300
+#define A6XX_PC_DRAW_INITIATOR_VIS_CULL__SHIFT 8
+static inline uint32_t A6XX_PC_DRAW_INITIATOR_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A6XX_PC_DRAW_INITIATOR_VIS_CULL__SHIFT) & A6XX_PC_DRAW_INITIATOR_VIS_CULL__MASK;
+}
+#define A6XX_PC_DRAW_INITIATOR_INDEX_SIZE__MASK 0x00000c00
+#define A6XX_PC_DRAW_INITIATOR_INDEX_SIZE__SHIFT 10
+static inline uint32_t A6XX_PC_DRAW_INITIATOR_INDEX_SIZE(enum a4xx_index_size val)
+{
+ return ((val) << A6XX_PC_DRAW_INITIATOR_INDEX_SIZE__SHIFT) & A6XX_PC_DRAW_INITIATOR_INDEX_SIZE__MASK;
+}
+#define A6XX_PC_DRAW_INITIATOR_PATCH_TYPE__MASK 0x00003000
+#define A6XX_PC_DRAW_INITIATOR_PATCH_TYPE__SHIFT 12
+static inline uint32_t A6XX_PC_DRAW_INITIATOR_PATCH_TYPE(enum a6xx_patch_type val)
+{
+ return ((val) << A6XX_PC_DRAW_INITIATOR_PATCH_TYPE__SHIFT) & A6XX_PC_DRAW_INITIATOR_PATCH_TYPE__MASK;
+}
+#define A6XX_PC_DRAW_INITIATOR_GS_ENABLE 0x00010000
+#define A6XX_PC_DRAW_INITIATOR_TESS_ENABLE 0x00020000
+
+#define REG_A6XX_PC_DRAW_NUM_INSTANCES 0x00009e0c
+
+#define REG_A6XX_PC_DRAW_NUM_INDICES 0x00009e0d
+
#define REG_A6XX_PC_VSTREAM_CONTROL 0x00009e11
#define A6XX_PC_VSTREAM_CONTROL_UNK0__MASK 0x0000ffff
#define A6XX_PC_VSTREAM_CONTROL_UNK0__SHIFT 0
@@ -5397,21 +4715,10 @@ static inline uint32_t A6XX_PC_BIN_DRAW_STRM(uint32_t val)
return ((val) << A6XX_PC_BIN_DRAW_STRM__SHIFT) & A6XX_PC_BIN_DRAW_STRM__MASK;
}
-#define REG_A6XX_PC_PERFCTR_PC_SEL_0 0x00009e34
-
-#define REG_A6XX_PC_PERFCTR_PC_SEL_1 0x00009e35
-
-#define REG_A6XX_PC_PERFCTR_PC_SEL_2 0x00009e36
-
-#define REG_A6XX_PC_PERFCTR_PC_SEL_3 0x00009e37
-
-#define REG_A6XX_PC_PERFCTR_PC_SEL_4 0x00009e38
+#define REG_A6XX_PC_VISIBILITY_OVERRIDE 0x00009e1c
+#define A6XX_PC_VISIBILITY_OVERRIDE_OVERRIDE 0x00000001
-#define REG_A6XX_PC_PERFCTR_PC_SEL_5 0x00009e39
-
-#define REG_A6XX_PC_PERFCTR_PC_SEL_6 0x00009e3a
-
-#define REG_A6XX_PC_PERFCTR_PC_SEL_7 0x00009e3b
+static inline uint32_t REG_A6XX_PC_PERFCTR_PC_SEL(uint32_t i0) { return 0x00009e34 + 0x1*i0; }
#define REG_A6XX_PC_UNKNOWN_9E72 0x00009e72
@@ -5448,6 +4755,12 @@ static inline uint32_t A6XX_VFD_CONTROL_1_REGID4PRIMID(uint32_t val)
{
return ((val) << A6XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT) & A6XX_VFD_CONTROL_1_REGID4PRIMID__MASK;
}
+#define A6XX_VFD_CONTROL_1_REGID4VIEWID__MASK 0xff000000
+#define A6XX_VFD_CONTROL_1_REGID4VIEWID__SHIFT 24
+static inline uint32_t A6XX_VFD_CONTROL_1_REGID4VIEWID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_1_REGID4VIEWID__SHIFT) & A6XX_VFD_CONTROL_1_REGID4VIEWID__MASK;
+}
#define REG_A6XX_VFD_CONTROL_2 0x0000a002
#define A6XX_VFD_CONTROL_2_REGID_HSPATCHID__MASK 0x000000ff
@@ -5464,6 +4777,12 @@ static inline uint32_t A6XX_VFD_CONTROL_2_REGID_INVOCATIONID(uint32_t val)
}
#define REG_A6XX_VFD_CONTROL_3 0x0000a003
+#define A6XX_VFD_CONTROL_3_UNK0__MASK 0x000000ff
+#define A6XX_VFD_CONTROL_3_UNK0__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_3_UNK0(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_3_UNK0__SHIFT) & A6XX_VFD_CONTROL_3_UNK0__MASK;
+}
#define A6XX_VFD_CONTROL_3_REGID_DSPATCHID__MASK 0x0000ff00
#define A6XX_VFD_CONTROL_3_REGID_DSPATCHID__SHIFT 8
static inline uint32_t A6XX_VFD_CONTROL_3_REGID_DSPATCHID(uint32_t val)
@@ -5484,6 +4803,12 @@ static inline uint32_t A6XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
}
#define REG_A6XX_VFD_CONTROL_4 0x0000a004
+#define A6XX_VFD_CONTROL_4_UNK0__MASK 0x000000ff
+#define A6XX_VFD_CONTROL_4_UNK0__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_4_UNK0(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_4_UNK0__SHIFT) & A6XX_VFD_CONTROL_4_UNK0__MASK;
+}
#define REG_A6XX_VFD_CONTROL_5 0x0000a005
#define A6XX_VFD_CONTROL_5_REGID_GSHEADER__MASK 0x000000ff
@@ -5492,14 +4817,30 @@ static inline uint32_t A6XX_VFD_CONTROL_5_REGID_GSHEADER(uint32_t val)
{
return ((val) << A6XX_VFD_CONTROL_5_REGID_GSHEADER__SHIFT) & A6XX_VFD_CONTROL_5_REGID_GSHEADER__MASK;
}
+#define A6XX_VFD_CONTROL_5_UNK8__MASK 0x0000ff00
+#define A6XX_VFD_CONTROL_5_UNK8__SHIFT 8
+static inline uint32_t A6XX_VFD_CONTROL_5_UNK8(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_5_UNK8__SHIFT) & A6XX_VFD_CONTROL_5_UNK8__MASK;
+}
#define REG_A6XX_VFD_CONTROL_6 0x0000a006
#define A6XX_VFD_CONTROL_6_PRIMID_PASSTHRU 0x00000001
#define REG_A6XX_VFD_MODE_CNTL 0x0000a007
#define A6XX_VFD_MODE_CNTL_BINNING_PASS 0x00000001
+#define A6XX_VFD_MODE_CNTL_UNK1 0x00000002
+#define A6XX_VFD_MODE_CNTL_UNK2 0x00000004
-#define REG_A6XX_VFD_UNKNOWN_A008 0x0000a008
+#define REG_A6XX_VFD_MULTIVIEW_CNTL 0x0000a008
+#define A6XX_VFD_MULTIVIEW_CNTL_ENABLE 0x00000001
+#define A6XX_VFD_MULTIVIEW_CNTL_DISABLEMULTIPOS 0x00000002
+#define A6XX_VFD_MULTIVIEW_CNTL_VIEWS__MASK 0x0000007c
+#define A6XX_VFD_MULTIVIEW_CNTL_VIEWS__SHIFT 2
+static inline uint32_t A6XX_VFD_MULTIVIEW_CNTL_VIEWS(uint32_t val)
+{
+ return ((val) << A6XX_VFD_MULTIVIEW_CNTL_VIEWS__SHIFT) & A6XX_VFD_MULTIVIEW_CNTL_VIEWS__MASK;
+}
#define REG_A6XX_VFD_ADD_OFFSET 0x0000a009
#define A6XX_VFD_ADD_OFFSET_VERTEX 0x00000001
@@ -5512,10 +4853,12 @@ static inline uint32_t A6XX_VFD_CONTROL_5_REGID_GSHEADER(uint32_t val)
static inline uint32_t REG_A6XX_VFD_FETCH(uint32_t i0) { return 0x0000a010 + 0x4*i0; }
static inline uint32_t REG_A6XX_VFD_FETCH_BASE(uint32_t i0) { return 0x0000a010 + 0x4*i0; }
-
-static inline uint32_t REG_A6XX_VFD_FETCH_BASE_LO(uint32_t i0) { return 0x0000a010 + 0x4*i0; }
-
-static inline uint32_t REG_A6XX_VFD_FETCH_BASE_HI(uint32_t i0) { return 0x0000a011 + 0x4*i0; }
+#define A6XX_VFD_FETCH_BASE__MASK 0xffffffff
+#define A6XX_VFD_FETCH_BASE__SHIFT 0
+static inline uint32_t A6XX_VFD_FETCH_BASE(uint32_t val)
+{
+ return ((val) << A6XX_VFD_FETCH_BASE__SHIFT) & A6XX_VFD_FETCH_BASE__MASK;
+}
static inline uint32_t REG_A6XX_VFD_FETCH_SIZE(uint32_t i0) { return 0x0000a012 + 0x4*i0; }
@@ -5572,7 +4915,19 @@ static inline uint32_t A6XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val)
#define REG_A6XX_SP_UNKNOWN_A0F8 0x0000a0f8
+#define REG_A6XX_VFD_ADDR_MODE_CNTL 0x0000a601
+
+static inline uint32_t REG_A6XX_VFD_PERFCTR_VFD_SEL(uint32_t i0) { return 0x0000a610 + 0x1*i0; }
+
#define REG_A6XX_SP_VS_CTRL_REG0 0x0000a800
+#define A6XX_SP_VS_CTRL_REG0_MERGEDREGS 0x00100000
+#define A6XX_SP_VS_CTRL_REG0_UNK21 0x00200000
+#define A6XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A6XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_VS_CTRL_REG0_THREADMODE__MASK;
+}
#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
static inline uint32_t A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
@@ -5585,22 +4940,13 @@ static inline uint32_t A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
return ((val) << A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
}
+#define A6XX_SP_VS_CTRL_REG0_UNK13 0x00002000
#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT 14
static inline uint32_t A6XX_SP_VS_CTRL_REG0_BRANCHSTACK(uint32_t val)
{
return ((val) << A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK;
}
-#define A6XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
-#define A6XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
-static inline uint32_t A6XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
-{
- return ((val) << A6XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
-}
-#define A6XX_SP_VS_CTRL_REG0_VARYING 0x00400000
-#define A6XX_SP_VS_CTRL_REG0_DIFF_FINE 0x00800000
-#define A6XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x04000000
-#define A6XX_SP_VS_CTRL_REG0_MERGEDREGS 0x80000000
#define REG_A6XX_SP_VS_BRANCH_COND 0x0000a801
@@ -5611,6 +4957,12 @@ static inline uint32_t A6XX_SP_VS_PRIMITIVE_CNTL_OUT(uint32_t val)
{
return ((val) << A6XX_SP_VS_PRIMITIVE_CNTL_OUT__SHIFT) & A6XX_SP_VS_PRIMITIVE_CNTL_OUT__MASK;
}
+#define A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID__MASK 0x00003fc0
+#define A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT 6
+static inline uint32_t A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT) & A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID__MASK;
+}
static inline uint32_t REG_A6XX_SP_VS_OUT(uint32_t i0) { return 0x0000a803 + 0x1*i0; }
@@ -5668,11 +5020,46 @@ static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
}
-#define REG_A6XX_SP_UNKNOWN_A81B 0x0000a81b
+#define REG_A6XX_SP_VS_OBJ_FIRST_EXEC_OFFSET 0x0000a81b
-#define REG_A6XX_SP_VS_OBJ_START_LO 0x0000a81c
+#define REG_A6XX_SP_VS_OBJ_START 0x0000a81c
+#define A6XX_SP_VS_OBJ_START__MASK 0xffffffff
+#define A6XX_SP_VS_OBJ_START__SHIFT 0
+static inline uint32_t A6XX_SP_VS_OBJ_START(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_OBJ_START__SHIFT) & A6XX_SP_VS_OBJ_START__MASK;
+}
-#define REG_A6XX_SP_VS_OBJ_START_HI 0x0000a81d
+#define REG_A6XX_SP_VS_PVT_MEM_PARAM 0x0000a81e
+#define A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val >> 9) << A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A6XX_SP_VS_PVT_MEM_ADDR 0x0000a81f
+#define A6XX_SP_VS_PVT_MEM_ADDR__MASK 0xffffffff
+#define A6XX_SP_VS_PVT_MEM_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_VS_PVT_MEM_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_VS_PVT_MEM_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_VS_PVT_MEM_SIZE 0x0000a821
+#define A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+ return ((val >> 12) << A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+#define A6XX_SP_VS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
#define REG_A6XX_SP_VS_TEX_COUNT 0x0000a822
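
The new SP_VS_PVT_MEM_PARAM/SIZE helpers above pre-shift their byte-size argument (>> 9 and >> 12) before masking, which suggests the fields are encoded in 512-byte and 4 KiB granules; the header itself does not state this, so treat it as an inference. A minimal check of the encoding (helper copied from the hunk, the 2048-byte size is an arbitrary example):

#include <assert.h>
#include <stdint.h>

/* copied from the hunk above */
#define A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK  0x000000ff
#define A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
static inline uint32_t A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
{
	return ((val >> 9) << A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
}

int main(void)
{
	/* 2048 bytes per item encodes as 2048 / 512 = 4 */
	assert(A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM(2048) == 4);
	return 0;
}
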
@@ -5694,7 +5081,7 @@ static inline uint32_t A6XX_SP_VS_CONFIG_NSAMP(uint32_t val)
{
return ((val) << A6XX_SP_VS_CONFIG_NSAMP__SHIFT) & A6XX_SP_VS_CONFIG_NSAMP__MASK;
}
-#define A6XX_SP_VS_CONFIG_NIBO__MASK 0x3fc00000
+#define A6XX_SP_VS_CONFIG_NIBO__MASK 0x1fc00000
#define A6XX_SP_VS_CONFIG_NIBO__SHIFT 22
static inline uint32_t A6XX_SP_VS_CONFIG_NIBO(uint32_t val)
{
@@ -5703,7 +5090,22 @@ static inline uint32_t A6XX_SP_VS_CONFIG_NIBO(uint32_t val)
#define REG_A6XX_SP_VS_INSTRLEN 0x0000a824
+#define REG_A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET 0x0000a825
+#define A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET__MASK 0x0007ffff
+#define A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET__SHIFT 0
+static inline uint32_t A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET(uint32_t val)
+{
+ return ((val >> 11) << A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET__SHIFT) & A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET__MASK;
+}
+
#define REG_A6XX_SP_HS_CTRL_REG0 0x0000a830
+#define A6XX_SP_HS_CTRL_REG0_UNK20 0x00100000
+#define A6XX_SP_HS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A6XX_SP_HS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A6XX_SP_HS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_HS_CTRL_REG0_THREADMODE__MASK;
+}
#define A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
#define A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
static inline uint32_t A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
@@ -5716,30 +5118,58 @@ static inline uint32_t A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
return ((val) << A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
}
+#define A6XX_SP_HS_CTRL_REG0_UNK13 0x00002000
#define A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
#define A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT 14
static inline uint32_t A6XX_SP_HS_CTRL_REG0_BRANCHSTACK(uint32_t val)
{
return ((val) << A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK;
}
-#define A6XX_SP_HS_CTRL_REG0_THREADSIZE__MASK 0x00100000
-#define A6XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT 20
-static inline uint32_t A6XX_SP_HS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+
+#define REG_A6XX_SP_HS_WAVE_INPUT_SIZE 0x0000a831
+
+#define REG_A6XX_SP_HS_BRANCH_COND 0x0000a832
+
+#define REG_A6XX_SP_HS_OBJ_FIRST_EXEC_OFFSET 0x0000a833
+
+#define REG_A6XX_SP_HS_OBJ_START 0x0000a834
+#define A6XX_SP_HS_OBJ_START__MASK 0xffffffff
+#define A6XX_SP_HS_OBJ_START__SHIFT 0
+static inline uint32_t A6XX_SP_HS_OBJ_START(uint32_t val)
{
- return ((val) << A6XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_HS_CTRL_REG0_THREADSIZE__MASK;
+ return ((val) << A6XX_SP_HS_OBJ_START__SHIFT) & A6XX_SP_HS_OBJ_START__MASK;
}
-#define A6XX_SP_HS_CTRL_REG0_VARYING 0x00400000
-#define A6XX_SP_HS_CTRL_REG0_DIFF_FINE 0x00800000
-#define A6XX_SP_HS_CTRL_REG0_PIXLODENABLE 0x04000000
-#define A6XX_SP_HS_CTRL_REG0_MERGEDREGS 0x80000000
-#define REG_A6XX_SP_HS_UNKNOWN_A831 0x0000a831
-
-#define REG_A6XX_SP_HS_UNKNOWN_A833 0x0000a833
+#define REG_A6XX_SP_HS_PVT_MEM_PARAM 0x0000a836
+#define A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val >> 9) << A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
-#define REG_A6XX_SP_HS_OBJ_START_LO 0x0000a834
+#define REG_A6XX_SP_HS_PVT_MEM_ADDR 0x0000a837
+#define A6XX_SP_HS_PVT_MEM_ADDR__MASK 0xffffffff
+#define A6XX_SP_HS_PVT_MEM_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_HS_PVT_MEM_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_HS_PVT_MEM_ADDR__MASK;
+}
-#define REG_A6XX_SP_HS_OBJ_START_HI 0x0000a835
+#define REG_A6XX_SP_HS_PVT_MEM_SIZE 0x0000a839
+#define A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+ return ((val >> 12) << A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+#define A6XX_SP_HS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
#define REG_A6XX_SP_HS_TEX_COUNT 0x0000a83a
@@ -5761,7 +5191,7 @@ static inline uint32_t A6XX_SP_HS_CONFIG_NSAMP(uint32_t val)
{
return ((val) << A6XX_SP_HS_CONFIG_NSAMP__SHIFT) & A6XX_SP_HS_CONFIG_NSAMP__MASK;
}
-#define A6XX_SP_HS_CONFIG_NIBO__MASK 0x3fc00000
+#define A6XX_SP_HS_CONFIG_NIBO__MASK 0x1fc00000
#define A6XX_SP_HS_CONFIG_NIBO__SHIFT 22
static inline uint32_t A6XX_SP_HS_CONFIG_NIBO(uint32_t val)
{
@@ -5770,7 +5200,22 @@ static inline uint32_t A6XX_SP_HS_CONFIG_NIBO(uint32_t val)
#define REG_A6XX_SP_HS_INSTRLEN 0x0000a83c
+#define REG_A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET 0x0000a83d
+#define A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET__MASK 0x0007ffff
+#define A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET__SHIFT 0
+static inline uint32_t A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET(uint32_t val)
+{
+ return ((val >> 11) << A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET__SHIFT) & A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET__MASK;
+}
+
#define REG_A6XX_SP_DS_CTRL_REG0 0x0000a840
+#define A6XX_SP_DS_CTRL_REG0_MERGEDREGS 0x00100000
+#define A6XX_SP_DS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A6XX_SP_DS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A6XX_SP_DS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_DS_CTRL_REG0_THREADMODE__MASK;
+}
#define A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
#define A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
static inline uint32_t A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
@@ -5783,22 +5228,15 @@ static inline uint32_t A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
return ((val) << A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
}
+#define A6XX_SP_DS_CTRL_REG0_UNK13 0x00002000
#define A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
#define A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT 14
static inline uint32_t A6XX_SP_DS_CTRL_REG0_BRANCHSTACK(uint32_t val)
{
return ((val) << A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK;
}
-#define A6XX_SP_DS_CTRL_REG0_THREADSIZE__MASK 0x00100000
-#define A6XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT 20
-static inline uint32_t A6XX_SP_DS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
-{
- return ((val) << A6XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_DS_CTRL_REG0_THREADSIZE__MASK;
-}
-#define A6XX_SP_DS_CTRL_REG0_VARYING 0x00400000
-#define A6XX_SP_DS_CTRL_REG0_DIFF_FINE 0x00800000
-#define A6XX_SP_DS_CTRL_REG0_PIXLODENABLE 0x04000000
-#define A6XX_SP_DS_CTRL_REG0_MERGEDREGS 0x80000000
+
+#define REG_A6XX_SP_DS_BRANCH_COND 0x0000a841
#define REG_A6XX_SP_DS_PRIMITIVE_CNTL 0x0000a842
#define A6XX_SP_DS_PRIMITIVE_CNTL_OUT__MASK 0x0000003f
@@ -5807,6 +5245,12 @@ static inline uint32_t A6XX_SP_DS_PRIMITIVE_CNTL_OUT(uint32_t val)
{
return ((val) << A6XX_SP_DS_PRIMITIVE_CNTL_OUT__SHIFT) & A6XX_SP_DS_PRIMITIVE_CNTL_OUT__MASK;
}
+#define A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID__MASK 0x00003fc0
+#define A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT 6
+static inline uint32_t A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT) & A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID__MASK;
+}
static inline uint32_t REG_A6XX_SP_DS_OUT(uint32_t i0) { return 0x0000a843 + 0x1*i0; }
@@ -5864,11 +5308,46 @@ static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC3(uint32_t val)
return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK;
}
-#define REG_A6XX_SP_DS_UNKNOWN_A85B 0x0000a85b
+#define REG_A6XX_SP_DS_OBJ_FIRST_EXEC_OFFSET 0x0000a85b
-#define REG_A6XX_SP_DS_OBJ_START_LO 0x0000a85c
+#define REG_A6XX_SP_DS_OBJ_START 0x0000a85c
+#define A6XX_SP_DS_OBJ_START__MASK 0xffffffff
+#define A6XX_SP_DS_OBJ_START__SHIFT 0
+static inline uint32_t A6XX_SP_DS_OBJ_START(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_OBJ_START__SHIFT) & A6XX_SP_DS_OBJ_START__MASK;
+}
-#define REG_A6XX_SP_DS_OBJ_START_HI 0x0000a85d
+#define REG_A6XX_SP_DS_PVT_MEM_PARAM 0x0000a85e
+#define A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val >> 9) << A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A6XX_SP_DS_PVT_MEM_ADDR 0x0000a85f
+#define A6XX_SP_DS_PVT_MEM_ADDR__MASK 0xffffffff
+#define A6XX_SP_DS_PVT_MEM_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_DS_PVT_MEM_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_DS_PVT_MEM_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_DS_PVT_MEM_SIZE 0x0000a861
+#define A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+ return ((val >> 12) << A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+#define A6XX_SP_DS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
#define REG_A6XX_SP_DS_TEX_COUNT 0x0000a862
@@ -5890,7 +5369,7 @@ static inline uint32_t A6XX_SP_DS_CONFIG_NSAMP(uint32_t val)
{
return ((val) << A6XX_SP_DS_CONFIG_NSAMP__SHIFT) & A6XX_SP_DS_CONFIG_NSAMP__MASK;
}
-#define A6XX_SP_DS_CONFIG_NIBO__MASK 0x3fc00000
+#define A6XX_SP_DS_CONFIG_NIBO__MASK 0x1fc00000
#define A6XX_SP_DS_CONFIG_NIBO__SHIFT 22
static inline uint32_t A6XX_SP_DS_CONFIG_NIBO(uint32_t val)
{
@@ -5899,7 +5378,22 @@ static inline uint32_t A6XX_SP_DS_CONFIG_NIBO(uint32_t val)
#define REG_A6XX_SP_DS_INSTRLEN 0x0000a864
+#define REG_A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET 0x0000a865
+#define A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET__MASK 0x0007ffff
+#define A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET__SHIFT 0
+static inline uint32_t A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET(uint32_t val)
+{
+ return ((val >> 11) << A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET__SHIFT) & A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET__MASK;
+}
+
#define REG_A6XX_SP_GS_CTRL_REG0 0x0000a870
+#define A6XX_SP_GS_CTRL_REG0_UNK20 0x00100000
+#define A6XX_SP_GS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A6XX_SP_GS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A6XX_SP_GS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_GS_CTRL_REG0_THREADMODE__MASK;
+}
#define A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
#define A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
static inline uint32_t A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
@@ -5912,22 +5406,13 @@ static inline uint32_t A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
return ((val) << A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
}
+#define A6XX_SP_GS_CTRL_REG0_UNK13 0x00002000
#define A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
#define A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT 14
static inline uint32_t A6XX_SP_GS_CTRL_REG0_BRANCHSTACK(uint32_t val)
{
return ((val) << A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK;
}
-#define A6XX_SP_GS_CTRL_REG0_THREADSIZE__MASK 0x00100000
-#define A6XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT 20
-static inline uint32_t A6XX_SP_GS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
-{
- return ((val) << A6XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_GS_CTRL_REG0_THREADSIZE__MASK;
-}
-#define A6XX_SP_GS_CTRL_REG0_VARYING 0x00400000
-#define A6XX_SP_GS_CTRL_REG0_DIFF_FINE 0x00800000
-#define A6XX_SP_GS_CTRL_REG0_PIXLODENABLE 0x04000000
-#define A6XX_SP_GS_CTRL_REG0_MERGEDREGS 0x80000000
#define REG_A6XX_SP_GS_PRIM_SIZE 0x0000a871
@@ -6003,9 +5488,46 @@ static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC3(uint32_t val)
return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK;
}
-#define REG_A6XX_SP_GS_OBJ_START_LO 0x0000a88d
+#define REG_A6XX_SP_GS_OBJ_FIRST_EXEC_OFFSET 0x0000a88c
+
+#define REG_A6XX_SP_GS_OBJ_START 0x0000a88d
+#define A6XX_SP_GS_OBJ_START__MASK 0xffffffff
+#define A6XX_SP_GS_OBJ_START__SHIFT 0
+static inline uint32_t A6XX_SP_GS_OBJ_START(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_OBJ_START__SHIFT) & A6XX_SP_GS_OBJ_START__MASK;
+}
+
+#define REG_A6XX_SP_GS_PVT_MEM_PARAM 0x0000a88f
+#define A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val >> 9) << A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A6XX_SP_GS_PVT_MEM_ADDR 0x0000a890
+#define A6XX_SP_GS_PVT_MEM_ADDR__MASK 0xffffffff
+#define A6XX_SP_GS_PVT_MEM_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_GS_PVT_MEM_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_GS_PVT_MEM_ADDR__MASK;
+}
-#define REG_A6XX_SP_GS_OBJ_START_HI 0x0000a88e
+#define REG_A6XX_SP_GS_PVT_MEM_SIZE 0x0000a892
+#define A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+ return ((val >> 12) << A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+#define A6XX_SP_GS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
#define REG_A6XX_SP_GS_TEX_COUNT 0x0000a893
@@ -6027,7 +5549,7 @@ static inline uint32_t A6XX_SP_GS_CONFIG_NSAMP(uint32_t val)
{
return ((val) << A6XX_SP_GS_CONFIG_NSAMP__SHIFT) & A6XX_SP_GS_CONFIG_NSAMP__MASK;
}
-#define A6XX_SP_GS_CONFIG_NIBO__MASK 0x3fc00000
+#define A6XX_SP_GS_CONFIG_NIBO__MASK 0x1fc00000
#define A6XX_SP_GS_CONFIG_NIBO__SHIFT 22
static inline uint32_t A6XX_SP_GS_CONFIG_NIBO(uint32_t val)
{
@@ -6036,39 +5558,104 @@ static inline uint32_t A6XX_SP_GS_CONFIG_NIBO(uint32_t val)
#define REG_A6XX_SP_GS_INSTRLEN 0x0000a895
-#define REG_A6XX_SP_VS_TEX_SAMP_LO 0x0000a8a0
-
-#define REG_A6XX_SP_VS_TEX_SAMP_HI 0x0000a8a1
-
-#define REG_A6XX_SP_HS_TEX_SAMP_LO 0x0000a8a2
-
-#define REG_A6XX_SP_HS_TEX_SAMP_HI 0x0000a8a3
-
-#define REG_A6XX_SP_DS_TEX_SAMP_LO 0x0000a8a4
-
-#define REG_A6XX_SP_DS_TEX_SAMP_HI 0x0000a8a5
-
-#define REG_A6XX_SP_GS_TEX_SAMP_LO 0x0000a8a6
-
-#define REG_A6XX_SP_GS_TEX_SAMP_HI 0x0000a8a7
+#define REG_A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET 0x0000a896
+#define A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET__MASK 0x0007ffff
+#define A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET__SHIFT 0
+static inline uint32_t A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET(uint32_t val)
+{
+ return ((val >> 11) << A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET__SHIFT) & A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET__MASK;
+}
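
A side note on the PVT_MEM helpers above (illustrative, not part of the generated header): the right shift folded into each conversion suggests the hardware field is stored in coarser units — (val >> 9) in 512-byte units, (val >> 11) in 2 KiB units, (val >> 12) in 4 KiB units — so callers would pass byte counts already aligned to those sizes. A minimal sketch, with made-up variable names:

	/* per_item_bytes / total_bytes are hypothetical, assumed 512-byte and 4 KiB aligned */
	uint32_t param = A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM(per_item_bytes);
	uint32_t size  = A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(total_bytes) |
			 A6XX_SP_GS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT;
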
-#define REG_A6XX_SP_VS_TEX_CONST_LO 0x0000a8a8
+#define REG_A6XX_SP_VS_TEX_SAMP 0x0000a8a0
+#define A6XX_SP_VS_TEX_SAMP__MASK 0xffffffff
+#define A6XX_SP_VS_TEX_SAMP__SHIFT 0
+static inline uint32_t A6XX_SP_VS_TEX_SAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_TEX_SAMP__SHIFT) & A6XX_SP_VS_TEX_SAMP__MASK;
+}
-#define REG_A6XX_SP_VS_TEX_CONST_HI 0x0000a8a9
+#define REG_A6XX_SP_HS_TEX_SAMP 0x0000a8a2
+#define A6XX_SP_HS_TEX_SAMP__MASK 0xffffffff
+#define A6XX_SP_HS_TEX_SAMP__SHIFT 0
+static inline uint32_t A6XX_SP_HS_TEX_SAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_TEX_SAMP__SHIFT) & A6XX_SP_HS_TEX_SAMP__MASK;
+}
-#define REG_A6XX_SP_HS_TEX_CONST_LO 0x0000a8aa
+#define REG_A6XX_SP_DS_TEX_SAMP 0x0000a8a4
+#define A6XX_SP_DS_TEX_SAMP__MASK 0xffffffff
+#define A6XX_SP_DS_TEX_SAMP__SHIFT 0
+static inline uint32_t A6XX_SP_DS_TEX_SAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_TEX_SAMP__SHIFT) & A6XX_SP_DS_TEX_SAMP__MASK;
+}
-#define REG_A6XX_SP_HS_TEX_CONST_HI 0x0000a8ab
+#define REG_A6XX_SP_GS_TEX_SAMP 0x0000a8a6
+#define A6XX_SP_GS_TEX_SAMP__MASK 0xffffffff
+#define A6XX_SP_GS_TEX_SAMP__SHIFT 0
+static inline uint32_t A6XX_SP_GS_TEX_SAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_TEX_SAMP__SHIFT) & A6XX_SP_GS_TEX_SAMP__MASK;
+}
-#define REG_A6XX_SP_DS_TEX_CONST_LO 0x0000a8ac
+#define REG_A6XX_SP_VS_TEX_CONST 0x0000a8a8
+#define A6XX_SP_VS_TEX_CONST__MASK 0xffffffff
+#define A6XX_SP_VS_TEX_CONST__SHIFT 0
+static inline uint32_t A6XX_SP_VS_TEX_CONST(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_TEX_CONST__SHIFT) & A6XX_SP_VS_TEX_CONST__MASK;
+}
-#define REG_A6XX_SP_DS_TEX_CONST_HI 0x0000a8ad
+#define REG_A6XX_SP_HS_TEX_CONST 0x0000a8aa
+#define A6XX_SP_HS_TEX_CONST__MASK 0xffffffff
+#define A6XX_SP_HS_TEX_CONST__SHIFT 0
+static inline uint32_t A6XX_SP_HS_TEX_CONST(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_TEX_CONST__SHIFT) & A6XX_SP_HS_TEX_CONST__MASK;
+}
-#define REG_A6XX_SP_GS_TEX_CONST_LO 0x0000a8ae
+#define REG_A6XX_SP_DS_TEX_CONST 0x0000a8ac
+#define A6XX_SP_DS_TEX_CONST__MASK 0xffffffff
+#define A6XX_SP_DS_TEX_CONST__SHIFT 0
+static inline uint32_t A6XX_SP_DS_TEX_CONST(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_TEX_CONST__SHIFT) & A6XX_SP_DS_TEX_CONST__MASK;
+}
-#define REG_A6XX_SP_GS_TEX_CONST_HI 0x0000a8af
+#define REG_A6XX_SP_GS_TEX_CONST 0x0000a8ae
+#define A6XX_SP_GS_TEX_CONST__MASK 0xffffffff
+#define A6XX_SP_GS_TEX_CONST__SHIFT 0
+static inline uint32_t A6XX_SP_GS_TEX_CONST(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_TEX_CONST__SHIFT) & A6XX_SP_GS_TEX_CONST__MASK;
+}
#define REG_A6XX_SP_FS_CTRL_REG0 0x0000a980
+#define A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADSIZE(enum a6xx_threadsize val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_UNK21 0x00200000
+#define A6XX_SP_FS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_FS_CTRL_REG0_DIFF_FINE 0x00800000
+#define A6XX_SP_FS_CTRL_REG0_UNK24 0x01000000
+#define A6XX_SP_FS_CTRL_REG0_UNK25 0x02000000
+#define A6XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_FS_CTRL_REG0_UNK27__MASK 0x18000000
+#define A6XX_SP_FS_CTRL_REG0_UNK27__SHIFT 27
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_UNK27(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_UNK27__SHIFT) & A6XX_SP_FS_CTRL_REG0_UNK27__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_MERGEDREGS 0x80000000
+#define A6XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A6XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_FS_CTRL_REG0_THREADMODE__MASK;
+}
#define A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
#define A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
static inline uint32_t A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
@@ -6081,33 +5668,64 @@ static inline uint32_t A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
return ((val) << A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
}
+#define A6XX_SP_FS_CTRL_REG0_UNK13 0x00002000
#define A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
#define A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT 14
static inline uint32_t A6XX_SP_FS_CTRL_REG0_BRANCHSTACK(uint32_t val)
{
return ((val) << A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK;
}
-#define A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
-#define A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
-static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
-{
- return ((val) << A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
-}
-#define A6XX_SP_FS_CTRL_REG0_VARYING 0x00400000
-#define A6XX_SP_FS_CTRL_REG0_DIFF_FINE 0x00800000
-#define A6XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x04000000
-#define A6XX_SP_FS_CTRL_REG0_MERGEDREGS 0x80000000
#define REG_A6XX_SP_FS_BRANCH_COND 0x0000a981
-#define REG_A6XX_SP_UNKNOWN_A982 0x0000a982
+#define REG_A6XX_SP_FS_OBJ_FIRST_EXEC_OFFSET 0x0000a982
-#define REG_A6XX_SP_FS_OBJ_START_LO 0x0000a983
+#define REG_A6XX_SP_FS_OBJ_START 0x0000a983
+#define A6XX_SP_FS_OBJ_START__MASK 0xffffffff
+#define A6XX_SP_FS_OBJ_START__SHIFT 0
+static inline uint32_t A6XX_SP_FS_OBJ_START(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OBJ_START__SHIFT) & A6XX_SP_FS_OBJ_START__MASK;
+}
-#define REG_A6XX_SP_FS_OBJ_START_HI 0x0000a984
+#define REG_A6XX_SP_FS_PVT_MEM_PARAM 0x0000a985
+#define A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val >> 9) << A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A6XX_SP_FS_PVT_MEM_ADDR 0x0000a986
+#define A6XX_SP_FS_PVT_MEM_ADDR__MASK 0xffffffff
+#define A6XX_SP_FS_PVT_MEM_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_FS_PVT_MEM_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_FS_PVT_MEM_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_FS_PVT_MEM_SIZE 0x0000a988
+#define A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+ return ((val >> 12) << A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+#define A6XX_SP_FS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
#define REG_A6XX_SP_BLEND_CNTL 0x0000a989
-#define A6XX_SP_BLEND_CNTL_ENABLED 0x00000001
+#define A6XX_SP_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
+#define A6XX_SP_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
+static inline uint32_t A6XX_SP_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
+{
+ return ((val) << A6XX_SP_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A6XX_SP_BLEND_CNTL_ENABLE_BLEND__MASK;
+}
#define A6XX_SP_BLEND_CNTL_UNK8 0x00000100
#define A6XX_SP_BLEND_CNTL_DUAL_COLOR_IN_ENABLE 0x00000200
#define A6XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
@@ -6201,6 +5819,17 @@ static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL1_MRT(uint32_t val)
return ((val) << A6XX_SP_FS_OUTPUT_CNTL1_MRT__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL1_MRT__MASK;
}
+static inline uint32_t REG_A6XX_SP_FS_OUTPUT(uint32_t i0) { return 0x0000a98e + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000a98e + 0x1*i0; }
+#define A6XX_SP_FS_OUTPUT_REG_REGID__MASK 0x000000ff
+#define A6XX_SP_FS_OUTPUT_REG_REGID__SHIFT 0
+static inline uint32_t A6XX_SP_FS_OUTPUT_REG_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OUTPUT_REG_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_REG_REGID__MASK;
+}
+#define A6XX_SP_FS_OUTPUT_REG_HALF_PRECISION 0x00000100
+
static inline uint32_t REG_A6XX_SP_FS_MRT(uint32_t i0) { return 0x0000a996 + 0x1*i0; }
static inline uint32_t REG_A6XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000a996 + 0x1*i0; }
@@ -6212,6 +5841,7 @@ static inline uint32_t A6XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a6xx_format val)
}
#define A6XX_SP_FS_MRT_REG_COLOR_SINT 0x00000100
#define A6XX_SP_FS_MRT_REG_COLOR_UINT 0x00000200
+#define A6XX_SP_FS_MRT_REG_UNK10 0x00000400
#define REG_A6XX_SP_FS_PREFETCH_CNTL 0x0000a99e
#define A6XX_SP_FS_PREFETCH_CNTL_COUNT__MASK 0x00000007
@@ -6227,6 +5857,12 @@ static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_UNK4(uint32_t val)
{
return ((val) << A6XX_SP_FS_PREFETCH_CNTL_UNK4__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_UNK4__MASK;
}
+#define A6XX_SP_FS_PREFETCH_CNTL_UNK12__MASK 0x00007000
+#define A6XX_SP_FS_PREFETCH_CNTL_UNK12__SHIFT 12
+static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_UNK12(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PREFETCH_CNTL_UNK12__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_UNK12__MASK;
+}
static inline uint32_t REG_A6XX_SP_FS_PREFETCH(uint32_t i0) { return 0x0000a99f + 0x1*i0; }
@@ -6272,13 +5908,13 @@ static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_CMD(uint32_t val)
static inline uint32_t REG_A6XX_SP_FS_BINDLESS_PREFETCH(uint32_t i0) { return 0x0000a9a3 + 0x1*i0; }
static inline uint32_t REG_A6XX_SP_FS_BINDLESS_PREFETCH_CMD(uint32_t i0) { return 0x0000a9a3 + 0x1*i0; }
-#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__MASK 0x000000ff
+#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__MASK 0x0000ffff
#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__SHIFT 0
static inline uint32_t A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID(uint32_t val)
{
return ((val) << A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__SHIFT) & A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__MASK;
}
-#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__MASK 0x00ff0000
+#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__MASK 0xffff0000
#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__SHIFT 16
static inline uint32_t A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID(uint32_t val)
{
@@ -6289,50 +5925,31 @@ static inline uint32_t A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID(uint32_t val)
#define REG_A6XX_SP_UNKNOWN_A9A8 0x0000a9a8
-#define REG_A6XX_SP_CS_UNKNOWN_A9B1 0x0000a9b1
-#define A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE_2K__MASK 0x00000001
-#define A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE_2K__SHIFT 0
-static inline uint32_t A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE_2K(uint32_t val)
+#define REG_A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET 0x0000a9a9
+#define A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET__MASK 0x0007ffff
+#define A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET__SHIFT 0
+static inline uint32_t A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET(uint32_t val)
{
- return ((val) << A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE_2K__SHIFT) & A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE_2K__MASK;
+ return ((val >> 11) << A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET__SHIFT) & A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET__MASK;
}
-#define REG_A6XX_SP_CS_UNKNOWN_A9B3 0x0000a9b3
-
-#define REG_A6XX_SP_CS_TEX_COUNT 0x0000a9ba
-
-#define REG_A6XX_SP_FS_TEX_SAMP_LO 0x0000a9e0
-
-#define REG_A6XX_SP_FS_TEX_SAMP_HI 0x0000a9e1
-
-#define REG_A6XX_SP_CS_TEX_SAMP_LO 0x0000a9e2
-
-#define REG_A6XX_SP_CS_TEX_SAMP_HI 0x0000a9e3
-
-#define REG_A6XX_SP_FS_TEX_CONST_LO 0x0000a9e4
-
-#define REG_A6XX_SP_FS_TEX_CONST_HI 0x0000a9e5
-
-#define REG_A6XX_SP_CS_TEX_CONST_LO 0x0000a9e6
-
-#define REG_A6XX_SP_CS_TEX_CONST_HI 0x0000a9e7
-
-static inline uint32_t REG_A6XX_SP_CS_BINDLESS_BASE(uint32_t i0) { return 0x0000a9e8 + 0x2*i0; }
-
-static inline uint32_t REG_A6XX_SP_CS_BINDLESS_BASE_ADDR(uint32_t i0) { return 0x0000a9e8 + 0x2*i0; }
-
-static inline uint32_t REG_A6XX_SP_FS_OUTPUT(uint32_t i0) { return 0x0000a98e + 0x1*i0; }
-
-static inline uint32_t REG_A6XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000a98e + 0x1*i0; }
-#define A6XX_SP_FS_OUTPUT_REG_REGID__MASK 0x000000ff
-#define A6XX_SP_FS_OUTPUT_REG_REGID__SHIFT 0
-static inline uint32_t A6XX_SP_FS_OUTPUT_REG_REGID(uint32_t val)
+#define REG_A6XX_SP_CS_CTRL_REG0 0x0000a9b0
+#define A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_THREADSIZE(enum a6xx_threadsize val)
{
- return ((val) << A6XX_SP_FS_OUTPUT_REG_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_REG_REGID__MASK;
+ return ((val) << A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_UNK21 0x00200000
+#define A6XX_SP_CS_CTRL_REG0_UNK22 0x00400000
+#define A6XX_SP_CS_CTRL_REG0_SEPARATEPROLOG 0x00800000
+#define A6XX_SP_CS_CTRL_REG0_MERGEDREGS 0x80000000
+#define A6XX_SP_CS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A6XX_SP_CS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_CS_CTRL_REG0_THREADMODE__MASK;
}
-#define A6XX_SP_FS_OUTPUT_REG_HALF_PRECISION 0x00000100
-
-#define REG_A6XX_SP_CS_CTRL_REG0 0x0000a9b0
#define A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
#define A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
static inline uint32_t A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
@@ -6345,26 +5962,68 @@ static inline uint32_t A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
return ((val) << A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
}
+#define A6XX_SP_CS_CTRL_REG0_UNK13 0x00002000
#define A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
#define A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT 14
static inline uint32_t A6XX_SP_CS_CTRL_REG0_BRANCHSTACK(uint32_t val)
{
return ((val) << A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK;
}
-#define A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00100000
-#define A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 20
-static inline uint32_t A6XX_SP_CS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+
+#define REG_A6XX_SP_CS_UNKNOWN_A9B1 0x0000a9b1
+#define A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE__MASK 0x0000001f
+#define A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE__SHIFT 0
+static inline uint32_t A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE(uint32_t val)
{
- return ((val) << A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK;
+ return ((val) << A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE__SHIFT) & A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE__MASK;
}
-#define A6XX_SP_CS_CTRL_REG0_VARYING 0x00400000
-#define A6XX_SP_CS_CTRL_REG0_DIFF_FINE 0x00800000
-#define A6XX_SP_CS_CTRL_REG0_PIXLODENABLE 0x04000000
-#define A6XX_SP_CS_CTRL_REG0_MERGEDREGS 0x80000000
+#define A6XX_SP_CS_UNKNOWN_A9B1_UNK5 0x00000020
+#define A6XX_SP_CS_UNKNOWN_A9B1_UNK6 0x00000040
-#define REG_A6XX_SP_CS_OBJ_START_LO 0x0000a9b4
+#define REG_A6XX_SP_CS_BRANCH_COND 0x0000a9b2
-#define REG_A6XX_SP_CS_OBJ_START_HI 0x0000a9b5
+#define REG_A6XX_SP_CS_OBJ_FIRST_EXEC_OFFSET 0x0000a9b3
+
+#define REG_A6XX_SP_CS_OBJ_START 0x0000a9b4
+#define A6XX_SP_CS_OBJ_START__MASK 0xffffffff
+#define A6XX_SP_CS_OBJ_START__SHIFT 0
+static inline uint32_t A6XX_SP_CS_OBJ_START(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_OBJ_START__SHIFT) & A6XX_SP_CS_OBJ_START__MASK;
+}
+
+#define REG_A6XX_SP_CS_PVT_MEM_PARAM 0x0000a9b6
+#define A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val >> 9) << A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A6XX_SP_CS_PVT_MEM_ADDR 0x0000a9b7
+#define A6XX_SP_CS_PVT_MEM_ADDR__MASK 0xffffffff
+#define A6XX_SP_CS_PVT_MEM_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_CS_PVT_MEM_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_CS_PVT_MEM_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_CS_PVT_MEM_SIZE 0x0000a9b9
+#define A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+ return ((val >> 12) << A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+#define A6XX_SP_CS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
+
+#define REG_A6XX_SP_CS_TEX_COUNT 0x0000a9ba
#define REG_A6XX_SP_CS_CONFIG 0x0000a9bb
#define A6XX_SP_CS_CONFIG_BINDLESS_TEX 0x00000001
@@ -6384,7 +6043,7 @@ static inline uint32_t A6XX_SP_CS_CONFIG_NSAMP(uint32_t val)
{
return ((val) << A6XX_SP_CS_CONFIG_NSAMP__SHIFT) & A6XX_SP_CS_CONFIG_NSAMP__MASK;
}
-#define A6XX_SP_CS_CONFIG_NIBO__MASK 0x3fc00000
+#define A6XX_SP_CS_CONFIG_NIBO__MASK 0x1fc00000
#define A6XX_SP_CS_CONFIG_NIBO__SHIFT 22
static inline uint32_t A6XX_SP_CS_CONFIG_NIBO(uint32_t val)
{
@@ -6393,13 +6052,65 @@ static inline uint32_t A6XX_SP_CS_CONFIG_NIBO(uint32_t val)
#define REG_A6XX_SP_CS_INSTRLEN 0x0000a9bc
-#define REG_A6XX_SP_CS_IBO_LO 0x0000a9f2
+#define REG_A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET 0x0000a9bd
+#define A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET__MASK 0x0007ffff
+#define A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET__SHIFT 0
+static inline uint32_t A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET(uint32_t val)
+{
+ return ((val >> 11) << A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET__SHIFT) & A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET__MASK;
+}
-#define REG_A6XX_SP_CS_IBO_HI 0x0000a9f3
+#define REG_A6XX_SP_FS_TEX_SAMP 0x0000a9e0
+#define A6XX_SP_FS_TEX_SAMP__MASK 0xffffffff
+#define A6XX_SP_FS_TEX_SAMP__SHIFT 0
+static inline uint32_t A6XX_SP_FS_TEX_SAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_TEX_SAMP__SHIFT) & A6XX_SP_FS_TEX_SAMP__MASK;
+}
+
+#define REG_A6XX_SP_CS_TEX_SAMP 0x0000a9e2
+#define A6XX_SP_CS_TEX_SAMP__MASK 0xffffffff
+#define A6XX_SP_CS_TEX_SAMP__SHIFT 0
+static inline uint32_t A6XX_SP_CS_TEX_SAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_TEX_SAMP__SHIFT) & A6XX_SP_CS_TEX_SAMP__MASK;
+}
+
+#define REG_A6XX_SP_FS_TEX_CONST 0x0000a9e4
+#define A6XX_SP_FS_TEX_CONST__MASK 0xffffffff
+#define A6XX_SP_FS_TEX_CONST__SHIFT 0
+static inline uint32_t A6XX_SP_FS_TEX_CONST(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_TEX_CONST__SHIFT) & A6XX_SP_FS_TEX_CONST__MASK;
+}
+
+#define REG_A6XX_SP_CS_TEX_CONST 0x0000a9e6
+#define A6XX_SP_CS_TEX_CONST__MASK 0xffffffff
+#define A6XX_SP_CS_TEX_CONST__SHIFT 0
+static inline uint32_t A6XX_SP_CS_TEX_CONST(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_TEX_CONST__SHIFT) & A6XX_SP_CS_TEX_CONST__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_CS_BINDLESS_BASE(uint32_t i0) { return 0x0000a9e8 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_SP_CS_BINDLESS_BASE_ADDR(uint32_t i0) { return 0x0000a9e8 + 0x2*i0; }
+
+#define REG_A6XX_SP_CS_IBO 0x0000a9f2
+#define A6XX_SP_CS_IBO__MASK 0xffffffff
+#define A6XX_SP_CS_IBO__SHIFT 0
+static inline uint32_t A6XX_SP_CS_IBO(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_IBO__SHIFT) & A6XX_SP_CS_IBO__MASK;
+}
#define REG_A6XX_SP_CS_IBO_COUNT 0x0000aa00
-#define REG_A6XX_SP_UNKNOWN_AB00 0x0000ab00
+#define REG_A6XX_SP_MODE_CONTROL 0x0000ab00
+#define A6XX_SP_MODE_CONTROL_CONSTANT_DEMOTION_ENABLE 0x00000001
+#define A6XX_SP_MODE_CONTROL_UNK1 0x00000002
+#define A6XX_SP_MODE_CONTROL_UNK2 0x00000004
+#define A6XX_SP_MODE_CONTROL_SHARED_CONSTS_ENABLE 0x00000008
#define REG_A6XX_SP_FS_CONFIG 0x0000ab04
#define A6XX_SP_FS_CONFIG_BINDLESS_TEX 0x00000001
@@ -6419,7 +6130,7 @@ static inline uint32_t A6XX_SP_FS_CONFIG_NSAMP(uint32_t val)
{
return ((val) << A6XX_SP_FS_CONFIG_NSAMP__SHIFT) & A6XX_SP_FS_CONFIG_NSAMP__MASK;
}
-#define A6XX_SP_FS_CONFIG_NIBO__MASK 0x3fc00000
+#define A6XX_SP_FS_CONFIG_NIBO__MASK 0x1fc00000
#define A6XX_SP_FS_CONFIG_NIBO__SHIFT 22
static inline uint32_t A6XX_SP_FS_CONFIG_NIBO(uint32_t val)
{
@@ -6432,9 +6143,13 @@ static inline uint32_t REG_A6XX_SP_BINDLESS_BASE(uint32_t i0) { return 0x0000ab1
static inline uint32_t REG_A6XX_SP_BINDLESS_BASE_ADDR(uint32_t i0) { return 0x0000ab10 + 0x2*i0; }
-#define REG_A6XX_SP_IBO_LO 0x0000ab1a
-
-#define REG_A6XX_SP_IBO_HI 0x0000ab1b
+#define REG_A6XX_SP_IBO 0x0000ab1a
+#define A6XX_SP_IBO__MASK 0xffffffff
+#define A6XX_SP_IBO__SHIFT 0
+static inline uint32_t A6XX_SP_IBO(uint32_t val)
+{
+ return ((val) << A6XX_SP_IBO__SHIFT) & A6XX_SP_IBO__MASK;
+}
#define REG_A6XX_SP_IBO_COUNT 0x0000ab20
@@ -6458,18 +6173,41 @@ static inline uint32_t A6XX_SP_2D_DST_FORMAT_MASK(uint32_t val)
#define REG_A6XX_SP_UNKNOWN_AE00 0x0000ae00
+#define REG_A6XX_SP_ADDR_MODE_CNTL 0x0000ae01
+
+#define REG_A6XX_SP_NC_MODE_CNTL 0x0000ae02
+
#define REG_A6XX_SP_UNKNOWN_AE03 0x0000ae03
-#define REG_A6XX_SP_UNKNOWN_AE04 0x0000ae04
+#define REG_A6XX_SP_FLOAT_CNTL 0x0000ae04
+#define A6XX_SP_FLOAT_CNTL_F16_NO_INF 0x00000008
+
+#define REG_A6XX_SP_PERFCTR_ENABLE 0x0000ae0f
+#define A6XX_SP_PERFCTR_ENABLE_VS 0x00000001
+#define A6XX_SP_PERFCTR_ENABLE_HS 0x00000002
+#define A6XX_SP_PERFCTR_ENABLE_DS 0x00000004
+#define A6XX_SP_PERFCTR_ENABLE_GS 0x00000008
+#define A6XX_SP_PERFCTR_ENABLE_FS 0x00000010
+#define A6XX_SP_PERFCTR_ENABLE_CS 0x00000020
-#define REG_A6XX_SP_UNKNOWN_AE0F 0x0000ae0f
+static inline uint32_t REG_A6XX_SP_PERFCTR_SP_SEL(uint32_t i0) { return 0x0000ae10 + 0x1*i0; }
#define REG_A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR 0x0000b180
+#define A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR__MASK 0xffffffff
+#define A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR__SHIFT) & A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR__MASK;
+}
#define REG_A6XX_SP_UNKNOWN_B182 0x0000b182
#define REG_A6XX_SP_UNKNOWN_B183 0x0000b183
+#define REG_A6XX_SP_UNKNOWN_B190 0x0000b190
+
+#define REG_A6XX_SP_UNKNOWN_B191 0x0000b191
+
#define REG_A6XX_SP_TP_RAS_MSAA_CNTL 0x0000b300
#define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
#define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
@@ -6477,6 +6215,12 @@ static inline uint32_t A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples v
{
return ((val) << A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__MASK;
}
+#define A6XX_SP_TP_RAS_MSAA_CNTL_UNK2__MASK 0x0000000c
+#define A6XX_SP_TP_RAS_MSAA_CNTL_UNK2__SHIFT 2
+static inline uint32_t A6XX_SP_TP_RAS_MSAA_CNTL_UNK2(uint32_t val)
+{
+ return ((val) << A6XX_SP_TP_RAS_MSAA_CNTL_UNK2__SHIFT) & A6XX_SP_TP_RAS_MSAA_CNTL_UNK2__MASK;
+}
#define REG_A6XX_SP_TP_DEST_MSAA_CNTL 0x0000b301
#define A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
@@ -6488,10 +6232,12 @@ static inline uint32_t A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples
#define A6XX_SP_TP_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
#define REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR 0x0000b302
-
-#define REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR_LO 0x0000b302
-
-#define REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR_HI 0x0000b303
+#define A6XX_SP_TP_BORDER_COLOR_BASE_ADDR__MASK 0xffffffff
+#define A6XX_SP_TP_BORDER_COLOR_BASE_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_TP_BORDER_COLOR_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_SP_TP_BORDER_COLOR_BASE_ADDR__SHIFT) & A6XX_SP_TP_BORDER_COLOR_BASE_ADDR__MASK;
+}
#define REG_A6XX_SP_TP_SAMPLE_CONFIG 0x0000b304
#define A6XX_SP_TP_SAMPLE_CONFIG_UNK0 0x00000001
@@ -6597,6 +6343,20 @@ static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y(float val)
return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK;
}
+#define REG_A6XX_SP_TP_WINDOW_OFFSET 0x0000b307
+#define A6XX_SP_TP_WINDOW_OFFSET_X__MASK 0x00003fff
+#define A6XX_SP_TP_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_SP_TP_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A6XX_SP_TP_WINDOW_OFFSET_X__SHIFT) & A6XX_SP_TP_WINDOW_OFFSET_X__MASK;
+}
+#define A6XX_SP_TP_WINDOW_OFFSET_Y__MASK 0x3fff0000
+#define A6XX_SP_TP_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_SP_TP_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A6XX_SP_TP_WINDOW_OFFSET_Y__SHIFT) & A6XX_SP_TP_WINDOW_OFFSET_Y__MASK;
+}
+
#define REG_A6XX_SP_TP_UNKNOWN_B309 0x0000b309
#define REG_A6XX_SP_PS_2D_SRC_INFO 0x0000b4c0
@@ -6627,9 +6387,19 @@ static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_SAMPLES(enum a3xx_msaa_samples val
return ((val) << A6XX_SP_PS_2D_SRC_INFO_SAMPLES__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_SAMPLES__MASK;
}
#define A6XX_SP_PS_2D_SRC_INFO_FILTER 0x00010000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK17 0x00020000
#define A6XX_SP_PS_2D_SRC_INFO_SAMPLES_AVERAGE 0x00040000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK19 0x00080000
#define A6XX_SP_PS_2D_SRC_INFO_UNK20 0x00100000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK21 0x00200000
#define A6XX_SP_PS_2D_SRC_INFO_UNK22 0x00400000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK23__MASK 0x07800000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK23__SHIFT 23
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_UNK23(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_INFO_UNK23__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_UNK23__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_UNK28 0x10000000
#define REG_A6XX_SP_PS_2D_SRC_SIZE 0x0000b4c1
#define A6XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK 0x00007fff
@@ -6645,43 +6415,131 @@ static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_HEIGHT(uint32_t val)
return ((val) << A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT) & A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK;
}
-#define REG_A6XX_SP_PS_2D_SRC_LO 0x0000b4c2
-
-#define REG_A6XX_SP_PS_2D_SRC_HI 0x0000b4c3
-
#define REG_A6XX_SP_PS_2D_SRC 0x0000b4c2
+#define A6XX_SP_PS_2D_SRC__MASK 0xffffffff
+#define A6XX_SP_PS_2D_SRC__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC__SHIFT) & A6XX_SP_PS_2D_SRC__MASK;
+}
#define REG_A6XX_SP_PS_2D_SRC_PITCH 0x0000b4c4
-#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK 0x01fffe00
+#define A6XX_SP_PS_2D_SRC_PITCH_UNK0__MASK 0x000001ff
+#define A6XX_SP_PS_2D_SRC_PITCH_UNK0__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_PITCH_UNK0(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_PITCH_UNK0__SHIFT) & A6XX_SP_PS_2D_SRC_PITCH_UNK0__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK 0x00fffe00
#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT 9
static inline uint32_t A6XX_SP_PS_2D_SRC_PITCH_PITCH(uint32_t val)
{
return ((val >> 6) << A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK;
}
-#define REG_A6XX_SP_PS_2D_SRC_FLAGS_LO 0x0000b4ca
+#define REG_A6XX_SP_PS_2D_SRC_PLANE1 0x0000b4c5
+#define A6XX_SP_PS_2D_SRC_PLANE1__MASK 0xffffffff
+#define A6XX_SP_PS_2D_SRC_PLANE1__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_PLANE1(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_PLANE1__SHIFT) & A6XX_SP_PS_2D_SRC_PLANE1__MASK;
+}
-#define REG_A6XX_SP_PS_2D_SRC_FLAGS_HI 0x0000b4cb
+#define REG_A6XX_SP_PS_2D_SRC_PLANE_PITCH 0x0000b4c7
+#define A6XX_SP_PS_2D_SRC_PLANE_PITCH__MASK 0x00000fff
+#define A6XX_SP_PS_2D_SRC_PLANE_PITCH__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_PLANE_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_SP_PS_2D_SRC_PLANE_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_PLANE_PITCH__MASK;
+}
+
+#define REG_A6XX_SP_PS_2D_SRC_PLANE2 0x0000b4c8
+#define A6XX_SP_PS_2D_SRC_PLANE2__MASK 0xffffffff
+#define A6XX_SP_PS_2D_SRC_PLANE2__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_PLANE2(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_PLANE2__SHIFT) & A6XX_SP_PS_2D_SRC_PLANE2__MASK;
+}
#define REG_A6XX_SP_PS_2D_SRC_FLAGS 0x0000b4ca
+#define A6XX_SP_PS_2D_SRC_FLAGS__MASK 0xffffffff
+#define A6XX_SP_PS_2D_SRC_FLAGS__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_FLAGS(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_FLAGS__SHIFT) & A6XX_SP_PS_2D_SRC_FLAGS__MASK;
+}
#define REG_A6XX_SP_PS_2D_SRC_FLAGS_PITCH 0x0000b4cc
-#define A6XX_SP_PS_2D_SRC_FLAGS_PITCH_PITCH__MASK 0x000007ff
-#define A6XX_SP_PS_2D_SRC_FLAGS_PITCH_PITCH__SHIFT 0
-static inline uint32_t A6XX_SP_PS_2D_SRC_FLAGS_PITCH_PITCH(uint32_t val)
+#define A6XX_SP_PS_2D_SRC_FLAGS_PITCH__MASK 0x000000ff
+#define A6XX_SP_PS_2D_SRC_FLAGS_PITCH__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_FLAGS_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_SP_PS_2D_SRC_FLAGS_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_FLAGS_PITCH__MASK;
+}
+
+#define REG_A6XX_SP_PS_UNKNOWN_B4CD 0x0000b4cd
+
+#define REG_A6XX_SP_PS_UNKNOWN_B4CE 0x0000b4ce
+
+#define REG_A6XX_SP_PS_UNKNOWN_B4CF 0x0000b4cf
+
+#define REG_A6XX_SP_PS_UNKNOWN_B4D0 0x0000b4d0
+
+#define REG_A6XX_SP_WINDOW_OFFSET 0x0000b4d1
+#define A6XX_SP_WINDOW_OFFSET_X__MASK 0x00003fff
+#define A6XX_SP_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_SP_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A6XX_SP_WINDOW_OFFSET_X__SHIFT) & A6XX_SP_WINDOW_OFFSET_X__MASK;
+}
+#define A6XX_SP_WINDOW_OFFSET_Y__MASK 0x3fff0000
+#define A6XX_SP_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_SP_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A6XX_SP_WINDOW_OFFSET_Y__SHIFT) & A6XX_SP_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A6XX_TPL1_UNKNOWN_B600 0x0000b600
+
+#define REG_A6XX_TPL1_ADDR_MODE_CNTL 0x0000b601
+
+#define REG_A6XX_TPL1_UNKNOWN_B602 0x0000b602
+
+#define REG_A6XX_TPL1_NC_MODE_CNTL 0x0000b604
+#define A6XX_TPL1_NC_MODE_CNTL_MODE 0x00000001
+#define A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT__MASK 0x00000006
+#define A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT__SHIFT 1
+static inline uint32_t A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT(uint32_t val)
+{
+ return ((val) << A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT__SHIFT) & A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT__MASK;
+}
+#define A6XX_TPL1_NC_MODE_CNTL_MIN_ACCESS_LENGTH 0x00000008
+#define A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT__MASK 0x00000010
+#define A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT__SHIFT 4
+static inline uint32_t A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT(uint32_t val)
{
- return ((val >> 6) << A6XX_SP_PS_2D_SRC_FLAGS_PITCH_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_FLAGS_PITCH_PITCH__MASK;
+ return ((val) << A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT__SHIFT) & A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT__MASK;
}
-#define A6XX_SP_PS_2D_SRC_FLAGS_PITCH_ARRAY_PITCH__MASK 0x003ff800
-#define A6XX_SP_PS_2D_SRC_FLAGS_PITCH_ARRAY_PITCH__SHIFT 11
-static inline uint32_t A6XX_SP_PS_2D_SRC_FLAGS_PITCH_ARRAY_PITCH(uint32_t val)
+#define A6XX_TPL1_NC_MODE_CNTL_UNK6__MASK 0x000000c0
+#define A6XX_TPL1_NC_MODE_CNTL_UNK6__SHIFT 6
+static inline uint32_t A6XX_TPL1_NC_MODE_CNTL_UNK6(uint32_t val)
{
- return ((val >> 7) << A6XX_SP_PS_2D_SRC_FLAGS_PITCH_ARRAY_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_FLAGS_PITCH_ARRAY_PITCH__MASK;
+ return ((val) << A6XX_TPL1_NC_MODE_CNTL_UNK6__SHIFT) & A6XX_TPL1_NC_MODE_CNTL_UNK6__MASK;
}
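
Note (illustrative, not part of the generated header): these newly named TPL1_NC_MODE_CNTL fields correspond to the raw shift used when the register is programmed in a6xx_set_ubwc_config(), in the a6xx_gpu.c hunks later in this diff (gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, lower_bit << 1)). For values that fit the two-bit field, that write could equivalently use the helper; a sketch:

	/* illustrative only; lower_bit as computed in a6xx_set_ubwc_config() (0..3) */
	gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL,
		  A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT(lower_bit));
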
-#define REG_A6XX_SP_UNKNOWN_B600 0x0000b600
+#define REG_A6XX_TPL1_UNKNOWN_B605 0x0000b605
-#define REG_A6XX_SP_UNKNOWN_B605 0x0000b605
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0 0x0000b608
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1 0x0000b609
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2 0x0000b60a
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3 0x0000b60b
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4 0x0000b60c
+
+static inline uint32_t REG_A6XX_TPL1_PERFCTR_TP_SEL(uint32_t i0) { return 0x0000b610 + 0x1*i0; }
#define REG_A6XX_HLSQ_VS_CNTL 0x0000b800
#define A6XX_HLSQ_VS_CNTL_CONSTLEN__MASK 0x000000ff
@@ -6722,10 +6580,31 @@ static inline uint32_t A6XX_HLSQ_GS_CNTL_CONSTLEN(uint32_t val)
#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_CMD 0x0000b820
#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR 0x0000b821
+#define A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR__MASK 0xffffffff
+#define A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR__SHIFT 0
+static inline uint32_t A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR__SHIFT) & A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR__MASK;
+}
#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_DATA 0x0000b823
-#define REG_A6XX_HLSQ_UNKNOWN_B980 0x0000b980
+#define REG_A6XX_HLSQ_FS_CNTL_0 0x0000b980
+#define A6XX_HLSQ_FS_CNTL_0_THREADSIZE__MASK 0x00000001
+#define A6XX_HLSQ_FS_CNTL_0_THREADSIZE__SHIFT 0
+static inline uint32_t A6XX_HLSQ_FS_CNTL_0_THREADSIZE(enum a6xx_threadsize val)
+{
+ return ((val) << A6XX_HLSQ_FS_CNTL_0_THREADSIZE__SHIFT) & A6XX_HLSQ_FS_CNTL_0_THREADSIZE__MASK;
+}
+#define A6XX_HLSQ_FS_CNTL_0_VARYINGS 0x00000002
+#define A6XX_HLSQ_FS_CNTL_0_UNK2__MASK 0x00000ffc
+#define A6XX_HLSQ_FS_CNTL_0_UNK2__SHIFT 2
+static inline uint32_t A6XX_HLSQ_FS_CNTL_0_UNK2(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_FS_CNTL_0_UNK2__SHIFT) & A6XX_HLSQ_FS_CNTL_0_UNK2__MASK;
+}
+
+#define REG_A6XX_HLSQ_UNKNOWN_B981 0x0000b981
#define REG_A6XX_HLSQ_CONTROL_1_REG 0x0000b982
@@ -6808,6 +6687,18 @@ static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val)
}
#define REG_A6XX_HLSQ_CONTROL_5_REG 0x0000b986
+#define A6XX_HLSQ_CONTROL_5_REG_UNK0__MASK 0x000000ff
+#define A6XX_HLSQ_CONTROL_5_REG_UNK0__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CONTROL_5_REG_UNK0(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_5_REG_UNK0__SHIFT) & A6XX_HLSQ_CONTROL_5_REG_UNK0__MASK;
+}
+#define A6XX_HLSQ_CONTROL_5_REG_UNK8__MASK 0x0000ff00
+#define A6XX_HLSQ_CONTROL_5_REG_UNK8__SHIFT 8
+static inline uint32_t A6XX_HLSQ_CONTROL_5_REG_UNK8(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_5_REG_UNK8__SHIFT) & A6XX_HLSQ_CONTROL_5_REG_UNK8__MASK;
+}
#define REG_A6XX_HLSQ_CS_CNTL 0x0000b987
#define A6XX_HLSQ_CS_CNTL_CONSTLEN__MASK 0x000000ff
@@ -6899,17 +6790,17 @@ static inline uint32_t A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(uint32_t val)
{
return ((val) << A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK;
}
-#define A6XX_HLSQ_CS_CNTL_0_UNK0__MASK 0x0000ff00
-#define A6XX_HLSQ_CS_CNTL_0_UNK0__SHIFT 8
-static inline uint32_t A6XX_HLSQ_CS_CNTL_0_UNK0(uint32_t val)
+#define A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID__MASK 0x0000ff00
+#define A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID__SHIFT 8
+static inline uint32_t A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID(uint32_t val)
{
- return ((val) << A6XX_HLSQ_CS_CNTL_0_UNK0__SHIFT) & A6XX_HLSQ_CS_CNTL_0_UNK0__MASK;
+ return ((val) << A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID__MASK;
}
-#define A6XX_HLSQ_CS_CNTL_0_UNK1__MASK 0x00ff0000
-#define A6XX_HLSQ_CS_CNTL_0_UNK1__SHIFT 16
-static inline uint32_t A6XX_HLSQ_CS_CNTL_0_UNK1(uint32_t val)
+#define A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID__MASK 0x00ff0000
+#define A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID__SHIFT 16
+static inline uint32_t A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID(uint32_t val)
{
- return ((val) << A6XX_HLSQ_CS_CNTL_0_UNK1__SHIFT) & A6XX_HLSQ_CS_CNTL_0_UNK1__MASK;
+ return ((val) << A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID__MASK;
}
#define A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK 0xff000000
#define A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT 24
@@ -6918,7 +6809,21 @@ static inline uint32_t A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(uint32_t val)
return ((val) << A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK;
}
-#define REG_A6XX_HLSQ_CS_UNKNOWN_B998 0x0000b998
+#define REG_A6XX_HLSQ_CS_CNTL_1 0x0000b998
+#define A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__MASK 0x000000ff
+#define A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT) & A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_1_SINGLE_SP_CORE 0x00000100
+#define A6XX_HLSQ_CS_CNTL_1_THREADSIZE__MASK 0x00000200
+#define A6XX_HLSQ_CS_CNTL_1_THREADSIZE__SHIFT 9
+static inline uint32_t A6XX_HLSQ_CS_CNTL_1_THREADSIZE(enum a6xx_threadsize val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_1_THREADSIZE__SHIFT) & A6XX_HLSQ_CS_CNTL_1_THREADSIZE__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_1_THREADSIZE_SCALAR 0x00000400
#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_X 0x0000b999
@@ -6929,6 +6834,12 @@ static inline uint32_t A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(uint32_t val)
#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_CMD 0x0000b9a0
#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR 0x0000b9a1
+#define A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR__MASK 0xffffffff
+#define A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR__SHIFT 0
+static inline uint32_t A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR__SHIFT) & A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR__MASK;
+}
#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_DATA 0x0000b9a3
@@ -7026,6 +6937,12 @@ static inline uint32_t A6XX_HLSQ_2D_EVENT_CMD_EVENT(enum vgt_event_type val)
#define REG_A6XX_HLSQ_UNKNOWN_BE04 0x0000be04
+#define REG_A6XX_HLSQ_ADDR_MODE_CNTL 0x0000be05
+
+#define REG_A6XX_HLSQ_UNKNOWN_BE08 0x0000be08
+
+static inline uint32_t REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL(uint32_t i0) { return 0x0000be10 + 0x1*i0; }
+
#define REG_A6XX_CP_EVENT_START 0x0000d600
#define A6XX_CP_EVENT_START_STATE_ID__MASK 0x000000ff
#define A6XX_CP_EVENT_START_STATE_ID__SHIFT 0
@@ -7135,11 +7052,11 @@ static inline uint32_t A6XX_TEX_SAMP_2_REDUCTION_MODE(enum a6xx_reduction_mode v
return ((val) << A6XX_TEX_SAMP_2_REDUCTION_MODE__SHIFT) & A6XX_TEX_SAMP_2_REDUCTION_MODE__MASK;
}
#define A6XX_TEX_SAMP_2_CHROMA_LINEAR 0x00000020
-#define A6XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK 0xffffff80
-#define A6XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT 7
-static inline uint32_t A6XX_TEX_SAMP_2_BCOLOR_OFFSET(uint32_t val)
+#define A6XX_TEX_SAMP_2_BCOLOR__MASK 0xffffff80
+#define A6XX_TEX_SAMP_2_BCOLOR__SHIFT 7
+static inline uint32_t A6XX_TEX_SAMP_2_BCOLOR(uint32_t val)
{
- return ((val >> 7) << A6XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT) & A6XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK;
+ return ((val) << A6XX_TEX_SAMP_2_BCOLOR__SHIFT) & A6XX_TEX_SAMP_2_BCOLOR__MASK;
}
#define REG_A6XX_TEX_SAMP_3 0x00000003
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 3d55e153fa9c..b349692219b7 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -512,19 +512,26 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct platform_device *pdev = to_platform_device(gmu->dev);
void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
- void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
+ void __iomem *seqptr;
uint32_t pdc_address_offset;
+ bool pdc_in_aop = false;
- if (!pdcptr || !seqptr)
+ if (!pdcptr)
goto err;
- if (adreno_is_a618(adreno_gpu) || adreno_is_a640(adreno_gpu))
+ if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
+ pdc_in_aop = true;
+ else if (adreno_is_a618(adreno_gpu) || adreno_is_a640(adreno_gpu))
pdc_address_offset = 0x30090;
- else if (adreno_is_a650(adreno_gpu))
- pdc_address_offset = 0x300a0;
else
pdc_address_offset = 0x30080;
+ if (!pdc_in_aop) {
+ seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
+ if (!seqptr)
+ goto err;
+ }
+
/* Disable SDE clock gating */
gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
@@ -542,7 +549,7 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
/* Load RSC sequencer uCode for sleep and wakeup */
- if (adreno_is_a650(adreno_gpu)) {
+ if (adreno_is_a650_family(adreno_gpu)) {
gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0);
gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab);
gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581);
@@ -556,6 +563,9 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
}
+ if (pdc_in_aop)
+ goto setup_pdc;
+
/* Load PDC sequencer uCode for power up and power down sequence */
pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
@@ -587,7 +597,7 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
- if (adreno_is_a618(adreno_gpu) || adreno_is_a650(adreno_gpu))
+ if (adreno_is_a618(adreno_gpu) || adreno_is_a650_family(adreno_gpu))
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
else
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
@@ -596,6 +606,7 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
/* Setup GPU PDC */
+setup_pdc:
pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);
@@ -687,7 +698,7 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
u32 itcm_base = 0x00000000;
u32 dtcm_base = 0x00040000;
- if (adreno_is_a650(adreno_gpu))
+ if (adreno_is_a650_family(adreno_gpu))
dtcm_base = 0x10004000;
if (gmu->legacy) {
@@ -740,8 +751,10 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
int ret;
u32 chipid;
- if (adreno_is_a650(adreno_gpu))
+ if (adreno_is_a650_family(adreno_gpu)) {
+ gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1);
gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
+ }
if (state == GMU_WARM_BOOT) {
ret = a6xx_rpmh_start(gmu);
@@ -1346,7 +1359,7 @@ static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
* The GMU handles its own frequency switching so build a list of
* available frequencies to send during initialization
*/
- ret = dev_pm_opp_of_add_table(gmu->dev);
+ ret = devm_pm_opp_of_add_table(gmu->dev);
if (ret) {
DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
return ret;
@@ -1483,12 +1496,28 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
if (ret)
goto err_put_device;
+
+ /* A660 now requires handling "prealloc requests" in GMU firmware.
+ * For now, just hardcode allocations based on the known firmware.
+ * Note: there is no indication that these correspond to "dummy" or
+ * "debug" regions, but this "guess" allows reusing these BOs, which
+ * are otherwise unused by a660.
+ */
+ gmu->dummy.size = SZ_4K;
+ if (adreno_is_a660(adreno_gpu)) {
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7, 0x60400000);
+ if (ret)
+ goto err_memory;
+
+ gmu->dummy.size = SZ_8K;
+ }
+
/* Allocate memory for the GMU dummy page */
- ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, SZ_4K, 0x60000000);
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size, 0x60000000);
if (ret)
goto err_memory;
- if (adreno_is_a650(adreno_gpu)) {
+ if (adreno_is_a650_family(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
SZ_16M - SZ_16K, 0x04000);
if (ret)
@@ -1530,7 +1559,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
goto err_memory;
}
- if (adreno_is_a650(adreno_gpu)) {
+ if (adreno_is_a650_family(adreno_gpu)) {
gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
if (IS_ERR(gmu->rscc))
goto err_mmio;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
index 5a43d3090b0c..811589215834 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
@@ -8,21 +8,21 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/adreno.xml ( 594 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 90159 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14386 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 65048 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 84226 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112556 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 149461 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 184695 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 11218 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_control_regs.xml ( 4559 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pipe_regs.xml ( 2872 bytes, from 2020-07-23 21:58:14)
-
-Copyright (C) 2013-2020 by the following authors:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14386 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 67699 bytes, from 2021-05-31 20:21:57)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84226 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 112551 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 150713 bytes, from 2021-06-10 22:34:02)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 180049 bytes, from 2021-06-02 21:44:19)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-05-27 20:22:36)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-05-27 20:18:13)
+
+Copyright (C) 2013-2021 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -292,6 +292,8 @@ static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
#define REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF 0x000050f0
+#define REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF 0x000050f1
+
#define REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG 0x00005100
#define REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP 0x00005101
@@ -439,6 +441,8 @@ static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
#define REG_A6XX_GPU_CC_GX_DOMAIN_MISC 0x00009d42
+#define REG_A6XX_GPU_CPR_FSM_CTL 0x0000c001
+
#define REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0 0x00000004
#define REG_A6XX_RSCC_PDC_SEQ_START_ADDR 0x00000008
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index b4d8e1b01ee4..9c5e4618aa0a 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -149,7 +149,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
- get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
+ get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
rbmemptr_stats(ring, index, cpcycles_start));
/*
@@ -157,7 +157,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
* GPU registers so we need to add 0x1a800 to the register value on A630
* to get the right value from PM4.
*/
- get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+ get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
rbmemptr_stats(ring, index, alwayson_start));
/* Invalidate CCU depth and color */
@@ -185,9 +185,9 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
}
}
- get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
+ get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
rbmemptr_stats(ring, index, cpcycles_end));
- get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+ get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
rbmemptr_stats(ring, index, alwayson_end));
/* Write the fence to the scratch register */
@@ -206,8 +206,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_RING(ring, submit->seqno);
trace_msm_gpu_submit_flush(submit,
- gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L,
- REG_A6XX_GMU_ALWAYS_ON_COUNTER_H));
+ gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+ REG_A6XX_CP_ALWAYS_ON_COUNTER_HI));
a6xx_flush(gpu, ring);
}
@@ -427,6 +427,59 @@ const struct adreno_reglist a650_hwcg[] = {
{},
};
+const struct adreno_reglist a660_hwcg[] = {
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+ {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+ {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
+ {REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
+ {REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+ {},
+};
+
static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -462,6 +515,162 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
}
+/* For a615, a616, a618, a619, a630, a640 and a680 */
+static const u32 a6xx_protect[] = {
+ A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+ A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+ A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+ A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+ A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+ A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+ A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+ A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+ A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+ A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+ A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+ A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+ A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+ A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+ A6XX_PROTECT_NORDWR(0x09e70, 0x0001),
+ A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+ A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
+ A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+ A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
+ A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+ A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x11c00, 0x0000), /* note: infinite range */
+};
+
+/* These are for a620 and a650 */
+static const u32 a650_protect[] = {
+ A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+ A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+ A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+ A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+ A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+ A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+ A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+ A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+ A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+ A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+ A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+ A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+ A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+ A6XX_PROTECT_NORDWR(0x08e80, 0x027f),
+ A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+ A6XX_PROTECT_NORDWR(0x09e60, 0x0011),
+ A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+ A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
+ A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0b608, 0x0007),
+ A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+ A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
+ A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+ A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x18400, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1a800, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
+ A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
+ A6XX_PROTECT_NORDWR(0x1f887, 0x001b),
+ A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */
+};
+
+/* These are for a635 and a660 */
+static const u32 a660_protect[] = {
+ A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+ A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+ A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+ A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+ A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+ A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+ A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+ A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+ A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+ A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+ A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+ A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+ A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+ A6XX_PROTECT_NORDWR(0x08e80, 0x027f),
+ A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+ A6XX_PROTECT_NORDWR(0x09e60, 0x0011),
+ A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+ A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0ae50, 0x012f),
+ A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0b608, 0x0006),
+ A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+ A6XX_PROTECT_NORDWR(0x0be20, 0x015f),
+ A6XX_PROTECT_NORDWR(0x0d000, 0x05ff),
+ A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+ A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x18400, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1a400, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
+ A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
+ A6XX_PROTECT_NORDWR(0x1f860, 0x0000),
+ A6XX_PROTECT_NORDWR(0x1f887, 0x001b),
+ A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */
+};
+
+static void a6xx_set_cp_protect(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ const u32 *regs = a6xx_protect;
+ unsigned i, count = ARRAY_SIZE(a6xx_protect), count_max = 32;
+
+ BUILD_BUG_ON(ARRAY_SIZE(a6xx_protect) > 32);
+ BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48);
+
+ if (adreno_is_a650(adreno_gpu)) {
+ regs = a650_protect;
+ count = ARRAY_SIZE(a650_protect);
+ count_max = 48;
+ } else if (adreno_is_a660(adreno_gpu)) {
+ regs = a660_protect;
+ count = ARRAY_SIZE(a660_protect);
+ count_max = 48;
+ }
+
+ /*
+ * Enable access protection to privileged registers, fault on an access
+ * protect violation, and select the last span to protect from its start
+ * address all the way to the end of the register address space.
+ */
+ gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, BIT(0) | BIT(1) | BIT(3));
+
+ for (i = 0; i < count - 1; i++)
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(i), regs[i]);
+ /* write the last entry to the last CP_PROTECT register so it gets "infinite" length */
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]);
+}
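
A quick, self-contained illustration of what one entry in these tables encodes may help when reading them: each A6XX_PROTECT_* value packs a base register offset and a span length into a single 32-bit CP_PROTECT word. The NORDWR layout is taken from the macro added to a6xx_gpu.h later in this patch; the RDONLY layout is assumed to be the same packing with bit 31 clear. Illustrative sketch only, not part of the patch:

/* Illustrative only, not part of the patch; builds with any C compiler. */
#include <stdio.h>

/* NORDWR layout as added to a6xx_gpu.h in this patch; RDONLY is assumed
 * to be the identical packing without bit 31 set. */
#define A6XX_PROTECT_NORDWR(_reg, _len) \
	((1U << 31) | (((_len) & 0x3FFFU) << 18) | ((_reg) & 0x3FFFFU))
#define A6XX_PROTECT_RDONLY(_reg, _len) \
	((((_len) & 0x3FFFU) << 18) | ((_reg) & 0x3FFFFU))

int main(void)
{
	/* first and last entries of the a6xx_protect[] table above */
	printf("RDONLY(0x00000, 0x04ff) = 0x%08x\n",
	       A6XX_PROTECT_RDONLY(0x00000, 0x04ff));
	printf("NORDWR(0x11c00, 0x0000) = 0x%08x\n",
	       A6XX_PROTECT_NORDWR(0x11c00, 0x0000));
	return 0;
}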
+
static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -477,7 +686,7 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
if (adreno_is_a640(adreno_gpu))
amsbc = 1;
- if (adreno_is_a650(adreno_gpu)) {
+ if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu)) {
/* TODO: get ddr type from bootloader and use 2 for LPDDR4 */
lower_bit = 3;
amsbc = 1;
@@ -489,7 +698,7 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
rgb565_predicator << 11 | amsbc << 4 | lower_bit << 1);
gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, lower_bit << 1);
gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
- uavflagprd_inv >> 4 | lower_bit << 1);
+ uavflagprd_inv << 4 | lower_bit << 1);
gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, lower_bit << 21);
}
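
Worth noting: the change from uavflagprd_inv >> 4 to uavflagprd_inv << 4 is a behavioural fix, not a cleanup. For any value of uavflagprd_inv below 16, the right shift evaluates to 0, so the field in SP_NC_MODE_CNTL was never programmed. A minimal standalone sketch (the value 2 is an assumed example, not taken from this hunk):

/* Illustrative only, not part of the patch. */
#include <stdio.h>

int main(void)
{
	unsigned int uavflagprd_inv = 2;	/* assumed example value */
	unsigned int lower_bit = 3;

	/* old (broken) and new encodings of SP_NC_MODE_CNTL */
	printf("old: 0x%08x\n", uavflagprd_inv >> 4 | lower_bit << 1); /* field lost */
	printf("new: 0x%08x\n", uavflagprd_inv << 4 | lower_bit << 1); /* 0x00000026 */
	return 0;
}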
@@ -541,6 +750,11 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
* Targets up to a640 (a618, a630 and a640) need to check for a
* microcode version that is patched to support the whereami opcode or
* one that is new enough to include it by default.
+ *
+ * a650 tier targets don't need whereami, but still need to be
+ * equal to or newer than 0.95 for other security fixes.
+ *
+ * a660 targets have all the critical security fixes from the start.
*/
if (adreno_is_a618(adreno_gpu) || adreno_is_a630(adreno_gpu) ||
adreno_is_a640(adreno_gpu)) {
@@ -564,27 +778,20 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
DRM_DEV_ERROR(&gpu->pdev->dev,
"a630 SQE ucode is too old. Have version %x need at least %x\n",
buf[0] & 0xfff, 0x190);
- } else {
- /*
- * a650 tier targets don't need whereami but still need to be
- * equal to or newer than 0.95 for other security fixes
- */
- if (adreno_is_a650(adreno_gpu)) {
- if ((buf[0] & 0xfff) >= 0x095) {
- ret = true;
- goto out;
- }
-
- DRM_DEV_ERROR(&gpu->pdev->dev,
- "a650 SQE ucode is too old. Have version %x need at least %x\n",
- buf[0] & 0xfff, 0x095);
+ } else if (adreno_is_a650(adreno_gpu)) {
+ if ((buf[0] & 0xfff) >= 0x095) {
+ ret = true;
+ goto out;
}
- /*
- * When a660 is added those targets should return true here
- * since those have all the critical security fixes built in
- * from the start
- */
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "a650 SQE ucode is too old. Have version %x need at least %x\n",
+ buf[0] & 0xfff, 0x095);
+ } else if (adreno_is_a660(adreno_gpu)) {
+ ret = true;
+ } else {
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "unknown GPU, add it to a6xx_ucode_check_version()!!\n");
}
out:
msm_gem_put_vaddr(obj);
@@ -620,8 +827,8 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
}
}
- gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
- REG_A6XX_CP_SQE_INSTR_BASE_HI, a6xx_gpu->sqe_iova);
+ gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE,
+ REG_A6XX_CP_SQE_INSTR_BASE+1, a6xx_gpu->sqe_iova);
return 0;
}
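
The switch from the _LO/_HI register names to REG_A6XX_CP_SQE_INSTR_BASE and REG_A6XX_CP_SQE_INSTR_BASE+1 relies on the two halves being adjacent; gpu_write64() is assumed to write the low dword to the first register and the high dword to the second, as the old names suggest. A hedged standalone sketch of that split (the register offset and the iova are placeholders, not values from this patch):

/* Illustrative only, not part of the patch. */
#include <stdint.h>
#include <stdio.h>

/* Stand-in for gpu_write64(): the low dword goes to the first register,
 * the high dword to the one that follows it. */
static void fake_gpu_write64(unsigned int reg_lo, unsigned int reg_hi,
			     uint64_t value)
{
	printf("write 0x%05x = 0x%08x\n", reg_lo, (uint32_t)value);
	printf("write 0x%05x = 0x%08x\n", reg_hi, (uint32_t)(value >> 32));
}

int main(void)
{
	uint64_t sqe_iova = 0x0000000100001000ULL;	/* hypothetical iova */
	unsigned int sqe_instr_base = 0x830;		/* placeholder offset */

	fake_gpu_write64(sqe_instr_base, sqe_instr_base + 1, sqe_iova);
	return 0;
}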
@@ -690,7 +897,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
a6xx_set_hwcg(gpu, true);
/* VBIF/GBIF start */
- if (adreno_is_a640(adreno_gpu) || adreno_is_a650(adreno_gpu)) {
+ if (adreno_is_a640(adreno_gpu) || adreno_is_a650_family(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
@@ -715,7 +922,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);
- if (!adreno_is_a650(adreno_gpu)) {
+ if (!adreno_is_a650_family(adreno_gpu)) {
/* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
@@ -728,22 +935,27 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
- if (adreno_is_a640(adreno_gpu) || adreno_is_a650(adreno_gpu))
+ if (adreno_is_a640(adreno_gpu) || adreno_is_a650_family(adreno_gpu))
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
else
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
+ if (adreno_is_a660(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020);
+
/* Setting the mem pool size */
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
- /* Setting the primFifo thresholds default values */
- if (adreno_is_a650(adreno_gpu))
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300000);
+ /* Set the primFifo threshold default values, and the
+  * vccCacheSkipDis=1 bit (0x200) for a640 and newer
+  */
+ if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
else if (adreno_is_a640(adreno_gpu))
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200000);
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200200);
else
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00180000);
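
Two details of this hunk are easy to verify: the else branch is only a reformat, because (0x300 << 11) equals 0x00180000, and the new a640/a650/a660 values differ from the old ones only by bit 9 (0x200), the vccCacheSkipDis bit named in the comment. A tiny standalone check:

/* Illustrative only, not part of the patch. */
#include <stdio.h>

int main(void)
{
	/* the old (0x300 << 11) expression equals the new literal */
	printf("%d\n", (0x300 << 11) == 0x00180000);		/* prints 1 */
	/* a650/a660 and a640 values are the old ones plus bit 9 (0x200) */
	printf("%d\n", (0x00300000 | 0x200) == 0x00300200);	/* prints 1 */
	printf("%d\n", (0x00200000 | 0x200) == 0x00200200);	/* prints 1 */
	return 0;
}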
/* Set the AHB default slave response to "ERROR" */
gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);
@@ -752,7 +964,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1);
/* Select CP0 to always count cycles */
- gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
+ gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL(0), PERF_CP_ALWAYS_COUNT);
a6xx_set_ubwc_config(gpu);
@@ -763,7 +975,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);
/* Set weights for bicubic filtering */
- if (adreno_is_a650(adreno_gpu)) {
+ if (adreno_is_a650_family(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0);
gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1,
0x3fe05ff4);
@@ -776,41 +988,14 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
}
/* Protect registers from the CP */
- gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);
-
- gpu_write(gpu, REG_A6XX_CP_PROTECT(0),
- A6XX_PROTECT_RDONLY(0x600, 0x51));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(1), A6XX_PROTECT_RW(0xae50, 0x2));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(2), A6XX_PROTECT_RW(0x9624, 0x13));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(3), A6XX_PROTECT_RW(0x8630, 0x8));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(4), A6XX_PROTECT_RW(0x9e70, 0x1));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(5), A6XX_PROTECT_RW(0x9e78, 0x187));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(6), A6XX_PROTECT_RW(0xf000, 0x810));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(7),
- A6XX_PROTECT_RDONLY(0xfc00, 0x3));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(8), A6XX_PROTECT_RW(0x50e, 0x0));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(9), A6XX_PROTECT_RDONLY(0x50f, 0x0));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(10), A6XX_PROTECT_RW(0x510, 0x0));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(11),
- A6XX_PROTECT_RDONLY(0x0, 0x4f9));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(12),
- A6XX_PROTECT_RDONLY(0x501, 0xa));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(13),
- A6XX_PROTECT_RDONLY(0x511, 0x44));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(14), A6XX_PROTECT_RW(0xe00, 0xe));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(15), A6XX_PROTECT_RW(0x8e00, 0x0));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(16), A6XX_PROTECT_RW(0x8e50, 0xf));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(17), A6XX_PROTECT_RW(0xbe02, 0x0));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(18),
- A6XX_PROTECT_RW(0xbe20, 0x11f3));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(19), A6XX_PROTECT_RW(0x800, 0x82));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(20), A6XX_PROTECT_RW(0x8a0, 0x8));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(21), A6XX_PROTECT_RW(0x8ab, 0x19));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
- A6XX_PROTECT_RDONLY(0x980, 0x4));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
+ a6xx_set_cp_protect(gpu);
+
+ if (adreno_is_a660(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1);
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x0);
+ /* Set dualQ + disable afull for A660 GPU but not for A635 */
+ gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906);
+ }
/* Enable expanded apriv for targets that support it */
if (gpu->hw_apriv) {
@@ -852,7 +1037,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
if (!a6xx_gpu->shadow_bo) {
a6xx_gpu->shadow = msm_gem_kernel_new_locked(gpu->dev,
sizeof(u32) * gpu->nr_rings,
- MSM_BO_UNCACHED | MSM_BO_MAP_PRIV,
+ MSM_BO_WC | MSM_BO_MAP_PRIV,
gpu->aspace, &a6xx_gpu->shadow_bo,
&a6xx_gpu->shadow_iova);
@@ -959,18 +1144,113 @@ static void a6xx_recover(struct msm_gpu *gpu)
msm_gpu_hw_init(gpu);
}
-static int a6xx_fault_handler(void *arg, unsigned long iova, int flags)
+static const char *a6xx_uche_fault_block(struct msm_gpu *gpu, u32 mid)
+{
+ static const char *uche_clients[7] = {
+ "VFD", "SP", "VSC", "VPC", "HLSQ", "PC", "LRZ",
+ };
+ u32 val;
+
+ if (mid < 1 || mid > 3)
+ return "UNKNOWN";
+
+ /*
+ * The source of the data depends on the mid ID read from FSYNR1
+ * and the client ID read from the UCHE block.
+ */
+ val = gpu_read(gpu, REG_A6XX_UCHE_CLIENT_PF);
+
+ /* mid = 3 is most precise and refers to only one block per client */
+ if (mid == 3)
+ return uche_clients[val & 7];
+
+ /* For mid=2 the source is TP when the client id is 0, otherwise TP or VFD */
+ if (mid == 2)
+ return ((val & 7) == 0) ? "TP" : "TP|VFD";
+
+ /* For mid=1 just return "UCHE" as a catchall for everything else */
+ return "UCHE";
+}
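
The decode above can be exercised on its own. This standalone sketch mirrors the same mid/client decision tree, with the UCHE_CLIENT_PF register read replaced by an explicit argument (the client id is assumed to sit in the low three bits, as the val & 7 masking implies); illustrative only, not part of the patch:

/* Illustrative only, not part of the patch. */
#include <stdio.h>

static const char *uche_clients[7] = {
	"VFD", "SP", "VSC", "VPC", "HLSQ", "PC", "LRZ",
};

/* Same decision tree as a6xx_uche_fault_block(), with the register read
 * replaced by an explicit client_pf argument (valid client ids: 0..6). */
static const char *decode(unsigned int mid, unsigned int client_pf)
{
	if (mid < 1 || mid > 3)
		return "UNKNOWN";
	if (mid == 3)
		return uche_clients[client_pf & 7];
	if (mid == 2)
		return ((client_pf & 7) == 0) ? "TP" : "TP|VFD";
	return "UCHE";
}

int main(void)
{
	printf("mid=3 client=4 -> %s\n", decode(3, 4));	/* HLSQ */
	printf("mid=2 client=0 -> %s\n", decode(2, 0));	/* TP */
	printf("mid=1 client=5 -> %s\n", decode(1, 5));	/* UCHE */
	return 0;
}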
+
+static const char *a6xx_fault_block(struct msm_gpu *gpu, u32 id)
+{
+ if (id == 0)
+ return "CP";
+ else if (id == 4)
+ return "CCU";
+ else if (id == 6)
+ return "CDP Prefetch";
+
+ return a6xx_uche_fault_block(gpu, id);
+}
+
+#define ARM_SMMU_FSR_TF BIT(1)
+#define ARM_SMMU_FSR_PF BIT(3)
+#define ARM_SMMU_FSR_EF BIT(4)
+
+static int a6xx_fault_handler(void *arg, unsigned long iova, int flags, void *data)
{
struct msm_gpu *gpu = arg;
+ struct adreno_smmu_fault_info *info = data;
+ const char *type = "UNKNOWN";
+ const char *block;
+ bool do_devcoredump = info && !READ_ONCE(gpu->crashstate);
+
+ /*
+ * If we aren't going to be resuming later from fault_worker, then do
+ * it now.
+ */
+ if (!do_devcoredump) {
+ gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
+ }
- pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n",
+ /*
+ * Print a default message if we couldn't get the data from the
+ * adreno-smmu-priv
+ */
+ if (!info) {
+ pr_warn_ratelimited("*** gpu fault: iova=%.16lx flags=%d (%u,%u,%u,%u)\n",
iova, flags,
gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)),
gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)),
gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)),
gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)));
- return -EFAULT;
+ return 0;
+ }
+
+ if (info->fsr & ARM_SMMU_FSR_TF)
+ type = "TRANSLATION";
+ else if (info->fsr & ARM_SMMU_FSR_PF)
+ type = "PERMISSION";
+ else if (info->fsr & ARM_SMMU_FSR_EF)
+ type = "EXTERNAL";
+
+ block = a6xx_fault_block(gpu, info->fsynr1 & 0xff);
+
+ pr_warn_ratelimited("*** gpu fault: ttbr0=%.16llx iova=%.16lx dir=%s type=%s source=%s (%u,%u,%u,%u)\n",
+ info->ttbr0, iova,
+ flags & IOMMU_FAULT_WRITE ? "WRITE" : "READ",
+ type, block,
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)));
+
+ if (do_devcoredump) {
+ /* Turn off the hangcheck timer to keep it from bothering us */
+ del_timer(&gpu->hangcheck_timer);
+
+ gpu->fault_info.ttbr0 = info->ttbr0;
+ gpu->fault_info.iova = iova;
+ gpu->fault_info.flags = flags;
+ gpu->fault_info.type = type;
+ gpu->fault_info.block = block;
+
+ kthread_queue_work(gpu->worker, &gpu->fault_work);
+ }
+
+ return 0;
}
static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu)
@@ -1022,6 +1302,15 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
/*
+ * If stalled on SMMU fault, we could trip the GPU's hang detection,
+ * but the fault handler will trigger the devcore dump, and we want
+ * to otherwise resume normally rather than killing the submit, so
+ * just bail.
+ */
+ if (gpu_read(gpu, REG_A6XX_RBBM_STATUS3) & A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT)
+ return;
+
+ /*
* Force the GPU to stay on until after we finish
* collecting information
*/
@@ -1211,7 +1500,7 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
if (ret)
return ret;
- if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
+ if (a6xx_gpu->shadow_bo)
for (i = 0; i < gpu->nr_rings; i++)
a6xx_gpu->shadow[i] = 0;
@@ -1266,9 +1555,6 @@ static void a6xx_destroy(struct msm_gpu *gpu)
adreno_gpu_cleanup(adreno_gpu);
- if (a6xx_gpu->opp_table)
- dev_pm_opp_put_supported_hw(a6xx_gpu->opp_table);
-
kfree(a6xx_gpu);
}
@@ -1401,7 +1687,6 @@ static u32 fuse_to_supp_hw(struct device *dev, u32 revn, u32 fuse)
static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu,
u32 revn)
{
- struct opp_table *opp_table;
u32 supp_hw = UINT_MAX;
u16 speedbin;
int ret;
@@ -1424,11 +1709,10 @@ static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu,
supp_hw = fuse_to_supp_hw(dev, revn, speedbin);
done:
- opp_table = dev_pm_opp_set_supported_hw(dev, &supp_hw, 1);
- if (IS_ERR(opp_table))
- return PTR_ERR(opp_table);
+ ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1);
+ if (ret)
+ return ret;
- a6xx_gpu->opp_table = opp_table;
return 0;
}
@@ -1488,7 +1772,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
*/
info = adreno_info(config->rev);
- if (info && info->revn == 650)
+ if (info && (info->revn == 650 || info->revn == 660))
adreno_gpu->base.hw_apriv = true;
a6xx_llc_slices_init(pdev, a6xx_gpu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index ce0610c5256f..0bc2d062f54a 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -33,8 +33,6 @@ struct a6xx_gpu {
void *llc_slice;
void *htw_llc_slice;
bool have_mmu500;
-
- struct opp_table *opp_table;
};
#define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
@@ -44,7 +42,7 @@ struct a6xx_gpu {
* REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
* registers starting at _reg.
*/
-#define A6XX_PROTECT_RW(_reg, _len) \
+#define A6XX_PROTECT_NORDWR(_reg, _len) \
((1 << 31) | \
(((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index c1699b4f9a89..ad4ea0ed5d99 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -113,7 +113,7 @@ static int a6xx_crashdumper_init(struct msm_gpu *gpu,
struct a6xx_crashdumper *dumper)
{
dumper->ptr = msm_gem_kernel_new_locked(gpu->dev,
- SZ_1M, MSM_BO_UNCACHED, gpu->aspace,
+ SZ_1M, MSM_BO_WC, gpu->aspace,
&dumper->bo, &dumper->iova);
if (!IS_ERR(dumper->ptr))
@@ -832,6 +832,20 @@ static void a6xx_get_registers(struct msm_gpu *gpu,
a6xx_get_ahb_gpu_registers(gpu,
a6xx_state, &a6xx_vbif_reglist,
&a6xx_state->registers[index++]);
+ if (!dumper) {
+ /*
+ * We can't use the crashdumper when the SMMU is stalled,
+ * because the GPU has no memory access until we resume
+ * translation (but we don't want to do that until after
+ * we have captured as much useful GPU state as possible).
+ * So instead collect registers via the CPU:
+ */
+ for (i = 0; i < ARRAY_SIZE(a6xx_reglist); i++)
+ a6xx_get_ahb_gpu_registers(gpu,
+ a6xx_state, &a6xx_reglist[i],
+ &a6xx_state->registers[index++]);
+ return;
+ }
for (i = 0; i < ARRAY_SIZE(a6xx_reglist); i++)
a6xx_get_crashdumper_registers(gpu,
@@ -905,11 +919,13 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
{
- struct a6xx_crashdumper dumper = { 0 };
+ struct a6xx_crashdumper _dumper = { 0 }, *dumper = NULL;
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gpu_state *a6xx_state = kzalloc(sizeof(*a6xx_state),
GFP_KERNEL);
+ bool stalled = !!(gpu_read(gpu, REG_A6XX_RBBM_STATUS3) &
+ A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT);
if (!a6xx_state)
return ERR_PTR(-ENOMEM);
@@ -928,14 +944,24 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
/* Get the banks of indexed registers */
a6xx_get_indexed_registers(gpu, a6xx_state);
- /* Try to initialize the crashdumper */
- if (!a6xx_crashdumper_init(gpu, &dumper)) {
- a6xx_get_registers(gpu, a6xx_state, &dumper);
- a6xx_get_shaders(gpu, a6xx_state, &dumper);
- a6xx_get_clusters(gpu, a6xx_state, &dumper);
- a6xx_get_dbgahb_clusters(gpu, a6xx_state, &dumper);
+ /*
+ * Try to initialize the crashdumper if we are not dumping state
+ * with the SMMU stalled. The crashdumper needs memory access to
+ * write out GPU state, so we need to skip this when the SMMU is
+ * stalled in response to an iova fault.
+ */
+ if (!stalled && !a6xx_crashdumper_init(gpu, &_dumper)) {
+ dumper = &_dumper;
+ }
+
+ a6xx_get_registers(gpu, a6xx_state, dumper);
+
+ if (dumper) {
+ a6xx_get_shaders(gpu, a6xx_state, dumper);
+ a6xx_get_clusters(gpu, a6xx_state, dumper);
+ a6xx_get_dbgahb_clusters(gpu, a6xx_state, dumper);
- msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
+ msm_gem_kernel_put(dumper->bo, gpu->aspace, true);
}
if (snapshot_debugbus)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index ccd44d0418f8..919433732b43 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -351,6 +351,37 @@ static void a650_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
msg->cnoc_cmds_data[1][0] = 0x60000001;
}
+static void a660_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ /*
+ * Send a single "off" entry just to get things running
+ * TODO: bus scaling
+ */
+ msg->bw_level_num = 1;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x01;
+
+ msg->ddr_cmds_addrs[0] = 0x50004;
+ msg->ddr_cmds_addrs[1] = 0x500a0;
+ msg->ddr_cmds_addrs[2] = 0x50000;
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
+
+ /*
+ * These are the CX (CNOC) votes - these are used by the GMU but the
+ * votes are known and fixed for the target
+ */
+ msg->cnoc_cmds_num = 1;
+ msg->cnoc_wait_bitmask = 0x01;
+
+ msg->cnoc_cmds_addrs[0] = 0x50070;
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
+
static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
/* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
@@ -401,6 +432,8 @@ static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
a640_build_bw_table(&msg);
else if (adreno_is_a650(adreno_gpu))
a650_build_bw_table(&msg);
+ else if (adreno_is_a660(adreno_gpu))
+ a660_build_bw_table(&msg);
else
a6xx_build_bw_table(&msg);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index 548f532417d1..c9389d9fb599 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -8,21 +8,21 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/adreno.xml ( 594 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 90159 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14386 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 65048 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 84226 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112556 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 149461 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 184695 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 11218 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_control_regs.xml ( 4559 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pipe_regs.xml ( 2872 bytes, from 2020-07-23 21:58:14)
-
-Copyright (C) 2013-2020 by the following authors:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14386 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 67699 bytes, from 2021-05-31 20:21:57)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84226 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 112551 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 150713 bytes, from 2021-06-10 22:34:02)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 180049 bytes, from 2021-06-02 21:44:19)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-05-27 20:22:36)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-05-27 20:18:13)
+
+Copyright (C) 2013-2021 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 600d445fabe8..6dad8015c9a1 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -287,6 +287,19 @@ static const struct adreno_info gpulist[] = {
.init = a6xx_gpu_init,
.zapfw = "a650_zap.mdt",
.hwcg = a650_hwcg,
+ }, {
+ .rev = ADRENO_REV(6, 6, 0, ANY_ID),
+ .revn = 660,
+ .name = "A660",
+ .fw = {
+ [ADRENO_FW_SQE] = "a660_sqe.fw",
+ [ADRENO_FW_GMU] = "a660_gmu.bin",
+ },
+ .gmem = SZ_1M + SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ .zapfw = "a660_zap.mdt",
+ .hwcg = a660_hwcg,
},
};
@@ -466,6 +479,7 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
config.rev.minor, config.rev.patchid);
priv->is_a2xx = config.rev.core == 2;
+ priv->has_cached_coherent = config.rev.core >= 6;
gpu = info->init(drm);
if (IS_ERR(gpu)) {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index cf897297656f..9f5a30234b33 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -239,7 +239,7 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
*value = adreno_gpu->gmem;
return 0;
case MSM_PARAM_GMEM_BASE:
- *value = !adreno_is_a650(adreno_gpu) ? 0x100000 : 0;
+ *value = !adreno_is_a650_family(adreno_gpu) ? 0x100000 : 0;
return 0;
case MSM_PARAM_CHIP_ID:
*value = adreno_gpu->rev.patchid |
@@ -391,7 +391,7 @@ struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
void *ptr;
ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
- MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
+ MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
if (IS_ERR(ptr))
return ERR_CAST(ptr);
@@ -408,7 +408,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int ret, i;
- DBG("%s", gpu->name);
+ VERB("%s", gpu->name);
ret = adreno_load_fw(adreno_gpu);
if (ret)
@@ -684,6 +684,21 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
adreno_gpu->info->revn, adreno_gpu->rev.core,
adreno_gpu->rev.major, adreno_gpu->rev.minor,
adreno_gpu->rev.patchid);
+ /*
+ * If this state was collected due to an iova fault, print the fault
+ * related info. TTBR0 will not be zero in that case, so it is a good
+ * way to distinguish.
+ */
+ if (state->fault_info.ttbr0) {
+ const struct msm_gpu_fault_info *info = &state->fault_info;
+
+ drm_puts(p, "fault-info:\n");
+ drm_printf(p, " - ttbr0=%.16llx\n", info->ttbr0);
+ drm_printf(p, " - iova=%.16lx\n", info->iova);
+ drm_printf(p, " - dir=%s\n", info->flags & IOMMU_FAULT_WRITE ? "WRITE" : "READ");
+ drm_printf(p, " - type=%s\n", info->type);
+ drm_printf(p, " - source=%s\n", info->block);
+ }
drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);
@@ -841,7 +856,7 @@ static void adreno_get_pwrlevels(struct device *dev,
if (!of_find_property(dev->of_node, "operating-points-v2", NULL))
ret = adreno_get_legacy_pwrlevels(dev);
else {
- ret = dev_pm_opp_of_add_table(dev);
+ ret = devm_pm_opp_of_add_table(dev);
if (ret)
DRM_DEV_ERROR(dev, "Unable to set the OPP table\n");
}
@@ -946,7 +961,4 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
pm_runtime_disable(&priv->gpu_pdev->dev);
msm_gpu_cleanup(&adreno_gpu->base);
-
- icc_put(gpu->icc_path);
- icc_put(gpu->ocmem_icc_path);
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index ccac275aa7a2..8dbe0d157520 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -55,7 +55,7 @@ struct adreno_reglist {
u32 value;
};
-extern const struct adreno_reglist a630_hwcg[], a640_hwcg[], a650_hwcg[];
+extern const struct adreno_reglist a630_hwcg[], a640_hwcg[], a650_hwcg[], a660_hwcg[];
struct adreno_info {
struct adreno_rev rev;
@@ -247,6 +247,17 @@ static inline int adreno_is_a650(struct adreno_gpu *gpu)
return gpu->revn == 650;
}
+static inline int adreno_is_a660(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 660;
+}
+
+/* check for a650, a660, or any derivatives */
+static inline int adreno_is_a650_family(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 650 || gpu->revn == 620 || gpu->revn == 660;
+}
+
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
const char *fwname);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index 59bb8c1ffce6..e832ae4b937a 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -8,21 +8,21 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/adreno.xml ( 594 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 90159 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14386 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 65048 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 84226 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112556 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 149461 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 184695 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 11218 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_control_regs.xml ( 4559 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pipe_regs.xml ( 2872 bytes, from 2020-07-23 21:58:14)
-
-Copyright (C) 2013-2020 by the following authors:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14386 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 67699 bytes, from 2021-05-31 20:21:57)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84226 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 112551 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 150713 bytes, from 2021-06-10 22:34:02)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 180049 bytes, from 2021-06-02 21:44:19)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-05-27 20:22:36)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-05-27 20:18:13)
+
+Copyright (C) 2013-2021 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -247,9 +247,9 @@ enum adreno_pm4_type3_packets {
CP_DRAW_INDX_INDIRECT = 41,
CP_DRAW_INDIRECT_MULTI = 42,
CP_DRAW_AUTO = 36,
- CP_UNKNOWN_19 = 25,
- CP_UNKNOWN_1A = 26,
- CP_UNKNOWN_4E = 78,
+ CP_DRAW_PRED_ENABLE_GLOBAL = 25,
+ CP_DRAW_PRED_ENABLE_LOCAL = 26,
+ CP_DRAW_PRED_SET = 78,
CP_WIDE_REG_WRITE = 116,
CP_SCRATCH_TO_REG = 77,
CP_REG_TO_SCRATCH = 74,
@@ -267,6 +267,7 @@ enum adreno_pm4_type3_packets {
CP_SKIP_IB2_ENABLE_GLOBAL = 29,
CP_SKIP_IB2_ENABLE_LOCAL = 35,
CP_SET_SUBDRAW_SIZE = 53,
+ CP_WHERE_AM_I = 98,
CP_SET_VISIBILITY_OVERRIDE = 100,
CP_PREEMPT_ENABLE_GLOBAL = 105,
CP_PREEMPT_ENABLE_LOCAL = 106,
@@ -298,7 +299,6 @@ enum adreno_pm4_type3_packets {
CP_SET_BIN_DATA5_OFFSET = 46,
CP_SET_CTXSWITCH_IB = 85,
CP_REG_WRITE = 109,
- CP_WHERE_AM_I = 98,
};
enum adreno_state_block {
@@ -400,6 +400,17 @@ enum a6xx_patch_type {
enum a6xx_draw_indirect_opcode {
INDIRECT_OP_NORMAL = 2,
INDIRECT_OP_INDEXED = 4,
+ INDIRECT_OP_INDIRECT_COUNT = 6,
+ INDIRECT_OP_INDIRECT_COUNT_INDEXED = 7,
+};
+
+enum cp_draw_pred_src {
+ PRED_SRC_MEM = 5,
+};
+
+enum cp_draw_pred_test {
+ NE_0_PASS = 0,
+ EQ_0_PASS = 1,
};
enum cp_cond_function {
@@ -1040,33 +1051,61 @@ static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(uint32_t val)
return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__MASK;
}
-#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_2 0x00000002
-#define A6XX_CP_DRAW_INDIRECT_MULTI_2_DRAW_COUNT__MASK 0xffffffff
-#define A6XX_CP_DRAW_INDIRECT_MULTI_2_DRAW_COUNT__SHIFT 0
-static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_2_DRAW_COUNT(uint32_t val)
-{
- return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_2_DRAW_COUNT__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_2_DRAW_COUNT__MASK;
-}
+#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_DRAW_COUNT 0x00000002
-#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_ADDRESS_0 0x00000003
-#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_5 0x00000005
-#define A6XX_CP_DRAW_INDIRECT_MULTI_5_PARAM_0__MASK 0xffffffff
-#define A6XX_CP_DRAW_INDIRECT_MULTI_5_PARAM_0__SHIFT 0
-static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_5_PARAM_0(uint32_t val)
-{
- return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_5_PARAM_0__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_5_PARAM_0__MASK;
-}
+#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_INDIRECT 0x00000003
+
+#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_STRIDE 0x00000005
+
+
+#define REG_CP_DRAW_INDIRECT_MULTI_INDEX_INDEXED 0x00000003
+
+#define REG_CP_DRAW_INDIRECT_MULTI_MAX_INDICES_INDEXED 0x00000005
+
+#define REG_CP_DRAW_INDIRECT_MULTI_INDIRECT_INDEXED 0x00000006
-#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_INDIRECT 0x00000006
+#define REG_CP_DRAW_INDIRECT_MULTI_STRIDE_INDEXED 0x00000008
-#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_8 0x00000008
-#define A6XX_CP_DRAW_INDIRECT_MULTI_8_STRIDE__MASK 0xffffffff
-#define A6XX_CP_DRAW_INDIRECT_MULTI_8_STRIDE__SHIFT 0
-static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_8_STRIDE(uint32_t val)
+
+#define REG_CP_DRAW_INDIRECT_MULTI_INDIRECT_INDIRECT 0x00000003
+
+#define REG_CP_DRAW_INDIRECT_MULTI_INDIRECT_COUNT_INDIRECT 0x00000005
+
+#define REG_CP_DRAW_INDIRECT_MULTI_STRIDE_INDIRECT 0x00000007
+
+
+#define REG_CP_DRAW_INDIRECT_MULTI_INDEX_INDIRECT_INDEXED 0x00000003
+
+#define REG_CP_DRAW_INDIRECT_MULTI_MAX_INDICES_INDIRECT_INDEXED 0x00000005
+
+#define REG_CP_DRAW_INDIRECT_MULTI_INDIRECT_INDIRECT_INDEXED 0x00000006
+
+#define REG_CP_DRAW_INDIRECT_MULTI_INDIRECT_COUNT_INDIRECT_INDEXED 0x00000008
+
+#define REG_CP_DRAW_INDIRECT_MULTI_STRIDE_INDIRECT_INDEXED 0x0000000a
+
+#define REG_CP_DRAW_PRED_ENABLE_GLOBAL_0 0x00000000
+#define CP_DRAW_PRED_ENABLE_GLOBAL_0_ENABLE 0x00000001
+
+#define REG_CP_DRAW_PRED_ENABLE_LOCAL_0 0x00000000
+#define CP_DRAW_PRED_ENABLE_LOCAL_0_ENABLE 0x00000001
+
+#define REG_CP_DRAW_PRED_SET_0 0x00000000
+#define CP_DRAW_PRED_SET_0_SRC__MASK 0x000000f0
+#define CP_DRAW_PRED_SET_0_SRC__SHIFT 4
+static inline uint32_t CP_DRAW_PRED_SET_0_SRC(enum cp_draw_pred_src val)
{
- return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_8_STRIDE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_8_STRIDE__MASK;
+ return ((val) << CP_DRAW_PRED_SET_0_SRC__SHIFT) & CP_DRAW_PRED_SET_0_SRC__MASK;
}
+#define CP_DRAW_PRED_SET_0_TEST__MASK 0x00000100
+#define CP_DRAW_PRED_SET_0_TEST__SHIFT 8
+static inline uint32_t CP_DRAW_PRED_SET_0_TEST(enum cp_draw_pred_test val)
+{
+ return ((val) << CP_DRAW_PRED_SET_0_TEST__SHIFT) & CP_DRAW_PRED_SET_0_TEST__MASK;
+}
+
+#define REG_CP_DRAW_PRED_SET_MEM_ADDR 0x00000001
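
The new CP_DRAW_PRED_SET helpers are plain shift-and-mask packers. A short standalone sketch of building the packet's first dword from the enums above; the particular combination (predicate read from memory, draw passes when the value is non-zero) is only an assumed example, not taken from this header:

/* Illustrative only, not part of the patch. */
#include <stdint.h>
#include <stdio.h>

enum cp_draw_pred_src { PRED_SRC_MEM = 5 };
enum cp_draw_pred_test { NE_0_PASS = 0, EQ_0_PASS = 1 };

/* same shift/mask values as the inline helpers in this hunk */
#define CP_DRAW_PRED_SET_0_SRC(v)	(((v) << 4) & 0x000000f0)
#define CP_DRAW_PRED_SET_0_TEST(v)	(((v) << 8) & 0x00000100)

int main(void)
{
	uint32_t dword0 = CP_DRAW_PRED_SET_0_SRC(PRED_SRC_MEM) |
			  CP_DRAW_PRED_SET_0_TEST(NE_0_PASS);

	printf("CP_DRAW_PRED_SET[0] = 0x%08x\n", dword0);	/* 0x00000050 */
	return 0;
}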
static inline uint32_t REG_CP_SET_DRAW_STATE_(uint32_t i0) { return 0x00000000 + 0x3*i0; }
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
index cdec3fbe6ff4..d2457490930b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
@@ -22,165 +22,20 @@ static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
struct dpu_kms *dpu_kms = arg;
struct dpu_irq *irq_obj = &dpu_kms->irq_obj;
struct dpu_irq_callback *cb;
- unsigned long irq_flags;
- pr_debug("irq_idx=%d\n", irq_idx);
+ VERB("irq_idx=%d\n", irq_idx);
- if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) {
- DRM_ERROR("no registered cb, idx:%d enable_count:%d\n", irq_idx,
- atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]));
- }
+ if (list_empty(&irq_obj->irq_cb_tbl[irq_idx]))
+ DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
atomic_inc(&irq_obj->irq_counts[irq_idx]);
/*
* Perform registered function callback
*/
- spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list)
if (cb->func)
cb->func(cb->arg, irq_idx);
- spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
-
- /*
- * Clear pending interrupt status in HW.
- * NOTE: dpu_core_irq_callback_handler is protected by top-level
- * spinlock, so it is safe to clear any interrupt status here.
- */
- dpu_kms->hw_intr->ops.clear_intr_status_nolock(
- dpu_kms->hw_intr,
- irq_idx);
-}
-
-int dpu_core_irq_idx_lookup(struct dpu_kms *dpu_kms,
- enum dpu_intr_type intr_type, u32 instance_idx)
-{
- if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.irq_idx_lookup)
- return -EINVAL;
-
- return dpu_kms->hw_intr->ops.irq_idx_lookup(dpu_kms->hw_intr,
- intr_type, instance_idx);
-}
-
-/**
- * _dpu_core_irq_enable - enable core interrupt given by the index
- * @dpu_kms: Pointer to dpu kms context
- * @irq_idx: interrupt index
- */
-static int _dpu_core_irq_enable(struct dpu_kms *dpu_kms, int irq_idx)
-{
- unsigned long irq_flags;
- int ret = 0, enable_count;
-
- if (!dpu_kms->hw_intr ||
- !dpu_kms->irq_obj.enable_counts ||
- !dpu_kms->irq_obj.irq_counts) {
- DPU_ERROR("invalid params\n");
- return -EINVAL;
- }
-
- if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
- DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
- return -EINVAL;
- }
-
- enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
- DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
- trace_dpu_core_irq_enable_idx(irq_idx, enable_count);
-
- if (atomic_inc_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 1) {
- ret = dpu_kms->hw_intr->ops.enable_irq(
- dpu_kms->hw_intr,
- irq_idx);
- if (ret)
- DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
- irq_idx);
-
- DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
-
- spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
- /* empty callback list but interrupt is enabled */
- if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]))
- DPU_ERROR("irq_idx=%d enabled with no callback\n",
- irq_idx);
- spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
- }
-
- return ret;
-}
-
-int dpu_core_irq_enable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
-{
- int i, ret = 0, counts;
-
- if (!irq_idxs || !irq_count) {
- DPU_ERROR("invalid params\n");
- return -EINVAL;
- }
-
- counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
- if (counts)
- DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
-
- for (i = 0; (i < irq_count) && !ret; i++)
- ret = _dpu_core_irq_enable(dpu_kms, irq_idxs[i]);
-
- return ret;
-}
-
-/**
- * _dpu_core_irq_disable - disable core interrupt given by the index
- * @dpu_kms: Pointer to dpu kms context
- * @irq_idx: interrupt index
- */
-static int _dpu_core_irq_disable(struct dpu_kms *dpu_kms, int irq_idx)
-{
- int ret = 0, enable_count;
-
- if (!dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
- DPU_ERROR("invalid params\n");
- return -EINVAL;
- }
-
- if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
- DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
- return -EINVAL;
- }
-
- enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
- DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
- trace_dpu_core_irq_disable_idx(irq_idx, enable_count);
-
- if (atomic_dec_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 0) {
- ret = dpu_kms->hw_intr->ops.disable_irq(
- dpu_kms->hw_intr,
- irq_idx);
- if (ret)
- DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
- irq_idx);
- DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
- }
-
- return ret;
-}
-
-int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
-{
- int i, ret = 0, counts;
-
- if (!irq_idxs || !irq_count) {
- DPU_ERROR("invalid params\n");
- return -EINVAL;
- }
-
- counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
- if (counts == 2)
- DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
-
- for (i = 0; (i < irq_count) && !ret; i++)
- ret = _dpu_core_irq_disable(dpu_kms, irq_idxs[i]);
-
- return ret;
}
u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
@@ -217,19 +72,28 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
return -EINVAL;
}
- if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
return -EINVAL;
}
- DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+ VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
- spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ irq_flags = dpu_kms->hw_intr->ops.lock(dpu_kms->hw_intr);
trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
list_del_init(&register_irq_cb->list);
list_add_tail(&register_irq_cb->list,
&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]);
- spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ if (list_is_first(&register_irq_cb->list,
+ &dpu_kms->irq_obj.irq_cb_tbl[irq_idx])) {
+ int ret = dpu_kms->hw_intr->ops.enable_irq_locked(
+ dpu_kms->hw_intr,
+ irq_idx);
+ if (ret)
+ DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
+ irq_idx);
+ }
+ dpu_kms->hw_intr->ops.unlock(dpu_kms->hw_intr, irq_flags);
return 0;
}
@@ -252,21 +116,27 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
return -EINVAL;
}
- if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
return -EINVAL;
}
- DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+ VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
- spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ irq_flags = dpu_kms->hw_intr->ops.lock(dpu_kms->hw_intr);
trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
list_del_init(&register_irq_cb->list);
/* empty callback list but interrupt is still enabled */
- if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]) &&
- atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]))
- DPU_ERROR("irq_idx=%d enabled with no callback\n", irq_idx);
- spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx])) {
+ int ret = dpu_kms->hw_intr->ops.disable_irq_locked(
+ dpu_kms->hw_intr,
+ irq_idx);
+ if (ret)
+ DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
+ irq_idx);
+ VERB("irq_idx=%d ret=%d\n", irq_idx, ret);
+ }
+ dpu_kms->hw_intr->ops.unlock(dpu_kms->hw_intr, irq_flags);
return 0;
}
@@ -290,26 +160,26 @@ static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
#ifdef CONFIG_DEBUG_FS
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
- struct dpu_irq *irq_obj = s->private;
+ struct dpu_kms *dpu_kms = s->private;
+ struct dpu_irq *irq_obj = &dpu_kms->irq_obj;
struct dpu_irq_callback *cb;
unsigned long irq_flags;
- int i, irq_count, enable_count, cb_count;
+ int i, irq_count, cb_count;
- if (WARN_ON(!irq_obj->enable_counts || !irq_obj->irq_cb_tbl))
+ if (WARN_ON(!irq_obj->irq_cb_tbl))
return 0;
for (i = 0; i < irq_obj->total_irqs; i++) {
- spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
+ irq_flags = dpu_kms->hw_intr->ops.lock(dpu_kms->hw_intr);
cb_count = 0;
irq_count = atomic_read(&irq_obj->irq_counts[i]);
- enable_count = atomic_read(&irq_obj->enable_counts[i]);
list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list)
cb_count++;
- spin_unlock_irqrestore(&irq_obj->cb_lock, irq_flags);
+ dpu_kms->hw_intr->ops.unlock(dpu_kms->hw_intr, irq_flags);
- if (irq_count || enable_count || cb_count)
- seq_printf(s, "idx:%d irq:%d enable:%d cb:%d\n",
- i, irq_count, enable_count, cb_count);
+ if (irq_count || cb_count)
+ seq_printf(s, "idx:%d irq:%d cb:%d\n",
+ i, irq_count, cb_count);
}
return 0;
@@ -320,7 +190,7 @@ DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);
void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
struct dentry *parent)
{
- debugfs_create_file("core_irq", 0600, parent, &dpu_kms->irq_obj,
+ debugfs_create_file("core_irq", 0600, parent, dpu_kms,
&dpu_debugfs_core_irq_fops);
}
#endif
@@ -334,19 +204,14 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
dpu_disable_all_irqs(dpu_kms);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
- spin_lock_init(&dpu_kms->irq_obj.cb_lock);
-
/* Create irq callbacks for all possible irq_idx */
- dpu_kms->irq_obj.total_irqs = dpu_kms->hw_intr->irq_idx_tbl_size;
+ dpu_kms->irq_obj.total_irqs = dpu_kms->hw_intr->total_irqs;
dpu_kms->irq_obj.irq_cb_tbl = kcalloc(dpu_kms->irq_obj.total_irqs,
sizeof(struct list_head), GFP_KERNEL);
- dpu_kms->irq_obj.enable_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
- sizeof(atomic_t), GFP_KERNEL);
dpu_kms->irq_obj.irq_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
sizeof(atomic_t), GFP_KERNEL);
for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) {
INIT_LIST_HEAD(&dpu_kms->irq_obj.irq_cb_tbl[i]);
- atomic_set(&dpu_kms->irq_obj.enable_counts[i], 0);
atomic_set(&dpu_kms->irq_obj.irq_counts[i], 0);
}
}
@@ -357,8 +222,7 @@ void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
pm_runtime_get_sync(&dpu_kms->pdev->dev);
for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++)
- if (atomic_read(&dpu_kms->irq_obj.enable_counts[i]) ||
- !list_empty(&dpu_kms->irq_obj.irq_cb_tbl[i]))
+ if (!list_empty(&dpu_kms->irq_obj.irq_cb_tbl[i]))
DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
dpu_clear_all_irqs(dpu_kms);
@@ -366,10 +230,8 @@ void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
pm_runtime_put_sync(&dpu_kms->pdev->dev);
kfree(dpu_kms->irq_obj.irq_cb_tbl);
- kfree(dpu_kms->irq_obj.enable_counts);
kfree(dpu_kms->irq_obj.irq_counts);
dpu_kms->irq_obj.irq_cb_tbl = NULL;
- dpu_kms->irq_obj.enable_counts = NULL;
dpu_kms->irq_obj.irq_counts = NULL;
dpu_kms->irq_obj.total_irqs = 0;
}
@@ -377,21 +239,13 @@ void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms)
{
/*
- * Read interrupt status from all sources. Interrupt status are
- * stored within hw_intr.
- * Function will also clear the interrupt status after reading.
- * Individual interrupt status bit will only get stored if it
- * is enabled.
- */
- dpu_kms->hw_intr->ops.get_interrupt_statuses(dpu_kms->hw_intr);
-
- /*
* Dispatch to HW driver to handle interrupt lookup that is being
* fired. When matching interrupt is located, HW driver will call to
* dpu_core_irq_callback_handler with the irq_idx from the lookup table.
* dpu_core_irq_callback_handler will perform the registered function
* callback, and do the interrupt status clearing once the registered
* callback is finished.
+ * Function will also clear the interrupt status after reading.
*/
dpu_kms->hw_intr->ops.dispatch_irqs(
dpu_kms->hw_intr,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
index e30775e6585b..90ae6c9ccc95 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
@@ -30,49 +30,6 @@ void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms);
irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms);
/**
- * dpu_core_irq_idx_lookup - IRQ helper function for lookup irq_idx from HW
- * interrupt mapping table.
- * @dpu_kms: DPU handle
- * @intr_type: DPU HW interrupt type for lookup
- * @instance_idx: DPU HW block instance defined in dpu_hw_mdss.h
- * @return: irq_idx or -EINVAL when fail to lookup
- */
-int dpu_core_irq_idx_lookup(
- struct dpu_kms *dpu_kms,
- enum dpu_intr_type intr_type,
- uint32_t instance_idx);
-
-/**
- * dpu_core_irq_enable - IRQ helper function for enabling one or more IRQs
- * @dpu_kms: DPU handle
- * @irq_idxs: Array of irq index
- * @irq_count: Number of irq_idx provided in the array
- * @return: 0 for success enabling IRQ, otherwise failure
- *
- * This function increments count on each enable and decrements on each
- * disable. Interrupts is enabled if count is 0 before increment.
- */
-int dpu_core_irq_enable(
- struct dpu_kms *dpu_kms,
- int *irq_idxs,
- uint32_t irq_count);
-
-/**
- * dpu_core_irq_disable - IRQ helper function for disabling one of more IRQs
- * @dpu_kms: DPU handle
- * @irq_idxs: Array of irq index
- * @irq_count: Number of irq_idx provided in the array
- * @return: 0 for success disabling IRQ, otherwise failure
- *
- * This function increments count on each enable and decrements on each
- * disable. Interrupts is disabled if count is 0 after decrement.
- */
-int dpu_core_irq_disable(
- struct dpu_kms *dpu_kms,
- int *irq_idxs,
- uint32_t irq_count);
-
-/**
* dpu_core_irq_read - IRQ helper function for reading IRQ status
* @dpu_kms: DPU handle
* @irq_idx: irq index
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index 7cba5bbdf4b7..60fe06018581 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -132,7 +132,7 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
perf->core_clk_rate = _dpu_core_perf_calc_clk(kms, crtc, state);
}
- DPU_DEBUG(
+ DRM_DEBUG_ATOMIC(
"crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu\n",
crtc->base.id, perf->core_clk_rate,
perf->max_per_pipe_ib, perf->bw_ctl);
@@ -178,7 +178,7 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
struct dpu_crtc_state *tmp_cstate =
to_dpu_crtc_state(tmp_crtc->state);
- DPU_DEBUG("crtc:%d bw:%llu ctrl:%d\n",
+ DRM_DEBUG_ATOMIC("crtc:%d bw:%llu ctrl:%d\n",
tmp_crtc->base.id, tmp_cstate->new_perf.bw_ctl,
tmp_cstate->bw_control);
@@ -187,11 +187,11 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
/* convert bandwidth to kb */
bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
- DPU_DEBUG("calculated bandwidth=%uk\n", bw);
+ DRM_DEBUG_ATOMIC("calculated bandwidth=%uk\n", bw);
threshold = kms->catalog->perf.max_bw_high;
- DPU_DEBUG("final threshold bw limit = %d\n", threshold);
+ DRM_DEBUG_ATOMIC("final threshold bw limit = %d\n", threshold);
if (!threshold) {
DPU_ERROR("no bandwidth limits specified\n");
@@ -228,7 +228,7 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
perf.bw_ctl += dpu_cstate->new_perf.bw_ctl;
- DPU_DEBUG("crtc=%d bw=%llu paths:%d\n",
+ DRM_DEBUG_ATOMIC("crtc=%d bw=%llu paths:%d\n",
tmp_crtc->base.id,
dpu_cstate->new_perf.bw_ctl, kms->num_paths);
}
@@ -278,7 +278,7 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
/* Release the bandwidth */
if (kms->perf.enable_bw_release) {
trace_dpu_cmd_release_bw(crtc->base.id);
- DPU_DEBUG("Release BW crtc=%d\n", crtc->base.id);
+ DRM_DEBUG_ATOMIC("Release BW crtc=%d\n", crtc->base.id);
dpu_crtc->cur_perf.bw_ctl = 0;
_dpu_core_perf_crtc_update_bus(kms, crtc);
}
@@ -314,7 +314,7 @@ static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED)
clk_rate = kms->perf.fix_core_clk_rate;
- DPU_DEBUG("clk:%llu\n", clk_rate);
+ DRM_DEBUG_ATOMIC("clk:%llu\n", clk_rate);
return clk_rate;
}
@@ -344,7 +344,7 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
dpu_crtc = to_dpu_crtc(crtc);
dpu_cstate = to_dpu_crtc_state(crtc->state);
- DPU_DEBUG("crtc:%d stop_req:%d core_clk:%llu\n",
+ DRM_DEBUG_ATOMIC("crtc:%d stop_req:%d core_clk:%llu\n",
crtc->base.id, stop_req, kms->perf.core_clk_rate);
old = &dpu_crtc->cur_perf;
@@ -362,7 +362,7 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
(new->max_per_pipe_ib > old->max_per_pipe_ib))) ||
(!params_changed && ((new->bw_ctl < old->bw_ctl) ||
(new->max_per_pipe_ib < old->max_per_pipe_ib)))) {
- DPU_DEBUG("crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
+ DRM_DEBUG_ATOMIC("crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
crtc->base.id, params_changed,
new->bw_ctl, old->bw_ctl);
old->bw_ctl = new->bw_ctl;
@@ -378,7 +378,7 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
update_clk = true;
}
} else {
- DPU_DEBUG("crtc=%d disable\n", crtc->base.id);
+ DRM_DEBUG_ATOMIC("crtc=%d disable\n", crtc->base.id);
memset(old, 0, sizeof(*old));
update_bus = true;
update_clk = true;
@@ -413,7 +413,7 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
}
kms->perf.core_clk_rate = clk_rate;
- DPU_DEBUG("update clk rate = %lld HZ\n", clk_rate);
+ DRM_DEBUG_ATOMIC("update clk rate = %lld HZ\n", clk_rate);
}
return 0;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 18bc76b7f1a3..9a5c70c87cc8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -57,8 +57,6 @@ static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
- DPU_DEBUG("\n");
-
if (!crtc)
return;
@@ -163,7 +161,7 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
lm->ops.setup_blend_config(lm, pstate->stage,
0xFF, 0, blend_op);
- DPU_DEBUG("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
+ DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
&format->base.pixel_format, format->alpha_enable, blend_op);
}
@@ -220,7 +218,8 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);
set_bit(dpu_plane_pipe(plane), fetch_active);
- DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
+
+ DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d\n",
crtc->base.id,
pstate->stage,
plane->base.id,
@@ -278,7 +277,7 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
struct dpu_hw_mixer *lm;
int i;
- DPU_DEBUG("%s\n", dpu_crtc->name);
+ DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name);
for (i = 0; i < cstate->num_mixers; i++) {
mixer[i].mixer_op_mode = 0;
@@ -305,7 +304,7 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
/* stage config flush mask */
ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
- DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
+ DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
mixer[i].hw_lm->idx - LM_0,
mixer[i].mixer_op_mode,
ctl->idx - CTL_0,
@@ -388,7 +387,7 @@ static void dpu_crtc_frame_event_work(struct kthread_work *work)
DPU_ATRACE_BEGIN("crtc_frame_event");
- DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
+ DRM_DEBUG_ATOMIC("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
ktime_to_ns(fevent->ts));
if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
@@ -407,9 +406,6 @@ static void dpu_crtc_frame_event_work(struct kthread_work *work)
fevent->event);
}
- if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
- dpu_core_perf_crtc_update(crtc, 0, false);
-
if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
| DPU_ENCODER_FRAME_EVENT_ERROR))
frame_done = true;
@@ -477,6 +473,7 @@ static void dpu_crtc_frame_event_cb(void *data, u32 event)
void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
trace_dpu_crtc_complete_commit(DRMID(crtc));
+ dpu_core_perf_crtc_update(crtc, 0, false);
_dpu_crtc_complete_flip(crtc);
}
@@ -558,7 +555,7 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
/* stage config flush mask */
ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
- DPU_DEBUG("lm %d, ctl %d, flush mask 0x%x\n",
+ DRM_DEBUG_ATOMIC("lm %d, ctl %d, flush mask 0x%x\n",
mixer[i].hw_lm->idx - DSPP_0,
ctl->idx - CTL_0,
mixer[i].flush_mask);
@@ -572,12 +569,12 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_encoder *encoder;
if (!crtc->state->enable) {
- DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
+ DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_begin\n",
crtc->base.id, crtc->state->enable);
return;
}
- DPU_DEBUG("crtc%d\n", crtc->base.id);
+ DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
_dpu_crtc_setup_lm_bounds(crtc, crtc->state);
@@ -617,12 +614,12 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
struct dpu_crtc_state *cstate;
if (!crtc->state->enable) {
- DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
+ DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_flush\n",
crtc->base.id, crtc->state->enable);
return;
}
- DPU_DEBUG("crtc%d\n", crtc->base.id);
+ DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
dpu_crtc = to_dpu_crtc(crtc);
cstate = to_dpu_crtc_state(crtc->state);
@@ -675,7 +672,7 @@ static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
{
struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
- DPU_DEBUG("crtc%d\n", crtc->base.id);
+ DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
__drm_atomic_helper_crtc_destroy_state(state);
@@ -688,7 +685,7 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
int ret, rc = 0;
if (!atomic_read(&dpu_crtc->frame_pending)) {
- DPU_DEBUG("no frames pending\n");
+ DRM_DEBUG_ATOMIC("no frames pending\n");
return 0;
}
@@ -731,9 +728,9 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
/* acquire bandwidth and other resources */
- DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
+ DRM_DEBUG_ATOMIC("crtc%d first commit\n", crtc->base.id);
} else
- DPU_DEBUG("crtc%d commit\n", crtc->base.id);
+ DRM_DEBUG_ATOMIC("crtc%d commit\n", crtc->base.id);
dpu_crtc->play_count++;
@@ -908,7 +905,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
if (!crtc_state->enable || !crtc_state->active) {
- DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
+ DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
crtc->base.id, crtc_state->enable,
crtc_state->active);
memset(&cstate->new_perf, 0, sizeof(cstate->new_perf));
@@ -916,7 +913,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
}
mode = &crtc_state->adjusted_mode;
- DPU_DEBUG("%s: check\n", dpu_crtc->name);
+ DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name);
/* force a full mode set if active state changed */
if (crtc_state->active_changed)
@@ -1024,7 +1021,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
}
pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
- DPU_DEBUG("%s: zpos %d\n", dpu_crtc->name, z_pos);
+ DRM_DEBUG_ATOMIC("%s: zpos %d\n", dpu_crtc->name, z_pos);
}
for (i = 0; i < multirect_count; i++) {
@@ -1376,6 +1373,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
/* initialize event handling */
spin_lock_init(&dpu_crtc->event_lock);
- DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
+ DRM_DEBUG_KMS("%s: successfully initialized crtc\n", dpu_crtc->name);
return crtc;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 8d942052db8a..1c04b7cce43e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*/
@@ -26,14 +26,15 @@
#include "dpu_crtc.h"
#include "dpu_trace.h"
#include "dpu_core_irq.h"
+#include "disp/msm_disp_snapshot.h"
-#define DPU_DEBUG_ENC(e, fmt, ...) DPU_DEBUG("enc%d " fmt,\
+#define DPU_DEBUG_ENC(e, fmt, ...) DRM_DEBUG_ATOMIC("enc%d " fmt,\
(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
-#define DPU_DEBUG_PHYS(p, fmt, ...) DPU_DEBUG("enc%d intf%d pp%d " fmt,\
+#define DPU_DEBUG_PHYS(p, fmt, ...) DRM_DEBUG_ATOMIC("enc%d intf%d pp%d " fmt,\
(p) ? (p)->parent->base.id : -1, \
(p) ? (p)->intf_idx - INTF_0 : -1, \
(p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
@@ -253,7 +254,7 @@ void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
}
static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
- int32_t hw_id, struct dpu_encoder_wait_info *info);
+ u32 irq_idx, struct dpu_encoder_wait_info *info);
int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
enum dpu_intr_idx intr_idx,
@@ -273,27 +274,27 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
/* return EWOULDBLOCK since we know the wait isn't necessary */
if (phys_enc->enable_state == DPU_ENC_DISABLED) {
- DRM_ERROR("encoder is disabled id=%u, intr=%d, hw=%d, irq=%d",
- DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ DRM_ERROR("encoder is disabled id=%u, intr=%d, irq=%d",
+ DRMID(phys_enc->parent), intr_idx,
irq->irq_idx);
return -EWOULDBLOCK;
}
if (irq->irq_idx < 0) {
- DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, hw=%d, irq=%s",
- DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, irq=%s",
+ DRMID(phys_enc->parent), intr_idx,
irq->name);
return 0;
}
- DRM_DEBUG_KMS("id=%u, intr=%d, hw=%d, irq=%d, pp=%d, pending_cnt=%d",
- DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ DRM_DEBUG_KMS("id=%u, intr=%d, irq=%d, pp=%d, pending_cnt=%d",
+ DRMID(phys_enc->parent), intr_idx,
irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
ret = dpu_encoder_helper_wait_event_timeout(
DRMID(phys_enc->parent),
- irq->hw_idx,
+ irq->irq_idx,
wait_info);
if (ret <= 0) {
@@ -303,9 +304,9 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
unsigned long flags;
DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, "
- "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+ "irq=%d, pp=%d, atomic_cnt=%d",
DRMID(phys_enc->parent), intr_idx,
- irq->hw_idx, irq->irq_idx,
+ irq->irq_idx,
phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
local_irq_save(flags);
@@ -315,16 +316,16 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
} else {
ret = -ETIMEDOUT;
DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, "
- "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+ "irq=%d, pp=%d, atomic_cnt=%d",
DRMID(phys_enc->parent), intr_idx,
- irq->hw_idx, irq->irq_idx,
+ irq->irq_idx,
phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
}
} else {
ret = 0;
trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
- intr_idx, irq->hw_idx, irq->irq_idx,
+ intr_idx, irq->irq_idx,
phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
}
@@ -344,19 +345,9 @@ int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
}
irq = &phys_enc->irq[intr_idx];
- if (irq->irq_idx >= 0) {
- DPU_DEBUG_PHYS(phys_enc,
- "skipping already registered irq %s type %d\n",
- irq->name, irq->intr_type);
- return 0;
- }
-
- irq->irq_idx = dpu_core_irq_idx_lookup(phys_enc->dpu_kms,
- irq->intr_type, irq->hw_idx);
if (irq->irq_idx < 0) {
DPU_ERROR_PHYS(phys_enc,
- "failed to lookup IRQ index for %s type:%d\n",
- irq->name, irq->intr_type);
+ "invalid IRQ index:%d\n", irq->irq_idx);
return -EINVAL;
}
@@ -370,19 +361,8 @@ int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
return ret;
}
- ret = dpu_core_irq_enable(phys_enc->dpu_kms, &irq->irq_idx, 1);
- if (ret) {
- DRM_ERROR("enable failed id=%u, intr=%d, hw=%d, irq=%d",
- DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
- irq->irq_idx);
- dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
- irq->irq_idx, &irq->cb);
- irq->irq_idx = -EINVAL;
- return ret;
- }
-
trace_dpu_enc_irq_register_success(DRMID(phys_enc->parent), intr_idx,
- irq->hw_idx, irq->irq_idx);
+ irq->irq_idx);
return ret;
}
@@ -397,31 +377,22 @@ int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
/* silently skip irqs that weren't registered */
if (irq->irq_idx < 0) {
- DRM_ERROR("duplicate unregister id=%u, intr=%d, hw=%d, irq=%d",
- DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ DRM_ERROR("duplicate unregister id=%u, intr=%d, irq=%d",
+ DRMID(phys_enc->parent), intr_idx,
irq->irq_idx);
return 0;
}
- ret = dpu_core_irq_disable(phys_enc->dpu_kms, &irq->irq_idx, 1);
- if (ret) {
- DRM_ERROR("disable failed id=%u, intr=%d, hw=%d, irq=%d ret=%d",
- DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
- irq->irq_idx, ret);
- }
-
ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, irq->irq_idx,
&irq->cb);
if (ret) {
- DRM_ERROR("unreg cb fail id=%u, intr=%d, hw=%d, irq=%d ret=%d",
- DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ DRM_ERROR("unreg cb fail id=%u, intr=%d, irq=%d ret=%d",
+ DRMID(phys_enc->parent), intr_idx,
irq->irq_idx, ret);
}
trace_dpu_enc_irq_unregister_success(DRMID(phys_enc->parent), intr_idx,
- irq->hw_idx, irq->irq_idx);
-
- irq->irq_idx = -EINVAL;
+ irq->irq_idx);
return 0;
}
@@ -820,13 +791,13 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
/* return if the resource control is already in ON state */
if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
- DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in ON state\n",
+ DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in ON state\n",
DRMID(drm_enc), sw_event);
mutex_unlock(&dpu_enc->rc_lock);
return 0;
} else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
- DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in state %d\n",
+ DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in state %d\n",
DRMID(drm_enc), sw_event,
dpu_enc->rc_state);
mutex_unlock(&dpu_enc->rc_lock);
@@ -1336,6 +1307,11 @@ static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
DPU_ATRACE_BEGIN("encoder_underrun_callback");
atomic_inc(&phy_enc->underrun_cnt);
+
+ /* trigger dump only on the first underrun */
+ if (atomic_read(&phy_enc->underrun_cnt) == 1)
+ msm_disp_snapshot_state(drm_enc->dev);
+
trace_dpu_enc_underrun_cb(DRMID(drm_enc),
atomic_read(&phy_enc->underrun_cnt));
DPU_ATRACE_END("encoder_underrun_callback");
@@ -1453,11 +1429,6 @@ static void dpu_encoder_off_work(struct work_struct *work)
struct dpu_encoder_virt *dpu_enc = container_of(work,
struct dpu_encoder_virt, delayed_off_work.work);
- if (!dpu_enc) {
- DPU_ERROR("invalid dpu encoder\n");
- return;
- }
-
dpu_encoder_resource_control(&dpu_enc->base,
DPU_ENC_RC_EVENT_ENTER_IDLE);
@@ -1537,7 +1508,7 @@ void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
static int dpu_encoder_helper_wait_event_timeout(
int32_t drm_id,
- int32_t hw_id,
+ u32 irq_idx,
struct dpu_encoder_wait_info *info)
{
int rc = 0;
@@ -1550,7 +1521,7 @@ static int dpu_encoder_helper_wait_event_timeout(
atomic_read(info->atomic_cnt) == 0, jiffies);
time = ktime_to_ms(ktime_get());
- trace_dpu_enc_wait_event_timeout(drm_id, hw_id, rc, time,
+ trace_dpu_enc_wait_event_timeout(drm_id, irq_idx, rc, time,
expected_time,
atomic_read(info->atomic_cnt));
/* If we timed out, counter is valid and time is less, wait again */
@@ -1565,19 +1536,23 @@ static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
struct dpu_encoder_virt *dpu_enc;
struct dpu_hw_ctl *ctl;
int rc;
+ struct drm_encoder *drm_enc;
dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
ctl = phys_enc->hw_ctl;
+ drm_enc = phys_enc->parent;
if (!ctl->ops.reset)
return;
- DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(phys_enc->parent),
+ DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(drm_enc),
ctl->idx);
rc = ctl->ops.reset(ctl);
- if (rc)
+ if (rc) {
DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
+ msm_disp_snapshot_state(drm_enc->dev);
+ }
phys_enc->enable_state = DPU_ENC_ENABLED;
}
@@ -1797,11 +1772,6 @@ static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
struct dpu_encoder_virt, vsync_event_work);
ktime_t wakeup_time;
- if (!dpu_enc) {
- DPU_ERROR("invalid dpu encoder\n");
- return;
- }
-
if (dpu_encoder_vsync_time(&dpu_enc->base, &wakeup_time))
return;
@@ -2068,8 +2038,6 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
phys_params.parent_ops = &dpu_encoder_parent_ops;
phys_params.enc_spinlock = &dpu_enc->enc_spinlock;
- DPU_DEBUG("\n");
-
switch (disp_info->intf_type) {
case DRM_MODE_ENCODER_DSI:
intf_type = INTF_DSI;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index ecbc4be98980..e7270eb6b84b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -165,18 +165,14 @@ enum dpu_intr_idx {
/**
* dpu_encoder_irq - tracking structure for interrupts
* @name: string name of interrupt
- * @intr_type: Encoder interrupt type
* @intr_idx: Encoder interrupt enumeration
- * @hw_idx: HW Block ID
* @irq_idx: IRQ interface lookup index from DPU IRQ framework
* will be -EINVAL if IRQ is not registered
* @irq_cb: interrupt callback
*/
struct dpu_encoder_irq {
const char *name;
- enum dpu_intr_type intr_type;
enum dpu_intr_idx intr_idx;
- int hw_idx;
int irq_idx;
struct dpu_irq_callback cb;
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index b2be39b9144e..aa01698d6b25 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, 2020-2021 The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
@@ -11,6 +11,7 @@
#include "dpu_core_irq.h"
#include "dpu_formats.h"
#include "dpu_trace.h"
+#include "disp/msm_disp_snapshot.h"
#define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
(e) && (e)->base.parent ? \
@@ -143,28 +144,6 @@ static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
phys_enc);
}
-static void _dpu_encoder_phys_cmd_setup_irq_hw_idx(
- struct dpu_encoder_phys *phys_enc)
-{
- struct dpu_encoder_irq *irq;
-
- irq = &phys_enc->irq[INTR_IDX_CTL_START];
- irq->hw_idx = phys_enc->hw_ctl->idx;
- irq->irq_idx = -EINVAL;
-
- irq = &phys_enc->irq[INTR_IDX_PINGPONG];
- irq->hw_idx = phys_enc->hw_pp->idx;
- irq->irq_idx = -EINVAL;
-
- irq = &phys_enc->irq[INTR_IDX_RDPTR];
- irq->hw_idx = phys_enc->hw_pp->idx;
- irq->irq_idx = -EINVAL;
-
- irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
- irq->hw_idx = phys_enc->intf_idx;
- irq->irq_idx = -EINVAL;
-}
-
static void dpu_encoder_phys_cmd_mode_set(
struct dpu_encoder_phys *phys_enc,
struct drm_display_mode *mode,
@@ -172,6 +151,7 @@ static void dpu_encoder_phys_cmd_mode_set(
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_encoder_irq *irq;
if (!mode || !adj_mode) {
DPU_ERROR("invalid args\n");
@@ -181,7 +161,17 @@ static void dpu_encoder_phys_cmd_mode_set(
DPU_DEBUG_CMDENC(cmd_enc, "caching mode:\n");
drm_mode_debug_printmodeline(adj_mode);
- _dpu_encoder_phys_cmd_setup_irq_hw_idx(phys_enc);
+ irq = &phys_enc->irq[INTR_IDX_CTL_START];
+ irq->irq_idx = phys_enc->hw_ctl->caps->intr_start;
+
+ irq = &phys_enc->irq[INTR_IDX_PINGPONG];
+ irq->irq_idx = phys_enc->hw_pp->caps->intr_done;
+
+ irq = &phys_enc->irq[INTR_IDX_RDPTR];
+ irq->irq_idx = phys_enc->hw_pp->caps->intr_rdptr;
+
+ irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+ irq->irq_idx = phys_enc->hw_intf->cap->intr_underrun;
}
static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
@@ -191,10 +181,13 @@ static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
to_dpu_encoder_phys_cmd(phys_enc);
u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
bool do_log = false;
+ struct drm_encoder *drm_enc;
if (!phys_enc->hw_pp)
return -EINVAL;
+ drm_enc = phys_enc->parent;
+
cmd_enc->pp_timeout_report_cnt++;
if (cmd_enc->pp_timeout_report_cnt == PP_TIMEOUT_MAX_TRIALS) {
frame_event |= DPU_ENCODER_FRAME_EVENT_PANEL_DEAD;
@@ -203,7 +196,7 @@ static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
do_log = true;
}
- trace_dpu_enc_phys_cmd_pdone_timeout(DRMID(phys_enc->parent),
+ trace_dpu_enc_phys_cmd_pdone_timeout(DRMID(drm_enc),
phys_enc->hw_pp->idx - PINGPONG_0,
cmd_enc->pp_timeout_report_cnt,
atomic_read(&phys_enc->pending_kickoff_cnt),
@@ -212,12 +205,12 @@ static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
/* to avoid flooding, only log first time, and "dead" time */
if (do_log) {
DRM_ERROR("id:%d pp:%d kickoff timeout %d cnt %d koff_cnt %d\n",
- DRMID(phys_enc->parent),
+ DRMID(drm_enc),
phys_enc->hw_pp->idx - PINGPONG_0,
phys_enc->hw_ctl->idx - CTL_0,
cmd_enc->pp_timeout_report_cnt,
atomic_read(&phys_enc->pending_kickoff_cnt));
-
+ msm_disp_snapshot_state(drm_enc->dev);
dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
}
@@ -228,7 +221,7 @@ static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
if (phys_enc->parent_ops->handle_frame_done)
phys_enc->parent_ops->handle_frame_done(
- phys_enc->parent, phys_enc, frame_event);
+ drm_enc, phys_enc, frame_event);
return -ETIMEDOUT;
}
@@ -685,10 +678,6 @@ static int dpu_encoder_phys_cmd_wait_for_tx_complete(
static int dpu_encoder_phys_cmd_wait_for_commit_done(
struct dpu_encoder_phys *phys_enc)
{
- struct dpu_encoder_phys_cmd *cmd_enc;
-
- cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
-
/* only required for master controller */
if (!dpu_encoder_phys_cmd_is_master(phys_enc))
return 0;
@@ -795,31 +784,26 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
irq = &phys_enc->irq[i];
INIT_LIST_HEAD(&irq->cb.list);
irq->irq_idx = -EINVAL;
- irq->hw_idx = -EINVAL;
irq->cb.arg = phys_enc;
}
irq = &phys_enc->irq[INTR_IDX_CTL_START];
irq->name = "ctl_start";
- irq->intr_type = DPU_IRQ_TYPE_CTL_START;
irq->intr_idx = INTR_IDX_CTL_START;
irq->cb.func = dpu_encoder_phys_cmd_ctl_start_irq;
irq = &phys_enc->irq[INTR_IDX_PINGPONG];
irq->name = "pp_done";
- irq->intr_type = DPU_IRQ_TYPE_PING_PONG_COMP;
irq->intr_idx = INTR_IDX_PINGPONG;
irq->cb.func = dpu_encoder_phys_cmd_pp_tx_done_irq;
irq = &phys_enc->irq[INTR_IDX_RDPTR];
irq->name = "pp_rd_ptr";
- irq->intr_type = DPU_IRQ_TYPE_PING_PONG_RD_PTR;
irq->intr_idx = INTR_IDX_RDPTR;
irq->cb.func = dpu_encoder_phys_cmd_pp_rd_ptr_irq;
irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
irq->name = "underrun";
- irq->intr_type = DPU_IRQ_TYPE_INTF_UNDER_RUN;
irq->intr_idx = INTR_IDX_UNDERRUN;
irq->cb.func = dpu_encoder_phys_cmd_underrun_irq;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index 0e06b7e73c7a..185379b18572 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, 2020-2021 The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
@@ -9,6 +9,7 @@
#include "dpu_core_irq.h"
#include "dpu_formats.h"
#include "dpu_trace.h"
+#include "disp/msm_disp_snapshot.h"
#define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
(e) && (e)->parent ? \
@@ -284,7 +285,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
if (phys_enc->hw_pp->merge_3d)
- intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->id;
+ intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf,
@@ -298,11 +299,8 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
true,
phys_enc->hw_pp->idx);
- if (phys_enc->hw_pp->merge_3d) {
- struct dpu_hw_merge_3d *merge_3d = to_dpu_hw_merge_3d(phys_enc->hw_pp->merge_3d);
-
- merge_3d->ops.setup_3d_mode(merge_3d, intf_cfg.mode_3d);
- }
+ if (phys_enc->hw_pp->merge_3d)
+ phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d, intf_cfg.mode_3d);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
@@ -363,38 +361,24 @@ static bool dpu_encoder_phys_vid_needs_single_flush(
return phys_enc->split_role != ENC_ROLE_SOLO;
}
-static void _dpu_encoder_phys_vid_setup_irq_hw_idx(
- struct dpu_encoder_phys *phys_enc)
-{
- struct dpu_encoder_irq *irq;
-
- /*
- * Initialize irq->hw_idx only when irq is not registered.
- * Prevent invalidating irq->irq_idx as modeset may be
- * called many times during dfps.
- */
-
- irq = &phys_enc->irq[INTR_IDX_VSYNC];
- if (irq->irq_idx < 0)
- irq->hw_idx = phys_enc->intf_idx;
-
- irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
- if (irq->irq_idx < 0)
- irq->hw_idx = phys_enc->intf_idx;
-}
-
static void dpu_encoder_phys_vid_mode_set(
struct dpu_encoder_phys *phys_enc,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
+ struct dpu_encoder_irq *irq;
+
if (adj_mode) {
phys_enc->cached_mode = *adj_mode;
drm_mode_debug_printmodeline(adj_mode);
DPU_DEBUG_VIDENC(phys_enc, "caching mode:\n");
}
- _dpu_encoder_phys_vid_setup_irq_hw_idx(phys_enc);
+ irq = &phys_enc->irq[INTR_IDX_VSYNC];
+ irq->irq_idx = phys_enc->hw_intf->cap->intr_vsync;
+
+ irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+ irq->irq_idx = phys_enc->hw_intf->cap->intr_underrun;
}
static int dpu_encoder_phys_vid_control_vblank_irq(
@@ -416,7 +400,7 @@ static int dpu_encoder_phys_vid_control_vblank_irq(
goto end;
}
- DRM_DEBUG_KMS("id:%u enable=%d/%d\n", DRMID(phys_enc->parent), enable,
+ DRM_DEBUG_VBL("id:%u enable=%d/%d\n", DRMID(phys_enc->parent), enable,
atomic_read(&phys_enc->vblank_refcount));
if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
@@ -461,13 +445,14 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
if (ctl->ops.update_pending_flush_merge_3d && phys_enc->hw_pp->merge_3d)
- ctl->ops.update_pending_flush_merge_3d(ctl, phys_enc->hw_pp->merge_3d->id);
+ ctl->ops.update_pending_flush_merge_3d(ctl, phys_enc->hw_pp->merge_3d->idx);
skip_flush:
DPU_DEBUG_VIDENC(phys_enc,
"update pending flush ctl %d intf %d\n",
ctl->idx - CTL_0, phys_enc->hw_intf->idx);
+ atomic_set(&phys_enc->underrun_cnt, 0);
/* ctl_flush & timing engine enable will be triggered by framework */
if (phys_enc->enable_state == DPU_ENC_DISABLED)
@@ -537,6 +522,9 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
{
struct dpu_hw_ctl *ctl;
int rc;
+ struct drm_encoder *drm_enc;
+
+ drm_enc = phys_enc->parent;
ctl = phys_enc->hw_ctl;
if (!ctl->ops.wait_reset_status)
@@ -550,6 +538,7 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
if (rc) {
DPU_ERROR_VIDENC(phys_enc, "ctl %d reset failure: %d\n",
ctl->idx, rc);
+ msm_disp_snapshot_state(drm_enc->dev);
dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);
}
}
@@ -636,7 +625,7 @@ static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
if (enable) {
ret = dpu_encoder_phys_vid_control_vblank_irq(phys_enc, true);
- if (ret)
+ if (WARN_ON(ret))
return;
dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
@@ -738,19 +727,16 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
irq = &phys_enc->irq[i];
INIT_LIST_HEAD(&irq->cb.list);
irq->irq_idx = -EINVAL;
- irq->hw_idx = -EINVAL;
irq->cb.arg = phys_enc;
}
irq = &phys_enc->irq[INTR_IDX_VSYNC];
irq->name = "vsync_irq";
- irq->intr_type = DPU_IRQ_TYPE_INTF_VSYNC;
irq->intr_idx = INTR_IDX_VSYNC;
irq->cb.func = dpu_encoder_phys_vid_vblank_irq;
irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
irq->name = "underrun";
- irq->intr_type = DPU_IRQ_TYPE_INTF_UNDER_RUN;
irq->intr_idx = INTR_IDX_UNDERRUN;
irq->cb.func = dpu_encoder_phys_vid_underrun_irq;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index 21ff8f9e5dfd..440ae93d7bd1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -992,7 +992,7 @@ const struct dpu_format *dpu_get_dpu_format_ext(
* Currently only support exactly zero or one modifier.
* All planes use the same modifier.
*/
- DPU_DEBUG("plane format modifier 0x%llX\n", modifier);
+ DRM_DEBUG_ATOMIC("plane format modifier 0x%llX\n", modifier);
switch (modifier) {
case 0:
@@ -1002,7 +1002,7 @@ const struct dpu_format *dpu_get_dpu_format_ext(
case DRM_FORMAT_MOD_QCOM_COMPRESSED:
map = dpu_format_map_ubwc;
map_size = ARRAY_SIZE(dpu_format_map_ubwc);
- DPU_DEBUG("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
+ DRM_DEBUG_ATOMIC("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
(char *)&format);
break;
default:
@@ -1021,7 +1021,7 @@ const struct dpu_format *dpu_get_dpu_format_ext(
DPU_ERROR("unsupported fmt: %4.4s modifier 0x%llX\n",
(char *)&format, modifier);
else
- DPU_DEBUG("fmt %4.4s mod 0x%llX ubwc %d yuv %d\n",
+ DRM_DEBUG_ATOMIC("fmt %4.4s mod 0x%llX ubwc %d yuv %d\n",
(char *)&format, modifier,
DPU_FORMAT_IS_UBWC(fmt),
DPU_FORMAT_IS_YUV(fmt));
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
deleted file mode 100644
index 819b26e660b9..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
+++ /dev/null
@@ -1,139 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
-
-#include <linux/mutex.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-
-#include "dpu_hw_mdss.h"
-#include "dpu_hw_blk.h"
-
-/* Serialization lock for dpu_hw_blk_list */
-static DEFINE_MUTEX(dpu_hw_blk_lock);
-
-/* List of all hw block objects */
-static LIST_HEAD(dpu_hw_blk_list);
-
-/**
- * dpu_hw_blk_init - initialize hw block object
- * @hw_blk: pointer to hw block object
- * @type: hw block type - enum dpu_hw_blk_type
- * @id: instance id of the hw block
- * @ops: Pointer to block operations
- */
-void dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
- struct dpu_hw_blk_ops *ops)
-{
- INIT_LIST_HEAD(&hw_blk->list);
- hw_blk->type = type;
- hw_blk->id = id;
- atomic_set(&hw_blk->refcount, 0);
-
- if (ops)
- hw_blk->ops = *ops;
-
- mutex_lock(&dpu_hw_blk_lock);
- list_add(&hw_blk->list, &dpu_hw_blk_list);
- mutex_unlock(&dpu_hw_blk_lock);
-}
-
-/**
- * dpu_hw_blk_destroy - destroy hw block object.
- * @hw_blk: pointer to hw block object
- * return: none
- */
-void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk)
-{
- if (!hw_blk) {
- pr_err("invalid parameters\n");
- return;
- }
-
- if (atomic_read(&hw_blk->refcount))
- pr_err("hw_blk:%d.%d invalid refcount\n", hw_blk->type,
- hw_blk->id);
-
- mutex_lock(&dpu_hw_blk_lock);
- list_del(&hw_blk->list);
- mutex_unlock(&dpu_hw_blk_lock);
-}
-
-/**
- * dpu_hw_blk_get - get hw_blk from free pool
- * @hw_blk: if specified, increment reference count only
- * @type: if hw_blk is not specified, allocate the next available of this type
- * @id: if specified (>= 0), allocate the given instance of the above type
- * return: pointer to hw block object
- */
-struct dpu_hw_blk *dpu_hw_blk_get(struct dpu_hw_blk *hw_blk, u32 type, int id)
-{
- struct dpu_hw_blk *curr;
- int rc, refcount;
-
- if (!hw_blk) {
- mutex_lock(&dpu_hw_blk_lock);
- list_for_each_entry(curr, &dpu_hw_blk_list, list) {
- if ((curr->type != type) ||
- (id >= 0 && curr->id != id) ||
- (id < 0 &&
- atomic_read(&curr->refcount)))
- continue;
-
- hw_blk = curr;
- break;
- }
- mutex_unlock(&dpu_hw_blk_lock);
- }
-
- if (!hw_blk) {
- pr_debug("no hw_blk:%d\n", type);
- return NULL;
- }
-
- refcount = atomic_inc_return(&hw_blk->refcount);
-
- if (refcount == 1 && hw_blk->ops.start) {
- rc = hw_blk->ops.start(hw_blk);
- if (rc) {
- pr_err("failed to start hw_blk:%d rc:%d\n", type, rc);
- goto error_start;
- }
- }
-
- pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type,
- hw_blk->id, refcount);
- return hw_blk;
-
-error_start:
- dpu_hw_blk_put(hw_blk);
- return ERR_PTR(rc);
-}
-
-/**
- * dpu_hw_blk_put - put hw_blk to free pool if decremented refcount is zero
- * @hw_blk: hw block to be freed
- */
-void dpu_hw_blk_put(struct dpu_hw_blk *hw_blk)
-{
- if (!hw_blk) {
- pr_err("invalid parameters\n");
- return;
- }
-
- pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type, hw_blk->id,
- atomic_read(&hw_blk->refcount));
-
- if (!atomic_read(&hw_blk->refcount)) {
- pr_err("hw_blk:%d.%d invalid put\n", hw_blk->type, hw_blk->id);
- return;
- }
-
- if (atomic_dec_return(&hw_blk->refcount))
- return;
-
- if (hw_blk->ops.stop)
- hw_blk->ops.stop(hw_blk);
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
index 2bf737f8dd1b..52e92f37eda4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
@@ -7,19 +7,9 @@
#include <linux/types.h>
#include <linux/list.h>
-#include <linux/atomic.h>
struct dpu_hw_blk;
-/**
- * struct dpu_hw_blk_ops - common hardware block operations
- * @start: start operation on first get
- * @stop: stop operation on last put
- */
-struct dpu_hw_blk_ops {
- int (*start)(struct dpu_hw_blk *);
- void (*stop)(struct dpu_hw_blk *);
-};
/**
* struct dpu_hw_blk - definition of hardware block object
@@ -29,17 +19,7 @@ struct dpu_hw_blk_ops {
* @refcount: reference/usage count
*/
struct dpu_hw_blk {
- struct list_head list;
- u32 type;
- int id;
- atomic_t refcount;
- struct dpu_hw_blk_ops ops;
+ /* opaque */
};
-void dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
- struct dpu_hw_blk_ops *ops);
-void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk);
-
-struct dpu_hw_blk *dpu_hw_blk_get(struct dpu_hw_blk *hw_blk, u32 type, int id);
-void dpu_hw_blk_put(struct dpu_hw_blk *hw_blk);
#endif /*_DPU_HW_BLK_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index b569030a0847..d01c4c919504 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -7,6 +7,7 @@
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include "dpu_hw_mdss.h"
+#include "dpu_hw_interrupts.h"
#include "dpu_hw_catalog.h"
#include "dpu_kms.h"
@@ -56,12 +57,39 @@
#define INTF_SC7280_MASK INTF_SC7180_MASK | BIT(DPU_DATA_HCTL_EN)
-#define INTR_SC7180_MASK \
- (BIT(DPU_IRQ_TYPE_PING_PONG_RD_PTR) |\
- BIT(DPU_IRQ_TYPE_PING_PONG_WR_PTR) |\
- BIT(DPU_IRQ_TYPE_PING_PONG_AUTO_REF) |\
- BIT(DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK) |\
- BIT(DPU_IRQ_TYPE_PING_PONG_TE_CHECK))
+#define IRQ_SDM845_MASK (BIT(MDP_SSPP_TOP0_INTR) | \
+ BIT(MDP_SSPP_TOP0_INTR2) | \
+ BIT(MDP_SSPP_TOP0_HIST_INTR) | \
+ BIT(MDP_INTF0_INTR) | \
+ BIT(MDP_INTF1_INTR) | \
+ BIT(MDP_INTF2_INTR) | \
+ BIT(MDP_INTF3_INTR) | \
+ BIT(MDP_INTF4_INTR) | \
+ BIT(MDP_AD4_0_INTR) | \
+ BIT(MDP_AD4_1_INTR))
+
+#define IRQ_SC7180_MASK (BIT(MDP_SSPP_TOP0_INTR) | \
+ BIT(MDP_SSPP_TOP0_INTR2) | \
+ BIT(MDP_SSPP_TOP0_HIST_INTR) | \
+ BIT(MDP_INTF0_INTR) | \
+ BIT(MDP_INTF1_INTR))
+
+#define IRQ_SC7280_MASK (BIT(MDP_SSPP_TOP0_INTR) | \
+ BIT(MDP_SSPP_TOP0_INTR2) | \
+ BIT(MDP_SSPP_TOP0_HIST_INTR) | \
+ BIT(MDP_INTF0_7xxx_INTR) | \
+ BIT(MDP_INTF1_7xxx_INTR) | \
+ BIT(MDP_INTF5_7xxx_INTR))
+
+#define IRQ_SM8250_MASK (BIT(MDP_SSPP_TOP0_INTR) | \
+ BIT(MDP_SSPP_TOP0_INTR2) | \
+ BIT(MDP_SSPP_TOP0_HIST_INTR) | \
+ BIT(MDP_INTF0_INTR) | \
+ BIT(MDP_INTF1_INTR) | \
+ BIT(MDP_INTF2_INTR) | \
+ BIT(MDP_INTF3_INTR) | \
+ BIT(MDP_INTF4_INTR))
+
#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)
#define DEFAULT_DPU_LINE_WIDTH 2048
@@ -315,27 +343,32 @@ static const struct dpu_ctl_cfg sdm845_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0xE4,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY)
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
},
{
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0xE4,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY)
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
},
{
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0xE4,
- .features = 0
+ .features = 0,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
},
{
.name = "ctl_3", .id = CTL_3,
.base = 0x1600, .len = 0xE4,
- .features = 0
+ .features = 0,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
},
{
.name = "ctl_4", .id = CTL_4,
.base = 0x1800, .len = 0xE4,
- .features = 0
+ .features = 0,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
},
};
@@ -343,17 +376,20 @@ static const struct dpu_ctl_cfg sc7180_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0xE4,
- .features = BIT(DPU_CTL_ACTIVE_CFG)
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
},
{
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0xE4,
- .features = BIT(DPU_CTL_ACTIVE_CFG)
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
},
{
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0xE4,
- .features = BIT(DPU_CTL_ACTIVE_CFG)
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
},
};
@@ -361,32 +397,38 @@ static const struct dpu_ctl_cfg sm8150_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY)
+ .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
},
{
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY)
+ .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
},
{
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG)
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
},
{
.name = "ctl_3", .id = CTL_3,
.base = 0x1600, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG)
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
},
{
.name = "ctl_4", .id = CTL_4,
.base = 0x1800, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG)
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
},
{
.name = "ctl_5", .id = CTL_5,
.base = 0x1a00, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG)
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
@@ -394,22 +436,26 @@ static const struct dpu_ctl_cfg sc7280_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x1E8,
- .features = CTL_SC7280_MASK
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
},
{
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x1E8,
- .features = CTL_SC7280_MASK
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
},
{
.name = "ctl_2", .id = CTL_2,
.base = 0x17000, .len = 0x1E8,
- .features = CTL_SC7280_MASK
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
},
{
.name = "ctl_3", .id = CTL_3,
.base = 0x18000, .len = 0x1E8,
- .features = CTL_SC7280_MASK
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
},
};
@@ -690,42 +736,66 @@ static const struct dpu_pingpong_sub_blks sc7280_pp_sblk = {
.len = 0x20, .version = 0x20000},
};
-#define PP_BLK_TE(_name, _id, _base, _merge_3d, _sblk) \
+#define PP_BLK_TE(_name, _id, _base, _merge_3d, _sblk, _done, _rdptr) \
{\
.name = _name, .id = _id, \
.base = _base, .len = 0xd4, \
.features = PINGPONG_SDM845_SPLIT_MASK, \
.merge_3d = _merge_3d, \
- .sblk = &_sblk \
+ .sblk = &_sblk, \
+ .intr_done = _done, \
+ .intr_rdptr = _rdptr, \
}
-#define PP_BLK(_name, _id, _base, _merge_3d, _sblk) \
+#define PP_BLK(_name, _id, _base, _merge_3d, _sblk, _done, _rdptr) \
{\
.name = _name, .id = _id, \
.base = _base, .len = 0xd4, \
.features = PINGPONG_SDM845_MASK, \
.merge_3d = _merge_3d, \
- .sblk = &_sblk \
+ .sblk = &_sblk, \
+ .intr_done = _done, \
+ .intr_rdptr = _rdptr, \
}
static const struct dpu_pingpong_cfg sdm845_pp[] = {
- PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk_te),
- PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk_te),
- PP_BLK("pingpong_2", PINGPONG_2, 0x71000, 0, sdm845_pp_sblk),
- PP_BLK("pingpong_3", PINGPONG_3, 0x71800, 0, sdm845_pp_sblk),
+ PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk_te,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
+ PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk_te,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
+ PP_BLK("pingpong_2", PINGPONG_2, 0x71000, 0, sdm845_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)),
+ PP_BLK("pingpong_3", PINGPONG_3, 0x71800, 0, sdm845_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)),
};
static struct dpu_pingpong_cfg sc7180_pp[] = {
- PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk_te),
- PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk_te),
+ PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk_te, -1, -1),
+ PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk_te, -1, -1),
};
static const struct dpu_pingpong_cfg sm8150_pp[] = {
- PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk_te),
- PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk_te),
- PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1, sdm845_pp_sblk),
- PP_BLK("pingpong_3", PINGPONG_3, 0x71800, MERGE_3D_1, sdm845_pp_sblk),
- PP_BLK("pingpong_4", PINGPONG_4, 0x72000, MERGE_3D_2, sdm845_pp_sblk),
- PP_BLK("pingpong_5", PINGPONG_5, 0x72800, MERGE_3D_2, sdm845_pp_sblk),
+ PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk_te,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
+ PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk_te,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
+ PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1, sdm845_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)),
+ PP_BLK("pingpong_3", PINGPONG_3, 0x71800, MERGE_3D_1, sdm845_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)),
+ PP_BLK("pingpong_4", PINGPONG_4, 0x72000, MERGE_3D_2, sdm845_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ -1),
+ PP_BLK("pingpong_5", PINGPONG_5, 0x72800, MERGE_3D_2, sdm845_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ -1),
};
/*************************************************************
@@ -746,47 +816,49 @@ static const struct dpu_merge_3d_cfg sm8150_merge_3d[] = {
};
static const struct dpu_pingpong_cfg sc7280_pp[] = {
- PP_BLK("pingpong_0", PINGPONG_0, 0x59000, 0, sc7280_pp_sblk),
- PP_BLK("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk),
- PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk),
- PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk),
+ PP_BLK("pingpong_0", PINGPONG_0, 0x59000, 0, sc7280_pp_sblk, -1, -1),
+ PP_BLK("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk, -1, -1),
+ PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1),
+ PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, -1),
};
/*************************************************************
* INTF sub blocks config
*************************************************************/
-#define INTF_BLK(_name, _id, _base, _type, _ctrl_id, _progfetch, _features) \
+#define INTF_BLK(_name, _id, _base, _type, _ctrl_id, _progfetch, _features, _reg, _underrun_bit, _vsync_bit) \
{\
.name = _name, .id = _id, \
.base = _base, .len = 0x280, \
.features = _features, \
.type = _type, \
.controller_id = _ctrl_id, \
- .prog_fetch_lines_worst_case = _progfetch \
+ .prog_fetch_lines_worst_case = _progfetch, \
+ .intr_underrun = DPU_IRQ_IDX(_reg, _underrun_bit), \
+ .intr_vsync = DPU_IRQ_IDX(_reg, _vsync_bit), \
}
static const struct dpu_intf_cfg sdm845_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SDM845_MASK),
- INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SDM845_MASK),
- INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SDM845_MASK),
- INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, 24, INTF_SDM845_MASK),
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
+ INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 28, 29),
+ INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, 24, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 30, 31),
};
static const struct dpu_intf_cfg sc7180_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SC7180_MASK),
- INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK),
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
};
static const struct dpu_intf_cfg sm8150_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SC7180_MASK),
- INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK),
- INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SC7180_MASK),
- INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, 24, INTF_SC7180_MASK),
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
+ INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 28, 29),
+ INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 30, 31),
};
static const struct dpu_intf_cfg sc7280_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x34000, INTF_DP, 0, 24, INTF_SC7280_MASK),
- INTF_BLK("intf_1", INTF_1, 0x35000, INTF_DSI, 0, 24, INTF_SC7280_MASK),
- INTF_BLK("intf_5", INTF_5, 0x39000, INTF_EDP, 0, 24, INTF_SC7280_MASK),
+ INTF_BLK("intf_0", INTF_0, 0x34000, INTF_DP, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
+ INTF_BLK("intf_1", INTF_1, 0x35000, INTF_DSI, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
+ INTF_BLK("intf_5", INTF_5, 0x39000, INTF_EDP, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 22, 23),
};
/*************************************************************
@@ -1060,7 +1132,7 @@ static void sdm845_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
.reg_dma_count = 1,
.dma_cfg = sdm845_regdma,
.perf = sdm845_perf_data,
- .mdss_irqs = 0x3ff,
+ .mdss_irqs = IRQ_SDM845_MASK,
};
}
@@ -1091,8 +1163,7 @@ static void sc7180_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
.reg_dma_count = 1,
.dma_cfg = sdm845_regdma,
.perf = sc7180_perf_data,
- .mdss_irqs = 0x3f,
- .obsolete_irq = INTR_SC7180_MASK,
+ .mdss_irqs = IRQ_SC7180_MASK,
};
}
@@ -1125,7 +1196,7 @@ static void sm8150_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
.reg_dma_count = 1,
.dma_cfg = sm8150_regdma,
.perf = sm8150_perf_data,
- .mdss_irqs = 0x3ff,
+ .mdss_irqs = IRQ_SDM845_MASK,
};
}
@@ -1158,7 +1229,7 @@ static void sm8250_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
.reg_dma_count = 1,
.dma_cfg = sm8250_regdma,
.perf = sm8250_perf_data,
- .mdss_irqs = 0xff,
+ .mdss_irqs = IRQ_SM8250_MASK,
};
}
@@ -1181,8 +1252,7 @@ static void sc7280_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
.vbif_count = ARRAY_SIZE(sdm845_vbif),
.vbif = sdm845_vbif,
.perf = sc7280_perf_data,
- .mdss_irqs = 0x1c07,
- .obsolete_irq = INTR_SC7180_MASK,
+ .mdss_irqs = IRQ_SC7280_MASK,
};
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index 4dfd8a20ad5c..d2a945a27cfa 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
*/
#ifndef _DPU_HW_CATALOG_H
@@ -464,13 +464,15 @@ struct dpu_mdp_cfg {
struct dpu_clk_ctrl_reg clk_ctrls[DPU_CLK_CTRL_MAX];
};
-/* struct dpu_mdp_cfg : MDP TOP-BLK instance info
+/* struct dpu_ctl_cfg : MDP CTL instance info
* @id: index identifying this block
* @base: register base offset to mdss
* @features bit mask identifying sub-blocks/features
+ * @intr_start: interrupt index for CTL_START
*/
struct dpu_ctl_cfg {
DPU_HW_BLK_INFO;
+ s32 intr_start;
};
/**
@@ -526,11 +528,15 @@ struct dpu_dspp_cfg {
* @id enum identifying this block
* @base register offset of this block
* @features bit mask identifying sub-blocks/features
+ * @intr_done: index for PINGPONG done interrupt
+ * @intr_rdptr: index for PINGPONG readpointer done interrupt
* @sblk sub-blocks information
*/
struct dpu_pingpong_cfg {
DPU_HW_BLK_INFO;
u32 merge_3d;
+ s32 intr_done;
+ s32 intr_rdptr;
const struct dpu_pingpong_sub_blks *sblk;
};
@@ -555,12 +561,16 @@ struct dpu_merge_3d_cfg {
* @type: Interface type(DSI, DP, HDMI)
* @controller_id: Controller Instance ID in case of multiple of intf type
* @prog_fetch_lines_worst_case Worst case latency num lines needed to prefetch
+ * @intr_underrun: index for INTF underrun interrupt
+ * @intr_vsync: index for INTF VSYNC interrupt
*/
struct dpu_intf_cfg {
DPU_HW_BLK_INFO;
u32 type; /* interface type*/
u32 controller_id;
u32 prog_fetch_lines_worst_case;
+ s32 intr_underrun;
+ s32 intr_vsync;
};
/**
@@ -723,7 +733,6 @@ struct dpu_perf_cfg {
* @cursor_formats Supported formats for cursor pipe
* @vig_formats Supported formats for vig pipe
* @mdss_irqs: Bitmap with the irqs supported by the target
- * @obsolete_irq: Irq types that are obsolete for a particular target
*/
struct dpu_mdss_cfg {
u32 hwversion;
@@ -770,7 +779,6 @@ struct dpu_mdss_cfg {
const struct dpu_format_extended *vig_formats;
unsigned long mdss_irqs;
- unsigned long obsolete_irq;
};
struct dpu_mdss_hw_cfg_handler {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 2d4645e01ebf..f8a74f6cdc4c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -589,8 +589,6 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
};
-static struct dpu_hw_blk_ops dpu_hw_ops;
-
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
void __iomem *addr,
const struct dpu_mdss_cfg *m)
@@ -615,14 +613,10 @@ struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
c->mixer_count = m->mixer_count;
c->mixer_hw_caps = m->mixer;
- dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
-
return c;
}
void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
{
- if (ctx)
- dpu_hw_blk_destroy(&ctx->base);
kfree(ctx);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
index e42f901a7de5..a98e964c3b6f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
@@ -85,8 +85,6 @@ static const struct dpu_dspp_cfg *_dspp_offset(enum dpu_dspp dspp,
return ERR_PTR(-EINVAL);
}
-static struct dpu_hw_blk_ops dpu_hw_ops;
-
struct dpu_hw_dspp *dpu_hw_dspp_init(enum dpu_dspp idx,
void __iomem *addr,
const struct dpu_mdss_cfg *m)
@@ -112,16 +110,11 @@ struct dpu_hw_dspp *dpu_hw_dspp_init(enum dpu_dspp idx,
c->cap = cfg;
_setup_dspp_ops(c, c->cap->features);
- dpu_hw_blk_init(&c->base, DPU_HW_BLK_DSPP, idx, &dpu_hw_ops);
-
return c;
}
void dpu_hw_dspp_destroy(struct dpu_hw_dspp *dspp)
{
- if (dspp)
- dpu_hw_blk_destroy(&dspp->base);
-
kfree(dspp);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index 48c96b812126..2e816f232e85 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -30,145 +30,6 @@
#define MDP_INTF_5_OFF_REV_7xxx 0x39000
/**
- * WB interrupt status bit definitions
- */
-#define DPU_INTR_WB_0_DONE BIT(0)
-#define DPU_INTR_WB_1_DONE BIT(1)
-#define DPU_INTR_WB_2_DONE BIT(4)
-
-/**
- * WDOG timer interrupt status bit definitions
- */
-#define DPU_INTR_WD_TIMER_0_DONE BIT(2)
-#define DPU_INTR_WD_TIMER_1_DONE BIT(3)
-#define DPU_INTR_WD_TIMER_2_DONE BIT(5)
-#define DPU_INTR_WD_TIMER_3_DONE BIT(6)
-#define DPU_INTR_WD_TIMER_4_DONE BIT(7)
-
-/**
- * Pingpong interrupt status bit definitions
- */
-#define DPU_INTR_PING_PONG_0_DONE BIT(8)
-#define DPU_INTR_PING_PONG_1_DONE BIT(9)
-#define DPU_INTR_PING_PONG_2_DONE BIT(10)
-#define DPU_INTR_PING_PONG_3_DONE BIT(11)
-#define DPU_INTR_PING_PONG_0_RD_PTR BIT(12)
-#define DPU_INTR_PING_PONG_1_RD_PTR BIT(13)
-#define DPU_INTR_PING_PONG_2_RD_PTR BIT(14)
-#define DPU_INTR_PING_PONG_3_RD_PTR BIT(15)
-#define DPU_INTR_PING_PONG_0_WR_PTR BIT(16)
-#define DPU_INTR_PING_PONG_1_WR_PTR BIT(17)
-#define DPU_INTR_PING_PONG_2_WR_PTR BIT(18)
-#define DPU_INTR_PING_PONG_3_WR_PTR BIT(19)
-#define DPU_INTR_PING_PONG_0_AUTOREFRESH_DONE BIT(20)
-#define DPU_INTR_PING_PONG_1_AUTOREFRESH_DONE BIT(21)
-#define DPU_INTR_PING_PONG_2_AUTOREFRESH_DONE BIT(22)
-#define DPU_INTR_PING_PONG_3_AUTOREFRESH_DONE BIT(23)
-
-/**
- * Interface interrupt status bit definitions
- */
-#define DPU_INTR_INTF_0_UNDERRUN BIT(24)
-#define DPU_INTR_INTF_1_UNDERRUN BIT(26)
-#define DPU_INTR_INTF_2_UNDERRUN BIT(28)
-#define DPU_INTR_INTF_3_UNDERRUN BIT(30)
-#define DPU_INTR_INTF_5_UNDERRUN BIT(22)
-#define DPU_INTR_INTF_0_VSYNC BIT(25)
-#define DPU_INTR_INTF_1_VSYNC BIT(27)
-#define DPU_INTR_INTF_2_VSYNC BIT(29)
-#define DPU_INTR_INTF_3_VSYNC BIT(31)
-#define DPU_INTR_INTF_5_VSYNC BIT(23)
-
-/**
- * Pingpong Secondary interrupt status bit definitions
- */
-#define DPU_INTR_PING_PONG_S0_AUTOREFRESH_DONE BIT(0)
-#define DPU_INTR_PING_PONG_S0_WR_PTR BIT(4)
-#define DPU_INTR_PING_PONG_S0_RD_PTR BIT(8)
-#define DPU_INTR_PING_PONG_S0_TEAR_DETECTED BIT(22)
-#define DPU_INTR_PING_PONG_S0_TE_DETECTED BIT(28)
-
-/**
- * Pingpong TEAR detection interrupt status bit definitions
- */
-#define DPU_INTR_PING_PONG_0_TEAR_DETECTED BIT(16)
-#define DPU_INTR_PING_PONG_1_TEAR_DETECTED BIT(17)
-#define DPU_INTR_PING_PONG_2_TEAR_DETECTED BIT(18)
-#define DPU_INTR_PING_PONG_3_TEAR_DETECTED BIT(19)
-
-/**
- * Pingpong TE detection interrupt status bit definitions
- */
-#define DPU_INTR_PING_PONG_0_TE_DETECTED BIT(24)
-#define DPU_INTR_PING_PONG_1_TE_DETECTED BIT(25)
-#define DPU_INTR_PING_PONG_2_TE_DETECTED BIT(26)
-#define DPU_INTR_PING_PONG_3_TE_DETECTED BIT(27)
-
-/**
- * Ctl start interrupt status bit definitions
- */
-#define DPU_INTR_CTL_0_START BIT(9)
-#define DPU_INTR_CTL_1_START BIT(10)
-#define DPU_INTR_CTL_2_START BIT(11)
-#define DPU_INTR_CTL_3_START BIT(12)
-#define DPU_INTR_CTL_4_START BIT(13)
-
-/**
- * Concurrent WB overflow interrupt status bit definitions
- */
-#define DPU_INTR_CWB_2_OVERFLOW BIT(14)
-#define DPU_INTR_CWB_3_OVERFLOW BIT(15)
-
-/**
- * Histogram VIG done interrupt status bit definitions
- */
-#define DPU_INTR_HIST_VIG_0_DONE BIT(0)
-#define DPU_INTR_HIST_VIG_1_DONE BIT(4)
-#define DPU_INTR_HIST_VIG_2_DONE BIT(8)
-#define DPU_INTR_HIST_VIG_3_DONE BIT(10)
-
-/**
- * Histogram VIG reset Sequence done interrupt status bit definitions
- */
-#define DPU_INTR_HIST_VIG_0_RSTSEQ_DONE BIT(1)
-#define DPU_INTR_HIST_VIG_1_RSTSEQ_DONE BIT(5)
-#define DPU_INTR_HIST_VIG_2_RSTSEQ_DONE BIT(9)
-#define DPU_INTR_HIST_VIG_3_RSTSEQ_DONE BIT(11)
-
-/**
- * Histogram DSPP done interrupt status bit definitions
- */
-#define DPU_INTR_HIST_DSPP_0_DONE BIT(12)
-#define DPU_INTR_HIST_DSPP_1_DONE BIT(16)
-#define DPU_INTR_HIST_DSPP_2_DONE BIT(20)
-#define DPU_INTR_HIST_DSPP_3_DONE BIT(22)
-
-/**
- * Histogram DSPP reset Sequence done interrupt status bit definitions
- */
-#define DPU_INTR_HIST_DSPP_0_RSTSEQ_DONE BIT(13)
-#define DPU_INTR_HIST_DSPP_1_RSTSEQ_DONE BIT(17)
-#define DPU_INTR_HIST_DSPP_2_RSTSEQ_DONE BIT(21)
-#define DPU_INTR_HIST_DSPP_3_RSTSEQ_DONE BIT(23)
-
-/**
- * INTF interrupt status bit definitions
- */
-#define DPU_INTR_VIDEO_INTO_STATIC BIT(0)
-#define DPU_INTR_VIDEO_OUTOF_STATIC BIT(1)
-#define DPU_INTR_DSICMD_0_INTO_STATIC BIT(2)
-#define DPU_INTR_DSICMD_0_OUTOF_STATIC BIT(3)
-#define DPU_INTR_DSICMD_1_INTO_STATIC BIT(4)
-#define DPU_INTR_DSICMD_1_OUTOF_STATIC BIT(5)
-#define DPU_INTR_DSICMD_2_INTO_STATIC BIT(6)
-#define DPU_INTR_DSICMD_2_OUTOF_STATIC BIT(7)
-#define DPU_INTR_PROG_LINE BIT(8)
-
-/**
- * AD4 interrupt status bit definitions
- */
-#define DPU_INTR_BACKLIGHT_UPDATED BIT(0)
-/**
* struct dpu_intr_reg - array of DPU register sets
* @clr_off: offset to CLEAR reg
* @en_off: offset to ENABLE reg
@@ -180,22 +41,10 @@ struct dpu_intr_reg {
u32 status_off;
};
-/**
- * struct dpu_irq_type - maps each irq with i/f
- * @intr_type: type of interrupt listed in dpu_intr_type
- * @instance_idx: instance index of the associated HW block in DPU
- * @irq_mask: corresponding bit in the interrupt status reg
- * @reg_idx: which reg set to use
- */
-struct dpu_irq_type {
- u32 intr_type;
- u32 instance_idx;
- u32 irq_mask;
- u32 reg_idx;
-};
-
/*
* struct dpu_intr_reg - List of DPU interrupt registers
+ *
+ * When making changes be sure to sync with dpu_hw_intr_reg
*/
static const struct dpu_intr_reg dpu_intr_set[] = {
{
@@ -265,1101 +114,22 @@ static const struct dpu_intr_reg dpu_intr_set[] = {
},
};
-/*
- * struct dpu_irq_type - IRQ mapping table use for lookup an irq_idx in this
- * table that have a matching interface type and
- * instance index.
- */
-static const struct dpu_irq_type dpu_irq_map[] = {
- /* BEGIN MAP_RANGE: 0-31, INTR */
- /* irq_idx: 0-3 */
- { DPU_IRQ_TYPE_WB_ROT_COMP, WB_0, DPU_INTR_WB_0_DONE, 0},
- { DPU_IRQ_TYPE_WB_ROT_COMP, WB_1, DPU_INTR_WB_1_DONE, 0},
- { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_0, DPU_INTR_WD_TIMER_0_DONE, 0},
- { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_1, DPU_INTR_WD_TIMER_1_DONE, 0},
- /* irq_idx: 4-7 */
- { DPU_IRQ_TYPE_WB_WFD_COMP, WB_2, DPU_INTR_WB_2_DONE, 0},
- { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_2, DPU_INTR_WD_TIMER_2_DONE, 0},
- { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_3, DPU_INTR_WD_TIMER_3_DONE, 0},
- { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_4, DPU_INTR_WD_TIMER_4_DONE, 0},
- /* irq_idx: 8-11 */
- { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_0,
- DPU_INTR_PING_PONG_0_DONE, 0},
- { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_1,
- DPU_INTR_PING_PONG_1_DONE, 0},
- { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_2,
- DPU_INTR_PING_PONG_2_DONE, 0},
- { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_3,
- DPU_INTR_PING_PONG_3_DONE, 0},
- /* irq_idx: 12-15 */
- { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_0,
- DPU_INTR_PING_PONG_0_RD_PTR, 0},
- { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_1,
- DPU_INTR_PING_PONG_1_RD_PTR, 0},
- { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_2,
- DPU_INTR_PING_PONG_2_RD_PTR, 0},
- { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_3,
- DPU_INTR_PING_PONG_3_RD_PTR, 0},
- /* irq_idx: 16-19 */
- { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_0,
- DPU_INTR_PING_PONG_0_WR_PTR, 0},
- { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_1,
- DPU_INTR_PING_PONG_1_WR_PTR, 0},
- { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_2,
- DPU_INTR_PING_PONG_2_WR_PTR, 0},
- { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_3,
- DPU_INTR_PING_PONG_3_WR_PTR, 0},
- /* irq_idx: 20-23 */
- { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_0,
- DPU_INTR_PING_PONG_0_AUTOREFRESH_DONE, 0},
- { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_1,
- DPU_INTR_PING_PONG_1_AUTOREFRESH_DONE, 0},
- { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_2,
- DPU_INTR_PING_PONG_2_AUTOREFRESH_DONE, 0},
- { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_3,
- DPU_INTR_PING_PONG_3_AUTOREFRESH_DONE, 0},
- /* irq_idx: 24-27 */
- { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_0, DPU_INTR_INTF_0_UNDERRUN, 0},
- { DPU_IRQ_TYPE_INTF_VSYNC, INTF_0, DPU_INTR_INTF_0_VSYNC, 0},
- { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_1, DPU_INTR_INTF_1_UNDERRUN, 0},
- { DPU_IRQ_TYPE_INTF_VSYNC, INTF_1, DPU_INTR_INTF_1_VSYNC, 0},
- /* irq_idx: 28-31 */
- { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_2, DPU_INTR_INTF_2_UNDERRUN, 0},
- { DPU_IRQ_TYPE_INTF_VSYNC, INTF_2, DPU_INTR_INTF_2_VSYNC, 0},
- { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, DPU_INTR_INTF_3_UNDERRUN, 0},
- { DPU_IRQ_TYPE_INTF_VSYNC, INTF_3, DPU_INTR_INTF_3_VSYNC, 0},
- /* irq_idx:32-33 */
- { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_5, DPU_INTR_INTF_5_UNDERRUN, 0},
- { DPU_IRQ_TYPE_INTF_VSYNC, INTF_5, DPU_INTR_INTF_5_VSYNC, 0},
- /* irq_idx:34-63 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
- /* BEGIN MAP_RANGE: 64-95, INTR2 */
- /* irq_idx: 64-67 */
- { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_S0,
- DPU_INTR_PING_PONG_S0_AUTOREFRESH_DONE, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- /* irq_idx: 68-71 */
- { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_S0,
- DPU_INTR_PING_PONG_S0_WR_PTR, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- /* irq_idx: 72 */
- { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0,
- DPU_INTR_PING_PONG_S0_RD_PTR, 1},
- /* irq_idx: 73-77 */
- { DPU_IRQ_TYPE_CTL_START, CTL_0,
- DPU_INTR_CTL_0_START, 1},
- { DPU_IRQ_TYPE_CTL_START, CTL_1,
- DPU_INTR_CTL_1_START, 1},
- { DPU_IRQ_TYPE_CTL_START, CTL_2,
- DPU_INTR_CTL_2_START, 1},
- { DPU_IRQ_TYPE_CTL_START, CTL_3,
- DPU_INTR_CTL_3_START, 1},
- { DPU_IRQ_TYPE_CTL_START, CTL_4,
- DPU_INTR_CTL_4_START, 1},
- /* irq_idx: 78-79 */
- { DPU_IRQ_TYPE_CWB_OVERFLOW, CWB_2, DPU_INTR_CWB_2_OVERFLOW, 1},
- { DPU_IRQ_TYPE_CWB_OVERFLOW, CWB_3, DPU_INTR_CWB_3_OVERFLOW, 1},
- /* irq_idx: 80-83 */
- { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_0,
- DPU_INTR_PING_PONG_0_TEAR_DETECTED, 1},
- { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_1,
- DPU_INTR_PING_PONG_1_TEAR_DETECTED, 1},
- { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_2,
- DPU_INTR_PING_PONG_2_TEAR_DETECTED, 1},
- { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_3,
- DPU_INTR_PING_PONG_3_TEAR_DETECTED, 1},
- /* irq_idx: 84-87 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_S0,
- DPU_INTR_PING_PONG_S0_TEAR_DETECTED, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- /* irq_idx: 88-91 */
- { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_0,
- DPU_INTR_PING_PONG_0_TE_DETECTED, 1},
- { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_1,
- DPU_INTR_PING_PONG_1_TE_DETECTED, 1},
- { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_2,
- DPU_INTR_PING_PONG_2_TE_DETECTED, 1},
- { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_3,
- DPU_INTR_PING_PONG_3_TE_DETECTED, 1},
- /* irq_idx: 92-95 */
- { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_S0,
- DPU_INTR_PING_PONG_S0_TE_DETECTED, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- /* irq_idx: 96-127 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
- /* BEGIN MAP_RANGE: 128-159 HIST */
- /* irq_idx: 128-131 */
- { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, DPU_INTR_HIST_VIG_0_DONE, 2},
- { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
- DPU_INTR_HIST_VIG_0_RSTSEQ_DONE, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- /* irq_idx: 132-135 */
- { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, DPU_INTR_HIST_VIG_1_DONE, 2},
- { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
- DPU_INTR_HIST_VIG_1_RSTSEQ_DONE, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- /* irq_idx: 136-139 */
- { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, DPU_INTR_HIST_VIG_2_DONE, 2},
- { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
- DPU_INTR_HIST_VIG_2_RSTSEQ_DONE, 2},
- { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, DPU_INTR_HIST_VIG_3_DONE, 2},
- { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
- DPU_INTR_HIST_VIG_3_RSTSEQ_DONE, 2},
- /* irq_idx: 140-143 */
- { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, DPU_INTR_HIST_DSPP_0_DONE, 2},
- { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
- DPU_INTR_HIST_DSPP_0_RSTSEQ_DONE, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- /* irq_idx: 144-147 */
- { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, DPU_INTR_HIST_DSPP_1_DONE, 2},
- { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
- DPU_INTR_HIST_DSPP_1_RSTSEQ_DONE, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- /* irq_idx: 148-151 */
- { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, DPU_INTR_HIST_DSPP_2_DONE, 2},
- { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
- DPU_INTR_HIST_DSPP_2_RSTSEQ_DONE, 2},
- { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, DPU_INTR_HIST_DSPP_3_DONE, 2},
- { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
- DPU_INTR_HIST_DSPP_3_RSTSEQ_DONE, 2},
- /* irq_idx: 152-155 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- /* irq_idx: 156-159 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- /* irq_idx: 160-191 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
- /* BEGIN MAP_RANGE: 192-255 INTF_0_INTR */
- /* irq_idx: 192-195 */
- { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_0,
- DPU_INTR_VIDEO_INTO_STATIC, 3},
- { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_0,
- DPU_INTR_VIDEO_OUTOF_STATIC, 3},
- { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_0,
- DPU_INTR_DSICMD_0_INTO_STATIC, 3},
- { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_0,
- DPU_INTR_DSICMD_0_OUTOF_STATIC, 3},
- /* irq_idx: 196-199 */
- { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_0,
- DPU_INTR_DSICMD_1_INTO_STATIC, 3},
- { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_0,
- DPU_INTR_DSICMD_1_OUTOF_STATIC, 3},
- { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_0,
- DPU_INTR_DSICMD_2_INTO_STATIC, 3},
- { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_0,
- DPU_INTR_DSICMD_2_OUTOF_STATIC, 3},
- /* irq_idx: 200-203 */
- { DPU_IRQ_TYPE_PROG_LINE, INTF_0, DPU_INTR_PROG_LINE, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- /* irq_idx: 204-207 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- /* irq_idx: 208-211 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- /* irq_idx: 212-215 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- /* irq_idx: 216-219 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- /* irq_idx: 220-223 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- /* irq_idx: 224-255 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
- /* BEGIN MAP_RANGE: 256-319 INTF_1_INTR */
- /* irq_idx: 256-259 */
- { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_1,
- DPU_INTR_VIDEO_INTO_STATIC, 4},
- { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_1,
- DPU_INTR_VIDEO_OUTOF_STATIC, 4},
- { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_1,
- DPU_INTR_DSICMD_0_INTO_STATIC, 4},
- { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_1,
- DPU_INTR_DSICMD_0_OUTOF_STATIC, 4},
- /* irq_idx: 260-263 */
- { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_1,
- DPU_INTR_DSICMD_1_INTO_STATIC, 4},
- { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_1,
- DPU_INTR_DSICMD_1_OUTOF_STATIC, 4},
- { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_1,
- DPU_INTR_DSICMD_2_INTO_STATIC, 4},
- { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_1,
- DPU_INTR_DSICMD_2_OUTOF_STATIC, 4},
- /* irq_idx: 264-267 */
- { DPU_IRQ_TYPE_PROG_LINE, INTF_1, DPU_INTR_PROG_LINE, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- /* irq_idx: 268-271 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- /* irq_idx: 272-275 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- /* irq_idx: 276-279 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- /* irq_idx: 280-283 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- /* irq_idx: 284-287 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- /* irq_idx: 288-319 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
- /* BEGIN MAP_RANGE: 320-383 INTF_2_INTR */
- /* irq_idx: 320-323 */
- { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_2,
- DPU_INTR_VIDEO_INTO_STATIC, 5},
- { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_2,
- DPU_INTR_VIDEO_OUTOF_STATIC, 5},
- { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_2,
- DPU_INTR_DSICMD_0_INTO_STATIC, 5},
- { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_2,
- DPU_INTR_DSICMD_0_OUTOF_STATIC, 5},
- /* irq_idx: 324-327 */
- { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_2,
- DPU_INTR_DSICMD_1_INTO_STATIC, 5},
- { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_2,
- DPU_INTR_DSICMD_1_OUTOF_STATIC, 5},
- { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_2,
- DPU_INTR_DSICMD_2_INTO_STATIC, 5},
- { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_2,
- DPU_INTR_DSICMD_2_OUTOF_STATIC, 5},
- /* irq_idx: 328-331 */
- { DPU_IRQ_TYPE_PROG_LINE, INTF_2, DPU_INTR_PROG_LINE, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- /* irq_idx: 332-335 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- /* irq_idx: 336-339 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- /* irq_idx: 340-343 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- /* irq_idx: 344-347 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- /* irq_idx: 348-351 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- /* irq_idx: 352-383 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
- /* BEGIN MAP_RANGE: 384-447 INTF_3_INTR */
- /* irq_idx: 384-387 */
- { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_3,
- DPU_INTR_VIDEO_INTO_STATIC, 6},
- { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_3,
- DPU_INTR_VIDEO_OUTOF_STATIC, 6},
- { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_3,
- DPU_INTR_DSICMD_0_INTO_STATIC, 6},
- { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_3,
- DPU_INTR_DSICMD_0_OUTOF_STATIC, 6},
- /* irq_idx: 388-391 */
- { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_3,
- DPU_INTR_DSICMD_1_INTO_STATIC, 6},
- { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_3,
- DPU_INTR_DSICMD_1_OUTOF_STATIC, 6},
- { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_3,
- DPU_INTR_DSICMD_2_INTO_STATIC, 6},
- { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_3,
- DPU_INTR_DSICMD_2_OUTOF_STATIC, 6},
- /* irq_idx: 392-395 */
- { DPU_IRQ_TYPE_PROG_LINE, INTF_3, DPU_INTR_PROG_LINE, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- /* irq_idx: 396-399 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- /* irq_idx: 400-403 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- /* irq_idx: 404-407 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- /* irq_idx: 408-411 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- /* irq_idx: 412-415 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- /* irq_idx: 416-447*/
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
- /* BEGIN MAP_RANGE: 448-511 INTF_4_INTR */
- /* irq_idx: 448-451 */
- { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_4,
- DPU_INTR_VIDEO_INTO_STATIC, 7},
- { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_4,
- DPU_INTR_VIDEO_OUTOF_STATIC, 7},
- { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_4,
- DPU_INTR_DSICMD_0_INTO_STATIC, 7},
- { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_4,
- DPU_INTR_DSICMD_0_OUTOF_STATIC, 7},
- /* irq_idx: 452-455 */
- { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_4,
- DPU_INTR_DSICMD_1_INTO_STATIC, 7},
- { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_4,
- DPU_INTR_DSICMD_1_OUTOF_STATIC, 7},
- { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_4,
- DPU_INTR_DSICMD_2_INTO_STATIC, 7},
- { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_4,
- DPU_INTR_DSICMD_2_OUTOF_STATIC, 7},
- /* irq_idx: 456-459 */
- { DPU_IRQ_TYPE_PROG_LINE, INTF_4, DPU_INTR_PROG_LINE, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- /* irq_idx: 460-463 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- /* irq_idx: 464-467 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- /* irq_idx: 468-471 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- /* irq_idx: 472-475 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- /* irq_idx: 476-479 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- /* irq_idx: 480-511 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
- /* BEGIN MAP_RANGE: 512-575 AD4_0_INTR */
- /* irq_idx: 512-515 */
- { DPU_IRQ_TYPE_AD4_BL_DONE, DSPP_0, DPU_INTR_BACKLIGHT_UPDATED, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- /* irq_idx: 516-519 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- /* irq_idx: 520-523 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- /* irq_idx: 524-527 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- /* irq_idx: 528-531 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- /* irq_idx: 532-535 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- /* irq_idx: 536-539 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- /* irq_idx: 540-543 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- /* irq_idx: 544-575*/
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
- /* BEGIN MAP_RANGE: 576-639 AD4_1_INTR */
- /* irq_idx: 576-579 */
- { DPU_IRQ_TYPE_AD4_BL_DONE, DSPP_1, DPU_INTR_BACKLIGHT_UPDATED, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- /* irq_idx: 580-583 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- /* irq_idx: 584-587 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- /* irq_idx: 588-591 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- /* irq_idx: 592-595 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- /* irq_idx: 596-599 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- /* irq_idx: 600-603 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- /* irq_idx: 604-607 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- /* irq_idx: 608-639 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
- /* BEGIN MAP_RANGE: 640-703 INTF_0_SC7280_INTR */
- /* irq_idx: 640-643 */
- { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_0,
- DPU_INTR_VIDEO_INTO_STATIC, 10},
- { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_0,
- DPU_INTR_VIDEO_OUTOF_STATIC, 10},
- { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_0,
- DPU_INTR_DSICMD_0_INTO_STATIC, 10},
- { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_0,
- DPU_INTR_DSICMD_0_OUTOF_STATIC, 10},
- /* irq_idx: 644-647 */
- { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_0,
- DPU_INTR_DSICMD_1_INTO_STATIC, 10},
- { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_0,
- DPU_INTR_DSICMD_1_OUTOF_STATIC, 10},
- { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_0,
- DPU_INTR_DSICMD_2_INTO_STATIC, 10},
- { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_0,
- DPU_INTR_DSICMD_2_OUTOF_STATIC, 10},
- /* irq_idx: 648-651 */
- { DPU_IRQ_TYPE_PROG_LINE, INTF_0, DPU_INTR_PROG_LINE, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- /* irq_idx: 652-655 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- /* irq_idx: 656-659 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- /* irq_idx: 660-663 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- /* irq_idx: 664-667 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- /* irq_idx: 668-671 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- /* irq_idx: 672-703 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
- /* BEGIN MAP_RANGE: 704-767 INTF_1_SC7280_INTR */
- /* irq_idx: 704-707 */
- { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_1,
- DPU_INTR_VIDEO_INTO_STATIC, 11},
- { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_1,
- DPU_INTR_VIDEO_OUTOF_STATIC, 11},
- { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_1,
- DPU_INTR_DSICMD_0_INTO_STATIC, 11},
- { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_1,
- DPU_INTR_DSICMD_0_OUTOF_STATIC, 11},
- /* irq_idx: 708-711 */
- { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_1,
- DPU_INTR_DSICMD_1_INTO_STATIC, 11},
- { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_1,
- DPU_INTR_DSICMD_1_OUTOF_STATIC, 11},
- { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_1,
- DPU_INTR_DSICMD_2_INTO_STATIC, 11},
- { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_1,
- DPU_INTR_DSICMD_2_OUTOF_STATIC, 11},
- /* irq_idx: 712-715 */
- { DPU_IRQ_TYPE_PROG_LINE, INTF_1, DPU_INTR_PROG_LINE, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- /* irq_idx: 716-719 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- /* irq_idx: 720-723 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- /* irq_idx: 724-727 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- /* irq_idx: 728-731 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- /* irq_idx: 732-735 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- /* irq_idx: 736-767 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
- /* BEGIN MAP_RANGE: 768-831 INTF_5_SC7280_INTR */
- /* irq_idx: 768-771 */
- { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_5,
- DPU_INTR_VIDEO_INTO_STATIC, 12},
- { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_5,
- DPU_INTR_VIDEO_OUTOF_STATIC, 12},
- { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_5,
- DPU_INTR_DSICMD_0_INTO_STATIC, 12},
- { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_5,
- DPU_INTR_DSICMD_0_OUTOF_STATIC, 12},
- /* irq_idx: 772-775 */
- { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_5,
- DPU_INTR_DSICMD_1_INTO_STATIC, 12},
- { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_5,
- DPU_INTR_DSICMD_1_OUTOF_STATIC, 12},
- { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_5,
- DPU_INTR_DSICMD_2_INTO_STATIC, 12},
- { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_5,
- DPU_INTR_DSICMD_2_OUTOF_STATIC, 12},
- /* irq_idx: 776-779 */
- { DPU_IRQ_TYPE_PROG_LINE, INTF_5, DPU_INTR_PROG_LINE, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- /* irq_idx: 780-783 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- /* irq_idx: 784-787 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- /* irq_idx: 788-791 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- /* irq_idx: 792-795 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- /* irq_idx: 796-799 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- /* irq_idx: 800-831 */
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
- { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
-};
+#define DPU_IRQ_REG(irq_idx) (irq_idx / 32)
+#define DPU_IRQ_MASK(irq_idx) (BIT(irq_idx % 32))
-static int dpu_hw_intr_irqidx_lookup(struct dpu_hw_intr *intr,
- enum dpu_intr_type intr_type, u32 instance_idx)
+static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr,
+ int irq_idx)
{
- int i;
+ int reg_idx;
- for (i = 0; i < ARRAY_SIZE(dpu_irq_map); i++) {
- if (intr_type == dpu_irq_map[i].intr_type &&
- instance_idx == dpu_irq_map[i].instance_idx &&
- !(intr->obsolete_irq & BIT(dpu_irq_map[i].intr_type)))
- return i;
- }
+ if (!intr)
+ return;
- pr_debug("IRQ lookup fail!! intr_type=%d, instance_idx=%d\n",
- intr_type, instance_idx);
- return -EINVAL;
+ reg_idx = DPU_IRQ_REG(irq_idx);
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off, DPU_IRQ_MASK(irq_idx));
+
+ /* ensure register writes go through */
+ wmb();
}
static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
@@ -1368,9 +138,9 @@ static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
{
int reg_idx;
int irq_idx;
- int start_idx;
- int end_idx;
u32 irq_status;
+ u32 enable_mask;
+ int bit;
unsigned long irq_flags;
if (!intr)
@@ -1383,86 +153,92 @@ static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
*/
spin_lock_irqsave(&intr->irq_lock, irq_flags);
for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) {
- irq_status = intr->save_irq_status[reg_idx];
+ if (!test_bit(reg_idx, &intr->irq_mask))
+ continue;
- /*
- * Each Interrupt register has a range of 64 indexes, and
- * that is static for dpu_irq_map.
- */
- start_idx = reg_idx * 64;
- end_idx = start_idx + 64;
+ /* Read interrupt status */
+ irq_status = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].status_off);
+
+ /* Read enable mask */
+ enable_mask = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].en_off);
+
+ /* and clear the interrupt */
+ if (irq_status)
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
+ irq_status);
- if (!test_bit(reg_idx, &intr->irq_mask) ||
- start_idx >= ARRAY_SIZE(dpu_irq_map))
+ /* Finally update IRQ status based on enable mask */
+ irq_status &= enable_mask;
+
+ if (!irq_status)
continue;
/*
- * Search through matching intr status from irq map.
- * start_idx and end_idx defined the search range in
- * the dpu_irq_map.
+			 * Search through the pending interrupt status bits.
*/
- for (irq_idx = start_idx;
- (irq_idx < end_idx) && irq_status;
- irq_idx++)
- if ((irq_status & dpu_irq_map[irq_idx].irq_mask) &&
- (dpu_irq_map[irq_idx].reg_idx == reg_idx) &&
- !(intr->obsolete_irq &
- BIT(dpu_irq_map[irq_idx].intr_type))) {
- /*
- * Once a match on irq mask, perform a callback
- * to the given cbfunc. cbfunc will take care
- * the interrupt status clearing. If cbfunc is
- * not provided, then the interrupt clearing
- * is here.
- */
- if (cbfunc)
- cbfunc(arg, irq_idx);
- else
- intr->ops.clear_intr_status_nolock(
- intr, irq_idx);
-
- /*
- * When callback finish, clear the irq_status
- * with the matching mask. Once irq_status
- * is all cleared, the search can be stopped.
- */
- irq_status &= ~dpu_irq_map[irq_idx].irq_mask;
- }
+ while ((bit = ffs(irq_status)) != 0) {
+ irq_idx = DPU_IRQ_IDX(reg_idx, bit - 1);
+ /*
+			 * Once a bit is set in irq_status, convert it to an
+			 * irq_idx and run the callback given in cbfunc, if
+			 * one was provided. The interrupt status bit is then
+			 * cleared here in either case.
+ */
+ if (cbfunc)
+ cbfunc(arg, irq_idx);
+
+ dpu_hw_intr_clear_intr_status_nolock(intr, irq_idx);
+
+ /*
+			 * When the callback has finished, clear the handled
+			 * bit from the local irq_status copy. Once irq_status
+			 * is all cleared, the search can stop.
+ */
+ irq_status &= ~BIT(bit - 1);
+ }
}
+
+ /* ensure register writes go through */
+ wmb();
+
spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
}
-static int dpu_hw_intr_enable_irq(struct dpu_hw_intr *intr, int irq_idx)
+static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
{
int reg_idx;
- unsigned long irq_flags;
const struct dpu_intr_reg *reg;
- const struct dpu_irq_type *irq;
const char *dbgstr = NULL;
uint32_t cache_irq_mask;
if (!intr)
return -EINVAL;
- if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+ if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
pr_err("invalid IRQ index: [%d]\n", irq_idx);
return -EINVAL;
}
- irq = &dpu_irq_map[irq_idx];
- reg_idx = irq->reg_idx;
+ /*
+	 * The cache_irq_mask and hardware RMW operations need to be
+	 * done under irq_lock; it is the caller's responsibility to
+	 * ensure the lock is held.
+ */
+ assert_spin_locked(&intr->irq_lock);
+
+ reg_idx = DPU_IRQ_REG(irq_idx);
reg = &dpu_intr_set[reg_idx];
- spin_lock_irqsave(&intr->irq_lock, irq_flags);
cache_irq_mask = intr->cache_irq_mask[reg_idx];
- if (cache_irq_mask & irq->irq_mask) {
+ if (cache_irq_mask & DPU_IRQ_MASK(irq_idx)) {
dbgstr = "DPU IRQ already set:";
} else {
dbgstr = "DPU IRQ enabled:";
- cache_irq_mask |= irq->irq_mask;
+ cache_irq_mask |= DPU_IRQ_MASK(irq_idx);
/* Cleaning any pending interrupt */
- DPU_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+ DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
/* Enabling interrupts with the new mask */
DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
@@ -1471,45 +247,49 @@ static int dpu_hw_intr_enable_irq(struct dpu_hw_intr *intr, int irq_idx)
intr->cache_irq_mask[reg_idx] = cache_irq_mask;
}
- spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
- pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
- irq->irq_mask, cache_irq_mask);
+ pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr,
+ DPU_IRQ_MASK(irq_idx), cache_irq_mask);
return 0;
}
-static int dpu_hw_intr_disable_irq_nolock(struct dpu_hw_intr *intr, int irq_idx)
+static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
{
int reg_idx;
const struct dpu_intr_reg *reg;
- const struct dpu_irq_type *irq;
const char *dbgstr = NULL;
uint32_t cache_irq_mask;
if (!intr)
return -EINVAL;
- if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+ if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
pr_err("invalid IRQ index: [%d]\n", irq_idx);
return -EINVAL;
}
- irq = &dpu_irq_map[irq_idx];
- reg_idx = irq->reg_idx;
+ /*
+	 * The cache_irq_mask and hardware RMW operations need to be
+	 * done under irq_lock; it is the caller's responsibility to
+	 * ensure the lock is held.
+ */
+ assert_spin_locked(&intr->irq_lock);
+
+ reg_idx = DPU_IRQ_REG(irq_idx);
reg = &dpu_intr_set[reg_idx];
cache_irq_mask = intr->cache_irq_mask[reg_idx];
- if ((cache_irq_mask & irq->irq_mask) == 0) {
+ if ((cache_irq_mask & DPU_IRQ_MASK(irq_idx)) == 0) {
dbgstr = "DPU IRQ is already cleared:";
} else {
dbgstr = "DPU IRQ mask disable:";
- cache_irq_mask &= ~irq->irq_mask;
+ cache_irq_mask &= ~DPU_IRQ_MASK(irq_idx);
/* Disable interrupts based on the new mask */
DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
/* Cleaning any pending interrupt */
- DPU_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+ DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
/* ensure register write goes through */
wmb();
@@ -1517,27 +297,8 @@ static int dpu_hw_intr_disable_irq_nolock(struct dpu_hw_intr *intr, int irq_idx)
intr->cache_irq_mask[reg_idx] = cache_irq_mask;
}
- pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
- irq->irq_mask, cache_irq_mask);
-
- return 0;
-}
-
-static int dpu_hw_intr_disable_irq(struct dpu_hw_intr *intr, int irq_idx)
-{
- unsigned long irq_flags;
-
- if (!intr)
- return -EINVAL;
-
- if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
- pr_err("invalid IRQ index: [%d]\n", irq_idx);
- return -EINVAL;
- }
-
- spin_lock_irqsave(&intr->irq_lock, irq_flags);
- dpu_hw_intr_disable_irq_nolock(intr, irq_idx);
- spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+ pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr,
+ DPU_IRQ_MASK(irq_idx), cache_irq_mask);
return 0;
}
@@ -1580,58 +341,6 @@ static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr)
return 0;
}
-static void dpu_hw_intr_get_interrupt_statuses(struct dpu_hw_intr *intr)
-{
- int i;
- u32 enable_mask;
- unsigned long irq_flags;
-
- if (!intr)
- return;
-
- spin_lock_irqsave(&intr->irq_lock, irq_flags);
- for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
- if (!test_bit(i, &intr->irq_mask))
- continue;
-
- /* Read interrupt status */
- intr->save_irq_status[i] = DPU_REG_READ(&intr->hw,
- dpu_intr_set[i].status_off);
-
- /* Read enable mask */
- enable_mask = DPU_REG_READ(&intr->hw, dpu_intr_set[i].en_off);
-
- /* and clear the interrupt */
- if (intr->save_irq_status[i])
- DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].clr_off,
- intr->save_irq_status[i]);
-
- /* Finally update IRQ status based on enable mask */
- intr->save_irq_status[i] &= enable_mask;
- }
-
- /* ensure register writes go through */
- wmb();
-
- spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
-}
-
-static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr,
- int irq_idx)
-{
- int reg_idx;
-
- if (!intr)
- return;
-
- reg_idx = dpu_irq_map[irq_idx].reg_idx;
- DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
- dpu_irq_map[irq_idx].irq_mask);
-
- /* ensure register writes go through */
- wmb();
-}
-
static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
int irq_idx, bool clear)
{
@@ -1642,17 +351,17 @@ static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
if (!intr)
return 0;
- if (irq_idx >= ARRAY_SIZE(dpu_irq_map) || irq_idx < 0) {
+ if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
pr_err("invalid IRQ index: [%d]\n", irq_idx);
return 0;
}
spin_lock_irqsave(&intr->irq_lock, irq_flags);
- reg_idx = dpu_irq_map[irq_idx].reg_idx;
+ reg_idx = DPU_IRQ_REG(irq_idx);
intr_status = DPU_REG_READ(&intr->hw,
dpu_intr_set[reg_idx].status_off) &
- dpu_irq_map[irq_idx].irq_mask;
+ DPU_IRQ_MASK(irq_idx);
if (intr_status && clear)
DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
intr_status);
@@ -1665,17 +374,30 @@ static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
return intr_status;
}
+static unsigned long dpu_hw_intr_lock(struct dpu_hw_intr *intr)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+
+ return irq_flags;
+}
+
+static void dpu_hw_intr_unlock(struct dpu_hw_intr *intr, unsigned long irq_flags)
+{
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
+
static void __setup_intr_ops(struct dpu_hw_intr_ops *ops)
{
- ops->irq_idx_lookup = dpu_hw_intr_irqidx_lookup;
- ops->enable_irq = dpu_hw_intr_enable_irq;
- ops->disable_irq = dpu_hw_intr_disable_irq;
+ ops->enable_irq_locked = dpu_hw_intr_enable_irq_locked;
+ ops->disable_irq_locked = dpu_hw_intr_disable_irq_locked;
ops->dispatch_irqs = dpu_hw_intr_dispatch_irq;
ops->clear_all_irqs = dpu_hw_intr_clear_irqs;
ops->disable_all_irqs = dpu_hw_intr_disable_irqs;
- ops->get_interrupt_statuses = dpu_hw_intr_get_interrupt_statuses;
- ops->clear_intr_status_nolock = dpu_hw_intr_clear_intr_status_nolock;
ops->get_interrupt_status = dpu_hw_intr_get_interrupt_status;
+ ops->lock = dpu_hw_intr_lock;
+ ops->unlock = dpu_hw_intr_unlock;
}
static void __intr_offset(struct dpu_mdss_cfg *m,
@@ -1701,7 +423,7 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
__intr_offset(m, addr, &intr->hw);
__setup_intr_ops(&intr->ops);
- intr->irq_idx_tbl_size = ARRAY_SIZE(dpu_irq_map);
+ intr->total_irqs = ARRAY_SIZE(dpu_intr_set) * 32;
intr->cache_irq_mask = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
GFP_KERNEL);
@@ -1710,16 +432,7 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
return ERR_PTR(-ENOMEM);
}
- intr->save_irq_status = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
- GFP_KERNEL);
- if (intr->save_irq_status == NULL) {
- kfree(intr->cache_irq_mask);
- kfree(intr);
- return ERR_PTR(-ENOMEM);
- }
-
intr->irq_mask = m->mdss_irqs;
- intr->obsolete_irq = m->obsolete_irq;
spin_lock_init(&intr->irq_lock);
@@ -1730,7 +443,6 @@ void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
{
if (intr) {
kfree(intr->cache_irq_mask);
- kfree(intr->save_irq_status);
kfree(intr);
}
}
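
The lookup table removed above is replaced by pure arithmetic: each interrupt register owns 32 consecutive irq indexes, so DPU_IRQ_REG() and DPU_IRQ_MASK() recover the register slot and bit from an irq_idx, and DPU_IRQ_IDX() goes the other way. The following standalone sketch of that encoding is illustrative only and is not part of the patch; the extra parentheses around the macro arguments are added here for macro hygiene.

#include <stdio.h>

#define BIT(nr)                      (1UL << (nr))
#define DPU_IRQ_REG(irq_idx)         ((irq_idx) / 32)
#define DPU_IRQ_MASK(irq_idx)        (BIT((irq_idx) % 32))
#define DPU_IRQ_IDX(reg_idx, offset) ((reg_idx) * 32 + (offset))

int main(void)
{
	/* bit 9 of the second interrupt register (reg_idx 1) */
	int irq_idx = DPU_IRQ_IDX(1, 9);

	/* prints: irq_idx=41 reg=1 mask=0x00000200 */
	printf("irq_idx=%d reg=%d mask=0x%08lx\n",
	       irq_idx, DPU_IRQ_REG(irq_idx),
	       (unsigned long)DPU_IRQ_MASK(irq_idx));
	return 0;
}
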
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
index 5d6f9a7a5195..ac83c1159815 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -12,85 +12,32 @@
#include "dpu_hw_util.h"
#include "dpu_hw_mdss.h"
-/**
- * dpu_intr_type - HW Interrupt Type
- * @DPU_IRQ_TYPE_WB_ROT_COMP: WB rotator done
- * @DPU_IRQ_TYPE_WB_WFD_COMP: WB WFD done
- * @DPU_IRQ_TYPE_PING_PONG_COMP: PingPong done
- * @DPU_IRQ_TYPE_PING_PONG_RD_PTR: PingPong read pointer
- * @DPU_IRQ_TYPE_PING_PONG_WR_PTR: PingPong write pointer
- * @DPU_IRQ_TYPE_PING_PONG_AUTO_REF: PingPong auto refresh
- * @DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK: PingPong Tear check
- * @DPU_IRQ_TYPE_PING_PONG_TE_CHECK: PingPong TE detection
- * @DPU_IRQ_TYPE_INTF_UNDER_RUN: INTF underrun
- * @DPU_IRQ_TYPE_INTF_VSYNC: INTF VSYNC
- * @DPU_IRQ_TYPE_CWB_OVERFLOW: Concurrent WB overflow
- * @DPU_IRQ_TYPE_HIST_VIG_DONE: VIG Histogram done
- * @DPU_IRQ_TYPE_HIST_VIG_RSTSEQ: VIG Histogram reset
- * @DPU_IRQ_TYPE_HIST_DSPP_DONE: DSPP Histogram done
- * @DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ: DSPP Histogram reset
- * @DPU_IRQ_TYPE_WD_TIMER: Watchdog timer
- * @DPU_IRQ_TYPE_SFI_VIDEO_IN: Video static frame INTR into static
- * @DPU_IRQ_TYPE_SFI_VIDEO_OUT: Video static frame INTR out-of static
- * @DPU_IRQ_TYPE_SFI_CMD_0_IN: DSI CMD0 static frame INTR into static
- * @DPU_IRQ_TYPE_SFI_CMD_0_OUT: DSI CMD0 static frame INTR out-of static
- * @DPU_IRQ_TYPE_SFI_CMD_1_IN: DSI CMD1 static frame INTR into static
- * @DPU_IRQ_TYPE_SFI_CMD_1_OUT: DSI CMD1 static frame INTR out-of static
- * @DPU_IRQ_TYPE_SFI_CMD_2_IN: DSI CMD2 static frame INTR into static
- * @DPU_IRQ_TYPE_SFI_CMD_2_OUT: DSI CMD2 static frame INTR out-of static
- * @DPU_IRQ_TYPE_PROG_LINE: Programmable Line interrupt
- * @DPU_IRQ_TYPE_AD4_BL_DONE: AD4 backlight
- * @DPU_IRQ_TYPE_CTL_START: Control start
- * @DPU_IRQ_TYPE_RESERVED: Reserved for expansion
- */
-enum dpu_intr_type {
- DPU_IRQ_TYPE_WB_ROT_COMP,
- DPU_IRQ_TYPE_WB_WFD_COMP,
- DPU_IRQ_TYPE_PING_PONG_COMP,
- DPU_IRQ_TYPE_PING_PONG_RD_PTR,
- DPU_IRQ_TYPE_PING_PONG_WR_PTR,
- DPU_IRQ_TYPE_PING_PONG_AUTO_REF,
- DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK,
- DPU_IRQ_TYPE_PING_PONG_TE_CHECK,
- DPU_IRQ_TYPE_INTF_UNDER_RUN,
- DPU_IRQ_TYPE_INTF_VSYNC,
- DPU_IRQ_TYPE_CWB_OVERFLOW,
- DPU_IRQ_TYPE_HIST_VIG_DONE,
- DPU_IRQ_TYPE_HIST_VIG_RSTSEQ,
- DPU_IRQ_TYPE_HIST_DSPP_DONE,
- DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ,
- DPU_IRQ_TYPE_WD_TIMER,
- DPU_IRQ_TYPE_SFI_VIDEO_IN,
- DPU_IRQ_TYPE_SFI_VIDEO_OUT,
- DPU_IRQ_TYPE_SFI_CMD_0_IN,
- DPU_IRQ_TYPE_SFI_CMD_0_OUT,
- DPU_IRQ_TYPE_SFI_CMD_1_IN,
- DPU_IRQ_TYPE_SFI_CMD_1_OUT,
- DPU_IRQ_TYPE_SFI_CMD_2_IN,
- DPU_IRQ_TYPE_SFI_CMD_2_OUT,
- DPU_IRQ_TYPE_PROG_LINE,
- DPU_IRQ_TYPE_AD4_BL_DONE,
- DPU_IRQ_TYPE_CTL_START,
- DPU_IRQ_TYPE_RESERVED,
+/* When making changes be sure to sync with dpu_intr_set */
+enum dpu_hw_intr_reg {
+ MDP_SSPP_TOP0_INTR,
+ MDP_SSPP_TOP0_INTR2,
+ MDP_SSPP_TOP0_HIST_INTR,
+ MDP_INTF0_INTR,
+ MDP_INTF1_INTR,
+ MDP_INTF2_INTR,
+ MDP_INTF3_INTR,
+ MDP_INTF4_INTR,
+ MDP_AD4_0_INTR,
+ MDP_AD4_1_INTR,
+ MDP_INTF0_7xxx_INTR,
+ MDP_INTF1_7xxx_INTR,
+ MDP_INTF5_7xxx_INTR,
+ MDP_INTR_MAX,
};
+#define DPU_IRQ_IDX(reg_idx, offset) (reg_idx * 32 + offset)
+
struct dpu_hw_intr;
/**
* Interrupt operations.
*/
struct dpu_hw_intr_ops {
- /**
- * irq_idx_lookup - Lookup IRQ index on the HW interrupt type
- * Used for all irq related ops
- * @intr: HW interrupt handle
- * @intr_type: Interrupt type defined in dpu_intr_type
- * @instance_idx: HW interrupt block instance
- * @return: irq_idx or -EINVAL for lookup fail
- */
- int (*irq_idx_lookup)(struct dpu_hw_intr *intr,
- enum dpu_intr_type intr_type,
- u32 instance_idx);
/**
* enable_irq - Enable IRQ based on lookup IRQ index
@@ -98,7 +45,7 @@ struct dpu_hw_intr_ops {
* @irq_idx: Lookup irq index return from irq_idx_lookup
* @return: 0 for success, otherwise failure
*/
- int (*enable_irq)(
+ int (*enable_irq_locked)(
struct dpu_hw_intr *intr,
int irq_idx);
@@ -108,7 +55,7 @@ struct dpu_hw_intr_ops {
* @irq_idx: Lookup irq index return from irq_idx_lookup
* @return: 0 for success, otherwise failure
*/
- int (*disable_irq)(
+ int (*disable_irq_locked)(
struct dpu_hw_intr *intr,
int irq_idx);
@@ -143,23 +90,6 @@ struct dpu_hw_intr_ops {
void *arg);
/**
- * get_interrupt_statuses - Gets and store value from all interrupt
- * status registers that are currently fired.
- * @intr: HW interrupt handle
- */
- void (*get_interrupt_statuses)(
- struct dpu_hw_intr *intr);
-
- /**
- * clear_intr_status_nolock() - clears the HW interrupts without lock
- * @intr: HW interrupt handle
- * @irq_idx: Lookup irq index return from irq_idx_lookup
- */
- void (*clear_intr_status_nolock)(
- struct dpu_hw_intr *intr,
- int irq_idx);
-
- /**
* get_interrupt_status - Gets HW interrupt status, and clear if set,
* based on given lookup IRQ index.
* @intr: HW interrupt handle
@@ -170,6 +100,22 @@ struct dpu_hw_intr_ops {
struct dpu_hw_intr *intr,
int irq_idx,
bool clear);
+
+ /**
+ * lock - take the IRQ lock
+ * @intr: HW interrupt handle
+ * @return: irq_flags for the taken spinlock
+ */
+ unsigned long (*lock)(
+ struct dpu_hw_intr *intr);
+
+ /**
+	 * unlock - release the IRQ lock
+ * @intr: HW interrupt handle
+ * @irq_flags: the irq_flags returned from lock
+ */
+ void (*unlock)(
+ struct dpu_hw_intr *intr, unsigned long irq_flags);
};
/**
@@ -178,19 +124,17 @@ struct dpu_hw_intr_ops {
* @ops: function pointer mapping for IRQ handling
* @cache_irq_mask: array of IRQ enable masks reg storage created during init
* @save_irq_status: array of IRQ status reg storage created during init
- * @irq_idx_tbl_size: total number of irq_idx mapped in the hw_interrupts
+ * @total_irqs: total number of irq indexes supported by the hw block
* @irq_lock: spinlock for accessing IRQ resources
- * @obsolete_irq: irq types that are obsolete for a particular target
*/
struct dpu_hw_intr {
struct dpu_hw_blk_reg_map hw;
struct dpu_hw_intr_ops ops;
u32 *cache_irq_mask;
u32 *save_irq_status;
- u32 irq_idx_tbl_size;
+ u32 total_irqs;
spinlock_t irq_lock;
unsigned long irq_mask;
- unsigned long obsolete_irq;
};
/**
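
With irq_idx_lookup gone and the enable/disable paths renamed to *_locked, callers are expected to take the interrupt spinlock through the new lock/unlock ops before touching the cached enable masks. The sketch below shows the caller-side pattern this interface implies; the helper name is made up, only the ops signatures come from this patch.

static void example_toggle_irq(struct dpu_hw_intr *intr, int irq_idx, bool enable)
{
	unsigned long irq_flags;

	/* take intr->irq_lock through the ops table and keep the flags */
	irq_flags = intr->ops.lock(intr);

	/* the RMW of the cached enable mask must happen under the lock */
	if (enable)
		intr->ops.enable_irq_locked(intr, irq_idx);
	else
		intr->ops.disable_irq_locked(intr, irq_idx);

	/* release the spinlock with the saved flags */
	intr->ops.unlock(intr, irq_flags);
}
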
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index 1599e3f49a4f..116e2b5b1a90 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -299,8 +299,6 @@ static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
ops->bind_pingpong_blk = dpu_hw_intf_bind_pingpong_blk;
}
-static struct dpu_hw_blk_ops dpu_hw_ops;
-
struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
void __iomem *addr,
const struct dpu_mdss_cfg *m)
@@ -327,15 +325,11 @@ struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
c->mdss = m;
_setup_intf_ops(&c->ops, c->cap->features);
- dpu_hw_blk_init(&c->base, DPU_HW_BLK_INTF, idx, &dpu_hw_ops);
-
return c;
}
void dpu_hw_intf_destroy(struct dpu_hw_intf *intf)
{
- if (intf)
- dpu_hw_blk_destroy(&intf->base);
kfree(intf);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index 6ac0b5a0e057..cb6bb7a22c15 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -160,8 +160,6 @@ static void _setup_mixer_ops(const struct dpu_mdss_cfg *m,
ops->setup_border_color = dpu_hw_lm_setup_border_color;
}
-static struct dpu_hw_blk_ops dpu_hw_ops;
-
struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
void __iomem *addr,
const struct dpu_mdss_cfg *m)
@@ -184,14 +182,10 @@ struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
c->cap = cfg;
_setup_mixer_ops(m, &c->ops, c->cap->features);
- dpu_hw_blk_init(&c->base, DPU_HW_BLK_LM, idx, &dpu_hw_ops);
-
return c;
}
void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm)
{
- if (lm)
- dpu_hw_blk_destroy(&lm->base);
kfree(lm);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index 09a3fb3e89f5..bb9ceadeb0bb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -343,7 +343,7 @@ enum dpu_3d_blend_mode {
/** struct dpu_format - defines the format configuration which
* allows DPU HW to correctly fetch and decode the format
- * @base: base msm_format struture containing fourcc code
+ * @base: base msm_format structure containing fourcc code
* @fetch_planes: how the color components are packed in pixel format
* @element: element color ordering
* @bits: element bit widths
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
index 720813e5a8ae..c06d595d5df0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
@@ -58,8 +58,6 @@ static void _setup_merge_3d_ops(struct dpu_hw_merge_3d *c,
c->ops.setup_3d_mode = dpu_hw_merge_3d_setup_3d_mode;
};
-static struct dpu_hw_blk_ops dpu_hw_ops;
-
struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(enum dpu_merge_3d idx,
void __iomem *addr,
const struct dpu_mdss_cfg *m)
@@ -81,14 +79,10 @@ struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(enum dpu_merge_3d idx,
c->caps = cfg;
_setup_merge_3d_ops(c, c->caps->features);
- dpu_hw_blk_init(&c->base, DPU_HW_BLK_MERGE_3D, idx, &dpu_hw_ops);
-
return c;
}
void dpu_hw_merge_3d_destroy(struct dpu_hw_merge_3d *hw)
{
- if (hw)
- dpu_hw_blk_destroy(&hw->base);
kfree(hw);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
index 245a7a62b5c6..55766c97c4c8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -261,8 +261,6 @@ static void _setup_pingpong_ops(struct dpu_hw_pingpong *c,
c->ops.setup_dither = dpu_hw_pp_setup_dither;
};
-static struct dpu_hw_blk_ops dpu_hw_ops;
-
struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
void __iomem *addr,
const struct dpu_mdss_cfg *m)
@@ -284,14 +282,10 @@ struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
c->caps = cfg;
_setup_pingpong_ops(c, c->caps->features);
- dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops);
-
return c;
}
void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp)
{
- if (pp)
- dpu_hw_blk_destroy(&pp->base);
kfree(pp);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index 845b9ce80e31..89d08a715c16 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -126,6 +126,8 @@ struct dpu_hw_pingpong_ops {
struct dpu_hw_dither_cfg *cfg);
};
+struct dpu_hw_merge_3d;
+
struct dpu_hw_pingpong {
struct dpu_hw_blk base;
struct dpu_hw_blk_reg_map hw;
@@ -133,7 +135,7 @@ struct dpu_hw_pingpong {
/* pingpong */
enum dpu_pingpong idx;
const struct dpu_pingpong_cfg *caps;
- struct dpu_hw_blk *merge_3d;
+ struct dpu_hw_merge_3d *merge_3d;
/* ops */
struct dpu_hw_pingpong_ops ops;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 34d81aa16041..69eed7932486 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -706,8 +706,6 @@ static const struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
return ERR_PTR(-ENOMEM);
}
-static struct dpu_hw_blk_ops dpu_hw_ops;
-
struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
void __iomem *addr, struct dpu_mdss_cfg *catalog,
bool is_virtual_pipe)
@@ -735,15 +733,11 @@ struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
hw_pipe->cap = cfg;
_setup_layer_ops(hw_pipe, hw_pipe->cap->features);
- dpu_hw_blk_init(&hw_pipe->base, DPU_HW_BLK_SSPP, idx, &dpu_hw_ops);
-
return hw_pipe;
}
void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx)
{
- if (ctx)
- dpu_hw_blk_destroy(&ctx->base);
kfree(ctx);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
index 01b76766a9a8..282e3c6c6d48 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -295,8 +295,6 @@ static const struct dpu_mdp_cfg *_top_offset(enum dpu_mdp mdp,
return ERR_PTR(-EINVAL);
}
-static struct dpu_hw_blk_ops dpu_hw_ops;
-
struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
void __iomem *addr,
const struct dpu_mdss_cfg *m)
@@ -324,15 +322,11 @@ struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
mdp->caps = cfg;
_setup_mdp_ops(&mdp->ops, mdp->caps->features);
- dpu_hw_blk_init(&mdp->base, DPU_HW_BLK_TOP, idx, &dpu_hw_ops);
-
return mdp;
}
void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp)
{
- if (mdp)
- dpu_hw_blk_destroy(&mdp->base);
kfree(mdp);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 93bc3575bf53..4fd913522931 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -19,6 +19,7 @@
#include "msm_drv.h"
#include "msm_mmu.h"
#include "msm_gem.h"
+#include "disp/msm_disp_snapshot.h"
#include "dpu_kms.h"
#include "dpu_core_irq.h"
@@ -798,6 +799,51 @@ static void dpu_irq_uninstall(struct msm_kms *kms)
dpu_core_irq_uninstall(dpu_kms);
}
+static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_kms *kms)
+{
+ int i;
+ struct dpu_kms *dpu_kms;
+ struct dpu_mdss_cfg *cat;
+ struct dpu_hw_mdp *top;
+
+ dpu_kms = to_dpu_kms(kms);
+
+ cat = dpu_kms->catalog;
+ top = dpu_kms->hw_mdp;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+ /* dump CTL sub-blocks HW regs info */
+ for (i = 0; i < cat->ctl_count; i++)
+ msm_disp_snapshot_add_block(disp_state, cat->ctl[i].len,
+ dpu_kms->mmio + cat->ctl[i].base, "ctl_%d", i);
+
+ /* dump DSPP sub-blocks HW regs info */
+ for (i = 0; i < cat->dspp_count; i++)
+ msm_disp_snapshot_add_block(disp_state, cat->dspp[i].len,
+ dpu_kms->mmio + cat->dspp[i].base, "dspp_%d", i);
+
+ /* dump INTF sub-blocks HW regs info */
+ for (i = 0; i < cat->intf_count; i++)
+ msm_disp_snapshot_add_block(disp_state, cat->intf[i].len,
+ dpu_kms->mmio + cat->intf[i].base, "intf_%d", i);
+
+ /* dump PP sub-blocks HW regs info */
+ for (i = 0; i < cat->pingpong_count; i++)
+ msm_disp_snapshot_add_block(disp_state, cat->pingpong[i].len,
+ dpu_kms->mmio + cat->pingpong[i].base, "pingpong_%d", i);
+
+ /* dump SSPP sub-blocks HW regs info */
+ for (i = 0; i < cat->sspp_count; i++)
+ msm_disp_snapshot_add_block(disp_state, cat->sspp[i].len,
+ dpu_kms->mmio + cat->sspp[i].base, "sspp_%d", i);
+
+ msm_disp_snapshot_add_block(disp_state, top->hw.length,
+ dpu_kms->mmio + top->hw.blk_off, "top");
+
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+}
+
static const struct msm_kms_funcs kms_funcs = {
.hw_init = dpu_kms_hw_init,
.irq_preinstall = dpu_irq_preinstall,
@@ -818,6 +864,7 @@ static const struct msm_kms_funcs kms_funcs = {
.round_pixclk = dpu_kms_round_pixclk,
.destroy = dpu_kms_destroy,
.set_encoder_mode = _dpu_kms_set_encoder_mode,
+ .snapshot = dpu_kms_mdp_snapshot,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = dpu_kms_debugfs_init,
#endif
@@ -1089,21 +1136,21 @@ static int dpu_bind(struct device *dev, struct device *master, void *data)
if (!dpu_kms)
return -ENOMEM;
- dpu_kms->opp_table = dev_pm_opp_set_clkname(dev, "core");
- if (IS_ERR(dpu_kms->opp_table))
- return PTR_ERR(dpu_kms->opp_table);
+ ret = devm_pm_opp_set_clkname(dev, "core");
+ if (ret)
+ return ret;
/* OPP table is optional */
- ret = dev_pm_opp_of_add_table(dev);
+ ret = devm_pm_opp_of_add_table(dev);
if (ret && ret != -ENODEV) {
dev_err(dev, "invalid OPP table in device tree\n");
- goto put_clkname;
+ return ret;
}
mp = &dpu_kms->mp;
ret = msm_dss_parse_clock(pdev, mp);
if (ret) {
DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
- goto err;
+ return ret;
}
platform_set_drvdata(pdev, dpu_kms);
@@ -1111,7 +1158,7 @@ static int dpu_bind(struct device *dev, struct device *master, void *data)
ret = msm_kms_init(&dpu_kms->base, &kms_funcs);
if (ret) {
DPU_ERROR("failed to init kms, ret=%d\n", ret);
- goto err;
+ return ret;
}
dpu_kms->dev = ddev;
dpu_kms->pdev = pdev;
@@ -1120,11 +1167,7 @@ static int dpu_bind(struct device *dev, struct device *master, void *data)
dpu_kms->rpm_enabled = true;
priv->kms = &dpu_kms->base;
- return ret;
-err:
- dev_pm_opp_of_remove_table(dev);
-put_clkname:
- dev_pm_opp_put_clkname(dpu_kms->opp_table);
+
return ret;
}
@@ -1140,9 +1183,6 @@ static void dpu_unbind(struct device *dev, struct device *master, void *data)
if (dpu_kms->rpm_enabled)
pm_runtime_disable(&pdev->dev);
-
- dev_pm_opp_of_remove_table(dev);
- dev_pm_opp_put_clkname(dpu_kms->opp_table);
}
static const struct component_ops dpu_ops = {
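dpu_kms_mdp_snapshot() above walks the hardware catalog and registers one named register range per block with the snapshot core. Adding another block type follows the same pattern; the sketch below assumes a hypothetical cat->vbif array with the same .len/.base fields as the existing entries:

    /* hypothetical extension: dump VBIF sub-blocks like CTL/DSPP/INTF above */
    for (i = 0; i < cat->vbif_count; i++)
            msm_disp_snapshot_add_block(disp_state, cat->vbif[i].len,
                            dpu_kms->mmio + cat->vbif[i].base, "vbif_%d", i);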
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index d6717d6672f7..323a6bce9e64 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -82,16 +82,12 @@ struct dpu_irq_callback {
* struct dpu_irq: IRQ structure contains callback registration info
* @total_irq: total number of irq_idx obtained from HW interrupts mapping
* @irq_cb_tbl: array of IRQ callbacks setting
- * @enable_counts array of IRQ enable counts
- * @cb_lock: callback lock
* @debugfs_file: debugfs file for irq statistics
*/
struct dpu_irq {
u32 total_irqs;
struct list_head *irq_cb_tbl;
- atomic_t *enable_counts;
atomic_t *irq_counts;
- spinlock_t cb_lock;
};
struct dpu_kms {
@@ -130,8 +126,6 @@ struct dpu_kms {
struct platform_device *pdev;
bool rpm_enabled;
- struct opp_table *opp_table;
-
struct dss_module_power mp;
/* reference count bandwidth requests, so we know when we can
@@ -258,7 +252,7 @@ void dpu_kms_encoder_enable(struct drm_encoder *encoder);
/**
* dpu_kms_get_clk_rate() - get the clock rate
- * @dpu_kms: poiner to dpu_kms structure
+ * @dpu_kms: pointer to dpu_kms structure
* @clock_name: clock name to get the rate
*
* Return: current clock rate
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
index 06b56fec04e0..6b0a7bc87eb7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -225,7 +225,7 @@ int dpu_mdss_init(struct drm_device *dev)
struct msm_drm_private *priv = dev->dev_private;
struct dpu_mdss *dpu_mdss;
struct dss_module_power *mp;
- int ret = 0;
+ int ret;
int irq;
dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
@@ -253,8 +253,10 @@ int dpu_mdss_init(struct drm_device *dev)
goto irq_domain_error;
irq = platform_get_irq(pdev, 0);
- if (irq < 0)
+ if (irq < 0) {
+ ret = irq;
goto irq_error;
+ }
irq_set_chained_handler_and_data(irq, dpu_mdss_irq,
dpu_mdss);
@@ -263,7 +265,7 @@ int dpu_mdss_init(struct drm_device *dev)
pm_runtime_enable(dev->dev);
- return ret;
+ return 0;
irq_error:
_dpu_mdss_irq_domain_fini(dpu_mdss);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 7a993547eb75..ec4a6f04394a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -25,7 +25,7 @@
#include "dpu_vbif.h"
#include "dpu_plane.h"
-#define DPU_DEBUG_PLANE(pl, fmt, ...) DPU_DEBUG("plane%d " fmt,\
+#define DPU_DEBUG_PLANE(pl, fmt, ...) DRM_DEBUG_ATOMIC("plane%d " fmt,\
(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
#define DPU_ERROR_PLANE(pl, fmt, ...) DPU_ERROR("plane%d " fmt,\
@@ -284,8 +284,8 @@ static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
}
}
- DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s w:%u fl:%u\n",
- plane->base.id, pdpu->pipe - SSPP_VIG0,
+ DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %4.4s w:%u fl:%u\n",
+ pdpu->pipe - SSPP_VIG0,
(char *)&fmt->base.pixel_format,
src_width, total_fl);
@@ -354,8 +354,7 @@ static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
(fmt) ? fmt->base.pixel_format : 0,
pdpu->is_rt_pipe, total_fl, qos_lut, lut_usage);
- DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s rt:%d fl:%u lut:0x%llx\n",
- plane->base.id,
+ DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %4.4s rt:%d fl:%u lut:0x%llx\n",
pdpu->pipe - SSPP_VIG0,
fmt ? (char *)&fmt->base.pixel_format : NULL,
pdpu->is_rt_pipe, total_fl, qos_lut);
@@ -364,7 +363,7 @@ static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
}
/**
- * _dpu_plane_set_panic_lut - set danger/safe LUT of the given plane
+ * _dpu_plane_set_danger_lut - set danger/safe LUT of the given plane
* @plane: Pointer to drm plane
* @fb: Pointer to framebuffer associated with the given plane
*/
@@ -407,8 +406,7 @@ static void _dpu_plane_set_danger_lut(struct drm_plane *plane,
pdpu->pipe_qos_cfg.danger_lut,
pdpu->pipe_qos_cfg.safe_lut);
- DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s mode:%d luts[0x%x, 0x%x]\n",
- plane->base.id,
+ DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %4.4s mode:%d luts[0x%x, 0x%x]\n",
pdpu->pipe - SSPP_VIG0,
fmt ? (char *)&fmt->base.pixel_format : NULL,
fmt ? fmt->fetch_mode : -1,
@@ -451,8 +449,7 @@ static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
pdpu->pipe_qos_cfg.danger_safe_en = false;
}
- DPU_DEBUG("plane%u: pnum:%d ds:%d vb:%d pri[0x%x, 0x%x] is_rt:%d\n",
- plane->base.id,
+ DPU_DEBUG_PLANE(pdpu, "pnum:%d ds:%d vb:%d pri[0x%x, 0x%x] is_rt:%d\n",
pdpu->pipe - SSPP_VIG0,
pdpu->pipe_qos_cfg.danger_safe_en,
pdpu->pipe_qos_cfg.vblank_en,
@@ -491,7 +488,7 @@ static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
}
/**
- * _dpu_plane_set_vbif_qos - set vbif QoS for the given plane
+ * _dpu_plane_set_qos_remap - set vbif QoS for the given plane
* @plane: Pointer to drm plane
*/
static void _dpu_plane_set_qos_remap(struct drm_plane *plane)
@@ -507,8 +504,8 @@ static void _dpu_plane_set_qos_remap(struct drm_plane *plane)
qos_params.num = pdpu->pipe_hw->idx - SSPP_VIG0;
qos_params.is_rt = pdpu->is_rt_pipe;
- DPU_DEBUG("plane%d pipe:%d vbif:%d xin:%d rt:%d, clk_ctrl:%d\n",
- plane->base.id, qos_params.num,
+ DPU_DEBUG_PLANE(pdpu, "pipe:%d vbif:%d xin:%d rt:%d, clk_ctrl:%d\n",
+ qos_params.num,
qos_params.vbif_idx,
qos_params.xin_id, qos_params.is_rt,
qos_params.clk_ctrl);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index fd2d104f0a91..f9c83d6e427a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -162,7 +162,7 @@ int dpu_rm_init(struct dpu_rm *rm,
goto fail;
}
if (pp->merge_3d && pp->merge_3d < MERGE_3D_MAX)
- hw->merge_3d = rm->merge_3d_blks[pp->merge_3d - MERGE_3D_0];
+ hw->merge_3d = to_dpu_hw_merge_3d(rm->merge_3d_blks[pp->merge_3d - MERGE_3D_0]);
rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base;
}
@@ -428,7 +428,7 @@ static int _dpu_rm_reserve_ctls(
features = ctl->caps->features;
has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;
- DPU_DEBUG("ctl %d caps 0x%lX\n", rm->ctl_blks[j]->id, features);
+ DPU_DEBUG("ctl %d caps 0x%lX\n", j + CTL_0, features);
if (needs_split_display != has_split_display)
continue;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
index 6714b088970f..37bba57675a8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -168,44 +168,41 @@ TRACE_EVENT(dpu_perf_crtc_update,
);
DECLARE_EVENT_CLASS(dpu_enc_irq_template,
- TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx,
int irq_idx),
- TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx),
+ TP_ARGS(drm_id, intr_idx, irq_idx),
TP_STRUCT__entry(
__field( uint32_t, drm_id )
__field( enum dpu_intr_idx, intr_idx )
- __field( int, hw_idx )
__field( int, irq_idx )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->intr_idx = intr_idx;
- __entry->hw_idx = hw_idx;
__entry->irq_idx = irq_idx;
),
- TP_printk("id=%u, intr=%d, hw=%d, irq=%d",
- __entry->drm_id, __entry->intr_idx, __entry->hw_idx,
+ TP_printk("id=%u, intr=%d, irq=%d",
+ __entry->drm_id, __entry->intr_idx,
__entry->irq_idx)
);
DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_register_success,
- TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx,
int irq_idx),
- TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx)
+ TP_ARGS(drm_id, intr_idx, irq_idx)
);
DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_unregister_success,
- TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx,
int irq_idx),
- TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx)
+ TP_ARGS(drm_id, intr_idx, irq_idx)
);
TRACE_EVENT(dpu_enc_irq_wait_success,
- TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx,
int irq_idx, enum dpu_pingpong pp_idx, int atomic_cnt),
- TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx, pp_idx, atomic_cnt),
+ TP_ARGS(drm_id, intr_idx, irq_idx, pp_idx, atomic_cnt),
TP_STRUCT__entry(
__field( uint32_t, drm_id )
__field( enum dpu_intr_idx, intr_idx )
- __field( int, hw_idx )
__field( int, irq_idx )
__field( enum dpu_pingpong, pp_idx )
__field( int, atomic_cnt )
@@ -213,13 +210,12 @@ TRACE_EVENT(dpu_enc_irq_wait_success,
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->intr_idx = intr_idx;
- __entry->hw_idx = hw_idx;
__entry->irq_idx = irq_idx;
__entry->pp_idx = pp_idx;
__entry->atomic_cnt = atomic_cnt;
),
- TP_printk("id=%u, intr=%d, hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
- __entry->drm_id, __entry->intr_idx, __entry->hw_idx,
+ TP_printk("id=%u, intr=%d, irq=%d, pp=%d, atomic_cnt=%d",
+ __entry->drm_id, __entry->intr_idx,
__entry->irq_idx, __entry->pp_idx, __entry->atomic_cnt)
);
@@ -514,12 +510,12 @@ DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_more_pending,
);
TRACE_EVENT(dpu_enc_wait_event_timeout,
- TP_PROTO(uint32_t drm_id, int32_t hw_id, int rc, s64 time,
+ TP_PROTO(uint32_t drm_id, int irq_idx, int rc, s64 time,
s64 expected_time, int atomic_cnt),
- TP_ARGS(drm_id, hw_id, rc, time, expected_time, atomic_cnt),
+ TP_ARGS(drm_id, irq_idx, rc, time, expected_time, atomic_cnt),
TP_STRUCT__entry(
__field( uint32_t, drm_id )
- __field( int32_t, hw_id )
+ __field( int, irq_idx )
__field( int, rc )
__field( s64, time )
__field( s64, expected_time )
@@ -527,14 +523,14 @@ TRACE_EVENT(dpu_enc_wait_event_timeout,
),
TP_fast_assign(
__entry->drm_id = drm_id;
- __entry->hw_id = hw_id;
+ __entry->irq_idx = irq_idx;
__entry->rc = rc;
__entry->time = time;
__entry->expected_time = expected_time;
__entry->atomic_cnt = atomic_cnt;
),
- TP_printk("id=%u, hw_id=%d, rc=%d, time=%lld, expected=%lld cnt=%d",
- __entry->drm_id, __entry->hw_id, __entry->rc, __entry->time,
+ TP_printk("id=%u, irq_idx=%d, rc=%d, time=%lld, expected=%lld cnt=%d",
+ __entry->drm_id, __entry->irq_idx, __entry->rc, __entry->time,
__entry->expected_time, __entry->atomic_cnt)
);
@@ -879,29 +875,6 @@ TRACE_EVENT(dpu_pp_connect_ext_te,
TP_printk("pp:%d cfg:%u", __entry->pp, __entry->cfg)
);
-DECLARE_EVENT_CLASS(dpu_core_irq_idx_cnt_template,
- TP_PROTO(int irq_idx, int enable_count),
- TP_ARGS(irq_idx, enable_count),
- TP_STRUCT__entry(
- __field( int, irq_idx )
- __field( int, enable_count )
- ),
- TP_fast_assign(
- __entry->irq_idx = irq_idx;
- __entry->enable_count = enable_count;
- ),
- TP_printk("irq_idx:%d enable_count:%u", __entry->irq_idx,
- __entry->enable_count)
-);
-DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_enable_idx,
- TP_PROTO(int irq_idx, int enable_count),
- TP_ARGS(irq_idx, enable_count)
-);
-DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_disable_idx,
- TP_PROTO(int irq_idx, int enable_count),
- TP_ARGS(irq_idx, enable_count)
-);
-
DECLARE_EVENT_CLASS(dpu_core_irq_callback_template,
TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
TP_ARGS(irq_idx, callback),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
index 7e08f40e7e6f..21d20373eb8b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -46,7 +46,7 @@ static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
vbif->idx - VBIF_0, xin_id);
} else {
rc = 0;
- DPU_DEBUG("VBIF %d client %d is halted\n",
+ DRM_DEBUG_ATOMIC("VBIF %d client %d is halted\n",
vbif->idx - VBIF_0, xin_id);
}
@@ -87,7 +87,7 @@ static void _dpu_vbif_apply_dynamic_ot_limit(struct dpu_hw_vbif *vbif,
}
}
- DPU_DEBUG("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
+ DRM_DEBUG_ATOMIC("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
vbif->idx - VBIF_0, params->xin_id,
params->width, params->height, params->frame_rate,
pps, *ot_lim);
@@ -133,7 +133,7 @@ static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
}
exit:
- DPU_DEBUG("vbif:%d xin:%d ot_lim:%d\n",
+ DRM_DEBUG_ATOMIC("vbif:%d xin:%d ot_lim:%d\n",
vbif->idx - VBIF_0, params->xin_id, ot_lim);
return ot_lim;
}
@@ -163,7 +163,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
}
if (!vbif || !mdp) {
- DPU_DEBUG("invalid arguments vbif %d mdp %d\n",
+ DRM_DEBUG_ATOMIC("invalid arguments vbif %d mdp %d\n",
vbif != NULL, mdp != NULL);
return;
}
@@ -230,7 +230,7 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
}
if (!vbif->ops.set_qos_remap || !mdp->ops.setup_clk_force_ctrl) {
- DPU_DEBUG("qos remap not supported\n");
+ DRM_DEBUG_ATOMIC("qos remap not supported\n");
return;
}
@@ -238,14 +238,14 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
&vbif->cap->qos_nrt_tbl;
if (!qos_tbl->npriority_lvl || !qos_tbl->priority_lvl) {
- DPU_DEBUG("qos tbl not defined\n");
+ DRM_DEBUG_ATOMIC("qos tbl not defined\n");
return;
}
forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
for (i = 0; i < qos_tbl->npriority_lvl; i++) {
- DPU_DEBUG("vbif:%d xin:%d lvl:%d/%d\n",
+ DRM_DEBUG_ATOMIC("vbif:%d xin:%d lvl:%d/%d\n",
params->vbif_idx, params->xin_id, i,
qos_tbl->priority_lvl[i]);
vbif->ops.set_qos_remap(vbif, params->xin_id, i,
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
index f1d1de578e4c..c82d1bd09359 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
@@ -8,19 +8,27 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 42301 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41874 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2020-07-23 21:58:14)
-
-Copyright (C) 2013-2020 by the following authors:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 981 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 15291 bytes, from 2021-06-15 22:36:13)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 10953 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_5nm.xml ( 10900 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-02-18 16:45:44)
+
+Copyright (C) 2013-2021 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
index 4cf0953009f7..0e21cf3f9e2e 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
@@ -8,19 +8,27 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 42301 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41874 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2020-07-23 21:58:14)
-
-Copyright (C) 2013-2020 by the following authors:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 981 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 15291 bytes, from 2021-06-15 22:36:13)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 10953 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_5nm.xml ( 10900 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-02-18 16:45:44)
+
+Copyright (C) 2013-2021 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -80,6 +88,10 @@ enum mdp5_pipe {
SSPP_CURSOR1 = 12,
};
+enum mdp5_format {
+ DUMMY = 0,
+};
+
enum mdp5_ctl_mode {
MODE_NONE = 0,
MODE_WB_0_BLOCK = 1,
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
index 94ce62a26daf..9741544ffc35 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
@@ -95,6 +95,11 @@ static const struct mdp5_cfg_hw msm8x74v1_config = {
[3] = INTF_HDMI,
},
},
+ .perf = {
+ .ab_inefficiency = 200,
+ .ib_inefficiency = 120,
+ .clk_inefficiency = 125
+ },
.max_clk = 200000000,
};
@@ -177,6 +182,11 @@ static const struct mdp5_cfg_hw msm8x74v2_config = {
[3] = INTF_HDMI,
},
},
+ .perf = {
+ .ab_inefficiency = 200,
+ .ib_inefficiency = 120,
+ .clk_inefficiency = 125
+ },
.max_clk = 320000000,
};
@@ -272,6 +282,11 @@ static const struct mdp5_cfg_hw apq8084_config = {
[3] = INTF_HDMI,
},
},
+ .perf = {
+ .ab_inefficiency = 200,
+ .ib_inefficiency = 120,
+ .clk_inefficiency = 105
+ },
.max_clk = 320000000,
};
@@ -339,6 +354,11 @@ static const struct mdp5_cfg_hw msm8x16_config = {
[1] = INTF_DSI,
},
},
+ .perf = {
+ .ab_inefficiency = 100,
+ .ib_inefficiency = 200,
+ .clk_inefficiency = 105
+ },
.max_clk = 320000000,
};
@@ -414,6 +434,11 @@ static const struct mdp5_cfg_hw msm8x36_config = {
[2] = INTF_DSI,
},
},
+ .perf = {
+ .ab_inefficiency = 100,
+ .ib_inefficiency = 200,
+ .clk_inefficiency = 105
+ },
.max_clk = 366670000,
};
@@ -509,6 +534,11 @@ static const struct mdp5_cfg_hw msm8x94_config = {
[3] = INTF_HDMI,
},
},
+ .perf = {
+ .ab_inefficiency = 100,
+ .ib_inefficiency = 100,
+ .clk_inefficiency = 105
+ },
.max_clk = 400000000,
};
@@ -617,6 +647,11 @@ static const struct mdp5_cfg_hw msm8x96_config = {
[3] = INTF_HDMI,
},
},
+ .perf = {
+ .ab_inefficiency = 100,
+ .ib_inefficiency = 200,
+ .clk_inefficiency = 105
+ },
.max_clk = 412500000,
};
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
index 1c50d01f15f5..6b03d7899309 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
@@ -76,6 +76,12 @@ struct mdp5_intf_block {
u32 connect[MDP5_INTF_NUM_MAX]; /* array of enum mdp5_intf_type */
};
+struct mdp5_perf_block {
+ u32 ab_inefficiency;
+ u32 ib_inefficiency;
+ u32 clk_inefficiency;
+};
+
struct mdp5_cfg_hw {
char *name;
@@ -93,6 +99,7 @@ struct mdp5_cfg_hw {
struct mdp5_sub_block dsc;
struct mdp5_sub_block cdm;
struct mdp5_intf_block intf;
+ struct mdp5_perf_block perf;
uint32_t max_clk;
};
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index f5d71b274079..f482e0911d03 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -291,8 +291,8 @@ static void blend_setup(struct drm_crtc *crtc)
plane = pstates[i]->base.plane;
blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
- fg_alpha = pstates[i]->alpha;
- bg_alpha = 0xFF - pstates[i]->alpha;
+ fg_alpha = pstates[i]->base.alpha >> 8;
+ bg_alpha = 0xFF - fg_alpha;
if (!format->alpha_enable && bg_alpha_enabled)
mixer_op_mode = 0;
@@ -301,7 +301,8 @@ static void blend_setup(struct drm_crtc *crtc)
DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);
- if (format->alpha_enable && pstates[i]->premultiplied) {
+ if (format->alpha_enable &&
+ pstates[i]->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
if (fg_alpha != 0xff) {
@@ -312,7 +313,8 @@ static void blend_setup(struct drm_crtc *crtc)
} else {
blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
}
- } else if (format->alpha_enable) {
+ } else if (format->alpha_enable &&
+ pstates[i]->base.pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
if (fg_alpha != 0xff) {
@@ -648,7 +650,7 @@ static int pstate_cmp(const void *a, const void *b)
{
struct plane_state *pa = (struct plane_state *)a;
struct plane_state *pb = (struct plane_state *)b;
- return pa->state->zpos - pb->state->zpos;
+ return pa->state->base.normalized_zpos - pb->state->base.normalized_zpos;
}
/* is there a helper for this? */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
index 128866742593..ac269a6802df 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
@@ -98,11 +98,6 @@ struct mdp5_plane_state {
struct mdp5_hw_pipe *hwpipe;
struct mdp5_hw_pipe *r_hwpipe; /* right hwpipe */
- /* aligned with property */
- uint8_t premultiplied;
- uint8_t zpos;
- uint8_t alpha;
-
/* assigned by crtc blender */
enum mdp_mixer_stage_id stage;
};
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index 8c9f2f492178..c6b69afcbac8 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -44,8 +44,9 @@ static void mdp5_plane_destroy(struct drm_plane *plane)
kfree(mdp5_plane);
}
-static void mdp5_plane_install_rotation_property(struct drm_device *dev,
- struct drm_plane *plane)
+/* helper to install properties which are common to planes and crtcs */
+static void mdp5_plane_install_properties(struct drm_plane *plane,
+ struct drm_mode_object *obj)
{
drm_plane_create_rotation_property(plane,
DRM_MODE_ROTATE_0,
@@ -53,104 +54,12 @@ static void mdp5_plane_install_rotation_property(struct drm_device *dev,
DRM_MODE_ROTATE_180 |
DRM_MODE_REFLECT_X |
DRM_MODE_REFLECT_Y);
-}
-
-/* helper to install properties which are common to planes and crtcs */
-static void mdp5_plane_install_properties(struct drm_plane *plane,
- struct drm_mode_object *obj)
-{
- struct drm_device *dev = plane->dev;
- struct msm_drm_private *dev_priv = dev->dev_private;
- struct drm_property *prop;
-
-#define INSTALL_PROPERTY(name, NAME, init_val, fnc, ...) do { \
- prop = dev_priv->plane_property[PLANE_PROP_##NAME]; \
- if (!prop) { \
- prop = drm_property_##fnc(dev, 0, #name, \
- ##__VA_ARGS__); \
- if (!prop) { \
- dev_warn(dev->dev, \
- "Create property %s failed\n", \
- #name); \
- return; \
- } \
- dev_priv->plane_property[PLANE_PROP_##NAME] = prop; \
- } \
- drm_object_attach_property(&plane->base, prop, init_val); \
- } while (0)
-
-#define INSTALL_RANGE_PROPERTY(name, NAME, min, max, init_val) \
- INSTALL_PROPERTY(name, NAME, init_val, \
- create_range, min, max)
-
-#define INSTALL_ENUM_PROPERTY(name, NAME, init_val) \
- INSTALL_PROPERTY(name, NAME, init_val, \
- create_enum, name##_prop_enum_list, \
- ARRAY_SIZE(name##_prop_enum_list))
-
- INSTALL_RANGE_PROPERTY(zpos, ZPOS, 1, 255, 1);
-
- mdp5_plane_install_rotation_property(dev, plane);
-
-#undef INSTALL_RANGE_PROPERTY
-#undef INSTALL_ENUM_PROPERTY
-#undef INSTALL_PROPERTY
-}
-
-static int mdp5_plane_atomic_set_property(struct drm_plane *plane,
- struct drm_plane_state *state, struct drm_property *property,
- uint64_t val)
-{
- struct drm_device *dev = plane->dev;
- struct mdp5_plane_state *pstate;
- struct msm_drm_private *dev_priv = dev->dev_private;
- int ret = 0;
-
- pstate = to_mdp5_plane_state(state);
-
-#define SET_PROPERTY(name, NAME, type) do { \
- if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \
- pstate->name = (type)val; \
- DBG("Set property %s %d", #name, (type)val); \
- goto done; \
- } \
- } while (0)
-
- SET_PROPERTY(zpos, ZPOS, uint8_t);
-
- DRM_DEV_ERROR(dev->dev, "Invalid property\n");
- ret = -EINVAL;
-done:
- return ret;
-#undef SET_PROPERTY
-}
-
-static int mdp5_plane_atomic_get_property(struct drm_plane *plane,
- const struct drm_plane_state *state,
- struct drm_property *property, uint64_t *val)
-{
- struct drm_device *dev = plane->dev;
- struct mdp5_plane_state *pstate;
- struct msm_drm_private *dev_priv = dev->dev_private;
- int ret = 0;
-
- pstate = to_mdp5_plane_state(state);
-
-#define GET_PROPERTY(name, NAME, type) do { \
- if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \
- *val = pstate->name; \
- DBG("Get property %s %lld", #name, *val); \
- goto done; \
- } \
- } while (0)
-
- GET_PROPERTY(zpos, ZPOS, uint8_t);
-
- DRM_DEV_ERROR(dev->dev, "Invalid property\n");
- ret = -EINVAL;
-done:
- return ret;
-#undef SET_PROPERTY
+ drm_plane_create_alpha_property(plane);
+ drm_plane_create_blend_mode_property(plane,
+ BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
+ drm_plane_create_zpos_property(plane, 1, 1, 255);
}
static void
@@ -166,9 +75,10 @@ mdp5_plane_atomic_print_state(struct drm_printer *p,
drm_printf(p, "\tright-hwpipe=%s\n",
pstate->r_hwpipe ? pstate->r_hwpipe->name :
"(null)");
- drm_printf(p, "\tpremultiplied=%u\n", pstate->premultiplied);
- drm_printf(p, "\tzpos=%u\n", pstate->zpos);
- drm_printf(p, "\talpha=%u\n", pstate->alpha);
+ drm_printf(p, "\tblend_mode=%u\n", pstate->base.pixel_blend_mode);
+ drm_printf(p, "\tzpos=%u\n", pstate->base.zpos);
+ drm_printf(p, "\tnormalized_zpos=%u\n", pstate->base.normalized_zpos);
+ drm_printf(p, "\talpha=%u\n", pstate->base.alpha);
drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
}
@@ -176,24 +86,19 @@ static void mdp5_plane_reset(struct drm_plane *plane)
{
struct mdp5_plane_state *mdp5_state;
- if (plane->state && plane->state->fb)
- drm_framebuffer_put(plane->state->fb);
+ if (plane->state)
+ __drm_atomic_helper_plane_destroy_state(plane->state);
kfree(to_mdp5_plane_state(plane->state));
mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
- /* assign default blend parameters */
- mdp5_state->alpha = 255;
- mdp5_state->premultiplied = 0;
-
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
- mdp5_state->zpos = STAGE_BASE;
+ mdp5_state->base.zpos = STAGE_BASE;
else
- mdp5_state->zpos = STAGE0 + drm_plane_index(plane);
-
- mdp5_state->base.plane = plane;
+ mdp5_state->base.zpos = STAGE0 + drm_plane_index(plane);
+ mdp5_state->base.normalized_zpos = mdp5_state->base.zpos;
- plane->state = &mdp5_state->base;
+ __drm_atomic_helper_plane_reset(plane, &mdp5_state->base);
}
static struct drm_plane_state *
@@ -229,8 +134,6 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = mdp5_plane_destroy,
- .atomic_set_property = mdp5_plane_atomic_set_property,
- .atomic_get_property = mdp5_plane_atomic_get_property,
.reset = mdp5_plane_reset,
.atomic_duplicate_state = mdp5_plane_duplicate_state,
.atomic_destroy_state = mdp5_plane_destroy_state,
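With the driver-private zpos/alpha/premultiplied properties removed, mdp5 consumes the standard DRM plane state directly, as blend_setup() above now does with base.alpha and base.pixel_blend_mode. A short sketch of that consuming side, with names local to the example rather than taken from the patch:

    /* derive the 8-bit HW alpha and blend choice from core plane state */
    static void example_blend_params(const struct drm_plane_state *state,
                                     u8 *fg_alpha, bool *premultiplied)
    {
            *fg_alpha = state->alpha >> 8;  /* 16-bit DRM alpha -> 8-bit HW */
            *premultiplied =
                    state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI;
    }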
diff --git a/drivers/gpu/drm/msm/disp/mdp_common.xml.h b/drivers/gpu/drm/msm/disp/mdp_common.xml.h
index 4f51bea8fd3f..bb0066a63732 100644
--- a/drivers/gpu/drm/msm/disp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/disp/mdp_common.xml.h
@@ -8,19 +8,27 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 42301 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41874 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2020-07-23 21:58:14)
-
-Copyright (C) 2013-2020 by the following authors:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 981 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 15291 bytes, from 2021-06-15 22:36:13)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 10953 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_5nm.xml ( 10900 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-02-18 16:45:44)
+
+Copyright (C) 2013-2021 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
new file mode 100644
index 000000000000..a4a7cb06bc87
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include "msm_disp_snapshot.h"
+
+static ssize_t __maybe_unused disp_devcoredump_read(char *buffer, loff_t offset,
+ size_t count, void *data, size_t datalen)
+{
+ struct drm_print_iterator iter;
+ struct drm_printer p;
+ struct msm_disp_state *disp_state;
+
+ disp_state = data;
+
+ iter.data = buffer;
+ iter.offset = 0;
+ iter.start = offset;
+ iter.remain = count;
+
+ p = drm_coredump_printer(&iter);
+
+ msm_disp_state_print(disp_state, &p);
+
+ return count - iter.remain;
+}
+
+static void _msm_disp_snapshot_work(struct kthread_work *work)
+{
+ struct msm_kms *kms = container_of(work, struct msm_kms, dump_work);
+ struct drm_device *drm_dev = kms->dev;
+ struct msm_disp_state *disp_state;
+ struct drm_printer p;
+
+ disp_state = kzalloc(sizeof(struct msm_disp_state), GFP_KERNEL);
+ if (!disp_state)
+ return;
+
+ disp_state->dev = drm_dev->dev;
+ disp_state->drm_dev = drm_dev;
+
+ INIT_LIST_HEAD(&disp_state->blocks);
+
+ /* Serialize dumping here */
+ mutex_lock(&kms->dump_mutex);
+
+ msm_disp_snapshot_capture_state(disp_state);
+
+ mutex_unlock(&kms->dump_mutex);
+
+ if (MSM_DISP_SNAPSHOT_DUMP_IN_CONSOLE) {
+ p = drm_info_printer(disp_state->drm_dev->dev);
+ msm_disp_state_print(disp_state, &p);
+ }
+
+ /*
+ * If COREDUMP is disabled, the stub will call the free function.
+ * If there is a coredump pending for the device, dev_coredumpm()
+ * will also free the new coredump state.
+ */
+ dev_coredumpm(disp_state->dev, THIS_MODULE, disp_state, 0, GFP_KERNEL,
+ disp_devcoredump_read, msm_disp_state_free);
+}
+
+void msm_disp_snapshot_state(struct drm_device *drm_dev)
+{
+ struct msm_drm_private *priv;
+ struct msm_kms *kms;
+
+ if (!drm_dev) {
+ DRM_ERROR("invalid params\n");
+ return;
+ }
+
+ priv = drm_dev->dev_private;
+ kms = priv->kms;
+
+ kthread_queue_work(kms->dump_worker, &kms->dump_work);
+}
+
+int msm_disp_snapshot_init(struct drm_device *drm_dev)
+{
+ struct msm_drm_private *priv;
+ struct msm_kms *kms;
+
+ if (!drm_dev) {
+ DRM_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ priv = drm_dev->dev_private;
+ kms = priv->kms;
+
+ mutex_init(&kms->dump_mutex);
+
+ kms->dump_worker = kthread_create_worker(0, "%s", "disp_snapshot");
+ if (IS_ERR(kms->dump_worker))
+ DRM_ERROR("failed to create disp state task\n");
+
+ kthread_init_work(&kms->dump_work, _msm_disp_snapshot_work);
+
+ return 0;
+}
+
+void msm_disp_snapshot_destroy(struct drm_device *drm_dev)
+{
+ struct msm_kms *kms;
+ struct msm_drm_private *priv;
+
+ if (!drm_dev) {
+ DRM_ERROR("invalid params\n");
+ return;
+ }
+
+ priv = drm_dev->dev_private;
+ kms = priv->kms;
+
+ if (kms->dump_worker)
+ kthread_destroy_worker(kms->dump_worker);
+
+ mutex_destroy(&kms->dump_mutex);
+}
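msm_disp_snapshot_state() above is intended to be cheap at the call site: it only queues dump_work, and the actual capture runs on the dedicated kthread worker under dump_mutex before being handed to devcoredump. A hedged example of wiring it into an error path (the handler name is hypothetical):

    /* hypothetical error handler: request a display coredump and continue */
    static void example_handle_frame_timeout(struct drm_device *ddev)
    {
            DRM_ERROR("frame done timeout, capturing display snapshot\n");
            msm_disp_snapshot_state(ddev);
    }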
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
new file mode 100644
index 000000000000..c92a9508c8d3
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef MSM_DISP_SNAPSHOT_H_
+#define MSM_DISP_SNAPSHOT_H_
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_device.h>
+#include "../../../drm_crtc_internal.h"
+#include <drm/drm_print.h>
+#include <drm/drm_atomic.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/dma-buf.h>
+#include <linux/slab.h>
+#include <linux/list_sort.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/kthread.h>
+#include <linux/devcoredump.h>
+#include <stdarg.h>
+#include "msm_kms.h"
+
+#define MSM_DISP_SNAPSHOT_MAX_BLKS 10
+
+/* debug option to print the registers in logs */
+#define MSM_DISP_SNAPSHOT_DUMP_IN_CONSOLE 0
+
+/* print debug ranges in groups of 4 u32s */
+#define REG_DUMP_ALIGN 16
+
+/**
+ * struct msm_disp_state - structure to store current dpu state
+ * @dev: device pointer
+ * @drm_dev: drm device pointer
+ * @blocks: list of hardware block register snapshots
+ * @atomic_state: atomic state duplicated at the time of the error
+ * @timestamp: timestamp at which the coredump was captured
+ */
+struct msm_disp_state {
+ struct device *dev;
+ struct drm_device *drm_dev;
+
+ struct list_head blocks;
+
+ struct drm_atomic_state *atomic_state;
+
+ ktime_t timestamp;
+};
+
+/**
+ * struct msm_disp_state_block - structure to store each hardware block state
+ * @name: name of the block
+ * @node: list node linking this block into msm_disp_state
+ * @size: size of the register space of this hardware block
+ * @state: array holding the register dump of this hardware block
+ * @base_addr: starting address of this hardware block's register space
+ */
+struct msm_disp_state_block {
+ char name[SZ_128];
+ struct list_head node;
+ unsigned int size;
+ u32 *state;
+ void __iomem *base_addr;
+};
+
+/**
+ * msm_disp_snapshot_init - initialize display snapshot
+ * @drm_dev: drm device handle
+ *
+ * Returns: 0 or -ERROR
+ */
+int msm_disp_snapshot_init(struct drm_device *drm_dev);
+
+/**
+ * msm_disp_snapshot_destroy - destroy the display snapshot
+ * @drm_dev: drm device handle
+ *
+ * Returns: none
+ */
+void msm_disp_snapshot_destroy(struct drm_device *drm_dev);
+
+/**
+ * msm_disp_snapshot_state - trigger to dump the display snapshot
+ * @drm_dev: handle to drm device
+ *
+ * Returns: none
+ */
+void msm_disp_snapshot_state(struct drm_device *drm_dev);
+
+/**
+ * msm_disp_state_print - print out the current dpu state
+ * @disp_state: handle to struct msm_disp_state
+ * @p: handle to drm printer
+ *
+ * Returns: none
+ */
+void msm_disp_state_print(struct msm_disp_state *disp_state, struct drm_printer *p);
+
+/**
+ * msm_disp_snapshot_capture_state - utility to capture atomic state and hw registers
+ * @disp_state: handle to msm_disp_state struct
+ *
+ * Returns: none
+ */
+void msm_disp_snapshot_capture_state(struct msm_disp_state *disp_state);
+
+/**
+ * msm_disp_state_free - free the memory after the coredump has been read
+ * @data: handle to struct msm_disp_state
+ *
+ * Returns: none
+ */
+void msm_disp_state_free(void *data);
+
+/**
+ * msm_disp_snapshot_add_block - add a hardware block with its register dump
+ * @disp_state: handle to struct msm_disp_state
+ * @len: size of the register space of the hardware block
+ * @base_addr: starting address of the register space of the hardware block
+ * @fmt: printf-style format used to build the block name
+ *
+ * Returns: none
+ */
+__printf(4, 5)
+void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len,
+ void __iomem *base_addr, const char *fmt, ...);
+
+#endif /* MSM_DISP_SNAPSHOT_H_ */
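The header gives the snapshot facility a simple lifecycle: initialize once per drm_device, trigger captures on demand, and tear down at unbind. A minimal lifecycle sketch against the declarations above (error handling trimmed for brevity):

    int ret;

    ret = msm_disp_snapshot_init(ddev);     /* creates worker and mutex */
    if (ret)
            return ret;
    ...
    msm_disp_snapshot_state(ddev);          /* from any error path */
    ...
    msm_disp_snapshot_destroy(ddev);        /* at unbind/teardown */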
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
new file mode 100644
index 000000000000..cabe15190ec1
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include "msm_disp_snapshot.h"
+
+static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *base_addr)
+{
+ u32 len_padded;
+ u32 num_rows;
+ u32 x0, x4, x8, xc;
+ void __iomem *addr;
+ u32 *dump_addr = NULL;
+ void __iomem *end_addr;
+ int i;
+
+ len_padded = aligned_len * REG_DUMP_ALIGN;
+ num_rows = aligned_len / REG_DUMP_ALIGN;
+
+ addr = base_addr;
+ end_addr = base_addr + aligned_len;
+
+ if (!(*reg))
+ *reg = kzalloc(len_padded, GFP_KERNEL);
+
+ if (*reg)
+ dump_addr = *reg;
+
+ for (i = 0; i < num_rows; i++) {
+ x0 = (addr < end_addr) ? readl_relaxed(addr + 0x0) : 0;
+ x4 = (addr + 0x4 < end_addr) ? readl_relaxed(addr + 0x4) : 0;
+ x8 = (addr + 0x8 < end_addr) ? readl_relaxed(addr + 0x8) : 0;
+ xc = (addr + 0xc < end_addr) ? readl_relaxed(addr + 0xc) : 0;
+
+ if (dump_addr) {
+ dump_addr[i * 4] = x0;
+ dump_addr[i * 4 + 1] = x4;
+ dump_addr[i * 4 + 2] = x8;
+ dump_addr[i * 4 + 3] = xc;
+ }
+
+ addr += REG_DUMP_ALIGN;
+ }
+}
+
+static void msm_disp_state_print_regs(u32 **reg, u32 len, void __iomem *base_addr,
+ struct drm_printer *p)
+{
+ int i;
+ u32 *dump_addr = NULL;
+ void __iomem *addr;
+ u32 num_rows;
+
+ addr = base_addr;
+ num_rows = len / REG_DUMP_ALIGN;
+
+ if (*reg)
+ dump_addr = *reg;
+
+ for (i = 0; i < num_rows; i++) {
+ drm_printf(p, "0x%lx : %08x %08x %08x %08x\n",
+ (unsigned long)(addr - base_addr),
+ dump_addr[i * 4], dump_addr[i * 4 + 1],
+ dump_addr[i * 4 + 2], dump_addr[i * 4 + 3]);
+ addr += REG_DUMP_ALIGN;
+ }
+}
+
+void msm_disp_state_print(struct msm_disp_state *state, struct drm_printer *p)
+{
+ struct msm_disp_state_block *block, *tmp;
+
+ if (!p) {
+ DRM_ERROR("invalid drm printer\n");
+ return;
+ }
+
+ drm_printf(p, "---\n");
+
+ drm_printf(p, "module: " KBUILD_MODNAME "\n");
+ drm_printf(p, "dpu devcoredump\n");
+ drm_printf(p, "timestamp %lld\n", ktime_to_ns(state->timestamp));
+
+ list_for_each_entry_safe(block, tmp, &state->blocks, node) {
+ drm_printf(p, "====================%s================\n", block->name);
+ msm_disp_state_print_regs(&block->state, block->size, block->base_addr, p);
+ }
+
+ drm_printf(p, "===================dpu drm state================\n");
+
+ if (state->atomic_state)
+ drm_atomic_print_new_state(state->atomic_state, p);
+}
+
+static void msm_disp_capture_atomic_state(struct msm_disp_state *disp_state)
+{
+ struct drm_device *ddev;
+ struct drm_modeset_acquire_ctx ctx;
+
+ disp_state->timestamp = ktime_get();
+
+ ddev = disp_state->drm_dev;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ while (drm_modeset_lock_all_ctx(ddev, &ctx) != 0)
+ drm_modeset_backoff(&ctx);
+
+ disp_state->atomic_state = drm_atomic_helper_duplicate_state(ddev,
+ &ctx);
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+void msm_disp_snapshot_capture_state(struct msm_disp_state *disp_state)
+{
+ struct msm_drm_private *priv;
+ struct drm_device *drm_dev;
+ struct msm_kms *kms;
+ int i;
+
+ drm_dev = disp_state->drm_dev;
+ priv = drm_dev->dev_private;
+ kms = priv->kms;
+
+ if (priv->dp)
+ msm_dp_snapshot(disp_state, priv->dp);
+
+ for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+ if (!priv->dsi[i])
+ continue;
+
+ msm_dsi_snapshot(disp_state, priv->dsi[i]);
+ }
+
+ if (kms->funcs->snapshot)
+ kms->funcs->snapshot(disp_state, kms);
+
+ msm_disp_capture_atomic_state(disp_state);
+}
+
+void msm_disp_state_free(void *data)
+{
+ struct msm_disp_state *disp_state = data;
+ struct msm_disp_state_block *block, *tmp;
+
+ if (disp_state->atomic_state) {
+ drm_atomic_state_put(disp_state->atomic_state);
+ disp_state->atomic_state = NULL;
+ }
+
+ list_for_each_entry_safe(block, tmp, &disp_state->blocks, node) {
+ list_del(&block->node);
+ kfree(block->state);
+ kfree(block);
+ }
+
+ kfree(disp_state);
+}
+
+void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len,
+ void __iomem *base_addr, const char *fmt, ...)
+{
+ struct msm_disp_state_block *new_blk;
+ struct va_format vaf;
+ va_list va;
+
+ new_blk = kzalloc(sizeof(struct msm_disp_state_block), GFP_KERNEL);
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+ snprintf(new_blk->name, sizeof(new_blk->name), "%pV", &vaf);
+
+ va_end(va);
+
+ INIT_LIST_HEAD(&new_blk->node);
+ new_blk->size = ALIGN(len, REG_DUMP_ALIGN);
+ new_blk->base_addr = base_addr;
+
+ msm_disp_state_dump_regs(&new_blk->state, new_blk->size, base_addr);
+ list_add(&new_blk->node, &disp_state->blocks);
+}
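msm_disp_snapshot_add_block() rounds the requested length up to REG_DUMP_ALIGN (16 bytes, one printed row of four u32 values), so the dump and print loops always operate on whole rows. A quick check of that arithmetic, assuming a hypothetical 0x2c-byte block:

    /* ALIGN(0x2c, 16) == 0x30, i.e. 3 rows of 4 registers each */
    u32 size = ALIGN(0x2c, REG_DUMP_ALIGN);
    u32 rows = size / REG_DUMP_ALIGN;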
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index 7c22bfe0fc7d..4a3293b590b0 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -9,7 +9,15 @@
#include "dp_reg.h"
#include "dp_aux.h"
-#define DP_AUX_ENUM_STR(x) #x
+enum msm_dp_aux_err {
+ DP_AUX_ERR_NONE,
+ DP_AUX_ERR_ADDR,
+ DP_AUX_ERR_TOUT,
+ DP_AUX_ERR_NACK,
+ DP_AUX_ERR_DEFER,
+ DP_AUX_ERR_NACK_DEFER,
+ DP_AUX_ERR_PHY,
+};
struct dp_aux_private {
struct device *dev;
@@ -18,7 +26,7 @@ struct dp_aux_private {
struct mutex mutex;
struct completion comp;
- u32 aux_error_num;
+ enum msm_dp_aux_err aux_error_num;
u32 retry_cnt;
bool cmd_busy;
bool native;
@@ -27,69 +35,51 @@ struct dp_aux_private {
bool no_send_stop;
u32 offset;
u32 segment;
- u32 isr;
struct drm_dp_aux dp_aux;
};
#define MAX_AUX_RETRIES 5
-static const char *dp_aux_get_error(u32 aux_error)
-{
- switch (aux_error) {
- case DP_AUX_ERR_NONE:
- return DP_AUX_ENUM_STR(DP_AUX_ERR_NONE);
- case DP_AUX_ERR_ADDR:
- return DP_AUX_ENUM_STR(DP_AUX_ERR_ADDR);
- case DP_AUX_ERR_TOUT:
- return DP_AUX_ENUM_STR(DP_AUX_ERR_TOUT);
- case DP_AUX_ERR_NACK:
- return DP_AUX_ENUM_STR(DP_AUX_ERR_NACK);
- case DP_AUX_ERR_DEFER:
- return DP_AUX_ENUM_STR(DP_AUX_ERR_DEFER);
- case DP_AUX_ERR_NACK_DEFER:
- return DP_AUX_ENUM_STR(DP_AUX_ERR_NACK_DEFER);
- default:
- return "unknown";
- }
-}
-
-static u32 dp_aux_write(struct dp_aux_private *aux,
+static ssize_t dp_aux_write(struct dp_aux_private *aux,
struct drm_dp_aux_msg *msg)
{
- u32 data[4], reg, len;
+ u8 data[4];
+ u32 reg;
+ ssize_t len;
u8 *msgdata = msg->buffer;
int const AUX_CMD_FIFO_LEN = 128;
int i = 0;
if (aux->read)
- len = 4;
+ len = 0;
else
- len = msg->size + 4;
+ len = msg->size;
/*
* cmd fifo only has depth of 144 bytes
* limit buf length to 128 bytes here
*/
- if (len > AUX_CMD_FIFO_LEN) {
+ if (len > AUX_CMD_FIFO_LEN - 4) {
DRM_ERROR("buf size greater than allowed size of 128 bytes\n");
- return 0;
+ return -EINVAL;
}
/* Pack cmd and write to HW */
- data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */
+ data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */
if (aux->read)
- data[0] |= BIT(4); /* R/W */
+ data[0] |= BIT(4); /* R/W */
- data[1] = (msg->address >> 8) & 0xff; /* addr[15:8] */
- data[2] = msg->address & 0xff; /* addr[7:0] */
- data[3] = (msg->size - 1) & 0xff; /* len[7:0] */
+ data[1] = msg->address >> 8; /* addr[15:8] */
+ data[2] = msg->address; /* addr[7:0] */
+ data[3] = msg->size - 1; /* len[7:0] */
- for (i = 0; i < len; i++) {
+ for (i = 0; i < len + 4; i++) {
reg = (i < 4) ? data[i] : msgdata[i - 4];
+ reg <<= DP_AUX_DATA_OFFSET;
+ reg &= DP_AUX_DATA_MASK;
+ reg |= DP_AUX_DATA_WRITE;
/* index = 0, write */
- reg = (((reg) << DP_AUX_DATA_OFFSET)
- & DP_AUX_DATA_MASK) | DP_AUX_DATA_WRITE;
if (i == 0)
reg |= DP_AUX_DATA_INDEX_WRITE;
aux->catalog->aux_data = reg;
@@ -117,39 +107,27 @@ static u32 dp_aux_write(struct dp_aux_private *aux,
return len;
}
-static int dp_aux_cmd_fifo_tx(struct dp_aux_private *aux,
+static ssize_t dp_aux_cmd_fifo_tx(struct dp_aux_private *aux,
struct drm_dp_aux_msg *msg)
{
- u32 ret, len, timeout;
- int aux_timeout_ms = HZ/4;
+ ssize_t ret;
+ unsigned long time_left;
reinit_completion(&aux->comp);
- len = dp_aux_write(aux, msg);
- if (len == 0) {
- DRM_ERROR("DP AUX write failed\n");
- return -EINVAL;
- }
+ ret = dp_aux_write(aux, msg);
+ if (ret < 0)
+ return ret;
- timeout = wait_for_completion_timeout(&aux->comp, aux_timeout_ms);
- if (!timeout) {
- DRM_ERROR("aux %s timeout\n", (aux->read ? "read" : "write"));
+ time_left = wait_for_completion_timeout(&aux->comp,
+ msecs_to_jiffies(250));
+ if (!time_left)
return -ETIMEDOUT;
- }
-
- if (aux->aux_error_num == DP_AUX_ERR_NONE) {
- ret = len;
- } else {
- DRM_ERROR_RATELIMITED("aux err: %s\n",
- dp_aux_get_error(aux->aux_error_num));
-
- ret = -EINVAL;
- }
return ret;
}
-static void dp_aux_cmd_fifo_rx(struct dp_aux_private *aux,
+static ssize_t dp_aux_cmd_fifo_rx(struct dp_aux_private *aux,
struct drm_dp_aux_msg *msg)
{
u32 data;
@@ -176,15 +154,14 @@ static void dp_aux_cmd_fifo_rx(struct dp_aux_private *aux,
actual_i = (data >> DP_AUX_DATA_INDEX_OFFSET) & 0xFF;
if (i != actual_i)
- DRM_ERROR("Index mismatch: expected %d, found %d\n",
- i, actual_i);
+ break;
}
+
+ return i;
}
-static void dp_aux_native_handler(struct dp_aux_private *aux)
+static void dp_aux_native_handler(struct dp_aux_private *aux, u32 isr)
{
- u32 isr = aux->isr;
-
if (isr & DP_INTR_AUX_I2C_DONE)
aux->aux_error_num = DP_AUX_ERR_NONE;
else if (isr & DP_INTR_WRONG_ADDR)
@@ -197,14 +174,10 @@ static void dp_aux_native_handler(struct dp_aux_private *aux)
aux->aux_error_num = DP_AUX_ERR_PHY;
dp_catalog_aux_clear_hw_interrupts(aux->catalog);
}
-
- complete(&aux->comp);
}
-static void dp_aux_i2c_handler(struct dp_aux_private *aux)
+static void dp_aux_i2c_handler(struct dp_aux_private *aux, u32 isr)
{
- u32 isr = aux->isr;
-
if (isr & DP_INTR_AUX_I2C_DONE) {
if (isr & (DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER))
aux->aux_error_num = DP_AUX_ERR_NACK;
@@ -226,8 +199,6 @@ static void dp_aux_i2c_handler(struct dp_aux_private *aux)
dp_catalog_aux_clear_hw_interrupts(aux->catalog);
}
}
-
- complete(&aux->comp);
}
static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux,
@@ -338,30 +309,29 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
ssize_t ret;
int const aux_cmd_native_max = 16;
int const aux_cmd_i2c_max = 128;
- struct dp_aux_private *aux = container_of(dp_aux,
- struct dp_aux_private, dp_aux);
+ struct dp_aux_private *aux;
- mutex_lock(&aux->mutex);
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
/* Ignore address only message */
- if ((msg->size == 0) || (msg->buffer == NULL)) {
+ if (msg->size == 0 || !msg->buffer) {
msg->reply = aux->native ?
DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
- ret = msg->size;
- goto unlock_exit;
+ return msg->size;
}
/* msg sanity check */
- if ((aux->native && (msg->size > aux_cmd_native_max)) ||
- (msg->size > aux_cmd_i2c_max)) {
+ if ((aux->native && msg->size > aux_cmd_native_max) ||
+ msg->size > aux_cmd_i2c_max) {
DRM_ERROR("%s: invalid msg: size(%zu), request(%x)\n",
__func__, msg->size, msg->request);
- ret = -EINVAL;
- goto unlock_exit;
+ return -EINVAL;
}
+ mutex_lock(&aux->mutex);
+
dp_aux_update_offset_and_segment(aux, msg);
dp_aux_transfer_helper(aux, msg, true);
@@ -377,41 +347,44 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
}
ret = dp_aux_cmd_fifo_tx(aux, msg);
-
if (ret < 0) {
if (aux->native) {
aux->retry_cnt++;
if (!(aux->retry_cnt % MAX_AUX_RETRIES))
dp_catalog_aux_update_cfg(aux->catalog);
}
- usleep_range(400, 500); /* at least 400us to next try */
- goto unlock_exit;
- }
-
- if (aux->aux_error_num == DP_AUX_ERR_NONE) {
- if (aux->read)
- dp_aux_cmd_fifo_rx(aux, msg);
-
- msg->reply = aux->native ?
- DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
} else {
- /* Reply defer to retry */
- msg->reply = aux->native ?
- DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER;
+ aux->retry_cnt = 0;
+ switch (aux->aux_error_num) {
+ case DP_AUX_ERR_NONE:
+ if (aux->read)
+ ret = dp_aux_cmd_fifo_rx(aux, msg);
+ msg->reply = aux->native ? DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
+ break;
+ case DP_AUX_ERR_DEFER:
+ msg->reply = aux->native ? DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER;
+ break;
+ case DP_AUX_ERR_PHY:
+ case DP_AUX_ERR_ADDR:
+ case DP_AUX_ERR_NACK:
+ case DP_AUX_ERR_NACK_DEFER:
+ msg->reply = aux->native ? DP_AUX_NATIVE_REPLY_NACK : DP_AUX_I2C_REPLY_NACK;
+ break;
+ case DP_AUX_ERR_TOUT:
+ ret = -ETIMEDOUT;
+ break;
+ }
}
- /* Return requested size for success or retry */
- ret = msg->size;
- aux->retry_cnt = 0;
-
-unlock_exit:
aux->cmd_busy = false;
mutex_unlock(&aux->mutex);
+
return ret;
}
void dp_aux_isr(struct drm_dp_aux *dp_aux)
{
+ u32 isr;
struct dp_aux_private *aux;
if (!dp_aux) {
@@ -421,15 +394,17 @@ void dp_aux_isr(struct drm_dp_aux *dp_aux)
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
- aux->isr = dp_catalog_aux_get_irq(aux->catalog);
+ isr = dp_catalog_aux_get_irq(aux->catalog);
if (!aux->cmd_busy)
return;
if (aux->native)
- dp_aux_native_handler(aux);
+ dp_aux_native_handler(aux, isr);
else
- dp_aux_i2c_handler(aux);
+ dp_aux_i2c_handler(aux, isr);
+
+ complete(&aux->comp);
}
void dp_aux_reconfig(struct drm_dp_aux *dp_aux)
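
Note: the reworked dp_aux_write() above packs a four-byte command header (address bits 19:16 with the R/W flag, address bits 15:8, address bits 7:0, size - 1) ahead of the payload and rejects payloads that would not fit the command FIFO. The standalone sketch below reproduces only that packing and limit check; the simplified aux_msg struct and the pack_aux_header() name are assumptions, stand-ins for drm_dp_aux_msg and the driver's internals.

/*
 * Standalone sketch of the AUX command header packing and the
 * 124-byte payload cap (128-byte FIFO minus the 4 header bytes).
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <errno.h>
#include <sys/types.h>

#define AUX_CMD_FIFO_LEN 128

struct aux_msg {
	uint32_t address;   /* 20-bit AUX address */
	size_t size;        /* payload length */
	int read;           /* non-zero for read requests */
};

static ssize_t pack_aux_header(const struct aux_msg *msg, uint8_t hdr[4])
{
	size_t payload = msg->read ? 0 : msg->size;

	if (payload > AUX_CMD_FIFO_LEN - 4)
		return -EINVAL;                  /* would overflow the cmd FIFO */

	hdr[0] = (msg->address >> 16) & 0xf;     /* addr[19:16] */
	if (msg->read)
		hdr[0] |= 1 << 4;                /* R/W bit */
	hdr[1] = msg->address >> 8;              /* addr[15:8] */
	hdr[2] = msg->address;                   /* addr[7:0] */
	hdr[3] = msg->size - 1;                  /* len[7:0] */

	return payload;                          /* bytes following the header */
}

int main(void)
{
	struct aux_msg msg = { .address = 0x00200, .size = 16, .read = 1 };
	uint8_t hdr[4];
	ssize_t payload = pack_aux_header(&msg, hdr);

	printf("payload=%zd hdr=%02x %02x %02x %02x\n",
	       payload, hdr[0], hdr[1], hdr[2], hdr[3]);
	return 0;
}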
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
index f8b8ba919465..0728cc09c9ec 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.h
+++ b/drivers/gpu/drm/msm/dp/dp_aux.h
@@ -9,14 +9,6 @@
#include "dp_catalog.h"
#include <drm/drm_dp_helper.h>
-#define DP_AUX_ERR_NONE 0
-#define DP_AUX_ERR_ADDR -1
-#define DP_AUX_ERR_TOUT -2
-#define DP_AUX_ERR_NACK -3
-#define DP_AUX_ERR_DEFER -4
-#define DP_AUX_ERR_NACK_DEFER -5
-#define DP_AUX_ERR_PHY -6
-
int dp_aux_register(struct drm_dp_aux *dp_aux);
void dp_aux_unregister(struct drm_dp_aux *dp_aux);
void dp_aux_isr(struct drm_dp_aux *dp_aux);
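
Note: with the DP_AUX_ERR_* values moved from dp_aux.h into the msm_dp_aux_err enum in dp_aux.c, dp_aux_transfer() now maps each error to a DP reply code in a single switch. The sketch below shows that mapping shape only; the REPLY_* names are local stand-ins for the DP_AUX_NATIVE_REPLY_* / DP_AUX_I2C_REPLY_* constants from drm_dp_helper.h and are not the driver's identifiers.

/* Map the enum error codes to generic reply categories. */
#include <stdio.h>

enum msm_dp_aux_err {
	DP_AUX_ERR_NONE,
	DP_AUX_ERR_ADDR,
	DP_AUX_ERR_TOUT,
	DP_AUX_ERR_NACK,
	DP_AUX_ERR_DEFER,
	DP_AUX_ERR_NACK_DEFER,
	DP_AUX_ERR_PHY,
};

enum aux_reply { REPLY_ACK, REPLY_NACK, REPLY_DEFER, REPLY_TIMEOUT };

static enum aux_reply err_to_reply(enum msm_dp_aux_err err)
{
	switch (err) {
	case DP_AUX_ERR_NONE:
		return REPLY_ACK;
	case DP_AUX_ERR_DEFER:
		return REPLY_DEFER;
	case DP_AUX_ERR_TOUT:
		return REPLY_TIMEOUT;
	case DP_AUX_ERR_PHY:
	case DP_AUX_ERR_ADDR:
	case DP_AUX_ERR_NACK:
	case DP_AUX_ERR_NACK_DEFER:
	default:
		return REPLY_NACK;
	}
}

int main(void)
{
	static const char * const names[] = { "ACK", "NACK", "DEFER", "TIMEOUT" };

	for (int e = DP_AUX_ERR_NONE; e <= DP_AUX_ERR_PHY; e++)
		printf("err %d -> %s\n", e,
		       names[err_to_reply((enum msm_dp_aux_err)e)]);
	return 0;
}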
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index b1a9b1b98f5f..ca96e3514790 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -62,6 +62,15 @@ struct dp_catalog_private {
u8 aux_lut_cfg_index[PHY_AUX_CFG_MAX];
};
+void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *disp_state)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ msm_disp_snapshot_add_block(disp_state, catalog->io->dp_controller.len,
+ catalog->io->dp_controller.base, "dp_ctrl");
+}
+
static inline u32 dp_read_aux(struct dp_catalog_private *catalog, u32 offset)
{
offset += MSM_DP_CONTROLLER_AUX_OFFSET;
@@ -193,7 +202,7 @@ int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog)
/**
* dp_catalog_aux_reset() - reset AUX controller
*
- * @aux: DP catalog structure
+ * @dp_catalog: DP catalog structure
*
* return: void
*
@@ -292,7 +301,7 @@ void dp_catalog_dump_regs(struct dp_catalog *dp_catalog)
dump_regs(catalog->io->dp_controller.base + offset, len);
}
-int dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog)
+u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
@@ -582,10 +591,9 @@ void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog)
u32 reftimer = dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER);
- /* enable HPD interrupts */
+ /* enable HPD plug and unplug interrupts */
dp_catalog_hpd_config_intr(dp_catalog,
- DP_DP_HPD_PLUG_INT_MASK | DP_DP_IRQ_HPD_INT_MASK
- | DP_DP_HPD_UNPLUG_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK, true);
+ DP_DP_HPD_PLUG_INT_MASK | DP_DP_HPD_UNPLUG_INT_MASK, true);
/* Configure REFTIMER and enable it */
reftimer |= DP_DP_HPD_REFTIMER_ENABLE;
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
index 176a9020a520..6965afa81aad 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -9,6 +9,7 @@
#include <drm/drm_modes.h>
#include "dp_parser.h"
+#include "disp/msm_disp_snapshot.h"
/* interrupts */
#define DP_INTR_HPD BIT(0)
@@ -71,6 +72,9 @@ struct dp_catalog {
u32 audio_data;
};
+/* Debug module */
+void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *disp_state);
+
/* AUX APIs */
u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog);
int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog);
@@ -80,7 +84,7 @@ int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog);
void dp_catalog_aux_reset(struct dp_catalog *dp_catalog);
void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable);
void dp_catalog_aux_update_cfg(struct dp_catalog *dp_catalog);
-int dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog);
+u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog);
/* DP Controller APIs */
void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state);
@@ -124,7 +128,6 @@ void dp_catalog_audio_get_header(struct dp_catalog *catalog);
void dp_catalog_audio_set_header(struct dp_catalog *catalog);
void dp_catalog_audio_config_acr(struct dp_catalog *catalog);
void dp_catalog_audio_enable(struct dp_catalog *catalog);
-void dp_catalog_audio_enable(struct dp_catalog *catalog);
void dp_catalog_audio_config_sdp(struct dp_catalog *catalog);
void dp_catalog_audio_init(struct dp_catalog *catalog);
void dp_catalog_audio_sfe_level(struct dp_catalog *catalog);
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 2cebd17a7289..ee221d835fa0 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -77,8 +77,6 @@ struct dp_ctrl_private {
struct dp_parser *parser;
struct dp_catalog *catalog;
- struct opp_table *opp_table;
-
struct completion idle_comp;
struct completion video_comp;
};
@@ -1809,6 +1807,63 @@ end:
return ret;
}
+int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl)
+{
+ struct dp_ctrl_private *ctrl;
+ struct dp_io *dp_io;
+ struct phy *phy;
+ int ret;
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ dp_io = &ctrl->parser->io;
+ phy = dp_io->phy;
+
+ /* set dongle to D3 (power off) mode */
+ dp_link_psm_config(ctrl->link, &ctrl->panel->link_info, true);
+
+ dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+
+ if (dp_power_clk_status(ctrl->power, DP_STREAM_PM)) {
+ ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, false);
+ if (ret) {
+ DRM_ERROR("Failed to disable pclk. ret=%d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
+ if (ret) {
+ DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
+ return ret;
+ }
+
+ phy_power_off(phy);
+
+ /* aux channel down, reinit phy */
+ phy_exit(phy);
+ phy_init(phy);
+
+ DRM_DEBUG_DP("DP off link/stream done\n");
+ return ret;
+}
+
+void dp_ctrl_off_phy(struct dp_ctrl *dp_ctrl)
+{
+ struct dp_ctrl_private *ctrl;
+ struct dp_io *dp_io;
+ struct phy *phy;
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ dp_io = &ctrl->parser->io;
+ phy = dp_io->phy;
+
+ dp_catalog_ctrl_reset(ctrl->catalog);
+
+ phy_exit(phy);
+
+ DRM_DEBUG_DP("DP off phy done\n");
+}
+
int dp_ctrl_off(struct dp_ctrl *dp_ctrl)
{
struct dp_ctrl_private *ctrl;
@@ -1886,20 +1941,17 @@ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
return ERR_PTR(-ENOMEM);
}
- ctrl->opp_table = dev_pm_opp_set_clkname(dev, "ctrl_link");
- if (IS_ERR(ctrl->opp_table)) {
+ ret = devm_pm_opp_set_clkname(dev, "ctrl_link");
+ if (ret) {
dev_err(dev, "invalid DP OPP table in device tree\n");
- /* caller do PTR_ERR(ctrl->opp_table) */
- return (struct dp_ctrl *)ctrl->opp_table;
+ /* caller will do PTR_ERR() on the returned pointer */
+ return (struct dp_ctrl *)ERR_PTR(ret);
}
/* OPP table is optional */
- ret = dev_pm_opp_of_add_table(dev);
- if (ret) {
+ ret = devm_pm_opp_of_add_table(dev);
+ if (ret)
dev_err(dev, "failed to add DP OPP table\n");
- dev_pm_opp_put_clkname(ctrl->opp_table);
- ctrl->opp_table = NULL;
- }
init_completion(&ctrl->idle_comp);
init_completion(&ctrl->video_comp);
@@ -1915,16 +1967,3 @@ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
return &ctrl->dp_ctrl;
}
-
-void dp_ctrl_put(struct dp_ctrl *dp_ctrl)
-{
- struct dp_ctrl_private *ctrl;
-
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
-
- if (ctrl->opp_table) {
- dev_pm_opp_of_remove_table(ctrl->dev);
- dev_pm_opp_put_clkname(ctrl->opp_table);
- ctrl->opp_table = NULL;
- }
-}
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
index a836bd358447..2363a2df9597 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -23,6 +23,8 @@ int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset);
void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl);
int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl);
+int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl);
+void dp_ctrl_off_phy(struct dp_ctrl *dp_ctrl);
int dp_ctrl_off(struct dp_ctrl *dp_ctrl);
void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl);
void dp_ctrl_isr(struct dp_ctrl *dp_ctrl);
@@ -31,6 +33,5 @@ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
struct dp_panel *panel, struct drm_dp_aux *aux,
struct dp_power *power, struct dp_catalog *catalog,
struct dp_parser *parser);
-void dp_ctrl_put(struct dp_ctrl *dp_ctrl);
#endif /* _DP_CTRL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 1784e119269b..051c1be1de7e 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -208,10 +208,6 @@ static int dp_display_bind(struct device *dev, struct device *master,
dp = container_of(g_dp_display,
struct dp_display_private, dp_display);
- if (!dp) {
- DRM_ERROR("DP driver bind failed. Invalid driver data\n");
- return -EINVAL;
- }
dp->dp_display.drm_dev = drm;
priv = drm->dev_private;
@@ -252,10 +248,6 @@ static void dp_display_unbind(struct device *dev, struct device *master,
dp = container_of(g_dp_display,
struct dp_display_private, dp_display);
- if (!dp) {
- DRM_ERROR("Invalid DP driver data\n");
- return;
- }
dp_power_client_deinit(dp->power);
dp_aux_unregister(dp->aux);
@@ -346,6 +338,12 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
dp->dp_display.max_pclk_khz = DP_MAX_PIXEL_CLK_KHZ;
dp->dp_display.max_dp_lanes = dp->parser->max_dp_lanes;
+ /*
+ * set sink to normal operation mode -- D0
+ * before dpcd read
+ */
+ dp_link_psm_config(dp->link, &dp->panel->link_info, false);
+
dp_link_reset_phy_params_vx_px(dp->link);
rc = dp_ctrl_on_link(dp->ctrl);
if (rc) {
@@ -406,19 +404,9 @@ static int dp_display_usbpd_configure_cb(struct device *dev)
dp = container_of(g_dp_display,
struct dp_display_private, dp_display);
- if (!dp) {
- DRM_ERROR("no driver data found\n");
- rc = -ENODEV;
- goto end;
- }
dp_display_host_init(dp, false);
- /*
- * set sink to normal operation mode -- D0
- * before dpcd read
- */
- dp_link_psm_config(dp->link, &dp->panel->link_info, false);
rc = dp_display_process_hpd_high(dp);
end:
return rc;
@@ -437,11 +425,6 @@ static int dp_display_usbpd_disconnect_cb(struct device *dev)
dp = container_of(g_dp_display,
struct dp_display_private, dp_display);
- if (!dp) {
- DRM_ERROR("no driver data found\n");
- rc = -ENODEV;
- return rc;
- }
dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
@@ -502,7 +485,6 @@ static int dp_display_usbpd_attention_cb(struct device *dev)
int rc = 0;
u32 sink_request;
struct dp_display_private *dp;
- struct dp_usbpd *hpd;
if (!dev) {
DRM_ERROR("invalid dev\n");
@@ -511,12 +493,6 @@ static int dp_display_usbpd_attention_cb(struct device *dev)
dp = container_of(g_dp_display,
struct dp_display_private, dp_display);
- if (!dp) {
- DRM_ERROR("no driver data found\n");
- return -ENODEV;
- }
-
- hpd = dp->usbpd;
/* check for any test request issued by sink */
rc = dp_link_process_request(dp->link);
@@ -579,6 +555,10 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
dp_add_event(dp, EV_CONNECT_PENDING_TIMEOUT, 0, tout);
}
+ /* enable HPD irq_hpd/replug interrupts */
+ dp_catalog_hpd_config_intr(dp->catalog,
+ DP_DP_IRQ_HPD_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK, true);
+
mutex_unlock(&dp->event_mutex);
/* uevent will complete connection part */
@@ -628,7 +608,26 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
mutex_lock(&dp->event_mutex);
state = dp->hpd_state;
- if (state == ST_DISCONNECT_PENDING || state == ST_DISCONNECTED) {
+
+ /* disable irq_hpd/replug interrupts */
+ dp_catalog_hpd_config_intr(dp->catalog,
+ DP_DP_IRQ_HPD_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK, false);
+
+ /* unplugged, no more irq_hpd handling */
+ dp_del_event(dp, EV_IRQ_HPD_INT);
+
+ if (state == ST_DISCONNECTED) {
+ /* triggered by irq_hpd with sink_count = 0 */
+ if (dp->link->sink_count == 0) {
+ dp_ctrl_off_phy(dp->ctrl);
+ hpd->hpd_high = 0;
+ dp->core_initialized = false;
+ }
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+ }
+
+ if (state == ST_DISCONNECT_PENDING) {
mutex_unlock(&dp->event_mutex);
return 0;
}
@@ -642,9 +641,8 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
dp->hpd_state = ST_DISCONNECT_PENDING;
- /* disable HPD plug interrupt until disconnect is done */
- dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK
- | DP_DP_IRQ_HPD_INT_MASK, false);
+ /* disable HPD plug interrupts */
+ dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, false);
hpd->hpd_high = 0;
@@ -660,8 +658,8 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
/* signal the disconnect event early to ensure proper teardown */
dp_display_handle_plugged_change(g_dp_display, false);
- dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK |
- DP_DP_IRQ_HPD_INT_MASK, true);
+ /* enable HPD plug interrupt to prepare for the next plug-in */
+ dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, true);
/* uevent will complete disconnection part */
mutex_unlock(&dp->event_mutex);
@@ -692,7 +690,7 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
/* irq_hpd can happen at either connected or disconnected state */
state = dp->hpd_state;
- if (state == ST_DISPLAY_OFF) {
+ if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) {
mutex_unlock(&dp->event_mutex);
return 0;
}
@@ -724,7 +722,6 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
static void dp_display_deinit_sub_modules(struct dp_display_private *dp)
{
dp_debug_put(dp->debug);
- dp_ctrl_put(dp->ctrl);
dp_panel_put(dp->panel);
dp_aux_put(dp->aux);
dp_audio_put(dp->audio);
@@ -818,13 +815,11 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
rc = PTR_ERR(dp->audio);
pr_err("failed to initialize audio, rc = %d\n", rc);
dp->audio = NULL;
- goto error_audio;
+ goto error_ctrl;
}
return rc;
-error_audio:
- dp_ctrl_put(dp->ctrl);
error_ctrl:
dp_panel_put(dp->panel);
error_link:
@@ -910,9 +905,13 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
dp_display->audio_enabled = false;
- dp_ctrl_off(dp->ctrl);
-
- dp->core_initialized = false;
+ /* triggered by irq_hpd with sink_count = 0 */
+ if (dp->link->sink_count == 0) {
+ dp_ctrl_off_link_stream(dp->ctrl);
+ } else {
+ dp_ctrl_off(dp->ctrl);
+ dp->core_initialized = false;
+ }
dp_display->power_on = false;
@@ -1012,6 +1011,33 @@ int dp_display_get_test_bpp(struct msm_dp *dp)
dp_display->link->test_video.test_bit_depth);
}
+void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp)
+{
+ struct dp_display_private *dp_display;
+ struct drm_device *drm;
+
+ dp_display = container_of(dp, struct dp_display_private, dp_display);
+ drm = dp->drm_dev;
+
+ /*
+ * Reading registers requires the link clocks to be on, but until a
+ * DP cable is connected they stay off because the resolution to
+ * power up with is unknown. Hence check the power_on status before
+ * dumping DP registers to avoid a crash due to unclocked access.
+ */
+ mutex_lock(&dp_display->event_mutex);
+
+ if (!dp->power_on) {
+ mutex_unlock(&dp_display->event_mutex);
+ return;
+ }
+
+ dp_catalog_snapshot(dp_display->catalog, disp_state);
+
+ mutex_unlock(&dp_display->event_mutex);
+}
+
static void dp_display_config_hpd(struct dp_display_private *dp)
{
@@ -1300,8 +1326,13 @@ static int dp_pm_suspend(struct device *dev)
mutex_lock(&dp->event_mutex);
- if (dp->core_initialized == true)
+ if (dp->core_initialized == true) {
+ /* mainlink enabled */
+ if (dp_power_clk_status(dp->power, DP_CTRL_PM))
+ dp_ctrl_off_link_stream(dp->ctrl);
+
dp_display_host_deinit(dp);
+ }
dp->hpd_state = ST_SUSPENDED;
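
Note: the new msm_dp_snapshot() above dumps registers only when the link is powered, and performs the check and the dump under the same event_mutex so the state cannot change in between. Below is a minimal userspace sketch of that guard pattern; the pthread mutex, the power_on flag, and the function names are stand-ins for the driver's locking and state, assumed for illustration only.

/* Check-and-dump under one lock so the power state cannot race the dump. */
#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>

static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool power_on;

static void dump_registers(void)
{
	/* placeholder for the catalog snapshot call */
	printf("dumping DP controller registers\n");
}

static void snapshot(void)
{
	pthread_mutex_lock(&event_mutex);

	if (!power_on) {              /* unclocked access would fault */
		pthread_mutex_unlock(&event_mutex);
		return;
	}

	dump_registers();
	pthread_mutex_unlock(&event_mutex);
}

int main(void)
{
	snapshot();          /* skipped: link clocks off */
	power_on = true;
	snapshot();          /* registers dumped */
	return 0;
}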
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index 5173c89eedf7..8b47cdabb67e 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -8,6 +8,7 @@
#include "dp_panel.h"
#include <sound/hdmi-codec.h>
+#include "disp/msm_disp_snapshot.h"
struct msm_dp {
struct drm_device *drm_dev;
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index be986da78c4a..1195044a7a3b 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -364,7 +364,7 @@ static int dp_link_parse_timing_params3(struct dp_link_private *link,
}
/**
- * dp_parse_video_pattern_params() - parses video pattern parameters from DPCD
+ * dp_link_parse_video_pattern_params() - parses video pattern parameters from DPCD
* @link: Display Port Driver data
*
* Returns 0 if it successfully parses the video link pattern and the link
@@ -563,7 +563,7 @@ static int dp_link_parse_link_training_params(struct dp_link_private *link)
}
/**
- * dp_parse_phy_test_params() - parses the phy link parameters
+ * dp_link_parse_phy_test_params() - parses the phy link parameters
* @link: Display Port Driver data
*
* Parses the DPCD (Byte 0x248) for the DP PHY link pattern that is being
@@ -843,10 +843,8 @@ bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum)
return ret == 1;
}
-static int dp_link_parse_vx_px(struct dp_link_private *link)
+static void dp_link_parse_vx_px(struct dp_link_private *link)
{
- int ret = 0;
-
DRM_DEBUG_DP("vx: 0=%d, 1=%d, 2=%d, 3=%d\n",
drm_dp_get_adjust_request_voltage(link->link_status, 0),
drm_dp_get_adjust_request_voltage(link->link_status, 1),
@@ -876,8 +874,6 @@ static int dp_link_parse_vx_px(struct dp_link_private *link)
DRM_DEBUG_DP("Requested: v_level = 0x%x, p_level = 0x%x\n",
link->dp_link.phy_params.v_level,
link->dp_link.phy_params.p_level);
-
- return ret;
}
/**
@@ -891,8 +887,6 @@ static int dp_link_parse_vx_px(struct dp_link_private *link)
static int dp_link_process_phy_test_pattern_request(
struct dp_link_private *link)
{
- int ret = 0;
-
if (!(link->request.test_requested & DP_TEST_LINK_PHY_TEST_PATTERN)) {
DRM_DEBUG_DP("no phy test\n");
return -EINVAL;
@@ -918,12 +912,9 @@ static int dp_link_process_phy_test_pattern_request(
link->dp_link.link_params.rate =
drm_dp_bw_code_to_link_rate(link->request.test_link_rate);
- ret = dp_link_parse_vx_px(link);
-
- if (ret)
- DRM_ERROR("parse_vx_px failed. ret=%d\n", ret);
+ dp_link_parse_vx_px(link);
- return ret;
+ return 0;
}
static u8 get_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
@@ -961,7 +952,7 @@ static int dp_link_process_link_status_update(struct dp_link_private *link)
}
/**
- * dp_link_process_downstream_port_status_change() - process port status changes
+ * dp_link_process_ds_port_status_change() - process port status changes
* @link: Display Port Driver data
*
* This function will handle downstream port updates that are initiated by
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 9cc816663668..440b32753430 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -141,7 +141,6 @@ static int dp_panel_update_modes(struct drm_connector *connector,
return rc;
}
rc = drm_add_edid_modes(connector, edid);
- DRM_DEBUG_DP("%s -", __func__);
return rc;
}
@@ -351,7 +350,6 @@ void dp_panel_dump_regs(struct dp_panel *dp_panel)
int dp_panel_timing_cfg(struct dp_panel *dp_panel)
{
- int rc = 0;
u32 data, total_ver, total_hor;
struct dp_catalog *catalog;
struct dp_panel_private *panel;
@@ -404,7 +402,7 @@ int dp_panel_timing_cfg(struct dp_panel *dp_panel)
dp_catalog_panel_timing_cfg(catalog);
panel->panel_on = true;
- return rc;
+ return 0;
}
int dp_panel_init_panel_info(struct dp_panel *dp_panel)
diff --git a/drivers/gpu/drm/msm/dp/dp_power.h b/drivers/gpu/drm/msm/dp/dp_power.h
index 7d0327bbc0d5..e3f959ffae12 100644
--- a/drivers/gpu/drm/msm/dp/dp_power.h
+++ b/drivers/gpu/drm/msm/dp/dp_power.h
@@ -88,7 +88,7 @@ int dp_power_client_init(struct dp_power *power);
* return: 0 for success, error for failure.
*
* This API will de-initialize the DisplayPort's clocks and regulator
- * modueles.
+ * modules.
*/
void dp_power_client_deinit(struct dp_power *power);
@@ -100,7 +100,7 @@ void dp_power_client_deinit(struct dp_power *power);
*
* This API will configure the DisplayPort's power module and provides
* methods to be called by the client to configure the power related
- * modueles.
+ * modules.
*/
struct dp_power *dp_power_get(struct device *dev, struct dp_parser *parser);
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 627048851d99..75afc12a7b25 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -266,3 +266,9 @@ fail:
return ret;
}
+void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi)
+{
+ msm_dsi_host_snapshot(disp_state, msm_dsi->host);
+ msm_dsi_phy_snapshot(disp_state, msm_dsi->phy);
+}
+
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 7abfeab08165..9b8e9b07eced 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -15,6 +15,7 @@
#include <drm/drm_panel.h>
#include "msm_drv.h"
+#include "disp/msm_disp_snapshot.h"
#define DSI_0 0
#define DSI_1 1
@@ -146,7 +147,7 @@ int dsi_clk_init_v2(struct msm_dsi_host *msm_host);
int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host);
int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi);
int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi);
-
+void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host);
/* dsi phy */
struct msm_dsi_phy;
struct msm_dsi_phy_shared_timings {
@@ -173,6 +174,7 @@ int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy,
struct clk **byte_clk_provider, struct clk **pixel_clk_provider);
void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy);
int msm_dsi_phy_pll_restore_state(struct msm_dsi_phy *phy);
+void msm_dsi_phy_snapshot(struct msm_disp_state *disp_state, struct msm_dsi_phy *phy);
#endif /* __DSI_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 50eb4d1b8fdd..eadbcc78fd72 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -8,19 +8,27 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 42301 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41874 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2020-07-23 21:58:14)
-
-Copyright (C) 2013-2020 by the following authors:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 981 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 15291 bytes, from 2021-06-15 22:36:13)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 10953 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_5nm.xml ( 10900 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-02-18 16:45:44)
+
+Copyright (C) 2013-2021 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -621,1693 +629,7 @@ static inline uint32_t DSI_VERSION_MAJOR(uint32_t val)
return ((val) << DSI_VERSION_MAJOR__SHIFT) & DSI_VERSION_MAJOR__MASK;
}
-#define REG_DSI_PHY_PLL_CTRL_0 0x00000200
-#define DSI_PHY_PLL_CTRL_0_ENABLE 0x00000001
-
-#define REG_DSI_PHY_PLL_CTRL_1 0x00000204
-
-#define REG_DSI_PHY_PLL_CTRL_2 0x00000208
-
-#define REG_DSI_PHY_PLL_CTRL_3 0x0000020c
-
-#define REG_DSI_PHY_PLL_CTRL_4 0x00000210
-
-#define REG_DSI_PHY_PLL_CTRL_5 0x00000214
-
-#define REG_DSI_PHY_PLL_CTRL_6 0x00000218
-
-#define REG_DSI_PHY_PLL_CTRL_7 0x0000021c
-
-#define REG_DSI_PHY_PLL_CTRL_8 0x00000220
-
-#define REG_DSI_PHY_PLL_CTRL_9 0x00000224
-
-#define REG_DSI_PHY_PLL_CTRL_10 0x00000228
-
-#define REG_DSI_PHY_PLL_CTRL_11 0x0000022c
-
-#define REG_DSI_PHY_PLL_CTRL_12 0x00000230
-
-#define REG_DSI_PHY_PLL_CTRL_13 0x00000234
-
-#define REG_DSI_PHY_PLL_CTRL_14 0x00000238
-
-#define REG_DSI_PHY_PLL_CTRL_15 0x0000023c
-
-#define REG_DSI_PHY_PLL_CTRL_16 0x00000240
-
-#define REG_DSI_PHY_PLL_CTRL_17 0x00000244
-
-#define REG_DSI_PHY_PLL_CTRL_18 0x00000248
-
-#define REG_DSI_PHY_PLL_CTRL_19 0x0000024c
-
-#define REG_DSI_PHY_PLL_CTRL_20 0x00000250
-
-#define REG_DSI_PHY_PLL_STATUS 0x00000280
-#define DSI_PHY_PLL_STATUS_PLL_BUSY 0x00000001
-
-#define REG_DSI_8x60_PHY_TPA_CTRL_1 0x00000258
-
-#define REG_DSI_8x60_PHY_TPA_CTRL_2 0x0000025c
-
-#define REG_DSI_8x60_PHY_TIMING_CTRL_0 0x00000260
-
-#define REG_DSI_8x60_PHY_TIMING_CTRL_1 0x00000264
-
-#define REG_DSI_8x60_PHY_TIMING_CTRL_2 0x00000268
-
-#define REG_DSI_8x60_PHY_TIMING_CTRL_3 0x0000026c
-
-#define REG_DSI_8x60_PHY_TIMING_CTRL_4 0x00000270
-
-#define REG_DSI_8x60_PHY_TIMING_CTRL_5 0x00000274
-
-#define REG_DSI_8x60_PHY_TIMING_CTRL_6 0x00000278
-
-#define REG_DSI_8x60_PHY_TIMING_CTRL_7 0x0000027c
-
-#define REG_DSI_8x60_PHY_TIMING_CTRL_8 0x00000280
-
-#define REG_DSI_8x60_PHY_TIMING_CTRL_9 0x00000284
-
-#define REG_DSI_8x60_PHY_TIMING_CTRL_10 0x00000288
-
-#define REG_DSI_8x60_PHY_TIMING_CTRL_11 0x0000028c
-
-#define REG_DSI_8x60_PHY_CTRL_0 0x00000290
-
-#define REG_DSI_8x60_PHY_CTRL_1 0x00000294
-
-#define REG_DSI_8x60_PHY_CTRL_2 0x00000298
-
-#define REG_DSI_8x60_PHY_CTRL_3 0x0000029c
-
-#define REG_DSI_8x60_PHY_STRENGTH_0 0x000002a0
-
-#define REG_DSI_8x60_PHY_STRENGTH_1 0x000002a4
-
-#define REG_DSI_8x60_PHY_STRENGTH_2 0x000002a8
-
-#define REG_DSI_8x60_PHY_STRENGTH_3 0x000002ac
-
-#define REG_DSI_8x60_PHY_REGULATOR_CTRL_0 0x000002cc
-
-#define REG_DSI_8x60_PHY_REGULATOR_CTRL_1 0x000002d0
-
-#define REG_DSI_8x60_PHY_REGULATOR_CTRL_2 0x000002d4
-
-#define REG_DSI_8x60_PHY_REGULATOR_CTRL_3 0x000002d8
-
-#define REG_DSI_8x60_PHY_REGULATOR_CTRL_4 0x000002dc
-
-#define REG_DSI_8x60_PHY_CAL_HW_TRIGGER 0x000000f0
-
-#define REG_DSI_8x60_PHY_CAL_CTRL 0x000000f4
-
-#define REG_DSI_8x60_PHY_CAL_STATUS 0x000000fc
-#define DSI_8x60_PHY_CAL_STATUS_CAL_BUSY 0x10000000
-
-static inline uint32_t REG_DSI_28nm_8960_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_8960_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_8960_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_8960_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_8960_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000000c + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_8960_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x00000014 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_8960_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000018 + 0x40*i0; }
-
-#define REG_DSI_28nm_8960_PHY_LNCK_CFG_0 0x00000100
-
-#define REG_DSI_28nm_8960_PHY_LNCK_CFG_1 0x00000104
-
-#define REG_DSI_28nm_8960_PHY_LNCK_CFG_2 0x00000108
-
-#define REG_DSI_28nm_8960_PHY_LNCK_TEST_DATAPATH 0x0000010c
-
-#define REG_DSI_28nm_8960_PHY_LNCK_TEST_STR0 0x00000114
-
-#define REG_DSI_28nm_8960_PHY_LNCK_TEST_STR1 0x00000118
-
-#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_0 0x00000140
-#define DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__MASK 0x000000ff
-#define DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT 0
-static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val)
-{
- return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__MASK;
-}
-
-#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_1 0x00000144
-#define DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK 0x000000ff
-#define DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT 0
-static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val)
-{
- return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK;
-}
-
-#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_2 0x00000148
-#define DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK 0x000000ff
-#define DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT 0
-static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val)
-{
- return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK;
-}
-
-#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_3 0x0000014c
-
-#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_4 0x00000150
-#define DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff
-#define DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT 0
-static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val)
-{
- return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__MASK;
-}
-
-#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_5 0x00000154
-#define DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff
-#define DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT 0
-static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val)
-{
- return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__MASK;
-}
-
-#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_6 0x00000158
-#define DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff
-#define DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT 0
-static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
-{
- return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__MASK;
-}
-
-#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_7 0x0000015c
-#define DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff
-#define DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT 0
-static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
-{
- return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__MASK;
-}
-
-#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_8 0x00000160
-#define DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff
-#define DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__SHIFT 0
-static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val)
-{
- return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__MASK;
-}
-
-#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_9 0x00000164
-#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__MASK 0x00000007
-#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__SHIFT 0
-static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(uint32_t val)
-{
- return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__MASK;
-}
-#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__MASK 0x00000070
-#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__SHIFT 4
-static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val)
-{
- return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__MASK;
-}
-
-#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_10 0x00000168
-#define DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__MASK 0x00000007
-#define DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__SHIFT 0
-static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(uint32_t val)
-{
- return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__MASK;
-}
-
-#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_11 0x0000016c
-#define DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff
-#define DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0
-static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
-{
- return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK;
-}
-
-#define REG_DSI_28nm_8960_PHY_CTRL_0 0x00000170
-
-#define REG_DSI_28nm_8960_PHY_CTRL_1 0x00000174
-
-#define REG_DSI_28nm_8960_PHY_CTRL_2 0x00000178
-
-#define REG_DSI_28nm_8960_PHY_CTRL_3 0x0000017c
-
-#define REG_DSI_28nm_8960_PHY_STRENGTH_0 0x00000180
-
-#define REG_DSI_28nm_8960_PHY_STRENGTH_1 0x00000184
-
-#define REG_DSI_28nm_8960_PHY_STRENGTH_2 0x00000188
-
-#define REG_DSI_28nm_8960_PHY_BIST_CTRL_0 0x0000018c
-
-#define REG_DSI_28nm_8960_PHY_BIST_CTRL_1 0x00000190
-
-#define REG_DSI_28nm_8960_PHY_BIST_CTRL_2 0x00000194
-
-#define REG_DSI_28nm_8960_PHY_BIST_CTRL_3 0x00000198
-
-#define REG_DSI_28nm_8960_PHY_BIST_CTRL_4 0x0000019c
-
-#define REG_DSI_28nm_8960_PHY_LDO_CTRL 0x000001b0
-
-#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0 0x00000000
-
-#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1 0x00000004
-
-#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2 0x00000008
-
-#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3 0x0000000c
-
-#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4 0x00000010
-
-#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_5 0x00000014
-
-#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CAL_PWR_CFG 0x00000018
-
-#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER 0x00000028
-
-#define REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_0 0x0000002c
-
-#define REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_1 0x00000030
-
-#define REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_2 0x00000034
-
-#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_0 0x00000038
-
-#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_1 0x0000003c
-
-#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_2 0x00000040
-
-#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_3 0x00000044
-
-#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_4 0x00000048
-
-#define REG_DSI_28nm_8960_PHY_MISC_CAL_STATUS 0x00000050
-#define DSI_28nm_8960_PHY_MISC_CAL_STATUS_CAL_BUSY 0x00000010
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_0 0x00000000
-#define DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE 0x00000001
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_1 0x00000004
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_2 0x00000008
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_3 0x0000000c
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_4 0x00000010
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_5 0x00000014
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_6 0x00000018
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_7 0x0000001c
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_8 0x00000020
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_9 0x00000024
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_10 0x00000028
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_11 0x0000002c
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_12 0x00000030
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_13 0x00000034
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_14 0x00000038
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_15 0x0000003c
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_16 0x00000040
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_17 0x00000044
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_18 0x00000048
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_19 0x0000004c
-
-#define REG_DSI_28nm_8960_PHY_PLL_CTRL_20 0x00000050
-
-#define REG_DSI_28nm_8960_PHY_PLL_RDY 0x00000080
-#define DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY 0x00000001
-
-static inline uint32_t REG_DSI_28nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_3(uint32_t i0) { return 0x0000000c + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_4(uint32_t i0) { return 0x00000010 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000014 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_PHY_LN_DEBUG_SEL(uint32_t i0) { return 0x00000018 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x0000001c + 0x40*i0; }
-
-static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000020 + 0x40*i0; }
-
-#define REG_DSI_28nm_PHY_LNCK_CFG_0 0x00000100
-
-#define REG_DSI_28nm_PHY_LNCK_CFG_1 0x00000104
-
-#define REG_DSI_28nm_PHY_LNCK_CFG_2 0x00000108
-
-#define REG_DSI_28nm_PHY_LNCK_CFG_3 0x0000010c
-
-#define REG_DSI_28nm_PHY_LNCK_CFG_4 0x00000110
-
-#define REG_DSI_28nm_PHY_LNCK_TEST_DATAPATH 0x00000114
-
-#define REG_DSI_28nm_PHY_LNCK_DEBUG_SEL 0x00000118
-
-#define REG_DSI_28nm_PHY_LNCK_TEST_STR0 0x0000011c
-
-#define REG_DSI_28nm_PHY_LNCK_TEST_STR1 0x00000120
-
-#define REG_DSI_28nm_PHY_TIMING_CTRL_0 0x00000140
-#define DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK 0x000000ff
-#define DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK;
-}
-
-#define REG_DSI_28nm_PHY_TIMING_CTRL_1 0x00000144
-#define DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK 0x000000ff
-#define DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK;
-}
-
-#define REG_DSI_28nm_PHY_TIMING_CTRL_2 0x00000148
-#define DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK 0x000000ff
-#define DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK;
-}
-
-#define REG_DSI_28nm_PHY_TIMING_CTRL_3 0x0000014c
-#define DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8 0x00000001
-
-#define REG_DSI_28nm_PHY_TIMING_CTRL_4 0x00000150
-#define DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff
-#define DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK;
-}
-
-#define REG_DSI_28nm_PHY_TIMING_CTRL_5 0x00000154
-#define DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff
-#define DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK;
-}
-
-#define REG_DSI_28nm_PHY_TIMING_CTRL_6 0x00000158
-#define DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff
-#define DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK;
-}
-
-#define REG_DSI_28nm_PHY_TIMING_CTRL_7 0x0000015c
-#define DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff
-#define DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK;
-}
-
-#define REG_DSI_28nm_PHY_TIMING_CTRL_8 0x00000160
-#define DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff
-#define DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__MASK;
-}
-
-#define REG_DSI_28nm_PHY_TIMING_CTRL_9 0x00000164
-#define DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__MASK 0x00000007
-#define DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__MASK;
-}
-#define DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__MASK 0x00000070
-#define DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT 4
-static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__MASK;
-}
-
-#define REG_DSI_28nm_PHY_TIMING_CTRL_10 0x00000168
-#define DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__MASK 0x00000007
-#define DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__MASK;
-}
-
-#define REG_DSI_28nm_PHY_TIMING_CTRL_11 0x0000016c
-#define DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff
-#define DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK;
-}
-
-#define REG_DSI_28nm_PHY_CTRL_0 0x00000170
-
-#define REG_DSI_28nm_PHY_CTRL_1 0x00000174
-
-#define REG_DSI_28nm_PHY_CTRL_2 0x00000178
-
-#define REG_DSI_28nm_PHY_CTRL_3 0x0000017c
-
-#define REG_DSI_28nm_PHY_CTRL_4 0x00000180
-
-#define REG_DSI_28nm_PHY_STRENGTH_0 0x00000184
-
-#define REG_DSI_28nm_PHY_STRENGTH_1 0x00000188
-
-#define REG_DSI_28nm_PHY_BIST_CTRL_0 0x000001b4
-
-#define REG_DSI_28nm_PHY_BIST_CTRL_1 0x000001b8
-
-#define REG_DSI_28nm_PHY_BIST_CTRL_2 0x000001bc
-
-#define REG_DSI_28nm_PHY_BIST_CTRL_3 0x000001c0
-
-#define REG_DSI_28nm_PHY_BIST_CTRL_4 0x000001c4
-
-#define REG_DSI_28nm_PHY_BIST_CTRL_5 0x000001c8
-
-#define REG_DSI_28nm_PHY_GLBL_TEST_CTRL 0x000001d4
-#define DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000001
-
-#define REG_DSI_28nm_PHY_LDO_CNTRL 0x000001dc
-
-#define REG_DSI_28nm_PHY_REGULATOR_CTRL_0 0x00000000
-
-#define REG_DSI_28nm_PHY_REGULATOR_CTRL_1 0x00000004
-
-#define REG_DSI_28nm_PHY_REGULATOR_CTRL_2 0x00000008
-
-#define REG_DSI_28nm_PHY_REGULATOR_CTRL_3 0x0000000c
-
-#define REG_DSI_28nm_PHY_REGULATOR_CTRL_4 0x00000010
-
-#define REG_DSI_28nm_PHY_REGULATOR_CTRL_5 0x00000014
-
-#define REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018
-
-#define REG_DSI_28nm_PHY_PLL_REFCLK_CFG 0x00000000
-#define DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR 0x00000001
-
-#define REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG 0x00000004
-
-#define REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG 0x00000008
-
-#define REG_DSI_28nm_PHY_PLL_VCOLPF_CFG 0x0000000c
-
-#define REG_DSI_28nm_PHY_PLL_VREG_CFG 0x00000010
-#define DSI_28nm_PHY_PLL_VREG_CFG_POSTDIV1_BYPASS_B 0x00000002
-
-#define REG_DSI_28nm_PHY_PLL_PWRGEN_CFG 0x00000014
-
-#define REG_DSI_28nm_PHY_PLL_DMUX_CFG 0x00000018
-
-#define REG_DSI_28nm_PHY_PLL_AMUX_CFG 0x0000001c
-
-#define REG_DSI_28nm_PHY_PLL_GLB_CFG 0x00000020
-#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B 0x00000001
-#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B 0x00000002
-#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B 0x00000004
-#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE 0x00000008
-
-#define REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG 0x00000024
-
-#define REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG 0x00000028
-
-#define REG_DSI_28nm_PHY_PLL_LPFR_CFG 0x0000002c
-
-#define REG_DSI_28nm_PHY_PLL_LPFC1_CFG 0x00000030
-
-#define REG_DSI_28nm_PHY_PLL_LPFC2_CFG 0x00000034
-
-#define REG_DSI_28nm_PHY_PLL_SDM_CFG0 0x00000038
-#define DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__MASK 0x0000003f
-#define DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__MASK;
-}
-#define DSI_28nm_PHY_PLL_SDM_CFG0_BYP 0x00000040
-
-#define REG_DSI_28nm_PHY_PLL_SDM_CFG1 0x0000003c
-#define DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK 0x0000003f
-#define DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;
-}
-#define DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__MASK 0x00000040
-#define DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__SHIFT 6
-static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__MASK;
-}
-
-#define REG_DSI_28nm_PHY_PLL_SDM_CFG2 0x00000040
-#define DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__MASK 0x000000ff
-#define DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__MASK;
-}
-
-#define REG_DSI_28nm_PHY_PLL_SDM_CFG3 0x00000044
-#define DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__MASK 0x000000ff
-#define DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__SHIFT 0
-static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(uint32_t val)
-{
- return ((val) << DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__MASK;
-}
-
-#define REG_DSI_28nm_PHY_PLL_SDM_CFG4 0x00000048
-
-#define REG_DSI_28nm_PHY_PLL_SSC_CFG0 0x0000004c
-
-#define REG_DSI_28nm_PHY_PLL_SSC_CFG1 0x00000050
-
-#define REG_DSI_28nm_PHY_PLL_SSC_CFG2 0x00000054
-
-#define REG_DSI_28nm_PHY_PLL_SSC_CFG3 0x00000058
-
-#define REG_DSI_28nm_PHY_PLL_LKDET_CFG0 0x0000005c
-
-#define REG_DSI_28nm_PHY_PLL_LKDET_CFG1 0x00000060
-
-#define REG_DSI_28nm_PHY_PLL_LKDET_CFG2 0x00000064
-
-#define REG_DSI_28nm_PHY_PLL_TEST_CFG 0x00000068
-#define DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET 0x00000001
-
-#define REG_DSI_28nm_PHY_PLL_CAL_CFG0 0x0000006c
-
-#define REG_DSI_28nm_PHY_PLL_CAL_CFG1 0x00000070
-
-#define REG_DSI_28nm_PHY_PLL_CAL_CFG2 0x00000074
-
-#define REG_DSI_28nm_PHY_PLL_CAL_CFG3 0x00000078
-
-#define REG_DSI_28nm_PHY_PLL_CAL_CFG4 0x0000007c
-
-#define REG_DSI_28nm_PHY_PLL_CAL_CFG5 0x00000080
-
-#define REG_DSI_28nm_PHY_PLL_CAL_CFG6 0x00000084
-
-#define REG_DSI_28nm_PHY_PLL_CAL_CFG7 0x00000088
-
-#define REG_DSI_28nm_PHY_PLL_CAL_CFG8 0x0000008c
-
-#define REG_DSI_28nm_PHY_PLL_CAL_CFG9 0x00000090
-
-#define REG_DSI_28nm_PHY_PLL_CAL_CFG10 0x00000094
-
-#define REG_DSI_28nm_PHY_PLL_CAL_CFG11 0x00000098
-
-#define REG_DSI_28nm_PHY_PLL_EFUSE_CFG 0x0000009c
-
-#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS_SEL 0x000000a0
-
-#define REG_DSI_28nm_PHY_PLL_CTRL_42 0x000000a4
-
-#define REG_DSI_28nm_PHY_PLL_CTRL_43 0x000000a8
-
-#define REG_DSI_28nm_PHY_PLL_CTRL_44 0x000000ac
-
-#define REG_DSI_28nm_PHY_PLL_CTRL_45 0x000000b0
-
-#define REG_DSI_28nm_PHY_PLL_CTRL_46 0x000000b4
-
-#define REG_DSI_28nm_PHY_PLL_CTRL_47 0x000000b8
-
-#define REG_DSI_28nm_PHY_PLL_CTRL_48 0x000000bc
-
-#define REG_DSI_28nm_PHY_PLL_STATUS 0x000000c0
-#define DSI_28nm_PHY_PLL_STATUS_PLL_RDY 0x00000001
-
-#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS0 0x000000c4
-
-#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS1 0x000000c8
-
-#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS2 0x000000cc
-
-#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS3 0x000000d0
-
-#define REG_DSI_28nm_PHY_PLL_CTRL_54 0x000000d4
-
-static inline uint32_t REG_DSI_20nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_3(uint32_t i0) { return 0x0000000c + 0x40*i0; }
-
-static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_4(uint32_t i0) { return 0x00000010 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000014 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_20nm_PHY_LN_DEBUG_SEL(uint32_t i0) { return 0x00000018 + 0x40*i0; }
-
-static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x0000001c + 0x40*i0; }
-
-static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000020 + 0x40*i0; }
-
-#define REG_DSI_20nm_PHY_LNCK_CFG_0 0x00000100
-
-#define REG_DSI_20nm_PHY_LNCK_CFG_1 0x00000104
-
-#define REG_DSI_20nm_PHY_LNCK_CFG_2 0x00000108
-
-#define REG_DSI_20nm_PHY_LNCK_CFG_3 0x0000010c
-
-#define REG_DSI_20nm_PHY_LNCK_CFG_4 0x00000110
-
-#define REG_DSI_20nm_PHY_LNCK_TEST_DATAPATH 0x00000114
-
-#define REG_DSI_20nm_PHY_LNCK_DEBUG_SEL 0x00000118
-
-#define REG_DSI_20nm_PHY_LNCK_TEST_STR0 0x0000011c
-
-#define REG_DSI_20nm_PHY_LNCK_TEST_STR1 0x00000120
-
-#define REG_DSI_20nm_PHY_TIMING_CTRL_0 0x00000140
-#define DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK 0x000000ff
-#define DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT 0
-static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val)
-{
- return ((val) << DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK;
-}
-
-#define REG_DSI_20nm_PHY_TIMING_CTRL_1 0x00000144
-#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK 0x000000ff
-#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT 0
-static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val)
-{
- return ((val) << DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK;
-}
-
-#define REG_DSI_20nm_PHY_TIMING_CTRL_2 0x00000148
-#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK 0x000000ff
-#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT 0
-static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val)
-{
- return ((val) << DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK;
-}
-
-#define REG_DSI_20nm_PHY_TIMING_CTRL_3 0x0000014c
-#define DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8 0x00000001
-
-#define REG_DSI_20nm_PHY_TIMING_CTRL_4 0x00000150
-#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff
-#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT 0
-static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val)
-{
- return ((val) << DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK;
-}
-
-#define REG_DSI_20nm_PHY_TIMING_CTRL_5 0x00000154
-#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff
-#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT 0
-static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val)
-{
- return ((val) << DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK;
-}
-
-#define REG_DSI_20nm_PHY_TIMING_CTRL_6 0x00000158
-#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff
-#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT 0
-static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
-{
- return ((val) << DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK;
-}
-
-#define REG_DSI_20nm_PHY_TIMING_CTRL_7 0x0000015c
-#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff
-#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT 0
-static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
-{
- return ((val) << DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK;
-}
-
-#define REG_DSI_20nm_PHY_TIMING_CTRL_8 0x00000160
-#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff
-#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT 0
-static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val)
-{
- return ((val) << DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK;
-}
-
-#define REG_DSI_20nm_PHY_TIMING_CTRL_9 0x00000164
-#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK 0x00000007
-#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT 0
-static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(uint32_t val)
-{
- return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK;
-}
-#define DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK 0x00000070
-#define DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT 4
-static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val)
-{
- return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK;
-}
-
-#define REG_DSI_20nm_PHY_TIMING_CTRL_10 0x00000168
-#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK 0x00000007
-#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT 0
-static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(uint32_t val)
-{
- return ((val) << DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK;
-}
-
-#define REG_DSI_20nm_PHY_TIMING_CTRL_11 0x0000016c
-#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff
-#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0
-static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
-{
- return ((val) << DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK;
-}
-
-#define REG_DSI_20nm_PHY_CTRL_0 0x00000170
-
-#define REG_DSI_20nm_PHY_CTRL_1 0x00000174
-
-#define REG_DSI_20nm_PHY_CTRL_2 0x00000178
-
-#define REG_DSI_20nm_PHY_CTRL_3 0x0000017c
-
-#define REG_DSI_20nm_PHY_CTRL_4 0x00000180
-
-#define REG_DSI_20nm_PHY_STRENGTH_0 0x00000184
-
-#define REG_DSI_20nm_PHY_STRENGTH_1 0x00000188
-
-#define REG_DSI_20nm_PHY_BIST_CTRL_0 0x000001b4
-
-#define REG_DSI_20nm_PHY_BIST_CTRL_1 0x000001b8
-
-#define REG_DSI_20nm_PHY_BIST_CTRL_2 0x000001bc
-
-#define REG_DSI_20nm_PHY_BIST_CTRL_3 0x000001c0
-
-#define REG_DSI_20nm_PHY_BIST_CTRL_4 0x000001c4
-
-#define REG_DSI_20nm_PHY_BIST_CTRL_5 0x000001c8
-
-#define REG_DSI_20nm_PHY_GLBL_TEST_CTRL 0x000001d4
-#define DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000001
-
-#define REG_DSI_20nm_PHY_LDO_CNTRL 0x000001dc
-
-#define REG_DSI_20nm_PHY_REGULATOR_CTRL_0 0x00000000
-
-#define REG_DSI_20nm_PHY_REGULATOR_CTRL_1 0x00000004
-
-#define REG_DSI_20nm_PHY_REGULATOR_CTRL_2 0x00000008
-
-#define REG_DSI_20nm_PHY_REGULATOR_CTRL_3 0x0000000c
-
-#define REG_DSI_20nm_PHY_REGULATOR_CTRL_4 0x00000010
-
-#define REG_DSI_20nm_PHY_REGULATOR_CTRL_5 0x00000014
-
-#define REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018
-
-#define REG_DSI_14nm_PHY_CMN_REVISION_ID0 0x00000000
-
-#define REG_DSI_14nm_PHY_CMN_REVISION_ID1 0x00000004
-
-#define REG_DSI_14nm_PHY_CMN_REVISION_ID2 0x00000008
-
-#define REG_DSI_14nm_PHY_CMN_REVISION_ID3 0x0000000c
-
-#define REG_DSI_14nm_PHY_CMN_CLK_CFG0 0x00000010
-#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK 0x000000f0
-#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT 4
-static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(uint32_t val)
-{
- return ((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK;
-}
-#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK 0x000000f0
-#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT 4
-static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(uint32_t val)
-{
- return ((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK;
-}
-
-#define REG_DSI_14nm_PHY_CMN_CLK_CFG1 0x00000014
-#define DSI_14nm_PHY_CMN_CLK_CFG1_DSICLK_SEL 0x00000001
-
-#define REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL 0x00000018
-#define DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000004
-
-#define REG_DSI_14nm_PHY_CMN_CTRL_0 0x0000001c
-
-#define REG_DSI_14nm_PHY_CMN_CTRL_1 0x00000020
-
-#define REG_DSI_14nm_PHY_CMN_HW_TRIGGER 0x00000024
-
-#define REG_DSI_14nm_PHY_CMN_SW_CFG0 0x00000028
-
-#define REG_DSI_14nm_PHY_CMN_SW_CFG1 0x0000002c
-
-#define REG_DSI_14nm_PHY_CMN_SW_CFG2 0x00000030
-
-#define REG_DSI_14nm_PHY_CMN_HW_CFG0 0x00000034
-
-#define REG_DSI_14nm_PHY_CMN_HW_CFG1 0x00000038
-
-#define REG_DSI_14nm_PHY_CMN_HW_CFG2 0x0000003c
-
-#define REG_DSI_14nm_PHY_CMN_HW_CFG3 0x00000040
-
-#define REG_DSI_14nm_PHY_CMN_HW_CFG4 0x00000044
-
-#define REG_DSI_14nm_PHY_CMN_PLL_CNTRL 0x00000048
-#define DSI_14nm_PHY_CMN_PLL_CNTRL_PLL_START 0x00000001
-
-#define REG_DSI_14nm_PHY_CMN_LDO_CNTRL 0x0000004c
-#define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK 0x0000003f
-#define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT 0
-static inline uint32_t DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(uint32_t val)
-{
- return ((val) << DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT) & DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK;
-}
-
-static inline uint32_t REG_DSI_14nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }
-#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK 0x000000c0
-#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT 6
-static inline uint32_t DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(uint32_t val)
-{
- return ((val) << DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT) & DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK;
-}
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; }
-#define DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN 0x00000001
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_CFG3(uint32_t i0) { return 0x0000000c + 0x80*i0; }
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000010 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_TEST_STR(uint32_t i0) { return 0x00000014 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(uint32_t i0) { return 0x00000018 + 0x80*i0; }
-#define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff
-#define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT 0
-static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(uint32_t val)
-{
- return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK;
-}
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(uint32_t i0) { return 0x0000001c + 0x80*i0; }
-#define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff
-#define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT 0
-static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(uint32_t val)
-{
- return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK;
-}
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(uint32_t i0) { return 0x00000020 + 0x80*i0; }
-#define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff
-#define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT 0
-static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
-{
- return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK;
-}
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(uint32_t i0) { return 0x00000024 + 0x80*i0; }
-#define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff
-#define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT 0
-static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
-{
- return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK;
-}
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(uint32_t i0) { return 0x00000028 + 0x80*i0; }
-#define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff
-#define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT 0
-static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(uint32_t val)
-{
- return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK;
-}
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(uint32_t i0) { return 0x0000002c + 0x80*i0; }
-#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK 0x00000007
-#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT 0
-static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(uint32_t val)
-{
- return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK;
-}
-#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK 0x00000070
-#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT 4
-static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(uint32_t val)
-{
- return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK;
-}
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(uint32_t i0) { return 0x00000030 + 0x80*i0; }
-#define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK 0x00000007
-#define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT 0
-static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(uint32_t val)
-{
- return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK;
-}
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(uint32_t i0) { return 0x00000034 + 0x80*i0; }
-#define DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff
-#define DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0
-static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
-{
- return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK;
-}
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(uint32_t i0) { return 0x00000038 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(uint32_t i0) { return 0x0000003c + 0x80*i0; }
-
-static inline uint32_t REG_DSI_14nm_PHY_LN_VREG_CNTRL(uint32_t i0) { return 0x00000064 + 0x80*i0; }
-
-#define REG_DSI_14nm_PHY_PLL_IE_TRIM 0x00000000
-
-#define REG_DSI_14nm_PHY_PLL_IP_TRIM 0x00000004
-
-#define REG_DSI_14nm_PHY_PLL_IPTAT_TRIM 0x00000010
-
-#define REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN 0x0000001c
-
-#define REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET 0x00000028
-
-#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL 0x0000002c
-
-#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2 0x00000030
-
-#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL3 0x00000034
-
-#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL4 0x00000038
-
-#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5 0x0000003c
-
-#define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1 0x00000040
-
-#define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2 0x00000044
-
-#define REG_DSI_14nm_PHY_PLL_KVCO_COUNT1 0x00000048
-
-#define REG_DSI_14nm_PHY_PLL_KVCO_COUNT2 0x0000004c
-
-#define REG_DSI_14nm_PHY_PLL_VREF_CFG1 0x0000005c
-
-#define REG_DSI_14nm_PHY_PLL_KVCO_CODE 0x00000058
-
-#define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1 0x0000006c
-
-#define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2 0x00000070
-
-#define REG_DSI_14nm_PHY_PLL_VCO_COUNT1 0x00000074
-
-#define REG_DSI_14nm_PHY_PLL_VCO_COUNT2 0x00000078
-
-#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1 0x0000007c
-
-#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2 0x00000080
-
-#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3 0x00000084
-
-#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN 0x00000088
-
-#define REG_DSI_14nm_PHY_PLL_PLL_VCO_TUNE 0x0000008c
-
-#define REG_DSI_14nm_PHY_PLL_DEC_START 0x00000090
-
-#define REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER 0x00000094
-
-#define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1 0x00000098
-
-#define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2 0x0000009c
-
-#define REG_DSI_14nm_PHY_PLL_SSC_PER1 0x000000a0
-
-#define REG_DSI_14nm_PHY_PLL_SSC_PER2 0x000000a4
-
-#define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1 0x000000a8
-
-#define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2 0x000000ac
-
-#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1 0x000000b4
-
-#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2 0x000000b8
-
-#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3 0x000000bc
-
-#define REG_DSI_14nm_PHY_PLL_TXCLK_EN 0x000000c0
-
-#define REG_DSI_14nm_PHY_PLL_PLL_CRCTRL 0x000000c4
-
-#define REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS 0x000000cc
-
-#define REG_DSI_14nm_PHY_PLL_PLL_MISC1 0x000000e8
-
-#define REG_DSI_14nm_PHY_PLL_CP_SET_CUR 0x000000f0
-
-#define REG_DSI_14nm_PHY_PLL_PLL_ICPMSET 0x000000f4
-
-#define REG_DSI_14nm_PHY_PLL_PLL_ICPCSET 0x000000f8
-
-#define REG_DSI_14nm_PHY_PLL_PLL_ICP_SET 0x000000fc
-
-#define REG_DSI_14nm_PHY_PLL_PLL_LPF1 0x00000100
-
-#define REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV 0x00000104
-
-#define REG_DSI_14nm_PHY_PLL_PLL_BANDGAP 0x00000108
-
-#define REG_DSI_10nm_PHY_CMN_REVISION_ID0 0x00000000
-
-#define REG_DSI_10nm_PHY_CMN_REVISION_ID1 0x00000004
-
-#define REG_DSI_10nm_PHY_CMN_REVISION_ID2 0x00000008
-
-#define REG_DSI_10nm_PHY_CMN_REVISION_ID3 0x0000000c
-
-#define REG_DSI_10nm_PHY_CMN_CLK_CFG0 0x00000010
-
-#define REG_DSI_10nm_PHY_CMN_CLK_CFG1 0x00000014
-
-#define REG_DSI_10nm_PHY_CMN_GLBL_CTRL 0x00000018
-
-#define REG_DSI_10nm_PHY_CMN_RBUF_CTRL 0x0000001c
-
-#define REG_DSI_10nm_PHY_CMN_VREG_CTRL 0x00000020
-
-#define REG_DSI_10nm_PHY_CMN_CTRL_0 0x00000024
-
-#define REG_DSI_10nm_PHY_CMN_CTRL_1 0x00000028
-
-#define REG_DSI_10nm_PHY_CMN_CTRL_2 0x0000002c
-
-#define REG_DSI_10nm_PHY_CMN_LANE_CFG0 0x00000030
-
-#define REG_DSI_10nm_PHY_CMN_LANE_CFG1 0x00000034
-
-#define REG_DSI_10nm_PHY_CMN_PLL_CNTRL 0x00000038
-
-#define REG_DSI_10nm_PHY_CMN_LANE_CTRL0 0x00000098
-
-#define REG_DSI_10nm_PHY_CMN_LANE_CTRL1 0x0000009c
-
-#define REG_DSI_10nm_PHY_CMN_LANE_CTRL2 0x000000a0
-
-#define REG_DSI_10nm_PHY_CMN_LANE_CTRL3 0x000000a4
-
-#define REG_DSI_10nm_PHY_CMN_LANE_CTRL4 0x000000a8
-
-#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0 0x000000ac
-
-#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1 0x000000b0
-
-#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2 0x000000b4
-
-#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3 0x000000b8
-
-#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4 0x000000bc
-
-#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5 0x000000c0
-
-#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6 0x000000c4
-
-#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7 0x000000c8
-
-#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8 0x000000cc
-
-#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9 0x000000d0
-
-#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10 0x000000d4
-
-#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11 0x000000d8
-
-#define REG_DSI_10nm_PHY_CMN_PHY_STATUS 0x000000ec
-
-#define REG_DSI_10nm_PHY_CMN_LANE_STATUS0 0x000000f4
-
-#define REG_DSI_10nm_PHY_CMN_LANE_STATUS1 0x000000f8
-
-static inline uint32_t REG_DSI_10nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_10nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_10nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_10nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_10nm_PHY_LN_CFG3(uint32_t i0) { return 0x0000000c + 0x80*i0; }
-
-static inline uint32_t REG_DSI_10nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000010 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_10nm_PHY_LN_PIN_SWAP(uint32_t i0) { return 0x00000014 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(uint32_t i0) { return 0x00000018 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(uint32_t i0) { return 0x0000001c + 0x80*i0; }
-
-static inline uint32_t REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(uint32_t i0) { return 0x00000020 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(uint32_t i0) { return 0x00000024 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_10nm_PHY_LN_LPRX_CTRL(uint32_t i0) { return 0x00000028 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_10nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x0000002c + 0x80*i0; }
-
-#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE 0x00000000
-
-#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO 0x00000004
-
-#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE 0x00000010
-
-#define REG_DSI_10nm_PHY_PLL_DSM_DIVIDER 0x0000001c
-
-#define REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER 0x00000020
-
-#define REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES 0x00000024
-
-#define REG_DSI_10nm_PHY_PLL_CMODE 0x0000002c
-
-#define REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS 0x00000030
-
-#define REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE 0x00000054
-
-#define REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE 0x00000064
-
-#define REG_DSI_10nm_PHY_PLL_PFILT 0x0000007c
-
-#define REG_DSI_10nm_PHY_PLL_IFILT 0x00000080
-
-#define REG_DSI_10nm_PHY_PLL_OUTDIV 0x00000094
-
-#define REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE 0x000000a4
-
-#define REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE 0x000000a8
-
-#define REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO 0x000000b4
-
-#define REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1 0x000000cc
-
-#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1 0x000000d0
-
-#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1 0x000000d4
-
-#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1 0x000000d8
-
-#define REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1 0x0000010c
-
-#define REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1 0x00000110
-
-#define REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1 0x00000114
-
-#define REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1 0x00000118
-
-#define REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1 0x0000011c
-
-#define REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1 0x00000120
-
-#define REG_DSI_10nm_PHY_PLL_SSC_CONTROL 0x0000013c
-
-#define REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE 0x00000140
-
-#define REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1 0x00000144
-
-#define REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1 0x0000014c
-
-#define REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1 0x00000154
-
-#define REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1 0x0000015c
-
-#define REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 0x00000164
-
-#define REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE 0x00000180
-
-#define REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY 0x00000184
-
-#define REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS 0x0000018c
-
-#define REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE 0x000001a0
-
-#define REG_DSI_7nm_PHY_CMN_REVISION_ID0 0x00000000
-
-#define REG_DSI_7nm_PHY_CMN_REVISION_ID1 0x00000004
-
-#define REG_DSI_7nm_PHY_CMN_REVISION_ID2 0x00000008
-
-#define REG_DSI_7nm_PHY_CMN_REVISION_ID3 0x0000000c
-
-#define REG_DSI_7nm_PHY_CMN_CLK_CFG0 0x00000010
-
-#define REG_DSI_7nm_PHY_CMN_CLK_CFG1 0x00000014
-
-#define REG_DSI_7nm_PHY_CMN_GLBL_CTRL 0x00000018
-
-#define REG_DSI_7nm_PHY_CMN_RBUF_CTRL 0x0000001c
-
-#define REG_DSI_7nm_PHY_CMN_VREG_CTRL_0 0x00000020
-
-#define REG_DSI_7nm_PHY_CMN_CTRL_0 0x00000024
-
-#define REG_DSI_7nm_PHY_CMN_CTRL_1 0x00000028
-
-#define REG_DSI_7nm_PHY_CMN_CTRL_2 0x0000002c
-
-#define REG_DSI_7nm_PHY_CMN_CTRL_3 0x00000030
-
-#define REG_DSI_7nm_PHY_CMN_LANE_CFG0 0x00000034
-
-#define REG_DSI_7nm_PHY_CMN_LANE_CFG1 0x00000038
-
-#define REG_DSI_7nm_PHY_CMN_PLL_CNTRL 0x0000003c
-
-#define REG_DSI_7nm_PHY_CMN_DPHY_SOT 0x00000040
-
-#define REG_DSI_7nm_PHY_CMN_LANE_CTRL0 0x000000a0
-
-#define REG_DSI_7nm_PHY_CMN_LANE_CTRL1 0x000000a4
-
-#define REG_DSI_7nm_PHY_CMN_LANE_CTRL2 0x000000a8
-
-#define REG_DSI_7nm_PHY_CMN_LANE_CTRL3 0x000000ac
-
-#define REG_DSI_7nm_PHY_CMN_LANE_CTRL4 0x000000b0
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0 0x000000b4
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1 0x000000b8
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2 0x000000bc
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3 0x000000c0
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4 0x000000c4
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5 0x000000c8
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6 0x000000cc
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7 0x000000d0
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8 0x000000d4
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9 0x000000d8
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10 0x000000dc
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11 0x000000e0
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12 0x000000e4
-
-#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13 0x000000e8
-
-#define REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0 0x000000ec
-
-#define REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_1 0x000000f0
-
-#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL 0x000000f4
-
-#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL 0x000000f8
-
-#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_MID_CTRL 0x000000fc
-
-#define REG_DSI_7nm_PHY_CMN_GLBL_LPTX_STR_CTRL 0x00000100
-
-#define REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0 0x00000104
-
-#define REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_1 0x00000108
-
-#define REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL 0x0000010c
-
-#define REG_DSI_7nm_PHY_CMN_VREG_CTRL_1 0x00000110
-
-#define REG_DSI_7nm_PHY_CMN_CTRL_4 0x00000114
-
-#define REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4 0x00000128
-
-#define REG_DSI_7nm_PHY_CMN_PHY_STATUS 0x00000140
-
-#define REG_DSI_7nm_PHY_CMN_LANE_STATUS0 0x00000148
-
-#define REG_DSI_7nm_PHY_CMN_LANE_STATUS1 0x0000014c
-
-static inline uint32_t REG_DSI_7nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_7nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_7nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_7nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_7nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000000c + 0x80*i0; }
-
-static inline uint32_t REG_DSI_7nm_PHY_LN_PIN_SWAP(uint32_t i0) { return 0x00000010 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_7nm_PHY_LN_LPRX_CTRL(uint32_t i0) { return 0x00000014 + 0x80*i0; }
-
-static inline uint32_t REG_DSI_7nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x00000018 + 0x80*i0; }
-
-#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_ONE 0x00000000
-
-#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO 0x00000004
-
-#define REG_DSI_7nm_PHY_PLL_INT_LOOP_SETTINGS 0x00000008
-
-#define REG_DSI_7nm_PHY_PLL_INT_LOOP_SETTINGS_TWO 0x0000000c
-
-#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE 0x00000010
-
-#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FOUR 0x00000014
-
-#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE 0x00000018
-
-#define REG_DSI_7nm_PHY_PLL_INT_LOOP_CONTROLS 0x0000001c
-
-#define REG_DSI_7nm_PHY_PLL_DSM_DIVIDER 0x00000020
-
-#define REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER 0x00000024
-
-#define REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES 0x00000028
-
-#define REG_DSI_7nm_PHY_PLL_FREQ_UPDATE_CONTROL_OVERRIDES 0x0000002c
-
-#define REG_DSI_7nm_PHY_PLL_CMODE 0x00000030
-
-#define REG_DSI_7nm_PHY_PLL_PSM_CTRL 0x00000034
-
-#define REG_DSI_7nm_PHY_PLL_RSM_CTRL 0x00000038
-
-#define REG_DSI_7nm_PHY_PLL_VCO_TUNE_MAP 0x0000003c
-
-#define REG_DSI_7nm_PHY_PLL_PLL_CNTRL 0x00000040
-
-#define REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS 0x00000044
-
-#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_TIMER_LOW 0x00000048
-
-#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_TIMER_HIGH 0x0000004c
-
-#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS 0x00000050
-
-#define REG_DSI_7nm_PHY_PLL_BAND_SEL_MIN 0x00000054
-
-#define REG_DSI_7nm_PHY_PLL_BAND_SEL_MAX 0x00000058
-
-#define REG_DSI_7nm_PHY_PLL_BAND_SEL_PFILT 0x0000005c
-
-#define REG_DSI_7nm_PHY_PLL_BAND_SEL_IFILT 0x00000060
-
-#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_TWO 0x00000064
-
-#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE 0x00000068
-
-#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_FOUR 0x0000006c
-
-#define REG_DSI_7nm_PHY_PLL_BAND_SEL_ICODE_HIGH 0x00000070
-
-#define REG_DSI_7nm_PHY_PLL_BAND_SEL_ICODE_LOW 0x00000074
-
-#define REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE 0x00000078
-
-#define REG_DSI_7nm_PHY_PLL_FREQ_DETECT_THRESH 0x0000007c
-
-#define REG_DSI_7nm_PHY_PLL_FREQ_DET_REFCLK_HIGH 0x00000080
-
-#define REG_DSI_7nm_PHY_PLL_FREQ_DET_REFCLK_LOW 0x00000084
-
-#define REG_DSI_7nm_PHY_PLL_FREQ_DET_PLLCLK_HIGH 0x00000088
-
-#define REG_DSI_7nm_PHY_PLL_FREQ_DET_PLLCLK_LOW 0x0000008c
-
-#define REG_DSI_7nm_PHY_PLL_PFILT 0x00000090
-
-#define REG_DSI_7nm_PHY_PLL_IFILT 0x00000094
-
-#define REG_DSI_7nm_PHY_PLL_PLL_GAIN 0x00000098
-
-#define REG_DSI_7nm_PHY_PLL_ICODE_LOW 0x0000009c
-
-#define REG_DSI_7nm_PHY_PLL_ICODE_HIGH 0x000000a0
-
-#define REG_DSI_7nm_PHY_PLL_LOCKDET 0x000000a4
-
-#define REG_DSI_7nm_PHY_PLL_OUTDIV 0x000000a8
-
-#define REG_DSI_7nm_PHY_PLL_FASTLOCK_CONTROL 0x000000ac
-
-#define REG_DSI_7nm_PHY_PLL_PASS_OUT_OVERRIDE_ONE 0x000000b0
-
-#define REG_DSI_7nm_PHY_PLL_PASS_OUT_OVERRIDE_TWO 0x000000b4
-
-#define REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE 0x000000b8
-
-#define REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE 0x000000bc
-
-#define REG_DSI_7nm_PHY_PLL_RATE_CHANGE 0x000000c0
-
-#define REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS 0x000000c4
-
-#define REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO 0x000000c8
-
-#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START 0x000000cc
-
-#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW 0x000000d0
-
-#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID 0x000000d4
-
-#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH 0x000000d8
-
-#define REG_DSI_7nm_PHY_PLL_DEC_FRAC_MUXES 0x000000dc
-
-#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1 0x000000e0
-
-#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1 0x000000e4
-
-#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1 0x000000e8
-
-#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1 0x000000ec
-
-#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_2 0x000000f0
-
-#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_2 0x000000f4
-
-#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_2 0x000000f8
-
-#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_2 0x000000fc
-
-#define REG_DSI_7nm_PHY_PLL_MASH_CONTROL 0x00000100
-
-#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW 0x00000104
-
-#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH 0x00000108
-
-#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW 0x0000010c
-
-#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH 0x00000110
-
-#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW 0x00000114
-
-#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH 0x00000118
-
-#define REG_DSI_7nm_PHY_PLL_SSC_MUX_CONTROL 0x0000011c
-
-#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1 0x00000120
-
-#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1 0x00000124
-
-#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1 0x00000128
-
-#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1 0x0000012c
-
-#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1 0x00000130
-
-#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1 0x00000134
-
-#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_2 0x00000138
-
-#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_2 0x0000013c
-
-#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_2 0x00000140
-
-#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_2 0x00000144
-
-#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_2 0x00000148
-
-#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_2 0x0000014c
-
-#define REG_DSI_7nm_PHY_PLL_SSC_CONTROL 0x00000150
-
-#define REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE 0x00000154
-
-#define REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1 0x00000158
-
-#define REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_2 0x0000015c
-
-#define REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1 0x00000160
-
-#define REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_2 0x00000164
-
-#define REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1 0x00000168
-
-#define REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_2 0x0000016c
-
-#define REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1 0x00000170
-
-#define REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_2 0x00000174
-
-#define REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 0x00000178
-
-#define REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_2 0x0000017c
-
-#define REG_DSI_7nm_PHY_PLL_PLL_FASTLOCK_EN_BAND 0x00000180
-
-#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_MID 0x00000184
-
-#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_HIGH 0x00000188
-
-#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_MUX 0x0000018c
-
-#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE 0x00000190
-
-#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY 0x00000194
-
-#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_MIN_DELAY 0x00000198
-
-#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS 0x0000019c
-
-#define REG_DSI_7nm_PHY_PLL_SPARE_AND_JPC_OVERRIDES 0x000001a0
-
-#define REG_DSI_7nm_PHY_PLL_BIAS_CONTROL_1 0x000001a4
-
-#define REG_DSI_7nm_PHY_PLL_BIAS_CONTROL_2 0x000001a8
-
-#define REG_DSI_7nm_PHY_PLL_ALOG_OBSV_BUS_CTRL_1 0x000001ac
-
-#define REG_DSI_7nm_PHY_PLL_COMMON_STATUS_ONE 0x000001b0
-
-#define REG_DSI_7nm_PHY_PLL_COMMON_STATUS_TWO 0x000001b4
-
-#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL 0x000001b8
-
-#define REG_DSI_7nm_PHY_PLL_ICODE_ACCUM_STATUS_LOW 0x000001bc
-
-#define REG_DSI_7nm_PHY_PLL_ICODE_ACCUM_STATUS_HIGH 0x000001c0
-
-#define REG_DSI_7nm_PHY_PLL_FD_OUT_LOW 0x000001c4
-
-#define REG_DSI_7nm_PHY_PLL_FD_OUT_HIGH 0x000001c8
-
-#define REG_DSI_7nm_PHY_PLL_ALOG_OBSV_BUS_STATUS_1 0x000001cc
-
-#define REG_DSI_7nm_PHY_PLL_PLL_MISC_CONFIG 0x000001d0
-
-#define REG_DSI_7nm_PHY_PLL_FLL_CONFIG 0x000001d4
-
-#define REG_DSI_7nm_PHY_PLL_FLL_FREQ_ACQ_TIME 0x000001d8
-
-#define REG_DSI_7nm_PHY_PLL_FLL_CODE0 0x000001dc
-
-#define REG_DSI_7nm_PHY_PLL_FLL_CODE1 0x000001e0
-
-#define REG_DSI_7nm_PHY_PLL_FLL_GAIN0 0x000001e4
-
-#define REG_DSI_7nm_PHY_PLL_FLL_GAIN1 0x000001e8
-
-#define REG_DSI_7nm_PHY_PLL_SW_RESET 0x000001ec
-
-#define REG_DSI_7nm_PHY_PLL_FAST_PWRUP 0x000001f0
-
-#define REG_DSI_7nm_PHY_PLL_LOCKTIME0 0x000001f4
-
-#define REG_DSI_7nm_PHY_PLL_LOCKTIME1 0x000001f8
-
-#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS_SEL 0x000001fc
-
-#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS0 0x00000200
-
-#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS1 0x00000204
-
-#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS2 0x00000208
-
-#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS3 0x0000020c
-
-#define REG_DSI_7nm_PHY_PLL_ANALOG_FLL_CONTROL_OVERRIDES 0x00000210
-
-#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG 0x00000214
-
-#define REG_DSI_7nm_PHY_PLL_VCO_CAL_CODE1_MODE0_STATUS 0x00000218
-
-#define REG_DSI_7nm_PHY_PLL_VCO_CAL_CODE1_MODE1_STATUS 0x0000021c
-
-#define REG_DSI_7nm_PHY_PLL_RESET_SM_STATUS 0x00000220
-
-#define REG_DSI_7nm_PHY_PLL_TDC_OFFSET 0x00000224
-
-#define REG_DSI_7nm_PHY_PLL_PS3_PWRDOWN_CONTROLS 0x00000228
-
-#define REG_DSI_7nm_PHY_PLL_PS4_PWRDOWN_CONTROLS 0x0000022c
-
-#define REG_DSI_7nm_PHY_PLL_PLL_RST_CONTROLS 0x00000230
-
-#define REG_DSI_7nm_PHY_PLL_GEAR_BAND_SELECT_CONTROLS 0x00000234
-
-#define REG_DSI_7nm_PHY_PLL_PSM_CLK_CONTROLS 0x00000238
-
-#define REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES_2 0x0000023c
-
-#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1 0x00000240
-
-#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG_2 0x00000244
-
-#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_1 0x00000248
-
-#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_2 0x0000024c
-
-#define REG_DSI_7nm_PHY_PLL_CMODE_1 0x00000250
-
-#define REG_DSI_7nm_PHY_PLL_CMODE_2 0x00000254
-
-#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1 0x00000258
-
-#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_2 0x0000025c
-
-#define REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE 0x00000260
+#define REG_DSI_CPHY_MODE_CTRL 0x000002d4
#endif /* DSI_XML */
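
Note on the generated helpers removed above (and re-added in the per-PHY headers introduced below): every multi-bit field gets a __MASK/__SHIFT pair plus an inline packing function, and a register value is built by OR-ing the packed fields together. A minimal sketch of that usage follows; the function name, the phy_base pointer and the numeric field values are illustrative assumptions, only the DSI_20nm_PHY_TIMING_CTRL_9_* macros and the REG_* offset come from the generated header itself.

#include <linux/io.h>

/* Sketch only: build a value from the generated field packers and write it
 * at the generated REG_* offset. "phy_base" and the field values are
 * placeholders, not taken from this patch.
 */
static void example_write_timing9(void __iomem *phy_base)
{
	u32 val = DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(3) |
		  DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(0);

	writel(val, phy_base + REG_DSI_20nm_PHY_TIMING_CTRL_9);
}
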
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 8a10e4343281..ed504fe5074f 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -102,6 +102,7 @@ struct msm_dsi_host {
int id;
void __iomem *ctrl_base;
+ phys_addr_t ctrl_size;
struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
struct clk *bus_clks[DSI_BUS_CLK_MAX];
@@ -113,8 +114,6 @@ struct msm_dsi_host {
struct clk *pixel_clk_src;
struct clk *byte_intf_clk;
- struct opp_table *opp_table;
-
u32 byte_clk_rate;
u32 pixel_clk_rate;
u32 esc_clk_rate;
@@ -1092,7 +1091,7 @@ int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
uint64_t iova;
u8 *data;
- data = msm_gem_kernel_new(dev, size, MSM_BO_UNCACHED,
+ data = msm_gem_kernel_new(dev, size, MSM_BO_WC,
priv->kms->aspace,
&msm_host->tx_gem_obj, &iova);
@@ -1839,7 +1838,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
goto fail;
}
- msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL");
+ msm_host->ctrl_base = msm_ioremap_size(pdev, "dsi_ctrl", "DSI CTRL", &msm_host->ctrl_size);
if (IS_ERR(msm_host->ctrl_base)) {
pr_err("%s: unable to map Dsi ctrl base\n", __func__);
ret = PTR_ERR(msm_host->ctrl_base);
@@ -1884,14 +1883,13 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
goto fail;
}
- msm_host->opp_table = dev_pm_opp_set_clkname(&pdev->dev, "byte");
- if (IS_ERR(msm_host->opp_table))
- return PTR_ERR(msm_host->opp_table);
+ ret = devm_pm_opp_set_clkname(&pdev->dev, "byte");
+ if (ret)
+ return ret;
/* OPP table is optional */
- ret = dev_pm_opp_of_add_table(&pdev->dev);
+ ret = devm_pm_opp_of_add_table(&pdev->dev);
if (ret && ret != -ENODEV) {
dev_err(&pdev->dev, "invalid OPP table in device tree\n");
- dev_pm_opp_put_clkname(msm_host->opp_table);
return ret;
}
@@ -1930,8 +1928,6 @@ void msm_dsi_host_destroy(struct mipi_dsi_host *host)
mutex_destroy(&msm_host->cmd_mutex);
mutex_destroy(&msm_host->dev_mutex);
- dev_pm_opp_of_remove_table(&msm_host->pdev->dev);
- dev_pm_opp_put_clkname(msm_host->opp_table);
pm_runtime_disable(&msm_host->pdev->dev);
}
@@ -2487,3 +2483,15 @@ struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host)
return of_drm_find_bridge(msm_host->device_node);
}
+
+void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ pm_runtime_get_sync(&msm_host->pdev->dev);
+
+ msm_disp_snapshot_add_block(disp_state, msm_host->ctrl_size,
+ msm_host->ctrl_base, "dsi%d_ctrl", msm_host->id);
+
+ pm_runtime_put_sync(&msm_host->pdev->dev);
+}
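
The hunks above also convert the OPP setup from dev_pm_opp_set_clkname()/dev_pm_opp_of_add_table() to the devm_ variants, which is why the explicit cleanup in msm_dsi_host_destroy() goes away: the resources are released automatically when the device is unbound. A hedged sketch of the resulting probe-side pattern follows; example_opp_init() and its placement are assumptions for illustration, while the two devm_ calls and the "byte" clock name are taken from the patch.

#include <linux/platform_device.h>
#include <linux/pm_opp.h>

/* Sketch of the devm-managed OPP setup: no *_put_clkname()/*_remove_table()
 * calls are needed on teardown because devres releases them at unbind time.
 */
static int example_opp_init(struct platform_device *pdev)
{
	int ret;

	ret = devm_pm_opp_set_clkname(&pdev->dev, "byte");
	if (ret)
		return ret;

	/* The OPP table in DT is optional; -ENODEV only means it is absent */
	ret = devm_pm_opp_of_add_table(&pdev->dev);
	if (ret && ret != -ENODEV)
		return ret;

	return 0;
}
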
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index cd016576e8c5..4ebfedc4a9ac 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -373,14 +373,14 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
if (!msm_dsi_device_connected(msm_dsi))
return;
- ret = dsi_mgr_phy_enable(id, phy_shared_timings);
- if (ret)
- goto phy_en_fail;
-
/* Do nothing with the host if it is slave-DSI in case of dual DSI */
if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
return;
+ ret = dsi_mgr_phy_enable(id, phy_shared_timings);
+ if (ret)
+ goto phy_en_fail;
+
ret = msm_dsi_host_power_on(host, &phy_shared_timings[id], is_dual_dsi);
if (ret) {
pr_err("%s: power on host %d failed, %d\n", __func__, id, ret);
@@ -817,8 +817,8 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
ret = dsi_mgr_setup_components(id);
if (ret) {
- pr_err("%s: failed to register mipi dsi host for DSI %d\n",
- __func__, id);
+ pr_err("%s: failed to register mipi dsi host for DSI %d: %d\n",
+ __func__, id, ret);
goto fail;
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_10nm.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_10nm.xml.h
new file mode 100644
index 000000000000..8872d77027dd
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_phy_10nm.xml.h
@@ -0,0 +1,228 @@
+#ifndef DSI_PHY_10NM_XML
+#define DSI_PHY_10NM_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 981 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 15291 bytes, from 2021-06-15 22:36:13)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 10953 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_5nm.xml ( 10900 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-02-18 16:45:44)
+
+Copyright (C) 2013-2021 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+#define REG_DSI_10nm_PHY_CMN_REVISION_ID0 0x00000000
+
+#define REG_DSI_10nm_PHY_CMN_REVISION_ID1 0x00000004
+
+#define REG_DSI_10nm_PHY_CMN_REVISION_ID2 0x00000008
+
+#define REG_DSI_10nm_PHY_CMN_REVISION_ID3 0x0000000c
+
+#define REG_DSI_10nm_PHY_CMN_CLK_CFG0 0x00000010
+
+#define REG_DSI_10nm_PHY_CMN_CLK_CFG1 0x00000014
+
+#define REG_DSI_10nm_PHY_CMN_GLBL_CTRL 0x00000018
+
+#define REG_DSI_10nm_PHY_CMN_RBUF_CTRL 0x0000001c
+
+#define REG_DSI_10nm_PHY_CMN_VREG_CTRL 0x00000020
+
+#define REG_DSI_10nm_PHY_CMN_CTRL_0 0x00000024
+
+#define REG_DSI_10nm_PHY_CMN_CTRL_1 0x00000028
+
+#define REG_DSI_10nm_PHY_CMN_CTRL_2 0x0000002c
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CFG0 0x00000030
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CFG1 0x00000034
+
+#define REG_DSI_10nm_PHY_CMN_PLL_CNTRL 0x00000038
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL0 0x00000098
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL1 0x0000009c
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL2 0x000000a0
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL3 0x000000a4
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL4 0x000000a8
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0 0x000000ac
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1 0x000000b0
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2 0x000000b4
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3 0x000000b8
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4 0x000000bc
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5 0x000000c0
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6 0x000000c4
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7 0x000000c8
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8 0x000000cc
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9 0x000000d0
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10 0x000000d4
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11 0x000000d8
+
+#define REG_DSI_10nm_PHY_CMN_PHY_STATUS 0x000000ec
+
+#define REG_DSI_10nm_PHY_CMN_LANE_STATUS0 0x000000f4
+
+#define REG_DSI_10nm_PHY_CMN_LANE_STATUS1 0x000000f8
+
+static inline uint32_t REG_DSI_10nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_CFG3(uint32_t i0) { return 0x0000000c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000010 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_PIN_SWAP(uint32_t i0) { return 0x00000014 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(uint32_t i0) { return 0x00000018 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(uint32_t i0) { return 0x0000001c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(uint32_t i0) { return 0x00000020 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(uint32_t i0) { return 0x00000024 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_LPRX_CTRL(uint32_t i0) { return 0x00000028 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x0000002c + 0x80*i0; }
+
+#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE 0x00000000
+
+#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO 0x00000004
+
+#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE 0x00000010
+
+#define REG_DSI_10nm_PHY_PLL_DSM_DIVIDER 0x0000001c
+
+#define REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER 0x00000020
+
+#define REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES 0x00000024
+
+#define REG_DSI_10nm_PHY_PLL_CMODE 0x0000002c
+
+#define REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS 0x00000030
+
+#define REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE 0x00000054
+
+#define REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE 0x00000064
+
+#define REG_DSI_10nm_PHY_PLL_PFILT 0x0000007c
+
+#define REG_DSI_10nm_PHY_PLL_IFILT 0x00000080
+
+#define REG_DSI_10nm_PHY_PLL_OUTDIV 0x00000094
+
+#define REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE 0x000000a4
+
+#define REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE 0x000000a8
+
+#define REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO 0x000000b4
+
+#define REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1 0x000000cc
+
+#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1 0x000000d0
+
+#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1 0x000000d4
+
+#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1 0x000000d8
+
+#define REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1 0x0000010c
+
+#define REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1 0x00000110
+
+#define REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1 0x00000114
+
+#define REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1 0x00000118
+
+#define REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1 0x0000011c
+
+#define REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1 0x00000120
+
+#define REG_DSI_10nm_PHY_PLL_SSC_CONTROL 0x0000013c
+
+#define REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE 0x00000140
+
+#define REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1 0x00000144
+
+#define REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1 0x0000014c
+
+#define REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1 0x00000154
+
+#define REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1 0x0000015c
+
+#define REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 0x00000164
+
+#define REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE 0x00000180
+
+#define REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY 0x00000184
+
+#define REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS 0x0000018c
+
+#define REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE 0x000001a0
+
+
+#endif /* DSI_PHY_10NM_XML */
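
The REG_DSI_10nm_PHY_LN_*() helpers in this new header take a lane index and return the offset inside that lane's 0x80-byte register block. A short sketch of iterating the data lanes with them (the function name, the lane_base pointer and the num_lanes count are placeholders, not code from this patch):

#include <linux/io.h>

/* Sketch only: program TX_DCTRL for each data lane; the generated helper
 * expands to 0x2c + 0x80 * lane.
 */
static void example_set_tx_dctrl(void __iomem *lane_base, int num_lanes, u32 val)
{
	int i;

	for (i = 0; i < num_lanes; i++)
		writel(val, lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(i));
}
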
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_14nm.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_14nm.xml.h
new file mode 100644
index 000000000000..0d9a5ccfb808
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_phy_14nm.xml.h
@@ -0,0 +1,310 @@
+#ifndef DSI_PHY_14NM_XML
+#define DSI_PHY_14NM_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 981 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 15291 bytes, from 2021-06-15 22:36:13)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 10953 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_5nm.xml ( 10900 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-02-18 16:45:44)
+
+Copyright (C) 2013-2021 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+#define REG_DSI_14nm_PHY_CMN_REVISION_ID0 0x00000000
+
+#define REG_DSI_14nm_PHY_CMN_REVISION_ID1 0x00000004
+
+#define REG_DSI_14nm_PHY_CMN_REVISION_ID2 0x00000008
+
+#define REG_DSI_14nm_PHY_CMN_REVISION_ID3 0x0000000c
+
+#define REG_DSI_14nm_PHY_CMN_CLK_CFG0 0x00000010
+#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK 0x000000f0
+#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT 4
+static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK;
+}
+#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK 0x000000f0
+#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT 4
+static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK;
+}
+
+#define REG_DSI_14nm_PHY_CMN_CLK_CFG1 0x00000014
+#define DSI_14nm_PHY_CMN_CLK_CFG1_DSICLK_SEL 0x00000001
+
+#define REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL 0x00000018
+#define DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000004
+
+#define REG_DSI_14nm_PHY_CMN_CTRL_0 0x0000001c
+
+#define REG_DSI_14nm_PHY_CMN_CTRL_1 0x00000020
+
+#define REG_DSI_14nm_PHY_CMN_HW_TRIGGER 0x00000024
+
+#define REG_DSI_14nm_PHY_CMN_SW_CFG0 0x00000028
+
+#define REG_DSI_14nm_PHY_CMN_SW_CFG1 0x0000002c
+
+#define REG_DSI_14nm_PHY_CMN_SW_CFG2 0x00000030
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG0 0x00000034
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG1 0x00000038
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG2 0x0000003c
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG3 0x00000040
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG4 0x00000044
+
+#define REG_DSI_14nm_PHY_CMN_PLL_CNTRL 0x00000048
+#define DSI_14nm_PHY_CMN_PLL_CNTRL_PLL_START 0x00000001
+
+#define REG_DSI_14nm_PHY_CMN_LDO_CNTRL 0x0000004c
+#define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK 0x0000003f
+#define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT 0
+static inline uint32_t DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT) & DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK 0x000000c0
+#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT 6
+static inline uint32_t DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT) & DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN 0x00000001
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_CFG3(uint32_t i0) { return 0x0000000c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000010 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TEST_STR(uint32_t i0) { return 0x00000014 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(uint32_t i0) { return 0x00000018 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT 0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(uint32_t i0) { return 0x0000001c + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT 0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(uint32_t i0) { return 0x00000020 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT 0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(uint32_t i0) { return 0x00000024 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT 0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(uint32_t i0) { return 0x00000028 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT 0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(uint32_t i0) { return 0x0000002c + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK 0x00000007
+#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT 0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK;
+}
+#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK 0x00000070
+#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT 4
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(uint32_t i0) { return 0x00000030 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK 0x00000007
+#define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT 0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(uint32_t i0) { return 0x00000034 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(uint32_t i0) { return 0x00000038 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(uint32_t i0) { return 0x0000003c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_VREG_CNTRL(uint32_t i0) { return 0x00000064 + 0x80*i0; }
+
+#define REG_DSI_14nm_PHY_PLL_IE_TRIM 0x00000000
+
+#define REG_DSI_14nm_PHY_PLL_IP_TRIM 0x00000004
+
+#define REG_DSI_14nm_PHY_PLL_IPTAT_TRIM 0x00000010
+
+#define REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN 0x0000001c
+
+#define REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET 0x00000028
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL 0x0000002c
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2 0x00000030
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL3 0x00000034
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL4 0x00000038
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5 0x0000003c
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1 0x00000040
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2 0x00000044
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_COUNT1 0x00000048
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_COUNT2 0x0000004c
+
+#define REG_DSI_14nm_PHY_PLL_VREF_CFG1 0x0000005c
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_CODE 0x00000058
+
+#define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1 0x0000006c
+
+#define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2 0x00000070
+
+#define REG_DSI_14nm_PHY_PLL_VCO_COUNT1 0x00000074
+
+#define REG_DSI_14nm_PHY_PLL_VCO_COUNT2 0x00000078
+
+#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1 0x0000007c
+
+#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2 0x00000080
+
+#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3 0x00000084
+
+#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN 0x00000088
+
+#define REG_DSI_14nm_PHY_PLL_PLL_VCO_TUNE 0x0000008c
+
+#define REG_DSI_14nm_PHY_PLL_DEC_START 0x00000090
+
+#define REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER 0x00000094
+
+#define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1 0x00000098
+
+#define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2 0x0000009c
+
+#define REG_DSI_14nm_PHY_PLL_SSC_PER1 0x000000a0
+
+#define REG_DSI_14nm_PHY_PLL_SSC_PER2 0x000000a4
+
+#define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1 0x000000a8
+
+#define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2 0x000000ac
+
+#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1 0x000000b4
+
+#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2 0x000000b8
+
+#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3 0x000000bc
+
+#define REG_DSI_14nm_PHY_PLL_TXCLK_EN 0x000000c0
+
+#define REG_DSI_14nm_PHY_PLL_PLL_CRCTRL 0x000000c4
+
+#define REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS 0x000000cc
+
+#define REG_DSI_14nm_PHY_PLL_PLL_MISC1 0x000000e8
+
+#define REG_DSI_14nm_PHY_PLL_CP_SET_CUR 0x000000f0
+
+#define REG_DSI_14nm_PHY_PLL_PLL_ICPMSET 0x000000f4
+
+#define REG_DSI_14nm_PHY_PLL_PLL_ICPCSET 0x000000f8
+
+#define REG_DSI_14nm_PHY_PLL_PLL_ICP_SET 0x000000fc
+
+#define REG_DSI_14nm_PHY_PLL_PLL_LPF1 0x00000100
+
+#define REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV 0x00000104
+
+#define REG_DSI_14nm_PHY_PLL_PLL_BANDGAP 0x00000108
+
+
+#endif /* DSI_PHY_14NM_XML */
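
The per-lane helpers in the 14nm header follow the usual rules-ng-ng pattern: REG_DSI_14nm_PHY_LN_*(i0) computes a lane-indexed offset (0x80 per lane) and the *__MASK/*__SHIFT inlines pack a field value into that register. As a minimal usage sketch, not part of this patch, and assuming a writel()-style MMIO accessor from <linux/io.h> plus an illustrative function name and base pointer, lane timing could be programmed like this:

	/* Sketch only: function name, base pointer and call ordering are assumptions. */
	static inline void dsi_14nm_phy_lane_timing_sketch(void __iomem *base, int lane,
							   u32 hs_exit, u32 ta_go, u32 ta_sure)
	{
		/* REG_DSI_14nm_PHY_LN_TIMING_CTRL_n(lane) = lane * 0x80 + register offset */
		writel(DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(hs_exit),
		       base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(lane));
		/* TA_GO and TA_SURE occupy separate bitfields of the same register */
		writel(DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(ta_go) |
		       DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(ta_sure),
		       base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(lane));
	}
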
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_20nm.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_20nm.xml.h
new file mode 100644
index 000000000000..15c7a5852c7a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_phy_20nm.xml.h
@@ -0,0 +1,238 @@
+#ifndef DSI_PHY_20NM_XML
+#define DSI_PHY_20NM_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 981 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 15291 bytes, from 2021-06-15 22:36:13)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 10953 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_5nm.xml ( 10900 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-02-18 16:45:44)
+
+Copyright (C) 2013-2021 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+static inline uint32_t REG_DSI_20nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_3(uint32_t i0) { return 0x0000000c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_4(uint32_t i0) { return 0x00000010 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000014 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_DEBUG_SEL(uint32_t i0) { return 0x00000018 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x0000001c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000020 + 0x40*i0; }
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_0 0x00000100
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_1 0x00000104
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_2 0x00000108
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_3 0x0000010c
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_4 0x00000110
+
+#define REG_DSI_20nm_PHY_LNCK_TEST_DATAPATH 0x00000114
+
+#define REG_DSI_20nm_PHY_LNCK_DEBUG_SEL 0x00000118
+
+#define REG_DSI_20nm_PHY_LNCK_TEST_STR0 0x0000011c
+
+#define REG_DSI_20nm_PHY_LNCK_TEST_STR1 0x00000120
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_0 0x00000140
+#define DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_1 0x00000144
+#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_2 0x00000148
+#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_3 0x0000014c
+#define DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8 0x00000001
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_4 0x00000150
+#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_5 0x00000154
+#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_6 0x00000158
+#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_7 0x0000015c
+#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_8 0x00000160
+#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_9 0x00000164
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK 0x00000007
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK;
+}
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK 0x00000070
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT 4
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_10 0x00000168
+#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK 0x00000007
+#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_11 0x0000016c
+#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK;
+}
+
+#define REG_DSI_20nm_PHY_CTRL_0 0x00000170
+
+#define REG_DSI_20nm_PHY_CTRL_1 0x00000174
+
+#define REG_DSI_20nm_PHY_CTRL_2 0x00000178
+
+#define REG_DSI_20nm_PHY_CTRL_3 0x0000017c
+
+#define REG_DSI_20nm_PHY_CTRL_4 0x00000180
+
+#define REG_DSI_20nm_PHY_STRENGTH_0 0x00000184
+
+#define REG_DSI_20nm_PHY_STRENGTH_1 0x00000188
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_0 0x000001b4
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_1 0x000001b8
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_2 0x000001bc
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_3 0x000001c0
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_4 0x000001c4
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_5 0x000001c8
+
+#define REG_DSI_20nm_PHY_GLBL_TEST_CTRL 0x000001d4
+#define DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000001
+
+#define REG_DSI_20nm_PHY_LDO_CNTRL 0x000001dc
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_0 0x00000000
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_1 0x00000004
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_2 0x00000008
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_3 0x0000000c
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_4 0x00000010
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_5 0x00000014
+
+#define REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018
+
+
+#endif /* DSI_PHY_20NM_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_28nm.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_28nm.xml.h
new file mode 100644
index 000000000000..5148f4d5b182
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_phy_28nm.xml.h
@@ -0,0 +1,385 @@
+#ifndef DSI_PHY_28NM_XML
+#define DSI_PHY_28NM_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 981 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 15291 bytes, from 2021-06-15 22:36:13)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 10953 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_5nm.xml ( 10900 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-02-18 16:45:44)
+
+Copyright (C) 2013-2021 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+static inline uint32_t REG_DSI_28nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_3(uint32_t i0) { return 0x0000000c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_4(uint32_t i0) { return 0x00000010 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000014 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_DEBUG_SEL(uint32_t i0) { return 0x00000018 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x0000001c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000020 + 0x40*i0; }
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_0 0x00000100
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_1 0x00000104
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_2 0x00000108
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_3 0x0000010c
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_4 0x00000110
+
+#define REG_DSI_28nm_PHY_LNCK_TEST_DATAPATH 0x00000114
+
+#define REG_DSI_28nm_PHY_LNCK_DEBUG_SEL 0x00000118
+
+#define REG_DSI_28nm_PHY_LNCK_TEST_STR0 0x0000011c
+
+#define REG_DSI_28nm_PHY_LNCK_TEST_STR1 0x00000120
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_0 0x00000140
+#define DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_1 0x00000144
+#define DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_2 0x00000148
+#define DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_3 0x0000014c
+#define DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8 0x00000001
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_4 0x00000150
+#define DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_5 0x00000154
+#define DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_6 0x00000158
+#define DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_7 0x0000015c
+#define DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_8 0x00000160
+#define DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_9 0x00000164
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__MASK 0x00000007
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__MASK;
+}
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__MASK 0x00000070
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT 4
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_10 0x00000168
+#define DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__MASK 0x00000007
+#define DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_11 0x0000016c
+#define DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK;
+}
+
+#define REG_DSI_28nm_PHY_CTRL_0 0x00000170
+
+#define REG_DSI_28nm_PHY_CTRL_1 0x00000174
+
+#define REG_DSI_28nm_PHY_CTRL_2 0x00000178
+
+#define REG_DSI_28nm_PHY_CTRL_3 0x0000017c
+
+#define REG_DSI_28nm_PHY_CTRL_4 0x00000180
+
+#define REG_DSI_28nm_PHY_STRENGTH_0 0x00000184
+
+#define REG_DSI_28nm_PHY_STRENGTH_1 0x00000188
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_0 0x000001b4
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_1 0x000001b8
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_2 0x000001bc
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_3 0x000001c0
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_4 0x000001c4
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_5 0x000001c8
+
+#define REG_DSI_28nm_PHY_GLBL_TEST_CTRL 0x000001d4
+#define DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000001
+
+#define REG_DSI_28nm_PHY_LDO_CNTRL 0x000001dc
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_0 0x00000000
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_1 0x00000004
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_2 0x00000008
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_3 0x0000000c
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_4 0x00000010
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_5 0x00000014
+
+#define REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018
+
+#define REG_DSI_28nm_PHY_PLL_REFCLK_CFG 0x00000000
+#define DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR 0x00000001
+
+#define REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG 0x00000004
+
+#define REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG 0x00000008
+
+#define REG_DSI_28nm_PHY_PLL_VCOLPF_CFG 0x0000000c
+
+#define REG_DSI_28nm_PHY_PLL_VREG_CFG 0x00000010
+#define DSI_28nm_PHY_PLL_VREG_CFG_POSTDIV1_BYPASS_B 0x00000002
+
+#define REG_DSI_28nm_PHY_PLL_PWRGEN_CFG 0x00000014
+
+#define REG_DSI_28nm_PHY_PLL_DMUX_CFG 0x00000018
+
+#define REG_DSI_28nm_PHY_PLL_AMUX_CFG 0x0000001c
+
+#define REG_DSI_28nm_PHY_PLL_GLB_CFG 0x00000020
+#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B 0x00000001
+#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B 0x00000002
+#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B 0x00000004
+#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE 0x00000008
+
+#define REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG 0x00000024
+
+#define REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG 0x00000028
+
+#define REG_DSI_28nm_PHY_PLL_LPFR_CFG 0x0000002c
+
+#define REG_DSI_28nm_PHY_PLL_LPFC1_CFG 0x00000030
+
+#define REG_DSI_28nm_PHY_PLL_LPFC2_CFG 0x00000034
+
+#define REG_DSI_28nm_PHY_PLL_SDM_CFG0 0x00000038
+#define DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__MASK 0x0000003f
+#define DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__MASK;
+}
+#define DSI_28nm_PHY_PLL_SDM_CFG0_BYP 0x00000040
+
+#define REG_DSI_28nm_PHY_PLL_SDM_CFG1 0x0000003c
+#define DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK 0x0000003f
+#define DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;
+}
+#define DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__MASK 0x00000040
+#define DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__SHIFT 6
+static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__MASK;
+}
+
+#define REG_DSI_28nm_PHY_PLL_SDM_CFG2 0x00000040
+#define DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__MASK 0x000000ff
+#define DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__MASK;
+}
+
+#define REG_DSI_28nm_PHY_PLL_SDM_CFG3 0x00000044
+#define DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__MASK 0x000000ff
+#define DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__MASK;
+}
+
+#define REG_DSI_28nm_PHY_PLL_SDM_CFG4 0x00000048
+
+#define REG_DSI_28nm_PHY_PLL_SSC_CFG0 0x0000004c
+
+#define REG_DSI_28nm_PHY_PLL_SSC_CFG1 0x00000050
+
+#define REG_DSI_28nm_PHY_PLL_SSC_CFG2 0x00000054
+
+#define REG_DSI_28nm_PHY_PLL_SSC_CFG3 0x00000058
+
+#define REG_DSI_28nm_PHY_PLL_LKDET_CFG0 0x0000005c
+
+#define REG_DSI_28nm_PHY_PLL_LKDET_CFG1 0x00000060
+
+#define REG_DSI_28nm_PHY_PLL_LKDET_CFG2 0x00000064
+
+#define REG_DSI_28nm_PHY_PLL_TEST_CFG 0x00000068
+#define DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET 0x00000001
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG0 0x0000006c
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG1 0x00000070
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG2 0x00000074
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG3 0x00000078
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG4 0x0000007c
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG5 0x00000080
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG6 0x00000084
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG7 0x00000088
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG8 0x0000008c
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG9 0x00000090
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG10 0x00000094
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG11 0x00000098
+
+#define REG_DSI_28nm_PHY_PLL_EFUSE_CFG 0x0000009c
+
+#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS_SEL 0x000000a0
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_42 0x000000a4
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_43 0x000000a8
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_44 0x000000ac
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_45 0x000000b0
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_46 0x000000b4
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_47 0x000000b8
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_48 0x000000bc
+
+#define REG_DSI_28nm_PHY_PLL_STATUS 0x000000c0
+#define DSI_28nm_PHY_PLL_STATUS_PLL_RDY 0x00000001
+
+#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS0 0x000000c4
+
+#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS1 0x000000c8
+
+#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS2 0x000000cc
+
+#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS3 0x000000d0
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_54 0x000000d4
+
+
+#endif /* DSI_PHY_28NM_XML */
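
The 28nm header also defines single-bit PLL controls such as the GLB_CFG power-down overrides and the ENABLE bit. A hedged sketch of how these bits are typically combined during PLL power-up; the two-step ordering and the pll_base pointer are assumptions for illustration, not taken from this patch:

	/* Sketch only: release the power-down overrides first, then set ENABLE. */
	writel(DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B |
	       DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B |
	       DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B,
	       pll_base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
	writel(DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B |
	       DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B |
	       DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B |
	       DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE,
	       pll_base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
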
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_28nm_8960.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_28nm_8960.xml.h
new file mode 100644
index 000000000000..446fe9ffc9c4
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_phy_28nm_8960.xml.h
@@ -0,0 +1,287 @@
+#ifndef DSI_PHY_28NM_8960_XML
+#define DSI_PHY_28NM_8960_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 981 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 15291 bytes, from 2021-06-15 22:36:13)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 10953 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_5nm.xml ( 10900 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-02-18 16:45:44)
+
+Copyright (C) 2013-2021 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+static inline uint32_t REG_DSI_28nm_8960_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_8960_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_8960_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_8960_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_8960_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000000c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_8960_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x00000014 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_8960_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000018 + 0x40*i0; }
+
+#define REG_DSI_28nm_8960_PHY_LNCK_CFG_0 0x00000100
+
+#define REG_DSI_28nm_8960_PHY_LNCK_CFG_1 0x00000104
+
+#define REG_DSI_28nm_8960_PHY_LNCK_CFG_2 0x00000108
+
+#define REG_DSI_28nm_8960_PHY_LNCK_TEST_DATAPATH 0x0000010c
+
+#define REG_DSI_28nm_8960_PHY_LNCK_TEST_STR0 0x00000114
+
+#define REG_DSI_28nm_8960_PHY_LNCK_TEST_STR1 0x00000118
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_0 0x00000140
+#define DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__MASK 0x000000ff
+#define DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT 0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val)
+{
+ return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_1 0x00000144
+#define DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK 0x000000ff
+#define DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT 0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val)
+{
+ return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_2 0x00000148
+#define DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK 0x000000ff
+#define DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT 0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val)
+{
+ return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_3 0x0000014c
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_4 0x00000150
+#define DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff
+#define DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT 0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val)
+{
+ return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_5 0x00000154
+#define DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff
+#define DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT 0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val)
+{
+ return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_6 0x00000158
+#define DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff
+#define DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT 0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
+{
+ return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_7 0x0000015c
+#define DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff
+#define DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT 0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
+{
+ return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_8 0x00000160
+#define DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff
+#define DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__SHIFT 0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val)
+{
+ return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_9 0x00000164
+#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__MASK 0x00000007
+#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__SHIFT 0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(uint32_t val)
+{
+ return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__MASK;
+}
+#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__MASK 0x00000070
+#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__SHIFT 4
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val)
+{
+ return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_10 0x00000168
+#define DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__MASK 0x00000007
+#define DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__SHIFT 0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(uint32_t val)
+{
+ return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_11 0x0000016c
+#define DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff
+#define DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
+{
+ return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_CTRL_0 0x00000170
+
+#define REG_DSI_28nm_8960_PHY_CTRL_1 0x00000174
+
+#define REG_DSI_28nm_8960_PHY_CTRL_2 0x00000178
+
+#define REG_DSI_28nm_8960_PHY_CTRL_3 0x0000017c
+
+#define REG_DSI_28nm_8960_PHY_STRENGTH_0 0x00000180
+
+#define REG_DSI_28nm_8960_PHY_STRENGTH_1 0x00000184
+
+#define REG_DSI_28nm_8960_PHY_STRENGTH_2 0x00000188
+
+#define REG_DSI_28nm_8960_PHY_BIST_CTRL_0 0x0000018c
+
+#define REG_DSI_28nm_8960_PHY_BIST_CTRL_1 0x00000190
+
+#define REG_DSI_28nm_8960_PHY_BIST_CTRL_2 0x00000194
+
+#define REG_DSI_28nm_8960_PHY_BIST_CTRL_3 0x00000198
+
+#define REG_DSI_28nm_8960_PHY_BIST_CTRL_4 0x0000019c
+
+#define REG_DSI_28nm_8960_PHY_LDO_CTRL 0x000001b0
+
+#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0 0x00000000
+
+#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1 0x00000004
+
+#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2 0x00000008
+
+#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3 0x0000000c
+
+#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4 0x00000010
+
+#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_5 0x00000014
+
+#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CAL_PWR_CFG 0x00000018
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER 0x00000028
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_0 0x0000002c
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_1 0x00000030
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_2 0x00000034
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_0 0x00000038
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_1 0x0000003c
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_2 0x00000040
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_3 0x00000044
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_4 0x00000048
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_STATUS 0x00000050
+#define DSI_28nm_8960_PHY_MISC_CAL_STATUS_CAL_BUSY 0x00000010
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_0 0x00000000
+#define DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE 0x00000001
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_1 0x00000004
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_2 0x00000008
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_3 0x0000000c
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_4 0x00000010
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_5 0x00000014
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_6 0x00000018
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_7 0x0000001c
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_8 0x00000020
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_9 0x00000024
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_10 0x00000028
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_11 0x0000002c
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_12 0x00000030
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_13 0x00000034
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_14 0x00000038
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_15 0x0000003c
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_16 0x00000040
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_17 0x00000044
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_18 0x00000048
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_19 0x0000004c
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_20 0x00000050
+
+#define REG_DSI_28nm_8960_PHY_PLL_RDY 0x00000080
+#define DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY 0x00000001
+
+
+#endif /* DSI_PHY_28NM_8960_XML */
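
For the 8960 variant, PLL lock is reported through the single PLL_RDY status bit defined above. Assuming readl_poll_timeout() from <linux/iopoll.h> and an illustrative pll_base pointer (neither is part of this patch), waiting for lock might look like:

	u32 val;
	int ret;

	/* Sketch only: poll every 10us, give up after 1ms if the PLL never locks. */
	ret = readl_poll_timeout(pll_base + REG_DSI_28nm_8960_PHY_PLL_RDY, val,
				 val & DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY,
				 10, 1000);
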
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_5nm.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_5nm.xml.h
new file mode 100644
index 000000000000..404c890a29f1
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_phy_5nm.xml.h
@@ -0,0 +1,480 @@
+#ifndef DSI_PHY_5NM_XML
+#define DSI_PHY_5NM_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 981 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 15291 bytes, from 2021-06-15 22:36:13)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 10953 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_5nm.xml ( 10900 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-02-18 16:45:44)
+
+Copyright (C) 2013-2021 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+#define REG_DSI_5nm_PHY_CMN_REVISION_ID0 0x00000000
+
+#define REG_DSI_5nm_PHY_CMN_REVISION_ID1 0x00000004
+
+#define REG_DSI_5nm_PHY_CMN_REVISION_ID2 0x00000008
+
+#define REG_DSI_5nm_PHY_CMN_REVISION_ID3 0x0000000c
+
+#define REG_DSI_5nm_PHY_CMN_CLK_CFG0 0x00000010
+
+#define REG_DSI_5nm_PHY_CMN_CLK_CFG1 0x00000014
+
+#define REG_DSI_5nm_PHY_CMN_GLBL_CTRL 0x00000018
+
+#define REG_DSI_5nm_PHY_CMN_RBUF_CTRL 0x0000001c
+
+#define REG_DSI_5nm_PHY_CMN_VREG_CTRL_0 0x00000020
+
+#define REG_DSI_5nm_PHY_CMN_CTRL_0 0x00000024
+
+#define REG_DSI_5nm_PHY_CMN_CTRL_1 0x00000028
+
+#define REG_DSI_5nm_PHY_CMN_CTRL_2 0x0000002c
+
+#define REG_DSI_5nm_PHY_CMN_CTRL_3 0x00000030
+
+#define REG_DSI_5nm_PHY_CMN_LANE_CFG0 0x00000034
+
+#define REG_DSI_5nm_PHY_CMN_LANE_CFG1 0x00000038
+
+#define REG_DSI_5nm_PHY_CMN_PLL_CNTRL 0x0000003c
+
+#define REG_DSI_5nm_PHY_CMN_DPHY_SOT 0x00000040
+
+#define REG_DSI_5nm_PHY_CMN_LANE_CTRL0 0x000000a0
+
+#define REG_DSI_5nm_PHY_CMN_LANE_CTRL1 0x000000a4
+
+#define REG_DSI_5nm_PHY_CMN_LANE_CTRL2 0x000000a8
+
+#define REG_DSI_5nm_PHY_CMN_LANE_CTRL3 0x000000ac
+
+#define REG_DSI_5nm_PHY_CMN_LANE_CTRL4 0x000000b0
+
+#define REG_DSI_5nm_PHY_CMN_TIMING_CTRL_0 0x000000b4
+
+#define REG_DSI_5nm_PHY_CMN_TIMING_CTRL_1 0x000000b8
+
+#define REG_DSI_5nm_PHY_CMN_TIMING_CTRL_2 0x000000bc
+
+#define REG_DSI_5nm_PHY_CMN_TIMING_CTRL_3 0x000000c0
+
+#define REG_DSI_5nm_PHY_CMN_TIMING_CTRL_4 0x000000c4
+
+#define REG_DSI_5nm_PHY_CMN_TIMING_CTRL_5 0x000000c8
+
+#define REG_DSI_5nm_PHY_CMN_TIMING_CTRL_6 0x000000cc
+
+#define REG_DSI_5nm_PHY_CMN_TIMING_CTRL_7 0x000000d0
+
+#define REG_DSI_5nm_PHY_CMN_TIMING_CTRL_8 0x000000d4
+
+#define REG_DSI_5nm_PHY_CMN_TIMING_CTRL_9 0x000000d8
+
+#define REG_DSI_5nm_PHY_CMN_TIMING_CTRL_10 0x000000dc
+
+#define REG_DSI_5nm_PHY_CMN_TIMING_CTRL_11 0x000000e0
+
+#define REG_DSI_5nm_PHY_CMN_TIMING_CTRL_12 0x000000e4
+
+#define REG_DSI_5nm_PHY_CMN_TIMING_CTRL_13 0x000000e8
+
+#define REG_DSI_5nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0 0x000000ec
+
+#define REG_DSI_5nm_PHY_CMN_GLBL_HSTX_STR_CTRL_1 0x000000f0
+
+#define REG_DSI_5nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL 0x000000f4
+
+#define REG_DSI_5nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL 0x000000f8
+
+#define REG_DSI_5nm_PHY_CMN_GLBL_RESCODE_OFFSET_MID_CTRL 0x000000fc
+
+#define REG_DSI_5nm_PHY_CMN_GLBL_LPTX_STR_CTRL 0x00000100
+
+#define REG_DSI_5nm_PHY_CMN_GLBL_PEMPH_CTRL_0 0x00000104
+
+#define REG_DSI_5nm_PHY_CMN_GLBL_PEMPH_CTRL_1 0x00000108
+
+#define REG_DSI_5nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL 0x0000010c
+
+#define REG_DSI_5nm_PHY_CMN_VREG_CTRL_1 0x00000110
+
+#define REG_DSI_5nm_PHY_CMN_CTRL_4 0x00000114
+
+#define REG_DSI_5nm_PHY_CMN_PHY_STATUS 0x00000140
+
+#define REG_DSI_5nm_PHY_CMN_LANE_STATUS0 0x00000148
+
+#define REG_DSI_5nm_PHY_CMN_LANE_STATUS1 0x0000014c
+
+static inline uint32_t REG_DSI_5nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_5nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_5nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_5nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_5nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000000c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_5nm_PHY_LN_PIN_SWAP(uint32_t i0) { return 0x00000010 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_5nm_PHY_LN_LPRX_CTRL(uint32_t i0) { return 0x00000014 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_5nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x00000018 + 0x80*i0; }
+
+#define REG_DSI_5nm_PHY_PLL_ANALOG_CONTROLS_ONE 0x00000000
+
+#define REG_DSI_5nm_PHY_PLL_ANALOG_CONTROLS_TWO 0x00000004
+
+#define REG_DSI_5nm_PHY_PLL_INT_LOOP_SETTINGS 0x00000008
+
+#define REG_DSI_5nm_PHY_PLL_INT_LOOP_SETTINGS_TWO 0x0000000c
+
+#define REG_DSI_5nm_PHY_PLL_ANALOG_CONTROLS_THREE 0x00000010
+
+#define REG_DSI_5nm_PHY_PLL_ANALOG_CONTROLS_FOUR 0x00000014
+
+#define REG_DSI_5nm_PHY_PLL_ANALOG_CONTROLS_FIVE 0x00000018
+
+#define REG_DSI_5nm_PHY_PLL_INT_LOOP_CONTROLS 0x0000001c
+
+#define REG_DSI_5nm_PHY_PLL_DSM_DIVIDER 0x00000020
+
+#define REG_DSI_5nm_PHY_PLL_FEEDBACK_DIVIDER 0x00000024
+
+#define REG_DSI_5nm_PHY_PLL_SYSTEM_MUXES 0x00000028
+
+#define REG_DSI_5nm_PHY_PLL_FREQ_UPDATE_CONTROL_OVERRIDES 0x0000002c
+
+#define REG_DSI_5nm_PHY_PLL_CMODE 0x00000030
+
+#define REG_DSI_5nm_PHY_PLL_PSM_CTRL 0x00000034
+
+#define REG_DSI_5nm_PHY_PLL_RSM_CTRL 0x00000038
+
+#define REG_DSI_5nm_PHY_PLL_VCO_TUNE_MAP 0x0000003c
+
+#define REG_DSI_5nm_PHY_PLL_PLL_CNTRL 0x00000040
+
+#define REG_DSI_5nm_PHY_PLL_CALIBRATION_SETTINGS 0x00000044
+
+#define REG_DSI_5nm_PHY_PLL_BAND_SEL_CAL_TIMER_LOW 0x00000048
+
+#define REG_DSI_5nm_PHY_PLL_BAND_SEL_CAL_TIMER_HIGH 0x0000004c
+
+#define REG_DSI_5nm_PHY_PLL_BAND_SEL_CAL_SETTINGS 0x00000050
+
+#define REG_DSI_5nm_PHY_PLL_BAND_SEL_MIN 0x00000054
+
+#define REG_DSI_5nm_PHY_PLL_BAND_SEL_MAX 0x00000058
+
+#define REG_DSI_5nm_PHY_PLL_BAND_SEL_PFILT 0x0000005c
+
+#define REG_DSI_5nm_PHY_PLL_BAND_SEL_IFILT 0x00000060
+
+#define REG_DSI_5nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_TWO 0x00000064
+
+#define REG_DSI_5nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE 0x00000068
+
+#define REG_DSI_5nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_FOUR 0x0000006c
+
+#define REG_DSI_5nm_PHY_PLL_BAND_SEL_ICODE_HIGH 0x00000070
+
+#define REG_DSI_5nm_PHY_PLL_BAND_SEL_ICODE_LOW 0x00000074
+
+#define REG_DSI_5nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE 0x00000078
+
+#define REG_DSI_5nm_PHY_PLL_FREQ_DETECT_THRESH 0x0000007c
+
+#define REG_DSI_5nm_PHY_PLL_FREQ_DET_REFCLK_HIGH 0x00000080
+
+#define REG_DSI_5nm_PHY_PLL_FREQ_DET_REFCLK_LOW 0x00000084
+
+#define REG_DSI_5nm_PHY_PLL_FREQ_DET_PLLCLK_HIGH 0x00000088
+
+#define REG_DSI_5nm_PHY_PLL_FREQ_DET_PLLCLK_LOW 0x0000008c
+
+#define REG_DSI_5nm_PHY_PLL_PFILT 0x00000090
+
+#define REG_DSI_5nm_PHY_PLL_IFILT 0x00000094
+
+#define REG_DSI_5nm_PHY_PLL_PLL_GAIN 0x00000098
+
+#define REG_DSI_5nm_PHY_PLL_ICODE_LOW 0x0000009c
+
+#define REG_DSI_5nm_PHY_PLL_ICODE_HIGH 0x000000a0
+
+#define REG_DSI_5nm_PHY_PLL_LOCKDET 0x000000a4
+
+#define REG_DSI_5nm_PHY_PLL_OUTDIV 0x000000a8
+
+#define REG_DSI_5nm_PHY_PLL_FASTLOCK_CONTROL 0x000000ac
+
+#define REG_DSI_5nm_PHY_PLL_PASS_OUT_OVERRIDE_ONE 0x000000b0
+
+#define REG_DSI_5nm_PHY_PLL_PASS_OUT_OVERRIDE_TWO 0x000000b4
+
+#define REG_DSI_5nm_PHY_PLL_CORE_OVERRIDE 0x000000b8
+
+#define REG_DSI_5nm_PHY_PLL_CORE_INPUT_OVERRIDE 0x000000bc
+
+#define REG_DSI_5nm_PHY_PLL_RATE_CHANGE 0x000000c0
+
+#define REG_DSI_5nm_PHY_PLL_PLL_DIGITAL_TIMERS 0x000000c4
+
+#define REG_DSI_5nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO 0x000000c8
+
+#define REG_DSI_5nm_PHY_PLL_DECIMAL_DIV_START 0x000000cc
+
+#define REG_DSI_5nm_PHY_PLL_FRAC_DIV_START_LOW 0x000000d0
+
+#define REG_DSI_5nm_PHY_PLL_FRAC_DIV_START_MID 0x000000d4
+
+#define REG_DSI_5nm_PHY_PLL_FRAC_DIV_START_HIGH 0x000000d8
+
+#define REG_DSI_5nm_PHY_PLL_DEC_FRAC_MUXES 0x000000dc
+
+#define REG_DSI_5nm_PHY_PLL_DECIMAL_DIV_START_1 0x000000e0
+
+#define REG_DSI_5nm_PHY_PLL_FRAC_DIV_START_LOW_1 0x000000e4
+
+#define REG_DSI_5nm_PHY_PLL_FRAC_DIV_START_MID_1 0x000000e8
+
+#define REG_DSI_5nm_PHY_PLL_FRAC_DIV_START_HIGH_1 0x000000ec
+
+#define REG_DSI_5nm_PHY_PLL_DECIMAL_DIV_START_2 0x000000f0
+
+#define REG_DSI_5nm_PHY_PLL_FRAC_DIV_START_LOW_2 0x000000f4
+
+#define REG_DSI_5nm_PHY_PLL_FRAC_DIV_START_MID_2 0x000000f8
+
+#define REG_DSI_5nm_PHY_PLL_FRAC_DIV_START_HIGH_2 0x000000fc
+
+#define REG_DSI_5nm_PHY_PLL_MASH_CONTROL 0x00000100
+
+#define REG_DSI_5nm_PHY_PLL_SSC_STEPSIZE_LOW 0x00000104
+
+#define REG_DSI_5nm_PHY_PLL_SSC_STEPSIZE_HIGH 0x00000108
+
+#define REG_DSI_5nm_PHY_PLL_SSC_DIV_PER_LOW 0x0000010c
+
+#define REG_DSI_5nm_PHY_PLL_SSC_DIV_PER_HIGH 0x00000110
+
+#define REG_DSI_5nm_PHY_PLL_SSC_ADJPER_LOW 0x00000114
+
+#define REG_DSI_5nm_PHY_PLL_SSC_ADJPER_HIGH 0x00000118
+
+#define REG_DSI_5nm_PHY_PLL_SSC_MUX_CONTROL 0x0000011c
+
+#define REG_DSI_5nm_PHY_PLL_SSC_STEPSIZE_LOW_1 0x00000120
+
+#define REG_DSI_5nm_PHY_PLL_SSC_STEPSIZE_HIGH_1 0x00000124
+
+#define REG_DSI_5nm_PHY_PLL_SSC_DIV_PER_LOW_1 0x00000128
+
+#define REG_DSI_5nm_PHY_PLL_SSC_DIV_PER_HIGH_1 0x0000012c
+
+#define REG_DSI_5nm_PHY_PLL_SSC_ADJPER_LOW_1 0x00000130
+
+#define REG_DSI_5nm_PHY_PLL_SSC_ADJPER_HIGH_1 0x00000134
+
+#define REG_DSI_5nm_PHY_PLL_SSC_STEPSIZE_LOW_2 0x00000138
+
+#define REG_DSI_5nm_PHY_PLL_SSC_STEPSIZE_HIGH_2 0x0000013c
+
+#define REG_DSI_5nm_PHY_PLL_SSC_DIV_PER_LOW_2 0x00000140
+
+#define REG_DSI_5nm_PHY_PLL_SSC_DIV_PER_HIGH_2 0x00000144
+
+#define REG_DSI_5nm_PHY_PLL_SSC_ADJPER_LOW_2 0x00000148
+
+#define REG_DSI_5nm_PHY_PLL_SSC_ADJPER_HIGH_2 0x0000014c
+
+#define REG_DSI_5nm_PHY_PLL_SSC_CONTROL 0x00000150
+
+#define REG_DSI_5nm_PHY_PLL_PLL_OUTDIV_RATE 0x00000154
+
+#define REG_DSI_5nm_PHY_PLL_PLL_LOCKDET_RATE_1 0x00000158
+
+#define REG_DSI_5nm_PHY_PLL_PLL_LOCKDET_RATE_2 0x0000015c
+
+#define REG_DSI_5nm_PHY_PLL_PLL_PROP_GAIN_RATE_1 0x00000160
+
+#define REG_DSI_5nm_PHY_PLL_PLL_PROP_GAIN_RATE_2 0x00000164
+
+#define REG_DSI_5nm_PHY_PLL_PLL_BAND_SEL_RATE_1 0x00000168
+
+#define REG_DSI_5nm_PHY_PLL_PLL_BAND_SEL_RATE_2 0x0000016c
+
+#define REG_DSI_5nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1 0x00000170
+
+#define REG_DSI_5nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_2 0x00000174
+
+#define REG_DSI_5nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 0x00000178
+
+#define REG_DSI_5nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_2 0x0000017c
+
+#define REG_DSI_5nm_PHY_PLL_PLL_FASTLOCK_EN_BAND 0x00000180
+
+#define REG_DSI_5nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_MID 0x00000184
+
+#define REG_DSI_5nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_HIGH 0x00000188
+
+#define REG_DSI_5nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_MUX 0x0000018c
+
+#define REG_DSI_5nm_PHY_PLL_PLL_LOCK_OVERRIDE 0x00000190
+
+#define REG_DSI_5nm_PHY_PLL_PLL_LOCK_DELAY 0x00000194
+
+#define REG_DSI_5nm_PHY_PLL_PLL_LOCK_MIN_DELAY 0x00000198
+
+#define REG_DSI_5nm_PHY_PLL_CLOCK_INVERTERS 0x0000019c
+
+#define REG_DSI_5nm_PHY_PLL_SPARE_AND_JPC_OVERRIDES 0x000001a0
+
+#define REG_DSI_5nm_PHY_PLL_BIAS_CONTROL_1 0x000001a4
+
+#define REG_DSI_5nm_PHY_PLL_BIAS_CONTROL_2 0x000001a8
+
+#define REG_DSI_5nm_PHY_PLL_ALOG_OBSV_BUS_CTRL_1 0x000001ac
+
+#define REG_DSI_5nm_PHY_PLL_COMMON_STATUS_ONE 0x000001b0
+
+#define REG_DSI_5nm_PHY_PLL_COMMON_STATUS_TWO 0x000001b4
+
+#define REG_DSI_5nm_PHY_PLL_BAND_SEL_CAL 0x000001b8
+
+#define REG_DSI_5nm_PHY_PLL_ICODE_ACCUM_STATUS_LOW 0x000001bc
+
+#define REG_DSI_5nm_PHY_PLL_ICODE_ACCUM_STATUS_HIGH 0x000001c0
+
+#define REG_DSI_5nm_PHY_PLL_FD_OUT_LOW 0x000001c4
+
+#define REG_DSI_5nm_PHY_PLL_FD_OUT_HIGH 0x000001c8
+
+#define REG_DSI_5nm_PHY_PLL_ALOG_OBSV_BUS_STATUS_1 0x000001cc
+
+#define REG_DSI_5nm_PHY_PLL_PLL_MISC_CONFIG 0x000001d0
+
+#define REG_DSI_5nm_PHY_PLL_FLL_CONFIG 0x000001d4
+
+#define REG_DSI_5nm_PHY_PLL_FLL_FREQ_ACQ_TIME 0x000001d8
+
+#define REG_DSI_5nm_PHY_PLL_FLL_CODE0 0x000001dc
+
+#define REG_DSI_5nm_PHY_PLL_FLL_CODE1 0x000001e0
+
+#define REG_DSI_5nm_PHY_PLL_FLL_GAIN0 0x000001e4
+
+#define REG_DSI_5nm_PHY_PLL_FLL_GAIN1 0x000001e8
+
+#define REG_DSI_5nm_PHY_PLL_SW_RESET 0x000001ec
+
+#define REG_DSI_5nm_PHY_PLL_FAST_PWRUP 0x000001f0
+
+#define REG_DSI_5nm_PHY_PLL_LOCKTIME0 0x000001f4
+
+#define REG_DSI_5nm_PHY_PLL_LOCKTIME1 0x000001f8
+
+#define REG_DSI_5nm_PHY_PLL_DEBUG_BUS_SEL 0x000001fc
+
+#define REG_DSI_5nm_PHY_PLL_DEBUG_BUS0 0x00000200
+
+#define REG_DSI_5nm_PHY_PLL_DEBUG_BUS1 0x00000204
+
+#define REG_DSI_5nm_PHY_PLL_DEBUG_BUS2 0x00000208
+
+#define REG_DSI_5nm_PHY_PLL_DEBUG_BUS3 0x0000020c
+
+#define REG_DSI_5nm_PHY_PLL_ANALOG_FLL_CONTROL_OVERRIDES 0x00000210
+
+#define REG_DSI_5nm_PHY_PLL_VCO_CONFIG 0x00000214
+
+#define REG_DSI_5nm_PHY_PLL_VCO_CAL_CODE1_MODE0_STATUS 0x00000218
+
+#define REG_DSI_5nm_PHY_PLL_VCO_CAL_CODE1_MODE1_STATUS 0x0000021c
+
+#define REG_DSI_5nm_PHY_PLL_RESET_SM_STATUS 0x00000220
+
+#define REG_DSI_5nm_PHY_PLL_TDC_OFFSET 0x00000224
+
+#define REG_DSI_5nm_PHY_PLL_PS3_PWRDOWN_CONTROLS 0x00000228
+
+#define REG_DSI_5nm_PHY_PLL_PS4_PWRDOWN_CONTROLS 0x0000022c
+
+#define REG_DSI_5nm_PHY_PLL_PLL_RST_CONTROLS 0x00000230
+
+#define REG_DSI_5nm_PHY_PLL_GEAR_BAND_SELECT_CONTROLS 0x00000234
+
+#define REG_DSI_5nm_PHY_PLL_PSM_CLK_CONTROLS 0x00000238
+
+#define REG_DSI_5nm_PHY_PLL_SYSTEM_MUXES_2 0x0000023c
+
+#define REG_DSI_5nm_PHY_PLL_VCO_CONFIG_1 0x00000240
+
+#define REG_DSI_5nm_PHY_PLL_VCO_CONFIG_2 0x00000244
+
+#define REG_DSI_5nm_PHY_PLL_CLOCK_INVERTERS_1 0x00000248
+
+#define REG_DSI_5nm_PHY_PLL_CLOCK_INVERTERS_2 0x0000024c
+
+#define REG_DSI_5nm_PHY_PLL_CMODE_1 0x00000250
+
+#define REG_DSI_5nm_PHY_PLL_CMODE_2 0x00000254
+
+#define REG_DSI_5nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1 0x00000258
+
+#define REG_DSI_5nm_PHY_PLL_ANALOG_CONTROLS_FIVE_2 0x0000025c
+
+#define REG_DSI_5nm_PHY_PLL_PERF_OPTIMIZE 0x00000260
+
+
+#endif /* DSI_PHY_5NM_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_7nm.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_7nm.xml.h
new file mode 100644
index 000000000000..782f6b332baf
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_phy_7nm.xml.h
@@ -0,0 +1,482 @@
+#ifndef DSI_PHY_7NM_XML
+#define DSI_PHY_7NM_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 981 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 15291 bytes, from 2021-06-15 22:36:13)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 10953 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_5nm.xml ( 10900 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-02-18 16:45:44)
+
+Copyright (C) 2013-2021 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+#define REG_DSI_7nm_PHY_CMN_REVISION_ID0 0x00000000
+
+#define REG_DSI_7nm_PHY_CMN_REVISION_ID1 0x00000004
+
+#define REG_DSI_7nm_PHY_CMN_REVISION_ID2 0x00000008
+
+#define REG_DSI_7nm_PHY_CMN_REVISION_ID3 0x0000000c
+
+#define REG_DSI_7nm_PHY_CMN_CLK_CFG0 0x00000010
+
+#define REG_DSI_7nm_PHY_CMN_CLK_CFG1 0x00000014
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_CTRL 0x00000018
+
+#define REG_DSI_7nm_PHY_CMN_RBUF_CTRL 0x0000001c
+
+#define REG_DSI_7nm_PHY_CMN_VREG_CTRL_0 0x00000020
+
+#define REG_DSI_7nm_PHY_CMN_CTRL_0 0x00000024
+
+#define REG_DSI_7nm_PHY_CMN_CTRL_1 0x00000028
+
+#define REG_DSI_7nm_PHY_CMN_CTRL_2 0x0000002c
+
+#define REG_DSI_7nm_PHY_CMN_CTRL_3 0x00000030
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CFG0 0x00000034
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CFG1 0x00000038
+
+#define REG_DSI_7nm_PHY_CMN_PLL_CNTRL 0x0000003c
+
+#define REG_DSI_7nm_PHY_CMN_DPHY_SOT 0x00000040
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CTRL0 0x000000a0
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CTRL1 0x000000a4
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CTRL2 0x000000a8
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CTRL3 0x000000ac
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CTRL4 0x000000b0
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0 0x000000b4
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1 0x000000b8
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2 0x000000bc
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3 0x000000c0
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4 0x000000c4
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5 0x000000c8
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6 0x000000cc
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7 0x000000d0
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8 0x000000d4
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9 0x000000d8
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10 0x000000dc
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11 0x000000e0
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12 0x000000e4
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13 0x000000e8
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0 0x000000ec
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_1 0x000000f0
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL 0x000000f4
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL 0x000000f8
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_MID_CTRL 0x000000fc
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_LPTX_STR_CTRL 0x00000100
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0 0x00000104
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_1 0x00000108
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL 0x0000010c
+
+#define REG_DSI_7nm_PHY_CMN_VREG_CTRL_1 0x00000110
+
+#define REG_DSI_7nm_PHY_CMN_CTRL_4 0x00000114
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4 0x00000128
+
+#define REG_DSI_7nm_PHY_CMN_PHY_STATUS 0x00000140
+
+#define REG_DSI_7nm_PHY_CMN_LANE_STATUS0 0x00000148
+
+#define REG_DSI_7nm_PHY_CMN_LANE_STATUS1 0x0000014c
+
+static inline uint32_t REG_DSI_7nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000000c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_PIN_SWAP(uint32_t i0) { return 0x00000010 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_LPRX_CTRL(uint32_t i0) { return 0x00000014 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x00000018 + 0x80*i0; }
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_ONE 0x00000000
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO 0x00000004
+
+#define REG_DSI_7nm_PHY_PLL_INT_LOOP_SETTINGS 0x00000008
+
+#define REG_DSI_7nm_PHY_PLL_INT_LOOP_SETTINGS_TWO 0x0000000c
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE 0x00000010
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FOUR 0x00000014
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE 0x00000018
+
+#define REG_DSI_7nm_PHY_PLL_INT_LOOP_CONTROLS 0x0000001c
+
+#define REG_DSI_7nm_PHY_PLL_DSM_DIVIDER 0x00000020
+
+#define REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER 0x00000024
+
+#define REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES 0x00000028
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_UPDATE_CONTROL_OVERRIDES 0x0000002c
+
+#define REG_DSI_7nm_PHY_PLL_CMODE 0x00000030
+
+#define REG_DSI_7nm_PHY_PLL_PSM_CTRL 0x00000034
+
+#define REG_DSI_7nm_PHY_PLL_RSM_CTRL 0x00000038
+
+#define REG_DSI_7nm_PHY_PLL_VCO_TUNE_MAP 0x0000003c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_CNTRL 0x00000040
+
+#define REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS 0x00000044
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_TIMER_LOW 0x00000048
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_TIMER_HIGH 0x0000004c
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS 0x00000050
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_MIN 0x00000054
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_MAX 0x00000058
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_PFILT 0x0000005c
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_IFILT 0x00000060
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_TWO 0x00000064
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE 0x00000068
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_FOUR 0x0000006c
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_ICODE_HIGH 0x00000070
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_ICODE_LOW 0x00000074
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE 0x00000078
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DETECT_THRESH 0x0000007c
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DET_REFCLK_HIGH 0x00000080
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DET_REFCLK_LOW 0x00000084
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DET_PLLCLK_HIGH 0x00000088
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DET_PLLCLK_LOW 0x0000008c
+
+#define REG_DSI_7nm_PHY_PLL_PFILT 0x00000090
+
+#define REG_DSI_7nm_PHY_PLL_IFILT 0x00000094
+
+#define REG_DSI_7nm_PHY_PLL_PLL_GAIN 0x00000098
+
+#define REG_DSI_7nm_PHY_PLL_ICODE_LOW 0x0000009c
+
+#define REG_DSI_7nm_PHY_PLL_ICODE_HIGH 0x000000a0
+
+#define REG_DSI_7nm_PHY_PLL_LOCKDET 0x000000a4
+
+#define REG_DSI_7nm_PHY_PLL_OUTDIV 0x000000a8
+
+#define REG_DSI_7nm_PHY_PLL_FASTLOCK_CONTROL 0x000000ac
+
+#define REG_DSI_7nm_PHY_PLL_PASS_OUT_OVERRIDE_ONE 0x000000b0
+
+#define REG_DSI_7nm_PHY_PLL_PASS_OUT_OVERRIDE_TWO 0x000000b4
+
+#define REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE 0x000000b8
+
+#define REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE 0x000000bc
+
+#define REG_DSI_7nm_PHY_PLL_RATE_CHANGE 0x000000c0
+
+#define REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS 0x000000c4
+
+#define REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO 0x000000c8
+
+#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START 0x000000cc
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW 0x000000d0
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID 0x000000d4
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH 0x000000d8
+
+#define REG_DSI_7nm_PHY_PLL_DEC_FRAC_MUXES 0x000000dc
+
+#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1 0x000000e0
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1 0x000000e4
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1 0x000000e8
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1 0x000000ec
+
+#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_2 0x000000f0
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_2 0x000000f4
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_2 0x000000f8
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_2 0x000000fc
+
+#define REG_DSI_7nm_PHY_PLL_MASH_CONTROL 0x00000100
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW 0x00000104
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH 0x00000108
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW 0x0000010c
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH 0x00000110
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW 0x00000114
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH 0x00000118
+
+#define REG_DSI_7nm_PHY_PLL_SSC_MUX_CONTROL 0x0000011c
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1 0x00000120
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1 0x00000124
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1 0x00000128
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1 0x0000012c
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1 0x00000130
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1 0x00000134
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_2 0x00000138
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_2 0x0000013c
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_2 0x00000140
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_2 0x00000144
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_2 0x00000148
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_2 0x0000014c
+
+#define REG_DSI_7nm_PHY_PLL_SSC_CONTROL 0x00000150
+
+#define REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE 0x00000154
+
+#define REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1 0x00000158
+
+#define REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_2 0x0000015c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1 0x00000160
+
+#define REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_2 0x00000164
+
+#define REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1 0x00000168
+
+#define REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_2 0x0000016c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1 0x00000170
+
+#define REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_2 0x00000174
+
+#define REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 0x00000178
+
+#define REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_2 0x0000017c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_FASTLOCK_EN_BAND 0x00000180
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_MID 0x00000184
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_HIGH 0x00000188
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_MUX 0x0000018c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE 0x00000190
+
+#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY 0x00000194
+
+#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_MIN_DELAY 0x00000198
+
+#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS 0x0000019c
+
+#define REG_DSI_7nm_PHY_PLL_SPARE_AND_JPC_OVERRIDES 0x000001a0
+
+#define REG_DSI_7nm_PHY_PLL_BIAS_CONTROL_1 0x000001a4
+
+#define REG_DSI_7nm_PHY_PLL_BIAS_CONTROL_2 0x000001a8
+
+#define REG_DSI_7nm_PHY_PLL_ALOG_OBSV_BUS_CTRL_1 0x000001ac
+
+#define REG_DSI_7nm_PHY_PLL_COMMON_STATUS_ONE 0x000001b0
+
+#define REG_DSI_7nm_PHY_PLL_COMMON_STATUS_TWO 0x000001b4
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL 0x000001b8
+
+#define REG_DSI_7nm_PHY_PLL_ICODE_ACCUM_STATUS_LOW 0x000001bc
+
+#define REG_DSI_7nm_PHY_PLL_ICODE_ACCUM_STATUS_HIGH 0x000001c0
+
+#define REG_DSI_7nm_PHY_PLL_FD_OUT_LOW 0x000001c4
+
+#define REG_DSI_7nm_PHY_PLL_FD_OUT_HIGH 0x000001c8
+
+#define REG_DSI_7nm_PHY_PLL_ALOG_OBSV_BUS_STATUS_1 0x000001cc
+
+#define REG_DSI_7nm_PHY_PLL_PLL_MISC_CONFIG 0x000001d0
+
+#define REG_DSI_7nm_PHY_PLL_FLL_CONFIG 0x000001d4
+
+#define REG_DSI_7nm_PHY_PLL_FLL_FREQ_ACQ_TIME 0x000001d8
+
+#define REG_DSI_7nm_PHY_PLL_FLL_CODE0 0x000001dc
+
+#define REG_DSI_7nm_PHY_PLL_FLL_CODE1 0x000001e0
+
+#define REG_DSI_7nm_PHY_PLL_FLL_GAIN0 0x000001e4
+
+#define REG_DSI_7nm_PHY_PLL_FLL_GAIN1 0x000001e8
+
+#define REG_DSI_7nm_PHY_PLL_SW_RESET 0x000001ec
+
+#define REG_DSI_7nm_PHY_PLL_FAST_PWRUP 0x000001f0
+
+#define REG_DSI_7nm_PHY_PLL_LOCKTIME0 0x000001f4
+
+#define REG_DSI_7nm_PHY_PLL_LOCKTIME1 0x000001f8
+
+#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS_SEL 0x000001fc
+
+#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS0 0x00000200
+
+#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS1 0x00000204
+
+#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS2 0x00000208
+
+#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS3 0x0000020c
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_FLL_CONTROL_OVERRIDES 0x00000210
+
+#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG 0x00000214
+
+#define REG_DSI_7nm_PHY_PLL_VCO_CAL_CODE1_MODE0_STATUS 0x00000218
+
+#define REG_DSI_7nm_PHY_PLL_VCO_CAL_CODE1_MODE1_STATUS 0x0000021c
+
+#define REG_DSI_7nm_PHY_PLL_RESET_SM_STATUS 0x00000220
+
+#define REG_DSI_7nm_PHY_PLL_TDC_OFFSET 0x00000224
+
+#define REG_DSI_7nm_PHY_PLL_PS3_PWRDOWN_CONTROLS 0x00000228
+
+#define REG_DSI_7nm_PHY_PLL_PS4_PWRDOWN_CONTROLS 0x0000022c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_RST_CONTROLS 0x00000230
+
+#define REG_DSI_7nm_PHY_PLL_GEAR_BAND_SELECT_CONTROLS 0x00000234
+
+#define REG_DSI_7nm_PHY_PLL_PSM_CLK_CONTROLS 0x00000238
+
+#define REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES_2 0x0000023c
+
+#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1 0x00000240
+
+#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG_2 0x00000244
+
+#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_1 0x00000248
+
+#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_2 0x0000024c
+
+#define REG_DSI_7nm_PHY_PLL_CMODE_1 0x00000250
+
+#define REG_DSI_7nm_PHY_PLL_CMODE_2 0x00000254
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1 0x00000258
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_2 0x0000025c
+
+#define REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE 0x00000260
+
+
+#endif /* DSI_PHY_7NM_XML */
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index 4e8660c3eb08..fbb6a7a6b940 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -8,19 +8,27 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 42301 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41874 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2020-07-23 21:58:14)
-
-Copyright (C) 2013-2020 by the following authors:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 981 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 15291 bytes, from 2021-06-15 22:36:13)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 10953 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_5nm.xml ( 10900 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-02-18 16:45:44)
+
+Copyright (C) 2013-2021 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index ff7f2ec42030..6ca6bfd4809b 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -658,14 +658,14 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
"qcom,dsi-phy-regulator-ldo-mode");
- phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
+ phy->base = msm_ioremap_size(pdev, "dsi_phy", "DSI_PHY", &phy->base_size);
if (IS_ERR(phy->base)) {
DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
ret = -ENOMEM;
goto fail;
}
- phy->pll_base = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
+ phy->pll_base = msm_ioremap_size(pdev, "dsi_pll", "DSI_PLL", &phy->pll_size);
if (IS_ERR(phy->pll_base)) {
DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
ret = -ENOMEM;
@@ -673,7 +673,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
}
if (phy->cfg->has_phy_lane) {
- phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane", "DSI_PHY_LANE");
+ phy->lane_base = msm_ioremap_size(pdev, "dsi_phy_lane", "DSI_PHY_LANE", &phy->lane_size);
if (IS_ERR(phy->lane_base)) {
DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n", __func__);
ret = -ENOMEM;
@@ -682,7 +682,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
}
if (phy->cfg->has_phy_regulator) {
- phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", "DSI_PHY_REG");
+ phy->reg_base = msm_ioremap_size(pdev, "dsi_phy_regulator", "DSI_PHY_REG", &phy->reg_size);
if (IS_ERR(phy->reg_base)) {
DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy regulator base\n", __func__);
ret = -ENOMEM;
@@ -868,3 +868,26 @@ int msm_dsi_phy_pll_restore_state(struct msm_dsi_phy *phy)
return 0;
}
+
+void msm_dsi_phy_snapshot(struct msm_disp_state *disp_state, struct msm_dsi_phy *phy)
+{
+ msm_disp_snapshot_add_block(disp_state,
+ phy->base_size, phy->base,
+ "dsi%d_phy", phy->id);
+
+ /* Do not try accessing PLL registers if it is switched off */
+ if (phy->pll_on)
+ msm_disp_snapshot_add_block(disp_state,
+ phy->pll_size, phy->pll_base,
+ "dsi%d_pll", phy->id);
+
+ if (phy->lane_base)
+ msm_disp_snapshot_add_block(disp_state,
+ phy->lane_size, phy->lane_base,
+ "dsi%d_lane", phy->id);
+
+ if (phy->reg_base)
+ msm_disp_snapshot_add_block(disp_state,
+ phy->reg_size, phy->reg_base,
+ "dsi%d_reg", phy->id);
+}
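The snapshot helper added above can only bound its register dumps because the probe path now records each mapped region's length through msm_ioremap_size(). A minimal sketch of that pairing, assuming the fields shown in this diff; the helper name dsi_phy_map_base() is hypothetical and error handling is simplified:

/* Map the "dsi_phy" region and remember its size so a later snapshot pass
 * knows how many bytes are safe to dump.  Illustrative only.
 */
static int dsi_phy_map_base(struct platform_device *pdev,
			    struct msm_dsi_phy *phy)
{
	phy->base = msm_ioremap_size(pdev, "dsi_phy", "DSI_PHY",
				     &phy->base_size);
	if (IS_ERR(phy->base))
		return PTR_ERR(phy->base);

	return 0;
}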
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 94a77ac364d3..5b0feef87127 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -85,6 +85,10 @@ struct msm_dsi_phy {
void __iomem *pll_base;
void __iomem *reg_base;
void __iomem *lane_base;
+ phys_addr_t base_size;
+ phys_addr_t pll_size;
+ phys_addr_t reg_size;
+ phys_addr_t lane_size;
int id;
struct clk *ahb_clk;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index 34bc93548fcf..e46b10fc793a 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -9,6 +9,7 @@
#include "dsi_phy.h"
#include "dsi.xml.h"
+#include "dsi_phy_10nm.xml.h"
/*
* DSI PLL 10nm - clock diagram (eg: DSI0):
@@ -432,6 +433,7 @@ static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
pll_freq += div_u64(tmp64, multiplier);
vco_rate = pll_freq;
+ pll_10nm->vco_current_rate = vco_rate;
DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
pll_10nm->phy->id, (unsigned long)vco_rate, dec, frac);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index 65d68eb9e3cb..a34cf151c517 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -9,6 +9,7 @@
#include "dsi_phy.h"
#include "dsi.xml.h"
+#include "dsi_phy_14nm.xml.h"
#define PHY_14NM_CKLN_IDX 4
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
index e96d789aea18..ee7c418a1c29 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -5,6 +5,7 @@
#include "dsi_phy.h"
#include "dsi.xml.h"
+#include "dsi_phy_20nm.xml.h"
static void dsi_20nm_dphy_set_timing(struct msm_dsi_phy *phy,
struct msm_dsi_dphy_timing *timing)
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index 3304acda2165..2da673a2add6 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -8,6 +8,7 @@
#include "dsi_phy.h"
#include "dsi.xml.h"
+#include "dsi_phy_28nm.xml.h"
/*
* DSI PLL 28nm - clock diagram (eg: DSI0):
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index 86e40a0d41a3..aaa37456f4ee 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -8,6 +8,7 @@
#include "dsi_phy.h"
#include "dsi.xml.h"
+#include "dsi_phy_28nm_8960.xml.h"
/*
* DSI PLL 28nm (8960/A family) - clock diagram (eg: DSI1):
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index e76ce40a12ab..7c23d4c47338 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -9,6 +9,7 @@
#include "dsi_phy.h"
#include "dsi.xml.h"
+#include "dsi_phy_7nm.xml.h"
/*
* DSI PLL 7nm - clock diagram (eg: DSI0): TODO: updated CPHY diagram
@@ -460,6 +461,7 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
pll_freq += div_u64(tmp64, multiplier);
vco_rate = pll_freq;
+ pll_7nm->vco_current_rate = vco_rate;
DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
pll_7nm->phy->id, (unsigned long)vco_rate, dec, frac);
@@ -972,7 +974,11 @@ const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = {
.restore_pll_state = dsi_7nm_pll_restore_state,
},
.min_pll_rate = 600000000UL,
- .max_pll_rate = (5000000000ULL < ULONG_MAX) ? 5000000000ULL : ULONG_MAX,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
.io_start = { 0xae94400, 0xae96400 },
.num_dsi_phy = 2,
.quirks = DSI_PHY_7NM_QUIRK_V4_1,
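The #ifdef added above encodes what the replaced ternary already evaluated to at compile time: on a 32-bit kernel, unsigned long tops out at ULONG_MAX (4294967295), so a 5 GHz cap is not representable and the limit degrades to ULONG_MAX, while 64-bit builds keep the full 5000000000 ceiling. A named-constant sketch of the same clamp; the macro name is hypothetical, not from this patch:

/* Hypothetical named constant showing the build-time clamp; not part of
 * the patch.  On 32-bit, ULONG_MAX (~4.29 GHz) is the largest representable cap.
 */
#ifdef CONFIG_64BIT
#define DSI_PHY_7NM_MAX_PLL_RATE	5000000000UL
#else
#define DSI_PHY_7NM_MAX_PLL_RATE	ULONG_MAX
#endif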
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index a3849220fc6a..1f5e9e9f9803 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -8,19 +8,27 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 42301 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41874 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2020-07-23 21:58:14)
-
-Copyright (C) 2013-2020 by the following authors:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 981 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 15291 bytes, from 2021-06-15 22:36:13)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 10953 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_5nm.xml ( 10900 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-02-18 16:45:44)
+
+Copyright (C) 2013-2021 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h
index 7aed6cf27790..7907e0f5988f 100644
--- a/drivers/gpu/drm/msm/edp/edp.xml.h
+++ b/drivers/gpu/drm/msm/edp/edp.xml.h
@@ -8,19 +8,27 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 42301 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41874 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2020-07-23 21:58:14)
-
-Copyright (C) 2013-2020 by the following authors:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 981 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 15291 bytes, from 2021-06-15 22:36:13)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 10953 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_5nm.xml ( 10900 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-02-18 16:45:44)
+
+Copyright (C) 2013-2021 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index 72c95b60b829..a1cede6cfd02 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -8,19 +8,27 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 42301 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41874 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2020-07-23 21:58:14)
-
-Copyright (C) 2013-2020 by the following authors:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 981 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 15291 bytes, from 2021-06-15 22:36:13)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 10953 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_5nm.xml ( 10900 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-02-18 16:45:44)
+
+Copyright (C) 2013-2021 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index 85be1b1f19a4..4bf34cea1c89 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -8,19 +8,27 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 42301 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41874 bytes, from 2020-07-23 21:58:14)
-- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2020-07-23 21:58:14)
-
-Copyright (C) 2013-2020 by the following authors:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 981 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 15291 bytes, from 2021-06-15 22:36:13)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-06-05 21:37:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 10953 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_5nm.xml ( 10900 bytes, from 2021-05-21 19:18:08)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-02-18 16:45:44)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-02-18 16:45:44)
+
+Copyright (C) 2013-2021 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index d611cc8e54a4..09d2d279c30a 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -108,6 +108,31 @@ static const struct file_operations msm_gpu_fops = {
.release = msm_gpu_release,
};
+static unsigned long last_shrink_freed;
+
+static int
+shrink_get(void *data, u64 *val)
+{
+ *val = last_shrink_freed;
+
+ return 0;
+}
+
+static int
+shrink_set(void *data, u64 val)
+{
+ struct drm_device *dev = data;
+
+ last_shrink_freed = msm_gem_shrinker_shrink(dev, val);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(shrink_fops,
+ shrink_get, shrink_set,
+ "0x%08llx\n");
+
+
static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -226,6 +251,12 @@ void msm_debugfs_init(struct drm_minor *minor)
debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
dev, &msm_gpu_fops);
+ debugfs_create_u32("hangcheck_period_ms", 0600, minor->debugfs_root,
+ &priv->hangcheck_period);
+
+ debugfs_create_file("shrink", S_IRWXU, minor->debugfs_root,
+ dev, &shrink_fops);
+
if (priv->kms && priv->kms->funcs->debugfs_init)
priv->kms->funcs->debugfs_init(priv->kms, minor);
}
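In the debugfs change above, shrink_set() hands the written value straight to msm_gem_shrinker_shrink() as the amount to scan, and shrink_get() reports what the last pass freed; the file is plumbed through the kernel's simple-attribute helpers. A generic, self-contained sketch of that pattern, with hypothetical example_* names, assuming <linux/debugfs.h> and <linux/fs.h>:

/* Generic shape of a debugfs simple attribute, as used by the "shrink"
 * entry above; the example_* names are illustrative only.
 */
static u64 example_value;

static int example_get(void *data, u64 *val)
{
	*val = example_value;
	return 0;
}

static int example_set(void *data, u64 val)
{
	example_value = val;	/* a real knob would act on "val" here */
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(example_fops, example_get, example_set, "0x%08llx\n");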
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index fe7d17cd35ec..9b8fa2ad0d84 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*/
@@ -19,6 +19,7 @@
#include <drm/drm_of.h>
#include <drm/drm_vblank.h>
+#include "disp/msm_disp_snapshot.h"
#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
@@ -40,9 +41,10 @@
* - 1.5.0 - Add SUBMITQUERY_QUERY ioctl
* - 1.6.0 - Syncobj support
* - 1.7.0 - Add MSM_PARAM_SUSPENDS to access suspend count
+ * - 1.8.0 - Add MSM_BO_CACHED_COHERENT for supported GPUs (a6xx)
*/
#define MSM_VERSION_MAJOR 1
-#define MSM_VERSION_MINOR 7
+#define MSM_VERSION_MINOR 8
#define MSM_VERSION_PATCHLEVEL 0
static const struct drm_mode_config_funcs mode_config_funcs = {
@@ -123,7 +125,7 @@ struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
}
static void __iomem *_msm_ioremap(struct platform_device *pdev, const char *name,
- const char *dbgname, bool quiet)
+ const char *dbgname, bool quiet, phys_addr_t *psize)
{
struct resource *res;
unsigned long size;
@@ -152,19 +154,28 @@ static void __iomem *_msm_ioremap(struct platform_device *pdev, const char *name
if (reglog)
printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);
+ if (psize)
+ *psize = size;
+
return ptr;
}
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
const char *dbgname)
{
- return _msm_ioremap(pdev, name, dbgname, false);
+ return _msm_ioremap(pdev, name, dbgname, false, NULL);
}
void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name,
const char *dbgname)
{
- return _msm_ioremap(pdev, name, dbgname, true);
+ return _msm_ioremap(pdev, name, dbgname, true, NULL);
+}
+
+void __iomem *msm_ioremap_size(struct platform_device *pdev, const char *name,
+ const char *dbgname, phys_addr_t *psize)
+{
+ return _msm_ioremap(pdev, name, dbgname, false, psize);
}
void msm_writel(u32 data, void __iomem *addr)
@@ -278,6 +289,8 @@ static int msm_drm_uninit(struct device *dev)
msm_fbdev_free(ddev);
#endif
+ msm_disp_snapshot_destroy(ddev);
+
drm_mode_config_cleanup(ddev);
pm_runtime_get_sync(dev);
@@ -446,6 +459,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
mdss = priv->mdss;
priv->wq = alloc_ordered_workqueue("msm", 0);
+ priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;
INIT_LIST_HEAD(&priv->objects);
mutex_init(&priv->obj_lock);
@@ -523,6 +537,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
priv->event_thread[i].worker = kthread_create_worker(0,
"crtc_event:%d", priv->event_thread[i].crtc_id);
if (IS_ERR(priv->event_thread[i].worker)) {
+ ret = PTR_ERR(priv->event_thread[i].worker);
DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
goto err_msm_uninit;
}
@@ -550,6 +565,10 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
if (ret)
goto err_msm_uninit;
+ ret = msm_disp_snapshot_init(ddev);
+ if (ret)
+ DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
+
drm_mode_config_reset(ddev);
#ifdef CONFIG_DRM_FBDEV_EMULATION
@@ -688,7 +707,7 @@ int msm_crtc_enable_vblank(struct drm_crtc *crtc)
struct msm_kms *kms = priv->kms;
if (!kms)
return -ENXIO;
- DBG("dev=%p, crtc=%u", dev, pipe);
+ drm_dbg_vbl(dev, "crtc=%u", pipe);
return vblank_ctrl_queue_work(priv, pipe, true);
}
@@ -700,7 +719,7 @@ void msm_crtc_disable_vblank(struct drm_crtc *crtc)
struct msm_kms *kms = priv->kms;
if (!kms)
return;
- DBG("dev=%p, crtc=%u", dev, pipe);
+ drm_dbg_vbl(dev, "crtc=%u", pipe);
vblank_ctrl_queue_work(priv, pipe, false);
}
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 2668941df529..1a48a709ffb3 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -43,6 +43,7 @@ struct msm_gem_submit;
struct msm_fence_context;
struct msm_gem_address_space;
struct msm_gem_vma;
+struct msm_disp_state;
#define MAX_CRTCS 8
#define MAX_PLANES 20
@@ -167,6 +168,7 @@ struct msm_drm_private {
struct msm_file_private *lastctx;
/* gpu is only set on open(), but we need this info earlier */
bool is_a2xx;
+ bool has_cached_coherent;
struct drm_fb_helper *fbdev;
@@ -242,6 +244,9 @@ struct msm_drm_private {
struct shrinker shrinker;
struct drm_atomic_state *pm_state;
+
+ /* For hang detection, in ms */
+ unsigned int hangcheck_period;
};
struct msm_format {
@@ -294,6 +299,10 @@ bool msm_use_mmu(struct drm_device *dev);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
+#ifdef CONFIG_DEBUG_FS
+unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan);
+#endif
+
void msm_gem_shrinker_init(struct drm_device *dev);
void msm_gem_shrinker_cleanup(struct drm_device *dev);
@@ -340,6 +349,8 @@ void __init msm_dsi_register(void);
void __exit msm_dsi_unregister(void);
int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
struct drm_encoder *encoder);
+void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi);
+
#else
static inline void __init msm_dsi_register(void)
{
@@ -353,6 +364,10 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
{
return -EINVAL;
}
+static inline void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi)
+{
+}
+
#endif
#ifdef CONFIG_DRM_MSM_DP
@@ -367,6 +382,7 @@ void msm_dp_display_mode_set(struct msm_dp *dp, struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void msm_dp_irq_postinstall(struct msm_dp *dp_display);
+void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display);
void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor);
@@ -410,6 +426,10 @@ static inline void msm_dp_irq_postinstall(struct msm_dp *dp_display)
{
}
+static inline void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display)
+{
+}
+
static inline void msm_dp_debugfs_init(struct msm_dp *dp_display,
struct drm_minor *minor)
{
@@ -448,6 +468,8 @@ struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
const char *name);
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
const char *dbgname);
+void __iomem *msm_ioremap_size(struct platform_device *pdev, const char *name,
+ const char *dbgname, phys_addr_t *size);
void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name,
const char *dbgname);
void msm_writel(u32 data, void __iomem *addr);
@@ -502,7 +524,7 @@ static inline int align_pitch(int width, int bpp)
/* for the generated headers: */
#define INVALID_IDX(idx) ({BUG(); 0;})
#define fui(x) ({BUG(); 0;})
-#define util_float_to_half(x) ({BUG(); 0;})
+#define _mesa_float_to_half(x) ({BUG(); 0;})
#define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT)
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 91c0e493aed5..4d34df5354e0 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -61,7 +61,7 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
for (i = 0; i < n; i++) {
ret = msm_gem_get_and_pin_iova(fb->obj[i], aspace, &iova);
- DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
+ drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
if (ret)
return ret;
}
@@ -140,8 +140,8 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
const struct msm_format *format;
int ret, i, n;
- DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
- dev, mode_cmd, mode_cmd->width, mode_cmd->height,
+ drm_dbg_state(dev, "create framebuffer: mode_cmd=%p (%dx%d@%4.4s)",
+ mode_cmd, mode_cmd->width, mode_cmd->height,
(char *)&mode_cmd->pixel_format);
n = info->num_planes;
@@ -194,7 +194,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
goto fail;
}
- DBG("create: FB ID: %d (%p)", fb->base.id, fb);
+ drm_dbg_state(dev, "create: FB ID: %d (%p)", fb->base.id, fb);
return fb;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 72a07e311de3..39c35414d7b5 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -211,6 +211,13 @@ void msm_gem_put_pages(struct drm_gem_object *obj)
msm_gem_unlock(obj);
}
+static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
+{
+ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+ return pgprot_writecombine(prot);
+ return prot;
+}
+
int msm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma)
{
@@ -218,22 +225,7 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP;
-
- if (msm_obj->flags & MSM_BO_WC) {
- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
- } else if (msm_obj->flags & MSM_BO_UNCACHED) {
- vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
- } else {
- /*
- * Shunt off cached objs to shmem file so they have their own
- * address_space (so unmap_mapping_range does what we want,
- * in particular in the case of mmap'd dmabufs)
- */
- vma->vm_pgoff = 0;
- vma_set_file(vma, obj->filp);
-
- vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- }
+ vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
return 0;
}
@@ -372,7 +364,7 @@ static void del_vma(struct msm_gem_vma *vma)
kfree(vma);
}
-/**
+/*
* If close is true, this also closes the VMA (releasing the allocated
* iova range) in addition to removing the iommu mapping. In the eviction
* case (!close), we keep the iova allocated, but only remove the iommu
@@ -451,6 +443,9 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
if (msm_obj->flags & MSM_BO_MAP_PRIV)
prot |= IOMMU_PRIV;
+ if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
+ prot |= IOMMU_CACHE;
+
GEM_WARN_ON(!msm_gem_is_locked(obj));
if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
@@ -653,7 +648,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
goto fail;
}
msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
- VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
if (msm_obj->vaddr == NULL) {
ret = -ENOMEM;
goto fail;
@@ -773,7 +768,7 @@ void msm_gem_purge(struct drm_gem_object *obj)
0, (loff_t)-1);
}
-/**
+/*
* Unpin the backing pages and make them available to be swapped out.
*/
void msm_gem_evict(struct drm_gem_object *obj)
@@ -1161,6 +1156,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
uint32_t size, uint32_t flags,
struct drm_gem_object **obj)
{
+ struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
switch (flags & MSM_BO_CACHE_MASK) {
@@ -1168,6 +1164,10 @@ static int msm_gem_new_impl(struct drm_device *dev,
case MSM_BO_CACHED:
case MSM_BO_WC:
break;
+ case MSM_BO_CACHED_COHERENT:
+ if (priv->has_cached_coherent)
+ break;
+ fallthrough;
default:
DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
(flags & MSM_BO_CACHE_MASK));
@@ -1238,6 +1238,13 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
to_msm_bo(obj)->vram_node = &vma->node;
+ /* Call chain get_pages() -> update_inactive() tries to
+ * access msm_obj->mm_list, but it is not initialized yet.
+ * To avoid NULL pointer dereference error, initialize
+ * mm_list to be empty.
+ */
+ INIT_LIST_HEAD(&msm_obj->mm_list);
+
msm_gem_lock(obj);
pages = get_pages(obj);
msm_gem_unlock(obj);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 03e2cc2a2ce1..405f8411e395 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -328,6 +328,7 @@ struct msm_gem_submit {
struct dma_fence *fence;
struct msm_gpu_submitqueue *queue;
struct pid *pid; /* submitting process */
+ bool fault_dumped; /* Limit devcoredump dumping to one per submit */
bool valid; /* true if no cmdstream patching needed */
bool in_rb; /* "sudo" mode, copy cmds into RB */
struct msm_ringbuffer *ring;
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 1187ecf9d647..0f1b29ee04a9 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -145,6 +145,24 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
return (freed > 0) ? freed : SHRINK_STOP;
}
+#ifdef CONFIG_DEBUG_FS
+unsigned long
+msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct shrink_control sc = {
+ .nr_to_scan = nr_to_scan,
+ };
+ int ret;
+
+ fs_reclaim_acquire(GFP_KERNEL);
+ ret = msm_gem_shrinker_scan(&priv->shrinker, &sc);
+ fs_reclaim_release(GFP_KERNEL);
+
+ return ret;
+}
+#endif
+
/* since we don't know any better, lets bail after a few
* and if necessary the shrinker will be invoked again.
* Seems better than unmapping *everything*
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 6ff6df6c4791..b71da71a3dd8 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -50,6 +50,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
submit->cmd = (void *)&submit->bos[nr_bos];
submit->queue = queue;
submit->ring = gpu->rb[queue->prio];
+ submit->fault_dumped = false;
/* initially, until copy_from_user() and bo lookup succeeds: */
submit->nr_bos = 0;
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 9dd1c58430ab..0ebf7bc6ad09 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -387,6 +387,7 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
/* Fill in the additional crash state information */
state->comm = kstrdup(comm, GFP_KERNEL);
state->cmd = kstrdup(cmd, GFP_KERNEL);
+ state->fault_info = gpu->fault_info;
if (submit) {
int i, nr = 0;
@@ -559,10 +560,57 @@ static void recover_worker(struct kthread_work *work)
msm_gpu_retire(gpu);
}
+static void fault_worker(struct kthread_work *work)
+{
+ struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work);
+ struct drm_device *dev = gpu->dev;
+ struct msm_gem_submit *submit;
+ struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
+ char *comm = NULL, *cmd = NULL;
+
+ mutex_lock(&dev->struct_mutex);
+
+ submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
+ if (submit && submit->fault_dumped)
+ goto resume_smmu;
+
+ if (submit) {
+ struct task_struct *task;
+
+ task = get_pid_task(submit->pid, PIDTYPE_PID);
+ if (task) {
+ comm = kstrdup(task->comm, GFP_KERNEL);
+ cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
+ put_task_struct(task);
+ }
+
+ /*
+ * When we get GPU iova faults, we can get 1000s of them,
+ * but we really only want to log the first one.
+ */
+ submit->fault_dumped = true;
+ }
+
+ /* Record the crash state */
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
+ pm_runtime_put_sync(&gpu->pdev->dev);
+
+ kfree(cmd);
+ kfree(comm);
+
+resume_smmu:
+ memset(&gpu->fault_info, 0, sizeof(gpu->fault_info));
+ gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
+
+ mutex_unlock(&dev->struct_mutex);
+}
+
static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
mod_timer(&gpu->hangcheck_timer,
- round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
+ round_jiffies_up(jiffies + msecs_to_jiffies(priv->hangcheck_period)));
}
static void hangcheck_handler(struct timer_list *t)
@@ -922,6 +970,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
INIT_LIST_HEAD(&gpu->active_list);
kthread_init_work(&gpu->retire_work, retire_worker);
kthread_init_work(&gpu->recover_work, recover_worker);
+ kthread_init_work(&gpu->fault_work, fault_worker);
timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 18baf935e143..ef41ec09f59c 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -71,6 +71,15 @@ struct msm_gpu_funcs {
uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
};
+/* Additional state for iommu faults: */
+struct msm_gpu_fault_info {
+ u64 ttbr0;
+ unsigned long iova;
+ int flags;
+ const char *type;
+ const char *block;
+};
+
struct msm_gpu {
const char *name;
struct drm_device *dev;
@@ -118,23 +127,19 @@ struct msm_gpu {
struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
uint32_t fast_rate;
- /* The gfx-mem interconnect path that's used by all GPU types. */
- struct icc_path *icc_path;
-
- /*
- * Second interconnect path for some A3xx and all A4xx GPUs to the
- * On Chip MEMory (OCMEM).
- */
- struct icc_path *ocmem_icc_path;
-
/* Hang and Inactivity Detection:
*/
#define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */
-#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
-#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
+#define DRM_MSM_HANGCHECK_DEFAULT_PERIOD 500 /* in ms */
struct timer_list hangcheck_timer;
+ /* Fault info for most recent iova fault: */
+ struct msm_gpu_fault_info fault_info;
+
+ /* work for handling GPU iova faults: */
+ struct kthread_work fault_work;
+
/* work for handling GPU recovery: */
struct kthread_work recover_work;
@@ -242,6 +247,8 @@ struct msm_gpu_state {
char *comm;
char *cmd;
+ struct msm_gpu_fault_info fault_info;
+
int nr_bos;
struct msm_gpu_state_bo *bos;
};
diff --git a/drivers/gpu/drm/msm/msm_gpummu.c b/drivers/gpu/drm/msm/msm_gpummu.c
index 379496186c7f..f7d1945e0c9f 100644
--- a/drivers/gpu/drm/msm/msm_gpummu.c
+++ b/drivers/gpu/drm/msm/msm_gpummu.c
@@ -68,6 +68,10 @@ static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
return 0;
}
+static void msm_gpummu_resume_translation(struct msm_mmu *mmu)
+{
+}
+
static void msm_gpummu_destroy(struct msm_mmu *mmu)
{
struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
@@ -83,6 +87,7 @@ static const struct msm_mmu_funcs funcs = {
.map = msm_gpummu_map,
.unmap = msm_gpummu_unmap,
.destroy = msm_gpummu_destroy,
+ .resume_translation = msm_gpummu_resume_translation,
};
struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu)
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 50d881794758..eed2a762e9dd 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -184,6 +184,9 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
* the arm-smmu driver as a trigger to set up TTBR0
*/
if (atomic_inc_return(&iommu->pagetables) == 1) {
+ /* Enable stall on iommu fault: */
+ adreno_smmu->set_stall(adreno_smmu->cookie, true);
+
ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg);
if (ret) {
free_io_pgtable_ops(pagetable->pgtbl_ops);
@@ -211,12 +214,28 @@ static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags, void *arg)
{
struct msm_iommu *iommu = arg;
+ struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(iommu->base.dev);
+ struct adreno_smmu_fault_info info, *ptr = NULL;
+
+ if (adreno_smmu->get_fault_info) {
+ adreno_smmu->get_fault_info(adreno_smmu->cookie, &info);
+ ptr = &info;
+ }
+
if (iommu->base.handler)
- return iommu->base.handler(iommu->base.arg, iova, flags);
+ return iommu->base.handler(iommu->base.arg, iova, flags, ptr);
+
pr_warn_ratelimited("*** fault: iova=%16lx, flags=%d\n", iova, flags);
return 0;
}
+static void msm_iommu_resume_translation(struct msm_mmu *mmu)
+{
+ struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(mmu->dev);
+
+ adreno_smmu->resume_translation(adreno_smmu->cookie, true);
+}
+
static void msm_iommu_detach(struct msm_mmu *mmu)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
@@ -264,6 +283,7 @@ static const struct msm_mmu_funcs funcs = {
.map = msm_iommu_map,
.unmap = msm_iommu_unmap,
.destroy = msm_iommu_destroy,
+ .resume_translation = msm_iommu_resume_translation,
};
struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index d8151a89e163..086a2d59b8c8 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -122,6 +122,10 @@ struct msm_kms_funcs {
bool cmd_mode);
/* cleanup: */
void (*destroy)(struct msm_kms *kms);
+
+ /* snapshot: */
+ void (*snapshot)(struct msm_disp_state *disp_state, struct msm_kms *kms);
+
#ifdef CONFIG_DEBUG_FS
/* debugfs: */
int (*debugfs_init)(struct msm_kms *kms, struct drm_minor *minor);
@@ -152,6 +156,11 @@ struct msm_kms {
/* mapper-id used to request GEM buffer mapped for scanout: */
struct msm_gem_address_space *aspace;
+ /* disp snapshot support */
+ struct kthread_worker *dump_worker;
+ struct kthread_work dump_work;
+ struct mutex dump_mutex;
+
/*
* For async commit, where ->flush_commit() and later happens
* from the crtc's pending_timer close to end of the frame:
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 61ade89d9e48..de158e1bf765 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -15,6 +15,7 @@ struct msm_mmu_funcs {
size_t len, int prot);
int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
void (*destroy)(struct msm_mmu *mmu);
+ void (*resume_translation)(struct msm_mmu *mmu);
};
enum msm_mmu_type {
@@ -26,7 +27,7 @@ enum msm_mmu_type {
struct msm_mmu {
const struct msm_mmu_funcs *funcs;
struct device *dev;
- int (*handler)(void *arg, unsigned long iova, int flags);
+ int (*handler)(void *arg, unsigned long iova, int flags, void *data);
void *arg;
enum msm_mmu_type type;
};
@@ -43,7 +44,7 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
- int (*handler)(void *arg, unsigned long iova, int flags))
+ int (*handler)(void *arg, unsigned long iova, int flags, void *data))
{
mmu->arg = arg;
mmu->handler = handler;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if000c.h b/drivers/gpu/drm/nouveau/include/nvif/if000c.h
index d6dd40f21eed..9c7ff56831c5 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if000c.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if000c.h
@@ -77,6 +77,7 @@ struct nvif_vmm_pfnmap_v0 {
#define NVIF_VMM_PFNMAP_V0_APER 0x00000000000000f0ULL
#define NVIF_VMM_PFNMAP_V0_HOST 0x0000000000000000ULL
#define NVIF_VMM_PFNMAP_V0_VRAM 0x0000000000000010ULL
+#define NVIF_VMM_PFNMAP_V0_A 0x0000000000000004ULL
#define NVIF_VMM_PFNMAP_V0_W 0x0000000000000002ULL
#define NVIF_VMM_PFNMAP_V0_V 0x0000000000000001ULL
#define NVIF_VMM_PFNMAP_V0_NONE 0x0000000000000000ULL
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 085023624fb0..6d07e653f82d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -445,6 +445,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
break;
case TTM_PL_TT:
error |= !(domain & NOUVEAU_GEM_DOMAIN_GART);
+ break;
default:
break;
}
@@ -551,7 +552,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
int i, j;
- if (!ttm_dma)
+ if (!ttm_dma || !ttm_dma->dma_address)
return;
if (!ttm_dma->pages) {
NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
@@ -587,7 +588,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
int i, j;
- if (!ttm_dma)
+ if (!ttm_dma || !ttm_dma->dma_address)
return;
if (!ttm_dma->pages) {
NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 2a298c171d4d..22b83a6577eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -157,6 +157,7 @@ nouveau_conn_atomic_set_property(struct drm_connector *connector,
default:
break;
}
+ break;
case DRM_MODE_SCALE_FULLSCREEN:
case DRM_MODE_SCALE_CENTER:
case DRM_MODE_SCALE_ASPECT:
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 347488685f74..60019d0532fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -93,7 +93,22 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
if (ret)
return -EINVAL;
- return 0;
+ ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
+ if (ret)
+ goto error;
+
+ if (nvbo->bo.moving)
+ ret = dma_fence_wait(nvbo->bo.moving, true);
+
+ ttm_bo_unreserve(&nvbo->bo);
+ if (ret)
+ goto error;
+
+ return ret;
+
+error:
+ nouveau_bo_unpin(nvbo);
+ return ret;
}
void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 1c3f890377d2..82b583f5fca8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -35,6 +35,7 @@
#include <linux/sched/mm.h>
#include <linux/sort.h>
#include <linux/hmm.h>
+#include <linux/rmap.h>
struct nouveau_svm {
struct nouveau_drm *drm;
@@ -67,6 +68,11 @@ struct nouveau_svm {
} buffer[1];
};
+#define FAULT_ACCESS_READ 0
+#define FAULT_ACCESS_WRITE 1
+#define FAULT_ACCESS_ATOMIC 2
+#define FAULT_ACCESS_PREFETCH 3
+
#define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
#define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)
@@ -265,7 +271,7 @@ nouveau_svmm_invalidate_range_start(struct mmu_notifier *mn,
* the invalidation is handled as part of the migration process.
*/
if (update->event == MMU_NOTIFY_MIGRATE &&
- update->migrate_pgmap_owner == svmm->vmm->cli->drm->dev)
+ update->owner == svmm->vmm->cli->drm->dev)
goto out;
if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
@@ -412,6 +418,24 @@ nouveau_svm_fault_cancel_fault(struct nouveau_svm *svm,
}
static int
+nouveau_svm_fault_priority(u8 fault)
+{
+ switch (fault) {
+ case FAULT_ACCESS_PREFETCH:
+ return 0;
+ case FAULT_ACCESS_READ:
+ return 1;
+ case FAULT_ACCESS_WRITE:
+ return 2;
+ case FAULT_ACCESS_ATOMIC:
+ return 3;
+ default:
+ WARN_ON_ONCE(1);
+ return -1;
+ }
+}
+
+static int
nouveau_svm_fault_cmp(const void *a, const void *b)
{
const struct nouveau_svm_fault *fa = *(struct nouveau_svm_fault **)a;
@@ -421,9 +445,8 @@ nouveau_svm_fault_cmp(const void *a, const void *b)
return ret;
if ((ret = (s64)fa->addr - fb->addr))
return ret;
- /*XXX: atomic? */
- return (fa->access == 0 || fa->access == 3) -
- (fb->access == 0 || fb->access == 3);
+ return nouveau_svm_fault_priority(fa->access) -
+ nouveau_svm_fault_priority(fb->access);
}
static void
@@ -487,6 +510,10 @@ static bool nouveau_svm_range_invalidate(struct mmu_interval_notifier *mni,
struct svm_notifier *sn =
container_of(mni, struct svm_notifier, notifier);
+ if (range->event == MMU_NOTIFY_EXCLUSIVE &&
+ range->owner == sn->svmm->vmm->cli->drm->dev)
+ return true;
+
/*
* serializes the update to mni->invalidate_seq done by caller and
* prevents invalidation of the PTE from progressing while HW is being
@@ -555,6 +582,71 @@ static void nouveau_hmm_convert_pfn(struct nouveau_drm *drm,
args->p.phys[0] |= NVIF_VMM_PFNMAP_V0_W;
}
+static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
+ struct nouveau_drm *drm,
+ struct nouveau_pfnmap_args *args, u32 size,
+ struct svm_notifier *notifier)
+{
+ unsigned long timeout =
+ jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ struct mm_struct *mm = svmm->notifier.mm;
+ struct page *page;
+ unsigned long start = args->p.addr;
+ unsigned long notifier_seq;
+ int ret = 0;
+
+ ret = mmu_interval_notifier_insert(&notifier->notifier, mm,
+ args->p.addr, args->p.size,
+ &nouveau_svm_mni_ops);
+ if (ret)
+ return ret;
+
+ while (true) {
+ if (time_after(jiffies, timeout)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ notifier_seq = mmu_interval_read_begin(&notifier->notifier);
+ mmap_read_lock(mm);
+ ret = make_device_exclusive_range(mm, start, start + PAGE_SIZE,
+ &page, drm->dev);
+ mmap_read_unlock(mm);
+ if (ret <= 0 || !page) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mutex_lock(&svmm->mutex);
+ if (!mmu_interval_read_retry(&notifier->notifier,
+ notifier_seq))
+ break;
+ mutex_unlock(&svmm->mutex);
+ }
+
+ /* Map the page on the GPU. */
+ args->p.page = 12;
+ args->p.size = PAGE_SIZE;
+ args->p.addr = start;
+ args->p.phys[0] = page_to_phys(page) |
+ NVIF_VMM_PFNMAP_V0_V |
+ NVIF_VMM_PFNMAP_V0_W |
+ NVIF_VMM_PFNMAP_V0_A |
+ NVIF_VMM_PFNMAP_V0_HOST;
+
+ svmm->vmm->vmm.object.client->super = true;
+ ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
+ svmm->vmm->vmm.object.client->super = false;
+ mutex_unlock(&svmm->mutex);
+
+ unlock_page(page);
+ put_page(page);
+
+out:
+ mmu_interval_notifier_remove(&notifier->notifier);
+ return ret;
+}
+
static int nouveau_range_fault(struct nouveau_svmm *svmm,
struct nouveau_drm *drm,
struct nouveau_pfnmap_args *args, u32 size,
@@ -567,18 +659,27 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
unsigned long hmm_pfns[1];
struct hmm_range range = {
.notifier = &notifier->notifier,
- .start = notifier->notifier.interval_tree.start,
- .end = notifier->notifier.interval_tree.last + 1,
.default_flags = hmm_flags,
.hmm_pfns = hmm_pfns,
.dev_private_owner = drm->dev,
};
- struct mm_struct *mm = notifier->notifier.mm;
+ struct mm_struct *mm = svmm->notifier.mm;
int ret;
+ ret = mmu_interval_notifier_insert(&notifier->notifier, mm,
+ args->p.addr, args->p.size,
+ &nouveau_svm_mni_ops);
+ if (ret)
+ return ret;
+
+ range.start = notifier->notifier.interval_tree.start;
+ range.end = notifier->notifier.interval_tree.last + 1;
+
while (true) {
- if (time_after(jiffies, timeout))
- return -EBUSY;
+ if (time_after(jiffies, timeout)) {
+ ret = -EBUSY;
+ goto out;
+ }
range.notifier_seq = mmu_interval_read_begin(range.notifier);
mmap_read_lock(mm);
@@ -587,7 +688,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
if (ret) {
if (ret == -EBUSY)
continue;
- return ret;
+ goto out;
}
mutex_lock(&svmm->mutex);
@@ -606,6 +707,9 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
svmm->vmm->vmm.object.client->super = false;
mutex_unlock(&svmm->mutex);
+out:
+ mmu_interval_notifier_remove(&notifier->notifier);
+
return ret;
}
@@ -625,7 +729,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
unsigned long hmm_flags;
u64 inst, start, limit;
int fi, fn;
- int replay = 0, ret;
+ int replay = 0, atomic = 0, ret;
/* Parse available fault buffer entries into a cache, and update
* the GET pointer so HW can reuse the entries.
@@ -706,12 +810,14 @@ nouveau_svm_fault(struct nvif_notify *notify)
/*
* Determine required permissions based on GPU fault
* access flags.
- * XXX: atomic?
*/
switch (buffer->fault[fi]->access) {
case 0: /* READ. */
hmm_flags = HMM_PFN_REQ_FAULT;
break;
+ case 2: /* ATOMIC. */
+ atomic = true;
+ break;
case 3: /* PREFETCH. */
hmm_flags = 0;
break;
@@ -727,14 +833,14 @@ nouveau_svm_fault(struct nvif_notify *notify)
}
notifier.svmm = svmm;
- ret = mmu_interval_notifier_insert(&notifier.notifier, mm,
- args.i.p.addr, args.i.p.size,
- &nouveau_svm_mni_ops);
- if (!ret) {
+ if (atomic)
+ ret = nouveau_atomic_range_fault(svmm, svm->drm,
+ &args.i, sizeof(args),
+ &notifier);
+ else
ret = nouveau_range_fault(svmm, svm->drm, &args.i,
- sizeof(args), hmm_flags, &notifier);
- mmu_interval_notifier_remove(&notifier.notifier);
- }
+ sizeof(args), hmm_flags,
+ &notifier);
mmput(mm);
limit = args.i.p.addr + args.i.p.size;
@@ -750,11 +856,15 @@ nouveau_svm_fault(struct nvif_notify *notify)
*/
if (buffer->fault[fn]->svmm != svmm ||
buffer->fault[fn]->addr >= limit ||
- (buffer->fault[fi]->access == 0 /* READ. */ &&
+ (buffer->fault[fi]->access == FAULT_ACCESS_READ &&
!(args.phys[0] & NVIF_VMM_PFNMAP_V0_V)) ||
- (buffer->fault[fi]->access != 0 /* READ. */ &&
- buffer->fault[fi]->access != 3 /* PREFETCH. */ &&
- !(args.phys[0] & NVIF_VMM_PFNMAP_V0_W)))
+ (buffer->fault[fi]->access != FAULT_ACCESS_READ &&
+ buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH &&
+ !(args.phys[0] & NVIF_VMM_PFNMAP_V0_W)) ||
+ (buffer->fault[fi]->access != FAULT_ACCESS_READ &&
+ buffer->fault[fi]->access != FAULT_ACCESS_WRITE &&
+ buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH &&
+ !(args.phys[0] & NVIF_VMM_PFNMAP_V0_A)))
break;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
index 83067763c0ec..e1d31c62f9ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
@@ -313,6 +313,7 @@ nv50_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
default:
break;
}
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
index a2b179568970..f6188aa9171c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -178,6 +178,7 @@ void nvkm_vmm_unmap_region(struct nvkm_vmm *, struct nvkm_vma *);
#define NVKM_VMM_PFN_APER 0x00000000000000f0ULL
#define NVKM_VMM_PFN_HOST 0x0000000000000000ULL
#define NVKM_VMM_PFN_VRAM 0x0000000000000010ULL
+#define NVKM_VMM_PFN_A 0x0000000000000004ULL
#define NVKM_VMM_PFN_W 0x0000000000000002ULL
#define NVKM_VMM_PFN_V 0x0000000000000001ULL
#define NVKM_VMM_PFN_NONE 0x0000000000000000ULL
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
index 236db5570771..f02abd9cb4dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
@@ -88,6 +88,9 @@ gp100_vmm_pgt_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
if (!(*map->pfn & NVKM_VMM_PFN_W))
data |= BIT_ULL(6); /* RO. */
+ if (!(*map->pfn & NVKM_VMM_PFN_A))
+ data |= BIT_ULL(7); /* Atomic disable. */
+
if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) {
addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT;
addr = dma_map_page(dev, pfn_to_page(addr), 0,
@@ -322,6 +325,9 @@ gp100_vmm_pd0_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
if (!(*map->pfn & NVKM_VMM_PFN_W))
data |= BIT_ULL(6); /* RO. */
+ if (!(*map->pfn & NVKM_VMM_PFN_A))
+ data |= BIT_ULL(7); /* Atomic disable. */
+
if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) {
addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT;
addr = dma_map_page(dev, pfn_to_page(addr), 0,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
index 2b031d4eaeb6..684aff7437ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
@@ -41,6 +41,7 @@ pwm_info(struct nvkm_therm *therm, int line)
default:
break;
}
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
index ef70140c5b09..873cbd38e6d3 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
@@ -706,9 +706,7 @@ static int nt35510_power_on(struct nt35510 *nt)
if (ret)
return ret;
- ret = nt35510_read_id(nt);
- if (ret)
- return ret;
+ nt35510_read_id(nt);
/* Set up stuff in manufacturer control, page 1 */
ret = nt35510_send_long(nt, dsi, MCS_CMD_MAUCCTR,
diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index f484147fc3a6..c4b388850a13 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -383,6 +383,7 @@ MODULE_DEVICE_TABLE(spi, ld9040_ids);
static struct spi_driver ld9040_driver = {
.probe = ld9040_probe,
.remove = ld9040_remove,
+ .id_table = ld9040_ids,
.driver = {
.name = "panel-samsung-ld9040",
.of_match_table = ld9040_of_match,
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 19fd39d9a00c..37a1b6a6ad6d 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -127,7 +127,7 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
struct qxl_bo *qbo;
struct qxl_device *qdev;
- if (!qxl_ttm_bo_is_qxl_bo(bo))
+ if (!qxl_ttm_bo_is_qxl_bo(bo) || !bo->resource)
return;
qbo = to_qxl_bo(bo);
qdev = to_qxl(qbo->tbo.base.dev);
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 73ea5189dfb1..81b4de7be9f2 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -8584,9 +8584,7 @@ int cik_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Fence driver */
- r = radeon_fence_driver_init(rdev);
- if (r)
- return r;
+ radeon_fence_driver_init(rdev);
/* initialize memory controller */
r = cik_mc_init(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 8e9e88bf1f43..36a888e1b179 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -5208,9 +5208,7 @@ int evergreen_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Fence driver */
- r = radeon_fence_driver_init(rdev);
- if (r)
- return r;
+ radeon_fence_driver_init(rdev);
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index ab7bd3080217..4a364ca7a1be 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2375,9 +2375,7 @@ int cayman_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Fence driver */
- r = radeon_fence_driver_init(rdev);
- if (r)
- return r;
+ radeon_fence_driver_init(rdev);
/* initialize memory controller */
r = evergreen_mc_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 3c4e7c15fd15..ba724198b72e 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -4056,9 +4056,7 @@ int r100_init(struct radeon_device *rdev)
/* initialize VRAM */
r100_mc_init(rdev);
/* Fence driver */
- r = radeon_fence_driver_init(rdev);
- if (r)
- return r;
+ radeon_fence_driver_init(rdev);
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 92643dfdd8a8..621ff174dff3 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -1549,9 +1549,7 @@ int r300_init(struct radeon_device *rdev)
/* initialize memory controller */
r300_mc_init(rdev);
/* Fence driver */
- r = radeon_fence_driver_init(rdev);
- if (r)
- return r;
+ radeon_fence_driver_init(rdev);
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 1ed4407b91aa..7e6320e8c6a0 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -425,10 +425,7 @@ int r420_init(struct radeon_device *rdev)
r300_mc_init(rdev);
r420_debugfs(rdev);
/* Fence driver */
- r = radeon_fence_driver_init(rdev);
- if (r) {
- return r;
- }
+ radeon_fence_driver_init(rdev);
/* Memory manager */
r = radeon_bo_init(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index fc78e64ae727..6cbcaa845192 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -299,9 +299,7 @@ int r520_init(struct radeon_device *rdev)
r520_mc_init(rdev);
rv515_debugfs(rdev);
/* Fence driver */
- r = radeon_fence_driver_init(rdev);
- if (r)
- return r;
+ radeon_fence_driver_init(rdev);
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 7444dc0e0c0e..ca3fcae2adb5 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3282,9 +3282,7 @@ int r600_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Fence driver */
- r = radeon_fence_driver_init(rdev);
- if (r)
- return r;
+ radeon_fence_driver_init(rdev);
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 65301d6acf13..895776c421d4 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -385,7 +385,7 @@ struct radeon_fence {
};
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
-int radeon_fence_driver_init(struct radeon_device *rdev);
+void radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 406681317419..573154268d43 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1325,6 +1325,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
if (obj->import_attach) {
DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
+ drm_gem_object_put(obj);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 82ee8244c9b3..c8dd68152d65 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -374,13 +374,13 @@ radeon_pci_shutdown(struct pci_dev *pdev)
if (radeon_device_is_virtual())
radeon_pci_remove(pdev);
-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PPC64) || defined(CONFIG_MACH_LOONGSON64)
/*
* Some adapters need to be suspended before a
* shutdown occurs in order to prevent an error
- * during kexec.
- * Make this power specific becauase it breaks
- * some non-power boards.
+ * during kexec, shutdown or reboot.
+ * Make this power and Loongson specific because
+ * it breaks some other boards.
*/
radeon_suspend_kms(pci_get_drvdata(pdev), true, true, false);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 7ec581363e23..18f2c2e0dfb3 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -905,9 +905,8 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
* Not all asics have all rings, so each asic will only
* start the fence driver on the rings it has using
* radeon_fence_driver_start_ring().
- * Returns 0 for success.
*/
-int radeon_fence_driver_init(struct radeon_device *rdev)
+void radeon_fence_driver_init(struct radeon_device *rdev)
{
int ring;
@@ -917,8 +916,6 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
}
radeon_debugfs_fence_init(rdev);
-
- return 0;
}
/**
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 42a87948e28c..4a90807351e7 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -77,9 +77,19 @@ int radeon_gem_prime_pin(struct drm_gem_object *obj)
/* pin buffer into GTT */
ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
- if (likely(ret == 0))
- bo->prime_shared_count++;
-
+ if (unlikely(ret))
+ goto error;
+
+ if (bo->tbo.moving) {
+ ret = dma_fence_wait(bo->tbo.moving, false);
+ if (unlikely(ret)) {
+ radeon_bo_unpin(bo);
+ goto error;
+ }
+ }
+
+ bo->prime_shared_count++;
+error:
radeon_bo_unreserve(bo);
return ret;
}
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 8423bcc3302b..6383f7a34bd8 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -555,9 +555,7 @@ int rs400_init(struct radeon_device *rdev)
/* initialize memory controller */
rs400_mc_init(rdev);
/* Fence driver */
- r = radeon_fence_driver_init(rdev);
- if (r)
- return r;
+ radeon_fence_driver_init(rdev);
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5bf26058eec0..b2d22e25eee1 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -1132,9 +1132,7 @@ int rs600_init(struct radeon_device *rdev)
rs600_mc_init(rdev);
r100_debugfs_rbbm_init(rdev);
/* Fence driver */
- r = radeon_fence_driver_init(rdev);
- if (r)
- return r;
+ radeon_fence_driver_init(rdev);
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 7bc302a89232..14fb0819b8c1 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -850,9 +850,7 @@ int rs690_init(struct radeon_device *rdev)
rs690_mc_init(rdev);
rv515_debugfs(rdev);
/* Fence driver */
- r = radeon_fence_driver_init(rdev);
- if (r)
- return r;
+ radeon_fence_driver_init(rdev);
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 46a53dd38079..63fb06e8e2d7 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -648,9 +648,7 @@ int rv515_init(struct radeon_device *rdev)
rv515_mc_init(rdev);
rv515_debugfs(rdev);
/* Fence driver */
- r = radeon_fence_driver_init(rdev);
- if (r)
- return r;
+ radeon_fence_driver_init(rdev);
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 88e29ebaad46..74499307285b 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1941,9 +1941,7 @@ int rv770_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Fence driver */
- r = radeon_fence_driver_init(rdev);
- if (r)
- return r;
+ radeon_fence_driver_init(rdev);
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index d0e94b10e4c0..013e44ed0f39 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6857,9 +6857,7 @@ int si_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Fence driver */
- r = radeon_fence_driver_init(rdev);
- if (r)
- return r;
+ radeon_fence_driver_init(rdev);
/* initialize memory controller */
r = si_mc_init(rdev);
diff --git a/drivers/gpu/drm/selftests/test-drm_framebuffer.c b/drivers/gpu/drm/selftests/test-drm_framebuffer.c
index 789f22773dbc..61b44d3a6a61 100644
--- a/drivers/gpu/drm/selftests/test-drm_framebuffer.c
+++ b/drivers/gpu/drm/selftests/test-drm_framebuffer.c
@@ -8,6 +8,7 @@
#include <drm/drm_device.h>
#include <drm/drm_mode.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include "../drm_crtc_internal.h"
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index bbdfd5e26ec8..f75fb157f2ff 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -209,7 +209,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
goto err_disable_clk_tmds;
}
- ret = sun8i_hdmi_phy_probe(hdmi, phy_node);
+ ret = sun8i_hdmi_phy_get(hdmi, phy_node);
of_node_put(phy_node);
if (ret) {
dev_err(dev, "Couldn't get the HDMI PHY\n");
@@ -242,7 +242,6 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
cleanup_encoder:
drm_encoder_cleanup(encoder);
- sun8i_hdmi_phy_remove(hdmi);
err_disable_clk_tmds:
clk_disable_unprepare(hdmi->clk_tmds);
err_assert_ctrl_reset:
@@ -263,7 +262,6 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
struct sun8i_dw_hdmi *hdmi = dev_get_drvdata(dev);
dw_hdmi_unbind(hdmi->hdmi);
- sun8i_hdmi_phy_remove(hdmi);
clk_disable_unprepare(hdmi->clk_tmds);
reset_control_assert(hdmi->rst_ctrl);
gpiod_set_value(hdmi->ddc_en, 0);
@@ -320,7 +318,32 @@ static struct platform_driver sun8i_dw_hdmi_pltfm_driver = {
.of_match_table = sun8i_dw_hdmi_dt_ids,
},
};
-module_platform_driver(sun8i_dw_hdmi_pltfm_driver);
+
+static int __init sun8i_dw_hdmi_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&sun8i_dw_hdmi_pltfm_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&sun8i_hdmi_phy_driver);
+ if (ret) {
+ platform_driver_unregister(&sun8i_dw_hdmi_pltfm_driver);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void __exit sun8i_dw_hdmi_exit(void)
+{
+ platform_driver_unregister(&sun8i_dw_hdmi_pltfm_driver);
+ platform_driver_unregister(&sun8i_hdmi_phy_driver);
+}
+
+module_init(sun8i_dw_hdmi_init);
+module_exit(sun8i_dw_hdmi_exit);
MODULE_AUTHOR("Jernej Skrabec <jernej.skrabec@siol.net>");
MODULE_DESCRIPTION("Allwinner DW HDMI bridge");
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
index d4b55af0592f..74f6ed0e2570 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -195,14 +195,15 @@ struct sun8i_dw_hdmi {
struct gpio_desc *ddc_en;
};
+extern struct platform_driver sun8i_hdmi_phy_driver;
+
static inline struct sun8i_dw_hdmi *
encoder_to_sun8i_dw_hdmi(struct drm_encoder *encoder)
{
return container_of(encoder, struct sun8i_dw_hdmi, encoder);
}
-int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
-void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi);
+int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index 9994edf67509..c9239708d398 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -5,6 +5,7 @@
#include <linux/delay.h>
#include <linux/of_address.h>
+#include <linux/of_platform.h>
#include "sun8i_dw_hdmi.h"
@@ -597,10 +598,30 @@ static const struct of_device_id sun8i_hdmi_phy_of_table[] = {
{ /* sentinel */ }
};
-int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
+int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
+{
+ struct platform_device *pdev = of_find_device_by_node(node);
+ struct sun8i_hdmi_phy *phy;
+
+ if (!pdev)
+ return -EPROBE_DEFER;
+
+ phy = platform_get_drvdata(pdev);
+ if (!phy)
+ return -EPROBE_DEFER;
+
+ hdmi->phy = phy;
+
+ put_device(&pdev->dev);
+
+ return 0;
+}
+
+static int sun8i_hdmi_phy_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
- struct device *dev = hdmi->dev;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
struct sun8i_hdmi_phy *phy;
struct resource res;
void __iomem *regs;
@@ -704,7 +725,7 @@ int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
clk_prepare_enable(phy->clk_phy);
}
- hdmi->phy = phy;
+ platform_set_drvdata(pdev, phy);
return 0;
@@ -728,9 +749,9 @@ err_put_clk_bus:
return ret;
}
-void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
+static int sun8i_hdmi_phy_remove(struct platform_device *pdev)
{
- struct sun8i_hdmi_phy *phy = hdmi->phy;
+ struct sun8i_hdmi_phy *phy = platform_get_drvdata(pdev);
clk_disable_unprepare(phy->clk_mod);
clk_disable_unprepare(phy->clk_bus);
@@ -744,4 +765,14 @@ void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
clk_put(phy->clk_pll1);
clk_put(phy->clk_mod);
clk_put(phy->clk_bus);
+ return 0;
}
+
+struct platform_driver sun8i_hdmi_phy_driver = {
+ .probe = sun8i_hdmi_phy_probe,
+ .remove = sun8i_hdmi_phy_remove,
+ .driver = {
+ .name = "sun8i-hdmi-phy",
+ .of_match_table = sun8i_hdmi_phy_of_table,
+ },
+};
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 074563ca586c..51bbbc42a144 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -348,7 +348,7 @@ static void tegra_dc_setup_window(struct tegra_plane *plane,
* For YUV planar modes, the number of bytes per pixel takes into
* account only the luma component and therefore is 1.
*/
- yuv = tegra_plane_format_is_yuv(window->format, &planar);
+ yuv = tegra_plane_format_is_yuv(window->format, &planar, NULL);
if (!yuv)
bpp = window->bits_per_pixel / 8;
else
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 29f19c3c6149..5e13f1cfd749 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -696,6 +696,9 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc);
#define DC_WINBUF_START_ADDR_HI 0x80d
+#define DC_WINBUF_START_ADDR_HI_U 0x80f
+#define DC_WINBUF_START_ADDR_HI_V 0x811
+
#define DC_WINBUF_CDE_CONTROL 0x82f
#define ENABLE_SURFACE (1 << 0)
@@ -711,15 +714,34 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc);
#define DC_DISP_PCALC_HEAD_SET_CROPPED_POINT_IN_CURSOR 0x442
#define DC_DISP_PCALC_HEAD_SET_CROPPED_SIZE_IN_CURSOR 0x446
+#define DC_WINC_PRECOMP_WGRP_PIPE_CAPA 0x500
+#define DC_WINC_PRECOMP_WGRP_PIPE_CAPB 0x501
+#define DC_WINC_PRECOMP_WGRP_PIPE_CAPC 0x502
+#define MAX_PIXELS_5TAP444(x) ((x) & 0xffff)
+#define DC_WINC_PRECOMP_WGRP_PIPE_CAPD 0x503
+#define DC_WINC_PRECOMP_WGRP_PIPE_CAPE 0x504
+#define MAX_PIXELS_2TAP444(x) ((x) & 0xffff)
+#define DC_WINC_PRECOMP_WGRP_PIPE_CAPF 0x505
+
#define DC_WIN_CORE_WINDOWGROUP_SET_CONTROL 0x702
#define OWNER_MASK (0xf << 0)
#define OWNER(x) (((x) & 0xf) << 0)
#define DC_WIN_CROPPED_SIZE 0x706
+#define DC_WIN_SET_INPUT_SCALER_H_START_PHASE 0x707
+#define DC_WIN_SET_INPUT_SCALER_V_START_PHASE 0x708
+
#define DC_WIN_PLANAR_STORAGE 0x709
#define PITCH(x) (((x) >> 6) & 0x1fff)
+#define DC_WIN_PLANAR_STORAGE_UV 0x70a
+#define PITCH_U(x) ((((x) >> 6) & 0x1fff) << 0)
+#define PITCH_V(x) ((((x) >> 6) & 0x1fff) << 16)
+
+#define DC_WIN_SET_INPUT_SCALER_HPHASE_INCR 0x70b
+#define DC_WIN_SET_INPUT_SCALER_VPHASE_INCR 0x70c
+
#define DC_WIN_SET_PARAMS 0x70d
#define CLAMP_BEFORE_BLEND (1 << 15)
#define DEGAMMA_NONE (0 << 13)
@@ -740,6 +762,10 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc);
#define VERTICAL_TAPS_2 (1 << 0)
#define VERTICAL_TAPS_5 (4 << 0)
+#define DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_COEFF 0x70f
+#define COEFF_INDEX(x) (((x) & 0xff) << 15)
+#define COEFF_DATA(x) (((x) & 0x3ff) << 0)
+
#define DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE 0x711
#define INPUT_SCALER_USE422 (1 << 2)
#define INPUT_SCALER_VBYPASS (1 << 1)
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 7d7cc90b6fc9..1f96e416fa08 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -467,10 +467,8 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
return PTR_ERR(dpaux->regs);
dpaux->irq = platform_get_irq(pdev, 0);
- if (dpaux->irq < 0) {
- dev_err(&pdev->dev, "failed to get IRQ\n");
+ if (dpaux->irq < 0)
return -ENXIO;
- }
if (!pdev->dev.pm_domain) {
dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 87df251c1fcf..0cb868065348 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -25,7 +25,7 @@
#include "trace.h"
/* XXX move to include/uapi/drm/drm_fourcc.h? */
-#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT(22)
+#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT_ULL(22)
struct reset_control;
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 79bff8b48271..b910155f80c4 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -23,6 +23,8 @@
#include "dc.h"
#include "plane.h"
+#define NFB 24
+
static const u32 tegra_shared_plane_formats[] = {
DRM_FORMAT_ARGB1555,
DRM_FORMAT_RGB565,
@@ -292,6 +294,74 @@ static int tegra_shared_plane_set_owner(struct tegra_plane *plane,
return 0;
}
+static void tegra_shared_plane_setup_scaler(struct tegra_plane *plane)
+{
+ static const unsigned int coeffs[192] = {
+ 0x00000000, 0x3c70e400, 0x3bb037e4, 0x0c51cc9c,
+ 0x00100001, 0x3bf0dbfa, 0x3d00f406, 0x3fe003ff,
+ 0x00300002, 0x3b80cbf5, 0x3da1040d, 0x3fb003fe,
+ 0x00400002, 0x3b20bff1, 0x3e511015, 0x3f9003fc,
+ 0x00500002, 0x3ad0b3ed, 0x3f21201d, 0x3f5003fb,
+ 0x00500003, 0x3aa0a3e9, 0x3ff13026, 0x3f2007f9,
+ 0x00500403, 0x3a7097e6, 0x00e1402f, 0x3ee007f7,
+ 0x00500403, 0x3a608be4, 0x01d14c38, 0x3ea00bf6,
+ 0x00500403, 0x3a507fe2, 0x02e15c42, 0x3e500ff4,
+ 0x00500402, 0x3a6073e1, 0x03f16c4d, 0x3e000ff2,
+ 0x00400402, 0x3a706be0, 0x05117858, 0x3db013f0,
+ 0x00300402, 0x3a905fe0, 0x06318863, 0x3d6017ee,
+ 0x00300402, 0x3ab057e0, 0x0771986e, 0x3d001beb,
+ 0x00200001, 0x3af04fe1, 0x08a1a47a, 0x3cb023e9,
+ 0x00100001, 0x3b2047e2, 0x09e1b485, 0x3c6027e7,
+ 0x00100000, 0x3b703fe2, 0x0b11c091, 0x3c002fe6,
+ 0x3f203800, 0x0391103f, 0x3ff0a014, 0x0811606c,
+ 0x3f2037ff, 0x0351083c, 0x03e11842, 0x3f203c00,
+ 0x3f302fff, 0x03010439, 0x04311c45, 0x3f104401,
+ 0x3f302fff, 0x02c0fc35, 0x04812448, 0x3f104802,
+ 0x3f4027ff, 0x0270f832, 0x04c1284b, 0x3f205003,
+ 0x3f4023ff, 0x0230f030, 0x0511304e, 0x3f205403,
+ 0x3f601fff, 0x01f0e82d, 0x05613451, 0x3f205c04,
+ 0x3f701bfe, 0x01b0e02a, 0x05a13c54, 0x3f306006,
+ 0x3f7017fe, 0x0170d827, 0x05f14057, 0x3f406807,
+ 0x3f8017ff, 0x0140d424, 0x0641445a, 0x3f406c08,
+ 0x3fa013ff, 0x0100cc22, 0x0681485d, 0x3f507409,
+ 0x3fa00fff, 0x00d0c41f, 0x06d14c60, 0x3f607c0b,
+ 0x3fc00fff, 0x0090bc1c, 0x07115063, 0x3f80840c,
+ 0x3fd00bff, 0x0070b41a, 0x07515465, 0x3f908c0e,
+ 0x3fe007ff, 0x0040b018, 0x07915868, 0x3fb0900f,
+ 0x3ff00400, 0x0010a816, 0x07d15c6a, 0x3fd09811,
+ 0x00a04c0e, 0x0460f442, 0x0240a827, 0x05c15859,
+ 0x0090440d, 0x0440f040, 0x0480fc43, 0x00b05010,
+ 0x0080400c, 0x0410ec3e, 0x04910044, 0x00d05411,
+ 0x0070380b, 0x03f0e83d, 0x04b10846, 0x00e05812,
+ 0x0060340a, 0x03d0e43b, 0x04d10c48, 0x00f06013,
+ 0x00503009, 0x03b0e039, 0x04e11449, 0x01106415,
+ 0x00402c08, 0x0390d838, 0x05011c4b, 0x01206c16,
+ 0x00302807, 0x0370d436, 0x0511204c, 0x01407018,
+ 0x00302406, 0x0340d034, 0x0531244e, 0x01507419,
+ 0x00202005, 0x0320cc32, 0x05412c50, 0x01707c1b,
+ 0x00101c04, 0x0300c431, 0x05613451, 0x0180801d,
+ 0x00101803, 0x02e0c02f, 0x05713853, 0x01a0881e,
+ 0x00101002, 0x02b0bc2d, 0x05814054, 0x01c08c20,
+ 0x00000c02, 0x02a0b82c, 0x05914455, 0x01e09421,
+ 0x00000801, 0x0280b02a, 0x05a14c57, 0x02009c23,
+ 0x00000400, 0x0260ac28, 0x05b15458, 0x0220a025,
+ };
+ unsigned int ratio, row, column;
+
+ for (ratio = 0; ratio <= 2; ratio++) {
+ for (row = 0; row <= 15; row++) {
+ for (column = 0; column <= 3; column++) {
+ unsigned int index = (ratio << 6) + (row << 2) + column;
+ u32 value;
+
+ value = COEFF_INDEX(index) | COEFF_DATA(coeffs[index]);
+ tegra_plane_writel(plane, value,
+ DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_COEFF);
+ }
+ }
+ }
+}
+
static void tegra_dc_assign_shared_plane(struct tegra_dc *dc,
struct tegra_plane *plane)
{
@@ -337,6 +407,8 @@ static void tegra_dc_assign_shared_plane(struct tegra_dc *dc,
value |= THREAD_GROUP_ENABLE;
tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_THREAD_GROUP);
+ tegra_shared_plane_setup_scaler(plane);
+
tegra_shared_plane_update(plane);
tegra_shared_plane_activate(plane);
}
@@ -444,6 +516,18 @@ static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
host1x_client_suspend(&dc->client);
}
+static inline u32 compute_phase_incr(fixed20_12 in, unsigned int out)
+{
+ u64 tmp, tmp1, tmp2;
+
+ tmp = (u64)dfixed_trunc(in);
+ tmp2 = (u64)out;
+ tmp1 = (tmp << NFB) + (tmp2 >> 1);
+ do_div(tmp1, tmp2);
+
+ return lower_32_bits(tmp1);
+}
+
static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -454,8 +538,10 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
unsigned int zpos = new_state->normalized_zpos;
struct drm_framebuffer *fb = new_state->fb;
struct tegra_plane *p = to_tegra_plane(plane);
- dma_addr_t base;
- u32 value;
+ u32 value, min_width, bypass = 0;
+ dma_addr_t base, addr_flag = 0;
+ unsigned int bpc;
+ bool yuv, planar;
int err;
/* rien ne va plus */
@@ -473,6 +559,8 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
return;
}
+ yuv = tegra_plane_format_is_yuv(tegra_plane_state->format, &planar, &bpc);
+
tegra_dc_assign_shared_plane(dc, p);
tegra_plane_writel(p, VCOUNTER, DC_WIN_CORE_ACT_CONTROL);
@@ -491,18 +579,52 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
value = K2(255) | K1(255) | WINDOW_LAYER_DEPTH(255 - zpos);
tegra_plane_writel(p, value, DC_WIN_BLEND_LAYER_CONTROL);
- /* bypass scaling */
+ /* scaling */
+ min_width = min(new_state->src_w >> 16, new_state->crtc_w);
+
+ value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPC);
+
+ if (min_width < MAX_PIXELS_5TAP444(value)) {
+ value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
+ } else {
+ value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPE);
+
+ if (min_width < MAX_PIXELS_2TAP444(value))
+ value = HORIZONTAL_TAPS_2 | VERTICAL_TAPS_2;
+ else
+ dev_err(dc->dev, "invalid minimum width: %u\n", min_width);
+ }
+
value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_CONTROL_INPUT_SCALER);
- value = INPUT_SCALER_VBYPASS | INPUT_SCALER_HBYPASS;
- tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE);
+ if (new_state->src_w != new_state->crtc_w << 16) {
+ fixed20_12 width = dfixed_init(new_state->src_w >> 16);
+ u32 incr = compute_phase_incr(width, new_state->crtc_w) & ~0x1;
+ u32 init = (1 << (NFB - 1)) + (incr >> 1);
+
+ tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_HPHASE_INCR);
+ tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_H_START_PHASE);
+ } else {
+ bypass |= INPUT_SCALER_HBYPASS;
+ }
+
+ if (new_state->src_h != new_state->crtc_h << 16) {
+ fixed20_12 height = dfixed_init(new_state->src_h >> 16);
+ u32 incr = compute_phase_incr(height, new_state->crtc_h) & ~0x1;
+ u32 init = (1 << (NFB - 1)) + (incr >> 1);
+
+ tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_VPHASE_INCR);
+ tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_V_START_PHASE);
+ } else {
+ bypass |= INPUT_SCALER_VBYPASS;
+ }
+
+ tegra_plane_writel(p, bypass, DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE);
/* disable compression */
tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);
- base = tegra_plane_state->iova[0] + fb->offsets[0];
-
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
/*
* Physical address bit 39 in Tegra194 is used as a switch for special
@@ -510,9 +632,12 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
* dGPU sector layout.
*/
if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
- base |= BIT(39);
+ addr_flag = BIT_ULL(39);
#endif
+ base = tegra_plane_state->iova[0] + fb->offsets[0];
+ base |= addr_flag;
+
tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);
@@ -526,7 +651,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
value = WIN_ENABLE | COLOR_EXPAND;
tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
- value = V_SIZE(new_state->crtc_h) | H_SIZE(new_state->crtc_w);
+ value = V_SIZE(new_state->src_h >> 16) | H_SIZE(new_state->src_w >> 16);
tegra_plane_writel(p, value, DC_WIN_CROPPED_SIZE);
tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI);
@@ -535,7 +660,44 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
value = PITCH(fb->pitches[0]);
tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE);
- value = CLAMP_BEFORE_BLEND | DEGAMMA_SRGB | INPUT_RANGE_FULL;
+ if (yuv && planar) {
+ base = tegra_plane_state->iova[1] + fb->offsets[1];
+ base |= addr_flag;
+
+ tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_U);
+ tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_U);
+
+ base = tegra_plane_state->iova[2] + fb->offsets[2];
+ base |= addr_flag;
+
+ tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_V);
+ tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_V);
+
+ value = PITCH_U(fb->pitches[2]) | PITCH_V(fb->pitches[2]);
+ tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE_UV);
+ } else {
+ tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_U);
+ tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_U);
+ tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_V);
+ tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_V);
+ tegra_plane_writel(p, 0, DC_WIN_PLANAR_STORAGE_UV);
+ }
+
+ value = CLAMP_BEFORE_BLEND | INPUT_RANGE_FULL;
+
+ if (yuv) {
+ if (bpc < 12)
+ value |= DEGAMMA_YUV8_10;
+ else
+ value |= DEGAMMA_YUV12;
+
+ /* XXX parameterize */
+ value |= COLOR_SPACE_YUV_2020;
+ } else {
+ if (!tegra_plane_format_is_indexed(tegra_plane_state->format))
+ value |= DEGAMMA_SRGB;
+ }
+
tegra_plane_writel(p, value, DC_WIN_SET_PARAMS);
value = OFFSET_X(new_state->src_y >> 16) |
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index 2e11b4b1f702..2e65b4075ce6 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -375,7 +375,20 @@ int tegra_plane_format(u32 fourcc, u32 *format, u32 *swap)
return 0;
}
-bool tegra_plane_format_is_yuv(unsigned int format, bool *planar)
+bool tegra_plane_format_is_indexed(unsigned int format)
+{
+ switch (format) {
+ case WIN_COLOR_DEPTH_P1:
+ case WIN_COLOR_DEPTH_P2:
+ case WIN_COLOR_DEPTH_P4:
+ case WIN_COLOR_DEPTH_P8:
+ return true;
+ }
+
+ return false;
+}
+
+bool tegra_plane_format_is_yuv(unsigned int format, bool *planar, unsigned int *bpc)
{
switch (format) {
case WIN_COLOR_DEPTH_YCbCr422:
@@ -383,6 +396,9 @@ bool tegra_plane_format_is_yuv(unsigned int format, bool *planar)
if (planar)
*planar = false;
+ if (bpc)
+ *bpc = 8;
+
return true;
case WIN_COLOR_DEPTH_YCbCr420P:
@@ -396,6 +412,9 @@ bool tegra_plane_format_is_yuv(unsigned int format, bool *planar)
if (planar)
*planar = true;
+ if (bpc)
+ *bpc = 8;
+
return true;
}
@@ -421,7 +440,7 @@ static bool __drm_format_has_alpha(u32 format)
static int tegra_plane_format_get_alpha(unsigned int opaque,
unsigned int *alpha)
{
- if (tegra_plane_format_is_yuv(opaque, NULL)) {
+ if (tegra_plane_format_is_yuv(opaque, NULL, NULL)) {
*alpha = opaque;
return 0;
}
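A rough model of how a caller uses the extended helpers above, i.e. the new bits-per-component out-parameter of tegra_plane_format_is_yuv() together with tegra_plane_format_is_indexed(), to pick a degamma curve. This is a simplified illustration of the selection logic only, not the DC register programming; the enum names are made up.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative degamma choices based on what the format helpers report. */
enum degamma {
	DEGAMMA_NONE,
	DEGAMMA_SRGB_CURVE,
	DEGAMMA_YUV8_10_CURVE,
	DEGAMMA_YUV12_CURVE,
};

static enum degamma pick_degamma(bool yuv, bool indexed, unsigned int bpc)
{
	if (yuv)
		return bpc < 12 ? DEGAMMA_YUV8_10_CURVE : DEGAMMA_YUV12_CURVE;

	/* indexed (palette) formats bypass the sRGB degamma curve */
	return indexed ? DEGAMMA_NONE : DEGAMMA_SRGB_CURVE;
}

int main(void)
{
	printf("8-bit YUV  -> %d\n", pick_degamma(true, false, 8));
	printf("12-bit YUV -> %d\n", pick_degamma(true, false, 12));
	printf("indexed    -> %d\n", pick_degamma(false, true, 0));
	return 0;
}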
diff --git a/drivers/gpu/drm/tegra/plane.h b/drivers/gpu/drm/tegra/plane.h
index c691dd79b27b..1785c1559c0c 100644
--- a/drivers/gpu/drm/tegra/plane.h
+++ b/drivers/gpu/drm/tegra/plane.h
@@ -74,7 +74,8 @@ int tegra_plane_state_add(struct tegra_plane *plane,
struct drm_plane_state *state);
int tegra_plane_format(u32 fourcc, u32 *format, u32 *swap);
-bool tegra_plane_format_is_yuv(unsigned int format, bool *planar);
+bool tegra_plane_format_is_indexed(unsigned int format);
+bool tegra_plane_format_is_yuv(unsigned int format, bool *planar, unsigned int *bpc);
int tegra_plane_setup_legacy_state(struct tegra_plane *tegra,
struct tegra_plane_state *state);
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 7b88261f57bb..0ea320c1092b 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -3125,21 +3125,21 @@ static int tegra_sor_init(struct host1x_client *client)
if (err < 0) {
dev_err(sor->dev, "failed to acquire SOR reset: %d\n",
err);
- return err;
+ goto rpm_put;
}
err = reset_control_assert(sor->rst);
if (err < 0) {
dev_err(sor->dev, "failed to assert SOR reset: %d\n",
err);
- return err;
+ goto rpm_put;
}
}
err = clk_prepare_enable(sor->clk);
if (err < 0) {
dev_err(sor->dev, "failed to enable clock: %d\n", err);
- return err;
+ goto rpm_put;
}
usleep_range(1000, 3000);
@@ -3150,7 +3150,7 @@ static int tegra_sor_init(struct host1x_client *client)
dev_err(sor->dev, "failed to deassert SOR reset: %d\n",
err);
clk_disable_unprepare(sor->clk);
- return err;
+ goto rpm_put;
}
reset_control_release(sor->rst);
@@ -3171,6 +3171,12 @@ static int tegra_sor_init(struct host1x_client *client)
}
return 0;
+
+rpm_put:
+ if (sor->rst)
+ pm_runtime_put(sor->dev);
+
+ return err;
}
static int tegra_sor_exit(struct host1x_client *client)
@@ -3739,12 +3745,8 @@ static int tegra_sor_probe(struct platform_device *pdev)
if (!sor->aux)
return -EPROBE_DEFER;
- if (get_device(&sor->aux->ddc.dev)) {
- if (try_module_get(sor->aux->ddc.owner))
- sor->output.ddc = &sor->aux->ddc;
- else
- put_device(&sor->aux->ddc.dev);
- }
+ if (get_device(sor->aux->dev))
+ sor->output.ddc = &sor->aux->ddc;
}
if (!sor->aux) {
@@ -3772,12 +3774,13 @@ static int tegra_sor_probe(struct platform_device *pdev)
err = tegra_sor_parse_dt(sor);
if (err < 0)
- return err;
+ goto put_aux;
err = tegra_output_probe(&sor->output);
- if (err < 0)
- return dev_err_probe(&pdev->dev, err,
- "failed to probe output\n");
+ if (err < 0) {
+ dev_err_probe(&pdev->dev, err, "failed to probe output\n");
+ goto put_aux;
+ }
if (sor->ops && sor->ops->probe) {
err = sor->ops->probe(sor);
@@ -3916,17 +3919,10 @@ static int tegra_sor_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sor);
pm_runtime_enable(&pdev->dev);
- INIT_LIST_HEAD(&sor->client.list);
+ host1x_client_init(&sor->client);
sor->client.ops = &sor_client_ops;
sor->client.dev = &pdev->dev;
- err = host1x_client_register(&sor->client);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to register host1x client: %d\n",
- err);
- goto rpm_disable;
- }
-
/*
* On Tegra210 and earlier, provide our own implementation for the
* pad output clock.
@@ -3938,13 +3934,13 @@ static int tegra_sor_probe(struct platform_device *pdev)
sor->index);
if (!name) {
err = -ENOMEM;
- goto unregister;
+ goto uninit;
}
err = host1x_client_resume(&sor->client);
if (err < 0) {
dev_err(sor->dev, "failed to resume: %d\n", err);
- goto unregister;
+ goto uninit;
}
sor->clk_pad = tegra_clk_sor_pad_register(sor, name);
@@ -3955,17 +3951,30 @@ static int tegra_sor_probe(struct platform_device *pdev)
err = PTR_ERR(sor->clk_pad);
dev_err(sor->dev, "failed to register SOR pad clock: %d\n",
err);
- goto unregister;
+ goto uninit;
+ }
+
+ err = __host1x_client_register(&sor->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+ err);
+ goto uninit;
}
return 0;
-unregister:
- host1x_client_unregister(&sor->client);
-rpm_disable:
+uninit:
+ host1x_client_exit(&sor->client);
pm_runtime_disable(&pdev->dev);
remove:
+ if (sor->aux)
+ sor->output.ddc = NULL;
+
tegra_output_remove(&sor->output);
+put_aux:
+ if (sor->aux)
+ put_device(sor->aux->dev);
+
return err;
}
@@ -3983,6 +3992,11 @@ static int tegra_sor_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
+ if (sor->aux) {
+ put_device(sor->aux->dev);
+ sor->output.ddc = NULL;
+ }
+
tegra_output_remove(&sor->output);
return 0;
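The sor.c rework above is an instance of the usual reverse-order goto cleanup idiom: each new failure point jumps to a label that undoes everything acquired so far (host1x client state and pad clock, then the DDC/aux reference), and the labels fall through in reverse acquisition order. A generic, self-contained sketch of that pattern, with hypothetical step names standing in for get_device(), host1x_client_init() and friends:

#include <stdio.h>

/* Hypothetical setup/teardown steps for illustration only. */
static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; } /* pretend the last step fails */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int probe(void)
{
	int err;

	err = step_a();
	if (err)
		return err;

	err = step_b();
	if (err)
		goto undo_a;	/* unwind only what step_a acquired */

	err = step_c();
	if (err)
		goto undo_b;	/* unwind b, then fall through to a */

	return 0;

undo_b:
	undo_b();
undo_a:
	undo_a();
	return err;
}

int main(void)
{
	printf("probe: %d\n", probe());
	return 0;
}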
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 72aea1cc0cfa..c9d55a9a3180 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -148,8 +148,6 @@ static int vic_boot(struct vic *vic)
hdr = vic->falcon.firmware.virt;
fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);
- falcon_execute_method(&vic->falcon, VIC_SET_APPLICATION_ID, 1);
-
/* Old VIC firmware needs kernel help with setting up FCE microcode. */
if (fce_bin_data_offset != 0x0 && fce_bin_data_offset != 0xa5a5a5a5) {
hdr = vic->falcon.firmware.virt +
diff --git a/drivers/gpu/drm/tegra/vic.h b/drivers/gpu/drm/tegra/vic.h
index be898bee6a57..acf35aac948b 100644
--- a/drivers/gpu/drm/tegra/vic.h
+++ b/drivers/gpu/drm/tegra/vic.h
@@ -8,7 +8,6 @@
/* VIC methods */
-#define VIC_SET_APPLICATION_ID 0x00000200
#define VIC_SET_FCE_UCODE_SIZE 0x0000071C
#define VIC_SET_FCE_UCODE_OFFSET 0x0000072C
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 5a2dc712c632..20fbfd922266 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1147,7 +1147,10 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
return -EBUSY;
- if (!ttm_bo_get_unless_zero(bo)) {
+ if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
+ bo->ttm->page_flags & TTM_PAGE_FLAG_SG ||
+ bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED ||
+ !ttm_bo_get_unless_zero(bo)) {
if (locked)
dma_resv_unlock(bo->base.resv);
return -EBUSY;
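A minimal model of the eligibility test that now lives in ttm_bo_swapout() itself rather than in the ttm_device_swapout() loop: a buffer is skipped if it has no backing ttm, the ttm is not populated, or it is an SG or already-swapped buffer. The flag and struct names below are illustrative stand-ins, not the TTM API.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_FLAG_SG      (1u << 0)	/* stand-ins for the TTM page flags */
#define PAGE_FLAG_SWAPPED (1u << 1)

struct fake_tt {
	unsigned int page_flags;
	bool populated;
};

/* Mirrors the added check: anything that cannot be swapped out bails early. */
static bool swapout_allowed(const struct fake_tt *tt)
{
	if (!tt || !tt->populated)
		return false;
	if (tt->page_flags & (PAGE_FLAG_SG | PAGE_FLAG_SWAPPED))
		return false;
	return true;
}

int main(void)
{
	struct fake_tt normal = { .page_flags = 0, .populated = true };
	struct fake_tt swapped = { .page_flags = PAGE_FLAG_SWAPPED, .populated = true };

	printf("normal:  %s\n", swapout_allowed(&normal) ? "swap" : "skip");
	printf("swapped: %s\n", swapout_allowed(&swapped) ? "swap" : "skip");
	printf("no tt:   %s\n", swapout_allowed(NULL) ? "swap" : "skip");
	return 0;
}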
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 460953dcad11..5f31acec3ad7 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -143,14 +143,8 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
list_for_each_entry(bo, &man->lru[j], lru) {
- uint32_t num_pages;
+ uint32_t num_pages = PFN_UP(bo->base.size);
- if (!bo->ttm ||
- bo->ttm->page_flags & TTM_PAGE_FLAG_SG ||
- bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)
- continue;
-
- num_pages = bo->ttm->num_pages;
ret = ttm_bo_swapout(bo, ctx, gfp_flags);
/* ttm_bo_swapout has dropped the lru_lock */
if (!ret)
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index 03395386e8a7..f4b08a8705b3 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -181,6 +181,9 @@ int ttm_range_man_fini(struct ttm_device *bdev,
struct drm_mm *mm = &rman->mm;
int ret;
+ if (!man)
+ return 0;
+
ttm_resource_manager_set_used(man, false);
ret = ttm_resource_manager_evict_all(bdev, man);
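The added guard above makes the range-manager teardown safe to call even when the manager was never created, a common pattern for fini paths that are also reached from partial-probe failures. A trivial standalone illustration of that NULL-tolerant teardown, with made-up names:

#include <stdio.h>
#include <stdlib.h>

struct range_mgr { int used; };

/* Teardown that tolerates a manager which was never set up. */
static int range_mgr_fini(struct range_mgr *man)
{
	if (!man)
		return 0;	/* nothing to do, mirrors the early return above */

	man->used = 0;
	free(man);
	return 0;
}

int main(void)
{
	printf("fini(NULL) = %d\n", range_mgr_fini(NULL));
	printf("fini(mgr)  = %d\n",
	       range_mgr_fini(calloc(1, sizeof(struct range_mgr))));
	return 0;
}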
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index cc68a7979196..d4833c878ca9 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -167,6 +167,8 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
bool connected = false;
+ WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
+
if (vc4_hdmi->hpd_gpio &&
gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) {
connected = true;
@@ -187,10 +189,12 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
}
}
+ pm_runtime_put(&vc4_hdmi->pdev->dev);
return connector_status_connected;
}
cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
+ pm_runtime_put(&vc4_hdmi->pdev->dev);
return connector_status_disconnected;
}
@@ -631,7 +635,6 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
vc4_hdmi->variant->phy_disable(vc4_hdmi);
clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock);
- clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
@@ -944,13 +947,6 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
return;
}
- ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
- if (ret) {
- DRM_ERROR("Failed to turn on HSM clock: %d\n", ret);
- clk_disable_unprepare(vc4_hdmi->pixel_clock);
- return;
- }
-
vc4_hdmi_cec_update_clk_div(vc4_hdmi);
if (pixel_rate > 297000000)
@@ -963,7 +959,6 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock, bvb_rate);
if (ret) {
DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret);
- clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
return;
}
@@ -971,7 +966,6 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
if (ret) {
DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret);
- clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
return;
}
@@ -2117,6 +2111,29 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
return 0;
}
+#ifdef CONFIG_PM
+static int vc4_hdmi_runtime_suspend(struct device *dev)
+{
+ struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(vc4_hdmi->hsm_clock);
+
+ return 0;
+}
+
+static int vc4_hdmi_runtime_resume(struct device *dev)
+{
+ struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+#endif
+
static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
{
const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev);
@@ -2371,11 +2388,18 @@ static const struct of_device_id vc4_hdmi_dt_match[] = {
{}
};
+static const struct dev_pm_ops vc4_hdmi_pm_ops = {
+ SET_RUNTIME_PM_OPS(vc4_hdmi_runtime_suspend,
+ vc4_hdmi_runtime_resume,
+ NULL)
+};
+
struct platform_driver vc4_hdmi_driver = {
.probe = vc4_hdmi_dev_probe,
.remove = vc4_hdmi_dev_remove,
.driver = {
.name = "vc4_hdmi",
.of_match_table = vc4_hdmi_dt_match,
+ .pm = &vc4_hdmi_pm_ops,
},
};
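A toy model of the runtime-PM refcounting that replaces the open-coded HSM clock handling above: any path that needs the controller powered (connector detect, encoder enable) takes a runtime reference, the clock is enabled only on the 0-to-1 transition, and it is disabled again when the last user drops its reference. Everything here is a simplified, single-threaded illustration, not the vc4 driver or the kernel's runtime-PM core.

#include <stdio.h>

static int usecount;	/* runtime-PM usage counter (simplified, not thread safe) */

static void hsm_clock_enable(void)  { puts("hsm clock on"); }
static void hsm_clock_disable(void) { puts("hsm clock off"); }

/* Modeled after runtime resume: enable the clock when the first user arrives. */
static int rpm_get(void)
{
	if (usecount++ == 0)
		hsm_clock_enable();
	return 0;
}

/* Modeled after runtime suspend: disable the clock when the last user leaves. */
static void rpm_put(void)
{
	if (--usecount == 0)
		hsm_clock_disable();
}

static void connector_detect(void)
{
	rpm_get();	/* registers are only safe to touch while powered */
	puts("read HPD / SCDC status");
	rpm_put();
}

int main(void)
{
	connector_detect();
	return 0;
}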
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index f2fb047df808..f0b3e4cf5bce 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -372,7 +372,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
if (!old_hvs_state->fifo_state[channel].in_use)
continue;
- ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[i].pending_commit);
+ ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[channel].pending_commit);
if (ret)
drm_err(dev, "Timed out waiting for commit\n");
}
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 46f69c532b6b..218e3718fd68 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -736,6 +736,29 @@ void host1x_driver_unregister(struct host1x_driver *driver)
EXPORT_SYMBOL(host1x_driver_unregister);
/**
+ * __host1x_client_init() - initialize a host1x client
+ * @client: host1x client
+ * @key: lock class key for the client-specific mutex
+ */
+void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key)
+{
+ INIT_LIST_HEAD(&client->list);
+ __mutex_init(&client->lock, "host1x client lock", key);
+ client->usecount = 0;
+}
+EXPORT_SYMBOL(__host1x_client_init);
+
+/**
+ * host1x_client_exit() - uninitialize a host1x client
+ * @client: host1x client
+ */
+void host1x_client_exit(struct host1x_client *client)
+{
+ mutex_destroy(&client->lock);
+}
+EXPORT_SYMBOL(host1x_client_exit);
+
+/**
* __host1x_client_register() - register a host1x client
* @client: host1x client
* @key: lock class key for the client-specific mutex
@@ -747,16 +770,11 @@ EXPORT_SYMBOL(host1x_driver_unregister);
* device and call host1x_device_init(), which will in turn call each client's
* &host1x_client_ops.init implementation.
*/
-int __host1x_client_register(struct host1x_client *client,
- struct lock_class_key *key)
+int __host1x_client_register(struct host1x_client *client)
{
struct host1x *host1x;
int err;
- INIT_LIST_HEAD(&client->list);
- __mutex_init(&client->lock, "host1x client lock", key);
- client->usecount = 0;
-
mutex_lock(&devices_lock);
list_for_each_entry(host1x, &devices, list) {