Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm')
18 files changed, 4948 insertions, 2905 deletions
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile index 718e123a3230..90fb0f3cdb6f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile @@ -25,7 +25,13 @@ -AMDGPUDM = amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o amdgpu_dm_color.o +AMDGPUDM = \ + amdgpu_dm.o \ + amdgpu_dm_plane.o \ + amdgpu_dm_crtc.o \ + amdgpu_dm_irq.o \ + amdgpu_dm_mst_types.o \ + amdgpu_dm_color.o ifdef CONFIG_DRM_AMD_DC_DCN AMDGPUDM += dc_fpu.o diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 122dae1a1813..509739d83b5a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -46,9 +46,11 @@ #include "amdgpu_ucode.h" #include "atom.h" #include "amdgpu_dm.h" +#include "amdgpu_dm_plane.h" +#include "amdgpu_dm_crtc.h" #ifdef CONFIG_DRM_AMD_DC_HDCP #include "amdgpu_dm_hdcp.h" -#include <drm/drm_hdcp.h> +#include <drm/display/drm_hdcp_helper.h> #endif #include "amdgpu_pm.h" #include "amdgpu_atombios.h" @@ -72,27 +74,34 @@ #include <linux/pci.h> #include <linux/firmware.h> #include <linux/component.h> +#include <linux/dmi.h> +#include <drm/display/drm_dp_mst_helper.h> +#include <drm/display/drm_hdmi_helper.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_uapi.h> #include <drm/drm_atomic_helper.h> -#include <drm/drm_dp_mst_helper.h> +#include <drm/drm_blend.h> #include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_edid.h> #include <drm/drm_vblank.h> #include <drm/drm_audio_component.h> +#include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_plane_helper.h> + +#include <acpi/video.h> -#if defined(CONFIG_DRM_AMD_DC_DCN) #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" #include "dcn/dcn_1_0_offset.h" #include "dcn/dcn_1_0_sh_mask.h" #include "soc15_hw_ip.h" +#include "soc15_common.h" #include "vega10_ip_offset.h" -#include "soc15_common.h" -#endif +#include "gc/gc_11_0_0_offset.h" +#include "gc/gc_11_0_0_sh_mask.h" #include "modules/inc/mod_freesync.h" #include "modules/power/power_helpers.h" @@ -114,6 +123,17 @@ MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB); MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB); #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin" MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB); +#define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB); +#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB); +#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB); + +#define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB); +#define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB); #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); @@ -189,13 +209,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev); /* removes and deallocates the drm structures, created by the above function */ static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm); -static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, - struct drm_plane *plane, - unsigned long possible_crtcs, - const struct dc_plane_cap *plane_cap); -static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, - struct drm_plane *plane, - uint32_t link_index); static int 
amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *amdgpu_dm_connector, uint32_t link_index, @@ -211,12 +224,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state); static int amdgpu_dm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state); -static void handle_cursor_update(struct drm_plane *plane, - struct drm_plane_state *old_plane_state); - -static const struct drm_format_info * -amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd); - static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector); static void handle_hpd_rx_irq(void *param); @@ -330,20 +337,6 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev, return NULL; } -static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc) -{ - return acrtc->dm_irq_params.freesync_config.state == - VRR_STATE_ACTIVE_VARIABLE || - acrtc->dm_irq_params.freesync_config.state == - VRR_STATE_ACTIVE_FIXED; -} - -static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state) -{ - return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE || - dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; -} - static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state, struct dm_crtc_state *new_state) { @@ -497,7 +490,7 @@ static void dm_vupdate_high_irq(void *interrupt_params) * if a pageflip happened inside front-porch. */ if (vrr_active) { - drm_crtc_handle_vblank(&acrtc->base); + dm_crtc_handle_vblank(acrtc); /* BTR processing for pre-DCE12 ASICs */ if (acrtc->dm_irq_params.stream && @@ -549,7 +542,7 @@ static void dm_crtc_high_irq(void *interrupt_params) * to dm_vupdate_high_irq after end of front-porch. */ if (!vrr_active) - drm_crtc_handle_vblank(&acrtc->base); + dm_crtc_handle_vblank(acrtc); /** * Following stuff must happen at start of vblank, for crc @@ -599,7 +592,6 @@ static void dm_crtc_high_irq(void *interrupt_params) spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); } -#if defined(CONFIG_DRM_AMD_DC_DCN) #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) /** * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for @@ -624,7 +616,7 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params) #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */ /** - * dmub_aux_setconfig_reply_callback - Callback for AUX or SET_CONFIG command. + * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command. * @adev: amdgpu_device pointer * @notify: dmub notification structure * @@ -632,7 +624,8 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params) * Copies dmub notification to DM which is to be read by AUX command. * issuing thread and also signals the event to wake up the thread. */ -void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify) +static void dmub_aux_setconfig_callback(struct amdgpu_device *adev, + struct dmub_notification *notify) { if (adev->dm.dmub_notify) memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification)); @@ -648,7 +641,8 @@ void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notific * Dmub Hpd interrupt processing callback. Gets displayindex through the * ink index and calls helper to do the processing. 
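Note on the vblank hunks above: the direct drm_crtc_handle_vblank(&acrtc->base) calls become dm_crtc_handle_vblank(acrtc), which lives in the new amdgpu_dm_crtc.c added by the Makefile change. Its body is not part of this diff; a minimal sketch of the shape such a wrapper would take (the body is an assumption, not the actual implementation, which may do more):

/* amdgpu_dm_crtc.c - hedged sketch only */
#include <drm/drm_vblank.h>

void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
{
	/* Forward to the core DRM vblank machinery for this CRTC. */
	drm_crtc_handle_vblank(&acrtc->base);
}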
*/ -void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify) +static void dmub_hpd_callback(struct amdgpu_device *adev, + struct dmub_notification *notify) { struct amdgpu_dm_connector *aconnector; struct amdgpu_dm_connector *hpd_aconnector = NULL; @@ -656,7 +650,7 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not struct drm_connector_list_iter iter; struct dc_link *link; uint8_t link_index = 0; - struct drm_device *dev = adev->dm.ddev; + struct drm_device *dev; if (adev == NULL) return; @@ -673,6 +667,7 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not link_index = notify->link_index; link = adev->dm.dc->links[link_index]; + dev = adev->dm.ddev; drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { @@ -705,8 +700,10 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not * to dmub interrupt handling thread * Return: true if successfully registered, false if there is existing registration */ -bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type, -dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload) +static bool register_dmub_notify_callback(struct amdgpu_device *adev, + enum dmub_notification_type type, + dmub_notify_interrupt_callback_t callback, + bool dmub_int_thread_offload) { if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) { adev->dm.dmub_callback[type] = callback; @@ -762,7 +759,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params) do { dc_stat_get_dmub_notification(adev->dm.dc, ¬ify); - if (notify.type > ARRAY_SIZE(dm->dmub_thread_offload)) { + if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) { DRM_ERROR("DM: notify type %d invalid!", notify.type); continue; } @@ -790,8 +787,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params) plink = adev->dm.dc->links[notify.link_index]; if (plink) { plink->hpd_status = - notify.hpd_status == - DP_HPD_PLUG ? true : false; + notify.hpd_status == DP_HPD_PLUG; } } queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work); @@ -819,7 +815,6 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params) if (count > DMUB_TRACE_MAX_READ) DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ"); } -#endif /* CONFIG_DRM_AMD_DC_DCN */ static int dm_set_clockgating_state(void *handle, enum amd_clockgating_state state) @@ -1023,7 +1018,6 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) const unsigned char *fw_inst_const, *fw_bss_data; uint32_t i, fw_inst_const_size, fw_bss_data_size; bool has_hw_support; - struct dc *dc = adev->dm.dc; if (!dmub_srv) /* DMUB isn't supported on the ASIC. */ @@ -1051,6 +1045,11 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) return 0; } + /* Reset DMCUB if it was previously running - before we overwrite its memory. 
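The `notify.type > ARRAY_SIZE(...)` to `>=` change in the outbox hunk above fixes an off-by-one: an N-entry array has valid indices 0..N-1, so an index equal to ARRAY_SIZE() already reads past the end. A self-contained illustration in plain C (not driver code):

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

int main(void)
{
	int offload[4] = { 0 };
	size_t type = 4; /* one past the last valid index */

	/* With '>' this guard would pass and offload[4] would be read
	 * out of bounds; '>=' correctly rejects it.
	 */
	if (type >= ARRAY_SIZE(offload)) {
		fprintf(stderr, "type %zu invalid!\n", type);
		return 1;
	}
	return offload[type];
}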
*/ + status = dmub_srv_hw_reset(dmub_srv); + if (status != DMUB_STATUS_OK) + DRM_WARN("Error resetting DMUB HW: %d\n", status); + hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data; fw_inst_const = dmub_fw->data + @@ -1110,14 +1109,11 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) for (i = 0; i < fb_info->num_fb; ++i) hw_params.fb[i] = &fb_info->fb[i]; - switch (adev->asic_type) { - case CHIP_YELLOW_CARP: - if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) { - hw_params.dpia_supported = true; -#if defined(CONFIG_DRM_AMD_DC_DCN) - hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia; -#endif - } + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(3, 1, 3): + case IP_VERSION(3, 1, 4): + hw_params.dpia_supported = true; + hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia; break; default: break; @@ -1153,7 +1149,32 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) return 0; } -#if defined(CONFIG_DRM_AMD_DC_DCN) +static void dm_dmub_hw_resume(struct amdgpu_device *adev) +{ + struct dmub_srv *dmub_srv = adev->dm.dmub_srv; + enum dmub_status status; + bool init; + + if (!dmub_srv) { + /* DMUB isn't supported on the ASIC. */ + return; + } + + status = dmub_srv_is_hw_init(dmub_srv, &init); + if (status != DMUB_STATUS_OK) + DRM_WARN("DMUB hardware init check failed: %d\n", status); + + if (status == DMUB_STATUS_OK && init) { + /* Wait for firmware load to finish. */ + status = dmub_srv_wait_for_auto_load(dmub_srv, 100000); + if (status != DMUB_STATUS_OK) + DRM_WARN("Wait for DMUB auto-load failed: %d\n", status); + } else { + /* Perform the full hardware initialization. */ + dm_dmub_hw_init(adev); + } +} + static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config) { uint64_t pt_base; @@ -1208,45 +1229,6 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_ pa_config->is_hvm_enabled = 0; } -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN) -static void vblank_control_worker(struct work_struct *work) -{ - struct vblank_control_work *vblank_work = - container_of(work, struct vblank_control_work, work); - struct amdgpu_display_manager *dm = vblank_work->dm; - - mutex_lock(&dm->dc_lock); - - if (vblank_work->enable) - dm->active_vblank_irq_count++; - else if(dm->active_vblank_irq_count) - dm->active_vblank_irq_count--; - - dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0); - - DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0); - - /* Control PSR based on vblank requirements from OS */ - if (vblank_work->stream && vblank_work->stream->link) { - if (vblank_work->enable) { - if (vblank_work->stream->link->psr_settings.psr_allow_active) - amdgpu_dm_psr_disable(vblank_work->stream); - } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled && - !vblank_work->stream->link->psr_settings.psr_allow_active && - vblank_work->acrtc->dm_irq_params.allow_psr_entry) { - amdgpu_dm_psr_enable(vblank_work->stream); - } - } - - mutex_unlock(&dm->dc_lock); - - dc_stream_release(vblank_work->stream); - - kfree(vblank_work); -} - -#endif static void dm_handle_hpd_rx_offload_work(struct work_struct *work) { @@ -1315,13 +1297,21 @@ static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct if (hpd_rx_offload_wq[i].wq == NULL) { DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!"); - return NULL; + goto out_err; } spin_lock_init(&hpd_rx_offload_wq[i].offload_lock); } return 
hpd_rx_offload_wq; + +out_err: + for (i = 0; i < max_caps; i++) { + if (hpd_rx_offload_wq[i].wq) + destroy_workqueue(hpd_rx_offload_wq[i].wq); + } + kfree(hpd_rx_offload_wq); + return NULL; } struct amdgpu_stutter_quirk { @@ -1355,6 +1345,41 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev) return false; } +static const struct dmi_system_id hpd_disconnect_quirk_table[] = { + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"), + }, + }, + {} +}; + +static void retrieve_dmi_info(struct amdgpu_display_manager *dm) +{ + const struct dmi_system_id *dmi_id; + + dm->aux_hpd_discon_quirk = false; + + dmi_id = dmi_first_match(hpd_disconnect_quirk_table); + if (dmi_id) { + dm->aux_hpd_discon_quirk = true; + DRM_INFO("aux_hpd_discon_quirk attached\n"); + } +} + static int amdgpu_dm_init(struct amdgpu_device *adev) { struct dc_init_data init_data; @@ -1374,9 +1399,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) mutex_init(&adev->dm.dc_lock); mutex_init(&adev->dm.audio_lock); -#if defined(CONFIG_DRM_AMD_DC_DCN) spin_lock_init(&adev->dm.vblank_lock); -#endif if(amdgpu_dm_irq_init(adev)) { DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n"); @@ -1407,6 +1430,25 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.dce_environment = DCE_ENV_PRODUCTION_DRV; + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(2, 1, 0): + switch (adev->dm.dmcub_fw_version) { + case 0: /* development */ + case 0x1: /* linux-firmware.git hash 6d9f399 */ + case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */ + init_data.flags.disable_dmcu = false; + break; + default: + init_data.flags.disable_dmcu = true; + } + break; + case IP_VERSION(2, 0, 3): + init_data.flags.disable_dmcu = true; + break; + default: + break; + } + switch (adev->asic_type) { case CHIP_CARRIZO: case CHIP_STONEY: @@ -1414,34 +1456,30 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) break; default: switch (adev->ip_versions[DCE_HWIP][0]) { - case IP_VERSION(2, 1, 0): - init_data.flags.gpu_vm_support = true; - switch (adev->dm.dmcub_fw_version) { - case 0: /* development */ - case 0x1: /* linux-firmware.git hash 6d9f399 */ - case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */ - init_data.flags.disable_dmcu = false; - break; - default: - init_data.flags.disable_dmcu = true; - } - break; case IP_VERSION(1, 0, 0): case IP_VERSION(1, 0, 1): + /* enable S/G on PCO and RV2 */ + if ((adev->apu_flags & AMD_APU_IS_RAVEN2) || + (adev->apu_flags & AMD_APU_IS_PICASSO)) + init_data.flags.gpu_vm_support = true; + break; + case IP_VERSION(2, 1, 0): case IP_VERSION(3, 0, 1): case IP_VERSION(3, 1, 2): case IP_VERSION(3, 1, 3): + case IP_VERSION(3, 1, 5): + case IP_VERSION(3, 1, 6): init_data.flags.gpu_vm_support = true; break; - case IP_VERSION(2, 0, 3): - init_data.flags.disable_dmcu = true; - break; default: break; } break; } + if (init_data.flags.gpu_vm_support) + adev->mode_info.gpu_vm_support = true; + if (amdgpu_dc_feature_mask & DC_FBC_MASK) init_data.flags.fbc_support = true; @@ -1454,9 +1492,28 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING) init_data.flags.edp_no_power_sequencing = true; - init_data.flags.power_down_display_on_boot = true; + if 
(amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A) + init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true; + if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0) + init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true; + + init_data.flags.seamless_boot_edp_requested = false; + + if (check_seamless_boot_capability(adev)) { + init_data.flags.seamless_boot_edp_requested = true; + init_data.flags.allow_seamless_boot_optimization = true; + DRM_INFO("Seamless boot condition check passed\n"); + } + + init_data.flags.enable_mipi_converter_optimization = true; + + init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0]; + init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0]; INIT_LIST_HEAD(&adev->dm.da_list); + + retrieve_dmi_info(&adev->dm); + /* Display Core create. */ adev->dm.dc = dc_create(&init_data); @@ -1480,12 +1537,21 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER) adev->dm.dc->debug.disable_stutter = true; - if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) + if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) { adev->dm.dc->debug.disable_dsc = true; + } if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING) adev->dm.dc->debug.disable_clock_gate = true; + if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH) + adev->dm.dc->debug.force_subvp_mclk_switch = true; + + adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm; + + /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */ + adev->dm.dc->debug.ignore_cable_id = true; + r = dm_dmub_hw_init(adev); if (r) { DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); @@ -1500,7 +1566,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) goto error; } -#if defined(CONFIG_DRM_AMD_DC_DCN) if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) { struct dc_phy_addr_space_config pa_config; @@ -1509,7 +1574,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) // Call the DC init_memory func dc_setup_system_context(adev->dm.dc, &pa_config); } -#endif adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); if (!adev->dm.freesync_module) { @@ -1521,14 +1585,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) amdgpu_dm_init_color_mod(); -#if defined(CONFIG_DRM_AMD_DC_DCN) if (adev->dm.dc->caps.max_links > 0) { adev->dm.vblank_control_workqueue = create_singlethread_workqueue("dm_vblank_control_workqueue"); if (!adev->dm.vblank_control_workqueue) DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n"); } -#endif #ifdef CONFIG_DRM_AMD_DC_HDCP if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) { @@ -1545,7 +1607,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work(); #endif - if (dc_enable_dmub_notifications(adev->dm.dc)) { + if (dc_is_dmub_outbox_supported(adev->dm.dc)) { init_completion(&adev->dm.dmub_aux_transfer_done); adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL); if (!adev->dm.dmub_notify) { @@ -1560,7 +1622,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) } amdgpu_dm_outbox_init(adev); -#if defined(CONFIG_DRM_AMD_DC_DCN) if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY, dmub_aux_setconfig_callback, false)) { DRM_ERROR("amdgpu: fail to register dmub aux callback"); @@ -1574,7 +1635,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) DRM_ERROR("amdgpu: fail to register dmub hpd callback"); goto 
error; } -#endif /* CONFIG_DRM_AMD_DC_DCN */ } if (amdgpu_dm_initialize_drm_device(adev)) { @@ -1583,6 +1643,13 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) goto error; } + /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive. + * It is expected that DMUB will resend any pending notifications at this point, for + * example HPD from DPIA. + */ + if (dc_is_dmub_outbox_supported(adev->dm.dc)) + dc_enable_dmub_outbox(adev->dm.dc); + /* create fake encoders for MST */ dm_dp_create_fake_mst_encoders(adev); @@ -1621,12 +1688,10 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) { int i; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (adev->dm.vblank_control_workqueue) { destroy_workqueue(adev->dm.vblank_control_workqueue); adev->dm.vblank_control_workqueue = NULL; } -#endif for (i = 0; i < adev->dm.display_indexes_num; i++) { drm_encoder_cleanup(&adev->dm.mst_encoders[i].base); @@ -1754,6 +1819,11 @@ static int load_dmcu_fw(struct amdgpu_device *adev) case IP_VERSION(3, 0, 1): case IP_VERSION(3, 1, 2): case IP_VERSION(3, 1, 3): + case IP_VERSION(3, 1, 4): + case IP_VERSION(3, 1, 5): + case IP_VERSION(3, 1, 6): + case IP_VERSION(3, 2, 0): + case IP_VERSION(3, 2, 1): return 0; default: break; @@ -1869,7 +1939,26 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31; fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB; break; - + case IP_VERSION(3, 1, 4): + dmub_asic = DMUB_ASIC_DCN314; + fw_name_dmub = FIRMWARE_DCN_314_DMUB; + break; + case IP_VERSION(3, 1, 5): + dmub_asic = DMUB_ASIC_DCN315; + fw_name_dmub = FIRMWARE_DCN_315_DMUB; + break; + case IP_VERSION(3, 1, 6): + dmub_asic = DMUB_ASIC_DCN316; + fw_name_dmub = FIRMWARE_DCN316_DMUB; + break; + case IP_VERSION(3, 2, 0): + dmub_asic = DMUB_ASIC_DCN32; + fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB; + break; + case IP_VERSION(3, 2, 1): + dmub_asic = DMUB_ASIC_DCN321; + fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB; + break; default: /* ASIC doesn't support DMUB. */ return 0; @@ -2116,7 +2205,8 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) } else { ret = drm_dp_mst_topology_mgr_resume(mgr, true); if (ret < 0) { - drm_dp_mst_topology_mgr_set_mst(mgr, false); + dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, + aconnector->dc_link); need_hotplug = true; } } @@ -2129,12 +2219,8 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) { - struct smu_context *smu = &adev->smu; int ret = 0; - if (!is_support_sw_smu(adev)) - return 0; - /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends * on window driver dc implementation. * For Navi1x, clock settings of dcn watermarks are fixed. 
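Context for the dm_dmub_sw_init() hunk above: MODULE_FIRMWARE() (as used for the new FIRMWARE_DCN_* names earlier in this diff) only embeds the filename in module metadata so initramfs tooling can bundle the blob; the selected fw_name_dmub is actually loaded later through the firmware API. A hedged sketch of that pairing, with a hypothetical filename (the driver itself may use a request_firmware_direct() variant):

#include <linux/firmware.h>
#include <linux/module.h>

#define FIRMWARE_EXAMPLE_DMUB "amdgpu/example_dmcub.bin" /* hypothetical name */
MODULE_FIRMWARE(FIRMWARE_EXAMPLE_DMUB);

static int example_load_dmub(struct device *dev)
{
	const struct firmware *fw;
	int r;

	/* Looks the blob up under /lib/firmware by the advertised name. */
	r = request_firmware(&fw, FIRMWARE_EXAMPLE_DMUB, dev);
	if (r)
		return r;

	/* ... parse headers, copy fw->data / fw->size into DMUB memory ... */

	release_firmware(fw);
	return 0;
}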
the settings @@ -2173,7 +2259,7 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) return 0; } - ret = smu_write_watermarks_table(smu); + ret = amdgpu_dpm_write_watermarks_table(adev); if (ret) { DRM_ERROR("Failed to update WMTABLE!\n"); return ret; @@ -2232,9 +2318,6 @@ static int dm_hw_fini(void *handle) } -static int dm_enable_vblank(struct drm_crtc *crtc); -static void dm_disable_vblank(struct drm_crtc *crtc); - static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev, struct dc_state *state, bool enable) { @@ -2304,14 +2387,6 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) goto fail; } - - res = dc_validate_global_state(dc, context, false); - - if (res != DC_OK) { - DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res); - goto fail; - } - res = dc_commit_state(dc, context); fail: @@ -2340,9 +2415,7 @@ static int dm_suspend(void *handle) if (amdgpu_in_reset(adev)) { mutex_lock(&dm->dc_lock); -#if defined(CONFIG_DRM_AMD_DC_DCN) dc_allow_idle_optimizations(adev->dm.dc, false); -#endif dm->cached_dc_state = dc_copy_state(dm->dc->current_state); @@ -2371,7 +2444,7 @@ static int dm_suspend(void *handle) return 0; } -static struct amdgpu_dm_connector * +struct amdgpu_dm_connector * amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, struct drm_crtc *crtc) { @@ -2512,34 +2585,6 @@ cleanup: return; } -static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state) -{ - struct dc_stream_state *stream_state; - struct amdgpu_dm_connector *aconnector = link->priv; - struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev); - struct dc_stream_update stream_update; - bool dpms_off = true; - - memset(&stream_update, 0, sizeof(stream_update)); - stream_update.dpms_off = &dpms_off; - - mutex_lock(&adev->dm.dc_lock); - stream_state = dc_stream_find_from_link(link); - - if (stream_state == NULL) { - DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n"); - mutex_unlock(&adev->dm.dc_lock); - return; - } - - stream_update.stream = stream_state; - acrtc_state->force_dpms_off = true; - dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0, - stream_state, &stream_update, - stream_state->ctx->dc->current_state); - mutex_unlock(&adev->dm.dc_lock); -} - static int dm_resume(void *handle) { struct amdgpu_device *adev = handle; @@ -2571,13 +2616,13 @@ static int dm_resume(void *handle) * before the 0 streams commit. * * DC expects that link encoder assignments are *not* valid - * when committing a state, so as a workaround it needs to be - * cleared here. + * when committing a state, so as a workaround we can copy + * off of the current state. + * + * We lose the previous assignments, but we had already + * commit 0 streams anyway. */ - link_enc_cfg_init(dm->dc, dc_state); - - if (dc_enable_dmub_notifications(adev->dm.dc)) - amdgpu_dm_outbox_init(adev); + link_enc_cfg_copy(adev->dm.dc->current_state, dc_state); r = dm_dmub_hw_init(adev); if (r) @@ -2595,15 +2640,11 @@ static int dm_resume(void *handle) = 0xffffffff; } } -#if defined(CONFIG_DRM_AMD_DC_DCN) - /* - * Resource allocation happens for link encoders for newer ASIC in - * dc_validate_global_state, so we need to revalidate it. - * - * This shouldn't fail (it passed once before), so warn if it does. 
- */ - WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK); -#endif + + if (dc_is_dmub_outbox_supported(adev->dm.dc)) { + amdgpu_dm_outbox_init(adev); + dc_enable_dmub_outbox(adev->dm.dc); + } WARN_ON(!dc_commit_state(dm->dc, dc_state)); @@ -2626,14 +2667,14 @@ static int dm_resume(void *handle) /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ dc_resource_state_construct(dm->dc, dm_state->context); + /* Before powering on DC we need to re-initialize DMUB. */ + dm_dmub_hw_resume(adev); + /* Re-enable outbox interrupts for DPIA. */ - if (dc_enable_dmub_notifications(adev->dm.dc)) + if (dc_is_dmub_outbox_supported(adev->dm.dc)) { amdgpu_dm_outbox_init(adev); - - /* Before powering on DC we need to re-initialize DMUB. */ - r = dm_dmub_hw_init(adev); - if (r) - DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); + dc_enable_dmub_outbox(adev->dm.dc); + } /* power on hardware */ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); @@ -2659,17 +2700,21 @@ static int dm_resume(void *handle) * this is the case when traversing through already created * MST connectors, should be skipped */ - if (aconnector->mst_port) + if (aconnector->dc_link && + aconnector->dc_link->type == dc_connection_mst_branch) continue; mutex_lock(&aconnector->hpd_lock); if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type)) DRM_ERROR("KMS: Failed to detect connector\n"); - if (aconnector->base.force && new_connection_type == dc_connection_none) + if (aconnector->base.force && new_connection_type == dc_connection_none) { emulated_link_detect(aconnector->dc_link); - else + } else { + mutex_lock(&dm->dc_lock); dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); + mutex_unlock(&dm->dc_lock); + } if (aconnector->fake_enable && aconnector->dc_link->local_sink) aconnector->fake_enable = false; @@ -2774,20 +2819,18 @@ static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { }; static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { - .atomic_commit_tail = amdgpu_dm_atomic_commit_tail + .atomic_commit_tail = amdgpu_dm_atomic_commit_tail, + .atomic_commit_setup = drm_dp_mst_atomic_setup_commit, }; static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) { - u32 max_cll, min_cll, max, min, q, r; struct amdgpu_dm_backlight_caps *caps; struct amdgpu_display_manager *dm; struct drm_connector *conn_base; struct amdgpu_device *adev; struct dc_link *link = NULL; - static const u8 pre_computed_values[] = { - 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69, - 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98}; + struct drm_luminance_range_info *luminance_range; int i; if (!aconnector || !aconnector->dc_link) @@ -2809,8 +2852,6 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) caps = &dm->backlight_caps[i]; caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; caps->aux_support = false; - max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll; - min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll; if (caps->ext_caps->bits.oled == 1 /*|| caps->ext_caps->bits.sdr_aux_backlight_control == 1 || @@ -2822,31 +2863,9 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) else if (amdgpu_backlight == 1) caps->aux_support = true; - /* From the specification (CTA-861-G), for calculating the maximum - * luminance we need to use: - * Luminance = 50*2**(CV/32) - * Where CV is a one-byte value. 
- * For calculating this expression we may need float point precision; - * to avoid this complexity level, we take advantage that CV is divided - * by a constant. From the Euclids division algorithm, we know that CV - * can be written as: CV = 32*q + r. Next, we replace CV in the - * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just - * need to pre-compute the value of r/32. For pre-computing the values - * We just used the following Ruby line: - * (0...32).each {|cv| puts (50*2**(cv/32.0)).round} - * The results of the above expressions can be verified at - * pre_computed_values. - */ - q = max_cll >> 5; - r = max_cll % 32; - max = (1 << q) * pre_computed_values[r]; - - // min luminance: maxLum * (CV/255)^2 / 100 - q = DIV_ROUND_CLOSEST(min_cll, 255); - min = max * DIV_ROUND_CLOSEST((q * q), 100); - - caps->aux_max_input_signal = max; - caps->aux_min_input_signal = min; + luminance_range = &conn_base->display_info.luminance_range; + caps->aux_min_input_signal = luminance_range->min_luminance; + caps->aux_max_input_signal = luminance_range->max_luminance; } void amdgpu_dm_update_connector_after_detect( @@ -2960,13 +2979,12 @@ void amdgpu_dm_update_connector_after_detect( aconnector->edid = (struct edid *)sink->dc_edid.raw_edid; - drm_connector_update_edid_property(connector, - aconnector->edid); if (aconnector->dc_link->aux_mode) drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, aconnector->edid); } + drm_connector_update_edid_property(connector, aconnector->edid); amdgpu_dm_update_freesync_caps(connector, aconnector->edid); update_connector_ext_caps(aconnector); } else { @@ -2998,16 +3016,14 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) struct drm_device *dev = connector->dev; enum dc_connection_type new_connection_type = dc_connection_none; struct amdgpu_device *adev = drm_to_adev(dev); +#ifdef CONFIG_DRM_AMD_DC_HDCP struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); - struct dm_crtc_state *dm_crtc_state = NULL; +#endif + bool ret = false; if (adev->dm.disable_hpd_irq) return; - if (dm_con_state->base.state && dm_con_state->base.crtc) - dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state( - dm_con_state->base.state, - dm_con_state->base.crtc)); /* * In case of failure or MST no need to update connector status or notify the OS * since (for MST case) MST does this in its own context. 
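On the update_connector_ext_caps() hunk above: the deleted block evaluated the CTA-861-G maximum luminance, Luminance = 50 * 2**(CV/32), in fixed point by splitting CV = 32*q + r and tabulating round(50 * 2**(r/32)). For example, CV = 65 gives q = 2, r = 1, so max = (1 << 2) * 51 = 204 nits (the exact value is about 204.4). The diff drops this in favour of the pre-parsed drm display_info.luminance_range. A standalone rendering of the removed fixed-point trick, with the table copied from the deleted code:

#include <stdio.h>

/* round(50 * 2**(r/32.0)) for r = 0..31, as in the removed table */
static const unsigned char pre_computed_values[] = {
	50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
	71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

static unsigned int max_luminance_from_cv(unsigned int cv)
{
	unsigned int q = cv >> 5;  /* cv / 32 */
	unsigned int r = cv & 31;  /* cv % 32 */

	return (1u << q) * pre_computed_values[r];
}

int main(void)
{
	printf("%u nits\n", max_luminance_from_cv(65)); /* prints "204 nits" */
	return 0;
}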
@@ -3034,22 +3050,21 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) drm_modeset_unlock_all(dev); if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) - drm_kms_helper_hotplug_event(dev); - - } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) { - if (new_connection_type == dc_connection_none && - aconnector->dc_link->type == dc_connection_none && - dm_crtc_state) - dm_set_dpms_off(aconnector->dc_link, dm_crtc_state); - - amdgpu_dm_update_connector_after_detect(aconnector); + drm_kms_helper_connector_hotplug_event(connector); + } else { + mutex_lock(&adev->dm.dc_lock); + ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); + mutex_unlock(&adev->dm.dc_lock); + if (ret) { + amdgpu_dm_update_connector_after_detect(aconnector); - drm_modeset_lock_all(dev); - dm_restore_drm_connector_state(dev, connector); - drm_modeset_unlock_all(dev); + drm_modeset_lock_all(dev); + dm_restore_drm_connector_state(dev, connector); + drm_modeset_unlock_all(dev); - if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) - drm_kms_helper_hotplug_event(dev); + if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) + drm_kms_helper_connector_hotplug_event(connector); + } } mutex_unlock(&aconnector->hpd_lock); @@ -3243,20 +3258,26 @@ out: dm_restore_drm_connector_state(dev, connector); drm_modeset_unlock_all(dev); - drm_kms_helper_hotplug_event(dev); - } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) { + drm_kms_helper_connector_hotplug_event(connector); + } else { + bool ret = false; - if (aconnector->fake_enable) - aconnector->fake_enable = false; + mutex_lock(&adev->dm.dc_lock); + ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX); + mutex_unlock(&adev->dm.dc_lock); - amdgpu_dm_update_connector_after_detect(aconnector); + if (ret) { + if (aconnector->fake_enable) + aconnector->fake_enable = false; + amdgpu_dm_update_connector_after_detect(aconnector); - drm_modeset_lock_all(dev); - dm_restore_drm_connector_state(dev, connector); - drm_modeset_unlock_all(dev); + drm_modeset_lock_all(dev); + dm_restore_drm_connector_state(dev, connector); + drm_modeset_unlock_all(dev); - drm_kms_helper_hotplug_event(dev); + drm_kms_helper_connector_hotplug_event(connector); + } } } #ifdef CONFIG_DRM_AMD_DC_HDCP @@ -3503,7 +3524,6 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) return 0; } -#if defined(CONFIG_DRM_AMD_DC_DCN) /* Register IRQ sources and initialize IRQ callbacks */ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) { @@ -3623,7 +3643,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) /* Use GRPH_PFLIP interrupt */ for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT; - i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1; + i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1; i++) { r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq); if (r) { @@ -3692,7 +3712,6 @@ static int register_outbox_irq_handlers(struct amdgpu_device *adev) return 0; } -#endif /* * Acquires the lock for the atomic state object and returns @@ -3700,8 +3719,8 @@ static int register_outbox_irq_handlers(struct amdgpu_device *adev) * * This should only be called during atomic check. 
*/ -static int dm_atomic_get_state(struct drm_atomic_state *state, - struct dm_atomic_state **dm_state) +int dm_atomic_get_state(struct drm_atomic_state *state, + struct dm_atomic_state **dm_state) { struct drm_device *dev = state->dev; struct amdgpu_device *adev = drm_to_adev(dev); @@ -3792,7 +3811,11 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) adev_to_drm(adev)->mode_config.max_height = 16384; adev_to_drm(adev)->mode_config.preferred_depth = 24; - adev_to_drm(adev)->mode_config.prefer_shadow = 1; + if (adev->asic_type == CHIP_HAWAII) + /* disable prefer shadow for now due to hibernation issues */ + adev_to_drm(adev)->mode_config.prefer_shadow = 0; + else + adev_to_drm(adev)->mode_config.prefer_shadow = 1; /* indicates support for immediate flip */ adev_to_drm(adev)->mode_config.async_page_flip = true; @@ -3836,9 +3859,6 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50 -#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ - defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) - static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, int bl_idx) { @@ -3918,7 +3938,7 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap max - min); } -static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, +static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, int bl_idx, u32 user_brightness) { @@ -3949,7 +3969,8 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx); } - return rc ? 0 : 1; + if (rc) + dm->actual_brightness[bl_idx] = user_brightness; } static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) @@ -4023,6 +4044,13 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm) amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps); dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL; + if (!acpi_video_backlight_use_native()) { + drm_info(adev_to_drm(dm->adev), "Skipping amdgpu DM backlight registration\n"); + /* Try registering an ACPI video backlight device instead. 
*/ + acpi_video_register_backlight(); + return; + } + props.max_brightness = AMDGPU_MAX_BL_LEVEL; props.brightness = AMDGPU_MAX_BL_LEVEL; props.type = BACKLIGHT_RAW; @@ -4041,7 +4069,6 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm) else DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name); } -#endif static int initialize_plane(struct amdgpu_display_manager *dm, struct amdgpu_mode_info *mode_info, int plane_id, @@ -4087,9 +4114,6 @@ static int initialize_plane(struct amdgpu_display_manager *dm, static void register_backlight_device(struct amdgpu_display_manager *dm, struct dc_link *link) { -#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ - defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) - if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) && link->type != dc_connection_none) { /* @@ -4105,9 +4129,9 @@ static void register_backlight_device(struct amdgpu_display_manager *dm, dm->num_of_edps++; } } -#endif } +static void amdgpu_set_panel_orientation(struct drm_connector *connector); /* * In this architecture, the association @@ -4170,6 +4194,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) for (i = 0; i < dm->dc->caps.max_planes; ++i) { struct dc_plane_cap *plane = &dm->dc->caps.planes[i]; + /* Do not create overlay if MPO disabled */ + if (amdgpu_dc_debug_mask & DC_DISABLE_MPO) + break; + if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL) continue; @@ -4195,12 +4223,16 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) goto fail; } -#if defined(CONFIG_DRM_AMD_DC_DCN) /* Use Outbox interrupt */ switch (adev->ip_versions[DCE_HWIP][0]) { case IP_VERSION(3, 0, 0): case IP_VERSION(3, 1, 2): case IP_VERSION(3, 1, 3): + case IP_VERSION(3, 1, 4): + case IP_VERSION(3, 1, 5): + case IP_VERSION(3, 1, 6): + case IP_VERSION(3, 2, 0): + case IP_VERSION(3, 2, 1): case IP_VERSION(2, 1, 0): if (register_outbox_irq_handlers(dm->adev)) { DRM_ERROR("DM: Failed to initialize IRQ\n"); @@ -4217,6 +4249,11 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) switch (adev->ip_versions[DCE_HWIP][0]) { case IP_VERSION(3, 1, 2): case IP_VERSION(3, 1, 3): + case IP_VERSION(3, 1, 4): + case IP_VERSION(3, 1, 5): + case IP_VERSION(3, 1, 6): + case IP_VERSION(3, 2, 0): + case IP_VERSION(3, 2, 1): psr_feature_enabled = true; break; default: @@ -4224,7 +4261,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) break; } } -#endif /* loops over all connectors on the board */ for (i = 0; i < link_cnt; i++) { @@ -4263,17 +4299,31 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) if (aconnector->base.force && new_connection_type == dc_connection_none) { emulated_link_detect(link); amdgpu_dm_update_connector_after_detect(aconnector); + } else { + bool ret = false; - } else if (dc_link_detect(link, DETECT_REASON_BOOT)) { - amdgpu_dm_update_connector_after_detect(aconnector); - register_backlight_device(dm, link); - if (dm->num_of_edps) - update_connector_ext_caps(aconnector); - if (psr_feature_enabled) - amdgpu_dm_set_psr_caps(link); - } + mutex_lock(&dm->dc_lock); + ret = dc_link_detect(link, DETECT_REASON_BOOT); + mutex_unlock(&dm->dc_lock); + + if (ret) { + amdgpu_dm_update_connector_after_detect(aconnector); + register_backlight_device(dm, link); + if (dm->num_of_edps) + update_connector_ext_caps(aconnector); + if (psr_feature_enabled) + amdgpu_dm_set_psr_caps(link); + + /* TODO: Fix vblank control helpers to delay PSR entry to allow this when + * PSR is also 
supported. + */ + if (link->psr_settings.psr_feature_enabled) + adev_to_drm(adev)->vblank_disable_immediate = false; + } + } + amdgpu_set_panel_orientation(&aconnector->base); } /* Software is initialized. Now we can register interrupt handlers. */ @@ -4311,7 +4361,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } break; default: -#if defined(CONFIG_DRM_AMD_DC_DCN) switch (adev->ip_versions[DCE_HWIP][0]) { case IP_VERSION(1, 0, 0): case IP_VERSION(1, 0, 1): @@ -4325,6 +4374,11 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case IP_VERSION(3, 0, 1): case IP_VERSION(3, 1, 2): case IP_VERSION(3, 1, 3): + case IP_VERSION(3, 1, 4): + case IP_VERSION(3, 1, 5): + case IP_VERSION(3, 1, 6): + case IP_VERSION(3, 2, 0): + case IP_VERSION(3, 2, 1): if (dcn10_register_irq_handlers(dm->adev)) { DRM_ERROR("DM: Failed to initialize IRQ\n"); goto fail; @@ -4335,7 +4389,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) adev->ip_versions[DCE_HWIP][0]); goto fail; } -#endif break; } @@ -4484,7 +4537,7 @@ static int dm_early_init(void *handle) adev->mode_info.num_dig = 6; break; default: -#if defined(CONFIG_DRM_AMD_DC_DCN) + switch (adev->ip_versions[DCE_HWIP][0]) { case IP_VERSION(2, 0, 2): case IP_VERSION(3, 0, 0): @@ -4510,6 +4563,11 @@ static int dm_early_init(void *handle) case IP_VERSION(2, 1, 0): case IP_VERSION(3, 1, 2): case IP_VERSION(3, 1, 3): + case IP_VERSION(3, 1, 4): + case IP_VERSION(3, 1, 5): + case IP_VERSION(3, 1, 6): + case IP_VERSION(3, 2, 0): + case IP_VERSION(3, 2, 1): adev->mode_info.num_crtc = 4; adev->mode_info.num_hpd = 4; adev->mode_info.num_dig = 4; @@ -4519,7 +4577,6 @@ static int dm_early_init(void *handle) adev->ip_versions[DCE_HWIP][0]); return -EINVAL; } -#endif break; } @@ -4542,13 +4599,6 @@ static int dm_early_init(void *handle) return 0; } -static bool modeset_required(struct drm_crtc_state *crtc_state, - struct dc_stream_state *new_stream, - struct dc_stream_state *old_stream) -{ - return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state); -} - static bool modereset_required(struct drm_crtc_state *crtc_state) { return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state); @@ -4564,811 +4614,6 @@ static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { .destroy = amdgpu_dm_encoder_destroy, }; - -static void get_min_max_dc_plane_scaling(struct drm_device *dev, - struct drm_framebuffer *fb, - int *min_downscale, int *max_upscale) -{ - struct amdgpu_device *adev = drm_to_adev(dev); - struct dc *dc = adev->dm.dc; - /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */ - struct dc_plane_cap *plane_cap = &dc->caps.planes[0]; - - switch (fb->format->format) { - case DRM_FORMAT_P010: - case DRM_FORMAT_NV12: - case DRM_FORMAT_NV21: - *max_upscale = plane_cap->max_upscale_factor.nv12; - *min_downscale = plane_cap->max_downscale_factor.nv12; - break; - - case DRM_FORMAT_XRGB16161616F: - case DRM_FORMAT_ARGB16161616F: - case DRM_FORMAT_XBGR16161616F: - case DRM_FORMAT_ABGR16161616F: - *max_upscale = plane_cap->max_upscale_factor.fp16; - *min_downscale = plane_cap->max_downscale_factor.fp16; - break; - - default: - *max_upscale = plane_cap->max_upscale_factor.argb8888; - *min_downscale = plane_cap->max_downscale_factor.argb8888; - break; - } - - /* - * A factor of 1 in the plane_cap means to not allow scaling, ie. use a - * scaling factor of 1.0 == 1000 units. 
- */ - if (*max_upscale == 1) - *max_upscale = 1000; - - if (*min_downscale == 1) - *min_downscale = 1000; -} - - -static int fill_dc_scaling_info(struct amdgpu_device *adev, - const struct drm_plane_state *state, - struct dc_scaling_info *scaling_info) -{ - int scale_w, scale_h, min_downscale, max_upscale; - - memset(scaling_info, 0, sizeof(*scaling_info)); - - /* Source is fixed 16.16 but we ignore mantissa for now... */ - scaling_info->src_rect.x = state->src_x >> 16; - scaling_info->src_rect.y = state->src_y >> 16; - - /* - * For reasons we don't (yet) fully understand a non-zero - * src_y coordinate into an NV12 buffer can cause a - * system hang on DCN1x. - * To avoid hangs (and maybe be overly cautious) - * let's reject both non-zero src_x and src_y. - * - * We currently know of only one use-case to reproduce a - * scenario with non-zero src_x and src_y for NV12, which - * is to gesture the YouTube Android app into full screen - * on ChromeOS. - */ - if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || - (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) && - (state->fb && state->fb->format->format == DRM_FORMAT_NV12 && - (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0))) - return -EINVAL; - - scaling_info->src_rect.width = state->src_w >> 16; - if (scaling_info->src_rect.width == 0) - return -EINVAL; - - scaling_info->src_rect.height = state->src_h >> 16; - if (scaling_info->src_rect.height == 0) - return -EINVAL; - - scaling_info->dst_rect.x = state->crtc_x; - scaling_info->dst_rect.y = state->crtc_y; - - if (state->crtc_w == 0) - return -EINVAL; - - scaling_info->dst_rect.width = state->crtc_w; - - if (state->crtc_h == 0) - return -EINVAL; - - scaling_info->dst_rect.height = state->crtc_h; - - /* DRM doesn't specify clipping on destination output. */ - scaling_info->clip_rect = scaling_info->dst_rect; - - /* Validate scaling per-format with DC plane caps */ - if (state->plane && state->plane->dev && state->fb) { - get_min_max_dc_plane_scaling(state->plane->dev, state->fb, - &min_downscale, &max_upscale); - } else { - min_downscale = 250; - max_upscale = 16000; - } - - scale_w = scaling_info->dst_rect.width * 1000 / - scaling_info->src_rect.width; - - if (scale_w < min_downscale || scale_w > max_upscale) - return -EINVAL; - - scale_h = scaling_info->dst_rect.height * 1000 / - scaling_info->src_rect.height; - - if (scale_h < min_downscale || scale_h > max_upscale) - return -EINVAL; - - /* - * The "scaling_quality" can be ignored for now, quality = 0 has DC - * assume reasonable defaults based on the format. 
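The removed fill_dc_scaling_info() above (presumably relocated to the new amdgpu_dm_plane.c rather than dropped) validates scaling in permille: a factor of 1000 means 1:1, so a 1920-wide source scanned out at 960 yields 960 * 1000 / 1920 = 500, which must fall within [min_downscale, max_upscale] from the plane caps. A standalone rendering of that check (the helper name is illustrative):

#include <stdbool.h>

/* Scale factors are in permille: 1000 == 1.0, per the DC plane caps. */
static bool scale_within_caps(int src, int dst,
			      int min_downscale, int max_upscale)
{
	int scale = dst * 1000 / src;

	return scale >= min_downscale && scale <= max_upscale;
}

/* Example: src 1920, dst 960 gives 500; allowed if min_downscale <= 500. */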
- */ - - return 0; -} - -static void -fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info, - uint64_t tiling_flags) -{ - /* Fill GFX8 params */ - if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) { - unsigned int bankw, bankh, mtaspect, tile_split, num_banks; - - bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); - bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); - mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); - tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); - num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); - - /* XXX fix me for VI */ - tiling_info->gfx8.num_banks = num_banks; - tiling_info->gfx8.array_mode = - DC_ARRAY_2D_TILED_THIN1; - tiling_info->gfx8.tile_split = tile_split; - tiling_info->gfx8.bank_width = bankw; - tiling_info->gfx8.bank_height = bankh; - tiling_info->gfx8.tile_aspect = mtaspect; - tiling_info->gfx8.tile_mode = - DC_ADDR_SURF_MICRO_TILING_DISPLAY; - } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) - == DC_ARRAY_1D_TILED_THIN1) { - tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1; - } - - tiling_info->gfx8.pipe_config = - AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); -} - -static void -fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev, - union dc_tiling_info *tiling_info) -{ - tiling_info->gfx9.num_pipes = - adev->gfx.config.gb_addr_config_fields.num_pipes; - tiling_info->gfx9.num_banks = - adev->gfx.config.gb_addr_config_fields.num_banks; - tiling_info->gfx9.pipe_interleave = - adev->gfx.config.gb_addr_config_fields.pipe_interleave_size; - tiling_info->gfx9.num_shader_engines = - adev->gfx.config.gb_addr_config_fields.num_se; - tiling_info->gfx9.max_compressed_frags = - adev->gfx.config.gb_addr_config_fields.max_compress_frags; - tiling_info->gfx9.num_rb_per_se = - adev->gfx.config.gb_addr_config_fields.num_rb_per_se; - tiling_info->gfx9.shaderEnable = 1; - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) - tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs; -} - -static int -validate_dcc(struct amdgpu_device *adev, - const enum surface_pixel_format format, - const enum dc_rotation_angle rotation, - const union dc_tiling_info *tiling_info, - const struct dc_plane_dcc_param *dcc, - const struct dc_plane_address *address, - const struct plane_size *plane_size) -{ - struct dc *dc = adev->dm.dc; - struct dc_dcc_surface_param input; - struct dc_surface_dcc_cap output; - - memset(&input, 0, sizeof(input)); - memset(&output, 0, sizeof(output)); - - if (!dcc->enable) - return 0; - - if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || - !dc->cap_funcs.get_dcc_compression_cap) - return -EINVAL; - - input.format = format; - input.surface_size.width = plane_size->surface_size.width; - input.surface_size.height = plane_size->surface_size.height; - input.swizzle_mode = tiling_info->gfx9.swizzle; - - if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180) - input.scan = SCAN_DIRECTION_HORIZONTAL; - else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270) - input.scan = SCAN_DIRECTION_VERTICAL; - - if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output)) - return -EINVAL; - - if (!output.capable) - return -EINVAL; - - if (dcc->independent_64b_blks == 0 && - output.grph.rgb.independent_64b_blks != 0) - return -EINVAL; - - return 0; -} - -static bool -modifier_has_dcc(uint64_t modifier) -{ - return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier); -} - -static unsigned 
-modifier_gfx9_swizzle_mode(uint64_t modifier) -{ - if (modifier == DRM_FORMAT_MOD_LINEAR) - return 0; - - return AMD_FMT_MOD_GET(TILE, modifier); -} - -static const struct drm_format_info * -amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd) -{ - return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]); -} - -static void -fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev, - union dc_tiling_info *tiling_info, - uint64_t modifier) -{ - unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier); - unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier); - unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier); - unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits); - - fill_gfx9_tiling_info_from_device(adev, tiling_info); - - if (!IS_AMD_FMT_MOD(modifier)) - return; - - tiling_info->gfx9.num_pipes = 1u << pipes_log2; - tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2); - - if (adev->family >= AMDGPU_FAMILY_NV) { - tiling_info->gfx9.num_pkrs = 1u << pkrs_log2; - } else { - tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits; - - /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */ - } -} - -enum dm_micro_swizzle { - MICRO_SWIZZLE_Z = 0, - MICRO_SWIZZLE_S = 1, - MICRO_SWIZZLE_D = 2, - MICRO_SWIZZLE_R = 3 -}; - -static bool dm_plane_format_mod_supported(struct drm_plane *plane, - uint32_t format, - uint64_t modifier) -{ - struct amdgpu_device *adev = drm_to_adev(plane->dev); - const struct drm_format_info *info = drm_format_info(format); - int i; - - enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3; - - if (!info) - return false; - - /* - * We always have to allow these modifiers: - * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers. - * 2. Not passing any modifiers is the same as explicitly passing INVALID. - */ - if (modifier == DRM_FORMAT_MOD_LINEAR || - modifier == DRM_FORMAT_MOD_INVALID) { - return true; - } - - /* Check that the modifier is on the list of the plane's supported modifiers. */ - for (i = 0; i < plane->modifier_count; i++) { - if (modifier == plane->modifiers[i]) - break; - } - if (i == plane->modifier_count) - return false; - - /* - * For D swizzle the canonical modifier depends on the bpp, so check - * it here. - */ - if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 && - adev->family >= AMDGPU_FAMILY_NV) { - if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4) - return false; - } - - if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D && - info->cpp[0] < 8) - return false; - - if (modifier_has_dcc(modifier)) { - /* Per radeonsi comments 16/64 bpp are more complicated. */ - if (info->cpp[0] != 4) - return false; - /* We support multi-planar formats, but not when combined with - * additional DCC metadata planes. 
*/ - if (info->num_planes > 1) - return false; - } - - return true; -} - -static void -add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod) -{ - if (!*mods) - return; - - if (*cap - *size < 1) { - uint64_t new_cap = *cap * 2; - uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL); - - if (!new_mods) { - kfree(*mods); - *mods = NULL; - return; - } - - memcpy(new_mods, *mods, sizeof(uint64_t) * *size); - kfree(*mods); - *mods = new_mods; - *cap = new_cap; - } - - (*mods)[*size] = mod; - *size += 1; -} - -static void -add_gfx9_modifiers(const struct amdgpu_device *adev, - uint64_t **mods, uint64_t *size, uint64_t *capacity) -{ - int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); - int pipe_xor_bits = min(8, pipes + - ilog2(adev->gfx.config.gb_addr_config_fields.num_se)); - int bank_xor_bits = min(8 - pipe_xor_bits, - ilog2(adev->gfx.config.gb_addr_config_fields.num_banks)); - int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) + - ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se); - - - if (adev->family == AMDGPU_FAMILY_RV) { - /* Raven2 and later */ - bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81; - - /* - * No _D DCC swizzles yet because we only allow 32bpp, which - * doesn't support _D on DCN - */ - - if (has_constant_encode) { - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1)); - } - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0)); - - if (has_constant_encode) { - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_RETILE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | - - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(RB, rb) | - AMD_FMT_MOD_SET(PIPE, pipes)); - } - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_RETILE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) | - AMD_FMT_MOD_SET(RB, rb) | - AMD_FMT_MOD_SET(PIPE, pipes)); - } - - /* - * Only supported for 64bpp on Raven, will be filtered on format in - * dm_plane_format_mod_supported. 
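The add_gfx9/gfx10 modifier helpers in these hunks build each list entry by OR-ing bitfields into a 64-bit DRM format modifier with AMD_FMT_MOD_SET(), and dm_plane_format_mod_supported() unpacks them again with AMD_FMT_MOD_GET(); both macros come from the drm_fourcc.h uAPI. A minimal encode/decode round trip (the function is illustrative, not driver code):

#include <drm/drm_fourcc.h>

static u64 example_modifier(void)
{
	/* Encode: shift each field into its slot and OR them together. */
	u64 mod = AMD_FMT_MOD |
		  AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		  AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
		  AMD_FMT_MOD_SET(DCC, 1);

	/* Decode: mask the field back out of the u64. */
	if (AMD_FMT_MOD_GET(DCC, mod)) {
		/* This modifier carries DCC (compression) metadata. */
	}

	return mod;
}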
- */ - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); - - if (adev->family == AMDGPU_FAMILY_RV) { - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); - } - - /* - * Only supported for 64bpp on Raven, will be filtered on format in - * dm_plane_format_mod_supported. - */ - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); - - if (adev->family == AMDGPU_FAMILY_RV) { - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); - } -} - -static void -add_gfx10_1_modifiers(const struct amdgpu_device *adev, - uint64_t **mods, uint64_t *size, uint64_t *capacity) -{ - int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_RETILE, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); - - - /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */ - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); -} - -static void -add_gfx10_3_modifiers(const struct amdgpu_device *adev, - uint64_t **mods, uint64_t *size, uint64_t *capacity) -{ - int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); - int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - 
AMD_FMT_MOD_SET(PACKERS, pkrs) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_RETILE, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_RETILE, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs)); - - /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */ - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); -} - -static int -get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods) -{ - uint64_t size = 0, capacity = 128; - *mods = NULL; - - /* We have not hooked up any pre-GFX9 modifiers. */ - if (adev->family < AMDGPU_FAMILY_AI) - return 0; - - *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL); - - if (plane_type == DRM_PLANE_TYPE_CURSOR) { - add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); - add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); - return *mods ? 
0 : -ENOMEM; - } - - switch (adev->family) { - case AMDGPU_FAMILY_AI: - case AMDGPU_FAMILY_RV: - add_gfx9_modifiers(adev, mods, &size, &capacity); - break; - case AMDGPU_FAMILY_NV: - case AMDGPU_FAMILY_VGH: - case AMDGPU_FAMILY_YC: - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) - add_gfx10_3_modifiers(adev, mods, &size, &capacity); - else - add_gfx10_1_modifiers(adev, mods, &size, &capacity); - break; - } - - add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); - - /* INVALID marks the end of the list. */ - add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); - - if (!*mods) - return -ENOMEM; - - return 0; -} - -static int -fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev, - const struct amdgpu_framebuffer *afb, - const enum surface_pixel_format format, - const enum dc_rotation_angle rotation, - const struct plane_size *plane_size, - union dc_tiling_info *tiling_info, - struct dc_plane_dcc_param *dcc, - struct dc_plane_address *address, - const bool force_disable_dcc) -{ - const uint64_t modifier = afb->base.modifier; - int ret = 0; - - fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier); - tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier); - - if (modifier_has_dcc(modifier) && !force_disable_dcc) { - uint64_t dcc_address = afb->address + afb->base.offsets[1]; - bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier); - bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier); - - dcc->enable = 1; - dcc->meta_pitch = afb->base.pitches[1]; - dcc->independent_64b_blks = independent_64b_blks; - if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) { - if (independent_64b_blks && independent_128b_blks) - dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl; - else if (independent_128b_blks) - dcc->dcc_ind_blk = hubp_ind_block_128b; - else if (independent_64b_blks && !independent_128b_blks) - dcc->dcc_ind_blk = hubp_ind_block_64b; - else - dcc->dcc_ind_blk = hubp_ind_block_unconstrained; - } else { - if (independent_64b_blks) - dcc->dcc_ind_blk = hubp_ind_block_64b; - else - dcc->dcc_ind_blk = hubp_ind_block_unconstrained; - } - - address->grph.meta_addr.low_part = lower_32_bits(dcc_address); - address->grph.meta_addr.high_part = upper_32_bits(dcc_address); - } - - ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size); - if (ret) - drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret); - - return ret; -} - -static int -fill_plane_buffer_attributes(struct amdgpu_device *adev, - const struct amdgpu_framebuffer *afb, - const enum surface_pixel_format format, - const enum dc_rotation_angle rotation, - const uint64_t tiling_flags, - union dc_tiling_info *tiling_info, - struct plane_size *plane_size, - struct dc_plane_dcc_param *dcc, - struct dc_plane_address *address, - bool tmz_surface, - bool force_disable_dcc) -{ - const struct drm_framebuffer *fb = &afb->base; - int ret; - - memset(tiling_info, 0, sizeof(*tiling_info)); - memset(plane_size, 0, sizeof(*plane_size)); - memset(dcc, 0, sizeof(*dcc)); - memset(address, 0, sizeof(*address)); - - address->tmz_surface = tmz_surface; - - if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { - uint64_t addr = afb->address + fb->offsets[0]; - - plane_size->surface_size.x = 0; - plane_size->surface_size.y = 0; - plane_size->surface_size.width = fb->width; - plane_size->surface_size.height = fb->height; - plane_size->surface_pitch = - fb->pitches[0] / fb->format->cpp[0]; - - 
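- /*
- * e.g. a 1920x1080 XRGB8888 fb typically has pitches[0] == 7680 and
- * cpp[0] == 4, giving surface_pitch == 1920: DRM stores pitches in
- * bytes while DC expects them in pixels.
- */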
address->type = PLN_ADDR_TYPE_GRAPHICS; - address->grph.addr.low_part = lower_32_bits(addr); - address->grph.addr.high_part = upper_32_bits(addr); - } else if (format < SURFACE_PIXEL_FORMAT_INVALID) { - uint64_t luma_addr = afb->address + fb->offsets[0]; - uint64_t chroma_addr = afb->address + fb->offsets[1]; - - plane_size->surface_size.x = 0; - plane_size->surface_size.y = 0; - plane_size->surface_size.width = fb->width; - plane_size->surface_size.height = fb->height; - plane_size->surface_pitch = - fb->pitches[0] / fb->format->cpp[0]; - - plane_size->chroma_size.x = 0; - plane_size->chroma_size.y = 0; - /* TODO: set these based on surface format */ - plane_size->chroma_size.width = fb->width / 2; - plane_size->chroma_size.height = fb->height / 2; - - plane_size->chroma_pitch = - fb->pitches[1] / fb->format->cpp[1]; - - address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE; - address->video_progressive.luma_addr.low_part = - lower_32_bits(luma_addr); - address->video_progressive.luma_addr.high_part = - upper_32_bits(luma_addr); - address->video_progressive.chroma_addr.low_part = - lower_32_bits(chroma_addr); - address->video_progressive.chroma_addr.high_part = - upper_32_bits(chroma_addr); - } - - if (adev->family >= AMDGPU_FAMILY_AI) { - ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format, - rotation, plane_size, - tiling_info, dcc, - address, - force_disable_dcc); - if (ret) - return ret; - } else { - fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags); - } - - return 0; -} - -static void -fill_blending_from_plane_state(const struct drm_plane_state *plane_state, - bool *per_pixel_alpha, bool *global_alpha, - int *global_alpha_value) -{ - *per_pixel_alpha = false; - *global_alpha = false; - *global_alpha_value = 0xff; - - if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY) - return; - - if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) { - static const uint32_t alpha_formats[] = { - DRM_FORMAT_ARGB8888, - DRM_FORMAT_RGBA8888, - DRM_FORMAT_ABGR8888, - }; - uint32_t format = plane_state->fb->format->format; - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) { - if (format == alpha_formats[i]) { - *per_pixel_alpha = true; - break; - } - } - } - - if (plane_state->alpha < 0xffff) { - *global_alpha = true; - *global_alpha_value = plane_state->alpha >> 8; - } -} - static int fill_plane_color_attributes(const struct drm_plane_state *plane_state, const enum surface_pixel_format format, @@ -5503,10 +4748,11 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev, break; } + plane_info->visible = true; plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE; - plane_info->layer_index = 0; + plane_info->layer_index = plane_state->normalized_zpos; ret = fill_plane_color_attributes(plane_state, plane_info->format, &plane_info->color_space); @@ -5517,13 +4763,13 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev, plane_info->rotation, tiling_flags, &plane_info->tiling_info, &plane_info->plane_size, - &plane_info->dcc, address, tmz_surface, - force_disable_dcc); + &plane_info->dcc, address, + tmz_surface, force_disable_dcc); if (ret) return ret; fill_blending_from_plane_state( - plane_state, &plane_info->per_pixel_alpha, + plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha, &plane_info->global_alpha, &plane_info->global_alpha_value); return 0; @@ -5570,10 +4816,11 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, dc_plane_state->tiling_info = plane_info.tiling_info; dc_plane_state->visible = 
plane_info.visible;
 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
+ dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
 dc_plane_state->global_alpha = plane_info.global_alpha;
 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
 dc_plane_state->dcc = plane_info.dcc;
- dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
+ dc_plane_state->layer_index = plane_info.layer_index;
 dc_plane_state->flip_int_enabled = true;
 /*
@@ -5587,6 +4834,117 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
 return 0;
 }
+/**
+ * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
+ *
+ * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
+ * remote fb
+ * @old_plane_state: Old state of @plane
+ * @new_plane_state: New state of @plane
+ * @crtc_state: New state of CRTC connected to the @plane
+ * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
+ *
+ * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
+ * (referred to as "damage clips" in DRM nomenclature) that require updating on
+ * the eDP remote buffer. The responsibility of specifying the dirty regions is
+ * amdgpu_dm's.
+ *
+ * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
+ * plane with regions that require flushing to the eDP remote buffer. In
+ * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
+ * implicitly provide damage clips without any client support via the plane
+ * bounds.
+ *
+ * Today, amdgpu_dm only supports the MPO and cursor use cases.
+ *
+ * TODO: Also enable for FB_DAMAGE_CLIPS
+ */
+static void fill_dc_dirty_rects(struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state,
+ struct drm_plane_state *new_plane_state,
+ struct drm_crtc_state *crtc_state,
+ struct dc_flip_addrs *flip_addrs)
+{
+ struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
+ struct rect *dirty_rects = flip_addrs->dirty_rects;
+ uint32_t num_clips;
+ bool bb_changed;
+ bool fb_changed;
+ uint32_t i = 0;
+
+ flip_addrs->dirty_rect_count = 0;
+
+ /*
+ * Cursor plane has its own dirty rect update interface. See
+ * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
+ */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ return;
+
+ /*
+ * Today, we only consider the MPO use case for PSR SU. If MPO is not
+ * requested, and there is a plane update, do FFU.
+ */
+ if (!dm_crtc_state->mpo_requested) {
+ dirty_rects[0].x = 0;
+ dirty_rects[0].y = 0;
+ dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
+ dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
+ flip_addrs->dirty_rect_count = 1;
+ DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
+ new_plane_state->plane->base.id,
+ dm_crtc_state->base.mode.crtc_hdisplay,
+ dm_crtc_state->base.mode.crtc_vdisplay);
+ return;
+ }
+
+ /*
+ * MPO is requested. Add entire plane bounding box to dirty rects if
+ * flipped to or damaged.
+ *
+ * If plane is moved or resized, also add old bounding box to dirty
+ * rects.
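+ * (e.g. flipping a 256x256 MPO plane from (0,0) to (64,0) records two
+ * rects, the new bounds at (64,0) and the old bounds at (0,0), so the
+ * DMUB flushes only those regions instead of the whole frame.)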
+ */ + num_clips = drm_plane_get_damage_clips_count(new_plane_state); + fb_changed = old_plane_state->fb->base.id != + new_plane_state->fb->base.id; + bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x || + old_plane_state->crtc_y != new_plane_state->crtc_y || + old_plane_state->crtc_w != new_plane_state->crtc_w || + old_plane_state->crtc_h != new_plane_state->crtc_h); + + DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n", + new_plane_state->plane->base.id, + bb_changed, fb_changed, num_clips); + + if (num_clips || fb_changed || bb_changed) { + dirty_rects[i].x = new_plane_state->crtc_x; + dirty_rects[i].y = new_plane_state->crtc_y; + dirty_rects[i].width = new_plane_state->crtc_w; + dirty_rects[i].height = new_plane_state->crtc_h; + DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n", + new_plane_state->plane->base.id, + dirty_rects[i].x, dirty_rects[i].y, + dirty_rects[i].width, dirty_rects[i].height); + i += 1; + } + + /* Add old plane bounding-box if plane is moved or resized */ + if (bb_changed) { + dirty_rects[i].x = old_plane_state->crtc_x; + dirty_rects[i].y = old_plane_state->crtc_y; + dirty_rects[i].width = old_plane_state->crtc_w; + dirty_rects[i].height = old_plane_state->crtc_h; + DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n", + old_plane_state->plane->base.id, + dirty_rects[i].x, dirty_rects[i].y, + dirty_rects[i].width, dirty_rects[i].height); + i += 1; + } + + flip_addrs->dirty_rect_count = i; +} + static void update_stream_scaling_settings(const struct drm_display_mode *mode, const struct dm_connector_state *dm_state, struct dc_stream_state *stream) @@ -5818,7 +5176,7 @@ static void fill_stream_properties_from_drm_display_mode( else if (drm_mode_is_420_also(info, mode_in) && aconnector->force_yuv420_output) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; - else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444) + else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444) && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; else @@ -5832,7 +5190,7 @@ static void fill_stream_properties_from_drm_display_mode( timing_out->scan_type = SCANNING_TYPE_NODATA; timing_out->hdmi_vic = 0; - if(old_stream) { + if (old_stream) { timing_out->vic = old_stream->timing.vic; timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY; timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY; @@ -6054,69 +5412,6 @@ static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context) } } -#if defined(CONFIG_DRM_AMD_DC_DCN) -static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, - struct dc_sink *sink, struct dc_stream_state *stream, - struct dsc_dec_dpcd_caps *dsc_caps) -{ - stream->timing.flags.DSC = 0; - - if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { - dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, - aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, - aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, - dsc_caps); - } -} - -static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, - struct dc_sink *sink, struct dc_stream_state *stream, - struct dsc_dec_dpcd_caps *dsc_caps) -{ - struct drm_connector *drm_connector = &aconnector->base; - uint32_t link_bandwidth_kbps; - uint32_t max_dsc_target_bpp_limit_override = 0; - - link_bandwidth_kbps 
= dc_link_bandwidth_kbps(aconnector->dc_link, - dc_link_get_link_cap(aconnector->dc_link)); - - if (stream->link && stream->link->local_sink) - max_dsc_target_bpp_limit_override = - stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit; - - /* Set DSC policy according to dsc_clock_en */ - dc_dsc_policy_set_enable_dsc_when_not_needed( - aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE); - - if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { - - if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], - dsc_caps, - aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override, - max_dsc_target_bpp_limit_override, - link_bandwidth_kbps, - &stream->timing, - &stream->timing.dsc_cfg)) { - stream->timing.flags.DSC = 1; - DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name); - } - } - - /* Overwrite the stream flag if DSC is enabled through debugfs */ - if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE) - stream->timing.flags.DSC = 1; - - if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h) - stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h; - - if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v) - stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v; - - if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel) - stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel; -} -#endif /* CONFIG_DRM_AMD_DC_DCN */ - /** * DOC: FreeSync Video * @@ -6133,7 +5428,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, * - Cinema HFR (48 FPS) * - TV/PAL (50 FPS) * - Commonly used (60 FPS) - * - Multiples of 24 (48,72,96,120 FPS) + * - Multiples of 24 (48,72,96 FPS) * * The list of standards video format is not huge and can be added to the * connector modeset list beforehand. With that, userspace can leverage @@ -6149,13 +5444,13 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, */ static struct drm_display_mode * get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector, - bool use_probed_modes) + bool use_probed_modes) { struct drm_display_mode *m, *m_pref = NULL; u16 current_refresh, highest_refresh; struct list_head *list_head = use_probed_modes ? - &aconnector->base.probed_modes : - &aconnector->base.modes; + &aconnector->base.probed_modes : + &aconnector->base.modes; if (aconnector->freesync_vid_base.clock != 0) return &aconnector->freesync_vid_base; @@ -6171,7 +5466,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector, if (!m_pref) { /* Probably an EDID with no preferred mode. 
Fallback to first entry */ m_pref = list_first_entry_or_null( - &aconnector->base.modes, struct drm_display_mode, head); + &aconnector->base.modes, struct drm_display_mode, head); if (!m_pref) { DRM_DEBUG_DRIVER("No preferred mode found in EDID\n"); return NULL; @@ -6196,12 +5491,12 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector, } } - aconnector->freesync_vid_base = *m_pref; + drm_mode_copy(&aconnector->freesync_vid_base, m_pref); return m_pref; } static bool is_freesync_video_mode(const struct drm_display_mode *mode, - struct amdgpu_dm_connector *aconnector) + struct amdgpu_dm_connector *aconnector) { struct drm_display_mode *high_mode; int timing_diff; @@ -6227,6 +5522,161 @@ static bool is_freesync_video_mode(const struct drm_display_mode *mode, return true; } +#if defined(CONFIG_DRM_AMD_DC_DCN) +static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, + struct dc_sink *sink, struct dc_stream_state *stream, + struct dsc_dec_dpcd_caps *dsc_caps) +{ + stream->timing.flags.DSC = 0; + dsc_caps->is_dsc_supported = false; + + if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || + sink->sink_signal == SIGNAL_TYPE_EDP)) { + if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE || + sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) + dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, + aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, + aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, + dsc_caps); + } +} + + +static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, + struct dc_sink *sink, struct dc_stream_state *stream, + struct dsc_dec_dpcd_caps *dsc_caps, + uint32_t max_dsc_target_bpp_limit_override) +{ + const struct dc_link_settings *verified_link_cap = NULL; + uint32_t link_bw_in_kbps; + uint32_t edp_min_bpp_x16, edp_max_bpp_x16; + struct dc *dc = sink->ctx->dc; + struct dc_dsc_bw_range bw_range = {0}; + struct dc_dsc_config dsc_cfg = {0}; + + verified_link_cap = dc_link_get_link_cap(stream->link); + link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap); + edp_min_bpp_x16 = 8 * 16; + edp_max_bpp_x16 = 8 * 16; + + if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel) + edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel; + + if (edp_max_bpp_x16 < edp_min_bpp_x16) + edp_min_bpp_x16 = edp_max_bpp_x16; + + if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0], + dc->debug.dsc_min_slice_height_override, + edp_min_bpp_x16, edp_max_bpp_x16, + dsc_caps, + &stream->timing, + &bw_range)) { + + if (bw_range.max_kbps < link_bw_in_kbps) { + if (dc_dsc_compute_config(dc->res_pool->dscs[0], + dsc_caps, + dc->debug.dsc_min_slice_height_override, + max_dsc_target_bpp_limit_override, + 0, + &stream->timing, + &dsc_cfg)) { + stream->timing.dsc_cfg = dsc_cfg; + stream->timing.flags.DSC = 1; + stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16; + } + return; + } + } + + if (dc_dsc_compute_config(dc->res_pool->dscs[0], + dsc_caps, + dc->debug.dsc_min_slice_height_override, + max_dsc_target_bpp_limit_override, + link_bw_in_kbps, + &stream->timing, + &dsc_cfg)) { + stream->timing.dsc_cfg = dsc_cfg; + stream->timing.flags.DSC = 1; + } +} + + +static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, + struct dc_sink *sink, struct dc_stream_state *stream, + struct dsc_dec_dpcd_caps *dsc_caps) +{ + struct drm_connector *drm_connector = &aconnector->base; + uint32_t link_bandwidth_kbps; + uint32_t max_dsc_target_bpp_limit_override 
= 0; + struct dc *dc = sink->ctx->dc; + uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps; + uint32_t dsc_max_supported_bw_in_kbps; + + link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, + dc_link_get_link_cap(aconnector->dc_link)); + if (stream->link && stream->link->local_sink) + max_dsc_target_bpp_limit_override = + stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit; + + /* Set DSC policy according to dsc_clock_en */ + dc_dsc_policy_set_enable_dsc_when_not_needed( + aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE); + + if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && + !aconnector->dc_link->panel_config.dsc.disable_dsc_edp && + dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) { + + apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override); + + } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { + if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { + if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], + dsc_caps, + aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override, + max_dsc_target_bpp_limit_override, + link_bandwidth_kbps, + &stream->timing, + &stream->timing.dsc_cfg)) { + stream->timing.flags.DSC = 1; + DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name); + } + } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { + timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing); + max_supported_bw_in_kbps = link_bandwidth_kbps; + dsc_max_supported_bw_in_kbps = link_bandwidth_kbps; + + if (timing_bw_in_kbps > max_supported_bw_in_kbps && + max_supported_bw_in_kbps > 0 && + dsc_max_supported_bw_in_kbps > 0) + if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], + dsc_caps, + aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override, + max_dsc_target_bpp_limit_override, + dsc_max_supported_bw_in_kbps, + &stream->timing, + &stream->timing.dsc_cfg)) { + stream->timing.flags.DSC = 1; + DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n", + __func__, drm_connector->name); + } + } + } + + /* Overwrite the stream flag if DSC is enabled through debugfs */ + if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE) + stream->timing.flags.DSC = 1; + + if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h) + stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h; + + if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v) + stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v; + + if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel) + stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel; +} +#endif /* CONFIG_DRM_AMD_DC_DCN */ + static struct dc_stream_state * create_stream_for_sink(struct amdgpu_dm_connector *aconnector, const struct drm_display_mode *drm_mode, @@ -6250,6 +5700,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, #if defined(CONFIG_DRM_AMD_DC_DCN) struct dsc_dec_dpcd_caps dsc_caps; #endif + struct dc_sink *sink = NULL; memset(&saved_mode, 0, sizeof(saved_mode)); @@ -6306,15 +5757,14 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, */ DRM_DEBUG_DRIVER("No preferred mode found\n"); } else { - recalculate_timing = amdgpu_freesync_vid_mode && - 
is_freesync_video_mode(&mode, aconnector); + recalculate_timing = is_freesync_video_mode(&mode, aconnector); if (recalculate_timing) { freesync_mode = get_highest_refresh_rate_mode(aconnector, false); - saved_mode = mode; - mode = *freesync_mode; + drm_mode_copy(&saved_mode, &mode); + drm_mode_copy(&mode, freesync_mode); } else { decide_crtc_timing_for_drm_display_mode( - &mode, preferred_mode, scale); + &mode, preferred_mode, scale); preferred_refresh = drm_mode_vrefresh(preferred_mode); } @@ -6325,7 +5775,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, else if (!dm_state) drm_mode_set_crtcinfo(&mode, 0); - /* + /* * If scaling is enabled and refresh rate didn't change * we copy the vic and polarities of the old timings */ @@ -6370,7 +5820,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) stream->use_vsc_sdp_for_colorimetry = true; } - mod_build_vsc_infopacket(stream, &stream->vsc_infopacket); + mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space); aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY; } @@ -6380,186 +5830,6 @@ finish: return stream; } -static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc) -{ - drm_crtc_cleanup(crtc); - kfree(crtc); -} - -static void dm_crtc_destroy_state(struct drm_crtc *crtc, - struct drm_crtc_state *state) -{ - struct dm_crtc_state *cur = to_dm_crtc_state(state); - - /* TODO Destroy dc_stream objects are stream object is flattened */ - if (cur->stream) - dc_stream_release(cur->stream); - - - __drm_atomic_helper_crtc_destroy_state(state); - - - kfree(state); -} - -static void dm_crtc_reset_state(struct drm_crtc *crtc) -{ - struct dm_crtc_state *state; - - if (crtc->state) - dm_crtc_destroy_state(crtc, crtc->state); - - state = kzalloc(sizeof(*state), GFP_KERNEL); - if (WARN_ON(!state)) - return; - - __drm_atomic_helper_crtc_reset(crtc, &state->base); -} - -static struct drm_crtc_state * -dm_crtc_duplicate_state(struct drm_crtc *crtc) -{ - struct dm_crtc_state *state, *cur; - - cur = to_dm_crtc_state(crtc->state); - - if (WARN_ON(!crtc->state)) - return NULL; - - state = kzalloc(sizeof(*state), GFP_KERNEL); - if (!state) - return NULL; - - __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base); - - if (cur->stream) { - state->stream = cur->stream; - dc_stream_retain(state->stream); - } - - state->active_planes = cur->active_planes; - state->vrr_infopacket = cur->vrr_infopacket; - state->abm_level = cur->abm_level; - state->vrr_supported = cur->vrr_supported; - state->freesync_config = cur->freesync_config; - state->cm_has_degamma = cur->cm_has_degamma; - state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb; - state->force_dpms_off = cur->force_dpms_off; - /* TODO Duplicate dc_stream after objects are stream object is flattened */ - - return &state->base; -} - -#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY -static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc) -{ - crtc_debugfs_init(crtc); - - return 0; -} -#endif - -static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable) -{ - enum dc_irq_source irq_source; - struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = drm_to_adev(crtc->dev); - int rc; - - irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst; - - rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; - - DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n", - acrtc->crtc_id, enable ? 
"en" : "dis", rc); - return rc; -} - -static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) -{ - enum dc_irq_source irq_source; - struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = drm_to_adev(crtc->dev); - struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state); -#if defined(CONFIG_DRM_AMD_DC_DCN) - struct amdgpu_display_manager *dm = &adev->dm; - struct vblank_control_work *work; -#endif - int rc = 0; - - if (enable) { - /* vblank irq on -> Only need vupdate irq in vrr mode */ - if (amdgpu_dm_vrr_active(acrtc_state)) - rc = dm_set_vupdate_irq(crtc, true); - } else { - /* vblank irq off -> vupdate irq off */ - rc = dm_set_vupdate_irq(crtc, false); - } - - if (rc) - return rc; - - irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; - - if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) - return -EBUSY; - - if (amdgpu_in_reset(adev)) - return 0; - -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (dm->vblank_control_workqueue) { - work = kzalloc(sizeof(*work), GFP_ATOMIC); - if (!work) - return -ENOMEM; - - INIT_WORK(&work->work, vblank_control_worker); - work->dm = dm; - work->acrtc = acrtc; - work->enable = enable; - - if (acrtc_state->stream) { - dc_stream_retain(acrtc_state->stream); - work->stream = acrtc_state->stream; - } - - queue_work(dm->vblank_control_workqueue, &work->work); - } -#endif - - return 0; -} - -static int dm_enable_vblank(struct drm_crtc *crtc) -{ - return dm_set_vblank(crtc, true); -} - -static void dm_disable_vblank(struct drm_crtc *crtc) -{ - dm_set_vblank(crtc, false); -} - -/* Implemented only the options currently availible for the driver */ -static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { - .reset = dm_crtc_reset_state, - .destroy = amdgpu_dm_crtc_destroy, - .set_config = drm_atomic_helper_set_config, - .page_flip = drm_atomic_helper_page_flip, - .atomic_duplicate_state = dm_crtc_duplicate_state, - .atomic_destroy_state = dm_crtc_destroy_state, - .set_crc_source = amdgpu_dm_crtc_set_crc_source, - .verify_crc_source = amdgpu_dm_crtc_verify_crc_source, - .get_crc_sources = amdgpu_dm_crtc_get_crc_sources, - .get_vblank_counter = amdgpu_get_vblank_counter_kms, - .enable_vblank = dm_enable_vblank, - .disable_vblank = dm_disable_vblank, - .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, -#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) - .late_register = amdgpu_dm_crtc_late_register, -#endif -}; - static enum drm_connector_status amdgpu_dm_connector_detect(struct drm_connector *connector, bool force) { @@ -6577,7 +5847,8 @@ amdgpu_dm_connector_detect(struct drm_connector *connector, bool force) !aconnector->fake_enable) connected = (aconnector->dc_sink != NULL); else - connected = (aconnector->base.force == DRM_FORCE_ON); + connected = (aconnector->base.force == DRM_FORCE_ON || + aconnector->base.force == DRM_FORCE_ON_DIGITAL); update_subconnector_property(aconnector); @@ -6701,7 +5972,7 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector) int i; /* - * Call only if mst_mgr was iniitalized before since it's not done + * Call only if mst_mgr was initialized before since it's not done * for all connector types. 
*/ if (aconnector->mst_mgr.dev) @@ -6756,6 +6027,7 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) state->base.max_requested_bpc = 8; state->vcpi_slots = 0; state->pbn = 0; + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) state->abm_level = amdgpu_dm_abm_level; @@ -6881,7 +6153,7 @@ static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector) create_eml_sink(aconnector); } -static struct dc_stream_state * +struct dc_stream_state * create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, const struct drm_display_mode *drm_mode, const struct dm_connector_state *dm_state, @@ -6904,6 +6176,8 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, } dc_result = dc_validate_stream(adev->dm.dc, stream); + if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) + dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream); if (dc_result != DC_OK) { DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n", @@ -7041,10 +6315,17 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn, drm_atomic_get_old_connector_state(state, conn); struct drm_crtc *crtc = new_con_state->crtc; struct drm_crtc_state *new_crtc_state; + struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn); int ret; trace_amdgpu_dm_connector_atomic_check(new_con_state); + if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { + ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr); + if (ret < 0) + return ret; + } + if (!crtc) return 0; @@ -7091,136 +6372,29 @@ amdgpu_dm_connector_helper_funcs = { .atomic_check = amdgpu_dm_connector_atomic_check, }; -static void dm_crtc_helper_disable(struct drm_crtc *crtc) -{ -} - -static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state) -{ - struct drm_atomic_state *state = new_crtc_state->state; - struct drm_plane *plane; - int num_active = 0; - - drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) { - struct drm_plane_state *new_plane_state; - - /* Cursor planes are "fake". */ - if (plane->type == DRM_PLANE_TYPE_CURSOR) - continue; - - new_plane_state = drm_atomic_get_new_plane_state(state, plane); - - if (!new_plane_state) { - /* - * The plane is enable on the CRTC and hasn't changed - * state. This means that it previously passed - * validation and is therefore enabled. - */ - num_active += 1; - continue; - } - - /* We need a framebuffer to be considered enabled. 
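- * (A plane with a new state but fb == NULL therefore counts as
- * disabled.)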
*/ - num_active += (new_plane_state->fb != NULL); - } - - return num_active; -} - -static void dm_update_crtc_active_planes(struct drm_crtc *crtc, - struct drm_crtc_state *new_crtc_state) -{ - struct dm_crtc_state *dm_new_crtc_state = - to_dm_crtc_state(new_crtc_state); - - dm_new_crtc_state->active_planes = 0; - - if (!dm_new_crtc_state->stream) - return; - - dm_new_crtc_state->active_planes = - count_crtc_active_planes(new_crtc_state); -} - -static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, - struct drm_atomic_state *state) -{ - struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, - crtc); - struct amdgpu_device *adev = drm_to_adev(crtc->dev); - struct dc *dc = adev->dm.dc; - struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); - int ret = -EINVAL; - - trace_amdgpu_dm_crtc_atomic_check(crtc_state); - - dm_update_crtc_active_planes(crtc, crtc_state); - - if (WARN_ON(unlikely(!dm_crtc_state->stream && - modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) { - return ret; - } - - /* - * We require the primary plane to be enabled whenever the CRTC is, otherwise - * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other - * planes are disabled, which is not supported by the hardware. And there is legacy - * userspace which stops using the HW cursor altogether in response to the resulting EINVAL. - */ - if (crtc_state->enable && - !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) { - DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n"); - return -EINVAL; - } - - /* In some use cases, like reset, no stream is attached */ - if (!dm_crtc_state->stream) - return 0; - - if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK) - return 0; - - DRM_DEBUG_ATOMIC("Failed DC stream validation\n"); - return ret; -} - -static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - -static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = { - .disable = dm_crtc_helper_disable, - .atomic_check = dm_crtc_helper_atomic_check, - .mode_fixup = dm_crtc_helper_mode_fixup, - .get_scanout_position = amdgpu_crtc_get_scanout_position, -}; - static void dm_encoder_helper_disable(struct drm_encoder *encoder) { } -static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth) +int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth) { switch (display_color_depth) { - case COLOR_DEPTH_666: - return 6; - case COLOR_DEPTH_888: - return 8; - case COLOR_DEPTH_101010: - return 10; - case COLOR_DEPTH_121212: - return 12; - case COLOR_DEPTH_141414: - return 14; - case COLOR_DEPTH_161616: - return 16; - default: - break; - } + case COLOR_DEPTH_666: + return 6; + case COLOR_DEPTH_888: + return 8; + case COLOR_DEPTH_101010: + return 10; + case COLOR_DEPTH_121212: + return 12; + case COLOR_DEPTH_141414: + return 14; + case COLOR_DEPTH_161616: + return 16; + default: + break; + } return 0; } @@ -7235,6 +6409,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; struct drm_dp_mst_topology_mgr *mst_mgr; struct drm_dp_mst_port *mst_port; + struct drm_dp_mst_topology_state *mst_state; enum dc_color_depth color_depth; int clock, bpp = 0; bool is_y420 = false; @@ -7248,10 +6423,17 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, if 
(!crtc_state->connectors_changed && !crtc_state->mode_changed) return 0; + mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr); + if (IS_ERR(mst_state)) + return PTR_ERR(mst_state); + + if (!mst_state->pbn_div) + mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_port->dc_link); + if (!state->duplicated) { int max_bpc = conn_state->max_requested_bpc; is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) && - aconnector->force_yuv420_output; + aconnector->force_yuv420_output; color_depth = convert_color_depth_from_display_info(connector, is_y420, max_bpc); @@ -7259,11 +6441,10 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, clock = adjusted_mode->clock; dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false); } - dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state, - mst_mgr, - mst_port, - dm_new_connector_state->pbn, - dm_mst_get_pbn_divider(aconnector->dc_link)); + + dm_new_connector_state->vcpi_slots = + drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port, + dm_new_connector_state->pbn); if (dm_new_connector_state->vcpi_slots < 0) { DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots); return dm_new_connector_state->vcpi_slots; @@ -7306,7 +6487,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, if (!stream) continue; - if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector) + if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector) break; stream = NULL; @@ -7333,18 +6514,12 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, dm_conn_state->pbn = pbn; dm_conn_state->vcpi_slots = slot_num; - drm_dp_mst_atomic_enable_dsc(state, - aconnector->port, - dm_conn_state->pbn, - 0, + drm_dp_mst_atomic_enable_dsc(state, aconnector->port, dm_conn_state->pbn, false); continue; } - vcpi = drm_dp_mst_atomic_enable_dsc(state, - aconnector->port, - pbn, pbn_div, - true); + vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->port, pbn, true); if (vcpi < 0) return vcpi; @@ -7355,522 +6530,6 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, } #endif -static void dm_drm_plane_reset(struct drm_plane *plane) -{ - struct dm_plane_state *amdgpu_state = NULL; - - if (plane->state) - plane->funcs->atomic_destroy_state(plane, plane->state); - - amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL); - WARN_ON(amdgpu_state == NULL); - - if (amdgpu_state) - __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base); -} - -static struct drm_plane_state * -dm_drm_plane_duplicate_state(struct drm_plane *plane) -{ - struct dm_plane_state *dm_plane_state, *old_dm_plane_state; - - old_dm_plane_state = to_dm_plane_state(plane->state); - dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL); - if (!dm_plane_state) - return NULL; - - __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base); - - if (old_dm_plane_state->dc_state) { - dm_plane_state->dc_state = old_dm_plane_state->dc_state; - dc_plane_state_retain(dm_plane_state->dc_state); - } - - return &dm_plane_state->base; -} - -static void dm_drm_plane_destroy_state(struct drm_plane *plane, - struct drm_plane_state *state) -{ - struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); - - if (dm_plane_state->dc_state) - dc_plane_state_release(dm_plane_state->dc_state); - - drm_atomic_helper_plane_destroy_state(plane, state); -} - -static const struct drm_plane_funcs 
dm_plane_funcs = { - .update_plane = drm_atomic_helper_update_plane, - .disable_plane = drm_atomic_helper_disable_plane, - .destroy = drm_primary_helper_destroy, - .reset = dm_drm_plane_reset, - .atomic_duplicate_state = dm_drm_plane_duplicate_state, - .atomic_destroy_state = dm_drm_plane_destroy_state, - .format_mod_supported = dm_plane_format_mod_supported, -}; - -static int dm_plane_helper_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *new_state) -{ - struct amdgpu_framebuffer *afb; - struct drm_gem_object *obj; - struct amdgpu_device *adev; - struct amdgpu_bo *rbo; - struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old; - struct list_head list; - struct ttm_validate_buffer tv; - struct ww_acquire_ctx ticket; - uint32_t domain; - int r; - - if (!new_state->fb) { - DRM_DEBUG_KMS("No FB bound\n"); - return 0; - } - - afb = to_amdgpu_framebuffer(new_state->fb); - obj = new_state->fb->obj[0]; - rbo = gem_to_amdgpu_bo(obj); - adev = amdgpu_ttm_adev(rbo->tbo.bdev); - INIT_LIST_HEAD(&list); - - tv.bo = &rbo->tbo; - tv.num_shared = 1; - list_add(&tv.head, &list); - - r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL); - if (r) { - dev_err(adev->dev, "fail to reserve bo (%d)\n", r); - return r; - } - - if (plane->type != DRM_PLANE_TYPE_CURSOR) - domain = amdgpu_display_supported_domains(adev, rbo->flags); - else - domain = AMDGPU_GEM_DOMAIN_VRAM; - - r = amdgpu_bo_pin(rbo, domain); - if (unlikely(r != 0)) { - if (r != -ERESTARTSYS) - DRM_ERROR("Failed to pin framebuffer with error %d\n", r); - ttm_eu_backoff_reservation(&ticket, &list); - return r; - } - - r = amdgpu_ttm_alloc_gart(&rbo->tbo); - if (unlikely(r != 0)) { - amdgpu_bo_unpin(rbo); - ttm_eu_backoff_reservation(&ticket, &list); - DRM_ERROR("%p bind failed\n", rbo); - return r; - } - - ttm_eu_backoff_reservation(&ticket, &list); - - afb->address = amdgpu_bo_gpu_offset(rbo); - - amdgpu_bo_ref(rbo); - - /** - * We don't do surface updates on planes that have been newly created, - * but we also don't have the afb->address during atomic check. - * - * Fill in buffer attributes depending on the address here, but only on - * newly created planes since they're not being used by DC yet and this - * won't modify global state. - */ - dm_plane_state_old = to_dm_plane_state(plane->state); - dm_plane_state_new = to_dm_plane_state(new_state); - - if (dm_plane_state_new->dc_state && - dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { - struct dc_plane_state *plane_state = - dm_plane_state_new->dc_state; - bool force_disable_dcc = !plane_state->dcc.enable; - - fill_plane_buffer_attributes( - adev, afb, plane_state->format, plane_state->rotation, - afb->tiling_flags, - &plane_state->tiling_info, &plane_state->plane_size, - &plane_state->dcc, &plane_state->address, - afb->tmz_surface, force_disable_dcc); - } - - return 0; -} - -static void dm_plane_helper_cleanup_fb(struct drm_plane *plane, - struct drm_plane_state *old_state) -{ - struct amdgpu_bo *rbo; - int r; - - if (!old_state->fb) - return; - - rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]); - r = amdgpu_bo_reserve(rbo, false); - if (unlikely(r)) { - DRM_ERROR("failed to reserve rbo before unpin\n"); - return; - } - - amdgpu_bo_unpin(rbo); - amdgpu_bo_unreserve(rbo); - amdgpu_bo_unref(&rbo); -} - -static int dm_plane_helper_check_state(struct drm_plane_state *state, - struct drm_crtc_state *new_crtc_state) -{ - struct drm_framebuffer *fb = state->fb; - int min_downscale, max_upscale; - int min_scale = 0; - int max_scale = INT_MAX; - - /* Plane enabled? 
Validate viewport and get scaling factors from plane caps. */ - if (fb && state->crtc) { - /* Validate viewport to cover the case when only the position changes */ - if (state->plane->type != DRM_PLANE_TYPE_CURSOR) { - int viewport_width = state->crtc_w; - int viewport_height = state->crtc_h; - - if (state->crtc_x < 0) - viewport_width += state->crtc_x; - else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay) - viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x; - - if (state->crtc_y < 0) - viewport_height += state->crtc_y; - else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay) - viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y; - - if (viewport_width < 0 || viewport_height < 0) { - DRM_DEBUG_ATOMIC("Plane completely outside of screen\n"); - return -EINVAL; - } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */ - DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2); - return -EINVAL; - } else if (viewport_height < MIN_VIEWPORT_SIZE) { - DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE); - return -EINVAL; - } - - } - - /* Get min/max allowed scaling factors from plane caps. */ - get_min_max_dc_plane_scaling(state->crtc->dev, fb, - &min_downscale, &max_upscale); - /* - * Convert to drm convention: 16.16 fixed point, instead of dc's - * 1.0 == 1000. Also drm scaling is src/dst instead of dc's - * dst/src, so min_scale = 1.0 / max_upscale, etc. - */ - min_scale = (1000 << 16) / max_upscale; - max_scale = (1000 << 16) / min_downscale; - } - - return drm_atomic_helper_check_plane_state( - state, new_crtc_state, min_scale, max_scale, true, true); -} - -static int dm_plane_atomic_check(struct drm_plane *plane, - struct drm_atomic_state *state) -{ - struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, - plane); - struct amdgpu_device *adev = drm_to_adev(plane->dev); - struct dc *dc = adev->dm.dc; - struct dm_plane_state *dm_plane_state; - struct dc_scaling_info scaling_info; - struct drm_crtc_state *new_crtc_state; - int ret; - - trace_amdgpu_dm_plane_atomic_check(new_plane_state); - - dm_plane_state = to_dm_plane_state(new_plane_state); - - if (!dm_plane_state->dc_state) - return 0; - - new_crtc_state = - drm_atomic_get_new_crtc_state(state, - new_plane_state->crtc); - if (!new_crtc_state) - return -EINVAL; - - ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state); - if (ret) - return ret; - - ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info); - if (ret) - return ret; - - if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK) - return 0; - - return -EINVAL; -} - -static int dm_plane_atomic_async_check(struct drm_plane *plane, - struct drm_atomic_state *state) -{ - /* Only support async updates on cursor planes. 
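- * (Other plane types would need full revalidation of scaling and
- * bandwidth, so they must take the synchronous atomic commit path.)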
*/ - if (plane->type != DRM_PLANE_TYPE_CURSOR) - return -EINVAL; - - return 0; -} - -static void dm_plane_atomic_async_update(struct drm_plane *plane, - struct drm_atomic_state *state) -{ - struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, - plane); - struct drm_plane_state *old_state = - drm_atomic_get_old_plane_state(state, plane); - - trace_amdgpu_dm_atomic_update_cursor(new_state); - - swap(plane->state->fb, new_state->fb); - - plane->state->src_x = new_state->src_x; - plane->state->src_y = new_state->src_y; - plane->state->src_w = new_state->src_w; - plane->state->src_h = new_state->src_h; - plane->state->crtc_x = new_state->crtc_x; - plane->state->crtc_y = new_state->crtc_y; - plane->state->crtc_w = new_state->crtc_w; - plane->state->crtc_h = new_state->crtc_h; - - handle_cursor_update(plane, old_state); -} - -static const struct drm_plane_helper_funcs dm_plane_helper_funcs = { - .prepare_fb = dm_plane_helper_prepare_fb, - .cleanup_fb = dm_plane_helper_cleanup_fb, - .atomic_check = dm_plane_atomic_check, - .atomic_async_check = dm_plane_atomic_async_check, - .atomic_async_update = dm_plane_atomic_async_update -}; - -/* - * TODO: these are currently initialized to rgb formats only. - * For future use cases we should either initialize them dynamically based on - * plane capabilities, or initialize this array to all formats, so internal drm - * check will succeed, and let DC implement proper check - */ -static const uint32_t rgb_formats[] = { - DRM_FORMAT_XRGB8888, - DRM_FORMAT_ARGB8888, - DRM_FORMAT_RGBA8888, - DRM_FORMAT_XRGB2101010, - DRM_FORMAT_XBGR2101010, - DRM_FORMAT_ARGB2101010, - DRM_FORMAT_ABGR2101010, - DRM_FORMAT_XRGB16161616, - DRM_FORMAT_XBGR16161616, - DRM_FORMAT_ARGB16161616, - DRM_FORMAT_ABGR16161616, - DRM_FORMAT_XBGR8888, - DRM_FORMAT_ABGR8888, - DRM_FORMAT_RGB565, -}; - -static const uint32_t overlay_formats[] = { - DRM_FORMAT_XRGB8888, - DRM_FORMAT_ARGB8888, - DRM_FORMAT_RGBA8888, - DRM_FORMAT_XBGR8888, - DRM_FORMAT_ABGR8888, - DRM_FORMAT_RGB565 -}; - -static const u32 cursor_formats[] = { - DRM_FORMAT_ARGB8888 -}; - -static int get_plane_formats(const struct drm_plane *plane, - const struct dc_plane_cap *plane_cap, - uint32_t *formats, int max_formats) -{ - int i, num_formats = 0; - - /* - * TODO: Query support for each group of formats directly from - * DC plane caps. This will require adding more formats to the - * caps list. 
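- * (Until then, RGB support is assumed universal, while the NV12, P010
- * and FP16 entries below are gated on the per-plane capability bits.)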
- */ - - switch (plane->type) { - case DRM_PLANE_TYPE_PRIMARY: - for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) { - if (num_formats >= max_formats) - break; - - formats[num_formats++] = rgb_formats[i]; - } - - if (plane_cap && plane_cap->pixel_format_support.nv12) - formats[num_formats++] = DRM_FORMAT_NV12; - if (plane_cap && plane_cap->pixel_format_support.p010) - formats[num_formats++] = DRM_FORMAT_P010; - if (plane_cap && plane_cap->pixel_format_support.fp16) { - formats[num_formats++] = DRM_FORMAT_XRGB16161616F; - formats[num_formats++] = DRM_FORMAT_ARGB16161616F; - formats[num_formats++] = DRM_FORMAT_XBGR16161616F; - formats[num_formats++] = DRM_FORMAT_ABGR16161616F; - } - break; - - case DRM_PLANE_TYPE_OVERLAY: - for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) { - if (num_formats >= max_formats) - break; - - formats[num_formats++] = overlay_formats[i]; - } - break; - - case DRM_PLANE_TYPE_CURSOR: - for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) { - if (num_formats >= max_formats) - break; - - formats[num_formats++] = cursor_formats[i]; - } - break; - } - - return num_formats; -} - -static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, - struct drm_plane *plane, - unsigned long possible_crtcs, - const struct dc_plane_cap *plane_cap) -{ - uint32_t formats[32]; - int num_formats; - int res = -EPERM; - unsigned int supported_rotations; - uint64_t *modifiers = NULL; - - num_formats = get_plane_formats(plane, plane_cap, formats, - ARRAY_SIZE(formats)); - - res = get_plane_modifiers(dm->adev, plane->type, &modifiers); - if (res) - return res; - - res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs, - &dm_plane_funcs, formats, num_formats, - modifiers, plane->type, NULL); - kfree(modifiers); - if (res) - return res; - - if (plane->type == DRM_PLANE_TYPE_OVERLAY && - plane_cap && plane_cap->per_pixel_alpha) { - unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) | - BIT(DRM_MODE_BLEND_PREMULTI); - - drm_plane_create_alpha_property(plane); - drm_plane_create_blend_mode_property(plane, blend_caps); - } - - if (plane->type == DRM_PLANE_TYPE_PRIMARY && - plane_cap && - (plane_cap->pixel_format_support.nv12 || - plane_cap->pixel_format_support.p010)) { - /* This only affects YUV formats. 
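- * (COLOR_ENCODING and COLOR_RANGE are ignored for RGB scanout, hence
- * the NV12/P010 capability gate above.)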
*/ - drm_plane_create_color_properties( - plane, - BIT(DRM_COLOR_YCBCR_BT601) | - BIT(DRM_COLOR_YCBCR_BT709) | - BIT(DRM_COLOR_YCBCR_BT2020), - BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | - BIT(DRM_COLOR_YCBCR_FULL_RANGE), - DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE); - } - - supported_rotations = - DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | - DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270; - - if (dm->adev->asic_type >= CHIP_BONAIRE && - plane->type != DRM_PLANE_TYPE_CURSOR) - drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, - supported_rotations); - - drm_plane_helper_add(plane, &dm_plane_helper_funcs); - - /* Create (reset) the plane state */ - if (plane->funcs->reset) - plane->funcs->reset(plane); - - return 0; -} - -static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, - struct drm_plane *plane, - uint32_t crtc_index) -{ - struct amdgpu_crtc *acrtc = NULL; - struct drm_plane *cursor_plane; - - int res = -ENOMEM; - - cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL); - if (!cursor_plane) - goto fail; - - cursor_plane->type = DRM_PLANE_TYPE_CURSOR; - res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL); - - acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL); - if (!acrtc) - goto fail; - - res = drm_crtc_init_with_planes( - dm->ddev, - &acrtc->base, - plane, - cursor_plane, - &amdgpu_dm_crtc_funcs, NULL); - - if (res) - goto fail; - - drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs); - - /* Create (reset) the plane state */ - if (acrtc->base.funcs->reset) - acrtc->base.funcs->reset(&acrtc->base); - - acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size; - acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size; - - acrtc->crtc_id = crtc_index; - acrtc->base.enabled = false; - acrtc->otg_inst = -1; - - dm->adev->mode_info.crtcs[crtc_index] = acrtc; - drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES, - true, MAX_COLOR_LUT_ENTRIES); - drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES); - - return 0; - -fail: - kfree(acrtc); - kfree(cursor_plane); - return res; -} - - static int to_drm_connector_type(enum signal_type st) { switch (st) { @@ -8014,6 +6673,9 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, mode = amdgpu_dm_create_common_mode(encoder, common_modes[i].name, common_modes[i].w, common_modes[i].h); + if (!mode) + continue; + drm_mode_probed_add(connector, mode); amdgpu_dm_connector->num_modes++; } @@ -8029,6 +6691,10 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector) connector->connector_type != DRM_MODE_CONNECTOR_LVDS) return; + mutex_lock(&connector->dev->mode_config.mutex); + amdgpu_dm_connector_get_modes(connector); + mutex_unlock(&connector->dev->mode_config.mutex); + encoder = amdgpu_dm_connector_to_encoder(connector); if (!encoder) return; @@ -8073,8 +6739,6 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, * restored here. 
*/ amdgpu_dm_update_freesync_caps(connector, edid); - - amdgpu_set_panel_orientation(connector); } else { amdgpu_dm_connector->num_modes = 0; } @@ -8175,7 +6839,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); - if (!(amdgpu_freesync_vid_mode && edid)) + if (!edid) return; if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) @@ -8242,15 +6906,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, break; case DRM_MODE_CONNECTOR_DisplayPort: aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; - if (link->is_dig_mapping_flexible && - link->dc->res_pool->funcs->link_encs_assign) { - link->link_enc = - link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); - if (!link->link_enc) - link->link_enc = - link_enc_cfg_get_next_avail_link_enc(link->ctx->dc); - } - + link->link_enc = link_enc_cfg_get_link_enc(link); + ASSERT(link->link_enc); if (link->link_enc) aconnector->base.ycbcr_420_allowed = link->link_enc->features.dp_ycbcr420_supported ? true : false; @@ -8331,7 +6988,7 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, if (dc_submit_i2c( ddc_service->ctx->dc, - ddc_service->ddc_pin->hw_info.ddc_channel, + ddc_service->link->link_index, &cmd)) result = num; @@ -8367,8 +7024,6 @@ create_i2c(struct ddc_service *ddc_service, snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index); i2c_set_adapdata(&i2c->base, i2c); i2c->ddc_service = ddc_service; - if (i2c->ddc_service->ddc_pin) - i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index; return i2c; } @@ -8659,114 +7314,6 @@ static void remove_stream(struct amdgpu_device *adev, acrtc->enabled = false; } -static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, - struct dc_cursor_position *position) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - int x, y; - int xorigin = 0, yorigin = 0; - - if (!crtc || !plane->state->fb) - return 0; - - if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) || - (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) { - DRM_ERROR("%s: bad cursor width or height %d x %d\n", - __func__, - plane->state->crtc_w, - plane->state->crtc_h); - return -EINVAL; - } - - x = plane->state->crtc_x; - y = plane->state->crtc_y; - - if (x <= -amdgpu_crtc->max_cursor_width || - y <= -amdgpu_crtc->max_cursor_height) - return 0; - - if (x < 0) { - xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); - x = 0; - } - if (y < 0) { - yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); - y = 0; - } - position->enable = true; - position->translate_by_source = true; - position->x = x; - position->y = y; - position->x_hotspot = xorigin; - position->y_hotspot = yorigin; - - return 0; -} - -static void handle_cursor_update(struct drm_plane *plane, - struct drm_plane_state *old_plane_state) -{ - struct amdgpu_device *adev = drm_to_adev(plane->dev); - struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb); - struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc; - struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL; - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - uint64_t address = afb ? 
afb->address : 0; - struct dc_cursor_position position = {0}; - struct dc_cursor_attributes attributes; - int ret; - - if (!plane->state->fb && !old_plane_state->fb) - return; - - DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n", - __func__, - amdgpu_crtc->crtc_id, - plane->state->crtc_w, - plane->state->crtc_h); - - ret = get_cursor_position(plane, crtc, &position); - if (ret) - return; - - if (!position.enable) { - /* turn off cursor */ - if (crtc_state && crtc_state->stream) { - mutex_lock(&adev->dm.dc_lock); - dc_stream_set_cursor_position(crtc_state->stream, - &position); - mutex_unlock(&adev->dm.dc_lock); - } - return; - } - - amdgpu_crtc->cursor_width = plane->state->crtc_w; - amdgpu_crtc->cursor_height = plane->state->crtc_h; - - memset(&attributes, 0, sizeof(attributes)); - attributes.address.high_part = upper_32_bits(address); - attributes.address.low_part = lower_32_bits(address); - attributes.width = plane->state->crtc_w; - attributes.height = plane->state->crtc_h; - attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA; - attributes.rotation_angle = 0; - attributes.attribute_flags.value = 0; - - attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0]; - - if (crtc_state->stream) { - mutex_lock(&adev->dm.dc_lock); - if (!dc_stream_set_cursor_attributes(crtc_state->stream, - &attributes)) - DRM_ERROR("DC failed to set cursor attributes\n"); - - if (!dc_stream_set_cursor_position(crtc_state->stream, - &position)) - DRM_ERROR("DC failed to set cursor position\n"); - mutex_unlock(&adev->dm.dc_lock); - } -} - static void prepare_flip_isr(struct amdgpu_crtc *acrtc) { @@ -8842,11 +7389,6 @@ static void update_freesync_state_on_stream( &vrr_infopacket, pack_sdp_v1_3); - new_crtc_state->freesync_timing_changed |= - (memcmp(&acrtc->dm_irq_params.vrr_params.adjust, - &vrr_params.adjust, - sizeof(vrr_params.adjust)) != 0); - new_crtc_state->freesync_vrr_info_changed |= (memcmp(&new_crtc_state->vrr_infopacket, &vrr_infopacket, @@ -8855,7 +7397,6 @@ static void update_freesync_state_on_stream( acrtc->dm_irq_params.vrr_params = vrr_params; new_crtc_state->vrr_infopacket = vrr_infopacket; - new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust; new_stream->vrr_infopacket = vrr_infopacket; if (new_crtc_state->freesync_vrr_info_changed) @@ -8918,10 +7459,6 @@ static void update_stream_irq_parameters( new_stream, &config, &vrr_params); - new_crtc_state->freesync_timing_changed |= - (memcmp(&acrtc->dm_irq_params.vrr_params.adjust, - &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0); - new_crtc_state->freesync_config = config; /* Copy state for access from DM IRQ handler */ acrtc->dm_irq_params.freesync_config = config; @@ -8945,15 +7482,15 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, * We also need vupdate irq for the actual core vblank handling * at end of vblank. */ - dm_set_vupdate_irq(new_state->base.crtc, true); - drm_crtc_vblank_get(new_state->base.crtc); + WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, true) != 0); + WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0); DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n", __func__, new_state->base.crtc->base.id); } else if (old_vrr_active && !new_vrr_active) { /* Transition VRR active -> inactive: * Allow vblank irq disable again for fixed refresh rate. 
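	 * This drm_crtc_vblank_put() pairs with the drm_crtc_vblank_get()
	 * taken on the inactive -> active transition above, so the extra
	 * vblank reference is only held while VRR is active.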
*/ - dm_set_vupdate_irq(new_state->base.crtc, false); + WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, false) != 0); drm_crtc_vblank_put(new_state->base.crtc); DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n", __func__, new_state->base.crtc->base.id); @@ -8993,11 +7530,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); int planes_count = 0, vpos, hpos; - long r; unsigned long flags; - struct amdgpu_bo *abo; uint32_t target_vblank, last_flip_vblank; bool vrr_active = amdgpu_dm_vrr_active(acrtc_state); + bool cursor_update = false; bool pflip_present = false; struct { struct dc_surface_update surface_updates[MAX_SURFACES]; @@ -9033,8 +7569,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state); /* Cursor plane is handled after stream updates */ - if (plane->type == DRM_PLANE_TYPE_CURSOR) + if (plane->type == DRM_PLANE_TYPE_CURSOR) { + if ((fb && crtc == pcrtc) || + (old_plane_state->fb && old_plane_state->crtc == pcrtc)) + cursor_update = true; + continue; + } if (!fb || !crtc || pcrtc != crtc) continue; @@ -9067,18 +7608,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, continue; } - abo = gem_to_amdgpu_bo(fb->obj[0]); - - /* - * Wait for all fences on this FB. Do limited wait to avoid - * deadlock during GPU reset when this fence will not signal - * but we hold reservation lock for the BO. - */ - r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false, - msecs_to_jiffies(5000)); - if (unlikely(r <= 0)) - DRM_ERROR("Waiting for fences timed out!"); - fill_dc_plane_info_and_addr( dm->adev, new_plane_state, afb->tiling_flags, @@ -9086,13 +7615,18 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, &bundle->flip_addrs[planes_count].address, afb->tmz_surface, false); - DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n", + drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n", new_plane_state->plane->index, bundle->plane_infos[planes_count].dcc.enable); bundle->surface_updates[planes_count].plane_info = &bundle->plane_infos[planes_count]; + if (acrtc_state->stream->link->psr_settings.psr_feature_enabled) + fill_dc_dirty_rects(plane, old_plane_state, + new_plane_state, new_crtc_state, + &bundle->flip_addrs[planes_count]); + /* * Only allow immediate flips for fast updates that don't * change FB pitch, DCC state, rotation or mirroing. @@ -9120,7 +7654,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, dc_plane, bundle->flip_addrs[planes_count].flip_timestamp_in_us); - DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n", + drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n", __func__, bundle->flip_addrs[planes_count].address.grph.addr.high_part, bundle->flip_addrs[planes_count].address.grph.addr.low_part); @@ -9179,8 +7713,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * and rely on sending it from software. 
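	 * (The cursor_update path added below reuses this event plumbing
	 * for commits that only touch the cursor; the queued event is then
	 * sent from dm_crtc_handle_vblank().)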
*/ if (acrtc_attach->base.state->event && - acrtc_state->active_planes > 0 && - !acrtc_state->force_dpms_off) { + acrtc_state->active_planes > 0) { drm_crtc_vblank_get(pcrtc); spin_lock_irqsave(&pcrtc->dev->event_lock, flags); @@ -9196,19 +7729,27 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->stream_update.vrr_infopacket = &acrtc_state->stream->vrr_infopacket; } + } else if (cursor_update && acrtc_state->active_planes > 0 && + acrtc_attach->base.state->event) { + drm_crtc_vblank_get(pcrtc); + + spin_lock_irqsave(&pcrtc->dev->event_lock, flags); + + acrtc_attach->event = acrtc_attach->base.state->event; + acrtc_attach->base.state->event = NULL; + + spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); } /* Update the planes if changed or disable if we don't have any. */ if ((planes_count || acrtc_state->active_planes == 0) && acrtc_state->stream) { -#if defined(CONFIG_DRM_AMD_DC_DCN) /* * If PSR or idle optimizations are enabled then flush out * any pending work before hardware programming. */ if (dm->vblank_control_workqueue) flush_workqueue(dm->vblank_control_workqueue); -#endif bundle->stream_update.stream = acrtc_state->stream; if (new_pcrtc_state->mode_changed) { @@ -9290,6 +7831,18 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, /* Allow PSR when skip count is 0. */ acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count; + + /* + * If sink supports PSR SU, there is no need to rely on + * a vblank event disable request to enable PSR. PSR SU + * can be enabled immediately once OS demonstrates an + * adequate number of fast atomic commits to notify KMD + * of update events. See `vblank_control_worker()`. + */ + if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && + acrtc_attach->dm_irq_params.allow_psr_entry && + !acrtc_state->stream->link->psr_settings.psr_allow_active) + amdgpu_dm_psr_enable(acrtc_state->stream); } else { acrtc_attach->dm_irq_params.allow_psr_entry = false; } @@ -9423,10 +7976,16 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; int crtc_disable_count = 0; bool mode_set_reset_required = false; + int r; trace_amdgpu_dm_atomic_commit_tail_begin(state); + r = drm_atomic_helper_wait_for_fences(dev, state, false); + if (unlikely(r)) + DRM_ERROR("Waiting for fences timed out!"); + drm_atomic_helper_update_legacy_modeset_state(dev, state); + drm_dp_mst_atomic_wait_for_dependencies(state); dm_state = dm_atomic_get_new_state(state); if (dm_state && dm_state->context) { @@ -9462,7 +8021,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); - DRM_DEBUG_ATOMIC( + drm_dbg_state(state->dev, "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, " "planes_changed:%d, mode_changed:%d,active_changed:%d," "connectors_changed:%d\n", @@ -9541,21 +8100,19 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) if (dc_state) { /* if there mode set or reset, disable eDP PSR */ if (mode_set_reset_required) { -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dm->vblank_control_workqueue) flush_workqueue(dm->vblank_control_workqueue); -#endif + amdgpu_dm_psr_disable_all(dm); } dm_enable_per_frame_crtc_master_sync(dc_state); mutex_lock(&dm->dc_lock); WARN_ON(!dc_commit_state(dm->dc, dc_state)); -#if defined(CONFIG_DRM_AMD_DC_DCN) - /* Allow idle optimization when 
vblank count is 0 for display off */ - if (dm->active_vblank_irq_count == 0) - dc_allow_idle_optimizations(dm->dc,true); -#endif + + /* Allow idle optimization when vblank count is 0 for display off */ + if (dm->active_vblank_irq_count == 0) + dc_allow_idle_optimizations(dm->dc, true); mutex_unlock(&dm->dc_lock); } @@ -9690,23 +8247,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) mutex_unlock(&dm->dc_lock); } - /* Count number of newly disabled CRTCs for dropping PM refs later. */ - for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, - new_crtc_state, i) { - if (old_crtc_state->active && !new_crtc_state->active) - crtc_disable_count++; - - dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); - dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); - - /* For freesync config update on crtc state and params for irq */ - update_stream_irq_parameters(dm, dm_new_crtc_state); - - /* Handle vrr on->off / off->on transitions */ - amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, - dm_new_crtc_state); - } - /** * Enable interrupts for CRTCs that are newly enabled or went through * a modeset. It was intentionally deferred until after the front end @@ -9716,16 +8256,29 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); #ifdef CONFIG_DEBUG_FS - bool configure_crc = false; enum amdgpu_dm_pipe_crc_source cur_crc_src; #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) - struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk; + struct crc_rd_work *crc_rd_wrk; +#endif +#endif + /* Count number of newly disabled CRTCs for dropping PM refs later. */ + if (old_crtc_state->active && !new_crtc_state->active) + crtc_disable_count++; + + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); + + /* For freesync config update on crtc state and params for irq */ + update_stream_irq_parameters(dm, dm_new_crtc_state); + +#ifdef CONFIG_DEBUG_FS +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + crc_rd_wrk = dm->crc_rd_wrk; #endif spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); cur_crc_src = acrtc->dm_irq_params.crc_src; spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); #endif - dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); if (new_crtc_state->active && (!old_crtc_state->active || @@ -9733,16 +8286,19 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) dc_stream_retain(dm_new_crtc_state->stream); acrtc->dm_irq_params.stream = dm_new_crtc_state->stream; manage_dm_interrupts(adev, acrtc, true); + } + /* Handle vrr on->off / off->on transitions */ + amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state); #ifdef CONFIG_DEBUG_FS + if (new_crtc_state->active && + (!old_crtc_state->active || + drm_atomic_crtc_needs_modeset(new_crtc_state))) { /** * Frontend may have changed so reapply the CRC capture * settings for the stream. 
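			 * e.g. a CRC source selected earlier through the
			 * debugfs CRC interface must be re-armed via
			 * amdgpu_dm_crtc_configure_crc_source() below, since
			 * the new stream knows nothing about the previous
			 * configuration.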
*/ - dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); - if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) { - configure_crc = true; #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) if (amdgpu_dm_crc_window_is_activated(crtc)) { spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); @@ -9754,14 +8310,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); } #endif - } - - if (configure_crc) if (amdgpu_dm_crtc_configure_crc_source( crtc, dm_new_crtc_state, cur_crc_src)) DRM_DEBUG_DRIVER("Failed to configure crc source"); -#endif + } } +#endif } for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) @@ -9780,15 +8334,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) /* Update audio instances for each connector. */ amdgpu_dm_commit_audio(dev, state); -#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \ - defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) /* restore the backlight level */ for (i = 0; i < dm->num_of_edps; i++) { if (dm->backlight_dev[i] && - (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i])) + (dm->actual_brightness[i] != dm->brightness[i])) amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); } -#endif + /* * send vblank event on all events not handled in flip and * mark consumed event for drm_atomic_helper_commit_hw_done @@ -9829,7 +8381,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) dc_release_state(dc_state_temp); } - static int dm_force_atomic_commit(struct drm_connector *connector) { int ret = 0; @@ -10019,27 +8570,27 @@ static bool is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, struct drm_crtc_state *new_crtc_state) { - struct drm_display_mode old_mode, new_mode; + const struct drm_display_mode *old_mode, *new_mode; if (!old_crtc_state || !new_crtc_state) return false; - old_mode = old_crtc_state->mode; - new_mode = new_crtc_state->mode; - - if (old_mode.clock == new_mode.clock && - old_mode.hdisplay == new_mode.hdisplay && - old_mode.vdisplay == new_mode.vdisplay && - old_mode.htotal == new_mode.htotal && - old_mode.vtotal != new_mode.vtotal && - old_mode.hsync_start == new_mode.hsync_start && - old_mode.vsync_start != new_mode.vsync_start && - old_mode.hsync_end == new_mode.hsync_end && - old_mode.vsync_end != new_mode.vsync_end && - old_mode.hskew == new_mode.hskew && - old_mode.vscan == new_mode.vscan && - (old_mode.vsync_end - old_mode.vsync_start) == - (new_mode.vsync_end - new_mode.vsync_start)) + old_mode = &old_crtc_state->mode; + new_mode = &new_crtc_state->mode; + + if (old_mode->clock == new_mode->clock && + old_mode->hdisplay == new_mode->hdisplay && + old_mode->vdisplay == new_mode->vdisplay && + old_mode->htotal == new_mode->htotal && + old_mode->vtotal != new_mode->vtotal && + old_mode->hsync_start == new_mode->hsync_start && + old_mode->vsync_start != new_mode->vsync_start && + old_mode->hsync_end == new_mode->hsync_end && + old_mode->vsync_end != new_mode->vsync_end && + old_mode->hskew == new_mode->hskew && + old_mode->vscan == new_mode->vscan && + (old_mode->vsync_end - old_mode->vsync_start) == + (new_mode->vsync_end - new_mode->vsync_start)) return true; return false; @@ -10060,12 +8611,12 @@ static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) { } static int dm_update_crtc_state(struct amdgpu_display_manager *dm, - struct drm_atomic_state *state, - struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state, - struct drm_crtc_state 
*new_crtc_state, - bool enable, - bool *lock_and_validation_needed) + struct drm_atomic_state *state, + struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state, + struct drm_crtc_state *new_crtc_state, + bool enable, + bool *lock_and_validation_needed) { struct dm_atomic_state *dm_state = NULL; struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; @@ -10149,8 +8700,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, * TODO: Refactor this function to allow this check to work * in all conditions. */ - if (amdgpu_freesync_vid_mode && - dm_new_crtc_state->stream && + if (dm_new_crtc_state->stream && is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) goto skip_modeset; @@ -10167,7 +8717,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) goto skip_modeset; - DRM_DEBUG_ATOMIC( + drm_dbg_state(state->dev, "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, " "planes_changed:%d, mode_changed:%d,active_changed:%d," "connectors_changed:%d\n", @@ -10185,7 +8735,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, if (!dm_old_crtc_state->stream) goto skip_modeset; - if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream && + if (dm_new_crtc_state->stream && is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) { new_crtc_state->mode_changed = false; @@ -10197,7 +8747,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, set_freesync_fixed_config(dm_new_crtc_state); goto skip_modeset; - } else if (amdgpu_freesync_vid_mode && aconnector && + } else if (aconnector && is_freesync_video_mode(&new_crtc_state->mode, aconnector)) { struct drm_display_mode *high_mode; @@ -10237,7 +8787,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, * added MST connectors not found in existing crtc_state in the chained mode * TODO: need to dig out the root cause of that */ - if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port)) + if (!aconnector) goto skip_modeset; if (modereset_required(new_crtc_state)) @@ -10641,6 +9191,8 @@ static int dm_update_plane_state(struct dc *dc, dm_new_plane_state->dc_state = dc_new_plane_state; + dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY); + /* Tell DC to do a full surface update every time there * is a plane change. Inefficient, but works for now. */ @@ -10653,6 +9205,24 @@ static int dm_update_plane_state(struct dc *dc, return ret; } +static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state, + int *src_w, int *src_h) +{ + switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { + case DRM_MODE_ROTATE_90: + case DRM_MODE_ROTATE_270: + *src_w = plane_state->src_h >> 16; + *src_h = plane_state->src_w >> 16; + break; + case DRM_MODE_ROTATE_0: + case DRM_MODE_ROTATE_180: + default: + *src_w = plane_state->src_w >> 16; + *src_h = plane_state->src_h >> 16; + break; + } +} + static int dm_check_crtc_cursor(struct drm_atomic_state *state, struct drm_crtc *crtc, struct drm_crtc_state *new_crtc_state) @@ -10661,6 +9231,8 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state, struct drm_plane_state *new_cursor_state, *new_underlying_state; int i; int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h; + int cursor_src_w, cursor_src_h; + int underlying_src_w, underlying_src_h; /* On DCE and DCN there is no dedicated hardware cursor plane. 
We get a * cursor per pipe but it's going to inherit the scaling and @@ -10672,10 +9244,9 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state, return 0; } - cursor_scale_w = new_cursor_state->crtc_w * 1000 / - (new_cursor_state->src_w >> 16); - cursor_scale_h = new_cursor_state->crtc_h * 1000 / - (new_cursor_state->src_h >> 16); + dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h); + cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w; + cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h; for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) { /* Narrow down to non-cursor planes on the same CRTC as the cursor */ @@ -10686,10 +9257,10 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state, if (!new_underlying_state->fb) continue; - underlying_scale_w = new_underlying_state->crtc_w * 1000 / - (new_underlying_state->src_w >> 16); - underlying_scale_h = new_underlying_state->crtc_h * 1000 / - (new_underlying_state->src_h >> 16); + dm_get_oriented_plane_size(new_underlying_state, + &underlying_src_w, &underlying_src_h); + underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w; + underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h; if (cursor_scale_w != underlying_scale_w || cursor_scale_h != underlying_scale_h) { @@ -10714,10 +9285,13 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state, static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc) { struct drm_connector *connector; - struct drm_connector_state *conn_state; + struct drm_connector_state *conn_state, *old_conn_state; struct amdgpu_dm_connector *aconnector = NULL; int i; - for_each_new_connector_in_state(state, connector, conn_state, i) { + for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) { + if (!conn_state->crtc) + conn_state = old_conn_state; + if (conn_state->crtc != crtc) continue; @@ -10737,6 +9311,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm /** * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM. + * * @dev: The DRM device * @state: The atomic state to commit * @@ -10774,18 +9349,18 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, enum dc_status status; int ret, i; bool lock_and_validation_needed = false; - struct dm_crtc_state *dm_old_crtc_state; + struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; #if defined(CONFIG_DRM_AMD_DC_DCN) struct dsc_mst_fairness_vars vars[MAX_PIPES]; - struct drm_dp_mst_topology_state *mst_state; - struct drm_dp_mst_topology_mgr *mgr; #endif trace_amdgpu_dm_atomic_check_begin(state); ret = drm_atomic_helper_check_modeset(dev, state); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n"); goto fail; + } /* Check connector changes */ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { @@ -10793,14 +9368,12 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); /* Skip connectors that are disabled or part of modeset already. 
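		 * (A connector without a CRTC in the new state has nothing
		 * to validate here; everything else gets its CRTC state
		 * pulled into the atomic state below.)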
*/ - if (!old_con_state->crtc && !new_con_state->crtc) - continue; - if (!new_con_state->crtc) continue; new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc); if (IS_ERR(new_crtc_state)) { + DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n"); ret = PTR_ERR(new_crtc_state); goto fail; } @@ -10815,8 +9388,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { ret = add_affected_mst_dsc_crtcs(state, crtc); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n"); goto fail; + } } } } @@ -10831,19 +9406,25 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, continue; ret = amdgpu_dm_verify_lut_sizes(new_crtc_state); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n"); goto fail; + } if (!new_crtc_state->enable) continue; ret = drm_atomic_add_affected_connectors(state, crtc); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n"); goto fail; + } ret = drm_atomic_add_affected_planes(state, crtc); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n"); goto fail; + } if (dm_old_crtc_state->dsc_force_changed) new_crtc_state->mode_changed = true; @@ -10880,11 +9461,20 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (IS_ERR(new_plane_state)) { ret = PTR_ERR(new_plane_state); + DRM_DEBUG_DRIVER("new_plane_state is BAD\n"); goto fail; } } } + /* + * DC consults the zpos (layer_index in DC terminology) to determine the + * hw plane on which to enable the hw cursor (see + * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in + * atomic state, so call drm helper to normalize zpos. 
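+	 *
+	 * As a hypothetical example: three planes committed with zpos 0, 0
+	 * and 1 leave drm_atomic_normalize_zpos() with normalized_zpos 0, 1
+	 * and 2 (ties broken by plane id), so DC always sees a strict layer
+	 * ordering.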
+ */ + drm_atomic_normalize_zpos(dev, state); + /* Remove exiting planes if they are modified */ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { ret = dm_update_plane_state(dc, state, plane, @@ -10892,8 +9482,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, new_plane_state, false, &lock_and_validation_needed); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n"); goto fail; + } } /* Disable all crtcs which require disable */ @@ -10903,8 +9495,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, new_crtc_state, false, &lock_and_validation_needed); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n"); goto fail; + } } /* Enable all crtcs which require enable */ @@ -10914,8 +9508,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, new_crtc_state, true, &lock_and_validation_needed); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n"); goto fail; + } } /* Add new/modified planes */ @@ -10925,20 +9521,41 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, new_plane_state, true, &lock_and_validation_needed); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n"); goto fail; + } + } + +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dc_resource_is_dsc_encoding_supported(dc)) { + if (!pre_validate_dsc(state, &dm_state, vars)) { + ret = -EINVAL; + goto fail; + } } +#endif /* Run this here since we want to validate the streams we created */ ret = drm_atomic_helper_check_planes(dev, state); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n"); goto fail; + } + + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + if (dm_new_crtc_state->mpo_requested) + DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc); + } /* Check cursor planes scaling */ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { ret = dm_check_crtc_cursor(state, crtc, new_crtc_state); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n"); goto fail; + } } if (state->legacy_cursor_update) { @@ -10983,33 +9600,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, lock_and_validation_needed = true; } -#if defined(CONFIG_DRM_AMD_DC_DCN) - /* set the slot info for each mst_state based on the link encoding format */ - for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) { - struct amdgpu_dm_connector *aconnector; - struct drm_connector *connector; - struct drm_connector_list_iter iter; - u8 link_coding_cap; - - if (!mgr->mst_state ) - continue; - - drm_connector_list_iter_begin(dev, &iter); - drm_for_each_connector_iter(connector, &iter) { - int id = connector->index; - - if (id == mst_state->mgr->conn_base_id) { - aconnector = to_amdgpu_dm_connector(connector); - link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link); - drm_dp_mst_update_slots(mst_state, link_coding_cap); - - break; - } - } - drm_connector_list_iter_end(&iter); - - } -#endif /** * Streams and planes are reset when there are changes that affect * bandwidth. 
Anything that affects bandwidth needs to go through
@@ -11025,20 +9615,29 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
	 */
	if (lock_and_validation_needed) {
		ret = dm_atomic_get_state(state, &dm_state);
-		if (ret)
+		if (ret) {
+			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
			goto fail;
+		}

		ret = do_aquire_global_lock(dev, state);
-		if (ret)
+		if (ret) {
+			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
			goto fail;
+		}

#if defined(CONFIG_DRM_AMD_DC_DCN)
-		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
+		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
+			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
+			ret = -EINVAL;
			goto fail;
+		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
-		if (ret)
+		if (ret) {
+			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
			goto fail;
+		}
#endif

		/*
@@ -11048,12 +9647,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
		 * to get stuck in an infinite loop and hang eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
-		if (ret)
+		if (ret) {
+			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
			goto fail;
-		status = dc_validate_global_state(dc, dm_state->context, false);
+		}
+		status = dc_validate_global_state(dc, dm_state->context, true);
		if (status != DC_OK) {
-			drm_dbg_atomic(dev,
-				       "DC global validation failure: %s (%d)",
+			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
				dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
@@ -11175,7 +9775,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
	input->offset = offset;
	input->length = length;
-	input->total_length = total_length;
+	input->cea_total_length = total_length;
	memcpy(input->payload, data, length);

	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
@@ -11307,8 +9907,19 @@ static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
	return valid_vsdb_found ? i : -ENODEV;
}

+/**
+ * amdgpu_dm_update_freesync_caps - Update Freesync capabilities
+ *
+ * @connector: Connector to query.
+ * @edid: EDID from monitor
+ *
+ * AMDGPU supports FreeSync on DP and HDMI displays, and it needs to keep
+ * track of some of the display information in the internal data struct used
+ * by amdgpu_dm. This function checks the connector type to determine which
+ * FreeSync parameters need to be set.
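+ *
+ * A minimal usage sketch (hypothetical call site, after a fresh EDID has
+ * been fetched on hotplug):
+ *
+ *	edid = drm_get_edid(connector, &aconnector->i2c->base);
+ *	amdgpu_dm_update_freesync_caps(connector, edid);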
+ */ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, - struct edid *edid) + struct edid *edid) { int i = 0; struct detailed_timing *timing; @@ -11321,8 +9932,8 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, struct drm_device *dev = connector->dev; struct amdgpu_device *adev = drm_to_adev(dev); - bool freesync_capable = false; struct amdgpu_hdmi_vsdb_info vsdb_info = {0}; + bool freesync_capable = false; if (!connector->state) { DRM_ERROR("%s - Connector has no state", __func__); @@ -11351,7 +9962,6 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, if (!adev->dm.freesync_module) goto update; - if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || sink->sink_signal == SIGNAL_TYPE_EDP) { bool edid_check_required = false; @@ -11482,8 +10092,10 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address, return value; } -int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx, - uint8_t status_type, uint32_t *operation_result) +static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, + struct dc_context *ctx, + uint8_t status_type, + uint32_t *operation_result) { struct amdgpu_device *adev = ctx->driver_context; int return_status = -1; @@ -11554,3 +10166,24 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS, (uint32_t *)operation_result); } + +/* + * Check whether seamless boot is supported. + * + * So far we only support seamless boot on CHIP_VANGOGH. + * If everything goes well, we may consider expanding + * seamless boot to other ASICs. + */ +bool check_seamless_boot_capability(struct amdgpu_device *adev) +{ + switch (adev->asic_type) { + case CHIP_VANGOGH: + if (!adev->mman.keep_stolen_vga_memory) + return true; + break; + default: + break; + } + + return false; +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 37e61a88d49e..b5ce15c43bcc 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -26,10 +26,10 @@ #ifndef __AMDGPU_DM_H__ #define __AMDGPU_DM_H__ +#include <drm/display/drm_dp_mst_helper.h> #include <drm/drm_atomic.h> #include <drm/drm_connector.h> #include <drm/drm_crtc.h> -#include <drm/drm_dp_mst_helper.h> #include <drm/drm_plane.h> /* @@ -50,9 +50,9 @@ #define AMDGPU_DMUB_NOTIFICATION_MAX 5 -/** +/* * DMUB Async to Sync Mechanism Status - **/ + */ #define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1 #define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2 #define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3 @@ -242,6 +242,13 @@ struct hpd_rx_irq_offload_work { * @force_timing_sync: set via debugfs. When set, indicates that all connected * displays will be forced to synchronize. * @dmcub_trace_event_en: enable dmcub trace events + * @dmub_outbox_params: DMUB Outbox parameters + * @num_of_edps: number of backlight eDPs + * @disable_hpd_irq: disables all HPD and HPD RX interrupt handling in the + * driver when true + * @dmub_aux_transfer_done: struct completion used to indicate when DMUB + * transfers are done + * @delayed_hpd_wq: work queue used to delay DMUB HPD work */ struct amdgpu_display_manager { @@ -358,14 +365,12 @@ struct amdgpu_display_manager { */ struct mutex audio_lock; -#if defined(CONFIG_DRM_AMD_DC_DCN) /** * @vblank_lock: * * Guards access to deferred vblank work state. 
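	 * (i.e. the vblank_control_work items deferred to
	 * @vblank_control_workqueue).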
 */
	spinlock_t vblank_lock;
-#endif

	/**
	 * @audio_component:
@@ -469,14 +474,12 @@ struct amdgpu_display_manager {
	struct hdcp_workqueue *hdcp_workqueue;
#endif

-#if defined(CONFIG_DRM_AMD_DC_DCN)
	/**
	 * @vblank_control_workqueue:
	 *
	 * Deferred work for vblank control events.
	 */
	struct workqueue_struct *vblank_control_workqueue;
-#endif

	struct drm_atomic_state *cached_state;
	struct dc_state *cached_dc_state;
@@ -493,14 +496,12 @@ struct amdgpu_display_manager {
	 */
	const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;

-#if defined(CONFIG_DRM_AMD_DC_DCN)
	/**
	 * @active_vblank_irq_count:
	 *
	 * number of currently active vblank irqs
	 */
	uint32_t active_vblank_irq_count;
-#endif

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	/**
@@ -540,6 +541,20 @@ struct amdgpu_display_manager {
	 * cached backlight values.
	 */
	u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
+	/**
+	 * @actual_brightness:
+	 *
+	 * last successfully applied backlight values.
+	 */
+	u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];
+
+	/**
+	 * @aux_hpd_discon_quirk:
+	 *
+	 * Quirk for an HPD disconnect that happens while an AUX
+	 * transaction is ongoing; observed on certain Intel platforms.
+	 */
+	bool aux_hpd_discon_quirk;
};

enum dsc_clock_force_state {
@@ -556,6 +571,14 @@ struct dsc_preferred_settings {
	bool dsc_force_disable_passthrough;
};

+enum mst_progress_status {
+	MST_STATUS_DEFAULT = 0,
+	MST_PROBE = BIT(0),
+	MST_REMOTE_EDID = BIT(1),
+	MST_ALLOCATE_NEW_PAYLOAD = BIT(2),
+	MST_CLEAR_ALLOCATED_PAYLOAD = BIT(3),
+};
+
 struct amdgpu_dm_connector {

	struct drm_connector base;
@@ -575,6 +598,10 @@ struct amdgpu_dm_connector {
	 * The 'current' sink is in dc_link->sink. */
	struct dc_sink *dc_sink;
	struct dc_link *dc_link;
+
+	/**
+	 * @dc_em_sink: Reference to the emulated (virtual) sink.
+	 */
	struct dc_sink *dc_em_sink;

	/* DM only */
@@ -583,12 +610,20 @@ struct amdgpu_dm_connector {
	struct drm_dp_mst_port *port;
	struct amdgpu_dm_connector *mst_port;
	struct drm_dp_aux *dsc_aux;
-	/* TODO see if we can merge with ddc_bus or make a dm_connector */
	struct amdgpu_i2c_adapter *i2c;

	/* Monitor range limits */
-	int min_vfreq ;
+	/**
+	 * @min_vfreq: Minimal frequency supported by the display in Hz. This
+	 * value is set to zero when there is no FreeSync support.
+	 */
+	int min_vfreq;
+
+	/**
+	 * @max_vfreq: Maximum frequency supported by the display in Hz. This
+	 * value is set to zero when there is no FreeSync support.
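+	 *
+	 * For example, a 48-144 Hz VRR panel would report min_vfreq = 48
+	 * and max_vfreq = 144; FreeSync video modes are only added when
+	 * this range spans more than 10 Hz (see
+	 * amdgpu_dm_connector_add_freesync_modes()).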
+ */ int max_vfreq ; int pixel_clock_mhz; @@ -604,12 +639,25 @@ struct amdgpu_dm_connector { #endif bool force_yuv420_output; struct dsc_preferred_settings dsc_settings; + union dp_downstream_port_present mst_downstream_port_present; /* Cached display modes */ struct drm_display_mode freesync_vid_base; int psr_skip_count; + + /* Record progress status of mst*/ + uint8_t mst_status; }; +static inline void amdgpu_dm_set_mst_status(uint8_t *status, + uint8_t flags, bool set) +{ + if (set) + *status |= flags; + else + *status &= ~flags; +} + #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base) extern const struct amdgpu_ip_block_version dm_ip_block; @@ -626,18 +674,17 @@ struct dm_crtc_state { bool cm_has_degamma; bool cm_is_degamma_srgb; + bool mpo_requested; + int update_type; int active_planes; int crc_skip_count; - bool freesync_timing_changed; bool freesync_vrr_info_changed; bool dsc_force_changed; bool vrr_supported; - - bool force_dpms_off; struct mod_freesync_config freesync_config; struct dc_info_packet vrr_infopacket; @@ -670,11 +717,34 @@ struct dm_connector_state { uint64_t pbn; }; +/** + * struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info + * + * AMDGPU supports FreeSync over HDMI by using the VSDB section, and this + * struct is useful to keep track of the display-specific information about + * FreeSync. + */ struct amdgpu_hdmi_vsdb_info { - unsigned int amd_vsdb_version; /* VSDB version, should be used to determine which VSIF to send */ - bool freesync_supported; /* FreeSync Supported */ - unsigned int min_refresh_rate_hz; /* FreeSync Minimum Refresh Rate in Hz */ - unsigned int max_refresh_rate_hz; /* FreeSync Maximum Refresh Rate in Hz */ + /** + * @amd_vsdb_version: Vendor Specific Data Block Version, should be + * used to determine which Vendor Specific InfoFrame (VSIF) to send. + */ + unsigned int amd_vsdb_version; + + /** + * @freesync_supported: FreeSync Supported. + */ + bool freesync_supported; + + /** + * @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz. 
+ */ + unsigned int min_refresh_rate_hz; + + /** + * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz + */ + unsigned int max_refresh_rate_hz; }; @@ -731,4 +801,21 @@ extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs; int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx, unsigned int link_index, void *payload, void *operation_result); + +bool check_seamless_boot_capability(struct amdgpu_device *adev); + +struct dc_stream_state * + create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, + const struct drm_display_mode *drm_mode, + const struct dm_connector_state *dm_state, + const struct dc_stream_state *old_stream); + +int dm_atomic_get_state(struct drm_atomic_state *state, + struct dm_atomic_state **dm_state); + +struct amdgpu_dm_connector * +amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, + struct drm_crtc *crtc); + +int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth); #endif /* __AMDGPU_DM_H__ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index a022e5bb30a5..a4cb23d059bd 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -29,7 +29,9 @@ #include "modules/color/color_gamma.h" #include "basics/conversion.h" -/* +/** + * DOC: overview + * * The DC interface to HW gives us the following color management blocks * per pipe (surface): * @@ -71,8 +73,8 @@ #define MAX_DRM_LUT_VALUE 0xFFFF -/* - * Initialize the color module. +/** + * amdgpu_dm_init_color_mod - Initialize the color module. * * We're not using the full color module, only certain components. * Only call setup functions for components that we need. @@ -82,7 +84,14 @@ void amdgpu_dm_init_color_mod(void) setup_x_points_distribution(); } -/* Extracts the DRM lut and lut size from a blob. */ +/** + * __extract_blob_lut - Extracts the DRM lut and lut size from a blob. + * @blob: DRM color mgmt property blob + * @size: lut size + * + * Returns: + * DRM LUT or NULL + */ static const struct drm_color_lut * __extract_blob_lut(const struct drm_property_blob *blob, uint32_t *size) { @@ -90,13 +99,18 @@ __extract_blob_lut(const struct drm_property_blob *blob, uint32_t *size) return blob ? (struct drm_color_lut *)blob->data : NULL; } -/* - * Return true if the given lut is a linear mapping of values, i.e. it acts - * like a bypass LUT. +/** + * __is_lut_linear - check if the given lut is a linear mapping of values + * @lut: given lut to check values + * @size: lut size * * It is considered linear if the lut represents: - * f(a) = (0xFF00/MAX_COLOR_LUT_ENTRIES-1)a; for integer a in - * [0, MAX_COLOR_LUT_ENTRIES) + * f(a) = (0xFF00/MAX_COLOR_LUT_ENTRIES-1)a; for integer a in [0, + * MAX_COLOR_LUT_ENTRIES) + * + * Returns: + * True if the given lut is a linear mapping of values, i.e. it acts like a + * bypass LUT. Otherwise, false. */ static bool __is_lut_linear(const struct drm_color_lut *lut, uint32_t size) { @@ -119,9 +133,13 @@ static bool __is_lut_linear(const struct drm_color_lut *lut, uint32_t size) return true; } -/* - * Convert the drm_color_lut to dc_gamma. The conversion depends on the size - * of the lut - whether or not it's legacy. +/** + * __drm_lut_to_dc_gamma - convert the drm_color_lut to dc_gamma. 
+ * @lut: DRM lookup table for color conversion + * @gamma: DC gamma to set entries + * @is_legacy: legacy or atomic gamma + * + * The conversion depends on the size of the lut - whether or not it's legacy. */ static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut, struct dc_gamma *gamma, bool is_legacy) @@ -154,8 +172,11 @@ static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut, } } -/* - * Converts a DRM CTM to a DC CSC float matrix. +/** + * __drm_ctm_to_dc_matrix - converts a DRM CTM to a DC CSC float matrix + * @ctm: DRM color transformation matrix + * @matrix: DC CSC float matrix + * * The matrix needs to be a 3x4 (12 entry) matrix. */ static void __drm_ctm_to_dc_matrix(const struct drm_color_ctm *ctm, @@ -189,7 +210,18 @@ static void __drm_ctm_to_dc_matrix(const struct drm_color_ctm *ctm, } } -/* Calculates the legacy transfer function - only for sRGB input space. */ +/** + * __set_legacy_tf - Calculates the legacy transfer function + * @func: transfer function + * @lut: lookup table that defines the color space + * @lut_size: size of respective lut + * @has_rom: if ROM can be used for hardcoded curve + * + * Only for sRGB input space + * + * Returns: + * 0 in case of success, -ENOMEM if fails + */ static int __set_legacy_tf(struct dc_transfer_func *func, const struct drm_color_lut *lut, uint32_t lut_size, bool has_rom) @@ -218,7 +250,16 @@ static int __set_legacy_tf(struct dc_transfer_func *func, return res ? 0 : -ENOMEM; } -/* Calculates the output transfer function based on expected input space. */ +/** + * __set_output_tf - calculates the output transfer function based on expected input space. + * @func: transfer function + * @lut: lookup table that defines the color space + * @lut_size: size of respective lut + * @has_rom: if ROM can be used for hardcoded curve + * + * Returns: + * 0 in case of success. -ENOMEM if fails. + */ static int __set_output_tf(struct dc_transfer_func *func, const struct drm_color_lut *lut, uint32_t lut_size, bool has_rom) @@ -262,7 +303,16 @@ static int __set_output_tf(struct dc_transfer_func *func, return res ? 0 : -ENOMEM; } -/* Caculates the input transfer function based on expected input space. */ +/** + * __set_input_tf - calculates the input transfer function based on expected + * input space. + * @func: transfer function + * @lut: lookup table that defines the color space + * @lut_size: size of respective lut. + * + * Returns: + * 0 in case of success. -ENOMEM if fails. + */ static int __set_input_tf(struct dc_transfer_func *func, const struct drm_color_lut *lut, uint32_t lut_size) { @@ -285,9 +335,14 @@ static int __set_input_tf(struct dc_transfer_func *func, } /** - * Verifies that the Degamma and Gamma LUTs attached to the |crtc_state| are of - * the expected size. - * Returns 0 on success. + * amdgpu_dm_verify_lut_sizes - verifies if DRM luts match the hw supported sizes + * @crtc_state: the DRM CRTC state + * + * Verifies that the Degamma and Gamma LUTs attached to the &crtc_state + * are of the expected size. + * + * Returns: + * 0 on success. -EINVAL if any lut sizes are invalid. */ int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state) { @@ -323,9 +378,9 @@ int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state) * of the HW blocks as long as the CRTC CTM always comes before the * CRTC RGM and after the CRTC DGM. * - * The CRTC RGM block will be placed in the RGM LUT block if it is non-linear. - * The CRTC DGM block will be placed in the DGM LUT block if it is non-linear. 
- * The CRTC CTM will be placed in the gamut remap block if it is non-linear. + * - The CRTC RGM block will be placed in the RGM LUT block if it is non-linear. + * - The CRTC DGM block will be placed in the DGM LUT block if it is non-linear. + * - The CRTC CTM will be placed in the gamut remap block if it is non-linear. * * The RGM block is typically more fully featured and accurate across * all ASICs - DCE can't support a custom non-linear CRTC DGM. @@ -334,7 +389,8 @@ int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state) * management at once we have to either restrict the usage of CRTC properties * or blend adjustments together. * - * Returns 0 on success. + * Returns: + * 0 on success. Error code if setup fails. */ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc) { @@ -389,7 +445,7 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc) if (r) return r; } else if (has_regamma) { - /* CRTC RGM goes into RGM LUT. */ + /* If atomic regamma, CRTC RGM goes into RGM LUT. */ stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS; stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; @@ -446,9 +502,10 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc) * * Update the underlying dc_stream_state's input transfer function (ITF) in * preparation for hardware commit. The transfer function used depends on - * the prepartion done on the stream for color management. + * the preparation done on the stream for color management. * - * Returns 0 on success. + * Returns: + * 0 on success. -ENOMEM if mem allocation fails. */ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, struct dc_plane_state *dc_plane_state) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c new file mode 100644 index 000000000000..594fe8a4d02b --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -0,0 +1,464 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#include <drm/drm_vblank.h> +#include <drm/drm_atomic_helper.h> + +#include "dc.h" +#include "amdgpu.h" +#include "amdgpu_dm_psr.h" +#include "amdgpu_dm_crtc.h" +#include "amdgpu_dm_plane.h" +#include "amdgpu_dm_trace.h" +#include "amdgpu_dm_debugfs.h" + +void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc) +{ + struct drm_crtc *crtc = &acrtc->base; + struct drm_device *dev = crtc->dev; + unsigned long flags; + + drm_crtc_handle_vblank(crtc); + + spin_lock_irqsave(&dev->event_lock, flags); + + /* Send completion event for cursor-only commits */ + if (acrtc->event && acrtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { + drm_crtc_send_vblank_event(crtc, acrtc->event); + drm_crtc_vblank_put(crtc); + acrtc->event = NULL; + } + + spin_unlock_irqrestore(&dev->event_lock, flags); +} + +bool modeset_required(struct drm_crtc_state *crtc_state, + struct dc_stream_state *new_stream, + struct dc_stream_state *old_stream) +{ + return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state); +} + +bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc) + +{ + return acrtc->dm_irq_params.freesync_config.state == + VRR_STATE_ACTIVE_VARIABLE || + acrtc->dm_irq_params.freesync_config.state == + VRR_STATE_ACTIVE_FIXED; +} + +int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable) +{ + enum dc_irq_source irq_source; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + struct amdgpu_device *adev = drm_to_adev(crtc->dev); + int rc; + + irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst; + + rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; + + DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n", + acrtc->crtc_id, enable ? "en" : "dis", rc); + return rc; +} + +bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state) +{ + return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE || + dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; +} + +static void vblank_control_worker(struct work_struct *work) +{ + struct vblank_control_work *vblank_work = + container_of(work, struct vblank_control_work, work); + struct amdgpu_display_manager *dm = vblank_work->dm; + + mutex_lock(&dm->dc_lock); + + if (vblank_work->enable) + dm->active_vblank_irq_count++; + else if (dm->active_vblank_irq_count) + dm->active_vblank_irq_count--; + + dc_allow_idle_optimizations( + dm->dc, dm->active_vblank_irq_count == 0 ? true : false); + + DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0); + + /* + * Control PSR based on vblank requirements from OS + * + * If panel supports PSR SU, there's no need to disable PSR when OS is + * submitting fast atomic commits (we infer this by whether the OS + * requests vblank events). Fast atomic commits will simply trigger a + * full-frame-update (FFU); a specific case of selective-update (SU) + * where the SU region is the full hactive*vactive region. See + * fill_dc_dirty_rects(). 
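+	 *
+	 * Concretely, on a hypothetical 1920x1080 stream this means PSR
+	 * stays enabled and each such commit is submitted as one dirty
+	 * rect covering the full 1920x1080 frame, rather than toggling
+	 * PSR off and back on.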
+ */
+ if (vblank_work->stream && vblank_work->stream->link) {
+ if (vblank_work->enable) {
+ if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
+ vblank_work->stream->link->psr_settings.psr_allow_active)
+ amdgpu_dm_psr_disable(vblank_work->stream);
+ } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
+ !vblank_work->stream->link->psr_settings.psr_allow_active &&
+ vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
+ amdgpu_dm_psr_enable(vblank_work->stream);
+ }
+ }
+
+ mutex_unlock(&dm->dc_lock);
+
+ dc_stream_release(vblank_work->stream);
+
+ kfree(vblank_work);
+}
+
+static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
+{
+ enum dc_irq_source irq_source;
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct vblank_control_work *work;
+ int rc = 0;
+
+ if (enable) {
+ /* vblank irq on -> Only need vupdate irq in vrr mode */
+ if (amdgpu_dm_vrr_active(acrtc_state))
+ rc = dm_set_vupdate_irq(crtc, true);
+ } else {
+ /* vblank irq off -> vupdate irq off */
+ rc = dm_set_vupdate_irq(crtc, false);
+ }
+
+ if (rc)
+ return rc;
+
+ irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
+
+ if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
+ return -EBUSY;
+
+ if (amdgpu_in_reset(adev))
+ return 0;
+
+ if (dm->vblank_control_workqueue) {
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return -ENOMEM;
+
+ INIT_WORK(&work->work, vblank_control_worker);
+ work->dm = dm;
+ work->acrtc = acrtc;
+ work->enable = enable;
+
+ if (acrtc_state->stream) {
+ dc_stream_retain(acrtc_state->stream);
+ work->stream = acrtc_state->stream;
+ }
+
+ queue_work(dm->vblank_control_workqueue, &work->work);
+ }
+
+ return 0;
+}
+
+int dm_enable_vblank(struct drm_crtc *crtc)
+{
+ return dm_set_vblank(crtc, true);
+}
+
+void dm_disable_vblank(struct drm_crtc *crtc)
+{
+ dm_set_vblank(crtc, false);
+}
+
+static void dm_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct dm_crtc_state *cur = to_dm_crtc_state(state);
+
+ /* TODO Destroy dc_stream objects once the stream object is flattened */
+ if (cur->stream)
+ dc_stream_release(cur->stream);
+
+ __drm_atomic_helper_crtc_destroy_state(state);
+
+ kfree(state);
+}
+
+static struct drm_crtc_state *dm_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct dm_crtc_state *state, *cur;
+
+ cur = to_dm_crtc_state(crtc->state);
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+
+ if (cur->stream) {
+ state->stream = cur->stream;
+ dc_stream_retain(state->stream);
+ }
+
+ state->active_planes = cur->active_planes;
+ state->vrr_infopacket = cur->vrr_infopacket;
+ state->abm_level = cur->abm_level;
+ state->vrr_supported = cur->vrr_supported;
+ state->freesync_config = cur->freesync_config;
+ state->cm_has_degamma = cur->cm_has_degamma;
+ state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
+ state->crc_skip_count = cur->crc_skip_count;
+ state->mpo_requested = cur->mpo_requested;
+ /* TODO Duplicate dc_stream after the stream object is flattened */
+
+ return &state->base;
+}
+
+static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
+{
+ drm_crtc_cleanup(crtc);
+ kfree(crtc);
+}
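+
+/*
+ * Reference discipline for the state helpers above: every state handed out
+ * by dm_crtc_duplicate_state() takes its own reference on the dc_stream
+ * (dc_stream_retain()), and dm_crtc_destroy_state() drops it again
+ * (dc_stream_release()), so each software CRTC state owns exactly one
+ * reference to the stream it points at.
+ */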
+
+static void dm_crtc_reset_state(struct drm_crtc *crtc)
+{
+ struct dm_crtc_state *state;
+
+ if (crtc->state)
+ dm_crtc_destroy_state(crtc, crtc->state);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (WARN_ON(!state))
+ return;
+
+ __drm_atomic_helper_crtc_reset(crtc, &state->base);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
+{
+ crtc_debugfs_init(crtc);
+
+ return 0;
+}
+#endif
+
+/* Implement only the options currently available for the driver */
+static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
+ .reset = dm_crtc_reset_state,
+ .destroy = amdgpu_dm_crtc_destroy,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = dm_crtc_duplicate_state,
+ .atomic_destroy_state = dm_crtc_destroy_state,
+ .set_crc_source = amdgpu_dm_crtc_set_crc_source,
+ .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
+ .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
+ .enable_vblank = dm_enable_vblank,
+ .disable_vblank = dm_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+#if defined(CONFIG_DEBUG_FS)
+ .late_register = amdgpu_dm_crtc_late_register,
+#endif
+};
+
+static void dm_crtc_helper_disable(struct drm_crtc *crtc)
+{
+}
+
+static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
+{
+ struct drm_atomic_state *state = new_crtc_state->state;
+ struct drm_plane *plane;
+ int num_active = 0;
+
+ drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
+ struct drm_plane_state *new_plane_state;
+
+ /* Cursor planes are "fake". */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+
+ if (!new_plane_state) {
+ /*
+ * The plane is enabled on the CRTC and hasn't changed
+ * state. This means that it previously passed
+ * validation and is therefore enabled.
+ */
+ num_active += 1;
+ continue;
+ }
+
+ /* We need a framebuffer to be considered enabled. */
+ num_active += (new_plane_state->fb != NULL);
+ }
+
+ return num_active;
+}
+
+static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
+ struct drm_crtc_state *new_crtc_state)
+{
+ struct dm_crtc_state *dm_new_crtc_state =
+ to_dm_crtc_state(new_crtc_state);
+
+ dm_new_crtc_state->active_planes = 0;
+
+ if (!dm_new_crtc_state->stream)
+ return;
+
+ dm_new_crtc_state->active_planes =
+ count_crtc_active_planes(new_crtc_state);
+}
+
+static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
+ struct dc *dc = adev->dm.dc;
+ struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
+ int ret = -EINVAL;
+
+ trace_amdgpu_dm_crtc_atomic_check(crtc_state);
+
+ dm_update_crtc_active_planes(crtc, crtc_state);
+
+ if (WARN_ON(unlikely(!dm_crtc_state->stream &&
+ modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
+ return ret;
+ }
+
+ /*
+ * We require the primary plane to be enabled whenever the CRTC is, otherwise
+ * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
+ * planes are disabled, which is not supported by the hardware.
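+ * (The cursor is composited with the pipe of an enabled plane, so with
+ * every other plane off there would be nothing to scan it out with.)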
And there is legacy + * userspace which stops using the HW cursor altogether in response to the resulting EINVAL. + */ + if (crtc_state->enable && + !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) { + DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n"); + return -EINVAL; + } + + /* In some use cases, like reset, no stream is attached */ + if (!dm_crtc_state->stream) + return 0; + + if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK) + return 0; + + DRM_DEBUG_ATOMIC("Failed DC stream validation\n"); + return ret; +} + +static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = { + .disable = dm_crtc_helper_disable, + .atomic_check = dm_crtc_helper_atomic_check, + .mode_fixup = dm_crtc_helper_mode_fixup, + .get_scanout_position = amdgpu_crtc_get_scanout_position, +}; + +int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, + struct drm_plane *plane, + uint32_t crtc_index) +{ + struct amdgpu_crtc *acrtc = NULL; + struct drm_plane *cursor_plane; + + int res = -ENOMEM; + + cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL); + if (!cursor_plane) + goto fail; + + cursor_plane->type = DRM_PLANE_TYPE_CURSOR; + res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL); + + acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL); + if (!acrtc) + goto fail; + + res = drm_crtc_init_with_planes( + dm->ddev, + &acrtc->base, + plane, + cursor_plane, + &amdgpu_dm_crtc_funcs, NULL); + + if (res) + goto fail; + + drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs); + + /* Create (reset) the plane state */ + if (acrtc->base.funcs->reset) + acrtc->base.funcs->reset(&acrtc->base); + + acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size; + acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size; + + acrtc->crtc_id = crtc_index; + acrtc->base.enabled = false; + acrtc->otg_inst = -1; + + dm->adev->mode_info.crtcs[crtc_index] = acrtc; + drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES, + true, MAX_COLOR_LUT_ENTRIES); + drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES); + + return 0; + +fail: + kfree(acrtc); + kfree(cursor_plane); + return res; +} + diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h new file mode 100644 index 000000000000..1ac8692354cf --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __AMDGPU_DM_CRTC_H__ +#define __AMDGPU_DM_CRTC_H__ + +void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc); + +bool modeset_required(struct drm_crtc_state *crtc_state, + struct dc_stream_state *new_stream, + struct dc_stream_state *old_stream); + +int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable); + +bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc); + +bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state); + +int dm_enable_vblank(struct drm_crtc *crtc); + +void dm_disable_vblank(struct drm_crtc *crtc); + +int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, + struct drm_plane *plane, + uint32_t link_index); + +#endif + diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 9d43ecb1f692..ee242d9d8b06 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -23,6 +23,7 @@ * */ +#include <linux/string_helpers.h> #include <linux/uaccess.h> #include "dc.h" @@ -49,10 +50,12 @@ struct dmub_debugfs_trace_entry { uint32_t param1; }; -static inline const char *yesno(bool v) -{ - return v ? "yes" : "no"; -} +static const char *const mst_progress_status[] = { + "probe", + "remote_edid", + "allocate_new_payload", + "clear_allocated_payload", +}; /* parse_write_buffer_into_params - Helper function to parse debugfs write buffer into an array * @@ -227,8 +230,10 @@ static ssize_t dp_link_settings_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -245,6 +250,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf, { struct amdgpu_dm_connector *connector = file_inode(f)->i_private; struct dc_link *link = connector->dc_link; + struct amdgpu_device *adev = drm_to_adev(connector->base.dev); struct dc *dc = (struct dc *)link->dc; struct dc_link_settings prefer_link_settings; char *wr_buf = NULL; @@ -292,9 +298,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf, case LINK_RATE_RBR2: case LINK_RATE_HIGH2: case LINK_RATE_HIGH3: -#if defined(CONFIG_DRM_AMD_DC_DCN) case LINK_RATE_UHBR10: -#endif break; default: valid_input = false; @@ -304,6 +308,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf, if (!valid_input) { kfree(wr_buf); DRM_DEBUG_DRIVER("Invalid Input value No HW will be programmed\n"); + mutex_lock(&adev->dm.dc_lock); + dc_link_set_preferred_training_settings(dc, NULL, NULL, link, false); + mutex_unlock(&adev->dm.dc_lock); return size; } @@ -315,7 +322,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf, prefer_link_settings.lane_count = param[0]; prefer_link_settings.link_rate = param[1]; - dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, true); + mutex_lock(&adev->dm.dc_lock); + dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, false); + mutex_unlock(&adev->dm.dc_lock); kfree(wr_buf); return size; @@ -389,8 +398,10 @@ static ssize_t dp_phy_settings_read(struct file *f, char __user *buf, break; r = 
put_user((*(rd_buf + result)), buf);
- if (r)
+ if (r) {
+ kfree(rd_buf);
return r; /* r = -EFAULT */
+ }
buf += 1;
size -= 1;
@@ -536,11 +547,11 @@ static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
/* apply phy settings from user */
for (r = 0; r < link_lane_settings.link_settings.lane_count; r++) {
- link_lane_settings.lane_settings[r].VOLTAGE_SWING =
+ link_lane_settings.hw_lane_settings[r].VOLTAGE_SWING =
(enum dc_voltage_swing) (param[0]);
- link_lane_settings.lane_settings[r].PRE_EMPHASIS =
+ link_lane_settings.hw_lane_settings[r].PRE_EMPHASIS =
(enum dc_pre_emphasis) (param[1]);
- link_lane_settings.lane_settings[r].POST_CURSOR2 =
+ link_lane_settings.hw_lane_settings[r].POST_CURSOR2 =
(enum dc_post_cursor2) (param[2]);
}
@@ -734,7 +745,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
}
for (i = 0; i < (unsigned int)(link_training_settings.link_settings.lane_count); i++)
- link_training_settings.lane_settings[i] = link->cur_lane_setting[i];
+ link_training_settings.hw_lane_settings[i] = link->cur_lane_setting[i];
dc_link_set_test_pattern(
link,
@@ -824,29 +835,61 @@ static int dmub_fw_state_show(struct seq_file *m, void *data)
return seq_write(m, state_base, state_size);
}
-/*
- * Returns the current and maximum output bpc for the connector.
- * Example usage: cat /sys/kernel/debug/dri/0/DP-1/output_bpc
+/* psr_capability_show() - show eDP panel PSR capability
+ *
+ * The read function: psr_capability_show
+ * Shows if sink has PSR capability or not.
+ * If yes - the PSR version is appended
+ *
+ * cat /sys/kernel/debug/dri/0/eDP-X/psr_capability
+ *
+ * Expected output:
+ * "Sink support: no\n" - if panel doesn't support PSR
+ * "Sink support: yes [0x01]\n" - if panel supports PSR1
+ * "Driver support: no\n" - if driver doesn't support PSR
+ * "Driver support: yes [0x01]\n" - if driver supports PSR1
 */
-static int output_bpc_show(struct seq_file *m, void *data)
+static int psr_capability_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
- struct drm_device *dev = connector->dev;
- struct drm_crtc *crtc = NULL;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct dc_link *link = aconnector->dc_link;
+
+ if (!link)
+ return -ENODEV;
+
+ if (link->type == dc_connection_none)
+ return -ENODEV;
+
+ if (!(link->connector_signal & SIGNAL_TYPE_EDP))
+ return -ENODEV;
+
+ seq_printf(m, "Sink support: %s", str_yes_no(link->dpcd_caps.psr_info.psr_version != 0));
+ if (link->dpcd_caps.psr_info.psr_version)
+ seq_printf(m, " [0x%02x]", link->dpcd_caps.psr_info.psr_version);
+ seq_puts(m, "\n");
+
+ seq_printf(m, "Driver support: %s", str_yes_no(link->psr_settings.psr_feature_enabled));
+ if (link->psr_settings.psr_version)
+ seq_printf(m, " [0x%02x]", link->psr_settings.psr_version);
+ seq_puts(m, "\n");
+
+ return 0;
+}
+
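A minimal userspace sketch of reading the new file above (illustrative only;
the debugfs mount point and the eDP-1 connector name are assumptions, and
reading it typically requires root):

	#include <stdio.h>

	int main(void)
	{
		char line[128];
		/* Connector name varies per system; eDP-1 is just an example. */
		FILE *f = fopen("/sys/kernel/debug/dri/0/eDP-1/psr_capability", "r");

		if (!f) {
			perror("psr_capability");
			return 1;
		}
		/* Expect "Sink support: ..." and "Driver support: ..." lines. */
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}
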
+/*
+ * Returns the current bpc for the crtc.
+ * Example usage: cat /sys/kernel/debug/dri/0/crtc-0/amdgpu_current_bpc
+ */
+static int amdgpu_current_bpc_show(struct seq_file *m, void *data)
+{
+ struct drm_crtc *crtc = m->private;
+ struct drm_device *dev = crtc->dev;
struct dm_crtc_state *dm_crtc_state = NULL;
int res = -ENODEV;
unsigned int bpc;
mutex_lock(&dev->mode_config.mutex);
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-
- if (connector->state == NULL)
- goto unlock;
-
- crtc = connector->state->crtc;
- if (crtc == NULL)
- goto unlock;
-
drm_modeset_lock(&crtc->mutex, NULL);
if (crtc->state == NULL)
goto unlock;
@@ -876,18 +919,15 @@ static int output_bpc_show(struct seq_file *m, void *data)
}
seq_printf(m, "Current: %u\n", bpc);
- seq_printf(m, "Maximum: %u\n", connector->display_info.bpc);
res = 0;
unlock:
- if (crtc)
- drm_modeset_unlock(&crtc->mutex);
-
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ drm_modeset_unlock(&crtc->mutex);
mutex_unlock(&dev->mode_config.mutex);
return res;
}
+DEFINE_SHOW_ATTRIBUTE(amdgpu_current_bpc);
/*
* Example usage:
@@ -1165,8 +1205,8 @@ static int dp_dsc_fec_support_show(struct seq_file *m, void *data)
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- seq_printf(m, "FEC_Sink_Support: %s\n", yesno(is_fec_supported));
- seq_printf(m, "DSC_Sink_Support: %s\n", yesno(is_dsc_supported));
+ seq_printf(m, "FEC_Sink_Support: %s\n", str_yes_no(is_fec_supported));
+ seq_printf(m, "DSC_Sink_Support: %s\n", str_yes_no(is_dsc_supported));
return ret;
}
@@ -1193,12 +1233,14 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
struct drm_connector *connector = &aconnector->base;
struct dc_link *link = NULL;
struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
enum dc_connection_type new_connection_type = dc_connection_none;
char *wr_buf = NULL;
uint32_t wr_buf_size = 42;
int max_param_num = 1;
long param[1] = {0};
uint8_t param_nums = 0;
+ bool ret = false;
if (!aconnector || !aconnector->dc_link)
return -EINVAL;
@@ -1221,20 +1263,32 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
return -EINVAL;
}
+ kfree(wr_buf);
+
if (param_nums <= 0) {
DRM_DEBUG_DRIVER("user data could not be read\n");
- kfree(wr_buf);
+ return -EINVAL;
+ }
+
+ mutex_lock(&aconnector->hpd_lock);
+
+ /* Not supported on MST end devices */
+ if (aconnector->mst_port) {
+ mutex_unlock(&aconnector->hpd_lock);
return -EINVAL;
}
if (param[0] == 1) {
- mutex_lock(&aconnector->hpd_lock);
if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type) &&
new_connection_type != dc_connection_none)
goto unlock;
- if (!dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD))
+ mutex_lock(&adev->dm.dc_lock);
+ ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ mutex_unlock(&adev->dm.dc_lock);
+
+ if (!ret)
goto unlock;
amdgpu_dm_update_connector_after_detect(aconnector);
@@ -1243,7 +1297,7 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
dm_restore_drm_connector_state(dev, connector);
drm_modeset_unlock_all(dev);
- drm_kms_helper_hotplug_event(dev);
+ drm_kms_helper_connector_hotplug_event(connector);
} else if (param[0] == 0) {
if (!aconnector->dc_link)
goto unlock;
@@ -1261,17 +1315,20 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
amdgpu_dm_update_connector_after_detect(aconnector);
+ /* If the aconnector is the root node in the MST topology */
+ if (aconnector->mst_mgr.mst_state == true)
+ reset_cur_dp_mst_topology(link);
+
drm_modeset_lock_all(dev);
dm_restore_drm_connector_state(dev, connector); drm_modeset_unlock_all(dev); - drm_kms_helper_hotplug_event(dev); + drm_kms_helper_connector_hotplug_event(connector); } unlock: mutex_unlock(&aconnector->hpd_lock); - kfree(wr_buf); return size; } @@ -1312,13 +1369,15 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) - break; + if (pipe_ctx && pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link) + break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -1334,8 +1393,10 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -1414,9 +1475,9 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) - break; + if (pipe_ctx && pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link) + break; } if (!pipe_ctx || !pipe_ctx->stream) @@ -1499,13 +1560,15 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) - break; + if (pipe_ctx && pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link) + break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -1521,8 +1584,10 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -1599,9 +1664,9 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) - break; + if (pipe_ctx && pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link) + break; } if (!pipe_ctx || !pipe_ctx->stream) @@ -1684,13 +1749,15 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) - break; + if (pipe_ctx && pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link) + break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -1706,8 +1773,10 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -1784,9 +1853,9 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = 
&aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) - break; + if (pipe_ctx && pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link) + break; } if (!pipe_ctx || !pipe_ctx->stream) @@ -1865,13 +1934,15 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) - break; + if (pipe_ctx && pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link) + break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -1887,8 +1958,10 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -1962,9 +2035,9 @@ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *bu for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) - break; + if (pipe_ctx && pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link) + break; } if (!pipe_ctx || !pipe_ctx->stream) @@ -2041,13 +2114,15 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) - break; + if (pipe_ctx && pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link) + break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -2063,8 +2138,10 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -2098,13 +2175,15 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) - break; + if (pipe_ctx && pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link) + break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -2120,8 +2199,10 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -2170,13 +2251,15 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) - break; + if (pipe_ctx && pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link) + break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -2192,8 +2275,10 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char 
__user *buf,
break;
r = put_user(*(rd_buf + result), buf);
- if (r)
+ if (r) {
+ kfree(rd_buf);
return r; /* r = -EFAULT */
+ }
buf += 1;
size -= 1;
@@ -2242,13 +2327,15 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
- break;
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
}
- if (!pipe_ctx)
+ if (!pipe_ctx) {
+ kfree(rd_buf);
return -ENXIO;
+ }
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
@@ -2264,8 +2351,10 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
break;
r = put_user(*(rd_buf + result), buf);
- if (r)
+ if (r) {
+ kfree(rd_buf);
return r; /* r = -EFAULT */
+ }
buf += 1;
size -= 1;
@@ -2458,15 +2547,103 @@ static int target_backlight_show(struct seq_file *m, void *unused)
return 0;
}
+/*
+ * function description: Determine if the connector is an MST connector
+ *
+ * This function helps to determine whether a connector is an MST connector.
+ * - "root" stands for the root connector of the topology
+ * - "branch" stands for a branch device of the topology
+ * - "end" stands for a leaf node connector of the topology
+ * - "no" stands for a connector that is not part of an MST topology
+ * Access it with the following command:
+ *
+ * cat /sys/kernel/debug/dri/0/DP-X/is_mst_connector
+ *
+ */
+static int dp_is_mst_connector_show(struct seq_file *m, void *unused)
+{
+ struct drm_connector *connector = m->private;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct drm_dp_mst_topology_mgr *mgr = NULL;
+ struct drm_dp_mst_port *port = NULL;
+ char *role = NULL;
+
+ mutex_lock(&aconnector->hpd_lock);
+
+ if (aconnector->mst_mgr.mst_state) {
+ role = "root";
+ } else if (aconnector->mst_port &&
+ aconnector->mst_port->mst_mgr.mst_state) {
+
+ role = "end";
+
+ mgr = &aconnector->mst_port->mst_mgr;
+ port = aconnector->port;
+
+ drm_modeset_lock(&mgr->base.lock, NULL);
+ if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
+ port->mcs)
+ role = "branch";
+ drm_modeset_unlock(&mgr->base.lock);
+
+ } else {
+ role = "no";
+ }
+
+ seq_printf(m, "%s\n", role);
+
+ mutex_unlock(&aconnector->hpd_lock);
+
+ return 0;
+}
+
+/*
+ * function description: Read out the MST progress status
+ *
+ * This function helps to determine the MST progress status of
+ * an MST connector.
+ *
+ * Access it with the following command:
+ *
+ * cat /sys/kernel/debug/dri/0/DP-X/mst_progress_status
+ *
+ */
+static int dp_mst_progress_status_show(struct seq_file *m, void *unused)
+{
+ struct drm_connector *connector = m->private;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_device *adev = drm_to_adev(connector->dev);
+ int i;
+
+ mutex_lock(&aconnector->hpd_lock);
+ mutex_lock(&adev->dm.dc_lock);
+
+ if (aconnector->mst_status == MST_STATUS_DEFAULT) {
+ seq_puts(m, "disabled\n");
+ } else {
+ for (i = 0; i < sizeof(mst_progress_status)/sizeof(char *); i++)
+ seq_printf(m, "%s:%s\n",
+ mst_progress_status[i],
+ aconnector->mst_status & BIT(i) ? "done" : "not_done");
+ }
+
+ mutex_unlock(&adev->dm.dc_lock);
+ mutex_unlock(&aconnector->hpd_lock);
+
+ return 0;
+}
+
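A minimal userspace sketch for the two MST debugfs files above (illustrative
only; the DP-1 connector name and the debugfs mount point are assumptions):

	#include <stdio.h>

	static void dump(const char *path)
	{
		char line[64];
		FILE *f = fopen(path, "r");

		if (!f)
			return;
		printf("%s:\n", path);
		/* Each line already ends in '\n'; indent it for readability. */
		while (fgets(line, sizeof(line), f))
			printf("  %s", line);
		fclose(f);
	}

	int main(void)
	{
		dump("/sys/kernel/debug/dri/0/DP-1/is_mst_connector");
		dump("/sys/kernel/debug/dri/0/DP-1/mst_progress_status");
		return 0;
	}
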
"done" : "not_done"); + } + + mutex_unlock(&adev->dm.dc_lock); + mutex_unlock(&aconnector->hpd_lock); + + return 0; +} + DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support); DEFINE_SHOW_ATTRIBUTE(dmub_fw_state); DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer); -DEFINE_SHOW_ATTRIBUTE(output_bpc); DEFINE_SHOW_ATTRIBUTE(dp_lttpr_status); #ifdef CONFIG_DRM_AMD_DC_HDCP DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability); #endif DEFINE_SHOW_ATTRIBUTE(internal_display); +DEFINE_SHOW_ATTRIBUTE(psr_capability); +DEFINE_SHOW_ATTRIBUTE(dp_is_mst_connector); +DEFINE_SHOW_ATTRIBUTE(dp_mst_progress_status); static const struct file_operations dp_dsc_clock_en_debugfs_fops = { .owner = THIS_MODULE, @@ -2610,6 +2787,8 @@ static const struct { {"dp_dsc_fec_support", &dp_dsc_fec_support_fops}, {"max_bpc", &dp_max_bpc_debugfs_fops}, {"dsc_disable_passthrough", &dp_dsc_disable_passthrough_debugfs_fops}, + {"is_mst_connector", &dp_is_mst_connector_fops}, + {"mst_progress_status", &dp_mst_progress_status_fops} }; #ifdef CONFIG_DRM_AMD_DC_HDCP @@ -2707,11 +2886,144 @@ static const struct { const struct file_operations *fops; } connector_debugfs_entries[] = { {"force_yuv420_output", &force_yuv420_output_fops}, - {"output_bpc", &output_bpc_fops}, {"trigger_hotplug", &trigger_hotplug_debugfs_fops}, {"internal_display", &internal_display_fops} }; +/* + * Returns supported customized link rates by this eDP panel. + * Example usage: cat /sys/kernel/debug/dri/0/eDP-x/ilr_setting + */ +static int edp_ilr_show(struct seq_file *m, void *unused) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(m->private); + struct dc_link *link = aconnector->dc_link; + uint8_t supported_link_rates[16]; + uint32_t link_rate_in_khz; + uint32_t entry = 0; + uint8_t dpcd_rev; + + memset(supported_link_rates, 0, sizeof(supported_link_rates)); + dm_helpers_dp_read_dpcd(link->ctx, link, DP_SUPPORTED_LINK_RATES, + supported_link_rates, sizeof(supported_link_rates)); + + dpcd_rev = link->dpcd_caps.dpcd_rev.raw; + + if (dpcd_rev >= DP_DPCD_REV_13 && + (supported_link_rates[entry+1] != 0 || supported_link_rates[entry] != 0)) { + + for (entry = 0; entry < 16; entry += 2) { + link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 + + supported_link_rates[entry]) * 200; + seq_printf(m, "[%d] %d kHz\n", entry/2, link_rate_in_khz); + } + } else { + seq_printf(m, "ILR is not supported by this eDP panel.\n"); + } + + return 0; +} + +/* + * Set supported customized link rate to eDP panel. + * + * echo <lane_count> <link_rate option> > ilr_setting + * + * for example, supported ILR : [0] 1620000 kHz [1] 2160000 kHz [2] 2430000 kHz ... 
+ * echo 4 1 > /sys/kernel/debug/dri/0/eDP-x/ilr_setting + * to set 4 lanes and 2.16 GHz + */ +static ssize_t edp_ilr_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_dm_connector *connector = file_inode(f)->i_private; + struct dc_link *link = connector->dc_link; + struct amdgpu_device *adev = drm_to_adev(connector->base.dev); + struct dc *dc = (struct dc *)link->dc; + struct dc_link_settings prefer_link_settings; + char *wr_buf = NULL; + const uint32_t wr_buf_size = 40; + /* 0: lane_count; 1: link_rate */ + int max_param_num = 2; + uint8_t param_nums = 0; + long param[2]; + bool valid_input = true; + + if (size == 0) + return -EINVAL; + + wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); + if (!wr_buf) + return -ENOMEM; + + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, + (long *)param, buf, + max_param_num, + ¶m_nums)) { + kfree(wr_buf); + return -EINVAL; + } + + if (param_nums <= 0) { + kfree(wr_buf); + return -EINVAL; + } + + switch (param[0]) { + case LANE_COUNT_ONE: + case LANE_COUNT_TWO: + case LANE_COUNT_FOUR: + break; + default: + valid_input = false; + break; + } + + if (param[1] >= link->dpcd_caps.edp_supported_link_rates_count) + valid_input = false; + + if (!valid_input) { + kfree(wr_buf); + DRM_DEBUG_DRIVER("Invalid Input value. No HW will be programmed\n"); + prefer_link_settings.use_link_rate_set = false; + mutex_lock(&adev->dm.dc_lock); + dc_link_set_preferred_training_settings(dc, NULL, NULL, link, false); + mutex_unlock(&adev->dm.dc_lock); + return size; + } + + /* save user force lane_count, link_rate to preferred settings + * spread spectrum will not be changed + */ + prefer_link_settings.link_spread = link->cur_link_settings.link_spread; + prefer_link_settings.lane_count = param[0]; + prefer_link_settings.use_link_rate_set = true; + prefer_link_settings.link_rate_set = param[1]; + prefer_link_settings.link_rate = link->dpcd_caps.edp_supported_link_rates[param[1]]; + + mutex_lock(&adev->dm.dc_lock); + dc_link_set_preferred_training_settings(dc, &prefer_link_settings, + NULL, link, false); + mutex_unlock(&adev->dm.dc_lock); + + kfree(wr_buf); + return size; +} + +static int edp_ilr_open(struct inode *inode, struct file *file) +{ + return single_open(file, edp_ilr_show, inode->i_private); +} + +static const struct file_operations edp_ilr_debugfs_fops = { + .owner = THIS_MODULE, + .open = edp_ilr_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = edp_ilr_write +}; + void connector_debugfs_init(struct amdgpu_dm_connector *connector) { int i; @@ -2726,11 +3038,14 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector) } } if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) { + debugfs_create_file_unsafe("psr_capability", 0444, dir, connector, &psr_capability_fops); debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops); debugfs_create_file("amdgpu_current_backlight_pwm", 0444, dir, connector, ¤t_backlight_fops); debugfs_create_file("amdgpu_target_backlight_pwm", 0444, dir, connector, &target_backlight_fops); + debugfs_create_file("ilr_setting", 0644, dir, connector, + &edp_ilr_debugfs_fops); } for (i = 0; i < ARRAY_SIZE(connector_debugfs_entries); i++) { @@ -2909,10 +3224,13 @@ static int crc_win_update_set(void *data, u64 val) struct amdgpu_device *adev = drm_to_adev(new_crtc->dev); struct crc_rd_work *crc_rd_wrk = adev->dm.crc_rd_wrk; + if (!crc_rd_wrk) + return 0; + if (val) { spin_lock_irq(&adev_to_drm(adev)->event_lock); 
spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock); - if (crc_rd_wrk && crc_rd_wrk->crtc) { + if (crc_rd_wrk->crtc) { old_crtc = crc_rd_wrk->crtc; old_acrtc = to_amdgpu_crtc(old_crtc); } @@ -2951,9 +3269,10 @@ static int crc_win_update_get(void *data, u64 *val) DEFINE_DEBUGFS_ATTRIBUTE(crc_win_update_fops, crc_win_update_get, crc_win_update_set, "%llu\n"); - +#endif void crtc_debugfs_init(struct drm_crtc *crtc) { +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY struct dentry *dir = debugfs_lookup("crc", crtc->debugfs_entry); if (!dir) @@ -2969,9 +3288,12 @@ void crtc_debugfs_init(struct drm_crtc *crtc) &crc_win_y_end_fops); debugfs_create_file_unsafe("crc_win_update", 0644, dir, crtc, &crc_win_update_fops); - -} + dput(dir); #endif + debugfs_create_file("amdgpu_current_bpc", 0644, crtc->debugfs_entry, + crtc, &amdgpu_current_bpc_fops); +} + /* * Writes DTN log state to the user supplied buffer. * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dtn_log @@ -3083,7 +3405,10 @@ static int trigger_hpd_mst_set(void *data, u64 val) aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type == dc_connection_mst_branch && aconnector->mst_mgr.aux) { + mutex_lock(&adev->dm.dc_lock); dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); + mutex_unlock(&adev->dm.dc_lock); + drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); } } @@ -3191,6 +3516,54 @@ DEFINE_DEBUGFS_ATTRIBUTE(disable_hpd_ops, disable_hpd_get, disable_hpd_set, "%llu\n"); /* + * Temporary w/a to force sst sequence in M42D DP2 mst receiver + * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dp_set_mst_en_for_sst + */ +static int dp_force_sst_set(void *data, u64 val) +{ + struct amdgpu_device *adev = data; + + adev->dm.dc->debug.set_mst_en_for_sst = val; + + return 0; +} + +static int dp_force_sst_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = data; + + *val = adev->dm.dc->debug.set_mst_en_for_sst; + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(dp_set_mst_en_for_sst_ops, dp_force_sst_get, + dp_force_sst_set, "%llu\n"); + +/* + * Force DP2 sequence without VESA certified cable. + * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dp_ignore_cable_id + */ +static int dp_ignore_cable_id_set(void *data, u64 val) +{ + struct amdgpu_device *adev = data; + + adev->dm.dc->debug.ignore_cable_id = val; + + return 0; +} + +static int dp_ignore_cable_id_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = data; + + *val = adev->dm.dc->debug.ignore_cable_id; + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(dp_ignore_cable_id_ops, dp_ignore_cable_id_get, + dp_ignore_cable_id_set, "%llu\n"); + +/* * Sets the DC visual confirm debug option from the given string. * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_visual_confirm */ @@ -3220,6 +3593,40 @@ DEFINE_SHOW_ATTRIBUTE(mst_topo); DEFINE_DEBUGFS_ATTRIBUTE(visual_confirm_fops, visual_confirm_get, visual_confirm_set, "%llu\n"); + +/* + * Sets the DC skip_detection_link_training debug option from the given string. + * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_skip_detection_link_training + */ +static int skip_detection_link_training_set(void *data, u64 val) +{ + struct amdgpu_device *adev = data; + + if (val == 0) + adev->dm.dc->debug.skip_detection_link_training = false; + else + adev->dm.dc->debug.skip_detection_link_training = true; + + return 0; +} + +/* + * Reads the DC skip_detection_link_training debug option value into the given buffer. 
+ * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_skip_detection_link_training + */ +static int skip_detection_link_training_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = data; + + *val = adev->dm.dc->debug.skip_detection_link_training; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(skip_detection_link_training_fops, + skip_detection_link_training_get, + skip_detection_link_training_set, "%llu\n"); + /* * Dumps the DCC_EN bit for each pipe. * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dcc_en @@ -3252,8 +3659,10 @@ static ssize_t dcc_en_bits_read( dc->hwss.get_dcc_en_bits(dc, dcc_en_bits); rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); - if (!rd_buf) + if (!rd_buf) { + kfree(dcc_en_bits); return -ENOMEM; + } for (i = 0; i < num_pipes; i++) offset += snprintf(rd_buf + offset, rd_buf_size - offset, @@ -3266,8 +3675,10 @@ static ssize_t dcc_en_bits_read( if (*pos >= rd_buf_size) break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; *pos += 1; @@ -3299,10 +3710,17 @@ void dtn_debugfs_init(struct amdgpu_device *adev) adev, &mst_topo_fops); debugfs_create_file("amdgpu_dm_dtn_log", 0644, root, adev, &dtn_log_fops); + debugfs_create_file("amdgpu_dm_dp_set_mst_en_for_sst", 0644, root, adev, + &dp_set_mst_en_for_sst_ops); + debugfs_create_file("amdgpu_dm_dp_ignore_cable_id", 0644, root, adev, + &dp_ignore_cable_id_ops); debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root, adev, &visual_confirm_fops); + debugfs_create_file_unsafe("amdgpu_dm_skip_detection_link_training", 0644, root, adev, + &skip_detection_link_training_fops); + debugfs_create_file_unsafe("amdgpu_dm_dmub_tracebuffer", 0644, root, adev, &dmub_tracebuffer_fops); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h index 3366cb644053..071200473c27 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h @@ -31,8 +31,6 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector); void dtn_debugfs_init(struct amdgpu_device *adev); -#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) void crtc_debugfs_init(struct drm_crtc *crtc); -#endif #endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index 5bfdc66b5867..6202e31c7e3a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -27,7 +27,7 @@ #include "amdgpu.h" #include "amdgpu_dm.h" #include "dm_helpers.h" -#include <drm/drm_hdcp.h> +#include <drm/display/drm_hdcp_helper.h> #include "hdcp_psp.h" /* @@ -302,7 +302,7 @@ static void event_property_update(struct work_struct *work) mutex_lock(&hdcp_work->mutex); - if (aconnector->base.state->commit) { + if (aconnector->base.state && aconnector->base.state->commit) { ret = wait_for_completion_interruptible_timeout(&aconnector->base.state->commit->hw_done, 10 * HZ); if (ret == 0) { @@ -311,18 +311,26 @@ static void event_property_update(struct work_struct *work) } } - if (hdcp_work->encryption_status != MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) { - if (aconnector->base.state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE0 && - hdcp_work->encryption_status <= MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) - drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED); - else if 
(aconnector->base.state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE1 && - hdcp_work->encryption_status == MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) - drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED); - } else { - drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_DESIRED); + if (aconnector->base.state) { + if (hdcp_work->encryption_status != MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) { + if (aconnector->base.state->hdcp_content_type == + DRM_MODE_HDCP_CONTENT_TYPE0 && + hdcp_work->encryption_status <= + MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) + drm_hdcp_update_content_protection(&aconnector->base, + DRM_MODE_CONTENT_PROTECTION_ENABLED); + else if (aconnector->base.state->hdcp_content_type == + DRM_MODE_HDCP_CONTENT_TYPE1 && + hdcp_work->encryption_status == + MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) + drm_hdcp_update_content_protection(&aconnector->base, + DRM_MODE_CONTENT_PROTECTION_ENABLED); + } else { + drm_hdcp_update_content_protection(&aconnector->base, + DRM_MODE_CONTENT_PROTECTION_DESIRED); + } } - mutex_unlock(&hdcp_work->mutex); drm_modeset_unlock(&dev->mode_config.connection_mutex); } @@ -476,13 +484,16 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1; display->stream_enc_idx = config->stream_enc_idx; link->link_enc_idx = config->link_enc_idx; + link->dio_output_id = config->dio_output_idx; link->phy_idx = config->phy_idx; + if (sink) link_is_hdcp14 = dc_link_is_hdcp14(aconnector->dc_link, sink->sink_signal); link->hdcp_supported_informational = link_is_hdcp14; link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw; link->dp.assr_enabled = config->assr_enabled; link->dp.mst_enabled = config->mst_enabled; + link->dp.usb4_enabled = config->usb4_enabled; display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION; link->adjust.auth_delay = 3; link->adjust.hdcp1.disable = 0; @@ -492,7 +503,9 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) (!!aconnector->base.state) ? aconnector->base.state->content_protection : -1, (!!aconnector->base.state) ? 
aconnector->base.state->hdcp_content_type : -1); - hdcp_update_display(hdcp_work, link_index, aconnector, conn_state->hdcp_content_type, false); + if (conn_state) + hdcp_update_display(hdcp_work, link_index, aconnector, + conn_state->hdcp_content_type, false); } @@ -663,7 +676,10 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate); hdcp_work[i].hdcp.config.psp.handle = &adev->psp; - if (dc->ctx->dce_version == DCN_VERSION_3_1) + if (dc->ctx->dce_version == DCN_VERSION_3_1 || + dc->ctx->dce_version == DCN_VERSION_3_14 || + dc->ctx->dce_version == DCN_VERSION_3_15 || + dc->ctx->dce_version == DCN_VERSION_3_16) hdcp_work[i].hdcp.config.psp.caps.dtm_v3_supported = 1; hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i); hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 8cbeeb7c986d..f0b01c8dc4a6 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -27,6 +27,7 @@ #include <linux/acpi.h> #include <linux/i2c.h> +#include <drm/drm_atomic.h> #include <drm/drm_probe_helper.h> #include <drm/amdgpu_drm.h> #include <drm/drm_edid.h> @@ -39,6 +40,7 @@ #include "amdgpu_dm_mst_types.h" #include "dm_helpers.h" +#include "ddc_service_types.h" struct monitor_patch_info { unsigned int manufacturer_id; @@ -83,16 +85,17 @@ static int amdgpu_dm_patch_edid_caps(struct dc_edid_caps *edid_caps) * void * */ enum dc_edid_status dm_helpers_parse_edid_caps( - struct dc_context *ctx, + struct dc_link *link, const struct dc_edid *edid, struct dc_edid_caps *edid_caps) { - struct edid *edid_buf = (struct edid *) edid->raw_edid; + struct amdgpu_dm_connector *aconnector = link->priv; + struct drm_connector *connector = &aconnector->base; + struct edid *edid_buf = edid ? (struct edid *) edid->raw_edid : NULL; struct cea_sad *sads; int sad_count = -1; int sadb_count = -1; int i = 0; - int j = 0; uint8_t *sadb = NULL; enum dc_edid_status result = EDID_OK; @@ -111,23 +114,11 @@ enum dc_edid_status dm_helpers_parse_edid_caps( edid_caps->manufacture_week = edid_buf->mfg_week; edid_caps->manufacture_year = edid_buf->mfg_year; - /* One of the four detailed_timings stores the monitor name. It's - * stored in an array of length 13. 
*/ - for (i = 0; i < 4; i++) { - if (edid_buf->detailed_timings[i].data.other_data.type == 0xfc) { - while (j < 13 && edid_buf->detailed_timings[i].data.other_data.data.str.str[j]) { - if (edid_buf->detailed_timings[i].data.other_data.data.str.str[j] == '\n') - break; - - edid_caps->display_name[j] = - edid_buf->detailed_timings[i].data.other_data.data.str.str[j]; - j++; - } - } - } + drm_edid_get_monitor_name(edid_buf, + edid_caps->display_name, + AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS); - edid_caps->edid_hdmi = drm_detect_hdmi_monitor( - (struct edid *) edid->raw_edid); + edid_caps->edid_hdmi = connector->display_info.is_hdmi; sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads); if (sad_count <= 0) @@ -163,41 +154,28 @@ enum dc_edid_status dm_helpers_parse_edid_caps( return result; } -static void get_payload_table( - struct amdgpu_dm_connector *aconnector, - struct dp_mst_stream_allocation_table *proposed_table) +static void +fill_dc_mst_payload_table_from_drm(struct drm_dp_mst_topology_state *mst_state, + struct amdgpu_dm_connector *aconnector, + struct dc_dp_mst_stream_allocation_table *table) { - int i; - struct drm_dp_mst_topology_mgr *mst_mgr = - &aconnector->mst_port->mst_mgr; - - mutex_lock(&mst_mgr->payload_lock); - - proposed_table->stream_count = 0; - - /* number of active streams */ - for (i = 0; i < mst_mgr->max_payloads; i++) { - if (mst_mgr->payloads[i].num_slots == 0) - break; /* end of vcp_id table */ - - ASSERT(mst_mgr->payloads[i].payload_state != - DP_PAYLOAD_DELETE_LOCAL); - - if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL || - mst_mgr->payloads[i].payload_state == - DP_PAYLOAD_REMOTE) { - - struct dp_mst_stream_allocation *sa = - &proposed_table->stream_allocations[ - proposed_table->stream_count]; - - sa->slot_count = mst_mgr->payloads[i].num_slots; - sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi; - proposed_table->stream_count++; - } + struct dc_dp_mst_stream_allocation_table new_table = { 0 }; + struct dc_dp_mst_stream_allocation *sa; + struct drm_dp_mst_atomic_payload *payload; + + /* Fill payload info*/ + list_for_each_entry(payload, &mst_state->payloads, next) { + if (payload->delete) + continue; + + sa = &new_table.stream_allocations[new_table.stream_count]; + sa->slot_count = payload->time_slots; + sa->vcp_id = payload->vcpi; + new_table.stream_count++; } - mutex_unlock(&mst_mgr->payload_lock); + /* Overwrite the old table */ + *table = new_table; } void dm_helpers_dp_update_branch_info( @@ -211,15 +189,13 @@ void dm_helpers_dp_update_branch_info( bool dm_helpers_dp_mst_write_payload_allocation_table( struct dc_context *ctx, const struct dc_stream_state *stream, - struct dp_mst_stream_allocation_table *proposed_table, + struct dc_dp_mst_stream_allocation_table *proposed_table, bool enable) { struct amdgpu_dm_connector *aconnector; - struct dm_connector_state *dm_conn_state; + struct drm_dp_mst_topology_state *mst_state; + struct drm_dp_mst_atomic_payload *payload; struct drm_dp_mst_topology_mgr *mst_mgr; - struct drm_dp_mst_port *mst_port; - bool ret; - u8 link_coding_cap = DP_8b_10b_ENCODING; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; /* Accessing the connector state is required for vcpi_slots allocation @@ -230,40 +206,21 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( if (!aconnector || !aconnector->mst_port) return false; - dm_conn_state = to_dm_connector_state(aconnector->base.state); - mst_mgr = &aconnector->mst_port->mst_mgr; - - if (!mst_mgr->mst_state) - return false; - - mst_port = 
aconnector->port; - -#if defined(CONFIG_DRM_AMD_DC_DCN) - link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link); -#endif - - if (enable) { - - ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, - dm_conn_state->pbn, - dm_conn_state->vcpi_slots); - if (!ret) - return false; - - } else { - drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port); - } + mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state); /* It's OK for this to fail */ - drm_dp_update_payload_part1(mst_mgr, (link_coding_cap == DP_CAP_ANSI_128B132B) ? 0:1); + payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port); + if (enable) + drm_dp_add_payload_part1(mst_mgr, mst_state, payload); + else + drm_dp_remove_payload(mst_mgr, mst_state, payload); /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or * AUX message. The sequence is slot 1-63 allocated sequence for each * stream. AMD ASIC stream slot allocation should follow the same * sequence. copy DRM MST allocation to dc */ - - get_payload_table(aconnector, proposed_table); + fill_dc_mst_payload_table_from_drm(mst_state, aconnector, proposed_table); return true; } @@ -320,26 +277,35 @@ bool dm_helpers_dp_mst_send_payload_allocation( bool enable) { struct amdgpu_dm_connector *aconnector; + struct drm_dp_mst_topology_state *mst_state; struct drm_dp_mst_topology_mgr *mst_mgr; - struct drm_dp_mst_port *mst_port; + struct drm_dp_mst_atomic_payload *payload; + enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD; + enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; if (!aconnector || !aconnector->mst_port) return false; - mst_port = aconnector->port; - mst_mgr = &aconnector->mst_port->mst_mgr; + mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state); - if (!mst_mgr->mst_state) - return false; - - /* It's OK for this to fail */ - drm_dp_update_payload_part2(mst_mgr); + payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port); + if (!enable) { + set_flag = MST_CLEAR_ALLOCATED_PAYLOAD; + clr_flag = MST_ALLOCATE_NEW_PAYLOAD; + } - if (!enable) - drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port); + if (enable && drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, payload)) { + amdgpu_dm_set_mst_status(&aconnector->mst_status, + set_flag, false); + } else { + amdgpu_dm_set_mst_status(&aconnector->mst_status, + set_flag, true); + amdgpu_dm_set_mst_status(&aconnector->mst_status, + clr_flag, false); + } return true; } @@ -456,16 +422,15 @@ bool dm_helpers_dp_mst_start_top_mgr( return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0); } -void dm_helpers_dp_mst_stop_top_mgr( +bool dm_helpers_dp_mst_stop_top_mgr( struct dc_context *ctx, struct dc_link *link) { struct amdgpu_dm_connector *aconnector = link->priv; - uint8_t i; if (!aconnector) { DRM_ERROR("Failed to find connector for link!"); - return; + return false; } DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n", @@ -473,23 +438,10 @@ void dm_helpers_dp_mst_stop_top_mgr( if (aconnector->mst_mgr.mst_state == true) { drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false); - - for (i = 0; i < MAX_SINKS_PER_LINK; i++) { - if (link->remote_sinks[i] == NULL) - continue; - - if (link->remote_sinks[i]->sink_signal == - SIGNAL_TYPE_DISPLAY_PORT_MST) { - dc_link_remove_remote_sink(link, link->remote_sinks[i]); - - if (aconnector->dc_sink) { - dc_sink_release(aconnector->dc_sink); - aconnector->dc_sink = NULL; - 
aconnector->dc_link->cur_link_settings.lane_count = 0;
- }
- }
- }
+ link->cur_link_settings.lane_count = 0;
}
+
+ return false;
}
bool dm_helpers_dp_read_dpcd(
@@ -563,13 +515,190 @@ bool dm_helpers_submit_i2c(
return result;
}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
+ bool is_write_cmd,
+ unsigned char cmd,
+ unsigned int length,
+ unsigned int offset,
+ unsigned char *data)
+{
+ bool success = false;
+ unsigned char rc_data[16] = {0};
+ unsigned char rc_offset[4] = {0};
+ unsigned char rc_length[2] = {0};
+ unsigned char rc_cmd = 0;
+ unsigned char rc_result = 0xFF;
+ unsigned char i = 0;
+ int ret;
+
+ if (is_write_cmd) {
+ // write rc data
+ memmove(rc_data, data, length);
+ ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_DATA, rc_data, sizeof(rc_data));
+ }
+
+ // write rc offset
+ rc_offset[0] = (unsigned char) offset & 0xFF;
+ rc_offset[1] = (unsigned char) (offset >> 8) & 0xFF;
+ rc_offset[2] = (unsigned char) (offset >> 16) & 0xFF;
+ rc_offset[3] = (unsigned char) (offset >> 24) & 0xFF;
+ ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_OFFSET, rc_offset, sizeof(rc_offset));
+
+ // write rc length
+ rc_length[0] = (unsigned char) length & 0xFF;
+ rc_length[1] = (unsigned char) (length >> 8) & 0xFF;
+ ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_LENGTH, rc_length, sizeof(rc_length));
+
+ // write rc cmd
+ rc_cmd = cmd | 0x80;
+ ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd));
+
+ if (ret < 0) {
+ DRM_ERROR(" execute_synaptics_rc_command - write cmd ..., err = %d\n", ret);
+ return false;
+ }
+
+ // poll until active is 0
+ for (i = 0; i < 10; i++) {
+ drm_dp_dpcd_read(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd));
+ if (rc_cmd == cmd)
+ // active is 0
+ break;
+ msleep(10);
+ }
+
+ // read rc result
+ drm_dp_dpcd_read(aux, SYNAPTICS_RC_RESULT, &rc_result, sizeof(rc_result));
+ success = (rc_result == 0);
+
+ if (success && !is_write_cmd) {
+ // read rc data
+ drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length);
+ }
+
+ DC_LOG_DC(" execute_synaptics_rc_command - success = %d\n", success);
+
+ return success;
+}
+
+static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux)
+{
+ unsigned char data[16] = {0};
+
+ DC_LOG_DC("Start apply_synaptics_fifo_reset_wa\n");
+
+ // Step 2
+ data[0] = 'P';
+ data[1] = 'R';
+ data[2] = 'I';
+ data[3] = 'U';
+ data[4] = 'S';
+
+ if (!execute_synaptics_rc_command(aux, true, 0x01, 5, 0, data))
+ return;
+
+ // Step 3 and 4
+ if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data))
+ return;
+
+ data[0] &= (~(1 << 1)); // set bit 1 to 0
+ if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data))
+ return;
+
+ if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data))
+ return;
+
+ data[0] &= (~(1 << 1)); // set bit 1 to 0
+ if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220D98, data))
+ return;
+
+ if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data))
+ return;
+
+ data[0] &= (~(1 << 1)); // set bit 1 to 0
+ if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data))
+ return;
+
+ // Step 3 and 5
+ if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data))
+ return;
+
+ data[0] |= (1 << 1); // set bit 1 to 1
+ if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data))
+ return;
+
+ if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data))
+ return;
+
+ data[0] |= (1 << 1); // set bit 1 to 1
+ if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220D98, data))
+ return;
+
+ if (!execute_synaptics_rc_command(aux,
false, 0x31, 4, 0x221198, data)) + return; + + data[0] |= (1 << 1); // set bit 1 to 1 + if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data)) + return; + + // Step 6 + if (!execute_synaptics_rc_command(aux, true, 0x02, 0, 0, NULL)) + return; + + DC_LOG_DC("Done apply_synaptics_fifo_reset_wa\n"); +} + +static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst( + struct drm_dp_aux *aux, + const struct dc_stream_state *stream, + bool enable) +{ + uint8_t ret = 0; + + DC_LOG_DC("Configure DSC to non-virtual dpcd synaptics\n"); + + if (enable) { + /* When DSC is enabled on previous boot and reboot with the hub, + * there is a chance that Synaptics hub gets stuck during reboot sequence. + * Applying a workaround to reset Synaptics SDP fifo before enabling the first stream + */ + if (!stream->link->link_status.link_active && + memcmp(stream->link->dpcd_caps.branch_dev_name, + (int8_t *)SYNAPTICS_DEVICE_ID, 4) == 0) + apply_synaptics_fifo_reset_wa(aux); + + ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1); + DRM_INFO("Send DSC enable to synaptics\n"); + + } else { + /* Synaptics hub not support virtual dpcd, + * external monitor occur garbage while disable DSC, + * Disable DSC only when entire link status turn to false, + */ + if (!stream->link->link_status.link_active) { + ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1); + DRM_INFO("Send DSC disable to synaptics\n"); + } + } + + return ret; +} +#endif + bool dm_helpers_dp_write_dsc_enable( struct dc_context *ctx, const struct dc_stream_state *stream, bool enable) { - uint8_t enable_dsc = enable ? 1 : 0; + static const uint8_t DSC_DISABLE; + static const uint8_t DSC_DECODING = 0x01; + static const uint8_t DSC_PASSTHROUGH = 0x02; + struct amdgpu_dm_connector *aconnector; + struct drm_dp_mst_port *port; + uint8_t enable_dsc = enable ? DSC_DECODING : DSC_DISABLE; + uint8_t enable_passthrough = enable ? DSC_PASSTHROUGH : DSC_DISABLE; uint8_t ret = 0; if (!stream) @@ -581,15 +710,64 @@ bool dm_helpers_dp_write_dsc_enable( if (!aconnector->dsc_aux) return false; - ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1); +#if defined(CONFIG_DRM_AMD_DC_DCN) + // apply w/a to synaptics + if (needs_dsc_aux_workaround(aconnector->dc_link) && + (aconnector->mst_downstream_port_present.byte & 0x7) != 0x3) + return write_dsc_enable_synaptics_non_virtual_dpcd_mst( + aconnector->dsc_aux, stream, enable_dsc); +#endif + + port = aconnector->port; + + if (enable) { + if (port->passthrough_aux) { + ret = drm_dp_dpcd_write(port->passthrough_aux, + DP_DSC_ENABLE, + &enable_passthrough, 1); + DC_LOG_DC("Sent DSC pass-through enable to virtual dpcd port, ret = %u\n", + ret); + } + + ret = drm_dp_dpcd_write(aconnector->dsc_aux, + DP_DSC_ENABLE, &enable_dsc, 1); + DC_LOG_DC("Sent DSC decoding enable to %s port, ret = %u\n", + (port->passthrough_aux) ? "remote RX" : + "virtual dpcd", + ret); + } else { + ret = drm_dp_dpcd_write(aconnector->dsc_aux, + DP_DSC_ENABLE, &enable_dsc, 1); + DC_LOG_DC("Sent DSC decoding disable to %s port, ret = %u\n", + (port->passthrough_aux) ? "remote RX" : + "virtual dpcd", + ret); + + if (port->passthrough_aux) { + ret = drm_dp_dpcd_write(port->passthrough_aux, + DP_DSC_ENABLE, + &enable_passthrough, 1); + DC_LOG_DC("Sent DSC pass-through disable to virtual dpcd port, ret = %u\n", + ret); + } + } } - if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT) { - ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); - DC_LOG_DC("Send DSC %s to sst display\n", enable_dsc ? 
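/*
 * [Editor's note] The ordering in the hunk above appears deliberate: on
 * enable, DSC pass-through is written to the branch device's virtual DPCD
 * port before decoding is enabled at the endpoint, and on disable the
 * endpoint's decoder is turned off before pass-through is, presumably so the
 * branch is never asked to forward a compressed stream the sink is not set
 * up to decode.
 */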
"enable" : "disable"); + if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { +#endif + ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); + DC_LOG_DC("Send DSC %s to SST RX\n", enable_dsc ? "enable" : "disable"); +#if defined(CONFIG_DRM_AMD_DC_DCN) + } else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { + ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); + DC_LOG_DC("Send DSC %s to DP-HDMI PCON\n", enable_dsc ? "enable" : "disable"); + } +#endif } - return (ret > 0); + return ret; } bool dm_helpers_is_dp_sink_present(struct dc_link *link) @@ -650,22 +828,8 @@ enum dc_edid_status dm_helpers_read_local_edid( /* We don't need the original edid anymore */ kfree(edid); - /* connector->display_info will be parsed from EDID and saved - * into drm_connector->display_info from edid by call stack - * below: - * drm_parse_ycbcr420_deep_color_info - * drm_parse_hdmi_forum_vsdb - * drm_parse_cea_ext - * drm_add_display_info - * drm_connector_update_edid_property - * - * drm_connector->display_info will be used by amdgpu_dm funcs, - * like fill_stream_properties_from_drm_display_mode - */ - amdgpu_dm_update_connector_after_detect(aconnector); - edid_status = dm_helpers_parse_edid_caps( - ctx, + link, &sink->dc_edid, &sink->edid_caps); @@ -714,6 +878,34 @@ void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigne //amdgpu_device_gpu_recover(dc_context->driver-context, NULL); } +void dm_helpers_init_panel_settings( + struct dc_context *ctx, + struct dc_panel_config *panel_config, + struct dc_sink *sink) +{ + // Extra Panel Power Sequence + panel_config->pps.extra_t3_ms = sink->edid_caps.panel_patch.extra_t3_ms; + panel_config->pps.extra_t7_ms = sink->edid_caps.panel_patch.extra_t7_ms; + panel_config->pps.extra_delay_backlight_off = sink->edid_caps.panel_patch.extra_delay_backlight_off; + panel_config->pps.extra_post_t7_ms = 0; + panel_config->pps.extra_pre_t11_ms = 0; + panel_config->pps.extra_t12_ms = sink->edid_caps.panel_patch.extra_t12_ms; + panel_config->pps.extra_post_OUI_ms = 0; + // Feature DSC + panel_config->dsc.disable_dsc_edp = false; + panel_config->dsc.force_dsc_edp_policy = 0; +} + +void dm_helpers_override_panel_settings( + struct dc_context *ctx, + struct dc_panel_config *panel_config) +{ + // Feature DSC + if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) { + panel_config->dsc.disable_dsc_edp = true; + } +} + void *dm_helpers_allocate_gpu_mem( struct dc_context *ctx, enum dc_gpu_mem_alloc_type type, @@ -805,16 +997,12 @@ void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream) sizeof(new_downspread)); } -#if defined(CONFIG_DRM_AMD_DC_DCN) void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz) { - // FPGA programming for this clock in diags framework that - // needs to go through dm layer, therefore leave dummy interace here + // TODO } - void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable) { - /* TODO: add peridic detection implementation */ + /* TODO: add periodic detection implementation */ } -#endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index 4aba0e8c84f8..19f543ba7205 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -928,7 +928,11 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) to_amdgpu_dm_connector(connector); const struct dc_link *dc_link = amdgpu_dm_connector->dc_link; - dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, false); + if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) { + dc_interrupt_set(adev->dm.dc, + dc_link->irq_source_hpd, + false); + } if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) { dc_interrupt_set(adev->dm.dc, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index cc34a35d0bcb..6ff96b4bdda5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -23,10 +23,10 @@ * */ +#include <drm/display/drm_dp_helper.h> +#include <drm/display/drm_dp_mst_helper.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> -#include <drm/drm_dp_mst_helper.h> -#include <drm/drm_dp_helper.h> #include "dm_services.h" #include "amdgpu.h" #include "amdgpu_dm.h" @@ -36,6 +36,7 @@ #include "dm_helpers.h" #include "dc_link_ddc.h" +#include "dc_link_dp.h" #include "ddc_service_types.h" #include "dpcd_defs.h" @@ -45,9 +46,10 @@ #include "amdgpu_dm_debugfs.h" #endif -#if defined(CONFIG_DRM_AMD_DC_DCN) #include "dc/dcn20/dcn20_resource.h" -#endif +bool is_timing_changed(struct dc_stream_state *cur_stream, + struct dc_stream_state *new_stream); + static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) @@ -55,6 +57,8 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, ssize_t result = 0; struct aux_payload payload; enum aux_return_code_type operation_result; + struct amdgpu_device *adev; + struct ddc_service *ddc; if (WARN_ON(msg->size > 16)) return -E2BIG; @@ -73,6 +77,21 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload, &operation_result); + /* + * w/a on certain intel platform where hpd is unexpected to pull low during + * 1st sideband message transaction by return AUX_RET_ERROR_HPD_DISCON + * aux transaction is succuess in such case, therefore bypass the error + */ + ddc = TO_DM_AUX(aux)->ddc_service; + adev = ddc->ctx->driver_context; + if (adev->dm.aux_hpd_discon_quirk) { + if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE && + operation_result == AUX_RET_ERROR_HPD_DISCON) { + result = 0; + operation_result = AUX_RET_SUCCESS; + } + } + if (payload.write && result >= 0) result = msg->size; @@ -139,11 +158,31 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector) static void amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector) { - struct amdgpu_dm_connector *amdgpu_dm_connector = + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - struct drm_dp_mst_port *port = amdgpu_dm_connector->port; + struct drm_dp_mst_port *port = aconnector->port; + struct amdgpu_dm_connector *root = aconnector->mst_port; + struct dc_link *dc_link = aconnector->dc_link; + struct dc_sink *dc_sink = aconnector->dc_sink; drm_dp_mst_connector_early_unregister(connector, port); + + /* + * Release dc_sink for connector which its attached port is + * no longer in the mst topology + */ + drm_modeset_lock(&root->mst_mgr.base.lock, NULL); + if (dc_sink) { + if (dc_link->sink_count) + dc_link_remove_remote_sink(dc_link, dc_sink); + + dc_sink_release(dc_sink); + aconnector->dc_sink = NULL; + 
aconnector->edid = NULL; + } + + aconnector->mst_status = MST_STATUS_DEFAULT; + drm_modeset_unlock(&root->mst_mgr.base.lock); } static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { @@ -159,7 +198,7 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { }; #if defined(CONFIG_DRM_AMD_DC_DCN) -static bool needs_dsc_aux_workaround(struct dc_link *link) +bool needs_dsc_aux_workaround(struct dc_link *link) { if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) && @@ -209,6 +248,25 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto return true; } + +static bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnector) +{ + union dp_downstream_port_present ds_port_present; + + if (!aconnector->dsc_aux) + return false; + + if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DOWNSTREAMPORT_PRESENT, &ds_port_present, 1) < 0) { + DRM_INFO("Failed to read downstream_port_present 0x05 from DFP of branch device\n"); + return false; + } + + aconnector->mst_downstream_port_present = ds_port_present; + DRM_INFO("Downstream port present %d, type %d\n", + ds_port_present.fields.PORT_PRESENT, ds_port_present.fields.PORT_TYPE); + + return true; +} #endif static int dm_dp_mst_get_modes(struct drm_connector *connector) @@ -224,6 +282,9 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port); if (!edid) { + amdgpu_dm_set_mst_status(&aconnector->mst_status, + MST_REMOTE_EDID, false); + drm_connector_update_edid_property( &aconnector->base, NULL); @@ -254,6 +315,8 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) } aconnector->edid = edid; + amdgpu_dm_set_mst_status(&aconnector->mst_status, + MST_REMOTE_EDID, true); } if (aconnector->dc_sink && aconnector->dc_sink->sink_signal == SIGNAL_TYPE_VIRTUAL) { @@ -289,6 +352,10 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) if (!validate_dsc_caps_on_connector(aconnector)) memset(&aconnector->dc_sink->dsc_caps, 0, sizeof(aconnector->dc_sink->dsc_caps)); + + if (!retrieve_downstream_port_device(aconnector)) + memset(&aconnector->mst_downstream_port_present, + 0, sizeof(aconnector->mst_downstream_port_present)); #endif } } @@ -320,43 +387,74 @@ dm_dp_mst_detect(struct drm_connector *connector, { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); struct amdgpu_dm_connector *master = aconnector->mst_port; + struct drm_dp_mst_port *port = aconnector->port; + int connection_status; if (drm_connector_is_unregistered(connector)) return connector_status_disconnected; - return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr, - aconnector->port); -} + connection_status = drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr, + aconnector->port); -static int dm_dp_mst_atomic_check(struct drm_connector *connector, - struct drm_atomic_state *state) -{ - struct drm_connector_state *new_conn_state = - drm_atomic_get_new_connector_state(state, connector); - struct drm_connector_state *old_conn_state = - drm_atomic_get_old_connector_state(state, connector); - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - struct drm_crtc_state *new_crtc_state; - struct drm_dp_mst_topology_mgr *mst_mgr; - struct drm_dp_mst_port *mst_port; + if (port->pdt != DP_PEER_DEVICE_NONE && !port->dpcd_rev) { + uint8_t dpcd_rev; + int ret; - 
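/*
 * [Editor's sketch, not part of the patch] amdgpu_dm_set_mst_status() is
 * called throughout these hunks but defined elsewhere in the series; judging
 * from its call sites it behaves as a set/clear bitmask helper, roughly
 * (assuming the status field is a small bitmask):
 */
static inline void sketch_set_mst_status(uint8_t *status,
					 uint8_t flags, bool set)
{
	if (set)
		*status |= flags;
	else
		*status &= ~flags;
}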
mst_port = aconnector->port; - mst_mgr = &aconnector->mst_port->mst_mgr; + ret = drm_dp_dpcd_readb(&port->aux, DP_DP13_DPCD_REV, &dpcd_rev); - if (!old_conn_state->crtc) - return 0; + if (ret == 1) { + port->dpcd_rev = dpcd_rev; - if (new_conn_state->crtc) { - new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); - if (!new_crtc_state || - !drm_atomic_crtc_needs_modeset(new_crtc_state) || - new_crtc_state->enable) - return 0; + /* Could be DP1.2 DP Rx case*/ + if (!dpcd_rev) { + ret = drm_dp_dpcd_readb(&port->aux, DP_DPCD_REV, &dpcd_rev); + + if (ret == 1) + port->dpcd_rev = dpcd_rev; + } + + if (!dpcd_rev) + DRM_DEBUG_KMS("Can't decide DPCD revision number!"); } - return drm_dp_atomic_release_vcpi_slots(state, - mst_mgr, - mst_port); + /* + * Could be legacy sink, logical port etc on DP1.2. + * Will get Nack under these cases when issue remote + * DPCD read. + */ + if (ret != 1) + DRM_DEBUG_KMS("Can't access DPCD"); + } else if (port->pdt == DP_PEER_DEVICE_NONE) { + port->dpcd_rev = 0; + } + + /* + * Release dc_sink for connector which unplug event is notified by CSN msg + */ + if (connection_status == connector_status_disconnected && aconnector->dc_sink) { + if (aconnector->dc_link->sink_count) + dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink); + + dc_sink_release(aconnector->dc_sink); + aconnector->dc_sink = NULL; + aconnector->edid = NULL; + + amdgpu_dm_set_mst_status(&aconnector->mst_status, + MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD, + false); + } + + return connection_status; +} + +static int dm_dp_mst_atomic_check(struct drm_connector *connector, + struct drm_atomic_state *state) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct drm_dp_mst_topology_mgr *mst_mgr = &aconnector->mst_port->mst_mgr; + struct drm_dp_mst_port *mst_port = aconnector->port; + + return drm_dp_atomic_release_time_slots(state, mst_mgr, mst_port); } static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = { @@ -419,6 +517,8 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, connector = &aconnector->base; aconnector->port = port; aconnector->mst_port = master; + amdgpu_dm_set_mst_status(&aconnector->mst_status, + MST_PROBE, true); if (drm_connector_init( dev, @@ -498,15 +598,8 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, dc_link_dp_get_max_link_enc_cap(aconnector->dc_link, &max_link_enc_cap); aconnector->mst_mgr.cbs = &dm_mst_cbs; - drm_dp_mst_topology_mgr_init( - &aconnector->mst_mgr, - adev_to_drm(dm->adev), - &aconnector->dm_dp_aux.aux, - 16, - 4, - max_link_enc_cap.lane_count, - drm_dp_bw_code_to_link_rate(max_link_enc_cap.link_rate), - aconnector->connector_id); + drm_dp_mst_topology_mgr_init(&aconnector->mst_mgr, adev_to_drm(dm->adev), + &aconnector->dm_dp_aux.aux, 16, 4, aconnector->connector_id); drm_connector_attach_dp_subconnector_property(&aconnector->base); } @@ -576,6 +669,21 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p } else { params[i].timing->flags.DSC = 0; } + params[i].timing->dsc_cfg.mst_pbn = vars[i + k].pbn; + } + + for (i = 0; i < count; i++) { + if (params[i].sink) { + if (params[i].sink->sink_signal != SIGNAL_TYPE_VIRTUAL && + params[i].sink->sink_signal != SIGNAL_TYPE_NONE) + DRM_DEBUG_DRIVER("%s i=%d dispname=%s\n", __func__, i, + params[i].sink->edid_caps.display_name); + } + + DRM_DEBUG_DRIVER("dsc=%d bits_per_pixel=%d pbn=%d\n", + params[i].timing->flags.DSC, + 
params[i].timing->dsc_cfg.bits_per_pixel, + vars[i + k].pbn); } } @@ -595,7 +703,8 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn) return dsc_config.bits_per_pixel; } -static void increase_dsc_bpp(struct drm_atomic_state *state, +static bool increase_dsc_bpp(struct drm_atomic_state *state, + struct drm_dp_mst_topology_state *mst_state, struct dc_link *dc_link, struct dsc_mst_fairness_params *params, struct dsc_mst_fairness_vars *vars, @@ -608,12 +717,9 @@ static void increase_dsc_bpp(struct drm_atomic_state *state, int min_initial_slack; int next_index; int remaining_to_increase = 0; - int pbn_per_timeslot; int link_timeslots_used; int fair_pbn_alloc; - pbn_per_timeslot = dm_mst_get_pbn_divider(dc_link); - for (i = 0; i < count; i++) { if (vars[i + k].dsc_enabled) { initial_slack[i] = @@ -644,56 +750,54 @@ static void increase_dsc_bpp(struct drm_atomic_state *state, link_timeslots_used = 0; for (i = 0; i < count; i++) - link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, pbn_per_timeslot); + link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, mst_state->pbn_div); - fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot; + fair_pbn_alloc = + (63 - link_timeslots_used) / remaining_to_increase * mst_state->pbn_div; if (initial_slack[next_index] > fair_pbn_alloc) { vars[next_index].pbn += fair_pbn_alloc; - if (drm_dp_atomic_find_vcpi_slots(state, + if (drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, - vars[next_index].pbn, - pbn_per_timeslot) < 0) - return; + vars[next_index].pbn) < 0) + return false; if (!drm_dp_mst_atomic_check(state)) { vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn); } else { vars[next_index].pbn -= fair_pbn_alloc; - if (drm_dp_atomic_find_vcpi_slots(state, + if (drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, - vars[next_index].pbn, - pbn_per_timeslot) < 0) - return; + vars[next_index].pbn) < 0) + return false; } } else { vars[next_index].pbn += initial_slack[next_index]; - if (drm_dp_atomic_find_vcpi_slots(state, + if (drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, - vars[next_index].pbn, - pbn_per_timeslot) < 0) - return; + vars[next_index].pbn) < 0) + return false; if (!drm_dp_mst_atomic_check(state)) { vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16; } else { vars[next_index].pbn -= initial_slack[next_index]; - if (drm_dp_atomic_find_vcpi_slots(state, + if (drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, - vars[next_index].pbn, - pbn_per_timeslot) < 0) - return; + vars[next_index].pbn) < 0) + return false; } } bpp_increased[next_index] = true; remaining_to_increase--; } + return true; } -static void try_disable_dsc(struct drm_atomic_state *state, +static bool try_disable_dsc(struct drm_atomic_state *state, struct dc_link *dc_link, struct dsc_mst_fairness_params *params, struct dsc_mst_fairness_vars *vars, @@ -736,46 +840,55 @@ static void try_disable_dsc(struct drm_atomic_state *state, break; vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps); - if (drm_dp_atomic_find_vcpi_slots(state, + if (drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, - vars[next_index].pbn, - dm_mst_get_pbn_divider(dc_link)) < 0) - return; + vars[next_index].pbn) < 0) + return false; if (!drm_dp_mst_atomic_check(state)) { 
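/*
 * [Editor's note] Both optimization passes above use the same speculative
 * pattern: adjust vars[].pbn, re-run drm_dp_atomic_find_time_slots() and
 * drm_dp_mst_atomic_check(), and roll the PBN back if the topology no longer
 * fits. For scale (rough numbers, ignoring FEC overhead): an HBR2 x4 link
 * carries 5.4 Gb/s * 4 lanes * 8/10 = 17,280,000 kb/s, so
 * mst_state->pbn_div = 17,280,000 / (8 * 1000 * 54) = 40 PBN per time slot.
 * A 4k60 8bpc RGB stream (~594 MHz * 24 bpp = 14,256,000 kb/s) needs
 * 14,256,000 * 1.006 * 64 / (54 * 8 * 1000) ~= 2125 PBN, i.e.
 * DIV_ROUND_UP(2125, 40) = 54 of the 63 available time slots.
 */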
vars[next_index].dsc_enabled = false; vars[next_index].bpp_x16 = 0; } else { vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps); - if (drm_dp_atomic_find_vcpi_slots(state, + if (drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, - vars[next_index].pbn, - dm_mst_get_pbn_divider(dc_link)) < 0) - return; + vars[next_index].pbn) < 0) + return false; } tried[next_index] = true; remaining_to_try--; } + return true; } static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, struct dc_state *dc_state, struct dc_link *dc_link, struct dsc_mst_fairness_vars *vars, + struct drm_dp_mst_topology_mgr *mgr, int *link_vars_start_index) { - int i, k; struct dc_stream_state *stream; struct dsc_mst_fairness_params params[MAX_PIPES]; struct amdgpu_dm_connector *aconnector; + struct drm_dp_mst_topology_state *mst_state = drm_atomic_get_mst_topology_state(state, mgr); int count = 0; + int i, k; bool debugfs_overwrite = false; memset(params, 0, sizeof(params)); + if (IS_ERR(mst_state)) + return false; + + mst_state->pbn_div = dm_mst_get_pbn_divider(dc_link); +#if defined(CONFIG_DRM_AMD_DC_DCN) + drm_dp_mst_update_slots(mst_state, dc_link_dp_mst_decide_link_encoding_format(dc_link)); +#endif + /* Set up params */ for (i = 0; i < dc_state->stream_count; i++) { struct dc_dsc_policy dsc_policy = {0}; @@ -834,11 +947,8 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); vars[i + k].dsc_enabled = false; vars[i + k].bpp_x16 = 0; - if (drm_dp_atomic_find_vcpi_slots(state, - params[i].port->mgr, - params[i].port, - vars[i + k].pbn, - dm_mst_get_pbn_divider(dc_link)) < 0) + if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port, + vars[i + k].pbn) < 0) return false; } if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) { @@ -852,21 +962,15 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps); vars[i + k].dsc_enabled = true; vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16; - if (drm_dp_atomic_find_vcpi_slots(state, - params[i].port->mgr, - params[i].port, - vars[i + k].pbn, - dm_mst_get_pbn_divider(dc_link)) < 0) + if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr, + params[i].port, vars[i + k].pbn) < 0) return false; } else { vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); vars[i + k].dsc_enabled = false; vars[i + k].bpp_x16 = 0; - if (drm_dp_atomic_find_vcpi_slots(state, - params[i].port->mgr, - params[i].port, - vars[i + k].pbn, - dm_mst_get_pbn_divider(dc_link)) < 0) + if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr, + params[i].port, vars[i + k].pbn) < 0) return false; } } @@ -874,9 +978,11 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, return false; /* Optimize degree of compression */ - increase_dsc_bpp(state, dc_link, params, vars, count, k); + if (!increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k)) + return false; - try_disable_dsc(state, dc_link, params, vars, count, k); + if (!try_disable_dsc(state, dc_link, params, vars, count, k)) + return false; set_dsc_configs_from_fairness_vars(params, vars, count, k); @@ -888,22 +994,31 @@ static bool is_dsc_need_re_compute( struct dc_state *dc_state, struct dc_link *dc_link) { - int i; + int i, j; bool is_dsc_need_re_compute = false; + struct amdgpu_dm_connector 
*stream_on_link[MAX_PIPES]; + int new_stream_on_link_num = 0; + struct amdgpu_dm_connector *aconnector; + struct dc_stream_state *stream; + const struct dc *dc = dc_link->dc; - /* only check phy used by mst branch */ + /* only check phy used by dsc mst branch */ if (dc_link->type != dc_connection_mst_branch) return false; + if (!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT || + dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT)) + return false; + + for (i = 0; i < MAX_PIPES; i++) + stream_on_link[i] = NULL; + /* check if there is mode change in new request */ for (i = 0; i < dc_state->stream_count; i++) { - struct amdgpu_dm_connector *aconnector; - struct dc_stream_state *stream; struct drm_crtc_state *new_crtc_state; struct drm_connector_state *new_conn_state; stream = dc_state->streams[i]; - if (!stream) continue; @@ -915,8 +1030,10 @@ static bool is_dsc_need_re_compute( if (!aconnector) continue; - new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base); + stream_on_link[new_stream_on_link_num] = aconnector; + new_stream_on_link_num++; + new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base); if (!new_conn_state) continue; @@ -927,7 +1044,6 @@ static bool is_dsc_need_re_compute( continue; new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); - if (!new_crtc_state) continue; @@ -937,7 +1053,34 @@ static bool is_dsc_need_re_compute( if (new_crtc_state->enable && new_crtc_state->active) { if (new_crtc_state->mode_changed || new_crtc_state->active_changed || new_crtc_state->connectors_changed) - is_dsc_need_re_compute = true; + return true; + } + } + + /* check current_state if there stream on link but it is not in + * new request state + */ + for (i = 0; i < dc->current_state->stream_count; i++) { + stream = dc->current_state->streams[i]; + /* only check stream on the mst hub */ + if (stream->link != dc_link) + continue; + + aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + if (!aconnector) + continue; + + for (j = 0; j < new_stream_on_link_num; j++) { + if (stream_on_link[j]) { + if (aconnector == stream_on_link[j]) + break; + } + } + + if (j == new_stream_on_link_num) { + /* not in new state */ + is_dsc_need_re_compute = true; + break; } } @@ -981,8 +1124,9 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, continue; mutex_lock(&aconnector->mst_mgr.lock); - if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, - vars, &link_vars_start_index)) { + if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, + &aconnector->mst_mgr, + &link_vars_start_index)) { mutex_unlock(&aconnector->mst_mgr.lock); return false; } @@ -1005,4 +1149,313 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, return true; } +static bool + pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, + struct dc_state *dc_state, + struct dsc_mst_fairness_vars *vars) +{ + int i, j; + struct dc_stream_state *stream; + bool computed_streams[MAX_PIPES]; + struct amdgpu_dm_connector *aconnector; + int link_vars_start_index = 0; + + for (i = 0; i < dc_state->stream_count; i++) + computed_streams[i] = false; + + for (i = 0; i < dc_state->stream_count; i++) { + stream = dc_state->streams[i]; + + if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST) + continue; + + aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + + if (!aconnector || !aconnector->dc_sink) + continue; 
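/*
 * [Editor's note] is_dsc_need_re_compute() above now returns true in two
 * cases: a mode/active/connectors change on a stream in the new request, or
 * a stream present in dc->current_state on this MST link but missing from
 * the new request (stream_on_link[] is the set it differences against). The
 * pre-compute pass below then reuses the compute-once-per-link memoization
 * from compute_mst_dsc_configs_for_state(): once one stream's link is
 * solved, every stream sharing stream->link is marked in computed_streams[]
 * and skipped.
 */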
+ + if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported) + continue; + + if (computed_streams[i]) + continue; + + if (!is_dsc_need_re_compute(state, dc_state, stream->link)) + continue; + + mutex_lock(&aconnector->mst_mgr.lock); + if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, + &aconnector->mst_mgr, + &link_vars_start_index)) { + mutex_unlock(&aconnector->mst_mgr.lock); + return false; + } + mutex_unlock(&aconnector->mst_mgr.lock); + + for (j = 0; j < dc_state->stream_count; j++) { + if (dc_state->streams[j]->link == stream->link) + computed_streams[j] = true; + } + } + + return true; +} + +static int find_crtc_index_in_state_by_stream(struct drm_atomic_state *state, + struct dc_stream_state *stream) +{ + int i; + struct drm_crtc *crtc; + struct drm_crtc_state *new_state, *old_state; + + for_each_oldnew_crtc_in_state(state, crtc, old_state, new_state, i) { + struct dm_crtc_state *dm_state = to_dm_crtc_state(new_state); + + if (dm_state->stream == stream) + return i; + } + return -1; +} + +static bool is_link_to_dschub(struct dc_link *dc_link) +{ + union dpcd_dsc_basic_capabilities *dsc_caps = + &dc_link->dpcd_caps.dsc_caps.dsc_basic_caps; + + /* only check phy used by dsc mst branch */ + if (dc_link->type != dc_connection_mst_branch) + return false; + + if (!(dsc_caps->fields.dsc_support.DSC_SUPPORT || + dsc_caps->fields.dsc_support.DSC_PASSTHROUGH_SUPPORT)) + return false; + return true; +} + +static bool is_dsc_precompute_needed(struct drm_atomic_state *state) +{ + int i; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + bool ret = false; + + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(new_crtc_state); + + if (!amdgpu_dm_find_first_crtc_matching_connector(state, crtc)) { + ret = false; + break; + } + if (dm_crtc_state->stream && dm_crtc_state->stream->link) + if (is_link_to_dschub(dm_crtc_state->stream->link)) + ret = true; + } + return ret; +} + +bool pre_validate_dsc(struct drm_atomic_state *state, + struct dm_atomic_state **dm_state_ptr, + struct dsc_mst_fairness_vars *vars) +{ + int i; + struct dm_atomic_state *dm_state; + struct dc_state *local_dc_state = NULL; + int ret = 0; + + if (!is_dsc_precompute_needed(state)) { + DRM_INFO_ONCE("DSC precompute is not needed.\n"); + return true; + } + if (dm_atomic_get_state(state, dm_state_ptr)) { + DRM_INFO_ONCE("dm_atomic_get_state() failed\n"); + return false; + } + dm_state = *dm_state_ptr; + + /* + * create local vailable for dc_state. copy content of streams of dm_state->context + * to local variable. make sure stream pointer of local variable not the same as stream + * from dm_state->context. 
+ */ + + local_dc_state = kmemdup(dm_state->context, sizeof(struct dc_state), GFP_KERNEL); + if (!local_dc_state) + return false; + + for (i = 0; i < local_dc_state->stream_count; i++) { + struct dc_stream_state *stream = dm_state->context->streams[i]; + int ind = find_crtc_index_in_state_by_stream(state, stream); + + if (ind >= 0) { + struct amdgpu_dm_connector *aconnector; + struct drm_connector_state *drm_new_conn_state; + struct dm_connector_state *dm_new_conn_state; + struct dm_crtc_state *dm_old_crtc_state; + + aconnector = + amdgpu_dm_find_first_crtc_matching_connector(state, + state->crtcs[ind].ptr); + drm_new_conn_state = + drm_atomic_get_new_connector_state(state, + &aconnector->base); + dm_new_conn_state = to_dm_connector_state(drm_new_conn_state); + dm_old_crtc_state = to_dm_crtc_state(state->crtcs[ind].old_state); + + local_dc_state->streams[i] = + create_validate_stream_for_sink(aconnector, + &state->crtcs[ind].new_state->mode, + dm_new_conn_state, + dm_old_crtc_state->stream); + if (local_dc_state->streams[i] == NULL) { + ret = -EINVAL; + break; + } + } + } + + if (ret != 0) + goto clean_exit; + + if (!pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars)) { + DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n"); + ret = -EINVAL; + goto clean_exit; + } + + /* + * compare local_streams -> timing with dm_state->context, + * if the same set crtc_state->mode-change = 0; + */ + for (i = 0; i < local_dc_state->stream_count; i++) { + struct dc_stream_state *stream = dm_state->context->streams[i]; + + if (local_dc_state->streams[i] && + is_timing_changed(stream, local_dc_state->streams[i])) { + DRM_INFO_ONCE("crtc[%d] needs mode_changed\n", i); + } else { + int ind = find_crtc_index_in_state_by_stream(state, stream); + + if (ind >= 0) + state->crtcs[ind].new_state->mode_changed = 0; + } + } +clean_exit: + for (i = 0; i < local_dc_state->stream_count; i++) { + struct dc_stream_state *stream = dm_state->context->streams[i]; + + if (local_dc_state->streams[i] != stream) + dc_stream_release(local_dc_state->streams[i]); + } + + kfree(local_dc_state); + + return (ret == 0); +} + +static unsigned int kbps_from_pbn(unsigned int pbn) +{ + unsigned int kbps = pbn; + + kbps *= (1000000 / PEAK_FACTOR_X1000); + kbps *= 8; + kbps *= 54; + kbps /= 64; + + return kbps; +} + +static bool is_dsc_common_config_possible(struct dc_stream_state *stream, + struct dc_dsc_bw_range *bw_range) +{ + struct dc_dsc_policy dsc_policy = {0}; + + dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy); + dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0], + stream->sink->ctx->dc->debug.dsc_min_slice_height_override, + dsc_policy.min_target_bpp * 16, + dsc_policy.max_target_bpp * 16, + &stream->sink->dsc_caps.dsc_dec_caps, + &stream->timing, bw_range); + + return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16; +} +#endif /* CONFIG_DRM_AMD_DC_DCN */ + +enum dc_status dm_dp_mst_is_port_support_mode( + struct amdgpu_dm_connector *aconnector, + struct dc_stream_state *stream) +{ + int bpp, pbn, branch_max_throughput_mps = 0; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct dc_link_settings cur_link_settings; + unsigned int end_to_end_bw_in_kbps = 0; + unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0; + unsigned int max_compressed_bw_in_kbps = 0; + struct dc_dsc_bw_range bw_range = {0}; + + /* + * check if the mode could be supported if DSC pass-through is supported + * AND check if there enough bandwidth available to support the mode + * with 
DSC enabled. + */ + if (is_dsc_common_config_possible(stream, &bw_range) && + aconnector->port->passthrough_aux) { + mutex_lock(&aconnector->mst_mgr.lock); + + cur_link_settings = stream->link->verified_link_cap; + + upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, + &cur_link_settings + ); + down_link_bw_in_kbps = kbps_from_pbn(aconnector->port->full_pbn); + + /* pick the bottleneck */ + end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps, + down_link_bw_in_kbps); + + mutex_unlock(&aconnector->mst_mgr.lock); + + /* + * use the maximum dsc compression bandwidth as the required + * bandwidth for the mode + */ + max_compressed_bw_in_kbps = bw_range.min_kbps; + + if (end_to_end_bw_in_kbps < max_compressed_bw_in_kbps) { + DRM_DEBUG_DRIVER("Mode does not fit into DSC pass-through bandwidth validation\n"); + return DC_FAIL_BANDWIDTH_VALIDATE; + } + } else { #endif + /* check if mode could be supported within full_pbn */ + bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3; + pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false); + + if (pbn > aconnector->port->full_pbn) + return DC_FAIL_BANDWIDTH_VALIDATE; +#if defined(CONFIG_DRM_AMD_DC_DCN) + } +#endif + + /* check is mst dsc output bandwidth branch_overall_throughput_0_mps */ + switch (stream->timing.pixel_encoding) { + case PIXEL_ENCODING_RGB: + case PIXEL_ENCODING_YCBCR444: + branch_max_throughput_mps = + aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_0_mps; + break; + case PIXEL_ENCODING_YCBCR422: + case PIXEL_ENCODING_YCBCR420: + branch_max_throughput_mps = + aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_1_mps; + break; + default: + break; + } + + if (branch_max_throughput_mps != 0 && + ((stream->timing.pix_clk_100hz / 10) > branch_max_throughput_mps * 1000)) + return DC_FAIL_BANDWIDTH_VALIDATE; + + return DC_OK; +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h index 900d3f7a8498..b92a7c5671aa 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h @@ -26,6 +26,14 @@ #ifndef __DAL_AMDGPU_DM_MST_TYPES_H__ #define __DAL_AMDGPU_DM_MST_TYPES_H__ +#define DP_BRANCH_DEVICE_ID_90CC24 0x90CC24 + +#define SYNAPTICS_RC_COMMAND 0x4B2 +#define SYNAPTICS_RC_RESULT 0x4B3 +#define SYNAPTICS_RC_LENGTH 0x4B8 +#define SYNAPTICS_RC_OFFSET 0x4BC +#define SYNAPTICS_RC_DATA 0x4C0 + struct amdgpu_display_manager; struct amdgpu_dm_connector; @@ -38,8 +46,6 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, void dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev); -#if defined(CONFIG_DRM_AMD_DC_DCN) - struct dsc_mst_fairness_vars { int pbn; bool dsc_enabled; @@ -50,6 +56,15 @@ struct dsc_mst_fairness_vars { bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, struct dc_state *dc_state, struct dsc_mst_fairness_vars *vars); -#endif + +bool needs_dsc_aux_workaround(struct dc_link *link); + +bool pre_validate_dsc(struct drm_atomic_state *state, + struct dm_atomic_state **dm_state_ptr, + struct dsc_mst_fairness_vars *vars); + +enum dc_status dm_dp_mst_is_port_support_mode( + struct amdgpu_dm_connector *aconnector, + struct dc_stream_state *stream); #endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c new file mode 100644 index 000000000000..e6854f7270a6 --- 
/dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -0,0 +1,1614 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_blend.h> +#include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_plane_helper.h> +#include <drm/drm_fourcc.h> + +#include "amdgpu.h" +#include "dal_asic_id.h" +#include "amdgpu_display.h" +#include "amdgpu_dm_trace.h" +#include "amdgpu_dm_plane.h" +#include "gc/gc_11_0_0_offset.h" +#include "gc/gc_11_0_0_sh_mask.h" + +/* + * TODO: these are currently initialized to rgb formats only. + * For future use cases we should either initialize them dynamically based on + * plane capabilities, or initialize this array to all formats, so internal drm + * check will succeed, and let DC implement proper check + */ +static const uint32_t rgb_formats[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_XRGB16161616, + DRM_FORMAT_XBGR16161616, + DRM_FORMAT_ARGB16161616, + DRM_FORMAT_ABGR16161616, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGB565, +}; + +static const uint32_t overlay_formats[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGB565 +}; + +static const u32 cursor_formats[] = { + DRM_FORMAT_ARGB8888 +}; + +enum dm_micro_swizzle { + MICRO_SWIZZLE_Z = 0, + MICRO_SWIZZLE_S = 1, + MICRO_SWIZZLE_D = 2, + MICRO_SWIZZLE_R = 3 +}; + +const struct drm_format_info *amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd) +{ + return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]); +} + +void fill_blending_from_plane_state(const struct drm_plane_state *plane_state, + bool *per_pixel_alpha, bool *pre_multiplied_alpha, + bool *global_alpha, int *global_alpha_value) +{ + *per_pixel_alpha = false; + *pre_multiplied_alpha = true; + *global_alpha = false; + *global_alpha_value = 0xff; + + if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY) + return; + + if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI || + plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) { + static const uint32_t alpha_formats[] = { + DRM_FORMAT_ARGB8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_ABGR8888, + }; + uint32_t format = 
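/*
 * [Editor's note] The two KMS blend modes handled here differ only in where
 * the per-pixel alpha is applied:
 *   DRM_MODE_BLEND_PREMULTI: dst.RGB = src.RGB + dst.RGB * (1 - src.alpha)
 *   DRM_MODE_BLEND_COVERAGE: dst.RGB = src.RGB * src.alpha + dst.RGB * (1 - src.alpha)
 * which is why the COVERAGE case just below clears *pre_multiplied_alpha.
 */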
plane_state->fb->format->format; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) { + if (format == alpha_formats[i]) { + *per_pixel_alpha = true; + break; + } + } + + if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) + *pre_multiplied_alpha = false; + } + + if (plane_state->alpha < 0xffff) { + *global_alpha = true; + *global_alpha_value = plane_state->alpha >> 8; + } +} + +static void add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod) +{ + if (!*mods) + return; + + if (*cap - *size < 1) { + uint64_t new_cap = *cap * 2; + uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL); + + if (!new_mods) { + kfree(*mods); + *mods = NULL; + return; + } + + memcpy(new_mods, *mods, sizeof(uint64_t) * *size); + kfree(*mods); + *mods = new_mods; + *cap = new_cap; + } + + (*mods)[*size] = mod; + *size += 1; +} + +static bool modifier_has_dcc(uint64_t modifier) +{ + return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier); +} + +static unsigned modifier_gfx9_swizzle_mode(uint64_t modifier) +{ + if (modifier == DRM_FORMAT_MOD_LINEAR) + return 0; + + return AMD_FMT_MOD_GET(TILE, modifier); +} + +static void fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info, + uint64_t tiling_flags) +{ + /* Fill GFX8 params */ + if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) { + unsigned int bankw, bankh, mtaspect, tile_split, num_banks; + + bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); + bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); + mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); + tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); + num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); + + /* XXX fix me for VI */ + tiling_info->gfx8.num_banks = num_banks; + tiling_info->gfx8.array_mode = + DC_ARRAY_2D_TILED_THIN1; + tiling_info->gfx8.tile_split = tile_split; + tiling_info->gfx8.bank_width = bankw; + tiling_info->gfx8.bank_height = bankh; + tiling_info->gfx8.tile_aspect = mtaspect; + tiling_info->gfx8.tile_mode = + DC_ADDR_SURF_MICRO_TILING_DISPLAY; + } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) + == DC_ARRAY_1D_TILED_THIN1) { + tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1; + } + + tiling_info->gfx8.pipe_config = + AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); +} + +static void fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev, + union dc_tiling_info *tiling_info) +{ + /* Fill GFX9 params */ + tiling_info->gfx9.num_pipes = + adev->gfx.config.gb_addr_config_fields.num_pipes; + tiling_info->gfx9.num_banks = + adev->gfx.config.gb_addr_config_fields.num_banks; + tiling_info->gfx9.pipe_interleave = + adev->gfx.config.gb_addr_config_fields.pipe_interleave_size; + tiling_info->gfx9.num_shader_engines = + adev->gfx.config.gb_addr_config_fields.num_se; + tiling_info->gfx9.max_compressed_frags = + adev->gfx.config.gb_addr_config_fields.max_compress_frags; + tiling_info->gfx9.num_rb_per_se = + adev->gfx.config.gb_addr_config_fields.num_rb_per_se; + tiling_info->gfx9.shaderEnable = 1; + if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) + tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs; +} + +static void fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev, + union dc_tiling_info *tiling_info, + uint64_t modifier) +{ + unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier); + unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, 
modifier); + unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier); + unsigned int pipes_log2; + + pipes_log2 = min(5u, mod_pipe_xor_bits); + + fill_gfx9_tiling_info_from_device(adev, tiling_info); + + if (!IS_AMD_FMT_MOD(modifier)) + return; + + tiling_info->gfx9.num_pipes = 1u << pipes_log2; + tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2); + + if (adev->family >= AMDGPU_FAMILY_NV) { + tiling_info->gfx9.num_pkrs = 1u << pkrs_log2; + } else { + tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits; + + /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */ + } +} + +static int validate_dcc(struct amdgpu_device *adev, + const enum surface_pixel_format format, + const enum dc_rotation_angle rotation, + const union dc_tiling_info *tiling_info, + const struct dc_plane_dcc_param *dcc, + const struct dc_plane_address *address, + const struct plane_size *plane_size) +{ + struct dc *dc = adev->dm.dc; + struct dc_dcc_surface_param input; + struct dc_surface_dcc_cap output; + + memset(&input, 0, sizeof(input)); + memset(&output, 0, sizeof(output)); + + if (!dcc->enable) + return 0; + + if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || + !dc->cap_funcs.get_dcc_compression_cap) + return -EINVAL; + + input.format = format; + input.surface_size.width = plane_size->surface_size.width; + input.surface_size.height = plane_size->surface_size.height; + input.swizzle_mode = tiling_info->gfx9.swizzle; + + if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180) + input.scan = SCAN_DIRECTION_HORIZONTAL; + else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270) + input.scan = SCAN_DIRECTION_VERTICAL; + + if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output)) + return -EINVAL; + + if (!output.capable) + return -EINVAL; + + if (dcc->independent_64b_blks == 0 && + output.grph.rgb.independent_64b_blks != 0) + return -EINVAL; + + return 0; +} + +static int fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev, + const struct amdgpu_framebuffer *afb, + const enum surface_pixel_format format, + const enum dc_rotation_angle rotation, + const struct plane_size *plane_size, + union dc_tiling_info *tiling_info, + struct dc_plane_dcc_param *dcc, + struct dc_plane_address *address, + const bool force_disable_dcc) +{ + const uint64_t modifier = afb->base.modifier; + int ret = 0; + + fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier); + tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier); + + if (modifier_has_dcc(modifier) && !force_disable_dcc) { + uint64_t dcc_address = afb->address + afb->base.offsets[1]; + bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier); + bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier); + + dcc->enable = 1; + dcc->meta_pitch = afb->base.pitches[1]; + dcc->independent_64b_blks = independent_64b_blks; + if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) { + if (independent_64b_blks && independent_128b_blks) + dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl; + else if (independent_128b_blks) + dcc->dcc_ind_blk = hubp_ind_block_128b; + else if (independent_64b_blks && !independent_128b_blks) + dcc->dcc_ind_blk = hubp_ind_block_64b; + else + dcc->dcc_ind_blk = hubp_ind_block_unconstrained; + } else { + if (independent_64b_blks) + dcc->dcc_ind_blk = hubp_ind_block_64b; + else + dcc->dcc_ind_blk = hubp_ind_block_unconstrained; + } + + address->grph.meta_addr.low_part = 
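/*
 * [Editor's note] The dcc_ind_blk selection above reduces to this table for
 * TILE_VERSION >= GFX10_RBPLUS:
 *   independent 64B and 128B -> hubp_ind_block_64b_no_128bcl
 *   independent 128B only    -> hubp_ind_block_128b
 *   independent 64B only     -> hubp_ind_block_64b
 *   neither                  -> hubp_ind_block_unconstrained
 * while older tiling versions only distinguish 64B vs unconstrained. The DCC
 * metadata surface rides in the framebuffer's second plane, which is why its
 * address and pitch come from afb->base.offsets[1] and afb->base.pitches[1].
 */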
lower_32_bits(dcc_address); + address->grph.meta_addr.high_part = upper_32_bits(dcc_address); + } + + ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size); + if (ret) + drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret); + + return ret; +} + +static void add_gfx10_1_modifiers(const struct amdgpu_device *adev, + uint64_t **mods, uint64_t *size, uint64_t *capacity) +{ + int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); + + + /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */ + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); +} + +static void add_gfx9_modifiers(const struct amdgpu_device *adev, + uint64_t **mods, uint64_t *size, uint64_t *capacity) +{ + int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); + int pipe_xor_bits = min(8, pipes + + ilog2(adev->gfx.config.gb_addr_config_fields.num_se)); + int bank_xor_bits = min(8 - pipe_xor_bits, + ilog2(adev->gfx.config.gb_addr_config_fields.num_banks)); + int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) + + ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se); + + + if (adev->family == AMDGPU_FAMILY_RV) { + /* Raven2 and later */ + bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81; + + /* + * No _D DCC swizzles yet because we only allow 32bpp, which + * doesn't support _D on DCN + */ + + if (has_constant_encode) { + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1)); + } + + add_modifier(mods, size, capacity, 
AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0)); + + if (has_constant_encode) { + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | + + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(RB, rb) | + AMD_FMT_MOD_SET(PIPE, pipes)); + } + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) | + AMD_FMT_MOD_SET(RB, rb) | + AMD_FMT_MOD_SET(PIPE, pipes)); + } + + /* + * Only supported for 64bpp on Raven, will be filtered on format in + * dm_plane_format_mod_supported. + */ + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); + + if (adev->family == AMDGPU_FAMILY_RV) { + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); + } + + /* + * Only supported for 64bpp on Raven, will be filtered on format in + * dm_plane_format_mod_supported. 
+ */ + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + + if (adev->family == AMDGPU_FAMILY_RV) { + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + } +} + +static void add_gfx10_3_modifiers(const struct amdgpu_device *adev, + uint64_t **mods, uint64_t *size, uint64_t *capacity) +{ + int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); + int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs)); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs)); + + /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */ + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | + 
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
+}
+
+static void add_gfx11_modifiers(struct amdgpu_device *adev,
+		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
+{
+	int num_pipes = 0;
+	int pipe_xor_bits = 0;
+	int num_pkrs = 0;
+	int pkrs = 0;
+	u32 gb_addr_config;
+	u8 i = 0;
+	unsigned swizzle_r_x;
+	uint64_t modifier_r_x;
+	uint64_t modifier_dcc_best;
+	uint64_t modifier_dcc_4k;
+
+	/* TODO: GFX11 IP HW init hasn't finished and we get zero if we read from
+	 * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes}
+	 */
+	gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
+	ASSERT(gb_addr_config != 0);
+
+	num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
+	pkrs = ilog2(num_pkrs);
+	num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES);
+	pipe_xor_bits = ilog2(num_pipes);
+
+	for (i = 0; i < 2; i++) {
+		/* Insert the best one first. */
+		/* R_X swizzle modes are the best for rendering and DCC requires them. */
+		if (num_pipes > 16)
+			swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
+		else
+			swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;
+
+		modifier_r_x = AMD_FMT_MOD |
+			       AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
+			       AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
+			       AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
+			       AMD_FMT_MOD_SET(PACKERS, pkrs);
+
+		/* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
+		modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
+				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
+				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
+				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
+
+		/* DCC settings for 4K and greater resolutions. (required by display hw) */
+		modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
+				  AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
+				  AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
+				  AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
+
+		add_modifier(mods, size, capacity, modifier_dcc_best);
+		add_modifier(mods, size, capacity, modifier_dcc_4k);
+
+		add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
+		add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));
+
+		add_modifier(mods, size, capacity, modifier_r_x);
+	}
+
+	add_modifier(mods, size, capacity, AMD_FMT_MOD |
+		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
+		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
+}
+
+static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
+{
+	uint64_t size = 0, capacity = 128;
+	*mods = NULL;
+
+	/* We have not hooked up any pre-GFX9 modifiers. */
+	if (adev->family < AMDGPU_FAMILY_AI)
+		return 0;
+
+	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
+
+	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
+		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
+		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
+		return *mods ?
0 : -ENOMEM; + } + + switch (adev->family) { + case AMDGPU_FAMILY_AI: + case AMDGPU_FAMILY_RV: + add_gfx9_modifiers(adev, mods, &size, &capacity); + break; + case AMDGPU_FAMILY_NV: + case AMDGPU_FAMILY_VGH: + case AMDGPU_FAMILY_YC: + case AMDGPU_FAMILY_GC_10_3_6: + case AMDGPU_FAMILY_GC_10_3_7: + if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) + add_gfx10_3_modifiers(adev, mods, &size, &capacity); + else + add_gfx10_1_modifiers(adev, mods, &size, &capacity); + break; + case AMDGPU_FAMILY_GC_11_0_0: + case AMDGPU_FAMILY_GC_11_0_1: + add_gfx11_modifiers(adev, mods, &size, &capacity); + break; + } + + add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); + + /* INVALID marks the end of the list. */ + add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); + + if (!*mods) + return -ENOMEM; + + return 0; +} + +static int get_plane_formats(const struct drm_plane *plane, + const struct dc_plane_cap *plane_cap, + uint32_t *formats, int max_formats) +{ + int i, num_formats = 0; + + /* + * TODO: Query support for each group of formats directly from + * DC plane caps. This will require adding more formats to the + * caps list. + */ + + switch (plane->type) { + case DRM_PLANE_TYPE_PRIMARY: + for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) { + if (num_formats >= max_formats) + break; + + formats[num_formats++] = rgb_formats[i]; + } + + if (plane_cap && plane_cap->pixel_format_support.nv12) + formats[num_formats++] = DRM_FORMAT_NV12; + if (plane_cap && plane_cap->pixel_format_support.p010) + formats[num_formats++] = DRM_FORMAT_P010; + if (plane_cap && plane_cap->pixel_format_support.fp16) { + formats[num_formats++] = DRM_FORMAT_XRGB16161616F; + formats[num_formats++] = DRM_FORMAT_ARGB16161616F; + formats[num_formats++] = DRM_FORMAT_XBGR16161616F; + formats[num_formats++] = DRM_FORMAT_ABGR16161616F; + } + break; + + case DRM_PLANE_TYPE_OVERLAY: + for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) { + if (num_formats >= max_formats) + break; + + formats[num_formats++] = overlay_formats[i]; + } + break; + + case DRM_PLANE_TYPE_CURSOR: + for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) { + if (num_formats >= max_formats) + break; + + formats[num_formats++] = cursor_formats[i]; + } + break; + } + + return num_formats; +} + +#ifdef CONFIG_DRM_AMD_DC_HDR +static int attach_color_mgmt_properties(struct amdgpu_display_manager *dm, struct drm_plane *plane) +{ + drm_object_attach_property(&plane->base, + dm->degamma_lut_property, + 0); + drm_object_attach_property(&plane->base, + dm->degamma_lut_size_property, + MAX_COLOR_LUT_ENTRIES); + drm_object_attach_property(&plane->base, dm->ctm_property, + 0); + drm_object_attach_property(&plane->base, dm->sdr_boost_property, + DEFAULT_SDR_BOOST); + + return 0; +} +#endif + +int fill_plane_buffer_attributes(struct amdgpu_device *adev, + const struct amdgpu_framebuffer *afb, + const enum surface_pixel_format format, + const enum dc_rotation_angle rotation, + const uint64_t tiling_flags, + union dc_tiling_info *tiling_info, + struct plane_size *plane_size, + struct dc_plane_dcc_param *dcc, + struct dc_plane_address *address, + bool tmz_surface, + bool force_disable_dcc) +{ + const struct drm_framebuffer *fb = &afb->base; + int ret; + + memset(tiling_info, 0, sizeof(*tiling_info)); + memset(plane_size, 0, sizeof(*plane_size)); + memset(dcc, 0, sizeof(*dcc)); + memset(address, 0, sizeof(*address)); + + address->tmz_surface = tmz_surface; + + if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { + uint64_t addr = afb->address + fb->offsets[0]; + + 
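/*
+		 * Single-plane (non-video) surface: note that DC takes the
+		 * pitch below in pixels, not bytes, hence the division by cpp.
+		 */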
plane_size->surface_size.x = 0; + plane_size->surface_size.y = 0; + plane_size->surface_size.width = fb->width; + plane_size->surface_size.height = fb->height; + plane_size->surface_pitch = + fb->pitches[0] / fb->format->cpp[0]; + + address->type = PLN_ADDR_TYPE_GRAPHICS; + address->grph.addr.low_part = lower_32_bits(addr); + address->grph.addr.high_part = upper_32_bits(addr); + } else if (format < SURFACE_PIXEL_FORMAT_INVALID) { + uint64_t luma_addr = afb->address + fb->offsets[0]; + uint64_t chroma_addr = afb->address + fb->offsets[1]; + + plane_size->surface_size.x = 0; + plane_size->surface_size.y = 0; + plane_size->surface_size.width = fb->width; + plane_size->surface_size.height = fb->height; + plane_size->surface_pitch = + fb->pitches[0] / fb->format->cpp[0]; + + plane_size->chroma_size.x = 0; + plane_size->chroma_size.y = 0; + /* TODO: set these based on surface format */ + plane_size->chroma_size.width = fb->width / 2; + plane_size->chroma_size.height = fb->height / 2; + + plane_size->chroma_pitch = + fb->pitches[1] / fb->format->cpp[1]; + + address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE; + address->video_progressive.luma_addr.low_part = + lower_32_bits(luma_addr); + address->video_progressive.luma_addr.high_part = + upper_32_bits(luma_addr); + address->video_progressive.chroma_addr.low_part = + lower_32_bits(chroma_addr); + address->video_progressive.chroma_addr.high_part = + upper_32_bits(chroma_addr); + } + + if (adev->family >= AMDGPU_FAMILY_AI) { + ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format, + rotation, plane_size, + tiling_info, dcc, + address, + force_disable_dcc); + if (ret) + return ret; + } else { + fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags); + } + + return 0; +} + +static int dm_plane_helper_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + struct amdgpu_framebuffer *afb; + struct drm_gem_object *obj; + struct amdgpu_device *adev; + struct amdgpu_bo *rbo; + struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old; + uint32_t domain; + int r; + + if (!new_state->fb) { + DRM_DEBUG_KMS("No FB bound\n"); + return 0; + } + + afb = to_amdgpu_framebuffer(new_state->fb); + obj = new_state->fb->obj[0]; + rbo = gem_to_amdgpu_bo(obj); + adev = amdgpu_ttm_adev(rbo->tbo.bdev); + + r = amdgpu_bo_reserve(rbo, true); + if (r) { + dev_err(adev->dev, "fail to reserve bo (%d)\n", r); + return r; + } + + r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1); + if (r) { + dev_err(adev->dev, "reserving fence slot failed (%d)\n", r); + goto error_unlock; + } + + if (plane->type != DRM_PLANE_TYPE_CURSOR) + domain = amdgpu_display_supported_domains(adev, rbo->flags); + else + domain = AMDGPU_GEM_DOMAIN_VRAM; + + r = amdgpu_bo_pin(rbo, domain); + if (unlikely(r != 0)) { + if (r != -ERESTARTSYS) + DRM_ERROR("Failed to pin framebuffer with error %d\n", r); + goto error_unlock; + } + + r = amdgpu_ttm_alloc_gart(&rbo->tbo); + if (unlikely(r != 0)) { + DRM_ERROR("%p bind failed\n", rbo); + goto error_unpin; + } + + r = drm_gem_plane_helper_prepare_fb(plane, new_state); + if (unlikely(r != 0)) + goto error_unpin; + + amdgpu_bo_unreserve(rbo); + + afb->address = amdgpu_bo_gpu_offset(rbo); + + amdgpu_bo_ref(rbo); + + /** + * We don't do surface updates on planes that have been newly created, + * but we also don't have the afb->address during atomic check. + * + * Fill in buffer attributes depending on the address here, but only on + * newly created planes since they're not being used by DC yet and this + * won't modify global state. 
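+	 *
+	 * The dc_state pointer comparison below is what restricts this path
+	 * to newly duplicated plane states.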
+ */ + dm_plane_state_old = to_dm_plane_state(plane->state); + dm_plane_state_new = to_dm_plane_state(new_state); + + if (dm_plane_state_new->dc_state && + dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { + struct dc_plane_state *plane_state = + dm_plane_state_new->dc_state; + bool force_disable_dcc = !plane_state->dcc.enable; + + fill_plane_buffer_attributes( + adev, afb, plane_state->format, plane_state->rotation, + afb->tiling_flags, + &plane_state->tiling_info, &plane_state->plane_size, + &plane_state->dcc, &plane_state->address, + afb->tmz_surface, force_disable_dcc); + } + + return 0; + +error_unpin: + amdgpu_bo_unpin(rbo); + +error_unlock: + amdgpu_bo_unreserve(rbo); + return r; +} + +static void dm_plane_helper_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct amdgpu_bo *rbo; + int r; + + if (!old_state->fb) + return; + + rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]); + r = amdgpu_bo_reserve(rbo, false); + if (unlikely(r)) { + DRM_ERROR("failed to reserve rbo before unpin\n"); + return; + } + + amdgpu_bo_unpin(rbo); + amdgpu_bo_unreserve(rbo); + amdgpu_bo_unref(&rbo); +} + +static void get_min_max_dc_plane_scaling(struct drm_device *dev, + struct drm_framebuffer *fb, + int *min_downscale, int *max_upscale) +{ + struct amdgpu_device *adev = drm_to_adev(dev); + struct dc *dc = adev->dm.dc; + /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */ + struct dc_plane_cap *plane_cap = &dc->caps.planes[0]; + + switch (fb->format->format) { + case DRM_FORMAT_P010: + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + *max_upscale = plane_cap->max_upscale_factor.nv12; + *min_downscale = plane_cap->max_downscale_factor.nv12; + break; + + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_ARGB16161616F: + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ABGR16161616F: + *max_upscale = plane_cap->max_upscale_factor.fp16; + *min_downscale = plane_cap->max_downscale_factor.fp16; + break; + + default: + *max_upscale = plane_cap->max_upscale_factor.argb8888; + *min_downscale = plane_cap->max_downscale_factor.argb8888; + break; + } + + /* + * A factor of 1 in the plane_cap means to not allow scaling, ie. use a + * scaling factor of 1.0 == 1000 units. + */ + if (*max_upscale == 1) + *max_upscale = 1000; + + if (*min_downscale == 1) + *min_downscale = 1000; +} + +int dm_plane_helper_check_state(struct drm_plane_state *state, + struct drm_crtc_state *new_crtc_state) +{ + struct drm_framebuffer *fb = state->fb; + int min_downscale, max_upscale; + int min_scale = 0; + int max_scale = INT_MAX; + + /* Plane enabled? Validate viewport and get scaling factors from plane caps. */ + if (fb && state->crtc) { + /* Validate viewport to cover the case when only the position changes */ + if (state->plane->type != DRM_PLANE_TYPE_CURSOR) { + int viewport_width = state->crtc_w; + int viewport_height = state->crtc_h; + + if (state->crtc_x < 0) + viewport_width += state->crtc_x; + else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay) + viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x; + + if (state->crtc_y < 0) + viewport_height += state->crtc_y; + else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay) + viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y; + + if (viewport_width < 0 || viewport_height < 0) { + DRM_DEBUG_ATOMIC("Plane completely outside of screen\n"); + return -EINVAL; + } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. 
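Each half of the split plane must still cover at least MIN_VIEWPORT_SIZE.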
*/ + DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2); + return -EINVAL; + } else if (viewport_height < MIN_VIEWPORT_SIZE) { + DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE); + return -EINVAL; + } + + } + + /* Get min/max allowed scaling factors from plane caps. */ + get_min_max_dc_plane_scaling(state->crtc->dev, fb, + &min_downscale, &max_upscale); + /* + * Convert to drm convention: 16.16 fixed point, instead of dc's + * 1.0 == 1000. Also drm scaling is src/dst instead of dc's + * dst/src, so min_scale = 1.0 / max_upscale, etc. + */ + min_scale = (1000 << 16) / max_upscale; + max_scale = (1000 << 16) / min_downscale; + } + + return drm_atomic_helper_check_plane_state( + state, new_crtc_state, min_scale, max_scale, true, true); +} + +int fill_dc_scaling_info(struct amdgpu_device *adev, + const struct drm_plane_state *state, + struct dc_scaling_info *scaling_info) +{ + int scale_w, scale_h, min_downscale, max_upscale; + + memset(scaling_info, 0, sizeof(*scaling_info)); + + /* Source is fixed 16.16 but we ignore mantissa for now... */ + scaling_info->src_rect.x = state->src_x >> 16; + scaling_info->src_rect.y = state->src_y >> 16; + + /* + * For reasons we don't (yet) fully understand a non-zero + * src_y coordinate into an NV12 buffer can cause a + * system hang on DCN1x. + * To avoid hangs (and maybe be overly cautious) + * let's reject both non-zero src_x and src_y. + * + * We currently know of only one use-case to reproduce a + * scenario with non-zero src_x and src_y for NV12, which + * is to gesture the YouTube Android app into full screen + * on ChromeOS. + */ + if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || + (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) && + (state->fb && state->fb->format->format == DRM_FORMAT_NV12 && + (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0))) + return -EINVAL; + + scaling_info->src_rect.width = state->src_w >> 16; + if (scaling_info->src_rect.width == 0) + return -EINVAL; + + scaling_info->src_rect.height = state->src_h >> 16; + if (scaling_info->src_rect.height == 0) + return -EINVAL; + + scaling_info->dst_rect.x = state->crtc_x; + scaling_info->dst_rect.y = state->crtc_y; + + if (state->crtc_w == 0) + return -EINVAL; + + scaling_info->dst_rect.width = state->crtc_w; + + if (state->crtc_h == 0) + return -EINVAL; + + scaling_info->dst_rect.height = state->crtc_h; + + /* DRM doesn't specify clipping on destination output. */ + scaling_info->clip_rect = scaling_info->dst_rect; + + /* Validate scaling per-format with DC plane caps */ + if (state->plane && state->plane->dev && state->fb) { + get_min_max_dc_plane_scaling(state->plane->dev, state->fb, + &min_downscale, &max_upscale); + } else { + min_downscale = 250; + max_upscale = 16000; + } + + scale_w = scaling_info->dst_rect.width * 1000 / + scaling_info->src_rect.width; + + if (scale_w < min_downscale || scale_w > max_upscale) + return -EINVAL; + + scale_h = scaling_info->dst_rect.height * 1000 / + scaling_info->src_rect.height; + + if (scale_h < min_downscale || scale_h > max_upscale) + return -EINVAL; + + /* + * The "scaling_quality" can be ignored for now, quality = 0 has DC + * assume reasonable defaults based on the format. 
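+	 * It stays zero-initialized from the memset() at the top of this
+	 * function.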
+ */ + + return 0; +} + +static int dm_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + struct amdgpu_device *adev = drm_to_adev(plane->dev); + struct dc *dc = adev->dm.dc; + struct dm_plane_state *dm_plane_state; + struct dc_scaling_info scaling_info; + struct drm_crtc_state *new_crtc_state; + int ret; + + trace_amdgpu_dm_plane_atomic_check(new_plane_state); + + dm_plane_state = to_dm_plane_state(new_plane_state); + + if (!dm_plane_state->dc_state) + return 0; + + new_crtc_state = + drm_atomic_get_new_crtc_state(state, + new_plane_state->crtc); + if (!new_crtc_state) + return -EINVAL; + + ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state); + if (ret) + return ret; + + ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info); + if (ret) + return ret; + + if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK) + return 0; + + return -EINVAL; +} + +static int dm_plane_atomic_async_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + /* Only support async updates on cursor planes. */ + if (plane->type != DRM_PLANE_TYPE_CURSOR) + return -EINVAL; + + return 0; +} + +static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, + struct dc_cursor_position *position) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + int x, y; + int xorigin = 0, yorigin = 0; + + if (!crtc || !plane->state->fb) + return 0; + + if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) || + (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) { + DRM_ERROR("%s: bad cursor width or height %d x %d\n", + __func__, + plane->state->crtc_w, + plane->state->crtc_h); + return -EINVAL; + } + + x = plane->state->crtc_x; + y = plane->state->crtc_y; + + if (x <= -amdgpu_crtc->max_cursor_width || + y <= -amdgpu_crtc->max_cursor_height) + return 0; + + if (x < 0) { + xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); + x = 0; + } + if (y < 0) { + yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); + y = 0; + } + position->enable = true; + position->translate_by_source = true; + position->x = x; + position->y = y; + position->x_hotspot = xorigin; + position->y_hotspot = yorigin; + + return 0; +} + +void handle_cursor_update(struct drm_plane *plane, + struct drm_plane_state *old_plane_state) +{ + struct amdgpu_device *adev = drm_to_adev(plane->dev); + struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb); + struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc; + struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL; + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + uint64_t address = afb ? 
afb->address : 0; + struct dc_cursor_position position = {0}; + struct dc_cursor_attributes attributes; + int ret; + + if (!plane->state->fb && !old_plane_state->fb) + return; + + DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n", + __func__, + amdgpu_crtc->crtc_id, + plane->state->crtc_w, + plane->state->crtc_h); + + ret = get_cursor_position(plane, crtc, &position); + if (ret) + return; + + if (!position.enable) { + /* turn off cursor */ + if (crtc_state && crtc_state->stream) { + mutex_lock(&adev->dm.dc_lock); + dc_stream_set_cursor_position(crtc_state->stream, + &position); + mutex_unlock(&adev->dm.dc_lock); + } + return; + } + + amdgpu_crtc->cursor_width = plane->state->crtc_w; + amdgpu_crtc->cursor_height = plane->state->crtc_h; + + memset(&attributes, 0, sizeof(attributes)); + attributes.address.high_part = upper_32_bits(address); + attributes.address.low_part = lower_32_bits(address); + attributes.width = plane->state->crtc_w; + attributes.height = plane->state->crtc_h; + attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA; + attributes.rotation_angle = 0; + attributes.attribute_flags.value = 0; + + attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0]; + + if (crtc_state->stream) { + mutex_lock(&adev->dm.dc_lock); + if (!dc_stream_set_cursor_attributes(crtc_state->stream, + &attributes)) + DRM_ERROR("DC failed to set cursor attributes\n"); + + if (!dc_stream_set_cursor_position(crtc_state->stream, + &position)) + DRM_ERROR("DC failed to set cursor position\n"); + mutex_unlock(&adev->dm.dc_lock); + } +} + +static void dm_plane_atomic_async_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_plane_state *old_state = + drm_atomic_get_old_plane_state(state, plane); + + trace_amdgpu_dm_atomic_update_cursor(new_state); + + swap(plane->state->fb, new_state->fb); + + plane->state->src_x = new_state->src_x; + plane->state->src_y = new_state->src_y; + plane->state->src_w = new_state->src_w; + plane->state->src_h = new_state->src_h; + plane->state->crtc_x = new_state->crtc_x; + plane->state->crtc_y = new_state->crtc_y; + plane->state->crtc_w = new_state->crtc_w; + plane->state->crtc_h = new_state->crtc_h; + + handle_cursor_update(plane, old_state); +} + +static const struct drm_plane_helper_funcs dm_plane_helper_funcs = { + .prepare_fb = dm_plane_helper_prepare_fb, + .cleanup_fb = dm_plane_helper_cleanup_fb, + .atomic_check = dm_plane_atomic_check, + .atomic_async_check = dm_plane_atomic_async_check, + .atomic_async_update = dm_plane_atomic_async_update +}; + +static void dm_drm_plane_reset(struct drm_plane *plane) +{ + struct dm_plane_state *amdgpu_state = NULL; + + if (plane->state) + plane->funcs->atomic_destroy_state(plane, plane->state); + + amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL); + WARN_ON(amdgpu_state == NULL); + + if (amdgpu_state) + __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base); +#ifdef CONFIG_DRM_AMD_DC_HDR + if (amdgpu_state) + amdgpu_state->sdr_boost = DEFAULT_SDR_BOOST; +#endif +} + +static struct drm_plane_state * +dm_drm_plane_duplicate_state(struct drm_plane *plane) +{ + struct dm_plane_state *dm_plane_state, *old_dm_plane_state; + + old_dm_plane_state = to_dm_plane_state(plane->state); + dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL); + if (!dm_plane_state) + return NULL; + + __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base); + + if (old_dm_plane_state->dc_state) { 
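+		/*
+		 * Share the dc_plane_state with the old state and take an
+		 * extra reference so each plane state can release it
+		 * independently.
+		 */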
+ dm_plane_state->dc_state = old_dm_plane_state->dc_state; + dc_plane_state_retain(dm_plane_state->dc_state); + } + +#ifdef CONFIG_DRM_AMD_DC_HDR + if (dm_plane_state->degamma_lut) + drm_property_blob_get(dm_plane_state->degamma_lut); + if (dm_plane_state->ctm) + drm_property_blob_get(dm_plane_state->ctm); + + dm_plane_state->sdr_boost = old_dm_plane_state->sdr_boost; +#endif + + return &dm_plane_state->base; +} + +static bool dm_plane_format_mod_supported(struct drm_plane *plane, + uint32_t format, + uint64_t modifier) +{ + struct amdgpu_device *adev = drm_to_adev(plane->dev); + const struct drm_format_info *info = drm_format_info(format); + int i; + + enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3; + + if (!info) + return false; + + /* + * We always have to allow these modifiers: + * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers. + * 2. Not passing any modifiers is the same as explicitly passing INVALID. + */ + if (modifier == DRM_FORMAT_MOD_LINEAR || + modifier == DRM_FORMAT_MOD_INVALID) { + return true; + } + + /* Check that the modifier is on the list of the plane's supported modifiers. */ + for (i = 0; i < plane->modifier_count; i++) { + if (modifier == plane->modifiers[i]) + break; + } + if (i == plane->modifier_count) + return false; + + /* + * For D swizzle the canonical modifier depends on the bpp, so check + * it here. + */ + if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 && + adev->family >= AMDGPU_FAMILY_NV) { + if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4) + return false; + } + + if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D && + info->cpp[0] < 8) + return false; + + if (modifier_has_dcc(modifier)) { + /* Per radeonsi comments 16/64 bpp are more complicated. */ + if (info->cpp[0] != 4) + return false; + /* We support multi-planar formats, but not when combined with + * additional DCC metadata planes. 
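+	 * (Presumably because the extra metadata planes would collide with
+	 * the multi-planar video layouts handled elsewhere in this file.)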
+ */ + if (info->num_planes > 1) + return false; + } + + return true; +} + +static void dm_drm_plane_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); + +#ifdef CONFIG_DRM_AMD_DC_HDR + drm_property_blob_put(dm_plane_state->degamma_lut); + drm_property_blob_put(dm_plane_state->ctm); +#endif + if (dm_plane_state->dc_state) + dc_plane_state_release(dm_plane_state->dc_state); + + drm_atomic_helper_plane_destroy_state(plane, state); +} + +#ifdef CONFIG_DRM_AMD_DC_HDR +/* copied from drm_atomic_uapi.c */ +static int atomic_replace_property_blob_from_id(struct drm_device *dev, + struct drm_property_blob **blob, + uint64_t blob_id, + ssize_t expected_size, + ssize_t expected_elem_size, + bool *replaced) +{ + struct drm_property_blob *new_blob = NULL; + + if (blob_id != 0) { + new_blob = drm_property_lookup_blob(dev, blob_id); + if (new_blob == NULL) + return -EINVAL; + + if (expected_size > 0 && + new_blob->length != expected_size) { + drm_property_blob_put(new_blob); + return -EINVAL; + } + if (expected_elem_size > 0 && + new_blob->length % expected_elem_size != 0) { + drm_property_blob_put(new_blob); + return -EINVAL; + } + } + + *replaced |= drm_property_replace_blob(blob, new_blob); + drm_property_blob_put(new_blob); + + return 0; +} + +int dm_drm_plane_set_property(struct drm_plane *plane, + struct drm_plane_state *state, + struct drm_property *property, + uint64_t val) +{ + struct amdgpu_device *adev = drm_to_adev(plane->dev); + struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); + int ret = 0; + bool replaced; + + if (property == adev->dm.degamma_lut_property) { + ret = atomic_replace_property_blob_from_id(adev_to_drm(adev), + &dm_plane_state->degamma_lut, + val, -1, sizeof(struct drm_color_lut), + &replaced); + } else if (property == adev->dm.ctm_property) { + ret = atomic_replace_property_blob_from_id(adev_to_drm(adev), + &dm_plane_state->ctm, + val, + sizeof(struct drm_color_ctm), -1, + &replaced); + } else if (property == adev->dm.sdr_boost_property) { + dm_plane_state->sdr_boost = val; + } else { + return -EINVAL; + } + + return ret; +} + +int dm_drm_plane_get_property(struct drm_plane *plane, + const struct drm_plane_state *state, + struct drm_property *property, + uint64_t *val) +{ + struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); + struct amdgpu_device *adev = drm_to_adev(plane->dev); + + if (property == adev->dm.degamma_lut_property) { + *val = (dm_plane_state->degamma_lut) ? + dm_plane_state->degamma_lut->base.id : 0; + } else if (property == adev->dm.ctm_property) { + *val = (dm_plane_state->ctm) ? 
dm_plane_state->ctm->base.id : 0; + } else if (property == adev->dm.sdr_boost_property) { + *val = dm_plane_state->sdr_boost; + } else { + return -EINVAL; + } + + return 0; +} +#endif + +static const struct drm_plane_funcs dm_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_helper_destroy, + .reset = dm_drm_plane_reset, + .atomic_duplicate_state = dm_drm_plane_duplicate_state, + .atomic_destroy_state = dm_drm_plane_destroy_state, + .format_mod_supported = dm_plane_format_mod_supported, +#ifdef CONFIG_DRM_AMD_DC_HDR + .atomic_set_property = dm_drm_plane_set_property, + .atomic_get_property = dm_drm_plane_get_property, +#endif +}; + +int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, + struct drm_plane *plane, + unsigned long possible_crtcs, + const struct dc_plane_cap *plane_cap) +{ + uint32_t formats[32]; + int num_formats; + int res = -EPERM; + unsigned int supported_rotations; + uint64_t *modifiers = NULL; + + num_formats = get_plane_formats(plane, plane_cap, formats, + ARRAY_SIZE(formats)); + + res = get_plane_modifiers(dm->adev, plane->type, &modifiers); + if (res) + return res; + + if (modifiers == NULL) + adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true; + + res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs, + &dm_plane_funcs, formats, num_formats, + modifiers, plane->type, NULL); + kfree(modifiers); + if (res) + return res; + + if (plane->type == DRM_PLANE_TYPE_OVERLAY && + plane_cap && plane_cap->per_pixel_alpha) { + unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) | + BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE); + + drm_plane_create_alpha_property(plane); + drm_plane_create_blend_mode_property(plane, blend_caps); + } + + if (plane->type == DRM_PLANE_TYPE_PRIMARY && + plane_cap && + (plane_cap->pixel_format_support.nv12 || + plane_cap->pixel_format_support.p010)) { + /* This only affects YUV formats. */ + drm_plane_create_color_properties( + plane, + BIT(DRM_COLOR_YCBCR_BT601) | + BIT(DRM_COLOR_YCBCR_BT709) | + BIT(DRM_COLOR_YCBCR_BT2020), + BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | + BIT(DRM_COLOR_YCBCR_FULL_RANGE), + DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE); + } + + supported_rotations = + DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | + DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270; + + if (dm->adev->asic_type >= CHIP_BONAIRE && + plane->type != DRM_PLANE_TYPE_CURSOR) + drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, + supported_rotations); + + drm_plane_helper_add(plane, &dm_plane_helper_funcs); + +#ifdef CONFIG_DRM_AMD_DC_HDR + attach_color_mgmt_properties(dm, plane); +#endif + /* Create (reset) the plane state */ + if (plane->funcs->reset) + plane->funcs->reset(plane); + + return 0; +} + diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h new file mode 100644 index 000000000000..286981a2dd40 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __AMDGPU_DM_PLANE_H__ +#define __AMDGPU_DM_PLANE_H__ + +#include "dc.h" + +void handle_cursor_update(struct drm_plane *plane, + struct drm_plane_state *old_plane_state); + +int fill_dc_scaling_info(struct amdgpu_device *adev, + const struct drm_plane_state *state, + struct dc_scaling_info *scaling_info); + +int dm_plane_helper_check_state(struct drm_plane_state *state, + struct drm_crtc_state *new_crtc_state); + +int fill_plane_buffer_attributes(struct amdgpu_device *adev, + const struct amdgpu_framebuffer *afb, + const enum surface_pixel_format format, + const enum dc_rotation_angle rotation, + const uint64_t tiling_flags, + union dc_tiling_info *tiling_info, + struct plane_size *plane_size, + struct dc_plane_dcc_param *dcc, + struct dc_plane_address *address, + bool tmz_surface, + bool force_disable_dcc); + +int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, + struct drm_plane *plane, + unsigned long possible_crtcs, + const struct dc_plane_cap *plane_cap); + +const struct drm_format_info *amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd); + +void fill_blending_from_plane_state(const struct drm_plane_state *plane_state, + bool *per_pixel_alpha, bool *pre_multiplied_alpha, + bool *global_alpha, int *global_alpha_value); + +#endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index eba270121698..75284e2cec74 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -99,12 +99,9 @@ bool dm_pp_apply_display_requirements( adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1; } - if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_configuration_change) - adev->powerplay.pp_funcs->display_configuration_change( - adev->powerplay.pp_handle, - &adev->pm.pm_display_cfg); + amdgpu_dpm_display_configuration_change(adev, &adev->pm.pm_display_cfg); - amdgpu_pm_compute_clocks(adev); + amdgpu_dpm_compute_clocks(adev); } return true; @@ -298,31 +295,25 @@ bool dm_pp_get_clock_levels_by_type( struct dm_pp_clock_levels *dc_clks) { struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; struct amd_pp_clocks pp_clks = { 0 }; struct amd_pp_simple_clock_info validation_clks = { 0 }; uint32_t i; - if (adev->powerplay.pp_funcs && 
adev->powerplay.pp_funcs->get_clock_by_type) { - if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle, - dc_to_pp_clock_type(clk_type), &pp_clks)) { - /* Error in pplib. Provide default values. */ - get_default_clock_levels(clk_type, dc_clks); - return true; - } + if (amdgpu_dpm_get_clock_by_type(adev, + dc_to_pp_clock_type(clk_type), &pp_clks)) { + /* Error in pplib. Provide default values. */ + get_default_clock_levels(clk_type, dc_clks); + return true; } pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type); - if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_display_mode_validation_clocks) { - if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks( - pp_handle, &validation_clks)) { - /* Error in pplib. Provide default values. */ - DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n"); - validation_clks.engine_max_clock = 72000; - validation_clks.memory_max_clock = 80000; - validation_clks.level = 0; - } + if (amdgpu_dpm_get_display_mode_validation_clks(adev, &validation_clks)) { + /* Error in pplib. Provide default values. */ + DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n"); + validation_clks.engine_max_clock = 72000; + validation_clks.memory_max_clock = 80000; + validation_clks.level = 0; } DRM_INFO("DM_PPLIB: Validation clocks:\n"); @@ -370,18 +361,14 @@ bool dm_pp_get_clock_levels_by_type_with_latency( struct dm_pp_clock_levels_with_latency *clk_level_info) { struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; struct pp_clock_levels_with_latency pp_clks = { 0 }; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; int ret; - if (pp_funcs && pp_funcs->get_clock_by_type_with_latency) { - ret = pp_funcs->get_clock_by_type_with_latency(pp_handle, - dc_to_pp_clock_type(clk_type), - &pp_clks); - if (ret) - return false; - } + ret = amdgpu_dpm_get_clock_by_type_with_latency(adev, + dc_to_pp_clock_type(clk_type), + &pp_clks); + if (ret) + return false; pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type); @@ -394,18 +381,14 @@ bool dm_pp_get_clock_levels_by_type_with_voltage( struct dm_pp_clock_levels_with_voltage *clk_level_info) { struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; struct pp_clock_levels_with_voltage pp_clk_info = {0}; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; int ret; - if (pp_funcs && pp_funcs->get_clock_by_type_with_voltage) { - ret = pp_funcs->get_clock_by_type_with_voltage(pp_handle, - dc_to_pp_clock_type(clk_type), - &pp_clk_info); - if (ret) - return false; - } + ret = amdgpu_dpm_get_clock_by_type_with_voltage(adev, + dc_to_pp_clock_type(clk_type), + &pp_clk_info); + if (ret) + return false; pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type); @@ -417,19 +400,16 @@ bool dm_pp_notify_wm_clock_changes( struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges) { struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; /* * Limit this watermark setting for Polaris for now * TODO: expand this to other ASICs */ - if ((adev->asic_type >= CHIP_POLARIS10) && (adev->asic_type <= CHIP_VEGAM) - && pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges) { - if (!pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, + if ((adev->asic_type >= CHIP_POLARIS10) && + (adev->asic_type <= CHIP_VEGAM) && + !amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, 
(void *)wm_with_clock_ranges)) return true; - } return false; } @@ -456,12 +436,10 @@ bool dm_pp_apply_clock_for_voltage_request( if (!pp_clock_request.clock_type) return false; - if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_clock_voltage_request) - ret = adev->powerplay.pp_funcs->display_clock_voltage_request( - adev->powerplay.pp_handle, - &pp_clock_request); - if (ret) + ret = amdgpu_dpm_display_clock_voltage_request(adev, &pp_clock_request); + if (ret && (ret != -EOPNOTSUPP)) return false; + return true; } @@ -471,15 +449,8 @@ bool dm_pp_get_static_clocks( { struct amdgpu_device *adev = ctx->driver_context; struct amd_pp_clock_info pp_clk_info = {0}; - int ret = 0; - if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_current_clocks) - ret = adev->powerplay.pp_funcs->get_current_clocks( - adev->powerplay.pp_handle, - &pp_clk_info); - else - return false; - if (ret) + if (amdgpu_dpm_get_current_clocks(adev, &pp_clk_info)) return false; static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state); @@ -494,8 +465,6 @@ static void pp_rv_set_wm_ranges(struct pp_smu *pp, { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges; struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges; struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = wm_with_clock_ranges.wm_mcif_clocks_ranges; @@ -536,72 +505,48 @@ static void pp_rv_set_wm_ranges(struct pp_smu *pp, ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000; } - if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges) - pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, - &wm_with_clock_ranges); + amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, + &wm_with_clock_ranges); } static void pp_rv_set_pme_wa_enable(struct pp_smu *pp) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - if (pp_funcs && pp_funcs->notify_smu_enable_pwe) - pp_funcs->notify_smu_enable_pwe(pp_handle); + amdgpu_dpm_notify_smu_enable_pwe(adev); } static void pp_rv_set_active_display_count(struct pp_smu *pp, int count) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - if (!pp_funcs || !pp_funcs->set_active_display_count) - return; - - pp_funcs->set_active_display_count(pp_handle, count); + amdgpu_dpm_set_active_display_count(adev, count); } static void pp_rv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int clock) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - - if (!pp_funcs || !pp_funcs->set_min_deep_sleep_dcefclk) - return; - pp_funcs->set_min_deep_sleep_dcefclk(pp_handle, clock); + amdgpu_dpm_set_min_deep_sleep_dcefclk(adev, clock); } static void pp_rv_set_hard_min_dcefclk_by_freq(struct pp_smu *pp, int clock) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs 
*pp_funcs = adev->powerplay.pp_funcs; - if (!pp_funcs || !pp_funcs->set_hard_min_dcefclk_by_freq) - return; - - pp_funcs->set_hard_min_dcefclk_by_freq(pp_handle, clock); + amdgpu_dpm_set_hard_min_dcefclk_by_freq(adev, clock); } static void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - - if (!pp_funcs || !pp_funcs->set_hard_min_fclk_by_freq) - return; - pp_funcs->set_hard_min_fclk_by_freq(pp_handle, mhz); + amdgpu_dpm_set_hard_min_fclk_by_freq(adev, mhz); } static enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp, @@ -609,11 +554,8 @@ static enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp, { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges) - pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, ranges); + amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, ranges); return PP_SMU_RESULT_OK; } @@ -622,14 +564,13 @@ static enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; - if (!pp_funcs || !pp_funcs->set_active_display_count) + ret = amdgpu_dpm_set_active_display_count(adev, count); + if (ret == -EOPNOTSUPP) return PP_SMU_RESULT_UNSUPPORTED; - - /* 0: successful or smu.ppt_funcs->set_display_count = NULL; 1: fail */ - if (pp_funcs->set_active_display_count(pp_handle, count)) + else if (ret) + /* 0: successful or smu.ppt_funcs->set_display_count = NULL; 1: fail */ return PP_SMU_RESULT_FAIL; return PP_SMU_RESULT_OK; @@ -640,14 +581,13 @@ pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - - if (!pp_funcs || !pp_funcs->set_min_deep_sleep_dcefclk) - return PP_SMU_RESULT_UNSUPPORTED; + int ret = 0; /* 0: successful or smu.ppt_funcs->set_deep_sleep_dcefclk = NULL;1: fail */ - if (pp_funcs->set_min_deep_sleep_dcefclk(pp_handle, mhz)) + ret = amdgpu_dpm_set_min_deep_sleep_dcefclk(adev, mhz); + if (ret == -EOPNOTSUPP) + return PP_SMU_RESULT_UNSUPPORTED; + else if (ret) return PP_SMU_RESULT_FAIL; return PP_SMU_RESULT_OK; @@ -658,12 +598,8 @@ static enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq( { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; struct pp_display_clock_request clock_req; - - if (!pp_funcs || !pp_funcs->display_clock_voltage_request) - return PP_SMU_RESULT_UNSUPPORTED; + int ret = 0; clock_req.clock_type = amd_pp_dcef_clock; clock_req.clock_freq_in_khz = mhz * 1000; @@ -671,7 +607,10 @@ static enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq( /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL * 1: fail */ - if (pp_funcs->display_clock_voltage_request(pp_handle, &clock_req)) + ret = 
amdgpu_dpm_display_clock_voltage_request(adev, &clock_req); + if (ret == -EOPNOTSUPP) + return PP_SMU_RESULT_UNSUPPORTED; + else if (ret) return PP_SMU_RESULT_FAIL; return PP_SMU_RESULT_OK; @@ -682,12 +621,8 @@ pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; struct pp_display_clock_request clock_req; - - if (!pp_funcs || !pp_funcs->display_clock_voltage_request) - return PP_SMU_RESULT_UNSUPPORTED; + int ret = 0; clock_req.clock_type = amd_pp_mem_clock; clock_req.clock_freq_in_khz = mhz * 1000; @@ -695,7 +630,10 @@ pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz) /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL * 1: fail */ - if (pp_funcs->display_clock_voltage_request(pp_handle, &clock_req)) + ret = amdgpu_dpm_display_clock_voltage_request(adev, &clock_req); + if (ret == -EOPNOTSUPP) + return PP_SMU_RESULT_UNSUPPORTED; + else if (ret) return PP_SMU_RESULT_FAIL; return PP_SMU_RESULT_OK; @@ -706,14 +644,10 @@ static enum pp_smu_status pp_nv_set_pstate_handshake_support( { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - if (pp_funcs && pp_funcs->display_disable_memory_clock_switch) { - if (pp_funcs->display_disable_memory_clock_switch(pp_handle, - !pstate_handshake_supported)) - return PP_SMU_RESULT_FAIL; - } + if (amdgpu_dpm_display_disable_memory_clock_switch(adev, + !pstate_handshake_supported)) + return PP_SMU_RESULT_FAIL; return PP_SMU_RESULT_OK; } @@ -723,12 +657,8 @@ static enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp, { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; struct pp_display_clock_request clock_req; - - if (!pp_funcs || !pp_funcs->display_clock_voltage_request) - return PP_SMU_RESULT_UNSUPPORTED; + int ret = 0; switch (clock_id) { case PP_SMU_NV_DISPCLK: @@ -748,7 +678,10 @@ static enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp, /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL * 1: fail */ - if (pp_funcs->display_clock_voltage_request(pp_handle, &clock_req)) + ret = amdgpu_dpm_display_clock_voltage_request(adev, &clock_req); + if (ret == -EOPNOTSUPP) + return PP_SMU_RESULT_UNSUPPORTED; + else if (ret) return PP_SMU_RESULT_FAIL; return PP_SMU_RESULT_OK; @@ -759,16 +692,16 @@ static enum pp_smu_status pp_nv_get_maximum_sustainable_clocks( { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; - if (!pp_funcs || !pp_funcs->get_max_sustainable_clocks_by_dc) + ret = amdgpu_dpm_get_max_sustainable_clocks_by_dc(adev, + max_clocks); + if (ret == -EOPNOTSUPP) return PP_SMU_RESULT_UNSUPPORTED; + else if (ret) + return PP_SMU_RESULT_FAIL; - if (!pp_funcs->get_max_sustainable_clocks_by_dc(pp_handle, max_clocks)) - return PP_SMU_RESULT_OK; - - return PP_SMU_RESULT_FAIL; + return PP_SMU_RESULT_OK; } static enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp, @@ -776,18 +709,17 @@ static enum pp_smu_status 
pp_nv_get_uclk_dpm_states(struct pp_smu *pp, { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; - if (!pp_funcs || !pp_funcs->get_uclk_dpm_states) + ret = amdgpu_dpm_get_uclk_dpm_states(adev, + clock_values_in_khz, + num_states); + if (ret == -EOPNOTSUPP) return PP_SMU_RESULT_UNSUPPORTED; + else if (ret) + return PP_SMU_RESULT_FAIL; - if (!pp_funcs->get_uclk_dpm_states(pp_handle, - clock_values_in_khz, - num_states)) - return PP_SMU_RESULT_OK; - - return PP_SMU_RESULT_FAIL; + return PP_SMU_RESULT_OK; } static enum pp_smu_status pp_rn_get_dpm_clock_table( @@ -795,16 +727,15 @@ static enum pp_smu_status pp_rn_get_dpm_clock_table( { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; - if (!pp_funcs || !pp_funcs->get_dpm_clock_table) + ret = amdgpu_dpm_get_dpm_clock_table(adev, clock_table); + if (ret == -EOPNOTSUPP) return PP_SMU_RESULT_UNSUPPORTED; + else if (ret) + return PP_SMU_RESULT_FAIL; - if (!pp_funcs->get_dpm_clock_table(pp_handle, clock_table)) - return PP_SMU_RESULT_OK; - - return PP_SMU_RESULT_FAIL; + return PP_SMU_RESULT_OK; } static enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp, @@ -812,11 +743,8 @@ static enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp, { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges) - pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, ranges); + amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, ranges); return PP_SMU_RESULT_OK; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index c022e56f9459..26291db0a3cf 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -26,6 +26,32 @@ #include "amdgpu_dm_psr.h" #include "dc.h" #include "dm_helpers.h" +#include "amdgpu_dm.h" +#include "modules/power/power_helpers.h" + +static bool link_supports_psrsu(struct dc_link *link) +{ + struct dc *dc = link->ctx->dc; + + if (!dc->caps.dmcub_support) + return false; + + if (dc->ctx->dce_version < DCN_VERSION_3_1) + return false; + + if (!is_psr_su_specific_panel(link)) + return false; + + if (!link->dpcd_caps.alpm_caps.bits.AUX_WAKE_ALPM_CAP || + !link->dpcd_caps.psr_info.psr_dpcd_caps.bits.Y_COORDINATE_REQUIRED) + return false; + + if (link->dpcd_caps.psr_info.psr_dpcd_caps.bits.SU_GRANULARITY_REQUIRED && + !link->dpcd_caps.psr_info.psr2_su_y_granularity_cap) + return false; + + return true; +} /* * amdgpu_dm_set_psr_caps() - set link psr capabilities @@ -34,26 +60,36 @@ */ void amdgpu_dm_set_psr_caps(struct dc_link *link) { - uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE]; - - if (!(link->connector_signal & SIGNAL_TYPE_EDP)) + if (!(link->connector_signal & SIGNAL_TYPE_EDP)) { + link->psr_settings.psr_feature_enabled = false; return; - if (link->type == dc_connection_none) + } + + if (link->type == dc_connection_none) { + link->psr_settings.psr_feature_enabled = false; return; - if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT, - dpcd_data, sizeof(dpcd_data))) 
{
-		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
-
-		if (dpcd_data[0] == 0) {
-			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
-			link->psr_settings.psr_feature_enabled = false;
-		} else {
+	}
+
+	if (link->dpcd_caps.psr_info.psr_version == 0) {
+		link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+		link->psr_settings.psr_feature_enabled = false;
+
+	} else {
+		if (link_supports_psrsu(link))
+			link->psr_settings.psr_version = DC_PSR_VERSION_SU_1;
+		else
 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
-			link->psr_settings.psr_feature_enabled = true;
-		}
-		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
+		link->psr_settings.psr_feature_enabled = true;
 	}
+
+	DRM_INFO("PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n",
+		 link->psr_settings.psr_feature_enabled,
+		 link->psr_settings.psr_version,
+		 link->dpcd_caps.psr_info.psr_version,
+		 link->dpcd_caps.psr_info.psr_dpcd_caps.raw,
+		 link->dpcd_caps.psr_info.psr2_su_y_granularity_cap);
+
 }
 
 /*
@@ -67,21 +103,24 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
 	struct dc_link *link = NULL;
 	struct psr_config psr_config = {0};
 	struct psr_context psr_context = {0};
+	struct dc *dc = NULL;
 	bool ret = false;
 
 	if (stream == NULL)
 		return false;
 
 	link = stream->link;
+	dc = link->ctx->dc;
 
-	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
+	if (link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) {
+		mod_power_calc_psr_configs(&psr_config, link, stream);
 
-	if (psr_config.psr_version > 0) {
-		psr_config.psr_exit_link_training_required = 0x1;
-		psr_config.psr_frame_capture_indication_req = 0;
-		psr_config.psr_rfb_setup_time = 0x37;
-		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
-		psr_config.allow_smu_optimizations = 0x0;
+		/* Linux DM-specific updates to the psr config fields */
+		psr_config.allow_smu_optimizations =
+			(amdgpu_dc_feature_mask & DC_PSR_ALLOW_SMU_OPT) &&
+			mod_power_only_edp(dc->current_state, stream);
+		psr_config.allow_multi_disp_optimizations =
+			(amdgpu_dc_feature_mask & DC_PSR_ALLOW_MULTI_DISP_OPT);
 
 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
@@ -135,7 +174,13 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
 		&stream, 1,
 		&params);
 
-	power_opt |= psr_power_opt_z10_static_screen;
+	/*
+	 * Only enable static-screen optimizations for PSR1. For PSR SU, this
+	 * causes issues with the vstartup interrupt, which amdgpu_dm uses to
+	 * send vblank events.
+	 */
+	if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
+		power_opt |= psr_power_opt_z10_static_screen;
 
 	return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
 }
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
index fdcaea22b456..d3bc9dc21771 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
@@ -34,6 +34,7 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_plane.h>
 #include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_atomic.h>