 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 361 +++++++++++++++------
 1 file changed, 279 insertions(+), 82 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index c911b30de658..2f0b14f8f833 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -51,6 +51,7 @@
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"
+#include "amdgpu_atombios.h"
#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
@@ -623,7 +624,7 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
/**
- * dmub_aux_setconfig_reply_callback - Callback for AUX or SET_CONFIG command.
+ * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
* @adev: amdgpu_device pointer
* @notify: dmub notification structure
*
@@ -631,7 +632,8 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
* Copies dmub notification to DM which is to be read by AUX command
* issuing thread and also signals the event to wake up the thread.
*/
-void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
+static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
+ struct dmub_notification *notify)
{
if (adev->dm.dmub_notify)
memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
@@ -647,7 +649,8 @@ void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notific
* Dmub Hpd interrupt processing callback. Gets display index through the
* link index and calls helper to do the processing.
*/
-void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
+static void dmub_hpd_callback(struct amdgpu_device *adev,
+ struct dmub_notification *notify)
{
struct amdgpu_dm_connector *aconnector;
struct amdgpu_dm_connector *hpd_aconnector = NULL;
@@ -704,8 +707,10 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not
* to dmub interrupt handling thread
* Return: true if successfully registered, false if there is an existing registration
*/
-bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
-dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
+static bool register_dmub_notify_callback(struct amdgpu_device *adev,
+ enum dmub_notification_type type,
+ dmub_notify_interrupt_callback_t callback,
+ bool dmub_int_thread_offload)
{
if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
adev->dm.dmub_callback[type] = callback;
@@ -789,8 +794,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
plink = adev->dm.dc->links[notify.link_index];
if (plink) {
plink->hpd_status =
- notify.hpd_status ==
- DP_HPD_PLUG ? true : false;
+ notify.hpd_status == DP_HPD_PLUG;
}
}
queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
@@ -1050,6 +1054,11 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
return 0;
}
+ /* Reset DMCUB if it was previously running - before we overwrite its memory. */
+ status = dmub_srv_hw_reset(dmub_srv);
+ if (status != DMUB_STATUS_OK)
+ DRM_WARN("Error resetting DMUB HW: %d\n", status);
+
hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
fw_inst_const = dmub_fw->data +
@@ -1453,8 +1462,21 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
init_data.flags.edp_no_power_sequencing = true;
+#ifdef CONFIG_DRM_AMD_DC_DCN
+ if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
+ init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
+ if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
+ init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
+#endif
+
init_data.flags.power_down_display_on_boot = true;
+ if (check_seamless_boot_capability(adev)) {
+ init_data.flags.power_down_display_on_boot = false;
+ init_data.flags.allow_seamless_boot_optimization = true;
+ DRM_INFO("Seamless boot condition check passed\n");
+ }
+
INIT_LIST_HEAD(&adev->dm.da_list);
/* Display Core create. */
adev->dm.dc = dc_create(&init_data);
@@ -1479,8 +1501,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
adev->dm.dc->debug.disable_stutter = true;
- if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
+ if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
adev->dm.dc->debug.disable_dsc = true;
+ adev->dm.dc->debug.disable_dsc_edp = true;
+ }
if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
adev->dm.dc->debug.disable_clock_gate = true;
@@ -2303,14 +2327,6 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
goto fail;
}
-
- res = dc_validate_global_state(dc, context, false);
-
- if (res != DC_OK) {
- DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
- goto fail;
- }
-
res = dc_commit_state(dc, context);
fail:
@@ -2561,6 +2577,23 @@ static int dm_resume(void *handle)
if (amdgpu_in_reset(adev)) {
dc_state = dm->cached_dc_state;
+ /*
+ * The dc->current_state is backed up into dm->cached_dc_state
+ * before we commit 0 streams.
+ *
+ * DC will clear link encoder assignments on the real state
+ * but the changes won't propagate over to the copy we made
+ * before the 0 streams commit.
+ *
+ * DC expects that link encoder assignments are *not* valid
+ * when committing a state, so as a workaround it needs to be
+ * cleared here.
+ */
+ link_enc_cfg_init(dm->dc, dc_state);
+
+ if (dc_enable_dmub_notifications(adev->dm.dc))
+ amdgpu_dm_outbox_init(adev);
+
r = dm_dmub_hw_init(adev);
if (r)
DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@@ -2572,20 +2605,11 @@ static int dm_resume(void *handle)
for (i = 0; i < dc_state->stream_count; i++) {
dc_state->streams[i]->mode_changed = true;
- for (j = 0; j < dc_state->stream_status->plane_count; j++) {
- dc_state->stream_status->plane_states[j]->update_flags.raw
+ for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
+ dc_state->stream_status[i].plane_states[j]->update_flags.raw
= 0xffffffff;
}
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- /*
- * Resource allocation happens for link encoders for newer ASIC in
- * dc_validate_global_state, so we need to revalidate it.
- *
- * This shouldn't fail (it passed once before), so warn if it does.
- */
- WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
-#endif
WARN_ON(!dc_commit_state(dm->dc, dc_state));
@@ -2608,6 +2632,10 @@ static int dm_resume(void *handle)
/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
dc_resource_state_construct(dm->dc, dm_state->context);
+ /* Re-enable outbox interrupts for DPIA. */
+ if (dc_enable_dmub_notifications(adev->dm.dc))
+ amdgpu_dm_outbox_init(adev);
+
/* Before powering on DC we need to re-initialize DMUB. */
r = dm_dmub_hw_init(adev);
if (r)
@@ -2938,13 +2966,12 @@ void amdgpu_dm_update_connector_after_detect(
aconnector->edid =
(struct edid *)sink->dc_edid.raw_edid;
- drm_connector_update_edid_property(connector,
- aconnector->edid);
if (aconnector->dc_link->aux_mode)
drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
aconnector->edid);
}
+ drm_connector_update_edid_property(connector, aconnector->edid);
amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
update_connector_ext_caps(aconnector);
} else {
@@ -3012,7 +3039,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
drm_modeset_unlock_all(dev);
if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
- drm_kms_helper_hotplug_event(dev);
+ drm_kms_helper_connector_hotplug_event(connector);
} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
if (new_connection_type == dc_connection_none &&
@@ -3027,7 +3054,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
drm_modeset_unlock_all(dev);
if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
- drm_kms_helper_hotplug_event(dev);
+ drm_kms_helper_connector_hotplug_event(connector);
}
mutex_unlock(&aconnector->hpd_lock);
@@ -3221,7 +3248,7 @@ out:
dm_restore_drm_connector_state(dev, connector);
drm_modeset_unlock_all(dev);
- drm_kms_helper_hotplug_event(dev);
+ drm_kms_helper_connector_hotplug_event(connector);
} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
if (aconnector->fake_enable)
@@ -3234,7 +3261,7 @@ out:
dm_restore_drm_connector_state(dev, connector);
drm_modeset_unlock_all(dev);
- drm_kms_helper_hotplug_event(dev);
+ drm_kms_helper_connector_hotplug_event(connector);
}
}
#ifdef CONFIG_DRM_AMD_DC_HDCP
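
The switch from drm_kms_helper_hotplug_event() to
drm_kms_helper_connector_hotplug_event() narrows the uevent to the
connector that actually changed, so userspace can reprobe a single
output instead of rescanning the whole device. A minimal libudev
consumer sketch (illustrative, not part of this patch), assuming the
HOTPLUG=1/CONNECTOR=<id> uevent properties emitted by
drm_sysfs_connector_hotplug_event():

#include <libudev.h>
#include <poll.h>
#include <stdio.h>

int main(void)
{
	struct udev *udev = udev_new();
	struct udev_monitor *mon = udev_monitor_new_from_netlink(udev, "udev");
	struct pollfd pfd;

	/* Watch only DRM subsystem uevents. */
	udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
	udev_monitor_enable_receiving(mon);
	pfd.fd = udev_monitor_get_fd(mon);
	pfd.events = POLLIN;

	for (;;) {
		struct udev_device *dev;
		const char *conn;

		if (poll(&pfd, 1, -1) <= 0)
			continue;
		dev = udev_monitor_receive_device(mon);
		if (!dev)
			continue;
		/* Fine-grained events carry the connector object id. */
		conn = udev_device_get_property_value(dev, "CONNECTOR");
		if (conn)
			printf("reprobe connector id %s only\n", conn);
		else
			printf("no CONNECTOR property: full reprobe\n");
		udev_device_unref(dev);
	}
}
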
@@ -3909,6 +3936,9 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
caps = dm->backlight_caps[bl_idx];
dm->brightness[bl_idx] = user_brightness;
+ /* update scratch register */
+ if (bl_idx == 0)
+ amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
link = (struct dc_link *)dm->backlight_link[bl_idx];
@@ -4242,7 +4272,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
amdgpu_dm_update_connector_after_detect(aconnector);
register_backlight_device(dm, link);
-
+ if (dm->num_of_edps)
+ update_connector_ext_caps(aconnector);
if (psr_feature_enabled)
amdgpu_dm_set_psr_caps(link);
}
@@ -4250,6 +4281,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
}
+ /*
+ * Disable vblank IRQs aggressively for power-saving.
+ *
+ * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR
+ * is also supported.
+ */
+ adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled;
+
/* Software is initialized. Now we can register interrupt handlers. */
switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
@@ -6035,11 +6074,72 @@ static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
{
stream->timing.flags.DSC = 0;
- if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
- dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
- aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
- aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
- dsc_caps);
+ if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ sink->sink_signal == SIGNAL_TYPE_EDP)) {
+ if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
+ sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+ dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
+ dsc_caps);
+ }
+}
+
+static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
+ struct dc_sink *sink, struct dc_stream_state *stream,
+ struct dsc_dec_dpcd_caps *dsc_caps,
+ uint32_t max_dsc_target_bpp_limit_override)
+{
+ const struct dc_link_settings *verified_link_cap = NULL;
+ uint32_t link_bw_in_kbps;
+ uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
+ struct dc *dc = sink->ctx->dc;
+ struct dc_dsc_bw_range bw_range = {0};
+ struct dc_dsc_config dsc_cfg = {0};
+
+ verified_link_cap = dc_link_get_link_cap(stream->link);
+ link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
+ edp_min_bpp_x16 = 8 * 16;
+ edp_max_bpp_x16 = 8 * 16;
+
+ if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
+ edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
+
+ if (edp_max_bpp_x16 < edp_min_bpp_x16)
+ edp_min_bpp_x16 = edp_max_bpp_x16;
+
+ if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
+ dc->debug.dsc_min_slice_height_override,
+ edp_min_bpp_x16, edp_max_bpp_x16,
+ dsc_caps,
+ &stream->timing,
+ &bw_range)) {
+
+ if (bw_range.max_kbps < link_bw_in_kbps) {
+ if (dc_dsc_compute_config(dc->res_pool->dscs[0],
+ dsc_caps,
+ dc->debug.dsc_min_slice_height_override,
+ max_dsc_target_bpp_limit_override,
+ 0,
+ &stream->timing,
+ &dsc_cfg)) {
+ stream->timing.dsc_cfg = dsc_cfg;
+ stream->timing.flags.DSC = 1;
+ stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
+ }
+ return;
+ }
+ }
+
+ if (dc_dsc_compute_config(dc->res_pool->dscs[0],
+ dsc_caps,
+ dc->debug.dsc_min_slice_height_override,
+ max_dsc_target_bpp_limit_override,
+ link_bw_in_kbps,
+ &stream->timing,
+ &dsc_cfg)) {
+ stream->timing.dsc_cfg = dsc_cfg;
+ stream->timing.flags.DSC = 1;
}
}
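
To make the two branches above concrete, some illustrative numbers
(assumed, not from this patch): a 4-lane HBR2 eDP link carries
4 lanes * 5.4 Gbit/s * 8/10 (8b/10b) = 17.28 Gbit/s, i.e.
link_bw_in_kbps ~= 17,280,000. With the fixed 8 bpp target
(edp_min/max_bpp_x16 = 128), a ~594 MHz 4k60 timing compresses to
roughly 594,000 kHz * 8 ~= 4,752,000 kbps, so bw_range.max_kbps <
link_bw_in_kbps and the first branch programs DSC pinned at 8 bpp.
Only when even the 8 bpp rate would not fit does control fall through
to the second dc_dsc_compute_config() call, which sizes DSC against
link_bw_in_kbps instead.
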
@@ -6050,6 +6150,9 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
struct drm_connector *drm_connector = &aconnector->base;
uint32_t link_bandwidth_kbps;
uint32_t max_dsc_target_bpp_limit_override = 0;
+ struct dc *dc = sink->ctx->dc;
+ uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
+ uint32_t dsc_max_supported_bw_in_kbps;
link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
dc_link_get_link_cap(aconnector->dc_link));
@@ -6062,17 +6165,43 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_dsc_policy_set_enable_dsc_when_not_needed(
aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
- if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
+ dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
+
+ apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
- if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
+ } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
+ if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
dsc_caps,
aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
max_dsc_target_bpp_limit_override,
link_bandwidth_kbps,
&stream->timing,
&stream->timing.dsc_cfg)) {
- stream->timing.flags.DSC = 1;
- DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
+ stream->timing.flags.DSC = 1;
+ DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
+ __func__, drm_connector->name);
+ }
+ } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
+ timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+ max_supported_bw_in_kbps = link_bandwidth_kbps;
+ dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
+
+ if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
+ max_supported_bw_in_kbps > 0 &&
+ dsc_max_supported_bw_in_kbps > 0)
+ if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
+ dsc_caps,
+ aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
+ max_dsc_target_bpp_limit_override,
+ dsc_max_supported_bw_in_kbps,
+ &stream->timing,
+ &stream->timing.dsc_cfg)) {
+ stream->timing.flags.DSC = 1;
+ DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
+ __func__, drm_connector->name);
+ }
}
}
@@ -8216,15 +8345,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
break;
case DRM_MODE_CONNECTOR_DisplayPort:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
- if (link->is_dig_mapping_flexible &&
- link->dc->res_pool->funcs->link_encs_assign) {
- link->link_enc =
- link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
- if (!link->link_enc)
- link->link_enc =
- link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
- }
-
+ link->link_enc = dp_get_link_enc(link);
+ ASSERT(link->link_enc);
if (link->link_enc)
aconnector->base.ycbcr_420_allowed =
link->link_enc->features.dp_ycbcr420_supported ? true : false;
@@ -10627,6 +10749,24 @@ static int dm_update_plane_state(struct dc *dc,
return ret;
}
+static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
+ int *src_w, int *src_h)
+{
+ switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
+ case DRM_MODE_ROTATE_90:
+ case DRM_MODE_ROTATE_270:
+ *src_w = plane_state->src_h >> 16;
+ *src_h = plane_state->src_w >> 16;
+ break;
+ case DRM_MODE_ROTATE_0:
+ case DRM_MODE_ROTATE_180:
+ default:
+ *src_w = plane_state->src_w >> 16;
+ *src_h = plane_state->src_h >> 16;
+ break;
+ }
+}
+
static int dm_check_crtc_cursor(struct drm_atomic_state *state,
struct drm_crtc *crtc,
struct drm_crtc_state *new_crtc_state)
@@ -10635,6 +10775,8 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
struct drm_plane_state *new_cursor_state, *new_underlying_state;
int i;
int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
+ int cursor_src_w, cursor_src_h;
+ int underlying_src_w, underlying_src_h;
/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
* cursor per pipe but it's going to inherit the scaling and
@@ -10646,10 +10788,9 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
return 0;
}
- cursor_scale_w = new_cursor_state->crtc_w * 1000 /
- (new_cursor_state->src_w >> 16);
- cursor_scale_h = new_cursor_state->crtc_h * 1000 /
- (new_cursor_state->src_h >> 16);
+ dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
+ cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
+ cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
/* Narrow down to non-cursor planes on the same CRTC as the cursor */
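
A worked example of why the orientation helper matters (illustrative
numbers): a 1920x1080 surface shown rotated 90 degrees fills a
1080x1920 CRTC region. Naively, underlying_scale_w = 1080 * 1000 /
1920 = 562 and underlying_scale_h = 1920 * 1000 / 1080 = 1777, which
can never match an unscaled cursor's 1000/1000, so the atomic check
would always reject the cursor. With dm_get_oriented_plane_size()
swapping src_w/src_h for the rotated plane, both scales come out to
1080 * 1000 / 1080 = 1920 * 1000 / 1920 = 1000 and the comparison
behaves as intended.
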
@@ -10660,10 +10801,10 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
if (!new_underlying_state->fb)
continue;
- underlying_scale_w = new_underlying_state->crtc_w * 1000 /
- (new_underlying_state->src_w >> 16);
- underlying_scale_h = new_underlying_state->crtc_h * 1000 /
- (new_underlying_state->src_h >> 16);
+ dm_get_oriented_plane_size(new_underlying_state,
+ &underlying_src_w, &underlying_src_h);
+ underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
+ underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
if (cursor_scale_w != underlying_scale_w ||
cursor_scale_h != underlying_scale_h) {
@@ -10758,8 +10899,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
trace_amdgpu_dm_atomic_check_begin(state);
ret = drm_atomic_helper_check_modeset(dev, state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
goto fail;
+ }
/* Check connector changes */
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
@@ -10775,6 +10918,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
if (IS_ERR(new_crtc_state)) {
+ DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
ret = PTR_ERR(new_crtc_state);
goto fail;
}
@@ -10789,8 +10933,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
ret = add_affected_mst_dsc_crtcs(state, crtc);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
goto fail;
+ }
}
}
}
@@ -10805,19 +10951,25 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
continue;
ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
goto fail;
+ }
if (!new_crtc_state->enable)
continue;
ret = drm_atomic_add_affected_connectors(state, crtc);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
goto fail;
+ }
ret = drm_atomic_add_affected_planes(state, crtc);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
goto fail;
+ }
if (dm_old_crtc_state->dsc_force_changed)
new_crtc_state->mode_changed = true;
@@ -10854,6 +11006,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (IS_ERR(new_plane_state)) {
ret = PTR_ERR(new_plane_state);
+ DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
goto fail;
}
}
@@ -10866,8 +11019,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
new_plane_state,
false,
&lock_and_validation_needed);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
goto fail;
+ }
}
/* Disable all crtcs which require disable */
@@ -10877,8 +11032,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
new_crtc_state,
false,
&lock_and_validation_needed);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
goto fail;
+ }
}
/* Enable all crtcs which require enable */
@@ -10888,8 +11045,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
new_crtc_state,
true,
&lock_and_validation_needed);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
goto fail;
+ }
}
/* Add new/modified planes */
@@ -10899,20 +11058,26 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
new_plane_state,
true,
&lock_and_validation_needed);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
goto fail;
+ }
}
/* Run this here since we want to validate the streams we created */
ret = drm_atomic_helper_check_planes(dev, state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
goto fail;
+ }
/* Check cursor planes scaling */
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
goto fail;
+ }
}
if (state->legacy_cursor_update) {
@@ -10999,20 +11164,28 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
*/
if (lock_and_validation_needed) {
ret = dm_atomic_get_state(state, &dm_state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
goto fail;
+ }
ret = do_aquire_global_lock(dev, state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
goto fail;
+ }
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
+ if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
+ DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
goto fail;
+ }
ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
goto fail;
+ }
#endif
/*
@@ -11022,12 +11195,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
* to get stuck in an infinite loop and hang eventually.
*/
ret = drm_dp_mst_atomic_check(state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
goto fail;
- status = dc_validate_global_state(dc, dm_state->context, false);
+ }
+ status = dc_validate_global_state(dc, dm_state->context, true);
if (status != DC_OK) {
- drm_dbg_atomic(dev,
- "DC global validation failure: %s (%d)",
+ DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
dc_status_to_str(status), status);
ret = -EINVAL;
goto fail;
@@ -11149,7 +11323,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
input->offset = offset;
input->length = length;
- input->total_length = total_length;
+ input->cea_total_length = total_length;
memcpy(input->payload, data, length);
res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
@@ -11456,8 +11630,10 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
return value;
}
-int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
- uint8_t status_type, uint32_t *operation_result)
+static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
+ struct dc_context *ctx,
+ uint8_t status_type,
+ uint32_t *operation_result)
{
struct amdgpu_device *adev = ctx->driver_context;
int return_status = -1;
@@ -11528,3 +11704,24 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context
ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
(uint32_t *)operation_result);
}
+
+/*
+ * Check whether seamless boot is supported.
+ *
+ * So far we only support seamless boot on CHIP_VANGOGH.
+ * If everything goes well, we may consider expanding
+ * seamless boot to other ASICs.
+ */
+bool check_seamless_boot_capability(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_VANGOGH:
+ if (!adev->mman.keep_stolen_vga_memory)
+ return true;
+ break;
+ default:
+ break;
+ }
+
+ return false;
+}