Diffstat (limited to 'drivers/gpu/drm/amd/display/dc')
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 248
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_link.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 27
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c | 516
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c | 617
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h | 67
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h | 5
27 files changed, 975 insertions, 646 deletions
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 02943ca65807..cf1b5f354ae9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -122,7 +122,7 @@ static void rn_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
dpp_inst = clk_mgr->base.ctx->dc->res_pool->dpps[i]->inst;
dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
- prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[i];
+ prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[dpp_inst];
if (safe_to_lower || prev_dppclk_khz < dppclk_khz)
clk_mgr->dccg->funcs->update_dpp_dto(
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index 27501b735a9c..a2ade6e93f5e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -91,7 +91,8 @@ static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
- if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+ if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
+ dc_is_virtual_signal(pipe->stream->signal))) {
if (disable)
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
else
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 3121dd2d2a91..fc3af81ed6c6 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -122,7 +122,8 @@ static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
- if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+ if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
+ dc_is_virtual_signal(pipe->stream->signal))) {
if (disable)
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
else
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index e41a48f596a3..f14449401188 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2901,14 +2901,15 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg);
}
- if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
dc->hwss.interdependent_update_lock(dc, context, true);
- else
+ } else {
/* Lock the top pipe while updating plane addrs, since freesync requires
* plane addr update event triggers to be synchronized.
* top_pipe_to_program is expected to never be NULL
*/
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
+ }
// Stream updates
if (stream_update)
@@ -2924,10 +2925,11 @@ static void commit_planes_for_stream(struct dc *dc,
if (dc->hwss.program_front_end_for_ctx)
dc->hwss.program_front_end_for_ctx(dc, context);
- if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
dc->hwss.interdependent_update_lock(dc, context, false);
- else
+ } else {
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
+ }
dc->hwss.post_unlock_program_front_end(dc, context);
return;
}
@@ -3052,10 +3054,11 @@ static void commit_planes_for_stream(struct dc *dc,
}
- if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
dc->hwss.interdependent_update_lock(dc, context, false);
- else
+ } else {
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
+ }
if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 67ef357e5798..a789ea8af27f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -33,6 +33,7 @@
#include "gpio_service_interface.h"
#include "core_status.h"
#include "dc_link_dp.h"
+#include "dc_link_dpia.h"
#include "dc_link_ddc.h"
#include "link_hwss.h"
#include "opp.h"
@@ -240,7 +241,7 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
/* Link may not have physical HPD pin. */
if (link->ep_type != DISPLAY_ENDPOINT_PHY) {
- if (link->is_hpd_pending || !link->hpd_status)
+ if (link->is_hpd_pending || !dc_link_dpia_query_hpd_status(link))
*type = dc_connection_none;
else
*type = dc_connection_single;
@@ -1604,8 +1605,25 @@ static bool dc_link_construct_legacy(struct dc_link *link,
if (link->hpd_gpio) {
if (!link->dc->config.allow_edp_hotplug_detection)
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
- link->irq_source_hpd_rx =
- dal_irq_get_rx_source(link->hpd_gpio);
+
+ switch (link->dc->config.allow_edp_hotplug_detection) {
+ case 1: // only the 1st eDP handles hotplug
+ if (link->link_index == 0)
+ link->irq_source_hpd_rx =
+ dal_irq_get_rx_source(link->hpd_gpio);
+ else
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ break;
+ case 2: // only the 2nd eDP handles hotplug
+ if (link->link_index == 1)
+ link->irq_source_hpd_rx =
+ dal_irq_get_rx_source(link->hpd_gpio);
+ else
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ break;
+ default:
+ break;
+ }
}
break;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 340b5f90a82a..dc30ac366a50 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2783,31 +2783,37 @@ bool perform_link_training_with_retries(
struct dc_link *link = stream->link;
enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0;
- struct dc_link_settings current_setting = *link_setting;
+ struct dc_link_settings cur_link_settings = *link_setting;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
int fail_count = 0;
+ bool is_link_bw_low = false; /* link bandwidth < stream bandwidth */
+ bool is_link_bw_min = /* RBR x 1 */
+ (cur_link_settings.link_rate <= LINK_RATE_LOW) &&
+ (cur_link_settings.lane_count <= LANE_COUNT_ONE);
dp_trace_commit_lt_init(link);
- if (dp_get_link_encoding_format(&current_setting) == DP_8b_10b_ENCODING)
+ if (dp_get_link_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING)
/* We need to do this before the link training to ensure the idle
* pattern in SST mode will be sent right after the link training
*/
link_hwss->setup_stream_encoder(pipe_ctx);
dp_trace_set_lt_start_timestamp(link, false);
- for (j = 0; j < attempts; ++j) {
+ j = 0;
+ while (j < attempts && fail_count < (attempts * 10)) {
- DC_LOG_HW_LINK_TRAINING("%s: Beginning link training attempt %u of %d\n",
- __func__, (unsigned int)j + 1, attempts);
+ DC_LOG_HW_LINK_TRAINING("%s: Beginning link training attempt %u of %d @ rate(%d) x lane(%d)\n",
+ __func__, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
+ cur_link_settings.lane_count);
dp_enable_link_phy(
link,
&pipe_ctx->link_res,
signal,
pipe_ctx->clock_source->id,
- &current_setting);
+ &cur_link_settings);
if (stream->sink_patches.dppowerup_delay > 0) {
int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay;
@@ -2832,30 +2838,30 @@ bool perform_link_training_with_retries(
dp_set_panel_mode(link, panel_mode);
if (link->aux_access_disabled) {
- dc_link_dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &current_setting);
+ dc_link_dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &cur_link_settings);
return true;
} else {
/** @todo Consolidate USB4 DP and DPx.x training. */
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
status = dc_link_dpia_perform_link_training(link,
&pipe_ctx->link_res,
- &current_setting,
+ &cur_link_settings,
skip_video_pattern);
/* Transmit idle pattern once training successful. */
- if (status == LINK_TRAINING_SUCCESS)
+ if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low)
dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
} else {
status = dc_link_dp_perform_link_training(link,
&pipe_ctx->link_res,
- &current_setting,
+ &cur_link_settings,
skip_video_pattern);
}
dp_trace_lt_total_count_increment(link, false);
dp_trace_lt_result_update(link, status, false);
dp_trace_set_lt_end_timestamp(link, false);
- if (status == LINK_TRAINING_SUCCESS)
+ if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low)
return true;
}
@@ -2866,8 +2872,9 @@ bool perform_link_training_with_retries(
if (j == (attempts - 1) && link->ep_type == DISPLAY_ENDPOINT_PHY)
break;
- DC_LOG_WARNING("%s: Link training attempt %u of %d failed\n",
- __func__, (unsigned int)j + 1, attempts);
+ DC_LOG_WARNING("%s: Link training attempt %u of %d failed @ rate(%d) x lane(%d)\n",
+ __func__, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
+ cur_link_settings.lane_count);
dp_disable_link_phy(link, &pipe_ctx->link_res, signal);
@@ -2876,27 +2883,49 @@ bool perform_link_training_with_retries(
enum dc_connection_type type = dc_connection_none;
dc_link_detect_sink(link, &type);
- if (type == dc_connection_none)
+ if (type == dc_connection_none) {
+ DC_LOG_HW_LINK_TRAINING("%s: Aborting training because sink unplugged\n", __func__);
break;
- } else if (do_fallback) {
+ }
+ }
+
+ /* Try to train again at original settings if:
+ * - not falling back between training attempts;
+ * - aborted previous attempt due to reasons other than sink unplug;
+ * - successfully trained but at a link rate lower than that required by stream;
+ * - reached minimum link bandwidth.
+ */
+ if (!do_fallback || (status == LINK_TRAINING_ABORT) ||
+ (status == LINK_TRAINING_SUCCESS && is_link_bw_low) ||
+ is_link_bw_min) {
+ j++;
+ cur_link_settings = *link_setting;
+ delay_between_attempts += LINK_TRAINING_RETRY_DELAY;
+ is_link_bw_low = false;
+ is_link_bw_min = (cur_link_settings.link_rate <= LINK_RATE_LOW) &&
+ (cur_link_settings.lane_count <= LANE_COUNT_ONE);
+
+ } else if (do_fallback) { /* Try training at lower link bandwidth if doing fallback. */
uint32_t req_bw;
uint32_t link_bw;
- decide_fallback_link_setting(link, *link_setting, &current_setting, status);
- /* Fail link training if reduced link bandwidth no longer meets
- * stream requirements.
+ decide_fallback_link_setting(link, *link_setting, &cur_link_settings, status);
+ /* Flag if reduced link bandwidth no longer meets stream requirements or fallen back to
+ * minimum link bandwidth.
*/
req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
- link_bw = dc_link_bandwidth_kbps(link, &current_setting);
- if (req_bw > link_bw)
- break;
+ link_bw = dc_link_bandwidth_kbps(link, &cur_link_settings);
+ is_link_bw_low = (req_bw > link_bw);
+ is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) &&
+ (cur_link_settings.lane_count <= LANE_COUNT_ONE));
+
+ if (is_link_bw_low)
+ DC_LOG_WARNING("%s: Link bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n",
+ __func__, req_bw, link_bw);
}
msleep(delay_between_attempts);
-
- delay_between_attempts += LINK_TRAINING_RETRY_DELAY;
}
-
return false;
}
@@ -5097,13 +5126,16 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
return true;
}
-void dp_retrieve_lttpr_cap(struct dc_link *link)
+bool dp_retrieve_lttpr_cap(struct dc_link *link)
{
+ uint8_t lttpr_dpcd_data[8];
bool allow_lttpr_non_transparent_mode = 0;
+ bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable;
bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
enum dc_status status = DC_ERROR_UNEXPECTED;
+ bool is_lttpr_present = false;
- memset(link->lttpr_dpcd_data, '\0', sizeof(link->lttpr_dpcd_data));
+ memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
if ((link->dc->config.allow_lttpr_non_transparent_mode.bits.DP2_0 &&
link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)) {
@@ -5113,116 +5145,82 @@ void dp_retrieve_lttpr_cap(struct dc_link *link)
allow_lttpr_non_transparent_mode = 1;
}
- link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
- link->lttpr_support = LTTPR_UNSUPPORTED;
-
/*
- * Logic to determine LTTPR support
+ * Logic to determine LTTPR mode
*/
- if (vbios_lttpr_interop)
- link->lttpr_support = LTTPR_SUPPORTED;
- else if (link->dc->config.allow_lttpr_non_transparent_mode.raw == 0
- || !link->dc->caps.extended_aux_timeout_support)
- link->lttpr_support = LTTPR_UNSUPPORTED;
- else
- link->lttpr_support = LTTPR_CHECK_EXT_SUPPORT;
+ link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+ if (vbios_lttpr_enable && vbios_lttpr_interop)
+ link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
+ else if (!vbios_lttpr_enable && vbios_lttpr_interop) {
+ if (allow_lttpr_non_transparent_mode)
+ link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
+ else
+ link->lttpr_mode = LTTPR_MODE_TRANSPARENT;
+ } else if (!vbios_lttpr_enable && !vbios_lttpr_interop) {
+ if (!allow_lttpr_non_transparent_mode || !link->dc->caps.extended_aux_timeout_support)
+ link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+ else
+ link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
+ }
+#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Check DP tunnel LTTPR mode debug option. */
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
link->dc->debug.dpia_debug.bits.force_non_lttpr)
- link->lttpr_support = LTTPR_UNSUPPORTED;
+ link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+#endif
- if (link->lttpr_support > LTTPR_UNSUPPORTED) {
+ if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT || link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
/* By reading LTTPR capability, RX assumes that we will enable
* LTTPR extended aux timeout if LTTPR is present.
*/
status = core_link_read_dpcd(
link,
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
- link->lttpr_dpcd_data,
- sizeof(link->lttpr_dpcd_data));
- }
-}
-
-bool dp_parse_lttpr_mode(struct dc_link *link)
-{
- bool dpcd_allow_lttpr_non_transparent_mode = false;
- bool is_lttpr_present = false;
-
- bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable;
-
- if ((link->dc->config.allow_lttpr_non_transparent_mode.bits.DP2_0 &&
- link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)) {
- dpcd_allow_lttpr_non_transparent_mode = true;
- } else if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A &&
- !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
- dpcd_allow_lttpr_non_transparent_mode = true;
+ lttpr_dpcd_data,
+ sizeof(lttpr_dpcd_data));
+
+ link->dpcd_caps.lttpr_caps.revision.raw =
+ lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_link_rate =
+ lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt =
+ lttpr_dpcd_data[DP_PHY_REPEATER_CNT -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_lane_count =
+ lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.mode =
+ lttpr_dpcd_data[DP_PHY_REPEATER_MODE -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_ext_timeout =
+ lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+ link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw =
+ lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw =
+ lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
+ is_lttpr_present = (link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
+ link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
+ link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
+ if (is_lttpr_present) {
+ CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
+ configure_lttpr_mode_transparent(link);
+ } else
+ link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
}
-
- /*
- * Logic to determine LTTPR mode
- */
- if (link->lttpr_support == LTTPR_SUPPORTED)
- if (vbios_lttpr_enable)
- link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
- else if (dpcd_allow_lttpr_non_transparent_mode)
- link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
- else
- link->lttpr_mode = LTTPR_MODE_TRANSPARENT;
- else // lttpr_support == LTTPR_CHECK_EXT_SUPPORT
- if (dpcd_allow_lttpr_non_transparent_mode) {
- link->lttpr_support = LTTPR_SUPPORTED;
- link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
- } else {
- link->lttpr_support = LTTPR_UNSUPPORTED;
- }
-
- if (link->lttpr_support == LTTPR_UNSUPPORTED)
- return false;
-
- link->dpcd_caps.lttpr_caps.revision.raw =
- link->lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.max_link_rate =
- link->lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt =
- link->lttpr_dpcd_data[DP_PHY_REPEATER_CNT -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.max_lane_count =
- link->lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.mode =
- link->lttpr_dpcd_data[DP_PHY_REPEATER_MODE -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.max_ext_timeout =
- link->lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw =
- link->lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw =
- link->lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
-
- /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
- is_lttpr_present = (link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
- link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
- link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
- if (is_lttpr_present) {
- CONN_DATA_DETECT(link, link->lttpr_dpcd_data, sizeof(link->lttpr_dpcd_data), "LTTPR Caps: ");
- configure_lttpr_mode_transparent(link);
- } else
- link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
-
return is_lttpr_present;
}
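
For readability, here is a minimal standalone sketch (not driver code; the function name is hypothetical) of the LTTPR mode decision encoded in the hunk above, taking the same four inputs; the enum mirrors the driver's lttpr_mode values:

enum lttpr_mode { LTTPR_MODE_NON_LTTPR, LTTPR_MODE_TRANSPARENT, LTTPR_MODE_NON_TRANSPARENT };

static enum lttpr_mode decide_lttpr_mode(bool vbios_lttpr_enable, bool vbios_lttpr_interop,
					 bool allow_non_transparent, bool extended_aux_timeout)
{
	if (vbios_lttpr_enable && vbios_lttpr_interop)
		return LTTPR_MODE_NON_TRANSPARENT;
	if (!vbios_lttpr_enable && vbios_lttpr_interop)
		return allow_non_transparent ? LTTPR_MODE_NON_TRANSPARENT : LTTPR_MODE_TRANSPARENT;
	if (!vbios_lttpr_enable && !vbios_lttpr_interop)
		return (allow_non_transparent && extended_aux_timeout) ?
				LTTPR_MODE_NON_TRANSPARENT : LTTPR_MODE_NON_LTTPR;
	return LTTPR_MODE_NON_LTTPR;	/* enable set but not interop-aware: keep the default */
}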
@@ -5374,8 +5372,7 @@ static bool retrieve_link_cap(struct dc_link *link)
status = wa_try_to_wake_dprx(link, timeout_ms);
}
- dp_retrieve_lttpr_cap(link);
-
+ is_lttpr_present = dp_retrieve_lttpr_cap(link);
/* Read DP tunneling information. */
status = dpcd_get_tunneling_device_data(link);
@@ -5411,9 +5408,6 @@ static bool retrieve_link_cap(struct dc_link *link)
return false;
}
- if (link->lttpr_support > LTTPR_UNSUPPORTED)
- is_lttpr_present = dp_parse_lttpr_mode(link);
-
if (!is_lttpr_present)
dc_link_aux_try_to_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
index a5765f36d86f..1b7a8774b0c9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
@@ -34,6 +34,7 @@
#include "dm_helpers.h"
#include "dmub/inc/dmub_cmd.h"
#include "inc/link_dpcd.h"
+#include "dc_dmub_srv.h"
#define DC_LOGGER \
link->ctx->logger
@@ -69,6 +70,24 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
return status;
}
+bool dc_link_dpia_query_hpd_status(struct dc_link *link)
+{
+ union dmub_rb_cmd cmd = {0};
+ struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv;
+ bool is_hpd_high = false;
+
+ /* prepare QUERY_HPD command */
+ cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE;
+ cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1;
+ cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA;
+
+ /* Return HPD status reported by DMUB if query successfully executed. */
+ if (dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd) && cmd.query_hpd.data.status == AUX_RET_SUCCESS)
+ is_hpd_high = cmd.query_hpd.data.result;
+
+ return is_hpd_high;
+}
+
/* Configure link as prescribed in link_setting; set LTTPR mode; and
* Initialize link training settings.
* Abort link training if sink unplug detected.
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index e6b9c6a71841..5bc6ff2fa73e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -61,6 +61,8 @@ static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *pl
plane_state->blend_tf->type = TF_TYPE_BYPASS;
}
+ plane_state->pre_multiplied_alpha = true;
+
}
static void dc_plane_destruct(struct dc_plane_state *plane_state)
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 26c24db8f1da..3960c74482be 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.185"
+#define DC_VER "3.2.186"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -329,7 +329,7 @@ struct dc_config {
bool disable_dmcu;
bool enable_4to1MPC;
bool enable_windowed_mpo_odm;
- bool allow_edp_hotplug_detection;
+ uint32_t allow_edp_hotplug_detection;
bool clamp_min_dcfclk;
uint64_t vblank_alignment_dto_params;
uint8_t vblank_alignment_max_frame_time_diff;
@@ -1011,6 +1011,7 @@ struct dc_plane_state {
bool is_tiling_rotated;
bool per_pixel_alpha;
+ bool pre_multiplied_alpha;
bool global_alpha;
int global_alpha_value;
bool visible;
@@ -1045,6 +1046,7 @@ struct dc_plane_info {
bool horizontal_mirror;
bool visible;
bool per_pixel_alpha;
+ bool pre_multiplied_alpha;
bool global_alpha;
int global_alpha_value;
bool input_csc_enabled;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 251f2bbc96b9..a3c37ee3f849 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -129,8 +129,6 @@ struct dc_link {
bool link_state_valid;
bool aux_access_disabled;
bool sync_lt_in_progress;
- uint8_t lttpr_dpcd_data[8];
- enum lttpr_support lttpr_support;
enum lttpr_mode lttpr_mode;
bool is_internal_display;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 29e20d92b0bb..9e39cd7b203e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -87,7 +87,8 @@ static void release_engine(
engine->ddc = NULL;
- REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1);
+ REG_UPDATE_2(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1,
+ AUX_SW_USE_AUX_REG_REQ, 0);
}
#define SW_CAN_ACCESS_AUX 1
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 5e6fea85a7b5..845aa8a1027d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -1101,9 +1101,12 @@ static bool get_pixel_clk_frequency_100hz(
* not be programmed equal to DPREFCLK
*/
modulo_hz = REG_READ(MODULO[inst]);
- *pixel_clk_khz = div_u64((uint64_t)clock_hz*
- clock_source->ctx->dc->clk_mgr->dprefclk_khz*10,
- modulo_hz);
+ if (modulo_hz)
+ *pixel_clk_khz = div_u64((uint64_t)clock_hz*
+ clock_source->ctx->dc->clk_mgr->dprefclk_khz*10,
+ modulo_hz);
+ else
+ *pixel_clk_khz = 0;
} else {
/* NOTE: There is agreement with VBIOS here that MODULO is
* programmed equal to DPREFCLK, in which case PHASE will be
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index e02ac75afbf7..e3a62873c0e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2550,12 +2550,21 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
blnd_cfg.overlap_only = false;
blnd_cfg.global_gain = 0xff;
- if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
- blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
- } else if (per_pixel_alpha) {
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ if (per_pixel_alpha) {
+ /* DCN1.0 has output CM before MPC which seems to screw with
+ * pre-multiplied alpha.
+ */
+ blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
+ pipe_ctx->stream->output_color_space)
+ && pipe_ctx->plane_state->pre_multiplied_alpha);
+ if (pipe_ctx->plane_state->global_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+ } else {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ }
} else {
+ blnd_cfg.pre_multiplied_alpha = false;
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
}
@@ -2564,14 +2573,6 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
else
blnd_cfg.global_alpha = 0xff;
- /* DCN1.0 has output CM before MPC which seems to screw with
- * pre-multiplied alpha.
- */
- blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
- pipe_ctx->stream->output_color_space)
- && per_pixel_alpha;
-
-
/*
* TODO: remove hack
* Note: currently there is a bug in init_hw such that
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index e1f87bd72e4a..ec6aa8d8b251 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1773,7 +1773,6 @@ void dcn20_post_unlock_program_front_end(
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable) {
struct hubp *hubp = pipe->plane_res.hubp;
int j = 0;
@@ -2346,12 +2345,16 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
blnd_cfg.overlap_only = false;
blnd_cfg.global_gain = 0xff;
- if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
- blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
- } else if (per_pixel_alpha) {
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ if (per_pixel_alpha) {
+ blnd_cfg.pre_multiplied_alpha = pipe_ctx->plane_state->pre_multiplied_alpha;
+ if (pipe_ctx->plane_state->global_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+ } else {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ }
} else {
+ blnd_cfg.pre_multiplied_alpha = false;
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
}
@@ -2365,7 +2368,7 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
blnd_cfg.top_gain = 0x1f000;
blnd_cfg.bottom_inside_gain = 0x1f000;
blnd_cfg.bottom_outside_gain = 0x1f000;
- blnd_cfg.pre_multiplied_alpha = per_pixel_alpha;
+
if (pipe_ctx->plane_state->format
== SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA)
blnd_cfg.pre_multiplied_alpha = false;
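
As background for the pre_multiplied_alpha handling above, a self-contained sketch (illustration only, not driver code) of the per-component blend math the MPC flag selects between, with alpha in [0, 1]:

static float blend_component(float src, float dst, float alpha, bool pre_multiplied)
{
	if (pre_multiplied)
		return src + dst * (1.0f - alpha);	/* source already scaled by alpha */
	return src * alpha + dst * (1.0f - alpha);	/* straight (non-premultiplied) source */
}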
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index f5e8916601d3..b604fb26f288 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -28,6 +28,8 @@
#include "dc.h"
#include "dcn_calc_math.h"
+#include "dml/dcn30/dcn30_fpu.h"
+
#define REG(reg)\
optc1->tg_regs->reg
@@ -184,6 +186,14 @@ void optc3_set_dsc_config(struct timing_generator *optc,
}
+void optc3_set_vrr_m_const(struct timing_generator *optc,
+ double vtotal_avg)
+{
+ DC_FP_START();
+ optc3_fpu_set_vrr_m_const(optc, vtotal_avg);
+ DC_FP_END();
+}
+
void optc3_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 336b2ce6a636..1c1a67c4cec1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -84,6 +84,7 @@
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
+#include "dml/dcn30/dcn30_fpu.h"
#include "dml/dcn30/display_mode_vba_30.h"
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
@@ -91,137 +92,6 @@
#define DC_LOGGER_INIT(logger)
-struct _vcs_dpi_ip_params_st dcn3_0_ip = {
- .use_min_dcfclk = 0,
- .clamp_min_dcfclk = 0,
- .odm_capable = 1,
- .gpuvm_enable = 0,
- .hostvm_enable = 0,
- .gpuvm_max_page_table_levels = 4,
- .hostvm_max_page_table_levels = 4,
- .hostvm_cached_page_table_levels = 0,
- .pte_group_size_bytes = 2048,
- .num_dsc = 6,
- .rob_buffer_size_kbytes = 184,
- .det_buffer_size_kbytes = 184,
- .dpte_buffer_size_in_pte_reqs_luma = 84,
- .pde_proc_buffer_size_64k_reqs = 48,
- .dpp_output_buffer_pixels = 2560,
- .opp_output_buffer_lines = 1,
- .pixel_chunk_size_kbytes = 8,
- .pte_enable = 1,
- .max_page_table_levels = 2,
- .pte_chunk_size_kbytes = 2, // ?
- .meta_chunk_size_kbytes = 2,
- .writeback_chunk_size_kbytes = 8,
- .line_buffer_size_bits = 789504,
- .is_line_buffer_bpp_fixed = 0, // ?
- .line_buffer_fixed_bpp = 0, // ?
- .dcc_supported = true,
- .writeback_interface_buffer_size_kbytes = 90,
- .writeback_line_buffer_buffer_size = 0,
- .max_line_buffer_lines = 12,
- .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640
- .writeback_chroma_buffer_size_kbytes = 8,
- .writeback_chroma_line_buffer_width_pixels = 4,
- .writeback_max_hscl_ratio = 1,
- .writeback_max_vscl_ratio = 1,
- .writeback_min_hscl_ratio = 1,
- .writeback_min_vscl_ratio = 1,
- .writeback_max_hscl_taps = 1,
- .writeback_max_vscl_taps = 1,
- .writeback_line_buffer_luma_buffer_size = 0,
- .writeback_line_buffer_chroma_buffer_size = 14643,
- .cursor_buffer_size = 8,
- .cursor_chunk_size = 2,
- .max_num_otg = 6,
- .max_num_dpp = 6,
- .max_num_wb = 1,
- .max_dchub_pscl_bw_pix_per_clk = 4,
- .max_pscl_lb_bw_pix_per_clk = 2,
- .max_lb_vscl_bw_pix_per_clk = 4,
- .max_vscl_hscl_bw_pix_per_clk = 4,
- .max_hscl_ratio = 6,
- .max_vscl_ratio = 6,
- .hscl_mults = 4,
- .vscl_mults = 4,
- .max_hscl_taps = 8,
- .max_vscl_taps = 8,
- .dispclk_ramp_margin_percent = 1,
- .underscan_factor = 1.11,
- .min_vblank_lines = 32,
- .dppclk_delay_subtotal = 46,
- .dynamic_metadata_vm_enabled = true,
- .dppclk_delay_scl_lb_only = 16,
- .dppclk_delay_scl = 50,
- .dppclk_delay_cnvc_formatter = 27,
- .dppclk_delay_cnvc_cursor = 6,
- .dispclk_delay_subtotal = 119,
- .dcfclk_cstate_latency = 5.2, // SRExitTime
- .max_inter_dcn_tile_repeaters = 8,
- .odm_combine_4to1_supported = true,
-
- .xfc_supported = false,
- .xfc_fill_bw_overhead_percent = 10.0,
- .xfc_fill_constant_bytes = 0,
- .gfx7_compat_tiling_supported = 0,
- .number_of_cursors = 1,
-};
-
-struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc = {
- .clock_limits = {
- {
- .state = 0,
- .dispclk_mhz = 562.0,
- .dppclk_mhz = 300.0,
- .phyclk_mhz = 300.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 405.6,
- },
- },
- .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
- .num_states = 1,
- .sr_exit_time_us = 15.5,
- .sr_enter_plus_exit_time_us = 20,
- .urgent_latency_us = 4.0,
- .urgent_latency_pixel_data_only_us = 4.0,
- .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
- .urgent_latency_vm_data_only_us = 4.0,
- .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
- .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
- .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
- .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
- .max_avg_sdp_bw_use_normal_percent = 60.0,
- .max_avg_dram_bw_use_normal_percent = 40.0,
- .writeback_latency_us = 12.0,
- .max_request_size_bytes = 256,
- .fabric_datapath_to_dcn_data_return_bytes = 64,
- .dcn_downspread_percent = 0.5,
- .downspread_percent = 0.38,
- .dram_page_open_time_ns = 50.0,
- .dram_rw_turnaround_time_ns = 17.5,
- .dram_return_buffer_per_channel_bytes = 8192,
- .round_trip_ping_latency_dcfclk_cycles = 191,
- .urgent_out_of_order_return_per_channel_bytes = 4096,
- .channel_interleave_bytes = 256,
- .num_banks = 8,
- .gpuvm_min_page_size_bytes = 4096,
- .hostvm_min_page_size_bytes = 4096,
- .dram_clock_change_latency_us = 404,
- .dummy_pstate_latency_us = 5,
- .writeback_dram_clock_change_latency_us = 23.0,
- .return_bus_width_bytes = 64,
- .dispclk_dppclk_vco_speed_mhz = 3650,
- .xfc_bus_transport_time_us = 20, // ?
- .xfc_xbuf_latency_tolerance_us = 4, // ?
- .use_urgent_burst_bw = 1, // ?
- .do_urgent_latency_adjustment = true,
- .urgent_latency_adjustment_fabric_clock_component_us = 1.0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000,
-};
-
enum dcn30_clk_src_array_id {
DCN30_CLK_SRC_PLL0,
DCN30_CLK_SRC_PLL1,
@@ -1480,90 +1350,9 @@ int dcn30_populate_dml_pipes_from_context(
void dcn30_populate_dml_writeback_from_context(
struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
{
- int pipe_cnt, i, j;
- double max_calc_writeback_dispclk;
- double writeback_dispclk;
- struct writeback_st dout_wb;
-
- for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
- struct dc_stream_state *stream = res_ctx->pipe_ctx[i].stream;
-
- if (!stream)
- continue;
- max_calc_writeback_dispclk = 0;
-
- /* Set writeback information */
- pipes[pipe_cnt].dout.wb_enable = 0;
- pipes[pipe_cnt].dout.num_active_wb = 0;
- for (j = 0; j < stream->num_wb_info; j++) {
- struct dc_writeback_info *wb_info = &stream->writeback_info[j];
-
- if (wb_info->wb_enabled && wb_info->writeback_source_plane &&
- (wb_info->writeback_source_plane == res_ctx->pipe_ctx[i].plane_state)) {
- pipes[pipe_cnt].dout.wb_enable = 1;
- pipes[pipe_cnt].dout.num_active_wb++;
- dout_wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_en ?
- wb_info->dwb_params.cnv_params.crop_height :
- wb_info->dwb_params.cnv_params.src_height;
- dout_wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_en ?
- wb_info->dwb_params.cnv_params.crop_width :
- wb_info->dwb_params.cnv_params.src_width;
- dout_wb.wb_dst_width = wb_info->dwb_params.dest_width;
- dout_wb.wb_dst_height = wb_info->dwb_params.dest_height;
-
- /* For IP that doesn't support WB scaling, set h/v taps to 1 to avoid DML validation failure */
- if (dc->dml.ip.writeback_max_hscl_taps > 1) {
- dout_wb.wb_htaps_luma = wb_info->dwb_params.scaler_taps.h_taps;
- dout_wb.wb_vtaps_luma = wb_info->dwb_params.scaler_taps.v_taps;
- } else {
- dout_wb.wb_htaps_luma = 1;
- dout_wb.wb_vtaps_luma = 1;
- }
- dout_wb.wb_htaps_chroma = 0;
- dout_wb.wb_vtaps_chroma = 0;
- dout_wb.wb_hratio = wb_info->dwb_params.cnv_params.crop_en ?
- (double)wb_info->dwb_params.cnv_params.crop_width /
- (double)wb_info->dwb_params.dest_width :
- (double)wb_info->dwb_params.cnv_params.src_width /
- (double)wb_info->dwb_params.dest_width;
- dout_wb.wb_vratio = wb_info->dwb_params.cnv_params.crop_en ?
- (double)wb_info->dwb_params.cnv_params.crop_height /
- (double)wb_info->dwb_params.dest_height :
- (double)wb_info->dwb_params.cnv_params.src_height /
- (double)wb_info->dwb_params.dest_height;
- if (wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_ARGB ||
- wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_RGBA)
- dout_wb.wb_pixel_format = dm_444_64;
- else
- dout_wb.wb_pixel_format = dm_444_32;
-
- /* Workaround for cases where multiple writebacks are connected to same plane
- * In which case, need to compute worst case and set the associated writeback parameters
- * This workaround is necessary due to DML computation assuming only 1 set of writeback
- * parameters per pipe
- */
- writeback_dispclk = dml30_CalculateWriteBackDISPCLK(
- dout_wb.wb_pixel_format,
- pipes[pipe_cnt].pipe.dest.pixel_rate_mhz,
- dout_wb.wb_hratio,
- dout_wb.wb_vratio,
- dout_wb.wb_htaps_luma,
- dout_wb.wb_vtaps_luma,
- dout_wb.wb_src_width,
- dout_wb.wb_dst_width,
- pipes[pipe_cnt].pipe.dest.htotal,
- dc->current_state->bw_ctx.dml.ip.writeback_line_buffer_buffer_size);
-
- if (writeback_dispclk > max_calc_writeback_dispclk) {
- max_calc_writeback_dispclk = writeback_dispclk;
- pipes[pipe_cnt].dout.wb = dout_wb;
- }
- }
- }
-
- pipe_cnt++;
- }
-
+ DC_FP_START();
+ dcn30_fpu_populate_dml_writeback_from_context(dc, res_ctx, pipes);
+ DC_FP_END();
}
unsigned int dcn30_calc_max_scaled_time(
@@ -1598,7 +1387,7 @@ void dcn30_set_mcif_arb_params(
enum mmhubbub_wbif_mode wbif_mode;
struct display_mode_lib *dml = &context->bw_ctx.dml;
struct mcif_arb_params *wb_arb_params;
- int i, j, k, dwb_pipe;
+ int i, j, dwb_pipe;
/* Writeback MCIF_WB arbitration parameters */
dwb_pipe = 0;
@@ -1622,17 +1411,15 @@ void dcn30_set_mcif_arb_params(
else
wbif_mode = PACKED_444;
- for (k = 0; k < sizeof(wb_arb_params->cli_watermark)/sizeof(wb_arb_params->cli_watermark[0]); k++) {
- wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(dml, pipes, pipe_cnt) * 1000;
- wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
- }
+ DC_FP_START();
+ dcn30_fpu_set_mcif_arb_params(wb_arb_params, dml, pipes, pipe_cnt, j);
+ DC_FP_END();
wb_arb_params->time_per_pixel = (1000000 << 6) / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; /* time_per_pixel should be in u6.6 format */
wb_arb_params->slice_lines = 32;
wb_arb_params->arbitration_slice = 2; /* irrelevant since there is no YUV output */
wb_arb_params->max_scaled_time = dcn30_calc_max_scaled_time(wb_arb_params->time_per_pixel,
wbif_mode,
wb_arb_params->cli_watermark[0]); /* assume 4 watermark sets have the same value */
- wb_arb_params->dram_speed_change_duration = dml->vba.WritebackAllowDRAMClockChangeEndPosition[j] * pipes[0].clks_cfg.refclk_mhz; /* num_clock_cycles = us * MHz */
dwb_pipe++;
@@ -2111,178 +1898,11 @@ validate_out:
return out;
}
-/*
- * This must be noinline to ensure anything that deals with FP registers
- * is contained within this call; previously our compiling with hard-float
- * would result in fp instructions being emitted outside of the boundaries
- * of the DC_FP_START/END macros, which makes sense as the compiler has no
- * idea about what is wrapped and what is not
- *
- * This is largely just a workaround to avoid breakage introduced with 5.6,
- * ideally all fp-using code should be moved into its own file, only that
- * should be compiled with hard-float, and all code exported from there
- * should be strictly wrapped with DC_FP_START/END
- */
-static noinline void dcn30_calculate_wm_and_dlg_fp(
- struct dc *dc, struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int pipe_cnt,
- int vlevel)
+void dcn30_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
{
- int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
- int i, pipe_idx;
- double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][maxMpcComb];
- bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported;
-
- if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
- dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
-
- pipes[0].clks_cfg.voltage = vlevel;
- pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
- pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
-
- /* Set B:
- * DCFCLK: 1GHz or min required above 1GHz
- * FCLK/UCLK: Max
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
- if (vlevel == 0) {
- pipes[0].clks_cfg.voltage = 1;
- pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
- }
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- pipes[0].clks_cfg.voltage = vlevel;
- pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
-
- /* Set D:
- * DCFCLK: Min Required
- * FCLK(proportional to UCLK): 1GHz or Max
- * MALL stutter, sr_enter_exit = 4, sr_exit = 2us
- */
- /*
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- */
-
- /* Set C:
- * DCFCLK: Min Required
- * FCLK(proportional to UCLK): 1GHz or Max
- * pstate latency overridden to 5us
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
- unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
- unsigned int min_dram_speed_mts_margin = 160;
-
- if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_unsupported)
- min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16;
-
- /* find largest table entry that is lower than dram speed, but lower than DPM0 still uses DPM0 */
- for (i = 3; i > 0; i--)
- if (min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts)
- break;
-
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
- }
-
- context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- if (!pstate_en) {
- /* The only difference between A and C is p-state latency, if p-state is not supported we want to
- * calculate DLG based on dummy p-state latency, and max out the set A p-state watermark
- */
- context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
- } else {
- /* Set A:
- * DCFCLK: Min Required
- * FCLK(proportional to UCLK): 1GHz or Max
- *
- * Set A calculated last so that following calculations are based on Set A
- */
- dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
- context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- }
-
- context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
-
- /* Make set D = set A until set D is enabled */
- context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
-
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
-
- pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
- pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
-
- if (dc->config.forced_clocks) {
- pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
- pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
- }
- if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
- pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
- if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
- pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
-
- pipe_idx++;
- }
-
DC_FP_START();
- dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+ dcn30_fpu_update_soc_for_wm_a(dc, context);
DC_FP_END();
-
- if (!pstate_en)
- /* Restore full p-state latency */
- context->bw_ctx.dml.soc.dram_clock_change_latency_us =
- dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
-}
-
-void dcn30_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
-{
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
- }
}
void dcn30_calculate_wm_and_dlg(
@@ -2292,7 +1912,7 @@ void dcn30_calculate_wm_and_dlg(
int vlevel)
{
DC_FP_START();
- dcn30_calculate_wm_and_dlg_fp(dc, context, pipes, pipe_cnt, vlevel);
+ dcn30_fpu_calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
DC_FP_END();
}
@@ -2351,40 +1971,6 @@ validate_out:
return out;
}
-/*
- * This must be noinline to ensure anything that deals with FP registers
- * is contained within this call; previously our compiling with hard-float
- * would result in fp instructions being emitted outside of the boundaries
- * of the DC_FP_START/END macros, which makes sense as the compiler has no
- * idea about what is wrapped and what is not
- *
- * This is largely just a workaround to avoid breakage introduced with 5.6,
- * ideally all fp-using code should be moved into its own file, only that
- * should be compiled with hard-float, and all code exported from there
- * should be strictly wrapped with DC_FP_START/END
- */
-static noinline void dcn30_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
- unsigned int *optimal_dcfclk,
- unsigned int *optimal_fclk)
-{
- double bw_from_dram, bw_from_dram1, bw_from_dram2;
-
- bw_from_dram1 = uclk_mts * dcn3_0_soc.num_chans *
- dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_dram_bw_use_normal_percent / 100);
- bw_from_dram2 = uclk_mts * dcn3_0_soc.num_chans *
- dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100);
-
- bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
-
- if (optimal_fclk)
- *optimal_fclk = bw_from_dram /
- (dcn3_0_soc.fabric_datapath_to_dcn_data_return_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));
-
- if (optimal_dcfclk)
- *optimal_dcfclk = bw_from_dram /
- (dcn3_0_soc.return_bus_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));
-}
-
void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
unsigned int i, j;
@@ -2399,47 +1985,43 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
unsigned int num_dcfclk_sta_targets = 4;
unsigned int num_uclk_states;
+ struct dc_bounding_box_max_clk dcn30_bb_max_clk;
+
+ memset(&dcn30_bb_max_clk, 0, sizeof(dcn30_bb_max_clk));
+
if (dc->ctx->dc_bios->vram_info.num_chans)
dcn3_0_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
- if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
- dcn3_0_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
-
- dcn3_0_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
- dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ DC_FP_START();
+ dcn30_fpu_update_dram_channel_width_bytes(dc);
+ DC_FP_END();
if (bw_params->clk_table.entries[0].memclk_mhz) {
- int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0;
for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
- if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
- max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
- if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
- max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
- if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
- max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
- if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
- max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
+ if (bw_params->clk_table.entries[i].dcfclk_mhz > dcn30_bb_max_clk.max_dcfclk_mhz)
+ dcn30_bb_max_clk.max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
+ if (bw_params->clk_table.entries[i].dispclk_mhz > dcn30_bb_max_clk.max_dispclk_mhz)
+ dcn30_bb_max_clk.max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
+ if (bw_params->clk_table.entries[i].dppclk_mhz > dcn30_bb_max_clk.max_dppclk_mhz)
+ dcn30_bb_max_clk.max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
+ if (bw_params->clk_table.entries[i].phyclk_mhz > dcn30_bb_max_clk.max_phyclk_mhz)
+ dcn30_bb_max_clk.max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
}
- if (!max_dcfclk_mhz)
- max_dcfclk_mhz = dcn3_0_soc.clock_limits[0].dcfclk_mhz;
- if (!max_dispclk_mhz)
- max_dispclk_mhz = dcn3_0_soc.clock_limits[0].dispclk_mhz;
- if (!max_dppclk_mhz)
- max_dppclk_mhz = dcn3_0_soc.clock_limits[0].dppclk_mhz;
- if (!max_phyclk_mhz)
- max_phyclk_mhz = dcn3_0_soc.clock_limits[0].phyclk_mhz;
+ DC_FP_START();
+ dcn30_fpu_update_max_clk(&dcn30_bb_max_clk);
+ DC_FP_END();
- if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
+ if (dcn30_bb_max_clk.max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
// If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array
- dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;
+ dcfclk_sta_targets[num_dcfclk_sta_targets] = dcn30_bb_max_clk.max_dcfclk_mhz;
num_dcfclk_sta_targets++;
- } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
+ } else if (dcn30_bb_max_clk.max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
// If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates
for (i = 0; i < num_dcfclk_sta_targets; i++) {
- if (dcfclk_sta_targets[i] > max_dcfclk_mhz) {
- dcfclk_sta_targets[i] = max_dcfclk_mhz;
+ if (dcfclk_sta_targets[i] > dcn30_bb_max_clk.max_dcfclk_mhz) {
+ dcfclk_sta_targets[i] = dcn30_bb_max_clk.max_dcfclk_mhz;
break;
}
}
@@ -2452,7 +2034,7 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
// Calculate optimal dcfclk for each uclk
for (i = 0; i < num_uclk_states; i++) {
DC_FP_START();
- dcn30_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
+ dcn30_fpu_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
&optimal_dcfclk_for_uclk[i], NULL);
DC_FP_END();
if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) {
@@ -2479,7 +2061,7 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
- if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
+ if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= dcn30_bb_max_clk.max_dcfclk_mhz) {
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
} else {
@@ -2494,33 +2076,15 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
}
while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
- optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
+ optimal_dcfclk_for_uclk[j] <= dcn30_bb_max_clk.max_dcfclk_mhz) {
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
}
dcn3_0_soc.num_states = num_states;
- for (i = 0; i < dcn3_0_soc.num_states; i++) {
- dcn3_0_soc.clock_limits[i].state = i;
- dcn3_0_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
- dcn3_0_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
- dcn3_0_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
-
- /* Fill all states with max values of all other clocks */
- dcn3_0_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
- dcn3_0_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
- dcn3_0_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
- dcn3_0_soc.clock_limits[i].dtbclk_mhz = dcn3_0_soc.clock_limits[0].dtbclk_mhz;
- /* These clocks cannot come from bw_params, always fill from dcn3_0_soc[1] */
- /* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
- dcn3_0_soc.clock_limits[i].phyclk_d18_mhz = dcn3_0_soc.clock_limits[0].phyclk_d18_mhz;
- dcn3_0_soc.clock_limits[i].socclk_mhz = dcn3_0_soc.clock_limits[0].socclk_mhz;
- dcn3_0_soc.clock_limits[i].dscclk_mhz = dcn3_0_soc.clock_limits[0].dscclk_mhz;
- }
- /* re-init DML with updated bb */
- dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
- if (dc->current_state)
- dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
+ DC_FP_START();
+ dcn30_fpu_update_bw_bounding_box(dc, bw_params, &dcn30_bb_max_clk, dcfclk_mhz, dram_speed_mts);
+ DC_FP_END();
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
index b92e4cc0232f..3330a1026fa5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
@@ -35,6 +35,9 @@ struct dc;
struct resource_pool;
struct _vcs_dpi_display_pipe_params_st;
+extern struct _vcs_dpi_ip_params_st dcn3_0_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc;
+
struct dcn30_resource_pool {
struct resource_pool base;
};
@@ -96,4 +99,6 @@ enum dc_status dcn30_add_stream_to_ctx(
void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
+void dcn30_setup_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context);
+
#endif /* _DCN30_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 4daf8931aa7c..a5df74110284 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -81,6 +81,8 @@
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
+#include "dml/dcn30/dcn30_fpu.h"
+
#include "dml/dcn30/display_mode_vba_30.h"
#include "dml/dcn301/dcn301_fpu.h"
#include "vm_helper.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index f0938653bb88..f537888f4fa6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -43,6 +43,8 @@
#include "dcn20/dcn20_dsc.h"
#include "dcn20/dcn20_resource.h"
+#include "dml/dcn30/dcn30_fpu.h"
+
#include "dcn10/dcn10_resource.h"
#include "dce/dce_abm.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index 4fcbc0502808..76f863eb86ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -25,6 +25,8 @@
#include "dcn20/dcn20_dsc.h"
#include "dcn20/dcn20_resource.h"
+#include "dml/dcn30/dcn30_fpu.h"
+
#include "dcn10/dcn10_resource.h"
#include "dc_link_ddc.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index ccf1b71a8269..3d9f07d4770b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -36,6 +36,8 @@
#include "dcn20/dcn20_resource.h"
#include "dcn30/dcn30_resource.h"
+#include "dml/dcn30/dcn30_fpu.h"
+
#include "dcn10/dcn10_ipp.h"
#include "dcn30/dcn30_hubbub.h"
#include "dcn31/dcn31_hubbub.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index ee911452c048..a64b88ca01a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -71,6 +71,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(fram
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/dcn31_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn302/dcn302_fpu.o := $(dml_ccflags)
@@ -113,7 +114,7 @@ DML += dcn20/dcn20_fpu.o
DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o
DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o
DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o
-DML += dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o
+DML += dcn30/dcn30_fpu.o dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o
DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o
DML += dcn31/dcn31_fpu.o
DML += dcn301/dcn301_fpu.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
new file mode 100644
index 000000000000..574676a0711a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
@@ -0,0 +1,617 @@
+/*
+ * Copyright 2020-2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "resource.h"
+#include "clk_mgr.h"
+#include "reg_helper.h"
+#include "dcn_calc_math.h"
+#include "dcn20/dcn20_resource.h"
+#include "dcn30/dcn30_resource.h"
+
+
+#include "display_mode_vba_30.h"
+#include "dcn30_fpu.h"
+
+#define REG(reg)\
+ optc1->tg_regs->reg
+
+#define CTX \
+ optc1->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ optc1->tg_shift->field_name, optc1->tg_mask->field_name
+
+
+struct _vcs_dpi_ip_params_st dcn3_0_ip = {
+ .use_min_dcfclk = 0,
+ .clamp_min_dcfclk = 0,
+ .odm_capable = 1,
+ .gpuvm_enable = 0,
+ .hostvm_enable = 0,
+ .gpuvm_max_page_table_levels = 4,
+ .hostvm_max_page_table_levels = 4,
+ .hostvm_cached_page_table_levels = 0,
+ .pte_group_size_bytes = 2048,
+ .num_dsc = 6,
+ .rob_buffer_size_kbytes = 184,
+ .det_buffer_size_kbytes = 184,
+ .dpte_buffer_size_in_pte_reqs_luma = 84,
+ .pde_proc_buffer_size_64k_reqs = 48,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .pte_enable = 1,
+ .max_page_table_levels = 2,
+ .pte_chunk_size_kbytes = 2, // ?
+ .meta_chunk_size_kbytes = 2,
+ .writeback_chunk_size_kbytes = 8,
+ .line_buffer_size_bits = 789504,
+ .is_line_buffer_bpp_fixed = 0, // ?
+ .line_buffer_fixed_bpp = 0, // ?
+ .dcc_supported = true,
+ .writeback_interface_buffer_size_kbytes = 90,
+ .writeback_line_buffer_buffer_size = 0,
+ .max_line_buffer_lines = 12,
+ .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640
+ .writeback_chroma_buffer_size_kbytes = 8,
+ .writeback_chroma_line_buffer_width_pixels = 4,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 1,
+ .writeback_max_vscl_taps = 1,
+ .writeback_line_buffer_luma_buffer_size = 0,
+ .writeback_line_buffer_chroma_buffer_size = 14643,
+ .cursor_buffer_size = 8,
+ .cursor_chunk_size = 2,
+ .max_num_otg = 6,
+ .max_num_dpp = 6,
+ .max_num_wb = 1,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 6,
+ .max_vscl_ratio = 6,
+ .hscl_mults = 4,
+ .vscl_mults = 4,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dispclk_ramp_margin_percent = 1,
+ .underscan_factor = 1.11,
+ .min_vblank_lines = 32,
+ .dppclk_delay_subtotal = 46,
+ .dynamic_metadata_vm_enabled = true,
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_cnvc_formatter = 27,
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 119,
+ .dcfclk_cstate_latency = 5.2, // SRExitTime
+ .max_inter_dcn_tile_repeaters = 8,
+ .max_num_hdmi_frl_outputs = 1,
+ .odm_combine_4to1_supported = true,
+
+ .xfc_supported = false,
+ .xfc_fill_bw_overhead_percent = 10.0,
+ .xfc_fill_constant_bytes = 0,
+ .gfx7_compat_tiling_supported = 0,
+ .number_of_cursors = 1,
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc = {
+ .clock_limits = {
+ {
+ .state = 0,
+ .dispclk_mhz = 562.0,
+ .dppclk_mhz = 300.0,
+ .phyclk_mhz = 300.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 405.6,
+ },
+ },
+
+ .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
+ .num_states = 1,
+ .sr_exit_time_us = 15.5,
+ .sr_enter_plus_exit_time_us = 20,
+ .urgent_latency_us = 4.0,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
+ .max_avg_sdp_bw_use_normal_percent = 60.0,
+ .max_avg_dram_bw_use_normal_percent = 40.0,
+ .writeback_latency_us = 12.0,
+ .max_request_size_bytes = 256,
+ .fabric_datapath_to_dcn_data_return_bytes = 64,
+ .dcn_downspread_percent = 0.5,
+ .downspread_percent = 0.38,
+ .dram_page_open_time_ns = 50.0,
+ .dram_rw_turnaround_time_ns = 17.5,
+ .dram_return_buffer_per_channel_bytes = 8192,
+ .round_trip_ping_latency_dcfclk_cycles = 191,
+ .urgent_out_of_order_return_per_channel_bytes = 4096,
+ .channel_interleave_bytes = 256,
+ .num_banks = 8,
+ .gpuvm_min_page_size_bytes = 4096,
+ .hostvm_min_page_size_bytes = 4096,
+ .dram_clock_change_latency_us = 404,
+ .dummy_pstate_latency_us = 5,
+ .writeback_dram_clock_change_latency_us = 23.0,
+ .return_bus_width_bytes = 64,
+ .dispclk_dppclk_vco_speed_mhz = 3650,
+ .xfc_bus_transport_time_us = 20, // ?
+ .xfc_xbuf_latency_tolerance_us = 4, // ?
+ .use_urgent_burst_bw = 1, // ?
+ .do_urgent_latency_adjustment = true,
+ .urgent_latency_adjustment_fabric_clock_component_us = 1.0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000,
+};
+
+
+void optc3_fpu_set_vrr_m_const(struct timing_generator *optc,
+ double vtotal_avg)
+{
+	struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ double vtotal_min, vtotal_max;
+ double ratio, modulo, phase;
+ uint32_t vblank_start;
+ uint32_t v_total_mask_value = 0;
+
+ dc_assert_fp_enabled();
+
+ /* Compute VTOTAL_MIN and VTOTAL_MAX, so that
+	 * VTOTAL_MAX - VTOTAL_MIN = 1
+ */
+ v_total_mask_value = 16;
+ vtotal_min = dcn_bw_floor(vtotal_avg);
+ vtotal_max = dcn_bw_ceil(vtotal_avg);
+
+ /* Check that bottom VBLANK is at least 2 lines tall when running with
+ * VTOTAL_MIN. Note that VTOTAL registers are defined as 'total number
+ * of lines in a frame - 1'.
+ */
+ REG_GET(OTG_V_BLANK_START_END, OTG_V_BLANK_START,
+ &vblank_start);
+ ASSERT(vtotal_min >= vblank_start + 1);
+
+ /* Special case where the average frame rate can be achieved
+ * without using the DTO
+ */
+ if (vtotal_min == vtotal_max) {
+ REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL, (uint32_t)vtotal_min);
+
+ optc->funcs->set_vtotal_min_max(optc, 0, 0);
+ REG_SET(OTG_M_CONST_DTO0, 0, OTG_M_CONST_DTO_PHASE, 0);
+ REG_SET(OTG_M_CONST_DTO1, 0, OTG_M_CONST_DTO_MODULO, 0);
+ REG_UPDATE_3(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 0,
+ OTG_V_TOTAL_MAX_SEL, 0,
+ OTG_SET_V_TOTAL_MIN_MASK_EN, 0);
+ return;
+ }
+
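+	/* ratio is the fraction of frames that must use VTOTAL_MIN (rather than
+	 * VTOTAL_MAX) for the average frame length to hit vtotal_avg; it is
+	 * handed to the M_CONST DTO as a phase over a ~2^32 modulo.
+	 */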
+ ratio = vtotal_max - vtotal_avg;
+ modulo = 65536.0 * 65536.0 - 1.0; /* 2^32 - 1 */
+ phase = ratio * modulo;
+
+ /* Special cases where the DTO phase gets rounded to 0 or
+ * to DTO modulo
+ */
+ if (phase <= 0 || phase >= modulo) {
+ REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL,
+ phase <= 0 ?
+ (uint32_t)vtotal_max : (uint32_t)vtotal_min);
+ REG_SET(OTG_V_TOTAL_MIN, 0, OTG_V_TOTAL_MIN, 0);
+ REG_SET(OTG_V_TOTAL_MAX, 0, OTG_V_TOTAL_MAX, 0);
+ REG_SET(OTG_M_CONST_DTO0, 0, OTG_M_CONST_DTO_PHASE, 0);
+ REG_SET(OTG_M_CONST_DTO1, 0, OTG_M_CONST_DTO_MODULO, 0);
+ REG_UPDATE_3(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 0,
+ OTG_V_TOTAL_MAX_SEL, 0,
+ OTG_SET_V_TOTAL_MIN_MASK_EN, 0);
+ return;
+ }
+ REG_UPDATE_6(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 1,
+ OTG_V_TOTAL_MAX_SEL, 1,
+ OTG_SET_V_TOTAL_MIN_MASK_EN, 1,
+ OTG_SET_V_TOTAL_MIN_MASK, v_total_mask_value,
+ OTG_VTOTAL_MID_REPLACING_MIN_EN, 0,
+ OTG_VTOTAL_MID_REPLACING_MAX_EN, 0);
+ REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL, (uint32_t)vtotal_min);
+ optc->funcs->set_vtotal_min_max(optc, vtotal_min, vtotal_max);
+ REG_SET(OTG_M_CONST_DTO0, 0, OTG_M_CONST_DTO_PHASE, (uint32_t)phase);
+ REG_SET(OTG_M_CONST_DTO1, 0, OTG_M_CONST_DTO_MODULO, (uint32_t)modulo);
+}
+
+void dcn30_fpu_populate_dml_writeback_from_context(
+ struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
+{
+ int pipe_cnt, i, j;
+ double max_calc_writeback_dispclk;
+ double writeback_dispclk;
+ struct writeback_st dout_wb;
+
+ dc_assert_fp_enabled();
+
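+	/* Translate each stream's enabled DWB connections into DML writeback
+	 * parameters, keeping only the writeback that demands the highest
+	 * DISPCLK for a given pipe.
+	 */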
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_stream_state *stream = res_ctx->pipe_ctx[i].stream;
+
+ if (!stream)
+ continue;
+ max_calc_writeback_dispclk = 0;
+
+ /* Set writeback information */
+ pipes[pipe_cnt].dout.wb_enable = 0;
+ pipes[pipe_cnt].dout.num_active_wb = 0;
+ for (j = 0; j < stream->num_wb_info; j++) {
+ struct dc_writeback_info *wb_info = &stream->writeback_info[j];
+
+ if (wb_info->wb_enabled && wb_info->writeback_source_plane &&
+ (wb_info->writeback_source_plane == res_ctx->pipe_ctx[i].plane_state)) {
+ pipes[pipe_cnt].dout.wb_enable = 1;
+ pipes[pipe_cnt].dout.num_active_wb++;
+ dout_wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_en ?
+ wb_info->dwb_params.cnv_params.crop_height :
+ wb_info->dwb_params.cnv_params.src_height;
+ dout_wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_en ?
+ wb_info->dwb_params.cnv_params.crop_width :
+ wb_info->dwb_params.cnv_params.src_width;
+ dout_wb.wb_dst_width = wb_info->dwb_params.dest_width;
+ dout_wb.wb_dst_height = wb_info->dwb_params.dest_height;
+
+ /* For IP that doesn't support WB scaling, set h/v taps to 1 to avoid DML validation failure */
+ if (dc->dml.ip.writeback_max_hscl_taps > 1) {
+ dout_wb.wb_htaps_luma = wb_info->dwb_params.scaler_taps.h_taps;
+ dout_wb.wb_vtaps_luma = wb_info->dwb_params.scaler_taps.v_taps;
+ } else {
+ dout_wb.wb_htaps_luma = 1;
+ dout_wb.wb_vtaps_luma = 1;
+ }
+ dout_wb.wb_htaps_chroma = 0;
+ dout_wb.wb_vtaps_chroma = 0;
+ dout_wb.wb_hratio = wb_info->dwb_params.cnv_params.crop_en ?
+ (double)wb_info->dwb_params.cnv_params.crop_width /
+ (double)wb_info->dwb_params.dest_width :
+ (double)wb_info->dwb_params.cnv_params.src_width /
+ (double)wb_info->dwb_params.dest_width;
+ dout_wb.wb_vratio = wb_info->dwb_params.cnv_params.crop_en ?
+ (double)wb_info->dwb_params.cnv_params.crop_height /
+ (double)wb_info->dwb_params.dest_height :
+ (double)wb_info->dwb_params.cnv_params.src_height /
+ (double)wb_info->dwb_params.dest_height;
+ if (wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_ARGB ||
+ wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_RGBA)
+ dout_wb.wb_pixel_format = dm_444_64;
+ else
+ dout_wb.wb_pixel_format = dm_444_32;
+
+			/* Workaround for cases where multiple writebacks are connected to the same plane.
+			 * In that case, compute the worst case and set the associated writeback parameters.
+			 * This workaround is necessary because the DML computation assumes only one set of
+			 * writeback parameters per pipe.
+			 */
+ writeback_dispclk = dml30_CalculateWriteBackDISPCLK(
+ dout_wb.wb_pixel_format,
+ pipes[pipe_cnt].pipe.dest.pixel_rate_mhz,
+ dout_wb.wb_hratio,
+ dout_wb.wb_vratio,
+ dout_wb.wb_htaps_luma,
+ dout_wb.wb_vtaps_luma,
+ dout_wb.wb_src_width,
+ dout_wb.wb_dst_width,
+ pipes[pipe_cnt].pipe.dest.htotal,
+ dc->current_state->bw_ctx.dml.ip.writeback_line_buffer_buffer_size);
+
+ if (writeback_dispclk > max_calc_writeback_dispclk) {
+ max_calc_writeback_dispclk = writeback_dispclk;
+ pipes[pipe_cnt].dout.wb = dout_wb;
+ }
+ }
+ }
+
+ pipe_cnt++;
+ }
+}
+
+void dcn30_fpu_set_mcif_arb_params(struct mcif_arb_params *wb_arb_params,
+ struct display_mode_lib *dml,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int cur_pipe)
+{
+ int i;
+
+ dc_assert_fp_enabled();
+
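+	/* Program every arbitration watermark slot with the same DML-derived
+	 * writeback urgent and DRAM-clock-change watermarks, scaled by 1000
+	 * like the other DML watermark outputs in this file.
+	 */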
+ for (i = 0; i < sizeof(wb_arb_params->cli_watermark)/sizeof(wb_arb_params->cli_watermark[0]); i++) {
+ wb_arb_params->cli_watermark[i] = get_wm_writeback_urgent(dml, pipes, pipe_cnt) * 1000;
+ wb_arb_params->pstate_watermark[i] = get_wm_writeback_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
+ }
+
+ wb_arb_params->dram_speed_change_duration = dml->vba.WritebackAllowDRAMClockChangeEndPosition[cur_pipe] * pipes[0].clks_cfg.refclk_mhz; /* num_clock_cycles = us * MHz */
+}
+
+void dcn30_fpu_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
+{
+	dc_assert_fp_enabled();
+
+	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
+		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
+		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
+	}
+}
+
+void dcn30_fpu_calculate_wm_and_dlg(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel)
+{
+	int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
+ int i, pipe_idx;
+ double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][maxMpcComb];
+ bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported;
+
+	dc_assert_fp_enabled();
+
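+	/* Watermark sets B and C are computed first with their own latencies;
+	 * set A is computed last (or copied from C when p-state switching is
+	 * unsupported) so the DLG calculation below is based on set A.
+	 */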
+ if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
+ dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
+
+ pipes[0].clks_cfg.voltage = vlevel;
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
+ pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
+
+ /* Set B:
+ * DCFCLK: 1GHz or min required above 1GHz
+ * FCLK/UCLK: Max
+ */
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
+ if (vlevel == 0) {
+ pipes[0].clks_cfg.voltage = 1;
+ pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
+ }
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ pipes[0].clks_cfg.voltage = vlevel;
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
+
+ /* Set D:
+ * DCFCLK: Min Required
+ * FCLK(proportional to UCLK): 1GHz or Max
+ * MALL stutter, sr_enter_exit = 4, sr_exit = 2us
+ */
+ /*
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ */
+
+ /* Set C:
+ * DCFCLK: Min Required
+ * FCLK(proportional to UCLK): 1GHz or Max
+ * pstate latency overridden to 5us
+ */
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
+ unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
+ unsigned int min_dram_speed_mts_margin = 160;
+
+ if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_unsupported)
+ min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16;
+
+		/* Find the largest dummy p-state table entry whose DRAM speed is below
+		 * the minimum DRAM speed (plus margin); below DPM0 still uses DPM0.
+		 */
+ for (i = 3; i > 0; i--)
+ if (min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts)
+ break;
+
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
+
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
+ }
+
+ context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ if (!pstate_en) {
+		/* The only difference between A and C is p-state latency; if p-state is not
+		 * supported, calculate DLG based on the dummy p-state latency and max out
+		 * the set A p-state watermark.
+		 */
+ context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
+ } else {
+ /* Set A:
+ * DCFCLK: Min Required
+ * FCLK(proportional to UCLK): 1GHz or Max
+ *
+ * Set A calculated last so that following calculations are based on Set A
+ */
+ dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ }
+
+ context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
+
+ /* Make set D = set A until set D is enabled */
+ context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+
+ if (dc->config.forced_clocks) {
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
+ }
+ if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
+ if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
+
+ pipe_idx++;
+ }
+
+ DC_FP_START();
+ dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+ DC_FP_END();
+
+ if (!pstate_en)
+ /* Restore full p-state latency */
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+ dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+
+}
+
+void dcn30_fpu_update_dram_channel_width_bytes(struct dc *dc)
+{
+ dc_assert_fp_enabled();
+
+ if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
+ dcn3_0_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
+}
+
+void dcn30_fpu_update_max_clk(struct dc_bounding_box_max_clk *dcn30_bb_max_clk)
+{
+ dc_assert_fp_enabled();
+
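+	/* Any clock left at zero by the caller falls back to the state-0 value
+	 * from the static dcn3_0_soc bounding box.
+	 */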
+ if (!dcn30_bb_max_clk->max_dcfclk_mhz)
+ dcn30_bb_max_clk->max_dcfclk_mhz = dcn3_0_soc.clock_limits[0].dcfclk_mhz;
+ if (!dcn30_bb_max_clk->max_dispclk_mhz)
+ dcn30_bb_max_clk->max_dispclk_mhz = dcn3_0_soc.clock_limits[0].dispclk_mhz;
+ if (!dcn30_bb_max_clk->max_dppclk_mhz)
+ dcn30_bb_max_clk->max_dppclk_mhz = dcn3_0_soc.clock_limits[0].dppclk_mhz;
+ if (!dcn30_bb_max_clk->max_phyclk_mhz)
+ dcn30_bb_max_clk->max_phyclk_mhz = dcn3_0_soc.clock_limits[0].phyclk_mhz;
+}
+
+void dcn30_fpu_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
+ unsigned int *optimal_dcfclk,
+ unsigned int *optimal_fclk)
+{
+ double bw_from_dram, bw_from_dram1, bw_from_dram2;
+
+ dc_assert_fp_enabled();
+
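+	/* Usable bandwidth at this UCLK is the DRAM bandwidth derated by the
+	 * smaller of the DRAM and SDP "normal use" percentages; the optimal
+	 * FCLK and DCFCLK are then the clocks at which the fabric and DCN
+	 * return paths can sink that bandwidth over their bus widths.
+	 */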
+ bw_from_dram1 = uclk_mts * dcn3_0_soc.num_chans *
+ dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_dram_bw_use_normal_percent / 100);
+ bw_from_dram2 = uclk_mts * dcn3_0_soc.num_chans *
+ dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100);
+
+ bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
+
+ if (optimal_fclk)
+ *optimal_fclk = bw_from_dram /
+ (dcn3_0_soc.fabric_datapath_to_dcn_data_return_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));
+
+ if (optimal_dcfclk)
+ *optimal_dcfclk = bw_from_dram /
+ (dcn3_0_soc.return_bus_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));
+}
+
+void dcn30_fpu_update_bw_bounding_box(struct dc *dc,
+ struct clk_bw_params *bw_params,
+ struct dc_bounding_box_max_clk *dcn30_bb_max_clk,
+ unsigned int *dcfclk_mhz,
+ unsigned int *dram_speed_mts)
+{
+ unsigned int i;
+
+ dc_assert_fp_enabled();
+
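+	/* Keep the static soc table and the live DML copy in sync with the
+	 * DENTIST VCO frequency reported by the clock manager (kHz to MHz).
+	 */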
+ dcn3_0_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+
+ for (i = 0; i < dcn3_0_soc.num_states; i++) {
+ dcn3_0_soc.clock_limits[i].state = i;
+ dcn3_0_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
+ dcn3_0_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
+ dcn3_0_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
+
+ /* Fill all states with max values of all other clocks */
+ dcn3_0_soc.clock_limits[i].dispclk_mhz = dcn30_bb_max_clk->max_dispclk_mhz;
+ dcn3_0_soc.clock_limits[i].dppclk_mhz = dcn30_bb_max_clk->max_dppclk_mhz;
+ dcn3_0_soc.clock_limits[i].phyclk_mhz = dcn30_bb_max_clk->max_phyclk_mhz;
+ dcn3_0_soc.clock_limits[i].dtbclk_mhz = dcn3_0_soc.clock_limits[0].dtbclk_mhz;
+		/* These clocks cannot come from bw_params, always fill from dcn3_0_soc[0] */
+ /* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
+ dcn3_0_soc.clock_limits[i].phyclk_d18_mhz = dcn3_0_soc.clock_limits[0].phyclk_d18_mhz;
+ dcn3_0_soc.clock_limits[i].socclk_mhz = dcn3_0_soc.clock_limits[0].socclk_mhz;
+ dcn3_0_soc.clock_limits[i].dscclk_mhz = dcn3_0_soc.clock_limits[0].dscclk_mhz;
+ }
+ /* re-init DML with updated bb */
+ dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
+ if (dc->current_state)
+ dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
+
+}
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h
new file mode 100644
index 000000000000..dedfe7b5f173
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2020-2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN30_FPU_H__
+#define __DCN30_FPU_H__
+
+#include "core_types.h"
+#include "dcn20/dcn20_optc.h"
+
+void optc3_fpu_set_vrr_m_const(struct timing_generator *optc,
+ double vtotal_avg);
+
+void dcn30_fpu_populate_dml_writeback_from_context(
+ struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes);
+
+void dcn30_fpu_set_mcif_arb_params(struct mcif_arb_params *wb_arb_params,
+ struct display_mode_lib *dml,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int cur_pipe);
+
+void dcn30_fpu_update_soc_for_wm_a(struct dc *dc, struct dc_state *context);
+
+void dcn30_fpu_calculate_wm_and_dlg(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel);
+
+void dcn30_fpu_update_dram_channel_width_bytes(struct dc *dc);
+
+void dcn30_fpu_update_max_clk(struct dc_bounding_box_max_clk *dcn30_bb_max_clk);
+
+void dcn30_fpu_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
+ unsigned int *optimal_dcfclk,
+ unsigned int *optimal_fclk);
+
+void dcn30_fpu_update_bw_bounding_box(struct dc *dc,
+ struct clk_bw_params *bw_params,
+ struct dc_bounding_box_max_clk *dcn30_bb_max_clk,
+ unsigned int *dcfclk_mhz,
+ unsigned int *dram_speed_mts);
+
+
+#endif /* __DCN30_FPU_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 26f3a55c35d7..555d4d9e1454 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -486,4 +486,11 @@ struct dc_state {
} perf_params;
};
+struct dc_bounding_box_max_clk {
+ int max_dcfclk_mhz;
+ int max_dispclk_mhz;
+ int max_dppclk_mhz;
+ int max_phyclk_mhz;
+};
+
#endif /* _CORE_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 851b98299063..a3c1e9c56d8b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -217,8 +217,7 @@ void disable_dp_hpo_output(struct dc_link *link,
void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable);
bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx);
-void dp_retrieve_lttpr_cap(struct dc_link *link);
-bool dp_apply_lttpr_mode(struct dc_link *link);
+bool dp_retrieve_lttpr_cap(struct dc_link *link);
void edp_panel_backlight_power_on(struct dc_link *link);
void dp_receiver_power_ctrl(struct dc_link *link, bool on);
void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h
index 74dafd0f9d3d..39c1d1d07357 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h
@@ -87,6 +87,11 @@ union dpia_set_config_data {
*/
enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link);
+/* Query hot plug status of USB4 DP tunnel.
+ * Returns true if HPD high.
+ */
+bool dc_link_dpia_query_hpd_status(struct dc_link *link);
+
/* Train DP tunneling link for USB4 DPIA display endpoint.
 * DPIA equivalent of dc_link_dp_perform_link_training.
* Aborts link training upon detection of sink unplug.