author    Noah Abradjian <noah.abradjian@amd.com>    2019-09-27 16:30:57 -0400
committer Alex Deucher <alexander.deucher@amd.com>   2019-10-25 16:50:09 -0400
commit    1ea8751bd28d1ec2b36a56ec6bc1ac28903d09b4 (patch)
tree      317a09d3dd278b8630b38cc516a5b8d04c350bc4 /drivers/gpu/drm/amd/display
parent    drm/amd/display: Only use EETF when maxCL > max display (diff)
drm/amd/display: Make clk mgr the only dto update point
[Why]

* Clk Mgr DTO update point did not cover all needed updates, as it included a check for plane_state, which does not exist yet when the updater is called on driver startup.
* This resulted in another update path in the pipe programming sequence, based on a dppclk update flag.
* However, this alternate path allowed for stray DTO updates, some of which would occur in the wrong order during dppclk lowering and cause underflow.

[How]

* Remove the plane_state check and the use of plane_res.dpp->inst, getting rid of sequence dependencies (this results in extra DTO programming for unused pipes, but that does not cause issues and is a small cost).
* Allow DTOs to be updated even if the global clock is equal, to account for an edge case exposed by diags tests.
* Remove the update_dpp_dto call in the pipe programming sequence (the dppclk_control update stays there, as it is necessary and should not occur in clk mgr).
* Remove the call to optimize_bandwidth when committing state, as it is not needed and resulted in sporadic underflows even with the other fixes in place.

Signed-off-by: Noah Abradjian <noah.abradjian@amd.com>
Reviewed-by: Jun Lei <Jun.Lei@amd.com>
Acked-by: Leo Li <sunpeng.li@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
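The [How] changes hinge on one ordering rule in the clock manager: when dppclk is being lowered, the DPP DTOs are reprogrammed before the refclk drops; when it is being raised or left unchanged (the diags edge case), the refclk moves first and the DTOs are then written unconditionally. The snippet below is a minimal standalone sketch of that policy, not the driver code; dto_model_update(), program_dtos() and set_refclk() are hypothetical stand-ins for the surrounding update logic, dcn20_update_clocks_update_dpp_dto(), and dcn20_update_clocks_update_dentist()/rn_vbios_smu_set_dppclk() respectively.

/*
 * Hedged sketch only -- not the driver implementation. Models the DTO/refclk
 * ordering described in the commit message under simplified assumptions.
 */
#include <stdbool.h>
#include <stdio.h>

/* stand-in for dcn20_update_clocks_update_dpp_dto() */
static void program_dtos(int dppclk_khz)
{
	printf("  program DPP DTOs for %d kHz\n", dppclk_khz);
}

/* stand-in for dcn20_update_clocks_update_dentist() / rn_vbios_smu_set_dppclk() */
static void set_refclk(int dppclk_khz)
{
	printf("  set refclk for %d kHz\n", dppclk_khz);
}

static void dto_model_update(int cur_dppclk_khz, int new_dppclk_khz,
			     bool safe_to_lower)
{
	printf("update: %d kHz -> %d kHz\n", cur_dppclk_khz, new_dppclk_khz);

	if (new_dppclk_khz < cur_dppclk_khz) {
		/* lowering: raise the DTO dividers first, then drop refclk,
		 * and only if the caller says lowering is safe */
		if (safe_to_lower) {
			program_dtos(new_dppclk_khz);
			set_refclk(new_dppclk_khz);
		}
	} else {
		/* raising or unchanged: bump refclk first, then always write
		 * the DTOs -- the >= comparison covers the equal-clock edge
		 * case the patch calls out */
		set_refclk(new_dppclk_khz);
		program_dtos(new_dppclk_khz);
	}
}

int main(void)
{
	dto_model_update(600000, 400000, true);  /* lowering */
	dto_model_update(400000, 600000, true);  /* raising */
	dto_model_update(600000, 600000, true);  /* unchanged: DTOs still updated */
	return 0;
}

Running the three calls in main() prints the two-step sequences in the order the patch enforces, including a DTO write for the equal-clock case.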
Diffstat (limited to 'drivers/gpu/drm/amd/display')
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c  14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c      3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c                       4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c             8
4 files changed, 12 insertions, 17 deletions
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 607d8afc56ec..25d7b7c6681c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -108,11 +108,12 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
int dpp_inst, dppclk_khz;
- if (!context->res_ctx.pipe_ctx[i].plane_state)
- continue;
-
- dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
+ /* Loop index will match dpp->inst if resource exists,
+ * and we want to avoid dependency on dpp object
+ */
+ dpp_inst = i;
dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+
clk_mgr->dccg->funcs->update_dpp_dto(
clk_mgr->dccg, dpp_inst, dppclk_khz);
}
@@ -235,6 +236,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
update_dispclk = true;
}
+
if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
if (dpp_clock_lowered) {
// if clock is being lowered, increase DTO before lowering refclk
@@ -244,10 +246,12 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
// if clock is being raised, increase refclk before lowering DTO
if (update_dppclk || update_dispclk)
dcn20_update_clocks_update_dentist(clk_mgr);
- if (update_dppclk)
+ // always update dtos unless clock is lowered and not safe to lower
+ if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
}
}
+
if (update_dispclk &&
dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
/*update dmcu for wait_loop count*/
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index f64d221ad6f1..790a2d211bd6 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -171,7 +171,8 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
// if clock is being raised, increase refclk before lowering DTO
if (update_dppclk || update_dispclk)
rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
- if (update_dppclk)
+ // always update dtos unless clock is lowered and not safe to lower
+ if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 5e487bb82861..0a443348df10 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1241,10 +1241,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc_enable_stereo(dc, context, dc_streams, context->stream_count);
- if (!dc->optimize_seamless_boot)
- /* pplib is notified if disp_num changed */
- dc->hwss.optimize_bandwidth(dc, context);
-
for (i = 0; i < context->stream_count; i++)
context->streams[i]->mode_changed = false;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index e237ec39d193..921a36668ced 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1202,15 +1202,9 @@ static void dcn20_update_dchubp_dpp(
struct dpp *dpp = pipe_ctx->plane_res.dpp;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
- if (pipe_ctx->update_flags.bits.dppclk) {
+ if (pipe_ctx->update_flags.bits.dppclk)
dpp->funcs->dpp_dppclk_control(dpp, false, true);
- dc->res_pool->dccg->funcs->update_dpp_dto(
- dc->res_pool->dccg,
- dpp->inst,
- pipe_ctx->plane_res.bw.dppclk_khz);
- }
-
/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
* VTG is within DCHUBBUB which is commond block share by each pipe HUBP.
* VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG