author     Alvin Lee <alvin.lee2@amd.com>  2023-08-10 11:50:52 -0400
committer  Alex Deucher <alexander.deucher@amd.com>  2023-08-30 14:59:14 -0400
commit     0b9dc439f4046ef9e43f54989f6c3ff6cddc6d1b (patch)
tree       0097a2d4ffa00fc469b78031cb49856593ac84c3
parent     drm/amd/display: set minimum of VBlank_nom (diff)
drm/amd/display: Write flip addr to scratch reg for subvp
[Description]
SubVP needs to "calculate" the earliest in-use META address by using the
current primary / meta addresses, but this leads to a race condition where
FW and driver can read/write the address at the same time and intermittently
produce inconsistent address offsets. To mitigate this issue without locking
(too slow), save each surface flip addr into scratch registers and use this
to keep track of the earliest in-use META address.

Reviewed-by: Jun Lei <jun.lei@amd.com>
Acked-by: Wayne Lin <wayne.lin@amd.com>
Signed-off-by: Alvin Lee <alvin.lee2@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
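The mitigation is a lock-free double-buffered ("ping-pong") publish through the DMCUB scratch registers. Below is a minimal C sketch of one plausible reading of that scheme for SubVP pipe 0, mirroring the register layout dmub_dcn32_save_surf_addr() uses for subvp_index 0; reg_read()/reg_write() and save_flip_addr_pipe0() are illustrative stand-ins, not names from the patch.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for the DMUB register accessors (REG_READ/REG_WRITE). */
extern uint32_t reg_read(uint32_t reg);
extern void reg_write(uint32_t reg, uint32_t val);

/* Scratch layout the patch picks for SubVP pipe 0. */
#define SCRATCH_ADDR_BANK_A   9u  /* DMCUB_SCRATCH9:  primary addr, bank A */
#define SCRATCH_META_BANK_A  11u  /* DMCUB_SCRATCH11: meta addr,    bank A */
#define SCRATCH_ADDR_BANK_B  12u  /* DMCUB_SCRATCH12: primary addr, bank B */
#define SCRATCH_META_BANK_B  13u  /* DMCUB_SCRATCH13: meta addr,    bank B */
#define SCRATCH_BANK_INDEX   15u  /* DMCUB_SCRATCH15: which bank was written last */

/* Publish the low 32 bits of a flip's primary/meta pair without a lock:
 * write both halves into one scratch bank, then toggle the index register
 * last, so a reader that follows the index never pairs a primary address
 * from one flip with a meta address from another. */
static void save_flip_addr_pipe0(uint32_t primary_lo, uint32_t meta_lo)
{
	bool bank = reg_read(SCRATCH_BANK_INDEX) != 0;

	if (bank) {
		reg_write(SCRATCH_ADDR_BANK_A, primary_lo);
		reg_write(SCRATCH_META_BANK_A, meta_lo);
	} else {
		reg_write(SCRATCH_ADDR_BANK_B, primary_lo);
		reg_write(SCRATCH_META_BANK_B, meta_lo);
	}
	reg_write(SCRATCH_BANK_INDEX, bank ? 0u : 1u);
}

In the patch itself, SubVP pipe 1 follows the same pattern with DMCUB_SCRATCH18/19 and SCRATCH20/22 as the two banks and SCRATCH23 as the index.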
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c  | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c           |  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h           |  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c     | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c   | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h        |  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h      | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/dmub_srv.h            | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c      | 30
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h      |  7
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c        | 10
11 files changed, 134 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index f99ec1b0efaf..a50df7126e39 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -29,6 +29,8 @@
#include "hw_sequencer.h"
#include "hw_sequencer_private.h"
#include "basics/dc_common.h"
+#include "resource.h"
+#include "dc_dmub_srv.h"
#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
@@ -530,6 +532,15 @@ void hwss_build_fast_sequence(struct dc *dc,
block_sequence[*num_steps].params.update_plane_addr_params.pipe_ctx = current_mpc_pipe;
block_sequence[*num_steps].func = HUBP_UPDATE_PLANE_ADDR;
(*num_steps)++;
+
+ if (resource_is_pipe_type(current_mpc_pipe, OTG_MASTER) &&
+ current_mpc_pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ block_sequence[*num_steps].params.subvp_save_surf_addr.dc_dmub_srv = dc->ctx->dmub_srv;
+ block_sequence[*num_steps].params.subvp_save_surf_addr.addr = &current_mpc_pipe->plane_state->address;
+ block_sequence[*num_steps].params.subvp_save_surf_addr.subvp_index = current_mpc_pipe->subvp_index;
+ block_sequence[*num_steps].func = DMUB_SUBVP_SAVE_SURF_ADDR;
+ (*num_steps)++;
+ }
}
if (hws->funcs.set_input_transfer_func && current_mpc_pipe->plane_state->update_flags.bits.gamma_change) {
@@ -697,6 +708,9 @@ void hwss_execute_sequence(struct dc *dc,
case DMUB_SEND_DMCUB_CMD:
hwss_send_dmcub_cmd(params);
break;
+ case DMUB_SUBVP_SAVE_SURF_ADDR:
+ hwss_subvp_save_surf_addr(params);
+ break;
default:
ASSERT(false);
break;
@@ -789,6 +803,15 @@ void hwss_set_ocsc_default(union block_sequence_params *params)
ocsc_mode);
}
+void hwss_subvp_save_surf_addr(union block_sequence_params *params)
+{
+ struct dc_dmub_srv *dc_dmub_srv = params->subvp_save_surf_addr.dc_dmub_srv;
+ const struct dc_plane_address *addr = params->subvp_save_surf_addr.addr;
+ uint8_t subvp_index = params->subvp_save_surf_addr.subvp_index;
+
+ dc_dmub_srv_subvp_save_surf_addr(dc_dmub_srv, addr, subvp_index);
+}
+
void get_mclk_switch_visual_confirm_color(
struct dc *dc,
struct dc_state *context,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 4c5ef3ef8dbd..f32b5c71a66b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -1054,4 +1054,9 @@ void dc_dmub_srv_enable_dpia_trace(const struct dc *dc)
}
DC_LOG_DEBUG("Enabled DPIA trace\n");
+}
+
+void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, const struct dc_plane_address *addr, uint8_t subvp_index)
+{
+ dmub_srv_subvp_save_surf_addr(dc_dmub_srv->dmub, addr, subvp_index);
}
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index bb3fe162dd93..2ebd4717f4a2 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -89,5 +89,6 @@ void dc_send_update_cursor_info_to_dmu(struct pipe_ctx *pCtx, uint8_t pipe_idx);
bool dc_dmub_check_min_version(struct dmub_srv *srv);
void dc_dmub_srv_enable_dpia_trace(const struct dc *dc);
+void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, const struct dc_plane_address *addr, uint8_t subvp_index);
#endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 65fa9e21ad9c..d3caba52d2fc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1679,8 +1679,18 @@ static void dcn20_update_dchubp_dpp(
if (pipe_ctx->update_flags.bits.enable ||
pipe_ctx->update_flags.bits.plane_changed ||
- plane_state->update_flags.bits.addr_update)
+ plane_state->update_flags.bits.addr_update) {
+ if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) &&
+ pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) {
+ union block_sequence_params params;
+
+ params.subvp_save_surf_addr.dc_dmub_srv = dc->ctx->dmub_srv;
+ params.subvp_save_surf_addr.addr = &pipe_ctx->plane_state->address;
+ params.subvp_save_surf_addr.subvp_index = pipe_ctx->subvp_index;
+ hwss_subvp_save_surf_addr(&params);
+ }
hws->funcs.update_plane_addr(dc, pipe_ctx);
+ }
if (pipe_ctx->update_flags.bits.enable)
hubp->funcs->set_blank(hubp, false);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 711d4085b33b..b3252db43ecb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -1144,6 +1144,23 @@ static bool subvp_validate_static_schedulability(struct dc *dc,
return schedulable;
}
+static void assign_subvp_index(struct dc *dc, struct dc_state *context)
+{
+ int i;
+ int index = 0;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) &&
+ pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) {
+ pipe_ctx->subvp_index = index++;
+ } else {
+ pipe_ctx->subvp_index = 0;
+ }
+ }
+}
+
static void dcn32_full_validate_bw_helper(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -1294,6 +1311,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
vba->VoltageLevel = *vlevel;
// Note: We can't apply the phantom pipes to hardware at this time. We have to wait
// until driver has acquired the DMCUB lock to do it safely.
+ assign_subvp_index(dc, context);
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 027aec70c070..7f9c75ffda18 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -408,6 +408,8 @@ struct pipe_ctx {
union pipe_update_flags update_flags;
struct tg_color visual_confirm_color;
bool has_vactive_margin;
+ /* subvp_index: only valid if the pipe is a SUBVP_MAIN */
+ uint8_t subvp_index;
};
/* Data used for dynamic link encoder assignment.
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 02ff99f7bec2..ca064e70c22f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -134,6 +134,12 @@ struct set_ocsc_default_params {
enum mpc_output_csc_mode ocsc_mode;
};
+struct subvp_save_surf_addr {
+ struct dc_dmub_srv *dc_dmub_srv;
+ const struct dc_plane_address *addr;
+ uint8_t subvp_index;
+};
+
union block_sequence_params {
struct update_plane_addr_params update_plane_addr_params;
struct subvp_pipe_control_lock_fast_params subvp_pipe_control_lock_fast_params;
@@ -151,6 +157,7 @@ union block_sequence_params {
struct power_on_mpc_mem_pwr_params power_on_mpc_mem_pwr_params;
struct set_output_csc_params set_output_csc_params;
struct set_ocsc_default_params set_ocsc_default_params;
+ struct subvp_save_surf_addr subvp_save_surf_addr;
};
enum block_sequence_func {
@@ -170,6 +177,7 @@ enum block_sequence_func {
MPC_POWER_ON_MPC_MEM_PWR,
MPC_SET_OUTPUT_CSC,
MPC_SET_OCSC_DEFAULT,
+ DMUB_SUBVP_SAVE_SURF_ADDR,
};
struct block_sequence {
@@ -471,4 +479,6 @@ void hwss_set_output_csc(union block_sequence_params *params);
void hwss_set_ocsc_default(union block_sequence_params *params);
+void hwss_subvp_save_surf_addr(union block_sequence_params *params);
+
#endif /* __DC_HW_SEQUENCER_H__ */
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 43676c1c81d9..e7a50cbf2540 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -401,6 +401,8 @@ struct dmub_srv_hw_funcs {
bool (*should_detect)(struct dmub_srv *dmub);
void (*init_reg_offsets)(struct dmub_srv *dmub, struct dc_context *ctx);
+
+ void (*subvp_save_surf_addr)(struct dmub_srv *dmub, const struct dc_plane_address *addr, uint8_t subvp_index);
};
/**
@@ -835,6 +837,21 @@ enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t ti
*/
enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub);
+/**
+ * dmub_srv_subvp_save_surf_addr() - Save primary and meta address for subvp on each flip
+ * @dmub: The dmub service
+ * @addr: The surface address to be programmed on the current flip
+ * @subvp_index: Index of subvp pipe, indicates which subvp pipe the address should be saved for
+ *
+ * Function to save the surface flip addr into scratch registers. This is to fix a race condition
+ * between FW and driver reading / writing to the surface address at the same time. This is
+ * required because there is no EARLIEST_IN_USE_META.
+ *
+ * Return:
+ * void
+ */
+void dmub_srv_subvp_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_address *addr, uint8_t subvp_index);
+
#if defined(__cplusplus)
}
#endif
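The kernel-doc above spells out the contract for dmub_srv_subvp_save_surf_addr(); the DC layer reaches it through the dc_dmub_srv_subvp_save_surf_addr() wrapper. As a hedged illustration, a flip-path caller following the pattern this patch adds in hwss_build_fast_sequence() and dcn20_update_dchubp_dpp() might look like the sketch below; example_publish_flip_addr() is a hypothetical wrapper, and the usual DC headers (core_types.h, resource.h, dc_dmub_srv.h) are assumed to be in scope.

/* Hypothetical caller sketch: only an OTG-master pipe driving a SUBVP_MAIN
 * stream publishes its flip address, keyed by the pipe's subvp_index. */
static void example_publish_flip_addr(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) &&
	    pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN)
		dc_dmub_srv_subvp_save_surf_addr(dc->ctx->dmub_srv,
						 &pipe_ctx->plane_state->address,
						 pipe_ctx->subvp_index);
}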
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
index 8f427047ac40..2daa1e0c8061 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
@@ -27,6 +27,7 @@
#include "dmub_reg.h"
#include "dmub_dcn32.h"
#include "dc/dc_types.h"
+#include "dc_hw_types.h"
#include "dcn/dcn_3_2_0_offset.h"
#include "dcn/dcn_3_2_0_sh_mask.h"
@@ -506,3 +507,32 @@ uint32_t dmub_dcn32_read_inbox0_ack_register(struct dmub_srv *dmub)
{
return REG_READ(DMCUB_SCRATCH17);
}
+
+void dmub_dcn32_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_address *addr, uint8_t subvp_index)
+{
+ uint32_t index = 0;
+
+ if (subvp_index == 0) {
+ index = REG_READ(DMCUB_SCRATCH15);
+ if (index) {
+ REG_WRITE(DMCUB_SCRATCH9, addr->grph.addr.low_part);
+ REG_WRITE(DMCUB_SCRATCH11, addr->grph.meta_addr.low_part);
+ } else {
+ REG_WRITE(DMCUB_SCRATCH12, addr->grph.addr.low_part);
+ REG_WRITE(DMCUB_SCRATCH13, addr->grph.meta_addr.low_part);
+ }
+ REG_WRITE(DMCUB_SCRATCH15, !index);
+ } else if (subvp_index == 1) {
+ index = REG_READ(DMCUB_SCRATCH23);
+ if (index) {
+ REG_WRITE(DMCUB_SCRATCH18, addr->grph.addr.low_part);
+ REG_WRITE(DMCUB_SCRATCH19, addr->grph.meta_addr.low_part);
+ } else {
+ REG_WRITE(DMCUB_SCRATCH20, addr->grph.addr.low_part);
+ REG_WRITE(DMCUB_SCRATCH22, addr->grph.meta_addr.low_part);
+ }
+ REG_WRITE(DMCUB_SCRATCH23, !index);
+ } else {
+ return;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
index 38e47065e00e..b0cd8d29402f 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
@@ -107,6 +107,12 @@ struct dmub_srv;
DMUB_SR(DMCUB_SCRATCH15) \
DMUB_SR(DMCUB_SCRATCH16) \
DMUB_SR(DMCUB_SCRATCH17) \
+ DMUB_SR(DMCUB_SCRATCH18) \
+ DMUB_SR(DMCUB_SCRATCH19) \
+ DMUB_SR(DMCUB_SCRATCH20) \
+ DMUB_SR(DMCUB_SCRATCH21) \
+ DMUB_SR(DMCUB_SCRATCH22) \
+ DMUB_SR(DMCUB_SCRATCH23) \
DMUB_SR(DMCUB_GPINT_DATAIN0) \
DMUB_SR(DMCUB_GPINT_DATAIN1) \
DMUB_SR(DMCUB_GPINT_DATAOUT) \
@@ -253,6 +259,7 @@ void dmub_dcn32_configure_dmub_in_system_memory(struct dmub_srv *dmub);
void dmub_dcn32_send_inbox0_cmd(struct dmub_srv *dmub, union dmub_inbox0_data_register data);
void dmub_dcn32_clear_inbox0_ack_register(struct dmub_srv *dmub);
uint32_t dmub_dcn32_read_inbox0_ack_register(struct dmub_srv *dmub);
+void dmub_dcn32_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_address *addr, uint8_t subvp_index);
void dmub_srv_dcn32_regs_init(struct dmub_srv *dmub, struct dc_context *ctx);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 9780c157196c..53464c3e49c1 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -278,6 +278,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->send_inbox0_cmd = dmub_dcn32_send_inbox0_cmd;
funcs->clear_inbox0_ack_register = dmub_dcn32_clear_inbox0_ack_register;
funcs->read_inbox0_ack_register = dmub_dcn32_read_inbox0_ack_register;
+ funcs->subvp_save_surf_addr = dmub_dcn32_save_surf_addr;
funcs->reset = dmub_dcn32_reset;
funcs->reset_release = dmub_dcn32_reset_release;
funcs->backdoor_load = dmub_dcn32_backdoor_load;
@@ -985,3 +986,12 @@ enum dmub_status dmub_srv_send_inbox0_cmd(struct dmub_srv *dmub,
dmub->hw_funcs.send_inbox0_cmd(dmub, data);
return DMUB_STATUS_OK;
}
+
+void dmub_srv_subvp_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_address *addr, uint8_t subvp_index)
+{
+ if (dmub->hw_funcs.subvp_save_surf_addr) {
+ dmub->hw_funcs.subvp_save_surf_addr(dmub,
+ addr,
+ subvp_index);
+ }
+}