Diffstat (limited to 'drivers/gpu/drm/amd/display/dc/dcn20')

-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/Makefile               |  12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c           |  20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c            |  26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h            |  64
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c         | 158
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c            |  30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h            |   2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c         |   6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c           | 316
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h           |  16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c          | 579
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h          | 148
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c           | 133
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h           |  33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c   |   7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h   | 182
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c            |  55
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h            |  22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c            |  16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h            |   1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c           |  45
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h           |   5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c       | 202
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h       |   5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c |  15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h |   1
26 files changed, 1514 insertions(+), 585 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
index 10b47986526b..5fcaf78334ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
@@ -2,16 +2,20 @@
#
# Makefile for DCN.
-DCN20 = dcn20_resource.o dcn20_hwseq.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \
+DCN20 = dcn20_resource.o dcn20_init.o dcn20_hwseq.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \
dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_optc.o dcn20_mmhubbub.o \
dcn20_stream_encoder.o dcn20_link_encoder.o dcn20_dccg.o \
dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o
-ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
DCN20 += dcn20_dsc.o
-endif
+ifdef CONFIG_X86
CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse
+endif
+
+ifdef CONFIG_PPC64
+CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -maltivec
+endif
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
@@ -19,6 +23,7 @@ IS_OLD_GCC = 1
endif
endif
+ifdef CONFIG_X86
ifdef IS_OLD_GCC
# Stack alignment mismatch, proceed with caution.
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
@@ -27,6 +32,7 @@ CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -mpreferred-stack-boundary=4
else
CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -msse2
endif
+endif
AMD_DAL_DCN20 = $(addprefix $(AMDDALPATH)/dc/dcn20/,$(DCN20))
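
Note (an editorial aside, not part of the patch): after these hunks dcn20_resource.o is built with -mhard-float -msse on x86 (plus -mpreferred-stack-boundary=4 on pre-7.1 GCC, or -msse2 otherwise) and with -mhard-float -maltivec on PPC64; the old-GCC stack-boundary/SSE2 block is now wrapped in CONFIG_X86 so it no longer applies to other architectures. dcn20_dsc.o is built unconditionally instead of being gated on CONFIG_DRM_AMD_DC_DSC_SUPPORT, and the new dcn20_init.o joins the object list.
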
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
index 1e1151356e60..50bffbfdd394 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
@@ -50,20 +50,20 @@ void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
if (dccg->ref_dppclk && req_dppclk) {
int ref_dppclk = dccg->ref_dppclk;
+ int modulo, phase;
- ASSERT(req_dppclk <= ref_dppclk);
- /* need to clamp to 8 bits */
- if (ref_dppclk > 0xff) {
- int divider = (ref_dppclk + 0xfe) / 0xff;
+ // phase / modulo = dpp pipe clk / dpp global clk
+ modulo = 0xff; // use FF at the end
+ phase = ((modulo * req_dppclk) + ref_dppclk - 1) / ref_dppclk;
- ref_dppclk /= divider;
- req_dppclk = (req_dppclk + divider - 1) / divider;
- if (req_dppclk > ref_dppclk)
- req_dppclk = ref_dppclk;
+ if (phase > 0xff) {
+ ASSERT(false);
+ phase = 0xff;
}
+
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
- DPPCLK0_DTO_PHASE, req_dppclk,
- DPPCLK0_DTO_MODULO, ref_dppclk);
+ DPPCLK0_DTO_PHASE, phase,
+ DPPCLK0_DTO_MODULO, modulo);
REG_UPDATE(DPPCLK_DTO_CTRL,
DPPCLK_DTO_ENABLE[dpp_inst], 1);
} else {
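
For reference, the rewritten DTO programming above keeps a fixed 8-bit modulo and derives the phase from the requested/reference clock ratio, rounding up. A minimal stand-alone sketch of that arithmetic (the 0xff modulo and the clamp follow the hunk; the clock values below are made-up example inputs, not taken from the patch):

#include <stdio.h>

static unsigned int dto_phase(unsigned int req_dppclk, unsigned int ref_dppclk)
{
	const unsigned int modulo = 0xff;	/* fixed 8-bit modulo, as in the patch */
	unsigned int phase;

	/* phase / modulo = dpp pipe clk / dpp global clk, rounded up */
	phase = (modulo * req_dppclk + ref_dppclk - 1) / ref_dppclk;
	if (phase > modulo)	/* requested clock above reference: clamp */
		phase = modulo;
	return phase;
}

int main(void)
{
	/* e.g. a 300 MHz pipe clock against a 600 MHz reference gives 128/255 */
	printf("phase = %u\n", dto_phase(300000, 600000));
	return 0;
}

The DTO output frequency is ref_dppclk * phase / modulo, so rounding the phase up keeps it at or just above the requested DPP clock.
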
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index 4d7e45892f08..13e057d7ee93 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -104,7 +104,7 @@ static void dpp2_cnv_setup (
uint32_t pixel_format = 0;
uint32_t alpha_en = 1;
enum dc_color_space color_space = COLOR_SPACE_SRGB;
- enum dcn10_input_csc_select select = INPUT_CSC_SELECT_BYPASS;
+ enum dcn20_input_csc_select select = DCN2_ICSC_SELECT_BYPASS;
bool force_disable_cursor = false;
struct out_csc_color_matrix tbl_entry;
uint32_t is_2bit = 0;
@@ -145,25 +145,25 @@ static void dpp2_cnv_setup (
force_disable_cursor = false;
pixel_format = 65;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
force_disable_cursor = true;
pixel_format = 64;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
force_disable_cursor = true;
pixel_format = 67;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
force_disable_cursor = true;
pixel_format = 66;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
pixel_format = 22;
@@ -177,7 +177,7 @@ static void dpp2_cnv_setup (
case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
pixel_format = 12;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
pixel_format = 112;
@@ -188,13 +188,13 @@ static void dpp2_cnv_setup (
case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
pixel_format = 114;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
is_2bit = 1;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102:
pixel_format = 115;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
is_2bit = 1;
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
@@ -227,13 +227,13 @@ static void dpp2_cnv_setup (
tbl_entry.color_space = input_color_space;
if (color_space >= COLOR_SPACE_YCBCR601)
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
else
- select = INPUT_CSC_SELECT_BYPASS;
+ select = DCN2_ICSC_SELECT_BYPASS;
- dpp1_program_input_csc(dpp_base, color_space, select, &tbl_entry);
+ dpp2_program_input_csc(dpp_base, color_space, select, &tbl_entry);
} else
- dpp1_program_input_csc(dpp_base, color_space, select, NULL);
+ dpp2_program_input_csc(dpp_base, color_space, select, NULL);
if (force_disable_cursor) {
REG_UPDATE(CURSOR_CONTROL,
@@ -458,7 +458,7 @@ static struct dpp_funcs dcn20_dpp_funcs = {
.dpp_reset = dpp_reset,
.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
.dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps,
- .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap,
+ .dpp_set_gamut_remap = dpp2_cm_set_gamut_remap,
.dpp_set_csc_adjustment = NULL,
.dpp_set_csc_default = NULL,
.dpp_program_regamma_pwl = oppn20_dummy_program_regamma_pwl,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
index 5b03b737b1d6..27610251c57f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
@@ -150,6 +150,16 @@
SRI(CM_SHAPER_RAMA_REGION_32_33, CM, id), \
SRI(CM_SHAPER_LUT_INDEX, CM, id)
+#define TF_REG_LIST_DCN20_COMMON_APPEND(id) \
+ SRI(CM_GAMUT_REMAP_B_C11_C12, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C13_C14, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C21_C22, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C23_C24, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C31_C32, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C33_C34, CM, id),\
+ SRI(CM_ICSC_B_C11_C12, CM, id), \
+ SRI(CM_ICSC_B_C33_C34, CM, id)
+
#define TF_REG_LIST_DCN20(id) \
TF_REG_LIST_DCN(id), \
TF_REG_LIST_DCN20_COMMON(id), \
@@ -572,10 +582,29 @@
TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\
TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh)
+/* DPP CM debug status register:
+ *
+ * Status index including current ICSC, Gamut Remap Mode is 9
+ * ICSC Mode: [4..3]
+ * Gamut Remap Mode: [10..9]
+ */
+#define CM_TEST_DEBUG_DATA_STATUS_IDX 9
+
+#define TF_DEBUG_REG_LIST_SH_DCN20 \
+ TF_DEBUG_REG_LIST_SH_DCN10, \
+ .CM_TEST_DEBUG_DATA_ICSC_MODE = 3, \
+ .CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 9
+
+#define TF_DEBUG_REG_LIST_MASK_DCN20 \
+ TF_DEBUG_REG_LIST_MASK_DCN10, \
+ .CM_TEST_DEBUG_DATA_ICSC_MODE = 0x18, \
+ .CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 0x600
#define TF_REG_FIELD_LIST_DCN2_0(type) \
TF_REG_FIELD_LIST(type) \
type CM_BLNDGAM_LUT_DATA; \
+ type CM_TEST_DEBUG_DATA_ICSC_MODE; \
+ type CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE; \
type FORMAT_CNV16; \
type CNVC_BYPASS_MSB_ALIGN; \
type CLAMP_POSITIVE; \
@@ -630,11 +659,22 @@ struct dcn2_dpp_mask {
uint32_t COLOR_KEYER_RED; \
uint32_t COLOR_KEYER_GREEN; \
uint32_t COLOR_KEYER_BLUE; \
- uint32_t OBUF_MEM_PWR_CTRL;\
+ uint32_t OBUF_MEM_PWR_CTRL; \
uint32_t DSCL_MEM_PWR_CTRL
+#define DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND \
+ uint32_t CM_GAMUT_REMAP_B_C11_C12; \
+ uint32_t CM_GAMUT_REMAP_B_C13_C14; \
+ uint32_t CM_GAMUT_REMAP_B_C21_C22; \
+ uint32_t CM_GAMUT_REMAP_B_C23_C24; \
+ uint32_t CM_GAMUT_REMAP_B_C31_C32; \
+ uint32_t CM_GAMUT_REMAP_B_C33_C34; \
+ uint32_t CM_ICSC_B_C11_C12; \
+ uint32_t CM_ICSC_B_C33_C34
+
struct dcn2_dpp_registers {
DPP_DCN2_REG_VARIABLE_LIST;
+ DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND;
};
struct dcn20_dpp {
@@ -656,6 +696,18 @@ struct dcn20_dpp {
struct pwl_params pwl_data;
};
+enum dcn20_input_csc_select {
+ DCN2_ICSC_SELECT_BYPASS = 0,
+ DCN2_ICSC_SELECT_ICSC_A = 1,
+ DCN2_ICSC_SELECT_ICSC_B = 2
+};
+
+enum dcn20_gamut_remap_select {
+ DCN2_GAMUT_REMAP_BYPASS = 0,
+ DCN2_GAMUT_REMAP_COEF_A = 1,
+ DCN2_GAMUT_REMAP_COEF_B = 2
+};
+
void dpp20_read_state(struct dpp *dpp_base,
struct dcn_dpp_state *s);
@@ -667,6 +719,16 @@ void dpp2_set_degamma(
struct dpp *dpp_base,
enum ipp_degamma_mode mode);
+void dpp2_cm_set_gamut_remap(
+ struct dpp *dpp_base,
+ const struct dpp_grph_csc_adjustment *adjust);
+
+void dpp2_program_input_csc(
+ struct dpp *dpp_base,
+ enum dc_color_space color_space,
+ enum dcn20_input_csc_select input_select,
+ const struct out_csc_color_matrix *tbl_entry);
+
bool dpp20_program_blnd_lut(
struct dpp *dpp_base, const struct pwl_params *params);
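
The shift/mask pairs added to dcn20_dpp.h above describe where the current ICSC and gamut-remap modes sit inside the CM_TEST_DEBUG_DATA word once the debug index points at status index 9. A quick stand-alone illustration of how those masks decode a raw readback (the register value below is invented purely to exercise the arithmetic):

#include <stdio.h>

#define CM_TEST_DEBUG_DATA_ICSC_MODE_SHIFT		3
#define CM_TEST_DEBUG_DATA_ICSC_MODE_MASK		0x18	/* bits [4:3] */
#define CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE_SHIFT	9
#define CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE_MASK	0x600	/* bits [10:9] */

int main(void)
{
	unsigned int debug_data = 0x208;	/* invented readback value */
	unsigned int icsc_mode = (debug_data & CM_TEST_DEBUG_DATA_ICSC_MODE_MASK)
					>> CM_TEST_DEBUG_DATA_ICSC_MODE_SHIFT;
	unsigned int remap_mode = (debug_data & CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE_MASK)
					>> CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE_SHIFT;

	printf("icsc_mode = %u, gamut_remap_mode = %u\n", icsc_mode, remap_mode);
	return 0;
}

This mirrors what IX_REG_GET does in dcn20_dpp_cm.c when deciding whether coefficient set A or B is currently active.
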
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
index 2d112c316424..8dc3d1f73984 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
@@ -36,6 +36,9 @@
#define REG(reg)\
dpp->tf_regs->reg
+#define IND_REG(index) \
+ (index)
+
#define CTX \
dpp->base.ctx
@@ -44,9 +47,6 @@
dpp->tf_shift->field_name, dpp->tf_mask->field_name
-
-
-
static void dpp2_enable_cm_block(
struct dpp *dpp_base)
{
@@ -149,12 +149,164 @@ void dpp2_set_degamma(
case IPP_DEGAMMA_MODE_HW_xvYCC:
REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2);
break;
+ case IPP_DEGAMMA_MODE_USER_PWL:
+ REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3);
+ break;
default:
BREAK_TO_DEBUGGER();
break;
}
}
+static void program_gamut_remap(
+ struct dcn20_dpp *dpp,
+ const uint16_t *regval,
+ enum dcn20_gamut_remap_select select)
+{
+ uint32_t cur_select = 0;
+ struct color_matrices_reg gam_regs;
+
+ if (regval == NULL || select == DCN2_GAMUT_REMAP_BYPASS) {
+ REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
+ CM_GAMUT_REMAP_MODE, 0);
+ return;
+ }
+
+ /* determine which gamut_remap coefficients (A or B) we are using
+ * currently. select the alternate set to double buffer
+ * the update so gamut_remap is updated on frame boundary
+ */
+ IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA,
+ CM_TEST_DEBUG_DATA_STATUS_IDX,
+ CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE, &cur_select);
+
+ /* value stored in dbg reg will be 1 greater than mode we want */
+ if (cur_select != DCN2_GAMUT_REMAP_COEF_A)
+ select = DCN2_GAMUT_REMAP_COEF_A;
+ else
+ select = DCN2_GAMUT_REMAP_COEF_B;
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
+
+ if (select == DCN2_GAMUT_REMAP_COEF_A) {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
+ } else {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34);
+ }
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ REG_SET(
+ CM_GAMUT_REMAP_CONTROL, 0,
+ CM_GAMUT_REMAP_MODE, select);
+
+}
+
+void dpp2_cm_set_gamut_remap(
+ struct dpp *dpp_base,
+ const struct dpp_grph_csc_adjustment *adjust)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+ int i = 0;
+
+ if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
+ /* Bypass if type is bypass or hw */
+ program_gamut_remap(dpp, NULL, DCN2_GAMUT_REMAP_BYPASS);
+ else {
+ struct fixed31_32 arr_matrix[12];
+ uint16_t arr_reg_val[12];
+
+ for (i = 0; i < 12; i++)
+ arr_matrix[i] = adjust->temperature_matrix[i];
+
+ convert_float_matrix(
+ arr_reg_val, arr_matrix, 12);
+
+ program_gamut_remap(dpp, arr_reg_val, DCN2_GAMUT_REMAP_COEF_A);
+ }
+}
+
+void dpp2_program_input_csc(
+ struct dpp *dpp_base,
+ enum dc_color_space color_space,
+ enum dcn20_input_csc_select input_select,
+ const struct out_csc_color_matrix *tbl_entry)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+ int i;
+ int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix);
+ const uint16_t *regval = NULL;
+ uint32_t cur_select = 0;
+ enum dcn20_input_csc_select select;
+ struct color_matrices_reg icsc_regs;
+
+ if (input_select == DCN2_ICSC_SELECT_BYPASS) {
+ REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0);
+ return;
+ }
+
+ if (tbl_entry == NULL) {
+ for (i = 0; i < arr_size; i++)
+ if (dpp_input_csc_matrix[i].color_space == color_space) {
+ regval = dpp_input_csc_matrix[i].regval;
+ break;
+ }
+
+ if (regval == NULL) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ } else {
+ regval = tbl_entry->regval;
+ }
+
+ /* determine which CSC coefficients (A or B) we are using
+ * currently. select the alternate set to double buffer
+ * the CSC update so CSC is updated on frame boundary
+ */
+ IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA,
+ CM_TEST_DEBUG_DATA_STATUS_IDX,
+ CM_TEST_DEBUG_DATA_ICSC_MODE, &cur_select);
+
+ if (cur_select != DCN2_ICSC_SELECT_ICSC_A)
+ select = DCN2_ICSC_SELECT_ICSC_A;
+ else
+ select = DCN2_ICSC_SELECT_ICSC_B;
+
+ icsc_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11;
+ icsc_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11;
+ icsc_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12;
+ icsc_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12;
+
+ if (select == DCN2_ICSC_SELECT_ICSC_A) {
+
+ icsc_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12);
+ icsc_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34);
+
+ } else {
+
+ icsc_regs.csc_c11_c12 = REG(CM_ICSC_B_C11_C12);
+ icsc_regs.csc_c33_c34 = REG(CM_ICSC_B_C33_C34);
+
+ }
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &icsc_regs);
+
+ REG_SET(CM_ICSC_CONTROL, 0,
+ CM_ICSC_MODE, select);
+}
+
static void dpp20_power_on_blnd_lut(
struct dpp *dpp_base,
bool power_on)
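
Both new programming paths in dcn20_dpp_cm.c above (dpp2_program_input_csc and the gamut-remap helper) use the same double-buffering idea: read back which coefficient set (A or B) the hardware is currently using, program the other one, then switch the mode so the change takes effect on a frame boundary. A compact sketch of just that selection step, detached from the register helpers (the enum values mirror dcn20_dpp.h; everything else is illustrative):

#include <stdio.h>

enum dcn20_input_csc_select {	/* values mirror dcn20_dpp.h */
	DCN2_ICSC_SELECT_BYPASS = 0,
	DCN2_ICSC_SELECT_ICSC_A = 1,
	DCN2_ICSC_SELECT_ICSC_B = 2
};

/* Given the set currently active in hardware, return the inactive one so
 * the new matrix double-buffers in and lands on a frame boundary. */
static enum dcn20_input_csc_select pick_inactive_icsc_set(unsigned int cur_select)
{
	if (cur_select != DCN2_ICSC_SELECT_ICSC_A)
		return DCN2_ICSC_SELECT_ICSC_A;
	return DCN2_ICSC_SELECT_ICSC_B;
}

int main(void)
{
	/* hardware currently on set A -> program set B next, and vice versa */
	printf("%d\n", pick_inactive_icsc_set(DCN2_ICSC_SELECT_ICSC_A));	/* 2 */
	printf("%d\n", pick_inactive_icsc_set(DCN2_ICSC_SELECT_ICSC_B));	/* 1 */
	return 0;
}

dpp2_program_input_csc then writes the matrix into the registers of the chosen set (CM_ICSC_* or CM_ICSC_B_*) and finally updates CM_ICSC_MODE to that set; program_gamut_remap does the same with the CM_GAMUT_REMAP_B_* registers.
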
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index 63eb377ed9c0..6bdfee20b6a7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -23,7 +23,6 @@
*
*/
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#include "reg_helper.h"
#include "dcn20_dsc.h"
#include "dsc/dscc_types.h"
@@ -207,6 +206,9 @@ static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const str
struct dsc_reg_values dsc_reg_vals;
struct dsc_optc_config dsc_optc_cfg;
+ memset(&dsc_reg_vals, 0, sizeof(dsc_reg_vals));
+ memset(&dsc_optc_cfg, 0, sizeof(dsc_optc_cfg));
+
DC_LOG_DSC("Getting packed DSC PPS for DSC Config:");
dsc_config_log(dsc, dsc_cfg);
DC_LOG_DSC("DSC Picture Parameter Set (PPS):");
@@ -222,9 +224,18 @@ static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const str
static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe)
{
struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
+ int dsc_clock_en;
+ int dsc_fw_config;
+ int enabled_opp_pipe;
+
+ DC_LOG_DSC("enable DSC %d at opp pipe %d", dsc->inst, opp_pipe);
- /* TODO Check if DSC alreay in use? */
- DC_LOG_DSC("enable DSC at opp pipe %d", opp_pipe);
+ REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en);
+ REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe);
+ if ((dsc_clock_en || dsc_fw_config) && enabled_opp_pipe != opp_pipe) {
+ DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already enabled!", dsc->inst, enabled_opp_pipe);
+ ASSERT(0);
+ }
REG_UPDATE(DSC_TOP_CONTROL,
DSC_CLOCK_EN, 1);
@@ -238,8 +249,18 @@ static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe)
static void dsc2_disable(struct display_stream_compressor *dsc)
{
struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
+ int dsc_clock_en;
+ int dsc_fw_config;
+ int enabled_opp_pipe;
- DC_LOG_DSC("disable DSC");
+ DC_LOG_DSC("disable DSC %d", dsc->inst);
+
+ REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en);
+ REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe);
+ if (!dsc_clock_en || !dsc_fw_config) {
+ DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already disabled!", dsc->inst, enabled_opp_pipe);
+ ASSERT(0);
+ }
REG_UPDATE(DSCRM_DSC_FORWARD_CONFIG,
DSCRM_DSC_FORWARD_EN, 0);
@@ -715,4 +736,3 @@ static void dsc_write_to_registers(struct display_stream_compressor *dsc, const
}
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
index 4e2fb38390a4..9855a7ed0387 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
@@ -21,7 +21,6 @@
* Authors: AMD
*
*/
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#ifndef __DCN20_DSC_H__
#define __DCN20_DSC_H__
@@ -572,4 +571,3 @@ void dsc2_construct(struct dcn20_dsc *dsc,
#endif
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index 8b8438566101..9235f7d29454 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -293,6 +293,9 @@ bool hubbub2_get_dcc_compression_cap(struct hubbub *hubbub,
output->grph.rgb.max_compressed_blk_size = 64;
output->grph.rgb.independent_64b_blks = true;
break;
+ default:
+ ASSERT(false);
+ break;
}
output->capable = true;
output->const_color_support = true;
@@ -601,7 +604,8 @@ static const struct hubbub_funcs hubbub2_funcs = {
.wm_read_state = hubbub2_wm_read_state,
.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
.program_watermarks = hubbub2_program_watermarks,
- .allow_self_refresh_control = hubbub1_allow_self_refresh_control
+ .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
+ .allow_self_refresh_control = hubbub1_allow_self_refresh_control,
};
void hubbub2_construct(struct dcn20_hubbub *hubbub,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index 69e2aae42394..84d7ac5dd206 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -30,6 +30,8 @@
#include "reg_helper.h"
#include "basics/conversion.h"
+#define DC_LOGGER_INIT(logger)
+
#define REG(reg)\
hubp2->hubp_regs->reg
@@ -483,7 +485,6 @@ void hubp2_program_pixel_format(
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 12);
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 112);
@@ -504,7 +505,6 @@ void hubp2_program_pixel_format(
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 119);
break;
-#endif
default:
BREAK_TO_DEBUGGER();
break;
@@ -1204,6 +1204,9 @@ void hubp2_read_state_common(struct hubp *hubp)
HUBP_TTU_DISABLE, &s->ttu_disable,
HUBP_UNDERFLOW_STATUS, &s->underflow_status);
+ REG_GET(HUBP_CLK_CNTL,
+ HUBP_CLOCK_ENABLE, &s->clock_en);
+
REG_GET(DCN_GLOBAL_TTU_CNTL,
MIN_TTU_VBLANK, &s->min_ttu_vblank);
@@ -1243,6 +1246,314 @@ void hubp2_read_state(struct hubp *hubp)
}
+void hubp2_validate_dml_output(struct hubp *hubp,
+ struct dc_context *ctx,
+ struct _vcs_dpi_display_rq_regs_st *dml_rq_regs,
+ struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *dml_ttu_attr)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ struct _vcs_dpi_display_rq_regs_st rq_regs = {0};
+ struct _vcs_dpi_display_dlg_regs_st dlg_attr = {0};
+ struct _vcs_dpi_display_ttu_regs_st ttu_attr = {0};
+ DC_LOGGER_INIT(ctx->logger);
+ DC_LOG_DEBUG("DML Validation | Running Validation");
+
+ /* Requestor Regs */
+ REG_GET(HUBPRET_CONTROL,
+ DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs.plane1_base_address);
+ REG_GET_4(DCN_EXPANSION_MODE,
+ DRQ_EXPANSION_MODE, &rq_regs.drq_expansion_mode,
+ PRQ_EXPANSION_MODE, &rq_regs.prq_expansion_mode,
+ MRQ_EXPANSION_MODE, &rq_regs.mrq_expansion_mode,
+ CRQ_EXPANSION_MODE, &rq_regs.crq_expansion_mode);
+ REG_GET_8(DCHUBP_REQ_SIZE_CONFIG,
+ CHUNK_SIZE, &rq_regs.rq_regs_l.chunk_size,
+ MIN_CHUNK_SIZE, &rq_regs.rq_regs_l.min_chunk_size,
+ META_CHUNK_SIZE, &rq_regs.rq_regs_l.meta_chunk_size,
+ MIN_META_CHUNK_SIZE, &rq_regs.rq_regs_l.min_meta_chunk_size,
+ DPTE_GROUP_SIZE, &rq_regs.rq_regs_l.dpte_group_size,
+ MPTE_GROUP_SIZE, &rq_regs.rq_regs_l.mpte_group_size,
+ SWATH_HEIGHT, &rq_regs.rq_regs_l.swath_height,
+ PTE_ROW_HEIGHT_LINEAR, &rq_regs.rq_regs_l.pte_row_height_linear);
+ REG_GET_8(DCHUBP_REQ_SIZE_CONFIG_C,
+ CHUNK_SIZE_C, &rq_regs.rq_regs_c.chunk_size,
+ MIN_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_chunk_size,
+ META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.meta_chunk_size,
+ MIN_META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_meta_chunk_size,
+ DPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.dpte_group_size,
+ MPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.mpte_group_size,
+ SWATH_HEIGHT_C, &rq_regs.rq_regs_c.swath_height,
+ PTE_ROW_HEIGHT_LINEAR_C, &rq_regs.rq_regs_c.pte_row_height_linear);
+
+ if (rq_regs.plane1_base_address != dml_rq_regs->plane1_base_address)
+ DC_LOG_DEBUG("DML Validation | HUBPRET_CONTROL:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u Actual: %u\n",
+ dml_rq_regs->plane1_base_address, rq_regs.plane1_base_address);
+ if (rq_regs.drq_expansion_mode != dml_rq_regs->drq_expansion_mode)
+ DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
+ dml_rq_regs->drq_expansion_mode, rq_regs.drq_expansion_mode);
+ if (rq_regs.prq_expansion_mode != dml_rq_regs->prq_expansion_mode)
+ DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:MRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
+ dml_rq_regs->prq_expansion_mode, rq_regs.prq_expansion_mode);
+ if (rq_regs.mrq_expansion_mode != dml_rq_regs->mrq_expansion_mode)
+ DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u Actual: %u\n",
+ dml_rq_regs->mrq_expansion_mode, rq_regs.mrq_expansion_mode);
+ if (rq_regs.crq_expansion_mode != dml_rq_regs->crq_expansion_mode)
+ DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:CRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
+ dml_rq_regs->crq_expansion_mode, rq_regs.crq_expansion_mode);
+
+ if (rq_regs.rq_regs_l.chunk_size != dml_rq_regs->rq_regs_l.chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:CHUNK_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.chunk_size, rq_regs.rq_regs_l.chunk_size);
+ if (rq_regs.rq_regs_l.min_chunk_size != dml_rq_regs->rq_regs_l.min_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_CHUNK_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.min_chunk_size, rq_regs.rq_regs_l.min_chunk_size);
+ if (rq_regs.rq_regs_l.meta_chunk_size != dml_rq_regs->rq_regs_l.meta_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:META_CHUNK_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.meta_chunk_size, rq_regs.rq_regs_l.meta_chunk_size);
+ if (rq_regs.rq_regs_l.min_meta_chunk_size != dml_rq_regs->rq_regs_l.min_meta_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_META_CHUNK_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs.rq_regs_l.min_meta_chunk_size);
+ if (rq_regs.rq_regs_l.dpte_group_size != dml_rq_regs->rq_regs_l.dpte_group_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:DPTE_GROUP_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.dpte_group_size, rq_regs.rq_regs_l.dpte_group_size);
+ if (rq_regs.rq_regs_l.mpte_group_size != dml_rq_regs->rq_regs_l.mpte_group_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MPTE_GROUP_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.mpte_group_size, rq_regs.rq_regs_l.mpte_group_size);
+ if (rq_regs.rq_regs_l.swath_height != dml_rq_regs->rq_regs_l.swath_height)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:SWATH_HEIGHT - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.swath_height, rq_regs.rq_regs_l.swath_height);
+ if (rq_regs.rq_regs_l.pte_row_height_linear != dml_rq_regs->rq_regs_l.pte_row_height_linear)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:PTE_ROW_HEIGHT_LINEAR - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.pte_row_height_linear, rq_regs.rq_regs_l.pte_row_height_linear);
+
+ if (rq_regs.rq_regs_c.chunk_size != dml_rq_regs->rq_regs_c.chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.chunk_size, rq_regs.rq_regs_c.chunk_size);
+ if (rq_regs.rq_regs_c.min_chunk_size != dml_rq_regs->rq_regs_c.min_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.min_chunk_size, rq_regs.rq_regs_c.min_chunk_size);
+ if (rq_regs.rq_regs_c.meta_chunk_size != dml_rq_regs->rq_regs_c.meta_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:META_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.meta_chunk_size, rq_regs.rq_regs_c.meta_chunk_size);
+ if (rq_regs.rq_regs_c.min_meta_chunk_size != dml_rq_regs->rq_regs_c.min_meta_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_META_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.min_meta_chunk_size, rq_regs.rq_regs_c.min_meta_chunk_size);
+ if (rq_regs.rq_regs_c.dpte_group_size != dml_rq_regs->rq_regs_c.dpte_group_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:DPTE_GROUP_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.dpte_group_size, rq_regs.rq_regs_c.dpte_group_size);
+ if (rq_regs.rq_regs_c.mpte_group_size != dml_rq_regs->rq_regs_c.mpte_group_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MPTE_GROUP_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.mpte_group_size, rq_regs.rq_regs_c.mpte_group_size);
+ if (rq_regs.rq_regs_c.swath_height != dml_rq_regs->rq_regs_c.swath_height)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:SWATH_HEIGHT_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.swath_height, rq_regs.rq_regs_c.swath_height);
+ if (rq_regs.rq_regs_c.pte_row_height_linear != dml_rq_regs->rq_regs_c.pte_row_height_linear)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:PTE_ROW_HEIGHT_LINEAR_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.pte_row_height_linear, rq_regs.rq_regs_c.pte_row_height_linear);
+
+ /* DLG - Per hubp */
+ REG_GET_2(BLANK_OFFSET_0,
+ REFCYC_H_BLANK_END, &dlg_attr.refcyc_h_blank_end,
+ DLG_V_BLANK_END, &dlg_attr.dlg_vblank_end);
+ REG_GET(BLANK_OFFSET_1,
+ MIN_DST_Y_NEXT_START, &dlg_attr.min_dst_y_next_start);
+ REG_GET(DST_DIMENSIONS,
+ REFCYC_PER_HTOTAL, &dlg_attr.refcyc_per_htotal);
+ REG_GET_2(DST_AFTER_SCALER,
+ REFCYC_X_AFTER_SCALER, &dlg_attr.refcyc_x_after_scaler,
+ DST_Y_AFTER_SCALER, &dlg_attr.dst_y_after_scaler);
+ REG_GET(REF_FREQ_TO_PIX_FREQ,
+ REF_FREQ_TO_PIX_FREQ, &dlg_attr.ref_freq_to_pix_freq);
+
+ if (dlg_attr.refcyc_h_blank_end != dml_dlg_attr->refcyc_h_blank_end)
+ DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:REFCYC_H_BLANK_END - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_h_blank_end, dlg_attr.refcyc_h_blank_end);
+ if (dlg_attr.dlg_vblank_end != dml_dlg_attr->dlg_vblank_end)
+ DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:DLG_V_BLANK_END - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dlg_vblank_end, dlg_attr.dlg_vblank_end);
+ if (dlg_attr.min_dst_y_next_start != dml_dlg_attr->min_dst_y_next_start)
+ DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_1:MIN_DST_Y_NEXT_START - Expected: %u Actual: %u\n",
+ dml_dlg_attr->min_dst_y_next_start, dlg_attr.min_dst_y_next_start);
+ if (dlg_attr.refcyc_per_htotal != dml_dlg_attr->refcyc_per_htotal)
+ DC_LOG_DEBUG("DML Validation | DST_DIMENSIONS:REFCYC_PER_HTOTAL - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_htotal, dlg_attr.refcyc_per_htotal);
+ if (dlg_attr.refcyc_x_after_scaler != dml_dlg_attr->refcyc_x_after_scaler)
+ DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:REFCYC_X_AFTER_SCALER - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_x_after_scaler, dlg_attr.refcyc_x_after_scaler);
+ if (dlg_attr.dst_y_after_scaler != dml_dlg_attr->dst_y_after_scaler)
+ DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:DST_Y_AFTER_SCALER - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_after_scaler, dlg_attr.dst_y_after_scaler);
+ if (dlg_attr.ref_freq_to_pix_freq != dml_dlg_attr->ref_freq_to_pix_freq)
+ DC_LOG_DEBUG("DML Validation | REF_FREQ_TO_PIX_FREQ:REF_FREQ_TO_PIX_FREQ - Expected: %u Actual: %u\n",
+ dml_dlg_attr->ref_freq_to_pix_freq, dlg_attr.ref_freq_to_pix_freq);
+
+ /* DLG - Per luma/chroma */
+ REG_GET(VBLANK_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr.refcyc_per_pte_group_vblank_l);
+ if (REG(NOM_PARAMETERS_0))
+ REG_GET(NOM_PARAMETERS_0,
+ DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr.dst_y_per_pte_row_nom_l);
+ if (REG(NOM_PARAMETERS_1))
+ REG_GET(NOM_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr.refcyc_per_pte_group_nom_l);
+ REG_GET(NOM_PARAMETERS_4,
+ DST_Y_PER_META_ROW_NOM_L, &dlg_attr.dst_y_per_meta_row_nom_l);
+ REG_GET(NOM_PARAMETERS_5,
+ REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr.refcyc_per_meta_chunk_nom_l);
+ REG_GET_2(PER_LINE_DELIVERY,
+ REFCYC_PER_LINE_DELIVERY_L, &dlg_attr.refcyc_per_line_delivery_l,
+ REFCYC_PER_LINE_DELIVERY_C, &dlg_attr.refcyc_per_line_delivery_c);
+ REG_GET_2(PER_LINE_DELIVERY_PRE,
+ REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr.refcyc_per_line_delivery_pre_l,
+ REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr.refcyc_per_line_delivery_pre_c);
+ REG_GET(VBLANK_PARAMETERS_2,
+ REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr.refcyc_per_pte_group_vblank_c);
+ if (REG(NOM_PARAMETERS_2))
+ REG_GET(NOM_PARAMETERS_2,
+ DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr.dst_y_per_pte_row_nom_c);
+ if (REG(NOM_PARAMETERS_3))
+ REG_GET(NOM_PARAMETERS_3,
+ REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr.refcyc_per_pte_group_nom_c);
+ REG_GET(NOM_PARAMETERS_6,
+ DST_Y_PER_META_ROW_NOM_C, &dlg_attr.dst_y_per_meta_row_nom_c);
+ REG_GET(NOM_PARAMETERS_7,
+ REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr.refcyc_per_meta_chunk_nom_c);
+ REG_GET(VBLANK_PARAMETERS_3,
+ REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr.refcyc_per_meta_chunk_vblank_l);
+ REG_GET(VBLANK_PARAMETERS_4,
+ REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr.refcyc_per_meta_chunk_vblank_c);
+
+ if (dlg_attr.refcyc_per_pte_group_vblank_l != dml_dlg_attr->refcyc_per_pte_group_vblank_l)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_1:REFCYC_PER_PTE_GROUP_VBLANK_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_vblank_l, dlg_attr.refcyc_per_pte_group_vblank_l);
+ if (dlg_attr.dst_y_per_pte_row_nom_l != dml_dlg_attr->dst_y_per_pte_row_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_0:DST_Y_PER_PTE_ROW_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_pte_row_nom_l, dlg_attr.dst_y_per_pte_row_nom_l);
+ if (dlg_attr.refcyc_per_pte_group_nom_l != dml_dlg_attr->refcyc_per_pte_group_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_1:REFCYC_PER_PTE_GROUP_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_nom_l, dlg_attr.refcyc_per_pte_group_nom_l);
+ if (dlg_attr.dst_y_per_meta_row_nom_l != dml_dlg_attr->dst_y_per_meta_row_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_4:DST_Y_PER_META_ROW_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_meta_row_nom_l, dlg_attr.dst_y_per_meta_row_nom_l);
+ if (dlg_attr.refcyc_per_meta_chunk_nom_l != dml_dlg_attr->refcyc_per_meta_chunk_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_5:REFCYC_PER_META_CHUNK_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_nom_l, dlg_attr.refcyc_per_meta_chunk_nom_l);
+ if (dlg_attr.refcyc_per_line_delivery_l != dml_dlg_attr->refcyc_per_line_delivery_l)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_l, dlg_attr.refcyc_per_line_delivery_l);
+ if (dlg_attr.refcyc_per_line_delivery_c != dml_dlg_attr->refcyc_per_line_delivery_c)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_c, dlg_attr.refcyc_per_line_delivery_c);
+ if (dlg_attr.refcyc_per_pte_group_vblank_c != dml_dlg_attr->refcyc_per_pte_group_vblank_c)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_2:REFCYC_PER_PTE_GROUP_VBLANK_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_vblank_c, dlg_attr.refcyc_per_pte_group_vblank_c);
+ if (dlg_attr.dst_y_per_pte_row_nom_c != dml_dlg_attr->dst_y_per_pte_row_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_2:DST_Y_PER_PTE_ROW_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_pte_row_nom_c, dlg_attr.dst_y_per_pte_row_nom_c);
+ if (dlg_attr.refcyc_per_pte_group_nom_c != dml_dlg_attr->refcyc_per_pte_group_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_3:REFCYC_PER_PTE_GROUP_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_nom_c, dlg_attr.refcyc_per_pte_group_nom_c);
+ if (dlg_attr.dst_y_per_meta_row_nom_c != dml_dlg_attr->dst_y_per_meta_row_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_6:DST_Y_PER_META_ROW_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_meta_row_nom_c, dlg_attr.dst_y_per_meta_row_nom_c);
+ if (dlg_attr.refcyc_per_meta_chunk_nom_c != dml_dlg_attr->refcyc_per_meta_chunk_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_7:REFCYC_PER_META_CHUNK_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_nom_c, dlg_attr.refcyc_per_meta_chunk_nom_c);
+ if (dlg_attr.refcyc_per_line_delivery_pre_l != dml_dlg_attr->refcyc_per_line_delivery_pre_l)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_pre_l, dlg_attr.refcyc_per_line_delivery_pre_l);
+ if (dlg_attr.refcyc_per_line_delivery_pre_c != dml_dlg_attr->refcyc_per_line_delivery_pre_c)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_pre_c, dlg_attr.refcyc_per_line_delivery_pre_c);
+ if (dlg_attr.refcyc_per_meta_chunk_vblank_l != dml_dlg_attr->refcyc_per_meta_chunk_vblank_l)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_3:REFCYC_PER_META_CHUNK_VBLANK_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_vblank_l, dlg_attr.refcyc_per_meta_chunk_vblank_l);
+ if (dlg_attr.refcyc_per_meta_chunk_vblank_c != dml_dlg_attr->refcyc_per_meta_chunk_vblank_c)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_4:REFCYC_PER_META_CHUNK_VBLANK_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_vblank_c, dlg_attr.refcyc_per_meta_chunk_vblank_c);
+
+ /* TTU - per hubp */
+ REG_GET_2(DCN_TTU_QOS_WM,
+ QoS_LEVEL_LOW_WM, &ttu_attr.qos_level_low_wm,
+ QoS_LEVEL_HIGH_WM, &ttu_attr.qos_level_high_wm);
+
+ if (ttu_attr.qos_level_low_wm != dml_ttu_attr->qos_level_low_wm)
+ DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_LOW_WM - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_low_wm, ttu_attr.qos_level_low_wm);
+ if (ttu_attr.qos_level_high_wm != dml_ttu_attr->qos_level_high_wm)
+ DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_HIGH_WM - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_high_wm, ttu_attr.qos_level_high_wm);
+
+ /* TTU - per luma/chroma */
+ /* Assumed surf0 is luma and 1 is chroma */
+ REG_GET_3(DCN_SURF0_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_l,
+ QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_l,
+ QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_l);
+ REG_GET_3(DCN_SURF1_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_c,
+ QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_c,
+ QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_c);
+ REG_GET_3(DCN_CUR0_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_cur0,
+ QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_cur0,
+ QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_cur0);
+ REG_GET(FLIP_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_FLIP_L, &dlg_attr.refcyc_per_pte_group_flip_l);
+ REG_GET(DCN_CUR0_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur0);
+ REG_GET(DCN_CUR1_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur1);
+ REG_GET(DCN_SURF0_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_l);
+ REG_GET(DCN_SURF1_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_c);
+
+ if (ttu_attr.refcyc_per_req_delivery_l != dml_ttu_attr->refcyc_per_req_delivery_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_l, ttu_attr.refcyc_per_req_delivery_l);
+ if (ttu_attr.qos_level_fixed_l != dml_ttu_attr->qos_level_fixed_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_fixed_l, ttu_attr.qos_level_fixed_l);
+ if (ttu_attr.qos_ramp_disable_l != dml_ttu_attr->qos_ramp_disable_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_ramp_disable_l, ttu_attr.qos_ramp_disable_l);
+ if (ttu_attr.refcyc_per_req_delivery_c != dml_ttu_attr->refcyc_per_req_delivery_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_c, ttu_attr.refcyc_per_req_delivery_c);
+ if (ttu_attr.qos_level_fixed_c != dml_ttu_attr->qos_level_fixed_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_fixed_c, ttu_attr.qos_level_fixed_c);
+ if (ttu_attr.qos_ramp_disable_c != dml_ttu_attr->qos_ramp_disable_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_ramp_disable_c, ttu_attr.qos_ramp_disable_c);
+ if (ttu_attr.refcyc_per_req_delivery_cur0 != dml_ttu_attr->refcyc_per_req_delivery_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_cur0, ttu_attr.refcyc_per_req_delivery_cur0);
+ if (ttu_attr.qos_level_fixed_cur0 != dml_ttu_attr->qos_level_fixed_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_fixed_cur0, ttu_attr.qos_level_fixed_cur0);
+ if (ttu_attr.qos_ramp_disable_cur0 != dml_ttu_attr->qos_ramp_disable_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_ramp_disable_cur0, ttu_attr.qos_ramp_disable_cur0);
+ if (dlg_attr.refcyc_per_pte_group_flip_l != dml_dlg_attr->refcyc_per_pte_group_flip_l)
+ DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_1:REFCYC_PER_PTE_GROUP_FLIP_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_flip_l, dlg_attr.refcyc_per_pte_group_flip_l);
+ if (ttu_attr.refcyc_per_req_delivery_pre_cur0 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_cur0, ttu_attr.refcyc_per_req_delivery_pre_cur0);
+ if (ttu_attr.refcyc_per_req_delivery_pre_cur1 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur1)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_cur1, ttu_attr.refcyc_per_req_delivery_pre_cur1);
+ if (ttu_attr.refcyc_per_req_delivery_pre_l != dml_ttu_attr->refcyc_per_req_delivery_pre_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_l, ttu_attr.refcyc_per_req_delivery_pre_l);
+ if (ttu_attr.refcyc_per_req_delivery_pre_c != dml_ttu_attr->refcyc_per_req_delivery_pre_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_c, ttu_attr.refcyc_per_req_delivery_pre_c);
+}
+
static struct hubp_funcs dcn20_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
@@ -1266,6 +1577,7 @@ static struct hubp_funcs dcn20_hubp_funcs = {
.hubp_clear_underflow = hubp2_clear_underflow,
.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
.hubp_init = hubp1_init,
+ .validate_dml_output = hubp2_validate_dml_output,
};
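
The hubp2_validate_dml_output function added above repeats one pattern per field: read the programmed value back with REG_GET, compare it against what DML computed, and emit a DC_LOG_DEBUG line only when they differ. A stand-alone sketch of that pattern, reduced to its essentials (CHECK_FIELD and the values below are illustrative, not taken from the patch):

#include <stdio.h>

/* Compare a register field that was actually programmed against the value
 * the display mode library (DML) expected; report only mismatches. */
#define CHECK_FIELD(reg, field, expected, actual)					\
	do {										\
		if ((actual) != (expected))						\
			printf("DML Validation | %s:%s - Expected: %u Actual: %u\n",	\
			       (reg), (field), (unsigned)(expected), (unsigned)(actual)); \
	} while (0)

int main(void)
{
	unsigned int dml_chunk_size = 2;	/* made-up DML output */
	unsigned int hw_chunk_size = 1;		/* made-up register readback */

	CHECK_FIELD("DCHUBP_REQ_SIZE_CONFIG", "CHUNK_SIZE",
		    dml_chunk_size, hw_chunk_size);
	return 0;
}
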
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
index d5c8615af45e..8c04a3606a54 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
@@ -148,7 +148,6 @@
uint32_t VMID_SETTINGS_0
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
#define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \
DCN2_HUBP_REG_COMMON_VARIABLE_LIST; \
uint32_t FLIP_PARAMETERS_3;\
@@ -157,7 +156,6 @@
uint32_t FLIP_PARAMETERS_6;\
uint32_t VBLANK_PARAMETERS_5;\
uint32_t VBLANK_PARAMETERS_6
-#endif
#define DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type) \
DCN_HUBP_REG_FIELD_BASE_LIST(type); \
@@ -184,7 +182,6 @@
type SURFACE_TRIPLE_BUFFER_ENABLE;\
type VMID
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
#define DCN21_HUBP_REG_FIELD_VARIABLE_LIST(type) \
DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type);\
type REFCYC_PER_VM_GROUP_FLIP;\
@@ -194,31 +191,18 @@
type REFCYC_PER_PTE_GROUP_FLIP_C; \
type REFCYC_PER_META_CHUNK_FLIP_C; \
type VM_GROUP_SIZE
-#endif
struct dcn_hubp2_registers {
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
DCN21_HUBP_REG_COMMON_VARIABLE_LIST;
-#else
- DCN2_HUBP_REG_COMMON_VARIABLE_LIST;
-#endif
};
struct dcn_hubp2_shift {
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
DCN21_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t);
-#else
- DCN2_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t);
-#endif
};
struct dcn_hubp2_mask {
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
DCN21_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t);
-#else
- DCN2_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t);
-#endif
};
struct dcn20_hubp {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index ac8c18fadefc..cfbbaffa8654 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -25,17 +25,15 @@
#include <linux/delay.h>
#include "dm_services.h"
+#include "basics/dc_common.h"
#include "dm_helpers.h"
#include "core_types.h"
#include "resource.h"
-#include "dcn20/dcn20_resource.h"
-#include "dce110/dce110_hw_sequencer.h"
-#include "dcn10/dcn10_hw_sequencer.h"
+#include "dcn20_resource.h"
#include "dcn20_hwseq.h"
#include "dce/dce_hwseq.h"
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-#include "dcn20/dcn20_dsc.h"
-#endif
+#include "dcn20_dsc.h"
+#include "dcn20_optc.h"
#include "abm.h"
#include "clk_mgr.h"
#include "dmcu.h"
@@ -45,10 +43,9 @@
#include "ipp.h"
#include "mpc.h"
#include "mcif_wb.h"
+#include "dchubbub.h"
#include "reg_helper.h"
#include "dcn10/dcn10_cm_common.h"
-#include "dcn10/dcn10_hubbub.h"
-#include "dcn10/dcn10_optc.h"
#include "dc_link_dp.h"
#include "vm_helper.h"
#include "dccg.h"
@@ -64,14 +61,132 @@
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
-static void dcn20_enable_power_gating_plane(
+static int find_free_gsl_group(const struct dc *dc)
+{
+ if (dc->res_pool->gsl_groups.gsl_0 == 0)
+ return 1;
+ if (dc->res_pool->gsl_groups.gsl_1 == 0)
+ return 2;
+ if (dc->res_pool->gsl_groups.gsl_2 == 0)
+ return 3;
+
+ return 0;
+}
+
+/* NOTE: This is not a generic setup_gsl function (hence the suffix as_lock)
+ * This is only used to lock pipes in pipe splitting case with immediate flip
+ * Ordinary MPC/OTG locks suppress VUPDATE which doesn't help with immediate,
+ * so we get tearing with freesync since we cannot flip multiple pipes
+ * atomically.
+ * We use GSL for this:
+ * - immediate flip: find first available GSL group if not already assigned
+ * program gsl with that group, set current OTG as master
+ * and always us 0x4 = AND of flip_ready from all pipes
+ * - vsync flip: disable GSL if used
+ *
+ * Groups in stream_res are stored as +1 from HW registers, i.e.
+ * gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1
+ * Using a magic value like -1 would require tracking all inits/resets
+ */
+static void dcn20_setup_gsl_group_as_lock(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool enable)
+{
+ struct gsl_params gsl;
+ int group_idx;
+
+ memset(&gsl, 0, sizeof(struct gsl_params));
+
+ if (enable) {
+ /* return if group already assigned since GSL was set up
+ * for vsync flip, we would unassign so it can't be "left over"
+ */
+ if (pipe_ctx->stream_res.gsl_group > 0)
+ return;
+
+ group_idx = find_free_gsl_group(dc);
+ ASSERT(group_idx != 0);
+ pipe_ctx->stream_res.gsl_group = group_idx;
+
+ /* set gsl group reg field and mark resource used */
+ switch (group_idx) {
+ case 1:
+ gsl.gsl0_en = 1;
+ dc->res_pool->gsl_groups.gsl_0 = 1;
+ break;
+ case 2:
+ gsl.gsl1_en = 1;
+ dc->res_pool->gsl_groups.gsl_1 = 1;
+ break;
+ case 3:
+ gsl.gsl2_en = 1;
+ dc->res_pool->gsl_groups.gsl_2 = 1;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return; // invalid case
+ }
+ gsl.gsl_master_en = 1;
+ } else {
+ group_idx = pipe_ctx->stream_res.gsl_group;
+ if (group_idx == 0)
+ return; // if not in use, just return
+
+ pipe_ctx->stream_res.gsl_group = 0;
+
+ /* unset gsl group reg field and mark resource free */
+ switch (group_idx) {
+ case 1:
+ gsl.gsl0_en = 0;
+ dc->res_pool->gsl_groups.gsl_0 = 0;
+ break;
+ case 2:
+ gsl.gsl1_en = 0;
+ dc->res_pool->gsl_groups.gsl_1 = 0;
+ break;
+ case 3:
+ gsl.gsl2_en = 0;
+ dc->res_pool->gsl_groups.gsl_2 = 0;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ gsl.gsl_master_en = 0;
+ }
+
+ /* at this point we want to program whether it's to enable or disable */
+ if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL &&
+ pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) {
+ pipe_ctx->stream_res.tg->funcs->set_gsl(
+ pipe_ctx->stream_res.tg,
+ &gsl);
+
+ pipe_ctx->stream_res.tg->funcs->set_gsl_source_select(
+ pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
+ } else
+ BREAK_TO_DEBUGGER();
+}
+
+void dcn20_set_flip_control_gsl(
+ struct pipe_ctx *pipe_ctx,
+ bool flip_immediate)
+{
+ if (pipe_ctx && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl)
+ pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl(
+ pipe_ctx->plane_res.hubp, flip_immediate);
+
+}
+
+void dcn20_enable_power_gating_plane(
struct dce_hwseq *hws,
bool enable)
{
- bool force_on = 1; /* disable power gating */
+ bool force_on = true; /* disable power gating */
if (enable)
- force_on = 0;
+ force_on = false;
/* DCHUBP0/1/2/3/4/5 */
REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
@@ -128,44 +243,6 @@ void dcn20_dccg_init(struct dce_hwseq *hws)
/* This value is dependent on the hardware pipeline delay so set once per SOC */
REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0x801003c);
}
-void dcn20_display_init(struct dc *dc)
-{
- struct dce_hwseq *hws = dc->hwseq;
-
- /* RBBMIF
- * disable RBBMIF timeout detection for all clients
- * Ensure RBBMIF does not drop register accesses due to the per-client timeout
- */
- REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF);
- REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF);
-
- /* DCCG */
- dcn20_dccg_init(hws);
-
- REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, 0);
-
- /* DCHUB/MMHUBBUB
- * set global timer refclk divider
- * 100Mhz refclk -> 2
- * 27Mhz refclk -> 1
- * 48Mhz refclk -> 1
- */
- REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
- REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
- REG_WRITE(REFCLK_CNTL, 0);
-
- /* OPTC
- * OTG_CONTROL.OTG_DISABLE_POINT_CNTL = 0x3; will be set during optc2_enable_crtc
- */
-
- /* AZ
- * default value is 0x64 for 100Mhz ref clock, if the ref clock is 100Mhz, no need to program this regiser,
- * if not, it should be programmed according to the ref clock
- */
- REG_UPDATE(AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, 0x64);
- /* Enable controller clock gating */
- REG_WRITE(AZALIA_CONTROLLER_CLOCK_GATING, 0x1);
-}
void dcn20_disable_vga(
struct dce_hwseq *hws)
@@ -178,15 +255,15 @@ void dcn20_disable_vga(
REG_WRITE(D6VGA_CONTROL, 0);
}
-void dcn20_program_tripleBuffer(
+void dcn20_program_triple_buffer(
const struct dc *dc,
struct pipe_ctx *pipe_ctx,
- bool enableTripleBuffer)
+ bool enable_triple_buffer)
{
if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs) {
pipe_ctx->plane_res.hubp->funcs->hubp_enable_tripleBuffer(
pipe_ctx->plane_res.hubp,
- enableTripleBuffer);
+ enable_triple_buffer);
}
}
@@ -195,6 +272,7 @@ void dcn20_init_blank(
struct dc *dc,
struct timing_generator *tg)
{
+ struct dce_hwseq *hws = dc->hwseq;
enum dc_color_space color_space;
struct tg_color black_color = {0};
struct output_pixel_processor *opp = NULL;
@@ -225,6 +303,7 @@ void dcn20_init_blank(
opp->funcs->opp_set_disp_pattern_generator(
opp,
CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
COLOR_DEPTH_UNDEFINED,
&black_color,
otg_active_width,
@@ -234,17 +313,17 @@ void dcn20_init_blank(
bottom_opp->funcs->opp_set_disp_pattern_generator(
bottom_opp,
CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
COLOR_DEPTH_UNDEFINED,
&black_color,
otg_active_width,
otg_active_height);
}
- dcn20_hwss_wait_for_blank_complete(opp);
+ hws->funcs.wait_for_blank_complete(opp);
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-static void dcn20_dsc_pg_control(
+void dcn20_dsc_pg_control(
struct dce_hwseq *hws,
unsigned int dsc_inst,
bool power_on)
@@ -320,9 +399,8 @@ static void dcn20_dsc_pg_control(
if (org_ip_request_cntl == 0)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
}
-#endif
-static void dcn20_dpp_pg_control(
+void dcn20_dpp_pg_control(
struct dce_hwseq *hws,
unsigned int dpp_inst,
bool power_on)
@@ -396,7 +474,7 @@ static void dcn20_dpp_pg_control(
}
-static void dcn20_hubp_pg_control(
+void dcn20_hubp_pg_control(
struct dce_hwseq *hws,
unsigned int hubp_inst,
bool power_on)
@@ -473,8 +551,9 @@ static void dcn20_hubp_pg_control(
/* disable HW used by plane.
* note: cannot disable until disconnect is complete
*/
-static void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
@@ -495,7 +574,7 @@ static void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
hubp->power_gated = true;
dc->optimized_required = false; /* We're powering off, no need to optimize */
- dc->hwss.plane_atomic_power_down(dc,
+ hws->funcs.plane_atomic_power_down(dc,
pipe_ctx->plane_res.dpp,
pipe_ctx->plane_res.hubp);
@@ -526,6 +605,7 @@ enum dc_status dcn20_enable_stream_timing(
struct dc_state *context,
struct dc *dc)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct dc_stream_state *stream = pipe_ctx->stream;
struct drr_params params = {0};
unsigned int event_triggers = 0;
@@ -585,7 +665,7 @@ enum dc_status dcn20_enable_stream_timing(
pipe_ctx->stream_res.opp,
true);
- dc->hwss.blank_pixel_data(dc, pipe_ctx, true);
+ hws->funcs.blank_pixel_data(dc, pipe_ctx, true);
/* VTG is within DCHUB command block. DCFCLK is always on */
if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
@@ -593,7 +673,7 @@ enum dc_status dcn20_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
- dcn20_hwss_wait_for_blank_complete(pipe_ctx->stream_res.opp);
+ hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);
params.vertical_total_min = stream->adjust.v_total_min;
params.vertical_total_max = stream->adjust.v_total_max;
@@ -606,9 +686,13 @@ enum dc_status dcn20_enable_stream_timing(
// DRR should set trigger event to monitor surface update event
if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
event_triggers = 0x80;
+ /* Event triggers and num frames initialized for DRR, but can be
+ * later updated for PSR use. Note DRR trigger events are generated
+ * regardless of whether num frames met.
+ */
if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
- pipe_ctx->stream_res.tg, event_triggers);
+ pipe_ctx->stream_res.tg, event_triggers, 2);
/* TODO program crtc source select for non-virtual signal*/
/* TODO program FMT */
@@ -649,7 +733,7 @@ void dcn20_program_output_csc(struct dc *dc,
}
}
-bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
+bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
const struct dc_stream_state *stream)
{
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
@@ -736,20 +820,14 @@ bool dcn20_set_shaper_3dlut(
else
result = dpp_base->funcs->dpp_program_3dlut(dpp_base, NULL);
- if (plane_state->lut3d_func &&
- plane_state->lut3d_func->state.bits.initialized == 1 &&
- plane_state->lut3d_func->hdr_multiplier != 0)
- dpp_base->funcs->dpp_set_hdr_multiplier(dpp_base,
- plane_state->lut3d_func->hdr_multiplier);
- else
- dpp_base->funcs->dpp_set_hdr_multiplier(dpp_base, 0x1f000);
-
return result;
}
-bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
- const struct dc_plane_state *plane_state)
+bool dcn20_set_input_transfer_func(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
const struct dc_transfer_func *tf = NULL;
bool result = true;
@@ -758,8 +836,8 @@ bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
if (dpp_base == NULL || plane_state == NULL)
return false;
- dcn20_set_shaper_3dlut(pipe_ctx, plane_state);
- dcn20_set_blend_lut(pipe_ctx, plane_state);
+ hws->funcs.set_shaper_3dlut(pipe_ctx, plane_state);
+ hws->funcs.set_blend_lut(pipe_ctx, plane_state);
if (plane_state->in_transfer_func)
tf = plane_state->in_transfer_func;
@@ -804,6 +882,11 @@ bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
IPP_DEGAMMA_MODE_BYPASS);
break;
case TRANSFER_FUNCTION_PQ:
+ dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
+ cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
+ dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
+ result = true;
+ break;
default:
result = false;
break;
@@ -824,7 +907,7 @@ bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
return result;
}
-static void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
+void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
struct pipe_ctx *odm_pipe;
int opp_cnt = 1;
@@ -855,12 +938,16 @@ void dcn20_blank_pixel_data(
struct dc_stream_state *stream = pipe_ctx->stream;
enum dc_color_space color_space = stream->output_color_space;
enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR;
+ enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
struct pipe_ctx *odm_pipe;
int odm_cnt = 1;
int width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
int height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
+ if (stream->link->test_pattern_enabled)
+ return;
+
/* get opp dpg blank color */
color_space_to_black_color(dc, color_space, &black_color);
@@ -873,8 +960,10 @@ void dcn20_blank_pixel_data(
if (stream_res->abm)
stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
- if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE)
+ if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
+ test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
+ }
} else {
test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
}
@@ -882,6 +971,7 @@ void dcn20_blank_pixel_data(
stream_res->opp->funcs->opp_set_disp_pattern_generator(
stream_res->opp,
test_pattern,
+ test_pattern_color_space,
stream->timing.display_color_depth,
&black_color,
width,
@@ -892,6 +982,7 @@ void dcn20_blank_pixel_data(
odm_pipe->stream_res.opp,
dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE && blank ?
CONTROLLER_DP_TEST_PATTERN_COLORRAMP : test_pattern,
+ test_pattern_color_space,
stream->timing.display_color_depth,
&black_color,
width,
@@ -1217,9 +1308,11 @@ static void dcn20_update_dchubp_dpp(
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ bool viewport_changed = false;
if (pipe_ctx->update_flags.bits.dppclk)
dpp->funcs->dpp_dppclk_control(dpp, false, true);
@@ -1261,7 +1354,7 @@ static void dcn20_update_dchubp_dpp(
if (dpp->funcs->dpp_program_bias_and_scale) {
//TODO :for CNVC set scale and bias registers if necessary
- dcn10_build_prescale_params(&bns_params, plane_state);
+ build_prescale_params(&bns_params, plane_state);
dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
}
}
@@ -1269,19 +1362,19 @@ static void dcn20_update_dchubp_dpp(
if (pipe_ctx->update_flags.bits.mpcc
|| plane_state->update_flags.bits.global_alpha_change
|| plane_state->update_flags.bits.per_pixel_alpha_change) {
- /* Need mpcc to be idle if changing opp */
- if (pipe_ctx->update_flags.bits.opp_changed) {
- struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
- int mpcc_inst;
-
- for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
- if (!old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst])
- continue;
+ // MPCC inst is equal to pipe index in practice
+ int mpcc_inst = hubp->inst;
+ int opp_inst;
+ int opp_count = dc->res_pool->pipe_count;
+
+ for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
+ if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
- old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
+ dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
+ break;
}
}
- dc->hwss.update_mpcc(dc, pipe_ctx);
+ hws->funcs.update_mpcc(dc, pipe_ctx);
}
if (pipe_ctx->update_flags.bits.scaler ||
@@ -1298,14 +1391,18 @@ static void dcn20_update_dchubp_dpp(
if (pipe_ctx->update_flags.bits.viewport ||
(context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
- (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling))
+ (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
+
hubp->funcs->mem_program_viewport(
hubp,
&pipe_ctx->plane_res.scl_data.viewport,
&pipe_ctx->plane_res.scl_data.viewport_c);
+ viewport_changed = true;
+ }
/* Any updates are handled in dc interface, just need to apply existing for plane enable */
- if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed)
+ if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
+ pipe_ctx->update_flags.bits.scaler || pipe_ctx->update_flags.bits.viewport)
&& pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
dc->hwss.set_cursor_position(pipe_ctx);
dc->hwss.set_cursor_attribute(pipe_ctx);
@@ -1355,8 +1452,13 @@ static void dcn20_update_dchubp_dpp(
hubp->power_gated = false;
}
+ if (hubp->funcs->apply_PLAT_54186_wa && viewport_changed)
+ hubp->funcs->apply_PLAT_54186_wa(hubp, &plane_state->address);
+
if (pipe_ctx->update_flags.bits.enable || plane_state->update_flags.bits.addr_update)
- dc->hwss.update_plane_addr(dc, pipe_ctx);
+ hws->funcs.update_plane_addr(dc, pipe_ctx);
+
if (pipe_ctx->update_flags.bits.enable)
hubp->funcs->set_blank(hubp, false);
@@ -1368,10 +1470,11 @@ static void dcn20_program_pipe(
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
+ struct dce_hwseq *hws = dc->hwseq;
/* Only need to unblank on top pipe */
if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.abm_level)
&& !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
- dc->hwss.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible);
+ hws->funcs.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible);
if (pipe_ctx->update_flags.bits.global_sync) {
pipe_ctx->stream_res.tg->funcs->program_global_sync(
@@ -1384,12 +1487,12 @@ static void dcn20_program_pipe(
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- if (dc->hwss.setup_vupdate_interrupt)
- dc->hwss.setup_vupdate_interrupt(pipe_ctx);
+ if (hws->funcs.setup_vupdate_interrupt)
+ hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
}
if (pipe_ctx->update_flags.bits.odm)
- dc->hwss.update_odm(dc, context, pipe_ctx);
+ hws->funcs.update_odm(dc, context, pipe_ctx);
if (pipe_ctx->update_flags.bits.enable)
dcn20_enable_plane(dc, pipe_ctx, context);
@@ -1398,20 +1501,20 @@ static void dcn20_program_pipe(
dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
if (pipe_ctx->update_flags.bits.enable
- || pipe_ctx->plane_state->update_flags.bits.sdr_white_level)
- set_hdr_multiplier(pipe_ctx);
+ || pipe_ctx->plane_state->update_flags.bits.hdr_mult)
+ hws->funcs.set_hdr_multiplier(pipe_ctx);
if (pipe_ctx->update_flags.bits.enable ||
pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
pipe_ctx->plane_state->update_flags.bits.gamma_change)
- dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
+ hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
/* dcn10_translate_regamma_to_hw_format takes 750us to finish
* only do gamma programming for powering on, internal memcmp to avoid
* updating on slave planes
*/
if (pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.out_tf)
- dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
+ hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
/* If the pipe has been enabled or has a different opp, we
* should reprogram the fmt. This deals with cases where
@@ -1445,12 +1548,13 @@ static bool does_pipe_need_lock(struct pipe_ctx *pipe)
return false;
}
-static void dcn20_program_front_end_for_ctx(
+void dcn20_program_front_end_for_ctx(
struct dc *dc,
struct dc_state *context)
{
const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
int i;
+ struct dce_hwseq *hws = dc->hwseq;
bool pipe_locked[MAX_PIPES] = {false};
DC_LOGGER_INIT(dc->ctx->logger);
@@ -1482,13 +1586,13 @@ static void dcn20_program_front_end_for_ctx(
&& !context->res_ctx.pipe_ctx[i].top_pipe
&& !context->res_ctx.pipe_ctx[i].prev_odm_pipe
&& context->res_ctx.pipe_ctx[i].stream)
- dc->hwss.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
+ hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
/* Disconnect mpcc */
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
|| context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
- dc->hwss.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+ hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
}
@@ -1508,8 +1612,8 @@ static void dcn20_program_front_end_for_ctx(
pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->prev_odm_pipe && pipe->stream->num_wb_info > 0
&& (pipe->update_flags.raw || pipe->plane_state->update_flags.raw || pipe->stream->update_flags.raw)
- && dc->hwss.program_all_writeback_pipes_in_tree)
- dc->hwss.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
+ && hws->funcs.program_all_writeback_pipes_in_tree)
+ hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
}
}
@@ -1541,9 +1645,9 @@ static void dcn20_program_front_end_for_ctx(
struct hubp *hubp = pipe->plane_res.hubp;
int j = 0;
- for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS
+ for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS*1000
&& hubp->funcs->hubp_is_flip_pending(hubp); j++)
- msleep(1);
+ mdelay(1);
}
}
@@ -1594,6 +1698,7 @@ bool dcn20_update_bandwidth(
struct dc_state *context)
{
int i;
+ struct dce_hwseq *hws = dc->hwseq;
/* recalculate DML parameters */
if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false))
@@ -1623,10 +1728,10 @@ bool dcn20_update_bandwidth(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
if (pipe_ctx->prev_odm_pipe == NULL)
- dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
+ hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
- if (dc->hwss.setup_vupdate_interrupt)
- dc->hwss.setup_vupdate_interrupt(pipe_ctx);
+ if (hws->funcs.setup_vupdate_interrupt)
+ hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
}
pipe_ctx->plane_res.hubp->funcs->hubp_setup(
@@ -1640,9 +1745,8 @@ bool dcn20_update_bandwidth(
return true;
}
-static void dcn20_enable_writeback(
+void dcn20_enable_writeback(
struct dc *dc,
- const struct dc_stream_status *stream_status,
struct dc_writeback_info *wb_info,
struct dc_state *context)
{
@@ -1656,8 +1760,7 @@ static void dcn20_enable_writeback(
mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
/* set the OPTC source mux */
- ASSERT(stream_status->primary_otg_inst < MAX_PIPES);
- optc = dc->res_pool->timing_generators[stream_status->primary_otg_inst];
+ optc = dc->res_pool->timing_generators[dwb->otg_inst];
optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst);
/* set MCIF_WB buffer and arbitration configuration */
mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
@@ -1684,7 +1787,7 @@ void dcn20_disable_writeback(
mcif_wb->funcs->disable_mcif(mcif_wb);
}
-bool dcn20_hwss_wait_for_blank_complete(
+bool dcn20_wait_for_blank_complete(
struct output_pixel_processor *opp)
{
int counter;
@@ -1713,9 +1816,8 @@ bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx)
return hubp->funcs->dmdata_status_done(hubp);
}
-static void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct dce_hwseq *hws = dc->hwseq;
if (pipe_ctx->stream_res.dsc) {
@@ -1727,12 +1829,10 @@ static void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx
odm_pipe = odm_pipe->next_odm_pipe;
}
}
-#endif
}
-static void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct dce_hwseq *hws = dc->hwseq;
if (pipe_ctx->stream_res.dsc) {
@@ -1744,7 +1844,6 @@ static void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
odm_pipe = odm_pipe->next_odm_pipe;
}
}
-#endif
}
void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx)
@@ -1767,12 +1866,7 @@ void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx)
hubp->funcs->dmdata_set_attributes(hubp, &attr);
}
-void dcn20_disable_stream(struct pipe_ctx *pipe_ctx)
-{
- dce110_disable_stream(pipe_ctx);
-}
-
-static void dcn20_init_vm_ctx(
+void dcn20_init_vm_ctx(
struct dce_hwseq *hws,
struct dc *dc,
struct dc_virtual_addr_space_config *va_config,
@@ -1794,7 +1888,7 @@ static void dcn20_init_vm_ctx(
dc->res_pool->hubbub->funcs->init_vm_ctx(dc->res_pool->hubbub, &config, vmid);
}
-static int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
+int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
{
struct dcn_hubbub_phys_addr_config config;
@@ -1838,8 +1932,7 @@ static bool patch_address_for_sbs_tb_stereo(
return false;
}
-
-static void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
{
bool addr_patched = false;
PHYSICAL_ADDRESS_LOC addr;
@@ -1873,6 +1966,7 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
struct encoder_unblank_param params = { { 0 } };
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
+ struct dce_hwseq *hws = link->dc->hwseq;
struct pipe_ctx *odm_pipe;
params.opp_cnt = 1;
@@ -1885,7 +1979,7 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
params.link_settings.link_rate = link_settings->link_rate;
if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
- if (optc1_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1)
+ if (optc2_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1)
params.timing.pix_clk_100hz /= 2;
pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine(
pipe_ctx->stream_res.stream_enc, params.opp_cnt > 1);
@@ -1893,14 +1987,14 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
}
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
- link->dc->hwss.edp_backlight_control(link, true);
+ hws->funcs.edp_backlight_control(link, true);
}
}
-void dcn20_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx)
+void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct timing_generator *tg = pipe_ctx->stream_res.tg;
- int start_line = get_vupdate_offset_from_vsync(pipe_ctx);
+ int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
if (start_line < 0)
start_line = 0;
@@ -1915,6 +2009,7 @@ static void dcn20_reset_back_end_for_pipe(
struct dc_state *context)
{
int i;
+ struct dc_link *link;
DC_LOGGER_INIT(dc->ctx->logger);
if (pipe_ctx->stream_res.stream_enc == NULL) {
pipe_ctx->stream = NULL;
@@ -1922,8 +2017,14 @@ static void dcn20_reset_back_end_for_pipe(
}
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- /* DPMS may already disable */
- if (!pipe_ctx->stream->dpms_off)
+ link = pipe_ctx->stream->link;
+ /* DPMS may already be disabled, or the dpms_off status
+ * may be incorrect due to the fastboot feature. When the
+ * system resumes from S4 with only the second screen
+ * active, dpms_off would be true but VBIOS has lit up
+ * eDP, so check the link status too.
+ */
+ if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
core_link_disable_stream(pipe_ctx);
else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
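The new condition folds two teardown cases into one test. A hypothetical helper, not part of this change, that names the intent using the same driver types:

/* Hypothetical helper, not in the patch: the back end must be torn down
 * either when DPMS still reports the stream as on, or when the link is
 * active despite dpms_off (e.g. VBIOS lit up eDP across an S4 resume
 * with fastboot enabled).
 */
static bool example_backend_needs_disable(const struct pipe_ctx *pipe_ctx)
{
        const struct dc_link *link = pipe_ctx->stream->link;

        return !pipe_ctx->stream->dpms_off || link->link_status.link_active;
}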
@@ -1943,11 +2044,9 @@ static void dcn20_reset_back_end_for_pipe(
}
}
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
else if (pipe_ctx->stream_res.dsc) {
dp_set_dsc_enable(pipe_ctx, false);
}
-#endif
/* by upper caller loop, parent pipe: pipe0, will be reset last.
* back end share by all pipes and will be disable only when disable
@@ -1978,11 +2077,12 @@ static void dcn20_reset_back_end_for_pipe(
pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
-static void dcn20_reset_hw_ctx_wrap(
+void dcn20_reset_hw_ctx_wrap(
struct dc *dc,
struct dc_state *context)
{
int i;
+ struct dce_hwseq *hws = dc->hwseq;
/* Reset Back End*/
for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
@@ -2001,8 +2101,8 @@ static void dcn20_reset_hw_ctx_wrap(
struct clock_source *old_clk = pipe_ctx_old->clock_source;
dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
- if (dc->hwss.enable_stream_gating)
- dc->hwss.enable_stream_gating(dc, pipe_ctx);
+ if (hws->funcs.enable_stream_gating)
+ hws->funcs.enable_stream_gating(dc, pipe_ctx);
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}
@@ -2031,8 +2131,9 @@ void dcn20_get_mpctree_visual_confirm_color(
*color = pipe_colors[top_pipe->pipe_idx];
}
-static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct mpcc_blnd_cfg blnd_cfg = { {0} };
bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha;
@@ -2043,10 +2144,10 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
// input to MPCC is always RGB, by default leave black_color at 0
if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
- dcn10_get_hdr_visual_confirm_color(
+ hws->funcs.get_hdr_visual_confirm_color(
pipe_ctx, &blnd_cfg.black_color);
} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
- dcn10_get_surface_visual_confirm_color(
+ hws->funcs.get_surface_visual_confirm_color(
pipe_ctx, &blnd_cfg.black_color);
} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) {
dcn20_get_mpctree_visual_confirm_color(
@@ -2083,12 +2184,6 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
*/
mpcc_id = hubp->inst;
- /* If there is no full update, don't need to touch MPC tree*/
- if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
- mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
- return;
- }
-
/* check if this MPCC is already being used */
new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
/* remove MPCC if being used */
@@ -2113,125 +2208,7 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
hubp->mpcc_id = mpcc_id;
}
-static int find_free_gsl_group(const struct dc *dc)
-{
- if (dc->res_pool->gsl_groups.gsl_0 == 0)
- return 1;
- if (dc->res_pool->gsl_groups.gsl_1 == 0)
- return 2;
- if (dc->res_pool->gsl_groups.gsl_2 == 0)
- return 3;
-
- return 0;
-}
-
-/* NOTE: This is not a generic setup_gsl function (hence the suffix as_lock)
- * This is only used to lock pipes in pipe splitting case with immediate flip
- * Ordinary MPC/OTG locks suppress VUPDATE which doesn't help with immediate,
- * so we get tearing with freesync since we cannot flip multiple pipes
- * atomically.
- * We use GSL for this:
- * - immediate flip: find first available GSL group if not already assigned
- * program gsl with that group, set current OTG as master
- * and always us 0x4 = AND of flip_ready from all pipes
- * - vsync flip: disable GSL if used
- *
- * Groups in stream_res are stored as +1 from HW registers, i.e.
- * gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1
- * Using a magic value like -1 would require tracking all inits/resets
- */
-void dcn20_setup_gsl_group_as_lock(
- const struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- bool enable)
-{
- struct gsl_params gsl;
- int group_idx;
-
- memset(&gsl, 0, sizeof(struct gsl_params));
-
- if (enable) {
- /* return if group already assigned since GSL was set up
- * for vsync flip, we would unassign so it can't be "left over"
- */
- if (pipe_ctx->stream_res.gsl_group > 0)
- return;
-
- group_idx = find_free_gsl_group(dc);
- ASSERT(group_idx != 0);
- pipe_ctx->stream_res.gsl_group = group_idx;
-
- /* set gsl group reg field and mark resource used */
- switch (group_idx) {
- case 1:
- gsl.gsl0_en = 1;
- dc->res_pool->gsl_groups.gsl_0 = 1;
- break;
- case 2:
- gsl.gsl1_en = 1;
- dc->res_pool->gsl_groups.gsl_1 = 1;
- break;
- case 3:
- gsl.gsl2_en = 1;
- dc->res_pool->gsl_groups.gsl_2 = 1;
- break;
- default:
- BREAK_TO_DEBUGGER();
- return; // invalid case
- }
- gsl.gsl_master_en = 1;
- } else {
- group_idx = pipe_ctx->stream_res.gsl_group;
- if (group_idx == 0)
- return; // if not in use, just return
-
- pipe_ctx->stream_res.gsl_group = 0;
-
- /* unset gsl group reg field and mark resource free */
- switch (group_idx) {
- case 1:
- gsl.gsl0_en = 0;
- dc->res_pool->gsl_groups.gsl_0 = 0;
- break;
- case 2:
- gsl.gsl1_en = 0;
- dc->res_pool->gsl_groups.gsl_1 = 0;
- break;
- case 3:
- gsl.gsl2_en = 0;
- dc->res_pool->gsl_groups.gsl_2 = 0;
- break;
- default:
- BREAK_TO_DEBUGGER();
- return;
- }
- gsl.gsl_master_en = 0;
- }
-
- /* at this point we want to program whether it's to enable or disable */
- if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL &&
- pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) {
- pipe_ctx->stream_res.tg->funcs->set_gsl(
- pipe_ctx->stream_res.tg,
- &gsl);
-
- pipe_ctx->stream_res.tg->funcs->set_gsl_source_select(
- pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
- } else
- BREAK_TO_DEBUGGER();
-}
-
-static void dcn20_set_flip_control_gsl(
- struct pipe_ctx *pipe_ctx,
- bool flip_immediate)
-{
- if (pipe_ctx && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl)
- pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl(
- pipe_ctx->plane_res.hubp, flip_immediate);
-
-}
-
-static void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
+void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
{
enum dc_lane_count lane_count =
pipe_ctx->stream->link->cur_link_settings.lane_count;
@@ -2279,7 +2256,7 @@ static void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
}
}
-static void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
+void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
@@ -2305,7 +2282,7 @@ static void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
hubp->inst, mode);
}
-static void dcn20_fpga_init_hw(struct dc *dc)
+void dcn20_fpga_init_hw(struct dc *dc)
{
int i, j;
struct dce_hwseq *hws = dc->hwseq;
@@ -2320,13 +2297,13 @@ static void dcn20_fpga_init_hw(struct dc *dc)
res_pool->dccg->funcs->dccg_init(res_pool->dccg);
//Enable ability to power gate / don't force power on permanently
- dc->hwss.enable_power_gating_plane(hws, true);
+ hws->funcs.enable_power_gating_plane(hws, true);
// Specific to FPGA dccg and registers
REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF);
REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF);
- dcn20_dccg_init(hws);
+ hws->funcs.dccg_init(hws);
REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
@@ -2390,7 +2367,7 @@ static void dcn20_fpga_init_hw(struct dc *dc)
dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
/*to do*/
- hwss1_plane_atomic_disconnect(dc, pipe_ctx);
+ hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
}
/* initialize DWB pointer to MCIF_WB */
@@ -2419,57 +2396,3 @@ static void dcn20_fpga_init_hw(struct dc *dc)
tg->funcs->tg_init(tg);
}
}
-
-void dcn20_hw_sequencer_construct(struct dc *dc)
-{
- dcn10_hw_sequencer_construct(dc);
- dc->hwss.unblank_stream = dcn20_unblank_stream;
- dc->hwss.update_plane_addr = dcn20_update_plane_addr;
- dc->hwss.enable_stream_timing = dcn20_enable_stream_timing;
- dc->hwss.program_triplebuffer = dcn20_program_tripleBuffer;
- dc->hwss.set_input_transfer_func = dcn20_set_input_transfer_func;
- dc->hwss.set_output_transfer_func = dcn20_set_output_transfer_func;
- dc->hwss.apply_ctx_for_surface = NULL;
- dc->hwss.program_front_end_for_ctx = dcn20_program_front_end_for_ctx;
- dc->hwss.pipe_control_lock = dcn20_pipe_control_lock;
- dc->hwss.pipe_control_lock_global = dcn20_pipe_control_lock_global;
- dc->hwss.optimize_bandwidth = dcn20_optimize_bandwidth;
- dc->hwss.prepare_bandwidth = dcn20_prepare_bandwidth;
- dc->hwss.update_bandwidth = dcn20_update_bandwidth;
- dc->hwss.enable_writeback = dcn20_enable_writeback;
- dc->hwss.disable_writeback = dcn20_disable_writeback;
- dc->hwss.program_output_csc = dcn20_program_output_csc;
- dc->hwss.update_odm = dcn20_update_odm;
- dc->hwss.blank_pixel_data = dcn20_blank_pixel_data;
- dc->hwss.dmdata_status_done = dcn20_dmdata_status_done;
- dc->hwss.program_dmdata_engine = dcn20_program_dmdata_engine;
- dc->hwss.enable_stream = dcn20_enable_stream;
- dc->hwss.disable_stream = dcn20_disable_stream;
- dc->hwss.init_sys_ctx = dcn20_init_sys_ctx;
- dc->hwss.init_vm_ctx = dcn20_init_vm_ctx;
- dc->hwss.disable_stream_gating = dcn20_disable_stream_gating;
- dc->hwss.enable_stream_gating = dcn20_enable_stream_gating;
- dc->hwss.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt;
- dc->hwss.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap;
- dc->hwss.update_mpcc = dcn20_update_mpcc;
- dc->hwss.set_flip_control_gsl = dcn20_set_flip_control_gsl;
- dc->hwss.init_blank = dcn20_init_blank;
- dc->hwss.disable_plane = dcn20_disable_plane;
- dc->hwss.plane_atomic_disable = dcn20_plane_atomic_disable;
- dc->hwss.enable_power_gating_plane = dcn20_enable_power_gating_plane;
- dc->hwss.dpp_pg_control = dcn20_dpp_pg_control;
- dc->hwss.hubp_pg_control = dcn20_hubp_pg_control;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- dc->hwss.dsc_pg_control = dcn20_dsc_pg_control;
-#else
- dc->hwss.dsc_pg_control = NULL;
-#endif
- dc->hwss.disable_vga = dcn20_disable_vga;
-
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- dc->hwss.init_hw = dcn20_fpga_init_hw;
- dc->hwss.init_pipes = NULL;
- }
-
-
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
index 3098f1049ed7..02c9be5ebd47 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
@@ -26,90 +26,112 @@
#ifndef __DC_HWSS_DCN20_H__
#define __DC_HWSS_DCN20_H__
-struct dc;
+#include "hw_sequencer_private.h"
-void dcn20_hw_sequencer_construct(struct dc *dc);
-
-enum dc_status dcn20_enable_stream_timing(
- struct pipe_ctx *pipe_ctx,
- struct dc_state *context,
- struct dc *dc);
-
-void dcn20_blank_pixel_data(
+bool dcn20_set_blend_lut(
+ struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
+bool dcn20_set_shaper_3dlut(
+ struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
+void dcn20_program_front_end_for_ctx(
struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- bool blank);
-
+ struct dc_state *context);
+void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx);
+bool dcn20_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state);
+bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream);
void dcn20_program_output_csc(struct dc *dc,
struct pipe_ctx *pipe_ctx,
enum dc_color_space colorspace,
uint16_t *matrix,
int opp_id);
-
+void dcn20_enable_stream(struct pipe_ctx *pipe_ctx);
+void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
+ struct dc_link_settings *link_settings);
+void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_blank_pixel_data(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank);
+void dcn20_pipe_control_lock(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
+void dcn20_pipe_control_lock_global(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
void dcn20_prepare_bandwidth(
struct dc *dc,
struct dc_state *context);
-
void dcn20_optimize_bandwidth(
struct dc *dc,
struct dc_state *context);
-
bool dcn20_update_bandwidth(
struct dc *dc,
struct dc_state *context);
-
+void dcn20_reset_hw_ctx_wrap(
+ struct dc *dc,
+ struct dc_state *context);
+enum dc_status dcn20_enable_stream_timing(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc);
+void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_init_blank(
+ struct dc *dc,
+ struct timing_generator *tg);
+void dcn20_disable_vga(
+ struct dce_hwseq *hws);
+void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_enable_power_gating_plane(
+ struct dce_hwseq *hws,
+ bool enable);
+void dcn20_dpp_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on);
+void dcn20_hubp_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int hubp_inst,
+ bool power_on);
+void dcn20_program_triple_buffer(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool enable_triple_buffer);
+void dcn20_enable_writeback(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context);
void dcn20_disable_writeback(
struct dc *dc,
unsigned int dwb_pipe_inst);
-
-bool dcn20_hwss_wait_for_blank_complete(
- struct output_pixel_processor *opp);
-
-bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
- const struct dc_stream_state *stream);
-
-bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
- const struct dc_plane_state *plane_state);
-
+void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx);
bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx);
-
+void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx);
void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx);
-
-void dcn20_disable_stream(struct pipe_ctx *pipe_ctx);
-
-void dcn20_program_tripleBuffer(
- const struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- bool enableTripleBuffer);
-
-void dcn20_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx);
-
-void dcn20_pipe_control_lock_global(
+void dcn20_init_vm_ctx(
+ struct dce_hwseq *hws,
struct dc *dc,
- struct pipe_ctx *pipe,
- bool lock);
-void dcn20_setup_gsl_group_as_lock(const struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- bool enable);
-void dcn20_dccg_init(struct dce_hwseq *hws);
-void dcn20_init_blank(
- struct dc *dc,
- struct timing_generator *tg);
-void dcn20_display_init(struct dc *dc);
-void dcn20_pipe_control_lock(
- struct dc *dc,
- struct pipe_ctx *pipe,
- bool lock);
-void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
-void dcn20_enable_plane(
- struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct dc_state *context);
-bool dcn20_set_blend_lut(
- struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
-bool dcn20_set_shaper_3dlut(
- struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
-void dcn20_get_mpctree_visual_confirm_color(
+ struct dc_virtual_addr_space_config *va_config,
+ int vmid);
+void dcn20_set_flip_control_gsl(
struct pipe_ctx *pipe_ctx,
- struct tg_color *color);
+ bool flip_immediate);
+void dcn20_dsc_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int dsc_inst,
+ bool power_on);
+void dcn20_fpga_init_hw(struct dc *dc);
+bool dcn20_wait_for_blank_complete(
+ struct output_pixel_processor *opp);
+void dcn20_dccg_init(struct dce_hwseq *hws);
+int dcn20_init_sys_ctx(struct dce_hwseq *hws,
+ struct dc *dc,
+ struct dc_phy_addr_space_config *pa_config);
+
#endif /* __DC_HWSS_DCN20_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
new file mode 100644
index 000000000000..d51e02fdab4d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce110/dce110_hw_sequencer.h"
+#include "dcn10/dcn10_hw_sequencer.h"
+#include "dcn20_hwseq.h"
+
+static const struct hw_sequencer_funcs dcn20_funcs = {
+ .program_gamut_remap = dcn10_program_gamut_remap,
+ .init_hw = dcn10_init_hw,
+ .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ .apply_ctx_for_surface = NULL,
+ .program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .update_plane_addr = dcn20_update_plane_addr,
+ .update_dchub = dcn10_update_dchub,
+ .update_pending_status = dcn10_update_pending_status,
+ .program_output_csc = dcn20_program_output_csc,
+ .enable_accelerated_mode = dce110_enable_accelerated_mode,
+ .enable_timing_synchronization = dcn10_enable_timing_synchronization,
+ .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
+ .update_info_frame = dce110_update_info_frame,
+ .send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
+ .enable_stream = dcn20_enable_stream,
+ .disable_stream = dce110_disable_stream,
+ .unblank_stream = dcn20_unblank_stream,
+ .blank_stream = dce110_blank_stream,
+ .enable_audio_stream = dce110_enable_audio_stream,
+ .disable_audio_stream = dce110_disable_audio_stream,
+ .disable_plane = dcn20_disable_plane,
+ .pipe_control_lock = dcn20_pipe_control_lock,
+ .pipe_control_lock_global = dcn20_pipe_control_lock_global,
+ .prepare_bandwidth = dcn20_prepare_bandwidth,
+ .optimize_bandwidth = dcn20_optimize_bandwidth,
+ .update_bandwidth = dcn20_update_bandwidth,
+ .set_drr = dcn10_set_drr,
+ .get_position = dcn10_get_position,
+ .set_static_screen_control = dcn10_set_static_screen_control,
+ .setup_stereo = dcn10_setup_stereo,
+ .set_avmute = dce110_set_avmute,
+ .log_hw_state = dcn10_log_hw_state,
+ .get_hw_state = dcn10_get_hw_state,
+ .clear_status_bits = dcn10_clear_status_bits,
+ .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .edp_power_control = dce110_edp_power_control,
+ .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+ .set_cursor_position = dcn10_set_cursor_position,
+ .set_cursor_attribute = dcn10_set_cursor_attribute,
+ .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
+ .set_clock = dcn10_set_clock,
+ .get_clock = dcn10_get_clock,
+ .program_triplebuffer = dcn20_program_triple_buffer,
+ .enable_writeback = dcn20_enable_writeback,
+ .disable_writeback = dcn20_disable_writeback,
+ .dmdata_status_done = dcn20_dmdata_status_done,
+ .program_dmdata_engine = dcn20_program_dmdata_engine,
+ .set_dmdata_attributes = dcn20_set_dmdata_attributes,
+ .init_sys_ctx = dcn20_init_sys_ctx,
+ .init_vm_ctx = dcn20_init_vm_ctx,
+ .set_flip_control_gsl = dcn20_set_flip_control_gsl,
+ .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+};
+
+static const struct hwseq_private_funcs dcn20_private_funcs = {
+ .init_pipes = dcn10_init_pipes,
+ .update_plane_addr = dcn20_update_plane_addr,
+ .plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
+ .update_mpcc = dcn20_update_mpcc,
+ .set_input_transfer_func = dcn20_set_input_transfer_func,
+ .set_output_transfer_func = dcn20_set_output_transfer_func,
+ .power_down = dce110_power_down,
+ .enable_display_power_gating = dcn10_dummy_display_power_gating,
+ .blank_pixel_data = dcn20_blank_pixel_data,
+ .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
+ .enable_stream_timing = dcn20_enable_stream_timing,
+ .edp_backlight_control = dce110_edp_backlight_control,
+ .disable_stream_gating = dcn20_disable_stream_gating,
+ .enable_stream_gating = dcn20_enable_stream_gating,
+ .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
+ .did_underflow_occur = dcn10_did_underflow_occur,
+ .init_blank = dcn20_init_blank,
+ .disable_vga = dcn20_disable_vga,
+ .bios_golden_init = dcn10_bios_golden_init,
+ .plane_atomic_disable = dcn20_plane_atomic_disable,
+ .plane_atomic_power_down = dcn10_plane_atomic_power_down,
+ .enable_power_gating_plane = dcn20_enable_power_gating_plane,
+ .dpp_pg_control = dcn20_dpp_pg_control,
+ .hubp_pg_control = dcn20_hubp_pg_control,
+ .update_odm = dcn20_update_odm,
+ .dsc_pg_control = dcn20_dsc_pg_control,
+ .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
+ .get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color,
+ .set_hdr_multiplier = dcn10_set_hdr_multiplier,
+ .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
+ .wait_for_blank_complete = dcn20_wait_for_blank_complete,
+ .dccg_init = dcn20_dccg_init,
+ .set_blend_lut = dcn20_set_blend_lut,
+ .set_shaper_3dlut = dcn20_set_shaper_3dlut,
+};
+
+void dcn20_hw_sequencer_construct(struct dc *dc)
+{
+ dc->hwss = dcn20_funcs;
+ dc->hwseq->funcs = dcn20_private_funcs;
+
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ dc->hwss.init_hw = dcn20_fpga_init_hw;
+ dc->hwseq->funcs.init_pipes = NULL;
+ }
+}
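This constructor is the core of the refactor: instead of patching individual dc->hwss entries on top of the dcn10 constructor, whole tables are installed, with cross-component entry points on dc->hwss and hwseq-internal hooks on dc->hwseq->funcs. A minimal sketch of the intended dispatch convention, reusing the driver's existing types; the function below is hypothetical and only illustrates public-versus-private calls, with NULL checks because variants may leave private hooks unimplemented:

static void example_blank_otg_masters(struct dc *dc, struct dc_state *context)
{
        struct dce_hwseq *hws = dc->hwseq;
        int i;

        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

                if (!pipe_ctx->stream || pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe)
                        continue;

                /* cross-component code calls through the public table */
                dc->hwss.pipe_control_lock(dc, pipe_ctx, true);

                /* hwseq-internal helpers go through the private table */
                if (hws->funcs.blank_pixel_data)
                        hws->funcs.blank_pixel_data(dc, pipe_ctx, true);

                dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
        }
}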
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h
new file mode 100644
index 000000000000..12277797cd71
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_DCN20_INIT_H__
+#define __DC_DCN20_INIT_H__
+
+struct dc;
+
+void dcn20_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_DCN20_INIT_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
index e476f27aa3a9..e4ac73035c84 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
@@ -168,10 +168,8 @@ static struct mpll_cfg dcn2_mpll_cfg[] = {
void enc2_fec_set_enable(struct link_encoder *enc, bool enable)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
DC_LOG_DSC("%s FEC at link encoder inst %d",
enable ? "Enabling" : "Disabling", enc->id.enum_id);
-#endif
REG_UPDATE(DP_DPHY_CNTL, DPHY_FEC_EN, enable);
}
@@ -192,7 +190,6 @@ bool enc2_fec_is_active(struct link_encoder *enc)
return (active != 0);
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* this function reads dsc related register fields to be logged later in dcn10_log_hw_state
* into a dcn_dsc_state struct.
*/
@@ -203,8 +200,8 @@ void link_enc2_read_state(struct link_encoder *enc, struct link_enc_state *s)
REG_GET(DP_DPHY_CNTL, DPHY_FEC_EN, &s->dphy_fec_en);
REG_GET(DP_DPHY_CNTL, DPHY_FEC_READY_SHADOW, &s->dphy_fec_ready_shadow);
REG_GET(DP_DPHY_CNTL, DPHY_FEC_ACTIVE_STATUS, &s->dphy_fec_active_status);
+ REG_GET(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, &s->dp_link_training_complete);
}
-#endif
static bool update_cfg_data(
struct dcn10_link_encoder *enc10,
@@ -315,9 +312,7 @@ void enc2_hw_init(struct link_encoder *enc)
}
static const struct link_encoder_funcs dcn20_link_enc_funcs = {
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.read_state = link_enc2_read_state,
-#endif
.validate_output_with_stream =
dcn10_link_encoder_validate_output_with_stream,
.hw_init = enc2_hw_init,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
index 0c98a0bbbd14..8cab8107fd94 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
@@ -33,7 +33,142 @@
SRI(AUX_DPHY_TX_CONTROL, DP_AUX, id)
#define UNIPHY_MASK_SH_LIST(mask_sh)\
- LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_LINK_ENABLE, mask_sh)
+ LE_SF(SYMCLKA_CLOCK_ENABLE, SYMCLKA_CLOCK_ENABLE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_LINK_ENABLE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL0_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL1_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL2_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL3_XBAR_SOURCE, mask_sh)
+
+#define DPCS_MASK_SH_LIST(mask_sh)\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX0_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX1_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX2_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX3_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_MPLLB_MULTIPLIER, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_RATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_RATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_QUOT, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_DEN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL8, RDPCS_PHY_DP_MPLLB_SSC_PEAK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL9, RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL9, RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL10, RDPCS_PHY_DP_MPLLB_FRACN_REM, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_REF_CLK_MPLLB_DIV, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_HDMI_MPLLB_HDMI_DIV, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_SSC_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_TX_CLK_DIV, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_STATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL13, RDPCS_PHY_DP_MPLLB_DIV_CLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL13, RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL14, RDPCS_PHY_DP_MPLLB_FRACN_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL14, RDPCS_PHY_DP_MPLLB_PMIX_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE0_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE1_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE2_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE3_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_RD_START_DELAY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_EXT_REFCLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_BYPASS, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_CLOCK_ON, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_CLOCK_ON, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_GATE_DIS, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_CR_MUX_SEL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_REF_RANGE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_BYPASS, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_EXT_LD_DONE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_HDMIMODE_ENABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_INIT_DONE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DP4_POR, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA, RDPCS_PLL_UPDATE_DATA, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_REG_FIFO_ERROR_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_TX_FIFO_ERROR_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_DPALT_DISABLE_TOGGLE_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_DPALT_4LANE_TOGGLE_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCS_TX_CR_ADDR, RDPCS_TX_CR_ADDR, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCS_TX_CR_DATA, RDPCS_TX_CR_DATA, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_V2I, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_FREQ_VCO, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_MPLLB_CP_INT, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_MPLLB_CP_PROP, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_FINETUNE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_RANGE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_POST, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_CLOCK_ON, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_GATE_DIS, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_EN, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_SWAP, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_ORDER_INVERT, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_EN, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_RD_START_DELAY, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_DEBUG_CONFIG, DPCS_DBG_CBUS_DIS, mask_sh)
+
+#define DPCS_DCN2_MASK_SH_LIST(mask_sh)\
+ DPCS_MASK_SH_LIST(mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL, RDPCS_PHY_RX_REF_LD_VAL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL, RDPCS_PHY_RX_VCO_LD_VAL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX0_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX1_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX0_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX1_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX2_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX2_RATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX3_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX3_RATE, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYA_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYB_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYC_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYD_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh)
#define LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh)\
LINK_ENCODER_MASK_SH_LIST_DCN10(mask_sh),\
@@ -63,6 +198,49 @@
SRI(CLOCK_ENABLE, SYMCLK, id), \
SRI(CHANNEL_XBAR_CNTL, UNIPHY, id)
+#define DPCS_DCN2_CMN_REG_LIST(id) \
+ SRI(DIG_LANE_ENABLE, DIG, id), \
+ SRI(TMDS_CTL_BITS, DIG, id), \
+ SRI(RDPCSTX_PHY_CNTL3, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL4, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL5, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL6, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL7, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL8, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL9, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL10, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL11, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL12, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL13, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL14, RDPCSTX, id), \
+ SRI(RDPCSTX_CNTL, RDPCSTX, id), \
+ SRI(RDPCSTX_CLOCK_CNTL, RDPCSTX, id), \
+ SRI(RDPCSTX_INTERRUPT_CONTROL, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL0, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL2, RDPCSTX, id), \
+ SRI(RDPCSTX_PLL_UPDATE_DATA, RDPCSTX, id), \
+ SRI(RDPCS_TX_CR_ADDR, RDPCSTX, id), \
+ SRI(RDPCS_TX_CR_DATA, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE0, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE1, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \
+ SRI(DPCSTX_TX_CLOCK_CNTL, DPCSTX, id), \
+ SRI(DPCSTX_TX_CNTL, DPCSTX, id), \
+ SRI(DPCSTX_DEBUG_CONFIG, DPCSTX, id), \
+ SRI(RDPCSTX_DEBUG_CONFIG, RDPCSTX, id), \
+ SR(RDPCSTX0_RDPCSTX_SCRATCH)
+
+
+#define DPCS_DCN2_REG_LIST(id) \
+ DPCS_DCN2_CMN_REG_LIST(id), \
+ SRI(RDPCSTX_PHY_RX_LD_VAL, RDPCSTX, id),\
+ SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id)
+
+#define LE_DCN2_REG_LIST(id) \
+ LE_DCN10_REG_LIST(id), \
+ SR(DCIO_SOFT_RESET)
+
struct mpll_cfg {
uint32_t mpllb_ana_v2i;
uint32_t mpllb_ana_freq_vco;
@@ -158,9 +336,7 @@ void enc2_fec_set_ready(struct link_encoder *enc, bool ready);
bool enc2_fec_is_active(struct link_encoder *enc);
void enc2_hw_init(struct link_encoder *enc);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
void link_enc2_read_state(struct link_encoder *enc, struct link_enc_state *s);
-#endif
void dcn20_link_encoder_enable_dp_output(
struct link_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 5a188b2bc033..de9c857ab3e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -33,6 +33,9 @@
#define REG(reg)\
mpc20->mpc_regs->reg
+#define IND_REG(index) \
+ (index)
+
#define CTX \
mpc20->base.ctx
@@ -132,19 +135,33 @@ void mpc2_set_output_csc(
const uint16_t *regval,
enum mpc_output_csc_mode ocsc_mode)
{
+ uint32_t cur_mode;
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
struct color_matrices_reg ocsc_regs;
- REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
-
- if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE)
+ if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) {
+ REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
return;
+ }
if (regval == NULL) {
BREAK_TO_DEBUGGER();
return;
}
+ /* determine which CSC coefficients (A or B) we are using
+ * currently. select the alternate set to double buffer
+ * the CSC update so CSC is updated on frame boundary
+ */
+ IX_REG_GET(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA,
+ MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX,
+ MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode);
+
+ if (cur_mode != MPC_OUTPUT_CSC_COEF_A)
+ ocsc_mode = MPC_OUTPUT_CSC_COEF_A;
+ else
+ ocsc_mode = MPC_OUTPUT_CSC_COEF_B;
+
ocsc_regs.shifts.csc_c11 = mpc20->mpc_shift->MPC_OCSC_C11_A;
ocsc_regs.masks.csc_c11 = mpc20->mpc_mask->MPC_OCSC_C11_A;
ocsc_regs.shifts.csc_c12 = mpc20->mpc_shift->MPC_OCSC_C12_A;
@@ -157,10 +174,13 @@ void mpc2_set_output_csc(
ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_B[opp_id]);
ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_B[opp_id]);
}
+
cm_helper_program_color_matrices(
mpc20->base.ctx,
regval,
&ocsc_regs);
+
+ REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
}
void mpc2_set_ocsc_default(
@@ -169,14 +189,16 @@ void mpc2_set_ocsc_default(
enum dc_color_space color_space,
enum mpc_output_csc_mode ocsc_mode)
{
+ uint32_t cur_mode;
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
uint32_t arr_size;
struct color_matrices_reg ocsc_regs;
const uint16_t *regval = NULL;
- REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
- if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE)
+ if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) {
+ REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
return;
+ }
regval = find_color_matrix(color_space, &arr_size);
@@ -185,6 +207,19 @@ void mpc2_set_ocsc_default(
return;
}
+ /* determine which CSC coefficients (A or B) we are using
+ * currently. select the alternate set to double buffer
+ * the CSC update so CSC is updated on frame boundary
+ */
+ IX_REG_GET(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA,
+ MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX,
+ MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode);
+
+ if (cur_mode != MPC_OUTPUT_CSC_COEF_A)
+ ocsc_mode = MPC_OUTPUT_CSC_COEF_A;
+ else
+ ocsc_mode = MPC_OUTPUT_CSC_COEF_B;
+
ocsc_regs.shifts.csc_c11 = mpc20->mpc_shift->MPC_OCSC_C11_A;
ocsc_regs.masks.csc_c11 = mpc20->mpc_mask->MPC_OCSC_C11_A;
ocsc_regs.shifts.csc_c12 = mpc20->mpc_shift->MPC_OCSC_C12_A;
@@ -203,6 +238,8 @@ void mpc2_set_ocsc_default(
mpc20->base.ctx,
regval,
&ocsc_regs);
+
+ REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
}
static void mpc2_ogam_get_reg_field(
@@ -345,6 +382,9 @@ static void mpc20_program_ogam_pwl(
uint32_t i;
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+ PERF_TRACE();
+ REG_SEQ_START();
+
for (i = 0 ; i < num; i++) {
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].red_reg);
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].green_reg);
@@ -463,6 +503,11 @@ void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id)
ASSERT(!mpc_disabled);
ASSERT(!mpc_idle);
}
+
+ REG_SEQ_SUBMIT();
+ PERF_TRACE();
+ REG_SEQ_WAIT_DONE();
+ PERF_TRACE();
}
static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
index 9f53192da2dc..c78fd5123497 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
@@ -80,6 +80,10 @@
SRII(DENORM_CLAMP_G_Y, MPC_OUT, inst),\
SRII(DENORM_CLAMP_B_CB, MPC_OUT, inst)
+#define MPC_DBG_REG_LIST_DCN2_0() \
+ SR(MPC_OCSC_TEST_DEBUG_DATA),\
+ SR(MPC_OCSC_TEST_DEBUG_INDEX)
+
#define MPC_REG_VARIABLE_LIST_DCN2_0 \
MPC_COMMON_REG_VARIABLE_LIST \
uint32_t MPCC_TOP_GAIN[MAX_MPCC]; \
@@ -118,6 +122,8 @@
uint32_t MPCC_OGAM_LUT_RAM_CONTROL[MAX_MPCC];\
uint32_t MPCC_OGAM_LUT_DATA[MAX_MPCC];\
uint32_t MPCC_OGAM_MODE[MAX_MPCC];\
+ uint32_t MPC_OCSC_TEST_DEBUG_DATA;\
+ uint32_t MPC_OCSC_TEST_DEBUG_INDEX;\
uint32_t CSC_MODE[MAX_OPP]; \
uint32_t CSC_C11_C12_A[MAX_OPP]; \
uint32_t CSC_C33_C34_A[MAX_OPP]; \
@@ -134,6 +140,7 @@
SF(MPCC0_MPCC_TOP_GAIN, MPCC_TOP_GAIN, mask_sh),\
SF(MPCC0_MPCC_BOT_GAIN_INSIDE, MPCC_BOT_GAIN_INSIDE, mask_sh),\
SF(MPCC0_MPCC_BOT_GAIN_OUTSIDE, MPCC_BOT_GAIN_OUTSIDE, mask_sh),\
+ SF(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_INDEX, mask_sh),\
SF(MPC_OUT0_CSC_MODE, MPC_OCSC_MODE, mask_sh),\
SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C11_A, mask_sh),\
SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C12_A, mask_sh),\
@@ -174,6 +181,19 @@
SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\
SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh)
+/*
+ * DCN2 MPC_OCSC debug status register:
+ *
+ * The status entry that reports the current OCSC mode is at debug index 1,
+ * with the OCSC mode field in bits [1:0] of the debug data word.
+ */
+#define MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX 1
+
+#define MPC_DEBUG_REG_LIST_SH_DCN20 \
+ .MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE = 0
+
+#define MPC_DEBUG_REG_LIST_MASK_DCN20 \
+ .MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE = 0x3
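The shift/mask pair above (shift 0, mask 0x3) describes where the OCSC mode field lives in the debug data word. A tiny standalone sketch of the generic decode such a pair drives; get_field() is an illustration of the convention, not the DC register macros:

#include <stdio.h>
#include <stdint.h>

/* Generic field decode used with a shift/mask pair like the one above. */
static uint32_t get_field(uint32_t reg_value, uint32_t shift, uint32_t mask)
{
	return (reg_value & mask) >> shift;
}

int main(void)
{
	uint32_t debug_data = 0x2; /* bits [1:0] = 2, i.e. coefficient bank B */
	uint32_t ocsc_mode = get_field(debug_data, 0, 0x3);

	printf("current OCSC mode field = %u\n", (unsigned int)ocsc_mode);
	return 0;
}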
#define MPC_REG_FIELD_LIST_DCN2_0(type) \
MPC_REG_FIELD_LIST(type)\
@@ -182,6 +202,8 @@
type MPCC_TOP_GAIN;\
type MPCC_BOT_GAIN_INSIDE;\
type MPCC_BOT_GAIN_OUTSIDE;\
+ type MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE;\
+ type MPC_OCSC_TEST_DEBUG_INDEX;\
type MPC_OCSC_MODE;\
type MPC_OCSC_C11_A;\
type MPC_OCSC_C12_A;\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
index 40164ed015ea..023cc71fad0f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
@@ -41,6 +41,7 @@
void opp2_set_disp_pattern_generator(
struct output_pixel_processor *opp,
enum controller_dp_test_pattern test_pattern,
+ enum controller_dp_color_space color_space,
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width,
@@ -100,9 +101,22 @@ void opp2_set_disp_pattern_generator(
TEST_PATTERN_DYN_RANGE_CEA :
TEST_PATTERN_DYN_RANGE_VESA);
+ switch (color_space) {
+ case CONTROLLER_DP_COLOR_SPACE_YCBCR601:
+ mode = TEST_PATTERN_MODE_COLORSQUARES_YCBCR601;
+ break;
+ case CONTROLLER_DP_COLOR_SPACE_YCBCR709:
+ mode = TEST_PATTERN_MODE_COLORSQUARES_YCBCR709;
+ break;
+ case CONTROLLER_DP_COLOR_SPACE_RGB:
+ default:
+ mode = TEST_PATTERN_MODE_COLORSQUARES_RGB;
+ break;
+ }
+
REG_UPDATE_6(DPG_CONTROL,
DPG_EN, 1,
- DPG_MODE, TEST_PATTERN_MODE_COLORSQUARES_RGB,
+ DPG_MODE, mode,
DPG_DYNAMIC_RANGE, dyn_range,
DPG_BIT_DEPTH, bit_depth,
DPG_VRES, 6,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
index abd8de9a78f8..4093bec172c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
@@ -140,6 +140,7 @@ void dcn20_opp_construct(struct dcn20_opp *oppn20,
void opp2_set_disp_pattern_generator(
struct output_pixel_processor *opp,
enum controller_dp_test_pattern test_pattern,
+ enum controller_dp_color_space color_space,
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index 3b613fb93ef8..d875b0c38fde 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -59,11 +59,16 @@ bool optc2_enable_crtc(struct timing_generator *optc)
REG_UPDATE(CONTROL,
VTG0_ENABLE, 1);
+ REG_SEQ_START();
+
/* Enable CRTC */
REG_UPDATE_2(OTG_CONTROL,
OTG_DISABLE_POINT_CNTL, 3,
OTG_MASTER_EN, 1);
+ REG_SEQ_SUBMIT();
+ REG_SEQ_WAIT_DONE();
+
return true;
}
@@ -167,7 +172,6 @@ void optc2_set_gsl_source_select(
}
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* DSC encoder frame start controls: x = h position, line_num = # of lines from vstartup */
void optc2_set_dsc_encoder_frame_start(struct timing_generator *optc,
int x_position,
@@ -201,13 +205,12 @@ void optc2_set_dsc_config(struct timing_generator *optc,
REG_UPDATE(OPTC_WIDTH_CONTROL,
OPTC_DSC_SLICE_WIDTH, dsc_slice_width);
}
-#endif
-/**
- * PTI i think is already done somewhere else for 2ka
- * (opp?, please double check.
- * OPTC side only has 1 register to set for PTI_ENABLE)
- */
+/* TEMP: need to figure out the inheritance model here. */
+bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
+{
+ return optc1_is_two_pixels_per_containter(timing);
+}
void optc2_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing)
@@ -221,7 +224,7 @@ void optc2_set_odm_bypass(struct timing_generator *optc,
OPTC_SEG1_SRC_SEL, 0xf);
REG_WRITE(OTG_H_TIMING_CNTL, 0);
- h_div_2 = optc1_is_two_pixels_per_containter(dc_crtc_timing);
+ h_div_2 = optc2_is_two_pixels_per_containter(dc_crtc_timing);
REG_UPDATE(OTG_H_TIMING_CNTL,
OTG_H_TIMING_DIV_BY2, h_div_2);
REG_SET(OPTC_MEMORY_CONFIG, 0,
@@ -233,12 +236,13 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
struct dc_crtc_timing *timing)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- /* 2 pieces of memory required for up to 5120 displays, 4 for up to 8192 */
int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right)
/ opp_cnt;
- int memory_mask = mpcc_hactive <= 2560 ? 0x3 : 0xf;
+ uint32_t memory_mask;
uint32_t data_fmt = 0;
+ ASSERT(opp_cnt == 2);
+
/* TODO: In pseudocode but does not affect maximus, delete comment if we dont need on asic
* REG_SET(OTG_GLOBAL_CONTROL2, 0, GLOBAL_UPDATE_LOCK_EN, 1);
* Program OTG register MASTER_UPDATE_LOCK_DB_X/Y to the position before DP frame start
@@ -246,9 +250,17 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
* MASTER_UPDATE_LOCK_DB_X, 160,
* MASTER_UPDATE_LOCK_DB_Y, 240);
*/
+
+ /* Two pieces of memory are required for displays up to 5120 pixels wide
+ * and four for up to 8192; for ODM combine we simplify by always using four.
+ * To avoid overlap, each OPP instance "reserves" two memories and the
+ * reservations are combined uniquely here.
+ */
+ memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2);
+
if (REG(OPTC_MEMORY_CONFIG))
REG_SET(OPTC_MEMORY_CONFIG, 0,
- OPTC_MEM_SEL, memory_mask << (optc->inst * 4));
+ OPTC_MEM_SEL, memory_mask);
if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
data_fmt = 1;
@@ -257,7 +269,6 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, data_fmt);
- ASSERT(opp_cnt == 2);
REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,
OPTC_NUM_OF_INPUT_SEGMENT, 1,
OPTC_SEG0_SRC_SEL, opp_id[0],
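The mask computed above carves the OPTC memory pool into per-OPP pairs: OPP instance i contributes bits 2*i and 2*i+1. A standalone sketch of that combination, written as a loop for clarity even though the driver hard-codes the two-entry case (opp_cnt == 2 is asserted above); the array and printing are illustration only:

#include <stdio.h>
#include <stdint.h>

/* Combine the per-OPP reservations the way the ODM-combine path does:
 * each OPP instance reserves two memory pieces at bits 2*i and 2*i+1.
 */
static uint32_t odm_memory_mask(const int *opp_id, int opp_cnt)
{
	uint32_t mask = 0;
	int i;

	for (i = 0; i < opp_cnt; i++)
		mask |= 0x3u << (opp_id[i] * 2);
	return mask;
}

int main(void)
{
	int opp_pair[2] = { 1, 2 };

	/* OPP 1 reserves bits 2-3, OPP 2 reserves bits 4-5 -> 0x3c. */
	printf("OPTC_MEM_SEL mask = 0x%x\n",
	       (unsigned int)odm_memory_mask(opp_pair, 2));
	return 0;
}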
@@ -379,14 +390,8 @@ void optc2_setup_manual_trigger(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- REG_SET(OTG_MANUAL_FLOW_CONTROL, 0,
- MANUAL_FLOW_CONTROL, 1);
-
- REG_SET(OTG_GLOBAL_CONTROL2, 0,
- MANUAL_FLOW_CONTROL_SEL, optc->inst);
-
REG_SET_8(OTG_TRIGA_CNTL, 0,
- OTG_TRIGA_SOURCE_SELECT, 22,
+ OTG_TRIGA_SOURCE_SELECT, 21,
OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1,
OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0,
@@ -448,9 +453,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
.setup_global_swap_lock = NULL,
.get_crc = optc1_get_crc,
.configure_crc = optc1_configure_crc,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.set_dsc_config = optc2_set_dsc_config,
-#endif
.set_dwb_source = optc2_set_dwb_source,
.set_odm_bypass = optc2_set_odm_bypass,
.set_odm_combine = optc2_set_odm_combine,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
index 32a58431fd09..239cc40ae474 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
@@ -86,12 +86,10 @@ void optc2_set_gsl_source_select(struct timing_generator *optc,
int group_idx,
uint32_t gsl_ready_signal);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
void optc2_set_dsc_config(struct timing_generator *optc,
enum optc_dsc_mode dsc_mode,
uint32_t dsc_bytes_per_pixel,
uint32_t dsc_slice_width);
-#endif
void optc2_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
@@ -108,6 +106,7 @@ void optc2_triplebuffer_lock(struct timing_generator *optc);
void optc2_triplebuffer_unlock(struct timing_generator *optc);
void optc2_lock_doublebuffer_disable(struct timing_generator *optc);
void optc2_lock_doublebuffer_enable(struct timing_generator *optc);
+void optc2_setup_manual_trigger(struct timing_generator *optc);
void optc2_program_manual_trigger(struct timing_generator *optc);
-
+bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing);
#endif /* __DC_OPTC_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 23ff2f1c75b5..85f90f3e24cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1,5 +1,6 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
+ * Copyright 2019 Raptor Engineering, LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -28,6 +29,8 @@
#include "dm_services.h"
#include "dc.h"
+#include "dcn20_init.h"
+
#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn20/dcn20_resource.h"
@@ -45,9 +48,7 @@
#include "dcn10/dcn10_resource.h"
#include "dcn20_opp.h"
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#include "dcn20_dsc.h"
-#endif
#include "dcn20_link_encoder.h"
#include "dcn20_stream_encoder.h"
@@ -59,11 +60,14 @@
#include "dml/display_mode_vba.h"
#include "dcn20_dccg.h"
#include "dcn20_vmid.h"
+#include "dc_link_ddc.h"
#include "navi10_ip_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
+#include "dpcs/dpcs_2_0_0_offset.h"
+#include "dpcs/dpcs_2_0_0_sh_mask.h"
#include "nbio/nbio_2_3_offset.h"
@@ -82,8 +86,6 @@
#include "amdgpu_socbb.h"
-/* NV12 SOC BB is currently in FW, mark SW bounding box invalid. */
-#define SOC_BOUNDING_BOX_VALID false
#define DC_LOGGER_INIT(logger)
struct _vcs_dpi_ip_params_st dcn2_0_ip = {
@@ -94,11 +96,7 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = {
.hostvm_max_page_table_levels = 4,
.hostvm_cached_page_table_levels = 0,
.pte_group_size_bytes = 2048,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.num_dsc = 6,
-#else
- .num_dsc = 0,
-#endif
.rob_buffer_size_kbytes = 168,
.det_buffer_size_kbytes = 164,
.dpte_buffer_size_in_pte_reqs_luma = 84,
@@ -553,6 +551,7 @@ static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
[id] = {\
LE_DCN10_REG_LIST(id), \
UNIPHY_DCN2_REG_LIST(phyid), \
+ DPCS_DCN2_REG_LIST(id), \
SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
}
@@ -566,11 +565,13 @@ static const struct dcn10_link_enc_registers link_enc_regs[] = {
};
static const struct dcn10_link_enc_shift le_shift = {
- LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT)
+ LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT),\
+ DPCS_DCN2_MASK_SH_LIST(__SHIFT)
};
static const struct dcn10_link_enc_mask le_mask = {
- LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK)
+ LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK),\
+ DPCS_DCN2_MASK_SH_LIST(_MASK)
};
#define ipp_regs(id)\
@@ -637,6 +638,7 @@ static const struct dce110_aux_registers aux_engine_regs[] = {
#define tf_regs(id)\
[id] = {\
TF_REG_LIST_DCN20(id),\
+ TF_REG_LIST_DCN20_COMMON_APPEND(id),\
}
static const struct dcn2_dpp_registers tf_regs[] = {
@@ -650,12 +652,12 @@ static const struct dcn2_dpp_registers tf_regs[] = {
static const struct dcn2_dpp_shift tf_shift = {
TF_REG_LIST_SH_MASK_DCN20(__SHIFT),
- TF_DEBUG_REG_LIST_SH_DCN10
+ TF_DEBUG_REG_LIST_SH_DCN20
};
static const struct dcn2_dpp_mask tf_mask = {
TF_REG_LIST_SH_MASK_DCN20(_MASK),
- TF_DEBUG_REG_LIST_MASK_DCN10
+ TF_DEBUG_REG_LIST_MASK_DCN20
};
#define dwbc_regs_dcn2(id)\
@@ -705,14 +707,17 @@ static const struct dcn20_mpc_registers mpc_regs = {
MPC_OUT_MUX_REG_LIST_DCN2_0(3),
MPC_OUT_MUX_REG_LIST_DCN2_0(4),
MPC_OUT_MUX_REG_LIST_DCN2_0(5),
+ MPC_DBG_REG_LIST_DCN2_0()
};
static const struct dcn20_mpc_shift mpc_shift = {
- MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
+ MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT),
+ MPC_DEBUG_REG_LIST_SH_DCN20
};
static const struct dcn20_mpc_mask mpc_mask = {
- MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
+ MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK),
+ MPC_DEBUG_REG_LIST_MASK_DCN20
};
#define tg_regs(id)\
@@ -838,7 +843,6 @@ static int map_transmitter_id_to_phy_instance(
}
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#define dsc_regsDCN20(id)\
[id] = {\
DSC_REG_LIST_DCN20(id)\
@@ -860,7 +864,6 @@ static const struct dcn20_dsc_shift dsc_shift = {
static const struct dcn20_dsc_mask dsc_mask = {
DSC_REG_LIST_SH_MASK_DCN20(_MASK)
};
-#endif
static const struct dccg_registers dccg_regs = {
DCCG_REG_LIST_DCN2()
@@ -884,9 +887,7 @@ static const struct resource_caps res_cap_nv10 = {
.num_dwb = 1,
.num_ddc = 6,
.num_vmid = 16,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.num_dsc = 6,
-#endif
};
static const struct dc_plane_cap plane_cap = {
@@ -923,9 +924,7 @@ static const struct resource_caps res_cap_nv14 = {
.num_dwb = 1,
.num_ddc = 5,
.num_vmid = 16,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.num_dsc = 5,
-#endif
};
static const struct dc_debug_options debug_defaults_drv = {
@@ -1284,7 +1283,6 @@ void dcn20_clock_source_destroy(struct clock_source **clk_src)
*clk_src = NULL;
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct display_stream_compressor *dcn20_dsc_create(
struct dc_context *ctx, uint32_t inst)
@@ -1307,9 +1305,8 @@ void dcn20_dsc_destroy(struct display_stream_compressor **dsc)
*dsc = NULL;
}
-#endif
-static void destruct(struct dcn20_resource_pool *pool)
+static void dcn20_resource_destruct(struct dcn20_resource_pool *pool)
{
unsigned int i;
@@ -1320,12 +1317,10 @@ static void destruct(struct dcn20_resource_pool *pool)
}
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
if (pool->base.dscs[i] != NULL)
dcn20_dsc_destroy(&pool->base.dscs[i]);
}
-#endif
if (pool->base.mpc != NULL) {
kfree(TO_DCN20_MPC(pool->base.mpc));
@@ -1418,6 +1413,8 @@ static void destruct(struct dcn20_resource_pool *pool)
if (pool->base.pp_smu != NULL)
dcn20_pp_smu_destroy(&pool->base.pp_smu);
+ if (pool->base.oem_device != NULL)
+ dal_ddc_service_destroy(&pool->base.oem_device);
}
struct hubp *dcn20_hubp_create(
@@ -1468,7 +1465,7 @@ static void get_pixel_clock_parameters(
if (opp_cnt == 4)
pixel_clk_params->requested_pix_clk_100hz /= 4;
- else if (optc1_is_two_pixels_per_containter(&stream->timing) || opp_cnt == 2)
+ else if (optc2_is_two_pixels_per_containter(&stream->timing) || opp_cnt == 2)
pixel_clk_params->requested_pix_clk_100hz /= 2;
if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
@@ -1534,7 +1531,6 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state
return status;
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
static void acquire_dsc(struct resource_context *res_ctx,
const struct resource_pool *pool,
@@ -1575,11 +1571,9 @@ static void release_dsc(struct resource_context *res_ctx,
}
}
-#endif
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-static enum dc_status add_dsc_to_stream_resource(struct dc *dc,
+enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc,
struct dc_state *dc_ctx,
struct dc_stream_state *dc_stream)
{
@@ -1594,11 +1588,13 @@ static enum dc_status add_dsc_to_stream_resource(struct dc *dc,
if (pipe_ctx->stream != dc_stream)
continue;
+ if (pipe_ctx->stream_res.dsc)
+ continue;
+
acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc, i);
/* The number of DSCs can be less than the number of pipes */
if (!pipe_ctx->stream_res.dsc) {
- dm_output_to_console("No DSCs available\n");
result = DC_NO_DSC_RESOURCE;
}
@@ -1630,7 +1626,6 @@ static enum dc_status remove_dsc_from_stream_resource(struct dc *dc,
else
return DC_OK;
}
-#endif
enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream)
@@ -1642,11 +1637,9 @@ enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx,
if (result == DC_OK)
result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* Get a DSC if required and available */
if (result == DC_OK && dc_stream->timing.flags.DSC)
- result = add_dsc_to_stream_resource(dc, new_ctx, dc_stream);
-#endif
+ result = dcn20_add_dsc_to_stream_resource(dc, new_ctx, dc_stream);
if (result == DC_OK)
result = dcn20_build_mapped_resource(dc, new_ctx, dc_stream);
@@ -1659,9 +1652,7 @@ enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_
{
enum dc_status result = DC_OK;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
result = remove_dsc_from_stream_resource(dc, new_ctx, dc_stream);
-#endif
return result;
}
@@ -1744,9 +1735,7 @@ bool dcn20_split_stream_for_odm(
next_odm_pipe->plane_res.xfm = pool->transforms[next_odm_pipe->pipe_idx];
next_odm_pipe->plane_res.dpp = pool->dpps[next_odm_pipe->pipe_idx];
next_odm_pipe->plane_res.mpcc_inst = pool->dpps[next_odm_pipe->pipe_idx]->inst;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
next_odm_pipe->stream_res.dsc = NULL;
-#endif
if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != next_odm_pipe) {
next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe;
next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe;
@@ -1792,14 +1781,12 @@ bool dcn20_split_stream_for_odm(
sd->recout.x = 0;
}
next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
if (next_odm_pipe->stream->timing.flags.DSC == 1) {
acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
ASSERT(next_odm_pipe->stream_res.dsc);
if (next_odm_pipe->stream_res.dsc == NULL)
return false;
}
-#endif
return true;
}
@@ -1823,9 +1810,7 @@ void dcn20_split_stream_for_mpc(
secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx];
secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx];
secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
secondary_pipe->stream_res.dsc = NULL;
-#endif
if (primary_pipe->bottom_pipe && primary_pipe->bottom_pipe != secondary_pipe) {
ASSERT(!secondary_pipe->bottom_pipe);
secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe;
@@ -1876,11 +1861,28 @@ void dcn20_populate_dml_writeback_from_context(
}
+static int get_num_odm_heads(struct pipe_ctx *pipe)
+{
+ int odm_head_count = 0;
+ struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
+ while (next_pipe) {
+ odm_head_count++;
+ next_pipe = next_pipe->next_odm_pipe;
+ }
+ pipe = pipe->prev_odm_pipe;
+ while (pipe) {
+ odm_head_count++;
+ pipe = pipe->prev_odm_pipe;
+ }
+ return odm_head_count ? odm_head_count + 1 : 0;
+}
+
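get_num_odm_heads() walks the ODM chain in both directions from whichever member it is handed and returns the total number of pipes in the chain, or 0 when the pipe is not ODM-combined at all; the populate path below then maps a count of 2 to dm_odm_combine_mode_2to1. A self-contained sketch of the same walk over a minimal doubly linked structure; struct fake_pipe is an illustration, not the DC pipe_ctx:

#include <stdio.h>

/* Minimal stand-in for the prev/next ODM links on pipe_ctx. */
struct fake_pipe {
	struct fake_pipe *prev_odm_pipe;
	struct fake_pipe *next_odm_pipe;
};

/* Count every member of the ODM chain containing 'pipe'.
 * Returns 0 for a pipe that is not part of any ODM combine.
 */
static int num_odm_heads(struct fake_pipe *pipe)
{
	int count = 0;
	struct fake_pipe *p;

	for (p = pipe->next_odm_pipe; p; p = p->next_odm_pipe)
		count++;
	for (p = pipe->prev_odm_pipe; p; p = p->prev_odm_pipe)
		count++;

	return count ? count + 1 : 0;
}

int main(void)
{
	struct fake_pipe a = { 0 }, b = { 0 };

	/* Link a <-> b as a two-way ODM combine. */
	a.next_odm_pipe = &b;
	b.prev_odm_pipe = &a;

	printf("heads from a: %d, from b: %d\n",
	       num_odm_heads(&a), num_odm_heads(&b)); /* prints 2 and 2 */
	return 0;
}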
int dcn20_populate_dml_pipes_from_context(
- struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
+ struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes)
{
int pipe_cnt, i;
bool synchronized_vblank = true;
+ struct resource_context *res_ctx = &context->res_ctx;
for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) {
if (!res_ctx->pipe_ctx[i].stream)
@@ -1900,25 +1902,30 @@ int dcn20_populate_dml_pipes_from_context(
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing = &res_ctx->pipe_ctx[i].stream->timing;
+ unsigned int v_total;
+ unsigned int front_porch;
int output_bpc;
if (!res_ctx->pipe_ctx[i].stream)
continue;
+
+ v_total = timing->v_total;
+ front_porch = timing->v_front_porch;
/* todo:
pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0;
pipes[pipe_cnt].pipe.src.dcc = 0;
pipes[pipe_cnt].pipe.src.vm = 0;*/
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
+
pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC;
/* todo: rotation?*/
pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h;
-#endif
if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) {
pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true;
/* 1/2 vblank */
pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active =
- (timing->v_total - timing->v_addressable
+ (v_total - timing->v_addressable
- timing->v_border_top - timing->v_border_bottom) / 2;
/* 36 bytes dp, 32 hdmi */
pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes =
@@ -1932,13 +1939,13 @@ int dcn20_populate_dml_pipes_from_context(
- timing->h_addressable
- timing->h_border_left
- timing->h_border_right;
- pipes[pipe_cnt].pipe.dest.vblank_start = timing->v_total - timing->v_front_porch;
+ pipes[pipe_cnt].pipe.dest.vblank_start = v_total - front_porch;
pipes[pipe_cnt].pipe.dest.vblank_end = pipes[pipe_cnt].pipe.dest.vblank_start
- timing->v_addressable
- timing->v_border_top
- timing->v_border_bottom;
pipes[pipe_cnt].pipe.dest.htotal = timing->h_total;
- pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total;
+ pipes[pipe_cnt].pipe.dest.vtotal = v_total;
pipes[pipe_cnt].pipe.dest.hactive = timing->h_addressable;
pipes[pipe_cnt].pipe.dest.vactive = timing->v_addressable;
pipes[pipe_cnt].pipe.dest.interlaced = timing->flags.INTERLACE;
@@ -1949,8 +1956,13 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.dp_lanes = 4;
pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
- pipes[pipe_cnt].pipe.dest.odm_combine = res_ctx->pipe_ctx[i].prev_odm_pipe
- || res_ctx->pipe_ctx[i].next_odm_pipe;
+ switch (get_num_odm_heads(&res_ctx->pipe_ctx[i])) {
+ case 2:
+ pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_2to1;
+ break;
+ default:
+ pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_disabled;
+ }
pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx;
if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state
== res_ctx->pipe_ctx[i].plane_state)
@@ -2001,14 +2013,12 @@ int dcn20_populate_dml_pipes_from_context(
case COLOR_DEPTH_161616:
output_bpc = 16;
break;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
case COLOR_DEPTH_999:
output_bpc = 9;
break;
case COLOR_DEPTH_111111:
output_bpc = 11;
break;
-#endif
default:
output_bpc = 8;
break;
@@ -2036,10 +2046,8 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.output_bpp = output_bpc * 3;
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC)
pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0;
-#endif
/* todo: default max for now, until there is logic reflecting this in dc*/
pipes[pipe_cnt].dout.output_bpc = 12;
@@ -2063,6 +2071,10 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.viewport_height = timing->v_addressable;
if (pipes[pipe_cnt].pipe.src.viewport_height > 1080)
pipes[pipe_cnt].pipe.src.viewport_height = 1080;
+ pipes[pipe_cnt].pipe.src.surface_height_y = pipes[pipe_cnt].pipe.src.viewport_height;
+ pipes[pipe_cnt].pipe.src.surface_width_y = pipes[pipe_cnt].pipe.src.viewport_width;
+ pipes[pipe_cnt].pipe.src.surface_height_c = pipes[pipe_cnt].pipe.src.viewport_height;
+ pipes[pipe_cnt].pipe.src.surface_width_c = pipes[pipe_cnt].pipe.src.viewport_width;
pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 63) / 64) * 64; /* linear sw only */
pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/
@@ -2077,8 +2089,8 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.scale_taps.vtaps = 1;
pipes[pipe_cnt].pipe.src.is_hsplit = 0;
pipes[pipe_cnt].pipe.dest.odm_combine = 0;
- pipes[pipe_cnt].pipe.dest.vtotal_min = timing->v_total;
- pipes[pipe_cnt].pipe.dest.vtotal_max = timing->v_total;
+ pipes[pipe_cnt].pipe.dest.vtotal_min = v_total;
+ pipes[pipe_cnt].pipe.dest.vtotal_max = v_total;
} else {
struct dc_plane_state *pln = res_ctx->pipe_ctx[i].plane_state;
struct scaler_data *scl = &res_ctx->pipe_ctx[i].plane_res.scl_data;
@@ -2096,6 +2108,10 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width;
pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height;
pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height;
+ pipes[pipe_cnt].pipe.src.surface_width_y = pln->plane_size.surface_size.width;
+ pipes[pipe_cnt].pipe.src.surface_height_y = pln->plane_size.surface_size.height;
+ pipes[pipe_cnt].pipe.src.surface_width_c = pln->plane_size.chroma_size.width;
+ pipes[pipe_cnt].pipe.src.surface_height_c = pln->plane_size.chroma_size.height;
if (pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch;
pipes[pipe_cnt].pipe.src.data_pitch_c = pln->plane_size.chroma_pitch;
@@ -2261,7 +2277,6 @@ void dcn20_set_mcif_arb_params(
}
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
{
int i;
@@ -2295,7 +2310,6 @@ bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
}
return true;
}
-#endif
struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
struct resource_context *res_ctx,
@@ -2398,10 +2412,8 @@ void dcn20_merge_pipes_for_validate(
odm_pipe->bottom_pipe = NULL;
odm_pipe->prev_odm_pipe = NULL;
odm_pipe->next_odm_pipe = NULL;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
if (odm_pipe->stream_res.dsc)
release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
-#endif
/* Clear plane_res and stream_res */
memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res));
memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res));
@@ -2513,7 +2525,7 @@ int dcn20_validate_apply_pipe_split_flags(
split[i] = true;
if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
split[i] = true;
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true;
+ context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = dm_odm_combine_mode_2to1;
}
context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] =
context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
@@ -2544,7 +2556,7 @@ bool dcn20_fast_validate_bw(
dcn20_merge_pipes_for_validate(dc, context);
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, &context->res_ctx, pipes);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes);
*pipe_cnt_out = pipe_cnt;
@@ -2621,14 +2633,12 @@ bool dcn20_fast_validate_bw(
ASSERT(0);
}
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* Actual dsc count per stream dsc validation*/
if (!dcn20_validate_dsc(dc, context)) {
context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] =
DML_FAIL_DSC_VALIDATION_FAILURE;
goto validate_fail;
}
-#endif
*vlevel_out = vlevel;
@@ -2692,10 +2702,10 @@ static void dcn20_calculate_wm(
if (pipe_cnt != pipe_idx) {
if (dc->res_pool->funcs->populate_dml_pipes)
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
- &context->res_ctx, pipes);
+ context, pipes);
else
pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
- &context->res_ctx, pipes);
+ context, pipes);
}
*out_pipe_cnt = pipe_cnt;
@@ -2715,11 +2725,9 @@ static void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#endif
if (vlevel < 2) {
pipes[0].clks_cfg.voltage = 2;
@@ -2731,10 +2739,8 @@ static void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#endif
if (vlevel < 3) {
pipes[0].clks_cfg.voltage = 3;
@@ -2746,10 +2752,8 @@ static void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#endif
pipes[0].clks_cfg.voltage = vlevel;
pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
@@ -2759,10 +2763,8 @@ static void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#endif
}
void dcn20_calculate_dlg_params(
@@ -2928,11 +2930,19 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
bool voltage_supported = false;
bool full_pstate_supported = false;
bool dummy_pstate_supported = false;
- double p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
+ double p_state_latency_us;
- if (fast_validate)
- return dcn20_validate_bandwidth_internal(dc, context, true);
+ DC_FP_START();
+ p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
+ context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support =
+ dc->debug.disable_dram_clock_change_vactive_support;
+ if (fast_validate) {
+ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, true);
+
+ DC_FP_END();
+ return voltage_supported;
+ }
// Best case, we support full UCLK switch latency
voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
@@ -2940,7 +2950,7 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 ||
(voltage_supported && full_pstate_supported)) {
- context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
+ context->bw_ctx.bw.dcn.clk.p_state_change_support = full_pstate_supported;
goto restore_dml_state;
}
@@ -2961,6 +2971,7 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
restore_dml_state:
context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us;
+ DC_FP_END();
return voltage_supported;
}
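dcn20_validate_bandwidth() now brackets its DML floating-point work in DC_FP_START()/DC_FP_END() (replacing the direct kernel_fpu_begin/end calls removed from dcn20_patch_bounding_box below) and saves the SoC p-state latency at entry so it can be restored before every return. A hedged, standalone sketch of that shape only; FP_GUARD_BEGIN/FP_GUARD_END and validate_internal() are placeholders, not the DC macros or validation code:

#include <stdbool.h>
#include <stdio.h>

/* Placeholder guards standing in for DC_FP_START()/DC_FP_END(): in the
 * driver they bracket the region where FPU state is saved so 'double'
 * math is safe in kernel context.
 */
#define FP_GUARD_BEGIN() do { } while (0)
#define FP_GUARD_END()   do { } while (0)

struct soc_params {
	double dram_clock_change_latency_us;
};

/* Stand-in for the internal DML validation pass. */
static bool validate_internal(struct soc_params *soc, bool fast)
{
	(void)fast;
	return soc->dram_clock_change_latency_us < 100.0;
}

static bool validate_bandwidth(struct soc_params *soc, bool fast_validate)
{
	bool supported;
	double saved_latency_us;

	FP_GUARD_BEGIN();
	saved_latency_us = soc->dram_clock_change_latency_us;

	if (fast_validate) {
		supported = validate_internal(soc, true);
		FP_GUARD_END(); /* every return path leaves the FP region */
		return supported;
	}

	/* Full pass; a retry with a dummy p-state latency could follow here. */
	supported = validate_internal(soc, false);

	soc->dram_clock_change_latency_us = saved_latency_us;
	FP_GUARD_END();
	return supported;
}

int main(void)
{
	struct soc_params soc = { 404.0 };

	printf("fast validate: %d\n", validate_bandwidth(&soc, true));
	return 0;
}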
@@ -3005,7 +3016,7 @@ static void dcn20_destroy_resource_pool(struct resource_pool **pool)
{
struct dcn20_resource_pool *dcn20_pool = TO_DCN20_RES_POOL(*pool);
- destruct(dcn20_pool);
+ dcn20_resource_destruct(dcn20_pool);
kfree(dcn20_pool);
*pool = NULL;
}
@@ -3252,7 +3263,6 @@ void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
{
- kernel_fpu_begin();
if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
&& dc->bb_overrides.sr_exit_time_ns) {
bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
@@ -3276,7 +3286,6 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st
bb->dram_clock_change_latency_us =
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}
- kernel_fpu_end();
}
static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
@@ -3318,12 +3327,13 @@ static bool init_soc_bounding_box(struct dc *dc,
DC_LOGGER_INIT(dc->ctx->logger);
- if (!bb && !SOC_BOUNDING_BOX_VALID) {
+ /* TODO: upstream NV12 bounding box when it's launched */
+ if (!bb && ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) {
DC_LOG_ERROR("%s: not valid soc bounding box/n", __func__);
return false;
}
- if (bb && !SOC_BOUNDING_BOX_VALID) {
+ if (bb && ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) {
int i;
dcn2_0_nv12_soc.sr_exit_time_us =
@@ -3465,7 +3475,7 @@ static bool init_soc_bounding_box(struct dc *dc,
return true;
}
-static bool construct(
+static bool dcn20_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
struct dcn20_resource_pool *pool)
@@ -3473,6 +3483,7 @@ static bool construct(
int i;
struct dc_context *ctx = dc->ctx;
struct irq_service_init_data init_data;
+ struct ddc_service_init_data ddc_init_data;
struct _vcs_dpi_soc_bounding_box_st *loaded_bb =
get_asic_rev_soc_bb(ctx->asic_id.hw_internal_rev);
struct _vcs_dpi_ip_params_st *loaded_ip =
@@ -3480,6 +3491,8 @@ static bool construct(
enum dml_project dml_project_version =
get_dml_project_version(ctx->asic_id.hw_internal_rev);
+ DC_FP_START();
+
ctx->dc_bios->regs = &bios_regs;
pool->base.funcs = &dcn20_res_pool_funcs;
@@ -3732,7 +3745,6 @@ static bool construct(
goto create_fail;
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
pool->base.dscs[i] = dcn20_dsc_create(ctx, i);
if (pool->base.dscs[i] == NULL) {
@@ -3741,7 +3753,6 @@ static bool construct(
goto create_fail;
}
}
-#endif
if (!dcn20_dwbc_create(ctx, &pool->base)) {
BREAK_TO_DEBUGGER();
@@ -3768,11 +3779,24 @@ static bool construct(
dc->cap_funcs = cap_funcs;
+ if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
+ ddc_init_data.ctx = dc->ctx;
+ ddc_init_data.link = NULL;
+ ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
+ ddc_init_data.id.enum_id = 0;
+ ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
+ pool->base.oem_device = dal_ddc_service_create(&ddc_init_data);
+ } else {
+ pool->base.oem_device = NULL;
+ }
+
+ DC_FP_END();
return true;
create_fail:
- destruct(pool);
+ DC_FP_END();
+ dcn20_resource_destruct(pool);
return false;
}
@@ -3787,7 +3811,7 @@ struct resource_pool *dcn20_create_resource_pool(
if (!pool)
return NULL;
- if (construct(init_data->num_virtual_links, dc, pool))
+ if (dcn20_resource_construct(init_data->num_virtual_links, dc, pool))
return &pool->base;
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
index fef473d68a4a..f5893840b79b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
@@ -50,7 +50,7 @@ unsigned int dcn20_calc_max_scaled_time(
enum mmhubbub_wbif_mode mode,
unsigned int urgent_watermark);
int dcn20_populate_dml_pipes_from_context(
- struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes);
+ struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes);
struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer(
struct dc_state *state,
const struct resource_pool *pool,
@@ -127,9 +127,7 @@ int dcn20_validate_apply_pipe_split_flags(
struct dc_state *context,
int vlevel,
bool *split);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx);
-#endif
void dcn20_split_stream_for_mpc(
struct resource_context *res_ctx,
const struct resource_pool *pool,
@@ -159,6 +157,7 @@ void dcn20_calculate_dlg_params(
enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream);
enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
+enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream);
enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index fcb3877b4fcb..9b70a1e7b962 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -205,7 +205,6 @@ static void enc2_stream_encoder_stop_hdmi_info_packets(
HDMI_GENERIC7_LINE, 0);
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* Update GSP7 SDP 128 byte long */
static void enc2_update_gsp7_128_info_packet(
@@ -360,7 +359,6 @@ static void enc2_read_state(struct stream_encoder *enc, struct enc_state *s)
REG_GET(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, &s->sec_stream_enable);
}
}
-#endif
/* Set Dynamic Metadata-configuration.
* enable_dme: TRUE: enables Dynamic Metadata Enfine, FALSE: disables DME
@@ -440,10 +438,8 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
{
bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
two_pix = two_pix || (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422
&& !timing->dsc_cfg.ycbcr422_simple);
-#endif
return two_pix;
}
@@ -541,11 +537,16 @@ void enc2_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
enum dc_color_space output_color_space,
+ bool use_vsc_sdp_for_colorimetry,
uint32_t enable_sdp_splitting)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- enc1_stream_encoder_dp_set_stream_attribute(enc, crtc_timing, output_color_space, enable_sdp_splitting);
+ enc1_stream_encoder_dp_set_stream_attribute(enc,
+ crtc_timing,
+ output_color_space,
+ use_vsc_sdp_for_colorimetry,
+ enable_sdp_splitting);
REG_UPDATE(DP_SEC_FRAMING4,
DP_SST_SDP_SPLITTING, enable_sdp_splitting);
@@ -568,6 +569,8 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
enc2_stream_encoder_stop_hdmi_info_packets,
.update_dp_info_packets =
enc2_stream_encoder_update_dp_info_packets,
+ .send_immediate_sdp_message =
+ enc1_stream_encoder_send_immediate_sdp_message,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
.dp_blank =
@@ -590,11 +593,9 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
.dp_get_pixel_format =
enc1_stream_encoder_dp_get_pixel_format,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.enc_read_state = enc2_read_state,
.dp_set_dsc_config = enc2_dp_set_dsc_config,
.dp_set_dsc_pps_info_packet = enc2_dp_set_dsc_pps_info_packet,
-#endif
.set_dynamic_metadata = enc2_set_dynamic_metadata,
.hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
index 3f94a9f13c4a..d2a805bd4573 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
@@ -98,6 +98,7 @@ void enc2_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
enum dc_color_space output_color_space,
+ bool use_vsc_sdp_for_colorimetry,
uint32_t enable_sdp_splitting);
void enc2_stream_encoder_dp_unblank(