Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm')
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c         | 260
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h         |  17
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c     | 231
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h     |  67
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c |  24
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c |   4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c  |  51
7 files changed, 514 insertions(+), 140 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 760af668f678..f4e0f27a76de 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -688,12 +688,15 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
*/
if (adev->flags & AMD_IS_APU &&
adev->asic_type >= CHIP_CARRIZO &&
- adev->asic_type < CHIP_RAVEN)
+ adev->asic_type <= CHIP_RAVEN)
init_data.flags.gpu_vm_support = true;
if (amdgpu_dc_feature_mask & DC_FBC_MASK)
init_data.flags.fbc_support = true;
+ if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
+ init_data.flags.multi_mon_pp_mclk_switch = true;
+
init_data.flags.power_down_display_on_boot = true;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
@@ -809,6 +812,9 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
+ case CHIP_RENOIR:
return 0;
case CHIP_RAVEN:
if (ASICREV_IS_PICASSO(adev->external_rev_id))
@@ -2107,6 +2113,7 @@ static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
}
static const struct backlight_ops amdgpu_dm_backlight_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
.get_brightness = amdgpu_dm_backlight_get_brightness,
.update_status = amdgpu_dm_backlight_update_status,
};
@@ -2358,7 +2365,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case CHIP_RAVEN:
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ case CHIP_NAVI12:
case CHIP_NAVI10:
+ case CHIP_NAVI14:
+#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ case CHIP_RENOIR:
#endif
if (dcn10_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
@@ -2373,6 +2385,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
+ if (adev->asic_type == CHIP_RENOIR)
+ dm->dc->debug.disable_stutter = true;
return 0;
fail:
@@ -2428,8 +2442,7 @@ static ssize_t s3_debug_store(struct device *device,
{
int ret;
int s3_state;
- struct pci_dev *pdev = to_pci_dev(device);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_get_drvdata(device);
struct amdgpu_device *adev = drm_dev->dev_private;
ret = kstrtoint(buf, 0, &s3_state);
@@ -2515,10 +2528,23 @@ static int dm_early_init(void *handle)
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
case CHIP_NAVI10:
+ case CHIP_NAVI12:
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
+ case CHIP_NAVI14:
+ adev->mode_info.num_crtc = 5;
+ adev->mode_info.num_hpd = 5;
+ adev->mode_info.num_dig = 5;
+ break;
+#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ case CHIP_RENOIR:
+ adev->mode_info.num_crtc = 4;
+ adev->mode_info.num_hpd = 4;
+ adev->mode_info.num_dig = 4;
+ break;
#endif
default:
DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
@@ -2665,7 +2691,7 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev,
const struct amdgpu_framebuffer *afb,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
- const union plane_size *plane_size,
+ const struct plane_size *plane_size,
const union dc_tiling_info *tiling_info,
const uint64_t info,
struct dc_plane_dcc_param *dcc,
@@ -2691,8 +2717,8 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev,
return -EINVAL;
input.format = format;
- input.surface_size.width = plane_size->grph.surface_size.width;
- input.surface_size.height = plane_size->grph.surface_size.height;
+ input.surface_size.width = plane_size->surface_size.width;
+ input.surface_size.height = plane_size->surface_size.height;
input.swizzle_mode = tiling_info->gfx9.swizzle;
if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
@@ -2710,9 +2736,9 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev,
return -EINVAL;
dcc->enable = 1;
- dcc->grph.meta_pitch =
+ dcc->meta_pitch =
AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
- dcc->grph.independent_64b_blks = i64b;
+ dcc->independent_64b_blks = i64b;
dcc_address = get_dcc_address(afb->address, info);
address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
@@ -2728,7 +2754,7 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
const enum dc_rotation_angle rotation,
const uint64_t tiling_flags,
union dc_tiling_info *tiling_info,
- union plane_size *plane_size,
+ struct plane_size *plane_size,
struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address)
{
@@ -2741,11 +2767,11 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
memset(address, 0, sizeof(*address));
if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
- plane_size->grph.surface_size.x = 0;
- plane_size->grph.surface_size.y = 0;
- plane_size->grph.surface_size.width = fb->width;
- plane_size->grph.surface_size.height = fb->height;
- plane_size->grph.surface_pitch =
+ plane_size->surface_size.x = 0;
+ plane_size->surface_size.y = 0;
+ plane_size->surface_size.width = fb->width;
+ plane_size->surface_size.height = fb->height;
+ plane_size->surface_pitch =
fb->pitches[0] / fb->format->cpp[0];
address->type = PLN_ADDR_TYPE_GRAPHICS;
@@ -2754,20 +2780,20 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
uint64_t chroma_addr = afb->address + fb->offsets[1];
- plane_size->video.luma_size.x = 0;
- plane_size->video.luma_size.y = 0;
- plane_size->video.luma_size.width = fb->width;
- plane_size->video.luma_size.height = fb->height;
- plane_size->video.luma_pitch =
+ plane_size->surface_size.x = 0;
+ plane_size->surface_size.y = 0;
+ plane_size->surface_size.width = fb->width;
+ plane_size->surface_size.height = fb->height;
+ plane_size->surface_pitch =
fb->pitches[0] / fb->format->cpp[0];
- plane_size->video.chroma_size.x = 0;
- plane_size->video.chroma_size.y = 0;
+ plane_size->chroma_size.x = 0;
+ plane_size->chroma_size.y = 0;
/* TODO: set these based on surface format */
- plane_size->video.chroma_size.width = fb->width / 2;
- plane_size->video.chroma_size.height = fb->height / 2;
+ plane_size->chroma_size.width = fb->width / 2;
+ plane_size->chroma_size.height = fb->height / 2;
- plane_size->video.chroma_pitch =
+ plane_size->chroma_pitch =
fb->pitches[1] / fb->format->cpp[1];
address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
@@ -2814,6 +2840,11 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
adev->asic_type == CHIP_VEGA20 ||
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
adev->asic_type == CHIP_NAVI10 ||
+ adev->asic_type == CHIP_NAVI14 ||
+ adev->asic_type == CHIP_NAVI12 ||
+#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ adev->asic_type == CHIP_RENOIR ||
#endif
adev->asic_type == CHIP_RAVEN) {
/* Fill GFX9 params */
@@ -2995,6 +3026,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
plane_info->visible = true;
plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
+ plane_info->layer_index = 0;
+
ret = fill_plane_color_attributes(plane_state, plane_info->format,
&plane_info->color_space);
if (ret)
@@ -3060,6 +3093,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
dc_plane_state->global_alpha = plane_info.global_alpha;
dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
dc_plane_state->dcc = plane_info.dcc;
+ dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
/*
* Always set input transfer function, since plane state is refreshed
@@ -3131,13 +3165,25 @@ static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
const struct drm_connector_state *state)
{
- uint32_t bpc = connector->display_info.bpc;
+ uint8_t bpc = (uint8_t)connector->display_info.bpc;
+
+ /* Assume 8 bpc by default if no bpc is specified. */
+ bpc = bpc ? bpc : 8;
if (!state)
state = connector->state;
if (state) {
- bpc = state->max_bpc;
+ /*
+ * Cap display bpc based on the user requested value.
+ *
+ * The value for state->max_bpc may not be correctly updated
+ * depending on when the connector gets added to the state
+ * or if this was called outside of atomic check, so it
+ * can't be used directly.
+ */
+ bpc = min(bpc, state->max_requested_bpc);
+
/* Round down to the nearest even number. */
bpc = bpc - (bpc & 1);
}
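
For clarity, a small worked sketch (not part of the patch; the helper name is invented and the kernel's min() macro is assumed) of the clamping the hunk above performs: an EDID bpc of 0 falls back to 8, the connector's "max bpc" property caps the value, and odd results round down to even.

static u8 clamp_display_bpc_sketch(u8 edid_bpc, u8 max_requested_bpc)
{
	u8 bpc = edid_bpc ? edid_bpc : 8;	/* assume 8 bpc if the EDID reports none */

	bpc = min(bpc, max_requested_bpc);	/* honor the user's "max bpc" property */
	return bpc - (bpc & 1);			/* round down to the nearest even value */
}

/* e.g. clamp_display_bpc_sketch(10, 8) == 8; clamp_display_bpc_sketch(0, 16) == 8 */
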
@@ -3491,6 +3537,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
int mode_refresh;
int preferred_refresh = 0;
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ struct dsc_dec_dpcd_caps dsc_caps;
+ uint32_t link_bandwidth_kbps;
+#endif
struct dc_sink *sink = NULL;
if (aconnector == NULL) {
@@ -3563,17 +3613,23 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
&mode, &aconnector->base, con_state, old_stream);
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- /* stream->timing.flags.DSC = 0; */
- /* */
- /* if (aconnector->dc_link && */
- /* aconnector->dc_link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT #<{(|&& */
- /* aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.is_dsc_supported|)}>#) */
- /* if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc, */
- /* &aconnector->dc_link->dpcd_caps.dsc_caps, */
- /* dc_link_bandwidth_kbps(aconnector->dc_link, dc_link_get_link_cap(aconnector->dc_link)), */
- /* &stream->timing, */
- /* &stream->timing.dsc_cfg)) */
- /* stream->timing.flags.DSC = 1; */
+ stream->timing.flags.DSC = 0;
+
+ if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ dc_dsc_parse_dsc_dpcd(aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
+ &dsc_caps);
+ link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
+ dc_link_get_link_cap(aconnector->dc_link));
+
+ if (dsc_caps.is_dsc_supported)
+ if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc,
+ &dsc_caps,
+ link_bandwidth_kbps,
+ &stream->timing,
+ &stream->timing.dsc_cfg))
+ stream->timing.flags.DSC = 1;
+ }
#endif
update_stream_scaling_settings(&mode, dm_state, stream);
@@ -3657,7 +3713,7 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
state->abm_level = cur->abm_level;
state->vrr_supported = cur->vrr_supported;
state->freesync_config = cur->freesync_config;
- state->crc_enabled = cur->crc_enabled;
+ state->crc_src = cur->crc_src;
state->cm_has_degamma = cur->cm_has_degamma;
state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
@@ -3727,6 +3783,7 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.atomic_destroy_state = dm_crtc_destroy_state,
.set_crc_source = amdgpu_dm_crtc_set_crc_source,
.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
+ .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
.enable_vblank = dm_enable_vblank,
.disable_vblank = dm_disable_vblank,
};
@@ -4446,7 +4503,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
}
if (plane->type != DRM_PLANE_TYPE_CURSOR)
- domain = amdgpu_display_supported_domains(adev);
+ domain = amdgpu_display_supported_domains(adev, rbo->flags);
else
domain = AMDGPU_GEM_DOMAIN_VRAM;
@@ -4536,20 +4593,10 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
static int dm_plane_atomic_async_check(struct drm_plane *plane,
struct drm_plane_state *new_plane_state)
{
- struct drm_plane_state *old_plane_state =
- drm_atomic_get_old_plane_state(new_plane_state->state, plane);
-
/* Only support async updates on cursor planes. */
if (plane->type != DRM_PLANE_TYPE_CURSOR)
return -EINVAL;
- /*
- * DRM calls prepare_fb and cleanup_fb on new_plane_state for
- * async commits so don't allow fb changes.
- */
- if (old_plane_state->fb != new_plane_state->fb)
- return -EINVAL;
-
return 0;
}
@@ -5703,7 +5750,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
false,
msecs_to_jiffies(5000));
if (unlikely(r <= 0))
- DRM_ERROR("Waiting for fences timed out or interrupted!");
+ DRM_ERROR("Waiting for fences timed out!");
/*
* TODO This might fail and hence better not used, wait
@@ -5727,8 +5774,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates[planes_count].plane_info =
&bundle->plane_infos[planes_count];
+ /*
+ * Only allow immediate flips for fast updates that don't
+ * change FB pitch, DCC state, rotation or mirroring.
+ */
bundle->flip_addrs[planes_count].flip_immediate =
- (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
+ crtc->state->async_flip &&
+ acrtc_state->update_type == UPDATE_TYPE_FAST;
timestamp_ns = ktime_get_ns();
bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
@@ -5973,6 +6025,7 @@ static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int i;
+ enum amdgpu_dm_pipe_crc_source source;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
@@ -5998,9 +6051,11 @@ static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
#ifdef CONFIG_DEBUG_FS
/* The stream has changed so CRC capture needs to be re-enabled. */
- if (dm_new_crtc_state->crc_enabled) {
- dm_new_crtc_state->crc_enabled = false;
- amdgpu_dm_crtc_set_crc_source(crtc, "auto");
+ source = dm_new_crtc_state->crc_src;
+ if (amdgpu_dm_is_valid_crc_source(source)) {
+ amdgpu_dm_crtc_configure_crc_source(
+ crtc, dm_new_crtc_state,
+ dm_new_crtc_state->crc_src);
}
#endif
}
@@ -6051,23 +6106,8 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev,
if (dm_old_crtc_state->interrupts_enabled &&
(!dm_new_crtc_state->interrupts_enabled ||
- drm_atomic_crtc_needs_modeset(new_crtc_state))) {
- /*
- * Drop the extra vblank reference added by CRC
- * capture if applicable.
- */
- if (dm_new_crtc_state->crc_enabled)
- drm_crtc_vblank_put(crtc);
-
- /*
- * Only keep CRC capture enabled if there's
- * still a stream for the CRTC.
- */
- if (!dm_new_crtc_state->stream)
- dm_new_crtc_state->crc_enabled = false;
-
+ drm_atomic_crtc_needs_modeset(new_crtc_state)))
manage_dm_interrupts(adev, acrtc, false);
- }
}
/*
* Add check here for SoC's that support hardware cursor plane, to
@@ -6316,7 +6356,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
amdgpu_dm_enable_crtc_interrupts(dev, state, true);
for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
- if (new_crtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
+ if (new_crtc_state->async_flip)
wait_for_vblank = false;
/* update planes when needed per crtc*/
@@ -7039,6 +7079,12 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
continue;
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
+ const struct amdgpu_framebuffer *amdgpu_fb =
+ to_amdgpu_framebuffer(new_plane_state->fb);
+ struct dc_plane_info plane_info;
+ struct dc_flip_addrs flip_addr;
+ uint64_t tiling_flags;
+
new_plane_crtc = new_plane_state->crtc;
old_plane_crtc = old_plane_state->crtc;
new_dm_plane_state = to_dm_plane_state(new_plane_state);
@@ -7082,6 +7128,24 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
updates[num_plane].scaling_info = &scaling_info;
+ if (amdgpu_fb) {
+ ret = get_fb_info(amdgpu_fb, &tiling_flags);
+ if (ret)
+ goto cleanup;
+
+ memset(&flip_addr, 0, sizeof(flip_addr));
+
+ ret = fill_dc_plane_info_and_addr(
+ dm->adev, new_plane_state, tiling_flags,
+ &plane_info,
+ &flip_addr.address);
+ if (ret)
+ goto cleanup;
+
+ updates[num_plane].plane_info = &plane_info;
+ updates[num_plane].flip_addr = &flip_addr;
+ }
+
num_plane++;
}
@@ -7278,6 +7342,26 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
+ if (state->legacy_cursor_update) {
+ /*
+ * This is a fast cursor update coming from the plane update
+ * helper, check if it can be done asynchronously for better
+ * performance.
+ */
+ state->async_update =
+ !drm_atomic_helper_async_check(dev, state);
+
+ /*
+ * Skip the remaining global validation if this is an async
+ * update. Cursor updates can be done without affecting
+ * state or bandwidth calcs and this avoids the performance
+ * penalty of locking the private state object and
+ * allocating a new dc_state.
+ */
+ if (state->async_update)
+ return 0;
+ }
+
/* Check scaling and underscan changes*/
/* TODO Removed scaling changes validation due to inability to commit
* new stream into context w/o causing full reset. Need to
@@ -7330,13 +7414,37 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
ret = -EINVAL;
goto fail;
}
- } else if (state->legacy_cursor_update) {
+ } else {
/*
- * This is a fast cursor update coming from the plane update
- * helper, check if it can be done asynchronously for better
- * performance.
+ * The commit is a fast update. Fast updates shouldn't change
+ * the DC context, affect global validation, and can have their
+ * commit work done in parallel with other commits not touching
+ * the same resource. If we have a new DC context as part of
+ * the DM atomic state from validation we need to free it and
+ * retain the existing one instead.
*/
- state->async_update = !drm_atomic_helper_async_check(dev, state);
+ struct dm_atomic_state *new_dm_state, *old_dm_state;
+
+ new_dm_state = dm_atomic_get_new_state(state);
+ old_dm_state = dm_atomic_get_old_state(state);
+
+ if (new_dm_state && old_dm_state) {
+ if (new_dm_state->context)
+ dc_release_state(new_dm_state->context);
+
+ new_dm_state->context = old_dm_state->context;
+
+ if (old_dm_state->context)
+ dc_retain_state(old_dm_state->context);
+ }
+ }
+
+ /* Store the overall update type for use later in atomic check. */
+ for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
+ struct dm_crtc_state *dm_new_crtc_state =
+ to_dm_crtc_state(new_crtc_state);
+
+ dm_new_crtc_state->update_type = (int)overall_update_type;
}
/* Must be success */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index b89cbbfcc0e9..c8c525a2b505 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -50,6 +50,7 @@
#include "irq_types.h"
#include "signal_types.h"
+#include "amdgpu_dm_crc.h"
/* Forward declarations */
struct amdgpu_device;
@@ -309,11 +310,12 @@ struct dm_crtc_state {
bool cm_has_degamma;
bool cm_is_degamma_srgb;
+ int update_type;
int active_planes;
bool interrupts_enabled;
int crc_skip_count;
- bool crc_enabled;
+ enum amdgpu_dm_pipe_crc_source crc_src;
bool freesync_timing_changed;
bool freesync_vrr_info_changed;
@@ -380,19 +382,6 @@ void dm_restore_drm_connector_state(struct drm_device *dev,
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
struct edid *edid);
-/* amdgpu_dm_crc.c */
-#ifdef CONFIG_DEBUG_FS
-int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name);
-int amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc,
- const char *src_name,
- size_t *values_cnt);
-void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc);
-#else
-#define amdgpu_dm_crtc_set_crc_source NULL
-#define amdgpu_dm_crtc_verify_crc_source NULL
-#define amdgpu_dm_crtc_handle_crc_irq(x)
-#endif
-
#define MAX_COLOR_LUT_ENTRIES 4096
/* Legacy gamma LUT users such as X don't like large LUT sizes */
#define MAX_COLOR_LEGACY_LUT_ENTRIES 256
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index bc67e6502733..a549c7c717dd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -30,23 +30,57 @@
#include "amdgpu_dm.h"
#include "dc.h"
-enum amdgpu_dm_pipe_crc_source {
- AMDGPU_DM_PIPE_CRC_SOURCE_NONE = 0,
- AMDGPU_DM_PIPE_CRC_SOURCE_AUTO,
- AMDGPU_DM_PIPE_CRC_SOURCE_MAX,
- AMDGPU_DM_PIPE_CRC_SOURCE_INVALID = -1,
+static const char *const pipe_crc_sources[] = {
+ "none",
+ "crtc",
+ "crtc dither",
+ "dprx",
+ "dprx dither",
+ "auto",
};
static enum amdgpu_dm_pipe_crc_source dm_parse_crc_source(const char *source)
{
if (!source || !strcmp(source, "none"))
return AMDGPU_DM_PIPE_CRC_SOURCE_NONE;
- if (!strcmp(source, "auto"))
- return AMDGPU_DM_PIPE_CRC_SOURCE_AUTO;
+ if (!strcmp(source, "auto") || !strcmp(source, "crtc"))
+ return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC;
+ if (!strcmp(source, "dprx"))
+ return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX;
+ if (!strcmp(source, "crtc dither"))
+ return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER;
+ if (!strcmp(source, "dprx dither"))
+ return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER;
return AMDGPU_DM_PIPE_CRC_SOURCE_INVALID;
}
+static bool dm_is_crc_source_crtc(enum amdgpu_dm_pipe_crc_source src)
+{
+ return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC) ||
+ (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER);
+}
+
+static bool dm_is_crc_source_dprx(enum amdgpu_dm_pipe_crc_source src)
+{
+ return (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX) ||
+ (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER);
+}
+
+static bool dm_need_crc_dither(enum amdgpu_dm_pipe_crc_source src)
+{
+ return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER) ||
+ (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER) ||
+ (src == AMDGPU_DM_PIPE_CRC_SOURCE_NONE);
+}
+
+const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
+ size_t *count)
+{
+ *count = ARRAY_SIZE(pipe_crc_sources);
+ return pipe_crc_sources;
+}
+
int
amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
size_t *values_cnt)
@@ -63,14 +97,52 @@ amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
return 0;
}
-int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
+int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
+ struct dm_crtc_state *dm_crtc_state,
+ enum amdgpu_dm_pipe_crc_source source)
{
struct amdgpu_device *adev = crtc->dev->dev_private;
- struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state);
- struct dc_stream_state *stream_state = crtc_state->stream;
- bool enable;
+ struct dc_stream_state *stream_state = dm_crtc_state->stream;
+ bool enable = amdgpu_dm_is_valid_crc_source(source);
+ int ret = 0;
+ /* Configuration will be deferred to stream enable. */
+ if (!stream_state)
+ return 0;
+
+ mutex_lock(&adev->dm.dc_lock);
+
+ /* Enable CRTC CRC generation if necessary. */
+ if (dm_is_crc_source_crtc(source)) {
+ if (!dc_stream_configure_crc(stream_state->ctx->dc,
+ stream_state, enable, enable)) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ }
+
+ /* Configure dithering */
+ if (!dm_need_crc_dither(source))
+ dc_stream_set_dither_option(stream_state, DITHER_OPTION_TRUN8);
+ else
+ dc_stream_set_dither_option(stream_state,
+ DITHER_OPTION_DEFAULT);
+
+unlock:
+ mutex_unlock(&adev->dm.dc_lock);
+
+ return ret;
+}
+
+int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
+{
enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);
+ struct drm_crtc_commit *commit;
+ struct dm_crtc_state *crtc_state;
+ struct drm_dp_aux *aux = NULL;
+ bool enable = false;
+ bool enabled = false;
+ int ret = 0;
if (source < 0) {
DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n",
@@ -78,41 +150,124 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
return -EINVAL;
}
- if (!stream_state) {
- DRM_ERROR("No stream state for CRTC%d\n", crtc->index);
- return -EINVAL;
- }
+ ret = drm_modeset_lock(&crtc->mutex, NULL);
+ if (ret)
+ return ret;
- enable = (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO);
+ spin_lock(&crtc->commit_lock);
+ commit = list_first_entry_or_null(&crtc->commit_list,
+ struct drm_crtc_commit, commit_entry);
+ if (commit)
+ drm_crtc_commit_get(commit);
+ spin_unlock(&crtc->commit_lock);
- mutex_lock(&adev->dm.dc_lock);
- if (!dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
- enable, enable)) {
- mutex_unlock(&adev->dm.dc_lock);
- return -EINVAL;
+ if (commit) {
+ /*
+ * Need to wait for all outstanding programming to complete
+ * in commit tail since it can modify CRC related fields and
+ * hardware state. Since we're holding the CRTC lock we're
+ * guaranteed that no other commit work can be queued off
+ * before we modify the state below.
+ */
+ ret = wait_for_completion_interruptible_timeout(
+ &commit->hw_done, 10 * HZ);
+ if (ret)
+ goto cleanup;
}
- /* When enabling CRC, we should also disable dithering. */
- dc_stream_set_dither_option(stream_state,
- enable ? DITHER_OPTION_TRUN8
- : DITHER_OPTION_DEFAULT);
+ enable = amdgpu_dm_is_valid_crc_source(source);
+ crtc_state = to_dm_crtc_state(crtc->state);
- mutex_unlock(&adev->dm.dc_lock);
+ /*
+ * USER REQ SRC | CURRENT SRC | BEHAVIOR
+ * -----------------------------
+ * None | None | Do nothing
+ * None | CRTC | Disable CRTC CRC, set default to dither
+ * None | DPRX | Disable DPRX CRC, need 'aux', set default to dither
+ * None | CRTC DITHER | Disable CRTC CRC
+ * None | DPRX DITHER | Disable DPRX CRC, need 'aux'
+ * CRTC | XXXX | Enable CRTC CRC, no dither
+ * DPRX | XXXX | Enable DPRX CRC, need 'aux', no dither
+ * CRTC DITHER | XXXX | Enable CRTC CRC, set dither
+ * DPRX DITHER | XXXX | Enable DPRX CRC, need 'aux', set dither
+ */
+ if (dm_is_crc_source_dprx(source) ||
+ (source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE &&
+ dm_is_crc_source_dprx(crtc_state->crc_src))) {
+ struct amdgpu_dm_connector *aconn = NULL;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ drm_connector_list_iter_begin(crtc->dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (!connector->state || connector->state->crtc != crtc)
+ continue;
+
+ aconn = to_amdgpu_dm_connector(connector);
+ break;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ if (!aconn) {
+ DRM_DEBUG_DRIVER("No amd connector matching CRTC-%d\n", crtc->index);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ aux = &aconn->dm_dp_aux.aux;
+
+ if (!aux) {
+ DRM_DEBUG_DRIVER("No dp aux for amd connector\n");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+
+ if (amdgpu_dm_crtc_configure_crc_source(crtc, crtc_state, source)) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
/*
* Reading the CRC requires the vblank interrupt handler to be
* enabled. Keep a reference until CRC capture stops.
*/
- if (!crtc_state->crc_enabled && enable)
- drm_crtc_vblank_get(crtc);
- else if (crtc_state->crc_enabled && !enable)
+ enabled = amdgpu_dm_is_valid_crc_source(crtc_state->crc_src);
+ if (!enabled && enable) {
+ ret = drm_crtc_vblank_get(crtc);
+ if (ret)
+ goto cleanup;
+
+ if (dm_is_crc_source_dprx(source)) {
+ if (drm_dp_start_crc(aux, crtc)) {
+ DRM_DEBUG_DRIVER("dp start crc failed\n");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+ } else if (enabled && !enable) {
drm_crtc_vblank_put(crtc);
+ if (dm_is_crc_source_dprx(source)) {
+ if (drm_dp_stop_crc(aux)) {
+ DRM_DEBUG_DRIVER("dp stop crc failed\n");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+ }
- crtc_state->crc_enabled = enable;
+ crtc_state->crc_src = source;
/* Reset crc_skipped on dm state */
crtc_state->crc_skip_count = 0;
- return 0;
+
+cleanup:
+ if (commit)
+ drm_crtc_commit_put(commit);
+
+ drm_modeset_unlock(&crtc->mutex);
+
+ return ret;
}
/**
@@ -135,7 +290,7 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
stream_state = crtc_state->stream;
/* Early return if CRC capture is not enabled. */
- if (!crtc_state->crc_enabled)
+ if (!amdgpu_dm_is_valid_crc_source(crtc_state->crc_src))
return;
/*
@@ -149,10 +304,12 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
return;
}
- if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state,
- &crcs[0], &crcs[1], &crcs[2]))
- return;
+ if (dm_is_crc_source_crtc(crtc_state->crc_src)) {
+ if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state,
+ &crcs[0], &crcs[1], &crcs[2]))
+ return;
- drm_crtc_add_crc_entry(crtc, true,
- drm_crtc_accurate_vblank_count(crtc), crcs);
+ drm_crtc_add_crc_entry(crtc, true,
+ drm_crtc_accurate_vblank_count(crtc), crcs);
+ }
}
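
As a reading aid, the two capture paths added above reduce to the following sketch (assumptions: the static helpers from this file are in scope and the invented function name is illustrative only, not driver code). CRTC-class sources are read back from DC in the vblank handler, while DPRX-class sources are started and stopped on the sink over the DP AUX channel.

static int start_crc_capture_sketch(struct drm_crtc *crtc,
				    struct drm_dp_aux *aux,
				    enum amdgpu_dm_pipe_crc_source src)
{
	int ret = drm_crtc_vblank_get(crtc);	/* CRC readout relies on vblank interrupts */

	if (ret)
		return ret;

	if (dm_is_crc_source_dprx(src))
		return drm_dp_start_crc(aux, crtc);	/* sink computes CRCs, reported over AUX */

	/*
	 * CRTC-class sources: nothing else to start here;
	 * amdgpu_dm_crtc_handle_crc_irq() calls dc_stream_get_crc()
	 * on every vblank from now on.
	 */
	return 0;
}
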
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
new file mode 100644
index 000000000000..f7d731797d3f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_
+#define AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_
+
+struct drm_crtc;
+struct dm_crtc_state;
+
+enum amdgpu_dm_pipe_crc_source {
+ AMDGPU_DM_PIPE_CRC_SOURCE_NONE = 0,
+ AMDGPU_DM_PIPE_CRC_SOURCE_CRTC,
+ AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER,
+ AMDGPU_DM_PIPE_CRC_SOURCE_DPRX,
+ AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER,
+ AMDGPU_DM_PIPE_CRC_SOURCE_MAX,
+ AMDGPU_DM_PIPE_CRC_SOURCE_INVALID = -1,
+};
+
+static inline bool amdgpu_dm_is_valid_crc_source(enum amdgpu_dm_pipe_crc_source source)
+{
+ return (source > AMDGPU_DM_PIPE_CRC_SOURCE_NONE) &&
+ (source < AMDGPU_DM_PIPE_CRC_SOURCE_MAX);
+}
+
+/* amdgpu_dm_crc.c */
+#ifdef CONFIG_DEBUG_FS
+int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
+ struct dm_crtc_state *dm_crtc_state,
+ enum amdgpu_dm_pipe_crc_source source);
+int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name);
+int amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc,
+ const char *src_name,
+ size_t *values_cnt);
+const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
+ size_t *count);
+void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc);
+#else
+#define amdgpu_dm_crtc_set_crc_source NULL
+#define amdgpu_dm_crtc_verify_crc_source NULL
+#define amdgpu_dm_crtc_get_crc_sources NULL
+#define amdgpu_dm_crtc_handle_crc_irq(x)
+#endif
+
+#endif /* AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_ */
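
A hypothetical compile-time check (not in the patch) that makes the contract behind amdgpu_dm_is_valid_crc_source() explicit: NONE sits at 0, INVALID is negative, and every real source lies strictly between NONE and MAX.

static inline void amdgpu_dm_crc_source_enum_selftest(void)
{
	BUILD_BUG_ON(AMDGPU_DM_PIPE_CRC_SOURCE_NONE != 0);
	BUILD_BUG_ON(AMDGPU_DM_PIPE_CRC_SOURCE_INVALID >= 0);
	BUILD_BUG_ON(AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER >=
		     AMDGPU_DM_PIPE_CRC_SOURCE_MAX);
}
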
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 36a1d794b4af..f3dfb2887ae0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -1053,9 +1053,33 @@ static int target_backlight_read(struct seq_file *m, void *data)
return 0;
}
+static int mst_topo(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct amdgpu_dm_connector *aconnector;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ seq_printf(m, "\nMST topology for connector %d\n", aconnector->connector_id);
+ drm_dp_mst_dump_topology(m, &aconnector->mst_mgr);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return 0;
+}
+
static const struct drm_info_list amdgpu_dm_debugfs_list[] = {
{"amdgpu_current_backlight_pwm", &current_backlight_read},
{"amdgpu_target_backlight_pwm", &target_backlight_read},
+ {"amdgpu_mst_topology", &mst_topo},
};
/*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index a0ed0154a9f0..ee1dc75f5ddc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -548,7 +548,9 @@ bool dm_helpers_dp_write_dsc_enable(
bool enable
)
{
- return false;
+ uint8_t enable_dsc = enable ? 1 : 0;
+
+ return dm_helpers_dp_write_dpcd(ctx, stream->sink->link, DP_DSC_ENABLE, &enable_dsc, 1);
}
#endif
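
The new implementation routes the write through the DM's own DPCD helper; an equivalent using the generic DRM DP helpers would look roughly like the sketch below (illustrative only, helper name invented). DP_DSC_ENABLE is DPCD register 0x160, and bit 0 (DP_DECOMPRESSION_EN) toggles sink-side DSC decompression.

static int write_dsc_enable_sketch(struct drm_dp_aux *aux, bool enable)
{
	u8 val = enable ? DP_DECOMPRESSION_EN : 0;

	/* drm_dp_dpcd_writeb() returns the number of bytes written (1) on success */
	return drm_dp_dpcd_writeb(aux, DP_DSC_ENABLE, val) == 1 ? 0 : -EIO;
}
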
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index 592fa499c9f8..f4cfa0caeba8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -151,18 +151,31 @@ static void get_default_clock_levels(
static enum smu_clk_type dc_to_smu_clock_type(
enum dm_pp_clock_type dm_pp_clk_type)
{
-#define DCCLK_MAP_SMUCLK(dcclk, smuclk) \
- [dcclk] = smuclk
+ enum smu_clk_type smu_clk_type = SMU_CLK_COUNT;
- static int dc_clk_type_map[] = {
- DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_DISPLAY_CLK, SMU_DISPCLK),
- DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_ENGINE_CLK, SMU_GFXCLK),
- DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_MEMORY_CLK, SMU_MCLK),
- DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_DCEFCLK, SMU_DCEFCLK),
- DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_SOCCLK, SMU_SOCCLK),
- };
+ switch (dm_pp_clk_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ smu_clk_type = SMU_DISPCLK;
+ break;
+ case DM_PP_CLOCK_TYPE_ENGINE_CLK:
+ smu_clk_type = SMU_GFXCLK;
+ break;
+ case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+ smu_clk_type = SMU_MCLK;
+ break;
+ case DM_PP_CLOCK_TYPE_DCEFCLK:
+ smu_clk_type = SMU_DCEFCLK;
+ break;
+ case DM_PP_CLOCK_TYPE_SOCCLK:
+ smu_clk_type = SMU_SOCCLK;
+ break;
+ default:
+ DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
+ dm_pp_clk_type);
+ break;
+ }
- return dc_clk_type_map[dm_pp_clk_type];
+ return smu_clk_type;
}
static enum amd_pp_clock_type dc_to_pp_clock_type(
@@ -334,7 +347,7 @@ bool dm_pp_get_clock_levels_by_type(
}
} else if (adev->smu.funcs && adev->smu.funcs->get_clock_by_type) {
if (smu_get_clock_by_type(&adev->smu,
- dc_to_smu_clock_type(clk_type),
+ dc_to_pp_clock_type(clk_type),
&pp_clks)) {
get_default_clock_levels(clk_type, dc_clks);
return true;
@@ -419,7 +432,7 @@ bool dm_pp_get_clock_levels_by_type_with_latency(
return false;
} else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_latency) {
if (smu_get_clock_by_type_with_latency(&adev->smu,
- dc_to_pp_clock_type(clk_type),
+ dc_to_smu_clock_type(clk_type),
&pp_clks))
return false;
}
@@ -801,6 +814,19 @@ enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
return PP_SMU_RESULT_OK;
}
+enum pp_smu_status pp_nv_set_pstate_handshake_support(
+ struct pp_smu *pp, BOOLEAN pstate_handshake_supported)
+{
+ const struct dc_context *ctx = pp->dm;
+ struct amdgpu_device *adev = ctx->driver_context;
+ struct smu_context *smu = &adev->smu;
+
+ if (smu_display_disable_memory_clock_switch(smu, !pstate_handshake_supported))
+ return PP_SMU_RESULT_FAIL;
+
+ return PP_SMU_RESULT_OK;
+}
+
enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
enum pp_smu_nv_clock_id clock_id, int mhz)
{
@@ -916,6 +942,7 @@ void dm_pp_get_funcs(
funcs->nv_funcs.get_maximum_sustainable_clocks = pp_nv_get_maximum_sustainable_clocks;
/* TODO: compare data with the Windows driver */
funcs->nv_funcs.get_uclk_dpm_states = pp_nv_get_uclk_dpm_states;
+ funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support;
break;
#endif
default: