path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2017-05-03 11:44:24 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-05-03 11:44:24 -0700
commit    2f34c1231bfc9f2550f934acb268ac7315fb3837 (patch)
tree      ff8114b3b4ec4723a11b041c6b74c389e9f0eeb9 /drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
parent    Merge branch 'generic' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs (diff)
parent    Merge branch 'linux-4.12' of git://github.com/skeggsb/linux into drm-next (diff)
Merge tag 'drm-for-v4.12' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
 "This is the main drm pull request for v4.12. Apart from two fixes
  pulls, everything should have been in drm-next for at least 2 weeks.

  The biggest thing in here is AMD released the public headers for
  their upcoming VEGA GPUs. These as always are quite a sizeable chunk
  of header files. They've also added initial non-display support for
  those GPUs, though they aren't available in production yet.

  Otherwise it's pretty much normal.

  New bridge drivers:
   - megachips-stdpxxxx-ge-b850v3-fw LVDS->DP++
   - generic LVDS bridge support.

  Core:
   - Displayport link train failure reporting to userspace
   - debugfs interface cleaned up
   - subsystem TODO in kerneldoc now
   - Extended fbdev support (flipping and vblank wait)
   - drm_platform removed
   - EDP CRC support in helper
   - HF-VSDB SCDC support in EDID parser
   - Lots of code cleanups and header extraction
   - Thunderbolt external GPU awareness
   - Atomic helper improvements
   - Documentation improvements

  panel:
   - Sitronix and Samsung new panel support

  amdgpu:
   - Preliminary vega10 support
   - Multi-level page table support
   - GPU sensor support for userspace
   - PRT support for sparse buffers
   - SR-IOV improvements
   - Non-contig VRAM CPU mapping

  i915:
   - Atomic modesetting enabled by default on Gen5+
   - LSPCON improvements
   - Atomic state handling for cdclk
   - GPU reset improvements
   - In-kernel unit tests
   - Geminilake improvements and color manager support
   - Designware i2c fixes
   - vblank evasion improvements
   - Hotplug safe connector iterators
   - GVT scheduler QoS support
   - GVT Kabylake support

  nouveau:
   - Acceleration support for Pascal (GP10x).
   - Rearchitecture of code handling proprietary signed firmware
   - Fix GTX 970 with odd MMU configuration
   - GP10B support
   - GP107 acceleration support

  vmwgfx:
   - Atomic modesetting support for vmwgfx

  omapdrm:
   - Support for render nodes
   - Refactor omapdss code
   - Fix some probe ordering issues
   - Fix too dark RGB565 rendering

  sunxi:
   - prelim rework for multiple pipes.

  mali-dp:
   - Color management support
   - Plane scaling
   - Power management improvements

  imx-drm:
   - Prefetch Resolve Engine/Gasket on i.MX6QP
   - Deferred plane disabling
   - Separate alpha support

  mediatek:
   - Mediatek SoC MT2701 support

  rcar-du:
   - Gen3 HDMI support

  msm:
   - 4k support for newer chips
   - OPP bindings for gpu
   - prep work for per-process pagetables

  vc4:
   - HDMI audio support
   - fixes

  qxl:
   - minor fixes.

  dw-hdmi:
   - PHY improvements
   - CSC fixes
   - Amlogic GX SoC support"

* tag 'drm-for-v4.12' of git://people.freedesktop.org/~airlied/linux: (1778 commits)
  drm/nouveau/fb/gf100-: Fix 32 bit wraparound in new ram detection
  drm/nouveau/secboot/gm20b: fix the error return code in gm20b_secboot_tegra_read_wpr()
  drm/nouveau/kms: Increase max retries in scanout position queries.
  drm/nouveau/bios/bitP: check that table is long enough for optional pointers
  drm/nouveau/fifo/nv40: no ctxsw for pre-nv44 mpeg engine
  drm: mali-dp: use div_u64 for expensive 64-bit divisions
  drm/i915: Confirm the request is still active before adding it to the await
  drm/i915: Avoid busy-spinning on VLV_GLTC_PW_STATUS mmio
  drm/i915/selftests: Allocate inode/file dynamically
  drm/i915: Fix system hang with EI UP masked on Haswell
  drm/i915: checking for NULL instead of IS_ERR() in mock selftests
  drm/i915: Perform link quality check unconditionally during long pulse
  drm/i915: Fix use after free in lpe_audio_platdev_destroy()
  drm/i915: Use the right mapping_gfp_mask for final shmem allocation
  drm/i915: Make legacy cursor updates more unsynced
  drm/i915: Apply a cond_resched() to the saturated signaler
  drm/i915: Park the signaler before sleeping
  drm: mali-dp: Check the mclk rate and allow up/down scaling
  drm: mali-dp: Enable image enhancement when scaling
  drm: mali-dp: Add plane upscaling support
  ...
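
The amdgpu portion shown in the diff below adds two sysfs files, pp_gfx_power_profile and pp_compute_power_profile. Reading either returns five integers (min sclk in MHz, min mclk in MHz, activity threshold, up hysteresis, down hysteresis); writing accepts either five such values or the keywords "set" and "reset". The following is a minimal userspace sketch of that interface, not part of the commit: the card0 path and the written values are illustrative assumptions, and error handling is reduced to the essentials.

/*
 * Hedged example: read back and update pp_gfx_power_profile as added
 * by the diff below.  The sysfs path depends on which DRM card the
 * amdgpu device is; card0 is assumed here.  Writes need root.
 */
#include <stdio.h>

#define PROFILE_PATH "/sys/class/drm/card0/device/pp_gfx_power_profile"

int main(void)
{
	unsigned int min_sclk_mhz, min_mclk_mhz, activity, up_hyst, down_hyst;
	FILE *f = fopen(PROFILE_PATH, "r");

	if (!f) {
		perror("open for read");
		return 1;
	}
	/* Format produced by amdgpu_get_pp_power_profile(): five integers. */
	if (fscanf(f, "%u %u %u %u %u", &min_sclk_mhz, &min_mclk_mhz,
		   &activity, &up_hyst, &down_hyst) == 5)
		printf("sclk %u MHz, mclk %u MHz, activity %u, up %u, down %u\n",
		       min_sclk_mhz, min_mclk_mhz, activity, up_hyst, down_hyst);
	fclose(f);

	/* Writing five space-separated values updates the profile;
	 * "set" and "reset" are also accepted by amdgpu_set_pp_power_profile(). */
	f = fopen(PROFILE_PATH, "w");
	if (!f) {
		perror("open for write");
		return 1;
	}
	fprintf(f, "600 800 30 10 10\n");	/* illustrative values only */
	fclose(f);
	return 0;
}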
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c  243
1 file changed, 228 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 346e80a7119b..990fde2cf4fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -43,16 +43,22 @@ static const struct cg_flag_name clocks[] = {
{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
+ {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
+ {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
{0, NULL},
};
@@ -610,6 +616,174 @@ fail:
return count;
}
+static ssize_t amdgpu_get_pp_power_profile(struct device *dev,
+ char *buf, struct amd_pp_profile *query)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ int ret = 0;
+
+ if (adev->pp_enabled)
+ ret = amdgpu_dpm_get_power_profile_state(
+ adev, query);
+ else if (adev->pm.funcs->get_power_profile_state)
+ ret = adev->pm.funcs->get_power_profile_state(
+ adev, query);
+
+ if (ret)
+ return ret;
+
+ return snprintf(buf, PAGE_SIZE,
+ "%d %d %d %d %d\n",
+ query->min_sclk / 100,
+ query->min_mclk / 100,
+ query->activity_threshold,
+ query->up_hyst,
+ query->down_hyst);
+}
+
+static ssize_t amdgpu_get_pp_gfx_power_profile(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amd_pp_profile query = {0};
+
+ query.type = AMD_PP_GFX_PROFILE;
+
+ return amdgpu_get_pp_power_profile(dev, buf, &query);
+}
+
+static ssize_t amdgpu_get_pp_compute_power_profile(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amd_pp_profile query = {0};
+
+ query.type = AMD_PP_COMPUTE_PROFILE;
+
+ return amdgpu_get_pp_power_profile(dev, buf, &query);
+}
+
+static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
+ const char *buf,
+ size_t count,
+ struct amd_pp_profile *request)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ uint32_t loop = 0;
+ char *sub_str, buf_cpy[128], *tmp_str;
+ const char delimiter[3] = {' ', '\n', '\0'};
+ long int value;
+ int ret = 0;
+
+ if (strncmp("reset", buf, strlen("reset")) == 0) {
+ if (adev->pp_enabled)
+ ret = amdgpu_dpm_reset_power_profile_state(
+ adev, request);
+ else if (adev->pm.funcs->reset_power_profile_state)
+ ret = adev->pm.funcs->reset_power_profile_state(
+ adev, request);
+ if (ret) {
+ count = -EINVAL;
+ goto fail;
+ }
+ return count;
+ }
+
+ if (strncmp("set", buf, strlen("set")) == 0) {
+ if (adev->pp_enabled)
+ ret = amdgpu_dpm_set_power_profile_state(
+ adev, request);
+ else if (adev->pm.funcs->set_power_profile_state)
+ ret = adev->pm.funcs->set_power_profile_state(
+ adev, request);
+ if (ret) {
+ count = -EINVAL;
+ goto fail;
+ }
+ return count;
+ }
+
+ if (count + 1 >= 128) {
+ count = -EINVAL;
+ goto fail;
+ }
+
+ memcpy(buf_cpy, buf, count + 1);
+ tmp_str = buf_cpy;
+
+ while (tmp_str[0]) {
+ sub_str = strsep(&tmp_str, delimiter);
+ ret = kstrtol(sub_str, 0, &value);
+ if (ret) {
+ count = -EINVAL;
+ goto fail;
+ }
+
+ switch (loop) {
+ case 0:
+ /* input unit MHz convert to dpm table unit 10KHz*/
+ request->min_sclk = (uint32_t)value * 100;
+ break;
+ case 1:
+ /* input unit MHz convert to dpm table unit 10KHz*/
+ request->min_mclk = (uint32_t)value * 100;
+ break;
+ case 2:
+ request->activity_threshold = (uint16_t)value;
+ break;
+ case 3:
+ request->up_hyst = (uint8_t)value;
+ break;
+ case 4:
+ request->down_hyst = (uint8_t)value;
+ break;
+ default:
+ break;
+ }
+
+ loop++;
+ }
+
+ if (adev->pp_enabled)
+ ret = amdgpu_dpm_set_power_profile_state(
+ adev, request);
+ else if (adev->pm.funcs->set_power_profile_state)
+ ret = adev->pm.funcs->set_power_profile_state(
+ adev, request);
+
+ if (ret)
+ count = -EINVAL;
+
+fail:
+ return count;
+}
+
+static ssize_t amdgpu_set_pp_gfx_power_profile(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct amd_pp_profile request = {0};
+
+ request.type = AMD_PP_GFX_PROFILE;
+
+ return amdgpu_set_pp_power_profile(dev, buf, count, &request);
+}
+
+static ssize_t amdgpu_set_pp_compute_power_profile(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct amd_pp_profile request = {0};
+
+ request.type = AMD_PP_COMPUTE_PROFILE;
+
+ return amdgpu_set_pp_power_profile(dev, buf, count, &request);
+}
+
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
amdgpu_get_dpm_forced_performance_level,
@@ -637,6 +811,12 @@ static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
amdgpu_get_pp_mclk_od,
amdgpu_set_pp_mclk_od);
+static DEVICE_ATTR(pp_gfx_power_profile, S_IRUGO | S_IWUSR,
+ amdgpu_get_pp_gfx_power_profile,
+ amdgpu_set_pp_gfx_power_profile);
+static DEVICE_ATTR(pp_compute_power_profile, S_IRUGO | S_IWUSR,
+ amdgpu_get_pp_compute_power_profile,
+ amdgpu_set_pp_compute_power_profile);
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
@@ -1142,11 +1322,11 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
/* XXX select vce level based on ring/task */
adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
mutex_unlock(&adev->pm.mutex);
- amdgpu_pm_compute_clocks(adev);
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_UNGATE);
amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_UNGATE);
+ amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_UNGATE);
+ amdgpu_pm_compute_clocks(adev);
} else {
amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_GATE);
@@ -1255,6 +1435,20 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
DRM_ERROR("failed to create device file pp_mclk_od\n");
return ret;
}
+ ret = device_create_file(adev->dev,
+ &dev_attr_pp_gfx_power_profile);
+ if (ret) {
+ DRM_ERROR("failed to create device file "
+ "pp_gfx_power_profile\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev,
+ &dev_attr_pp_compute_power_profile);
+ if (ret) {
+ DRM_ERROR("failed to create device file "
+ "pp_compute_power_profile\n");
+ return ret;
+ }
ret = amdgpu_debugfs_pm_init(adev);
if (ret) {
@@ -1284,6 +1478,10 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
+ device_remove_file(adev->dev,
+ &dev_attr_pp_gfx_power_profile);
+ device_remove_file(adev->dev,
+ &dev_attr_pp_compute_power_profile);
}
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
@@ -1340,7 +1538,9 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
- int32_t value;
+ uint32_t value;
+ struct pp_gpu_power query = {0};
+ int size;
/* sanity check PP is enabled */
if (!(adev->powerplay.pp_funcs &&
@@ -1348,47 +1548,60 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
return -EINVAL;
/* GPU Clocks */
+ size = sizeof(value);
seq_printf(m, "GFX Clocks and Power:\n");
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, &value))
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, &value))
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, &value))
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
seq_printf(m, "\t%u mV (VDDGFX)\n", value);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, &value))
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
seq_printf(m, "\t%u mV (VDDNB)\n", value);
+ size = sizeof(query);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size)) {
+ seq_printf(m, "\t%u.%u W (VDDC)\n", query.vddc_power >> 8,
+ query.vddc_power & 0xff);
+ seq_printf(m, "\t%u.%u W (VDDCI)\n", query.vddci_power >> 8,
+ query.vddci_power & 0xff);
+ seq_printf(m, "\t%u.%u W (max GPU)\n", query.max_gpu_power >> 8,
+ query.max_gpu_power & 0xff);
+ seq_printf(m, "\t%u.%u W (average GPU)\n", query.average_gpu_power >> 8,
+ query.average_gpu_power & 0xff);
+ }
+ size = sizeof(value);
seq_printf(m, "\n");
/* GPU Temp */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, &value))
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
seq_printf(m, "GPU Temperature: %u C\n", value/1000);
/* GPU Load */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value))
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
seq_printf(m, "GPU Load: %u %%\n", value);
seq_printf(m, "\n");
/* UVD clocks */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, &value)) {
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
if (!value) {
seq_printf(m, "UVD: Disabled\n");
} else {
seq_printf(m, "UVD: Enabled\n");
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, &value))
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, &value))
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
}
}
seq_printf(m, "\n");
/* VCE clocks */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, &value)) {
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
if (!value) {
seq_printf(m, "VCE: Disabled\n");
} else {
seq_printf(m, "VCE: Enabled\n");
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, &value))
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
}
}
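
The new debugfs power readout in the hunk above splits each pp_gpu_power field with ">> 8" and "& 0xff" before printing it as "%u.%u W", which reads like an 8.8 fixed-point watt value (high byte whole watts, low byte in 1/256 W steps). A small sketch of that decoding in userspace terms follows; the interpretation as 8.8 fixed point and the sample value are assumptions made for illustration, not something stated in the diff.

/*
 * Hedged sketch: decode a value split the way the debugfs hunk above
 * splits it (v >> 8 as the integer part, v & 0xff as the fractional
 * byte), assuming 8.8 fixed-point watts.  The sample value is made up.
 */
#include <stdint.h>
#include <stdio.h>

static double decode_8_8_watts(uint32_t v)
{
	return (v >> 8) + (v & 0xff) / 256.0;
}

int main(void)
{
	uint32_t sample = (42 << 8) | 0x80;	/* 42.5 W in 8.8 fixed point */

	printf("integer part: %u W, fractional byte: %u\n",
	       sample >> 8, sample & 0xff);
	printf("decoded: %.3f W\n", decode_8_8_watts(sample));
	return 0;
}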