Diffstat (limited to 'drivers/gpu/drm/i915/i915_sysfs.c')
 drivers/gpu/drm/i915/i915_sysfs.c | 49 +++++++++++++++++++++++++++++++++--------------------
 1 file changed, 29 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index cef38fd320a7..33bcae314bf8 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -40,10 +40,13 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
struct drm_i915_private *dev_priv = dev->dev_private;
u64 raw_time; /* 32b value may overflow during fixed point math */
u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
+ u32 ret;
if (!intel_enable_rc6(dev))
return 0;
+ intel_runtime_pm_get(dev_priv);
+
/* On VLV, residency time is in CZ units rather than 1.28us */
if (IS_VALLEYVIEW(dev)) {
u32 clkctl2;
@@ -52,7 +55,8 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
CLK_CTL2_CZCOUNT_30NS_SHIFT;
if (!clkctl2) {
WARN(!clkctl2, "bogus CZ count value");
- return 0;
+ ret = 0;
+ goto out;
}
units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
@@ -62,7 +66,11 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
}
raw_time = I915_READ(reg) * units;
- return DIV_ROUND_UP_ULL(raw_time, div);
+ ret = DIV_ROUND_UP_ULL(raw_time, div);
+
+out:
+ intel_runtime_pm_put(dev_priv);
+ return ret;
}
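Taken together, the calc_residency() hunks above replace the early returns with a single exit label so that the runtime PM reference taken at the top is always dropped before returning. A rough sketch of the resulting function follows; the register name outside the quoted context (VLV_CLK_CTL2) and the untouched high-range/divisor handling are assumptions based on the surrounding code:

static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 raw_time; /* 32b value may overflow during fixed point math */
	u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
	u32 ret;

	if (!intel_enable_rc6(dev))
		return 0;

	/* Keep the device awake while poking at GT registers. */
	intel_runtime_pm_get(dev_priv);

	/* On VLV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev)) {
		u32 clkctl2;

		clkctl2 = I915_READ(VLV_CLK_CTL2) >>	/* register name assumed */
			CLK_CTL2_CZCOUNT_30NS_SHIFT;
		if (!clkctl2) {
			WARN(!clkctl2, "bogus CZ count value");
			ret = 0;
			goto out;	/* still drops the PM reference */
		}
		units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
		/* (high-range and divisor adjustments elided; unchanged here) */
	}

	raw_time = I915_READ(reg) * units;
	ret = DIV_ROUND_UP_ULL(raw_time, div);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}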
static ssize_t
@@ -183,13 +191,13 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
int slice = (int)(uintptr_t)attr->private;
int ret;
+ if (!HAS_HW_CONTEXTS(drm_dev))
+ return -ENXIO;
+
ret = l3_access_valid(drm_dev, offset);
if (ret)
return ret;
- if (dev_priv->hw_contexts_disabled)
- return -ENXIO;
-
ret = i915_mutex_lock_interruptible(drm_dev);
if (ret)
return ret;
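The i915_l3_write() hunk swaps the per-device hw_contexts_disabled field for the HAS_HW_CONTEXTS() device-info check and moves it ahead of the offset validation, so a write on a platform without hardware contexts fails with -ENXIO before anything else is inspected. The entry of the function then reads roughly:

	/* Reject the write outright on platforms without HW contexts. */
	if (!HAS_HW_CONTEXTS(drm_dev))
		return -ENXIO;

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;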
@@ -259,7 +267,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
if (IS_VALLEYVIEW(dev_priv->dev)) {
u32 freq;
freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- ret = vlv_gpu_freq(dev_priv->mem_freq, (freq >> 8) & 0xff);
+ ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff);
} else {
ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
}
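This and the following frequency show/store hunks all make the same mechanical change: vlv_gpu_freq() and vlv_freq_opcode() are now passed the dev_priv pointer instead of dev_priv->mem_freq, presumably so the helpers can look up the memory clock (and any other platform data they need) themselves. The assumed new prototypes, declared elsewhere and not visible in this file, would be along the lines of:

/* Hypothetical prototypes; the real declarations live outside i915_sysfs.c. */
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);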
@@ -276,8 +284,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
struct drm_i915_private *dev_priv = dev->dev_private;
return snprintf(buf, PAGE_SIZE, "%d\n",
- vlv_gpu_freq(dev_priv->mem_freq,
- dev_priv->rps.rpe_delay));
+ vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay));
}
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -291,7 +298,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev))
- ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
+ ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay);
else
ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -318,7 +325,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev)) {
- val = vlv_freq_opcode(dev_priv->mem_freq, val);
+ val = vlv_freq_opcode(dev_priv, val);
hw_max = valleyview_rps_max_freq(dev_priv);
hw_min = valleyview_rps_min_freq(dev_priv);
@@ -342,15 +349,15 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
DRM_DEBUG("User requested overclocking to %d\n",
val * GT_FREQUENCY_MULTIPLIER);
+ dev_priv->rps.max_delay = val;
+
if (dev_priv->rps.cur_delay > val) {
- if (IS_VALLEYVIEW(dev_priv->dev))
- valleyview_set_rps(dev_priv->dev, val);
+ if (IS_VALLEYVIEW(dev))
+ valleyview_set_rps(dev, val);
else
- gen6_set_rps(dev_priv->dev, val);
+ gen6_set_rps(dev, val);
}
- dev_priv->rps.max_delay = val;
-
mutex_unlock(&dev_priv->rps.hw_lock);
return count;
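In gt_max_freq_mhz_store() the new soft limit is now recorded in rps.max_delay before the current frequency is clamped, so the set_rps() call already runs against the updated limit; the store path also uses the local dev pointer rather than dereferencing dev_priv->dev again. The reordered tail of the function looks roughly like this (gt_min_freq_mhz_store() below gets the mirror-image treatment):

	/* Publish the new upper limit first... */
	dev_priv->rps.max_delay = val;

	/* ...then, if the GPU is currently running above it, clamp the
	 * frequency down with the limit already in place. */
	if (dev_priv->rps.cur_delay > val) {
		if (IS_VALLEYVIEW(dev))
			valleyview_set_rps(dev, val);
		else
			gen6_set_rps(dev, val);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;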
@@ -367,7 +374,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev))
- ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
+ ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay);
else
ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -394,7 +401,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev)) {
- val = vlv_freq_opcode(dev_priv->mem_freq, val);
+ val = vlv_freq_opcode(dev_priv, val);
hw_max = valleyview_rps_max_freq(dev_priv);
hw_min = valleyview_rps_min_freq(dev_priv);
@@ -411,15 +418,15 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
return -EINVAL;
}
+ dev_priv->rps.min_delay = val;
+
if (dev_priv->rps.cur_delay < val) {
if (IS_VALLEYVIEW(dev))
valleyview_set_rps(dev, val);
else
- gen6_set_rps(dev_priv->dev, val);
+ gen6_set_rps(dev, val);
}
- dev_priv->rps.min_delay = val;
-
mutex_unlock(&dev_priv->rps.hw_lock);
return count;
@@ -449,7 +456,9 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
if (attr == &dev_attr_gt_RP0_freq_mhz) {
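The final hunk applies the same runtime PM pattern as calc_residency(), but inline: gt_rp_mhz_show() wakes the device only for the single GEN6_RP_STATE_CAP read, so reading the RP0/RP1/RPn sysfs files no longer touches the register while the GPU is runtime-suspended. After the hunk the read sequence is simply:

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	/* Hold a runtime PM reference just for this register read. */
	intel_runtime_pm_get(dev_priv);
	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);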