Diffstat (limited to 'drivers/gpu/drm/i915/intel_pm.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_pm.c	1229
1 file changed, 567 insertions(+), 662 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a26b4eddda25..54307f1df6cf 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -26,13 +26,16 @@
*/
#include <linux/cpufreq.h>
+#include <linux/module.h>
#include <linux/pm_runtime.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
+
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
-#include <linux/module.h>
-#include <drm/drm_atomic_helper.h>
/**
* DOC: RC6
@@ -480,7 +483,7 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
int sprite0_start, sprite1_start;
switch (pipe) {
- uint32_t dsparb, dsparb2, dsparb3;
+ u32 dsparb, dsparb2, dsparb3;
case PIPE_A:
dsparb = I915_READ(DSPARB);
dsparb2 = I915_READ(DSPARB2);
@@ -513,7 +516,7 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane)
{
- uint32_t dsparb = I915_READ(DSPARB);
+ u32 dsparb = I915_READ(DSPARB);
int size;
size = dsparb & 0x7f;
@@ -529,7 +532,7 @@ static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane)
{
- uint32_t dsparb = I915_READ(DSPARB);
+ u32 dsparb = I915_READ(DSPARB);
int size;
size = dsparb & 0x1ff;
@@ -546,7 +549,7 @@ static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane)
{
- uint32_t dsparb = I915_READ(DSPARB);
+ u32 dsparb = I915_READ(DSPARB);
int size;
size = dsparb & 0x7f;
@@ -667,9 +670,9 @@ static unsigned int intel_wm_method1(unsigned int pixel_rate,
unsigned int cpp,
unsigned int latency)
{
- uint64_t ret;
+ u64 ret;
- ret = (uint64_t) pixel_rate * cpp * latency;
+ ret = (u64)pixel_rate * cpp * latency;
ret = DIV_ROUND_UP_ULL(ret, 10000);
return ret;
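
As a standalone illustration (not part of the patch), the method1 calculation above can be modelled as follows; the 148500 kHz / 4 bytes-per-pixel / 2 us numbers are invented:

#include <stdint.h>
#include <stdio.h>

/* Model of intel_wm_method1(): pixel_rate is in kHz and latency in
 * 0.1 us units, so the product is in units of 1e-4 bytes, hence the
 * round-up divide by 10000. */
static uint64_t wm_method1(uint64_t pixel_rate, uint64_t cpp, uint64_t latency)
{
	return (pixel_rate * cpp * latency + 9999) / 10000;
}

int main(void)
{
	/* invented example: 148500 kHz pixel clock, 4 bytes/pixel, 2 us latency */
	printf("%llu bytes\n", (unsigned long long)wm_method1(148500, 4, 20));
	/* prints "1188 bytes": the amount fetched during the latency window */
	return 0;
}
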
@@ -1089,9 +1092,9 @@ static int g4x_fbc_fifo_size(int level)
}
}
-static uint16_t g4x_compute_wm(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int level)
+static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int level)
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -1188,9 +1191,9 @@ static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
return dirty;
}
-static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
- uint32_t pri_val);
+static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
+ const struct intel_plane_state *pstate,
+ u32 pri_val);
static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
@@ -1399,10 +1402,9 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
return 0;
}
-static int g4x_compute_intermediate_wm(struct drm_device *dev,
- struct intel_crtc *crtc,
- struct intel_crtc_state *new_crtc_state)
+static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
struct intel_atomic_state *intel_state =
@@ -1599,9 +1601,9 @@ static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
}
}
-static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int level)
+static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int level)
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -1969,7 +1971,7 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
spin_lock(&dev_priv->uncore.lock);
switch (crtc->pipe) {
- uint32_t dsparb, dsparb2, dsparb3;
+ u32 dsparb, dsparb2, dsparb3;
case PIPE_A:
dsparb = I915_READ_FW(DSPARB);
dsparb2 = I915_READ_FW(DSPARB2);
@@ -2032,10 +2034,9 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
#undef VLV_FIFO
-static int vlv_compute_intermediate_wm(struct drm_device *dev,
- struct intel_crtc *crtc,
- struct intel_crtc_state *new_crtc_state)
+static int vlv_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
struct intel_atomic_state *intel_state =
@@ -2264,8 +2265,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
{
struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
const struct intel_watermark_params *wm_info;
- uint32_t fwater_lo;
- uint32_t fwater_hi;
+ u32 fwater_lo;
+ u32 fwater_hi;
int cwm, srwm = 1;
int fifo_size;
int planea_wm, planeb_wm;
@@ -2273,7 +2274,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
if (IS_I945GM(dev_priv))
wm_info = &i945_wm_info;
- else if (!IS_GEN2(dev_priv))
+ else if (!IS_GEN(dev_priv, 2))
wm_info = &i915_wm_info;
else
wm_info = &i830_a_wm_info;
@@ -2287,7 +2288,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
crtc->base.primary->state->fb;
int cpp;
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
cpp = 4;
else
cpp = fb->format->cpp[0];
@@ -2302,7 +2303,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
planea_wm = wm_info->max_wm;
}
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
wm_info = &i830_bc_wm_info;
fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B);
@@ -2314,7 +2315,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
crtc->base.primary->state->fb;
int cpp;
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
cpp = 4;
else
cpp = fb->format->cpp[0];
@@ -2408,7 +2409,7 @@ static void i845_update_wm(struct intel_crtc *unused_crtc)
struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
struct intel_crtc *crtc;
const struct drm_display_mode *adjusted_mode;
- uint32_t fwater_lo;
+ u32 fwater_lo;
int planea_wm;
crtc = single_enabled_crtc(dev_priv);
@@ -2457,8 +2458,7 @@ static unsigned int ilk_wm_method2(unsigned int pixel_rate,
return ret;
}
-static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
- uint8_t cpp)
+static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
{
/*
* Neither of these should be possible since this function shouldn't be
@@ -2475,22 +2475,21 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
}
struct ilk_wm_maximums {
- uint16_t pri;
- uint16_t spr;
- uint16_t cur;
- uint16_t fbc;
+ u16 pri;
+ u16 spr;
+ u16 cur;
+ u16 fbc;
};
/*
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
- uint32_t mem_value,
- bool is_lp)
+static u32 ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
+ const struct intel_plane_state *pstate,
+ u32 mem_value, bool is_lp)
{
- uint32_t method1, method2;
+ u32 method1, method2;
int cpp;
if (mem_value == 0)
@@ -2518,11 +2517,11 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
- uint32_t mem_value)
+static u32 ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
+ const struct intel_plane_state *pstate,
+ u32 mem_value)
{
- uint32_t method1, method2;
+ u32 method1, method2;
int cpp;
if (mem_value == 0)
@@ -2545,9 +2544,9 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
- uint32_t mem_value)
+static u32 ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
+ const struct intel_plane_state *pstate,
+ u32 mem_value)
{
int cpp;
@@ -2565,9 +2564,9 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
}
/* Only for WM_LP. */
-static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
- uint32_t pri_val)
+static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
+ const struct intel_plane_state *pstate,
+ u32 pri_val)
{
int cpp;
@@ -2626,13 +2625,12 @@ static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
}
/* Calculate the maximum primary/sprite plane watermark */
-static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
+static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
int level,
const struct intel_wm_config *config,
enum intel_ddb_partitioning ddb_partitioning,
bool is_sprite)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
/* if sprites aren't enabled, sprites get nothing */
@@ -2668,7 +2666,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
}
/* Calculate the maximum cursor plane watermark */
-static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
+static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
int level,
const struct intel_wm_config *config)
{
@@ -2677,19 +2675,19 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
return 64;
/* otherwise just report max that registers can hold */
- return ilk_cursor_wm_reg_max(to_i915(dev), level);
+ return ilk_cursor_wm_reg_max(dev_priv, level);
}
-static void ilk_compute_wm_maximums(const struct drm_device *dev,
+static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
int level,
const struct intel_wm_config *config,
enum intel_ddb_partitioning ddb_partitioning,
struct ilk_wm_maximums *max)
{
- max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
- max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
- max->cur = ilk_cursor_wm_max(dev, level, config);
- max->fbc = ilk_fbc_wm_reg_max(to_i915(dev));
+ max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
+ max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
+ max->cur = ilk_cursor_wm_max(dev_priv, level, config);
+ max->fbc = ilk_fbc_wm_reg_max(dev_priv);
}
static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
@@ -2734,9 +2732,9 @@ static bool ilk_validate_wm_level(int level,
DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
level, result->cur_val, max->cur);
- result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
- result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
- result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
+ result->pri_val = min_t(u32, result->pri_val, max->pri);
+ result->spr_val = min_t(u32, result->spr_val, max->spr);
+ result->cur_val = min_t(u32, result->cur_val, max->cur);
result->enable = true;
}
@@ -2752,9 +2750,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
const struct intel_plane_state *curstate,
struct intel_wm_level *result)
{
- uint16_t pri_latency = dev_priv->wm.pri_latency[level];
- uint16_t spr_latency = dev_priv->wm.spr_latency[level];
- uint16_t cur_latency = dev_priv->wm.cur_latency[level];
+ u16 pri_latency = dev_priv->wm.pri_latency[level];
+ u16 spr_latency = dev_priv->wm.spr_latency[level];
+ u16 cur_latency = dev_priv->wm.cur_latency[level];
/* WM1+ latency values stored in 0.5us units */
if (level > 0) {
@@ -2778,7 +2776,7 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
result->enable = true;
}
-static uint32_t
+static u32
hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
{
const struct intel_atomic_state *intel_state =
@@ -2807,10 +2805,10 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
}
static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
- uint16_t wm[8])
+ u16 wm[8])
{
if (INTEL_GEN(dev_priv) >= 9) {
- uint32_t val;
+ u32 val;
int ret, i;
int level, max_level = ilk_wm_max_level(dev_priv);
@@ -2894,7 +2892,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
wm[0] += 1;
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- uint64_t sskpd = I915_READ64(MCH_SSKPD);
+ u64 sskpd = I915_READ64(MCH_SSKPD);
wm[0] = (sskpd >> 56) & 0xFF;
if (wm[0] == 0)
@@ -2904,14 +2902,14 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
wm[3] = (sskpd >> 20) & 0x1FF;
wm[4] = (sskpd >> 32) & 0x1FF;
} else if (INTEL_GEN(dev_priv) >= 6) {
- uint32_t sskpd = I915_READ(MCH_SSKPD);
+ u32 sskpd = I915_READ(MCH_SSKPD);
wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
} else if (INTEL_GEN(dev_priv) >= 5) {
- uint32_t mltr = I915_READ(MLTR_ILK);
+ u32 mltr = I915_READ(MLTR_ILK);
/* ILK primary LP0 latency is 700 ns */
wm[0] = 7;
@@ -2923,18 +2921,18 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
}
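
A hedged decode sketch for the HSW/BDW MCH_SSKPD path above, using only the bit fields visible in this hunk (the wm[1]/wm[2] shifts are cut off by the hunk boundary, so they are omitted; the packed register value is invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* invented register value: wm0=2, wm3=0x40, wm4=0x80 packed as above */
	uint64_t sskpd = (2ULL << 56) | (0x40ULL << 20) | (0x80ULL << 32);
	uint16_t wm0 = (sskpd >> 56) & 0xFF;   /* WM0 latency, 0.1 us units */
	uint16_t wm3 = (sskpd >> 20) & 0x1FF;  /* WM3 latency */
	uint16_t wm4 = (sskpd >> 32) & 0x1FF;  /* WM4 latency */

	printf("wm0=%u wm3=%u wm4=%u\n", wm0, wm3, wm4);
	return 0;
}
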
static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
- uint16_t wm[5])
+ u16 wm[5])
{
/* ILK sprite LP0 latency is 1300 ns */
- if (IS_GEN5(dev_priv))
+ if (IS_GEN(dev_priv, 5))
wm[0] = 13;
}
static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
- uint16_t wm[5])
+ u16 wm[5])
{
/* ILK cursor LP0 latency is 1300 ns */
- if (IS_GEN5(dev_priv))
+ if (IS_GEN(dev_priv, 5))
wm[0] = 13;
}
@@ -2953,7 +2951,7 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
const char *name,
- const uint16_t wm[8])
+ const u16 wm[8])
{
int level, max_level = ilk_wm_max_level(dev_priv);
@@ -2982,7 +2980,7 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
}
static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
- uint16_t wm[5], uint16_t min)
+ u16 wm[5], u16 min)
{
int level, max_level = ilk_wm_max_level(dev_priv);
@@ -2991,7 +2989,7 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
wm[0] = max(wm[0], min);
for (level = 1; level <= max_level; level++)
- wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
+ wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
return true;
}
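
A minimal sketch of what ilk_increase_wm_latency() does with, for example, the 1.2 us floor used by the snb quirk (the latency array is invented): wm[0] is stored in 0.1 us units, WM1+ in 0.5 us units, hence the DIV_ROUND_UP(min, 5):

#include <stdio.h>

int main(void)
{
	/* invented latencies: wm[0] in 0.1 us units, wm[1..4] in 0.5 us units */
	unsigned short wm[5] = { 2, 0, 4, 0, 8 };
	unsigned short min = 12; /* enforce a 1.2 us floor, as in the snb quirk */
	int level;

	wm[0] = wm[0] > min ? wm[0] : min;
	for (level = 1; level < 5; level++) {
		unsigned short floor = (min + 4) / 5; /* DIV_ROUND_UP(min, 5) */
		if (wm[level] < floor)
			wm[level] = floor;
	}
	printf("%u %u %u %u %u\n", wm[0], wm[1], wm[2], wm[3], wm[4]);
	/* prints "12 3 4 3 8" */
	return 0;
}
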
@@ -3061,7 +3059,7 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
- if (IS_GEN6(dev_priv)) {
+ if (IS_GEN(dev_priv, 6)) {
snb_wm_latency_quirk(dev_priv);
snb_wm_lp3_irq_quirk(dev_priv);
}
@@ -3073,7 +3071,7 @@ static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
}
-static bool ilk_validate_pipe_wm(struct drm_device *dev,
+static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
struct intel_pipe_wm *pipe_wm)
{
/* LP0 watermark maximums depend on this pipe alone */
@@ -3085,7 +3083,7 @@ static bool ilk_validate_pipe_wm(struct drm_device *dev,
struct ilk_wm_maximums max;
/* LP0 watermarks always use 1/2 DDB partitioning */
- ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
+ ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
/* At least LP0 must be valid */
if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
@@ -3150,7 +3148,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
- if (!ilk_validate_pipe_wm(dev, pipe_wm))
+ if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
return -EINVAL;
ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
@@ -3180,17 +3178,17 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
* state and the new state. These can be programmed to the hardware
* immediately.
*/
-static int ilk_compute_intermediate_wm(struct drm_device *dev,
- struct intel_crtc *intel_crtc,
- struct intel_crtc_state *newstate)
+static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
{
+ struct intel_crtc *intel_crtc = to_intel_crtc(newstate->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
struct intel_atomic_state *intel_state =
to_intel_atomic_state(newstate->base.state);
const struct intel_crtc_state *oldstate =
intel_atomic_get_old_crtc_state(intel_state, intel_crtc);
const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal;
- int level, max_level = ilk_wm_max_level(to_i915(dev));
+ int level, max_level = ilk_wm_max_level(dev_priv);
/*
* Start with the final, target watermarks, then combine with the
@@ -3223,7 +3221,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
* there's no safe way to transition from the old state to
* the new state, so we need to fail the atomic transaction.
*/
- if (!ilk_validate_pipe_wm(dev, a))
+ if (!ilk_validate_pipe_wm(dev_priv, a))
return -EINVAL;
/*
@@ -3239,7 +3237,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
/*
* Merge the watermarks from all active pipes for a specific level.
*/
-static void ilk_merge_wm_level(struct drm_device *dev,
+static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
int level,
struct intel_wm_level *ret_wm)
{
@@ -3247,7 +3245,7 @@ static void ilk_merge_wm_level(struct drm_device *dev,
ret_wm->enable = true;
- for_each_intel_crtc(dev, intel_crtc) {
+ for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
const struct intel_wm_level *wm = &active->wm[level];
@@ -3272,12 +3270,11 @@ static void ilk_merge_wm_level(struct drm_device *dev,
/*
* Merge all low power watermarks for all active pipes.
*/
-static void ilk_wm_merge(struct drm_device *dev,
+static void ilk_wm_merge(struct drm_i915_private *dev_priv,
const struct intel_wm_config *config,
const struct ilk_wm_maximums *max,
struct intel_pipe_wm *merged)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int level, max_level = ilk_wm_max_level(dev_priv);
int last_enabled_level = max_level;
@@ -3293,7 +3290,7 @@ static void ilk_wm_merge(struct drm_device *dev,
for (level = 1; level <= max_level; level++) {
struct intel_wm_level *wm = &merged->wm[level];
- ilk_merge_wm_level(dev, level, wm);
+ ilk_merge_wm_level(dev_priv, level, wm);
if (level > last_enabled_level)
wm->enable = false;
@@ -3318,7 +3315,7 @@ static void ilk_wm_merge(struct drm_device *dev,
* What we should check here is whether FBC can be
* enabled sometime later.
*/
- if (IS_GEN5(dev_priv) && !merged->fbc_wm_enabled &&
+ if (IS_GEN(dev_priv, 5) && !merged->fbc_wm_enabled &&
intel_fbc_is_active(dev_priv)) {
for (level = 2; level <= max_level; level++) {
struct intel_wm_level *wm = &merged->wm[level];
@@ -3335,22 +3332,20 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
}
/* The value we need to program into the WM_LPx latency field */
-static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
+static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
+ int level)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
return 2 * level;
else
return dev_priv->wm.pri_latency[level];
}
-static void ilk_compute_wm_results(struct drm_device *dev,
+static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
const struct intel_pipe_wm *merged,
enum intel_ddb_partitioning partitioning,
struct ilk_wm_values *results)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc;
int level, wm_lp;
@@ -3370,7 +3365,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
* disabled. Doing otherwise could cause underruns.
*/
results->wm_lp[wm_lp - 1] =
- (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
+ (ilk_wm_lp_latency(dev_priv, level) << WM1_LP_LATENCY_SHIFT) |
(r->pri_val << WM1_LP_SR_SHIFT) |
r->cur_val;
@@ -3396,7 +3391,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
}
/* LP0 register values */
- for_each_intel_crtc(dev, intel_crtc) {
+ for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
enum pipe pipe = intel_crtc->pipe;
const struct intel_wm_level *r =
&intel_crtc->wm.active.ilk.wm[0];
@@ -3415,11 +3410,12 @@ static void ilk_compute_wm_results(struct drm_device *dev,
/* Find the result with the highest level enabled. Check for enable_fbc_wm in
* case both are at the same level. Prefer r1 in case they're the same. */
-static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
- struct intel_pipe_wm *r1,
- struct intel_pipe_wm *r2)
+static struct intel_pipe_wm *
+ilk_find_best_result(struct drm_i915_private *dev_priv,
+ struct intel_pipe_wm *r1,
+ struct intel_pipe_wm *r2)
{
- int level, max_level = ilk_wm_max_level(to_i915(dev));
+ int level, max_level = ilk_wm_max_level(dev_priv);
int level1 = 0, level2 = 0;
for (level = 1; level <= max_level; level++) {
@@ -3540,7 +3536,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
{
struct ilk_wm_values *previous = &dev_priv->wm.hw;
unsigned int dirty;
- uint32_t val;
+ u32 val;
dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
if (!dirty)
@@ -3638,14 +3634,9 @@ static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
 * FIXME: We still don't have the proper code to detect if we need to apply the WA,
* so assume we'll always need it in order to avoid underruns.
*/
-static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
+static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-
- if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv))
- return true;
-
- return false;
+ return IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv);
}
static bool
@@ -3677,25 +3668,25 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
if (dev_priv->sagv_status == I915_SAGV_ENABLED)
return 0;
- DRM_DEBUG_KMS("Enabling the SAGV\n");
+ DRM_DEBUG_KMS("Enabling SAGV\n");
mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_ENABLE);
- /* We don't need to wait for the SAGV when enabling */
+ /* We don't need to wait for SAGV when enabling */
mutex_unlock(&dev_priv->pcu_lock);
/*
* Some skl systems, pre-release machines in particular,
- * don't actually have an SAGV.
+ * don't actually have SAGV.
*/
if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
return 0;
} else if (ret < 0) {
- DRM_ERROR("Failed to enable the SAGV\n");
+ DRM_ERROR("Failed to enable SAGV\n");
return ret;
}
@@ -3714,7 +3705,7 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
if (dev_priv->sagv_status == I915_SAGV_DISABLED)
return 0;
- DRM_DEBUG_KMS("Disabling the SAGV\n");
+ DRM_DEBUG_KMS("Disabling SAGV\n");
mutex_lock(&dev_priv->pcu_lock);
/* bspec says to keep retrying for at least 1 ms */
@@ -3726,14 +3717,14 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
/*
* Some skl systems, pre-release machines in particular,
- * don't actually have an SAGV.
+ * don't actually have SAGV.
*/
if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
return 0;
} else if (ret < 0) {
- DRM_ERROR("Failed to disable the SAGV (%d)\n", ret);
+ DRM_ERROR("Failed to disable SAGV (%d)\n", ret);
return ret;
}
@@ -3756,15 +3747,15 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
if (!intel_has_sagv(dev_priv))
return false;
- if (IS_GEN9(dev_priv))
+ if (IS_GEN(dev_priv, 9))
sagv_block_time_us = 30;
- else if (IS_GEN10(dev_priv))
+ else if (IS_GEN(dev_priv, 10))
sagv_block_time_us = 20;
else
sagv_block_time_us = 10;
/*
- * SKL+ workaround: bspec recommends we disable the SAGV when we have
+ * SKL+ workaround: bspec recommends we disable SAGV when we have
 * more than one pipe enabled
*
* If there are no active CRTCs, no additional checks need be performed
@@ -3797,7 +3788,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
latency = dev_priv->wm.skl_latency[level];
- if (skl_needs_memory_bw_wa(intel_state) &&
+ if (skl_needs_memory_bw_wa(dev_priv) &&
plane->base.state->fb->modifier ==
I915_FORMAT_MOD_X_TILED)
latency += 15;
@@ -3805,7 +3796,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
/*
* If any of the planes on this pipe don't enable wm levels that
* incur memory latencies higher than sagv_block_time_us we
- * can't enable the SAGV.
+ * can't enable SAGV.
*/
if (latency < sagv_block_time_us)
return false;
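
A hedged model of the per-plane check above: take the latency of the plane's highest enabled watermark level, apply the X-tiled bandwidth workaround, and compare against the SAGV block time (all numbers invented):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int sagv_block_time_us = 30;  /* gen9 value from the code above */
	unsigned int latency = 20;             /* invented: highest enabled level */
	bool x_tiled_wa = true;                /* skl_needs_memory_bw_wa() && X-tiled */

	if (x_tiled_wa)
		latency += 15;  /* same +15 us adjustment as the code above */

	printf("SAGV %s\n", latency < sagv_block_time_us ? "blocked" : "allowed");
	/* 20 + 15 = 35 >= 30, so SAGV stays allowed in this example */
	return 0;
}
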
@@ -3834,8 +3825,13 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
/*
* 12GB/s is maximum BW supported by single DBuf slice.
+ *
+ * FIXME dbuf slice code is broken:
+ * - must wait for planes to stop using the slice before powering it off
+ * - plane straddling both slices is illegal in multi-pipe scenarios
+ * - should validate we stay within the hw bandwidth limits
*/
- if (num_active > 1 || total_data_bw >= GBps(12)) {
+ if (0 && (num_active > 1 || total_data_bw >= GBps(12))) {
ddb->enabled_slices = 2;
} else {
ddb->enabled_slices = 1;
@@ -3934,14 +3930,9 @@ static unsigned int skl_cursor_allocation(int num_active)
static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
struct skl_ddb_entry *entry, u32 reg)
{
- u16 mask;
- if (INTEL_GEN(dev_priv) >= 11)
- mask = ICL_DDB_ENTRY_MASK;
- else
- mask = SKL_DDB_ENTRY_MASK;
- entry->start = reg & mask;
- entry->end = (reg >> DDB_ENTRY_END_SHIFT) & mask;
+ entry->start = reg & DDB_ENTRY_MASK;
+ entry->end = (reg >> DDB_ENTRY_END_SHIFT) & DDB_ENTRY_MASK;
if (entry->end)
entry->end += 1;
@@ -3994,10 +3985,12 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
enum pipe pipe = crtc->pipe;
+ intel_wakeref_t wakeref;
enum plane_id plane_id;
power_domain = POWER_DOMAIN_PIPE(pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
return;
for_each_plane_id_on_crtc(crtc, plane_id)
@@ -4006,7 +3999,7 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
&ddb_y[plane_id],
&ddb_uv[plane_id]);
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
}
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
@@ -4036,7 +4029,7 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate)
{
struct intel_plane *plane = to_intel_plane(pstate->base.plane);
- uint32_t src_w, src_h, dst_w, dst_h;
+ u32 src_w, src_h, dst_w, dst_h;
uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
uint_fixed_16_16_t downscale_h, downscale_w;
@@ -4082,8 +4075,8 @@ skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
return pipe_downscale;
if (crtc_state->pch_pfit.enabled) {
- uint32_t src_w, src_h, dst_w, dst_h;
- uint32_t pfit_size = crtc_state->pch_pfit.size;
+ u32 src_w, src_h, dst_w, dst_h;
+ u32 pfit_size = crtc_state->pch_pfit.size;
uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
uint_fixed_16_16_t downscale_h, downscale_w;
@@ -4116,7 +4109,7 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
const struct drm_plane_state *pstate;
struct intel_plane_state *intel_pstate;
int crtc_clock, dotclk;
- uint32_t pipe_max_pixel_rate;
+ u32 pipe_max_pixel_rate;
uint_fixed_16_16_t pipe_downscale;
uint_fixed_16_16_t max_downscale = u32_to_fixed16(1);
@@ -4172,8 +4165,8 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
{
struct intel_plane *intel_plane =
to_intel_plane(intel_pstate->base.plane);
- uint32_t data_rate;
- uint32_t width = 0, height = 0;
+ u32 data_rate;
+ u32 width = 0, height = 0;
struct drm_framebuffer *fb;
u32 format;
uint_fixed_16_16_t down_scale_amount;
@@ -4306,102 +4299,6 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
return total_data_rate;
}
-static uint16_t
-skl_ddb_min_alloc(const struct drm_plane_state *pstate, const int plane)
-{
- struct drm_framebuffer *fb = pstate->fb;
- struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
- uint32_t src_w, src_h;
- uint32_t min_scanlines = 8;
- uint8_t plane_bpp;
-
- if (WARN_ON(!fb))
- return 0;
-
- /* For packed formats, and uv-plane, return 0 */
- if (plane == 1 && fb->format->format != DRM_FORMAT_NV12)
- return 0;
-
- /* For Non Y-tile return 8-blocks */
- if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
- fb->modifier != I915_FORMAT_MOD_Yf_TILED &&
- fb->modifier != I915_FORMAT_MOD_Y_TILED_CCS &&
- fb->modifier != I915_FORMAT_MOD_Yf_TILED_CCS)
- return 8;
-
- /*
- * Src coordinates are already rotated by 270 degrees for
- * the 90/270 degree plane rotation cases (to match the
- * GTT mapping), hence no need to account for rotation here.
- */
- src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
- src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
-
- /* Halve UV plane width and height for NV12 */
- if (plane == 1) {
- src_w /= 2;
- src_h /= 2;
- }
-
- plane_bpp = fb->format->cpp[plane];
-
- if (drm_rotation_90_or_270(pstate->rotation)) {
- switch (plane_bpp) {
- case 1:
- min_scanlines = 32;
- break;
- case 2:
- min_scanlines = 16;
- break;
- case 4:
- min_scanlines = 8;
- break;
- case 8:
- min_scanlines = 4;
- break;
- default:
- WARN(1, "Unsupported pixel depth %u for rotation",
- plane_bpp);
- min_scanlines = 32;
- }
- }
-
- return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
-}
-
-static void
-skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
- uint16_t *minimum, uint16_t *uv_minimum)
-{
- const struct drm_plane_state *pstate;
- struct drm_plane *plane;
-
- drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
- enum plane_id plane_id = to_intel_plane(plane)->id;
- struct intel_plane_state *plane_state = to_intel_plane_state(pstate);
-
- if (plane_id == PLANE_CURSOR)
- continue;
-
- /* slave plane must be invisible and calculated from master */
- if (!pstate->visible || WARN_ON(plane_state->slave))
- continue;
-
- if (!plane_state->linked_plane) {
- minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
- uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
- } else {
- enum plane_id y_plane_id =
- plane_state->linked_plane->id;
-
- minimum[y_plane_id] = skl_ddb_min_alloc(pstate, 0);
- minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
- }
- }
-
- minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
-}
-
static int
skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct skl_ddb_allocation *ddb /* out */)
@@ -4411,15 +4308,17 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
- uint16_t alloc_size, start;
- uint16_t minimum[I915_MAX_PLANES] = {};
- uint16_t uv_minimum[I915_MAX_PLANES] = {};
+ struct skl_plane_wm *wm;
+ u16 alloc_size, start = 0;
+ u16 total[I915_MAX_PLANES] = {};
+ u16 uv_total[I915_MAX_PLANES] = {};
u64 total_data_rate;
enum plane_id plane_id;
int num_active;
u64 plane_data_rate[I915_MAX_PLANES] = {};
u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
- uint16_t total_min_blocks = 0;
+ u32 blocks;
+ int level;
/* Clear the partitioning for disabled planes. */
memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y));
@@ -4449,81 +4348,135 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
if (alloc_size == 0)
return 0;
- skl_ddb_calc_min(cstate, num_active, minimum, uv_minimum);
+ /* Allocate fixed number of blocks for cursor. */
+ total[PLANE_CURSOR] = skl_cursor_allocation(num_active);
+ alloc_size -= total[PLANE_CURSOR];
+ cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
+ alloc->end - total[PLANE_CURSOR];
+ cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
+
+ if (total_data_rate == 0)
+ return 0;
/*
- * 1. Allocate the mininum required blocks for each active plane
- * and allocate the cursor, it doesn't require extra allocation
- * proportional to the data rate.
+ * Find the highest watermark level for which we can satisfy the block
+ * requirement of active planes.
*/
+ for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
+ blocks = 0;
+ for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ if (plane_id == PLANE_CURSOR)
+ continue;
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
- total_min_blocks += minimum[plane_id];
- total_min_blocks += uv_minimum[plane_id];
+ wm = &cstate->wm.skl.optimal.planes[plane_id];
+ blocks += wm->wm[level].min_ddb_alloc;
+ blocks += wm->uv_wm[level].min_ddb_alloc;
+ }
+
+ if (blocks < alloc_size) {
+ alloc_size -= blocks;
+ break;
+ }
}
- if (total_min_blocks > alloc_size) {
+ if (level < 0) {
DRM_DEBUG_KMS("Requested display configuration exceeds system DDB limitations");
- DRM_DEBUG_KMS("minimum required %d/%d\n", total_min_blocks,
- alloc_size);
+ DRM_DEBUG_KMS("minimum required %d/%d\n", blocks,
+ alloc_size);
return -EINVAL;
}
- alloc_size -= total_min_blocks;
- cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
- cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
-
/*
- * 2. Distribute the remaining space in proportion to the amount of
- * data each plane needs to fetch from memory.
- *
- * FIXME: we may not allocate every single block here.
+ * Grant each plane the blocks it requires at the highest achievable
+ * watermark level, plus an extra share of the leftover blocks
+ * proportional to its relative data rate.
*/
- if (total_data_rate == 0)
- return 0;
-
- start = alloc->start;
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
- u64 data_rate, uv_data_rate;
- uint16_t plane_blocks, uv_plane_blocks;
+ u64 rate;
+ u16 extra;
if (plane_id == PLANE_CURSOR)
continue;
- data_rate = plane_data_rate[plane_id];
-
/*
- * allocation for (packed formats) or (uv-plane part of planar format):
- * promote the expression to 64 bits to avoid overflowing, the
- * result is < available as data_rate / total_data_rate < 1
+ * We've accounted for all active planes; remaining planes are
+ * all disabled.
*/
- plane_blocks = minimum[plane_id];
- plane_blocks += div64_u64(alloc_size * data_rate, total_data_rate);
+ if (total_data_rate == 0)
+ break;
- /* Leave disabled planes at (0,0) */
- if (data_rate) {
- cstate->wm.skl.plane_ddb_y[plane_id].start = start;
- cstate->wm.skl.plane_ddb_y[plane_id].end = start + plane_blocks;
- }
+ wm = &cstate->wm.skl.optimal.planes[plane_id];
+
+ rate = plane_data_rate[plane_id];
+ extra = min_t(u16, alloc_size,
+ DIV64_U64_ROUND_UP(alloc_size * rate,
+ total_data_rate));
+ total[plane_id] = wm->wm[level].min_ddb_alloc + extra;
+ alloc_size -= extra;
+ total_data_rate -= rate;
- start += plane_blocks;
+ if (total_data_rate == 0)
+ break;
- /* Allocate DDB for UV plane for planar format/NV12 */
- uv_data_rate = uv_plane_data_rate[plane_id];
+ rate = uv_plane_data_rate[plane_id];
+ extra = min_t(u16, alloc_size,
+ DIV64_U64_ROUND_UP(alloc_size * rate,
+ total_data_rate));
+ uv_total[plane_id] = wm->uv_wm[level].min_ddb_alloc + extra;
+ alloc_size -= extra;
+ total_data_rate -= rate;
+ }
+ WARN_ON(alloc_size != 0 || total_data_rate != 0);
- uv_plane_blocks = uv_minimum[plane_id];
- uv_plane_blocks += div64_u64(alloc_size * uv_data_rate, total_data_rate);
+ /* Set the actual DDB start/end points for each plane */
+ start = alloc->start;
+ for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ struct skl_ddb_entry *plane_alloc, *uv_plane_alloc;
+
+ if (plane_id == PLANE_CURSOR)
+ continue;
+
+ plane_alloc = &cstate->wm.skl.plane_ddb_y[plane_id];
+ uv_plane_alloc = &cstate->wm.skl.plane_ddb_uv[plane_id];
/* Gen11+ uses a separate plane for UV watermarks */
- WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_plane_blocks);
+ WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
+
+ /* Leave disabled planes at (0,0) */
+ if (total[plane_id]) {
+ plane_alloc->start = start;
+ start += total[plane_id];
+ plane_alloc->end = start;
+ }
+
+ if (uv_total[plane_id]) {
+ uv_plane_alloc->start = start;
+ start += uv_total[plane_id];
+ uv_plane_alloc->end = start;
+ }
+ }
- if (uv_data_rate) {
- cstate->wm.skl.plane_ddb_uv[plane_id].start = start;
- cstate->wm.skl.plane_ddb_uv[plane_id].end =
- start + uv_plane_blocks;
+ /*
+ * When we calculated watermark values we didn't know how high
+ * of a level we'd actually be able to hit, so we just marked
+ * all levels as "enabled." Go back now and disable the ones
+ * that aren't actually possible.
+ */
+ for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
+ for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ wm = &cstate->wm.skl.optimal.planes[plane_id];
+ memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
}
+ }
- start += uv_plane_blocks;
+ /*
+ * Go back and disable the transition watermark if it turns out we
+ * don't have enough DDB blocks for it.
+ */
+ for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ wm = &cstate->wm.skl.optimal.planes[plane_id];
+ if (wm->trans_wm.plane_res_b >= total[plane_id])
+ memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
}
return 0;
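
To make the new control flow easier to follow, here is a hedged, self-contained model of the two-phase scheme: find the highest level whose summed min_ddb_alloc still fits, then hand each plane its minimum for that level plus a data-rate-proportional share of the leftovers (all numbers invented; the real code also walks a separate UV allocation and converts the totals into start/end ranges afterwards):

#include <stdint.h>
#include <stdio.h>

#define NUM_PLANES 2
#define NUM_LEVELS 3

int main(void)
{
	/* invented per-plane minimum blocks needed at wm levels 0..2 */
	uint16_t min_alloc[NUM_PLANES][NUM_LEVELS] = {
		{ 40, 60, 90 },   /* plane 0 */
		{ 30, 50, 80 },   /* plane 1 */
	};
	uint64_t rate[NUM_PLANES] = { 300, 100 }; /* relative data rates */
	uint16_t alloc_size = 150;                /* blocks left after the cursor */
	uint64_t total_rate = rate[0] + rate[1];
	int level, p;

	/* Phase 1: highest level whose summed minimums still fit. */
	for (level = NUM_LEVELS - 1; level >= 0; level--) {
		uint32_t blocks = 0;
		for (p = 0; p < NUM_PLANES; p++)
			blocks += min_alloc[p][level];
		if (blocks < alloc_size) {
			alloc_size -= blocks;
			break;
		}
	}
	if (level < 0)
		return 1; /* not even level 0 fits -> -EINVAL in the driver */

	/* Phase 2: minimum for that level plus a data-rate share of the rest. */
	for (p = 0; p < NUM_PLANES; p++) {
		uint16_t extra = (uint16_t)((alloc_size * rate[p] +
					     total_rate - 1) / total_rate);
		if (extra > alloc_size)
			extra = alloc_size;
		printf("plane %d: %u blocks (level %d)\n",
		       p, (unsigned int)(min_alloc[p][level] + extra), level);
		alloc_size -= extra;
		total_rate -= rate[p];
	}
	/* prints: plane 0: 90 blocks, plane 1: 60 blocks (level 1) */
	return 0;
}
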
@@ -4536,10 +4489,10 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
* 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
*/
static uint_fixed_16_16_t
-skl_wm_method1(const struct drm_i915_private *dev_priv, uint32_t pixel_rate,
- uint8_t cpp, uint32_t latency, uint32_t dbuf_block_size)
+skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate,
+ u8 cpp, u32 latency, u32 dbuf_block_size)
{
- uint32_t wm_intermediate_val;
+ u32 wm_intermediate_val;
uint_fixed_16_16_t ret;
if (latency == 0)
@@ -4554,12 +4507,11 @@ skl_wm_method1(const struct drm_i915_private *dev_priv, uint32_t pixel_rate,
return ret;
}
-static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
- uint32_t pipe_htotal,
- uint32_t latency,
- uint_fixed_16_16_t plane_blocks_per_line)
+static uint_fixed_16_16_t
+skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
+ uint_fixed_16_16_t plane_blocks_per_line)
{
- uint32_t wm_intermediate_val;
+ u32 wm_intermediate_val;
uint_fixed_16_16_t ret;
if (latency == 0)
@@ -4575,8 +4527,8 @@ static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
static uint_fixed_16_16_t
intel_get_linetime_us(const struct intel_crtc_state *cstate)
{
- uint32_t pixel_rate;
- uint32_t crtc_htotal;
+ u32 pixel_rate;
+ u32 crtc_htotal;
uint_fixed_16_16_t linetime_us;
if (!cstate->base.active)
@@ -4593,11 +4545,11 @@ intel_get_linetime_us(const struct intel_crtc_state *cstate)
return linetime_us;
}
-static uint32_t
+static u32
skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate)
{
- uint64_t adjusted_pixel_rate;
+ u64 adjusted_pixel_rate;
uint_fixed_16_16_t downscale_amount;
/* Shouldn't reach here on disabled planes... */
@@ -4624,10 +4576,7 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_plane_state *pstate = &intel_pstate->base;
const struct drm_framebuffer *fb = pstate->fb;
- uint32_t interm_pbpl;
- struct intel_atomic_state *state =
- to_intel_atomic_state(cstate->base.state);
- bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
+ u32 interm_pbpl;
/* only NV12 format has two planes */
if (color_plane == 1 && fb->format->format != DRM_FORMAT_NV12) {
@@ -4663,7 +4612,7 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
intel_pstate);
if (INTEL_GEN(dev_priv) >= 11 &&
- fb->modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 8)
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
wp->dbuf_block_size = 256;
else
wp->dbuf_block_size = 512;
@@ -4688,7 +4637,7 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
wp->y_min_scanlines = 4;
}
- if (apply_memory_bw_wa)
+ if (skl_needs_memory_bw_wa(dev_priv))
wp->y_min_scanlines *= 2;
wp->plane_bytes_per_line = wp->width * wp->cpp;
@@ -4702,7 +4651,7 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
wp->y_min_scanlines);
- } else if (wp->x_tiled && IS_GEN9(dev_priv)) {
+ } else if (wp->x_tiled && IS_GEN(dev_priv, 9)) {
interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
wp->dbuf_block_size);
wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
@@ -4720,28 +4669,34 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
return 0;
}
-static int skl_compute_plane_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *intel_pstate,
- uint16_t ddb_allocation,
- int level,
- const struct skl_wm_params *wp,
- const struct skl_wm_level *result_prev,
- struct skl_wm_level *result /* out */)
+static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
+{
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ return true;
+
+ /* The number of lines are ignored for the level 0 watermark. */
+ return level > 0;
+}
+
+static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
+ const struct intel_plane_state *intel_pstate,
+ int level,
+ const struct skl_wm_params *wp,
+ const struct skl_wm_level *result_prev,
+ struct skl_wm_level *result /* out */)
{
struct drm_i915_private *dev_priv =
to_i915(intel_pstate->base.plane->dev);
- const struct drm_plane_state *pstate = &intel_pstate->base;
- uint32_t latency = dev_priv->wm.skl_latency[level];
+ u32 latency = dev_priv->wm.skl_latency[level];
uint_fixed_16_16_t method1, method2;
uint_fixed_16_16_t selected_result;
- uint32_t res_blocks, res_lines;
- struct intel_atomic_state *state =
- to_intel_atomic_state(cstate->base.state);
- bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
- uint32_t min_disp_buf_needed;
+ u32 res_blocks, res_lines, min_ddb_alloc = 0;
- if (latency == 0)
- return level == 0 ? -EINVAL : 0;
+ if (latency == 0) {
+ /* reject it */
+ result->min_ddb_alloc = U16_MAX;
+ return;
+ }
/* Display WA #1141: kbl,cfl */
if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
@@ -4749,7 +4704,7 @@ static int skl_compute_plane_wm(const struct intel_crtc_state *cstate,
dev_priv->ipc_enabled)
latency += 4;
- if (apply_memory_bw_wa && wp->x_tiled)
+ if (skl_needs_memory_bw_wa(dev_priv) && wp->x_tiled)
latency += 15;
method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
@@ -4766,15 +4721,8 @@ static int skl_compute_plane_wm(const struct intel_crtc_state *cstate,
wp->dbuf_block_size < 1) &&
(wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
selected_result = method2;
- } else if (ddb_allocation >=
- fixed16_to_u32_round_up(wp->plane_blocks_per_line)) {
- if (IS_GEN9(dev_priv) &&
- !IS_GEMINILAKE(dev_priv))
- selected_result = min_fixed16(method1, method2);
- else
- selected_result = method2;
} else if (latency >= wp->linetime_us) {
- if (IS_GEN9(dev_priv) &&
+ if (IS_GEN(dev_priv, 9) &&
!IS_GEMINILAKE(dev_priv))
selected_result = min_fixed16(method1, method2);
else
@@ -4788,85 +4736,76 @@ static int skl_compute_plane_wm(const struct intel_crtc_state *cstate,
res_lines = div_round_up_fixed16(selected_result,
wp->plane_blocks_per_line);
- /* Display WA #1125: skl,bxt,kbl,glk */
- if (level == 0 && wp->rc_surface)
- res_blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
+ if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
+ /* Display WA #1125: skl,bxt,kbl */
+ if (level == 0 && wp->rc_surface)
+ res_blocks +=
+ fixed16_to_u32_round_up(wp->y_tile_minimum);
+
+ /* Display WA #1126: skl,bxt,kbl */
+ if (level >= 1 && level <= 7) {
+ if (wp->y_tiled) {
+ res_blocks +=
+ fixed16_to_u32_round_up(wp->y_tile_minimum);
+ res_lines += wp->y_min_scanlines;
+ } else {
+ res_blocks++;
+ }
- /* Display WA #1126: skl,bxt,kbl,glk */
- if (level >= 1 && level <= 7) {
- if (wp->y_tiled) {
- res_blocks += fixed16_to_u32_round_up(
- wp->y_tile_minimum);
- res_lines += wp->y_min_scanlines;
- } else {
- res_blocks++;
+ /*
+ * Make sure result blocks for higher latency levels are
+	 * at least as high as the level below the current level.
+ * Assumption in DDB algorithm optimization for special
+ * cases. Also covers Display WA #1125 for RC.
+ */
+ if (result_prev->plane_res_b > res_blocks)
+ res_blocks = result_prev->plane_res_b;
}
-
- /*
- * Make sure result blocks for higher latency levels are atleast
- * as high as level below the current level.
- * Assumption in DDB algorithm optimization for special cases.
- * Also covers Display WA #1125 for RC.
- */
- if (result_prev->plane_res_b > res_blocks)
- res_blocks = result_prev->plane_res_b;
}
if (INTEL_GEN(dev_priv) >= 11) {
if (wp->y_tiled) {
- uint32_t extra_lines;
- uint_fixed_16_16_t fp_min_disp_buf_needed;
+ int extra_lines;
if (res_lines % wp->y_min_scanlines == 0)
extra_lines = wp->y_min_scanlines;
else
extra_lines = wp->y_min_scanlines * 2 -
- res_lines % wp->y_min_scanlines;
+ res_lines % wp->y_min_scanlines;
- fp_min_disp_buf_needed = mul_u32_fixed16(res_lines +
- extra_lines,
- wp->plane_blocks_per_line);
- min_disp_buf_needed = fixed16_to_u32_round_up(
- fp_min_disp_buf_needed);
+ min_ddb_alloc = mul_round_up_u32_fixed16(res_lines + extra_lines,
+ wp->plane_blocks_per_line);
} else {
- min_disp_buf_needed = DIV_ROUND_UP(res_blocks * 11, 10);
+ min_ddb_alloc = res_blocks +
+ DIV_ROUND_UP(res_blocks, 10);
}
- } else {
- min_disp_buf_needed = res_blocks;
}
- if ((level > 0 && res_lines > 31) ||
- res_blocks >= ddb_allocation ||
- min_disp_buf_needed >= ddb_allocation) {
- /*
- * If there are no valid level 0 watermarks, then we can't
- * support this display configuration.
- */
- if (level) {
- return 0;
- } else {
- struct drm_plane *plane = pstate->plane;
+ if (!skl_wm_has_lines(dev_priv, level))
+ res_lines = 0;
- DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
- DRM_DEBUG_KMS("[PLANE:%d:%s] blocks required = %u/%u, lines required = %u/31\n",
- plane->base.id, plane->name,
- res_blocks, ddb_allocation, res_lines);
- return -EINVAL;
- }
+ if (res_lines > 31) {
+ /* reject it */
+ result->min_ddb_alloc = U16_MAX;
+ return;
}
- /* The number of lines are ignored for the level 0 watermark. */
+ /*
+ * If res_lines is valid, assume we can use this watermark level
+ * for now. We'll come back and disable it after we calculate the
+ * DDB allocation if it turns out we don't actually have enough
+ * blocks to satisfy it.
+ */
result->plane_res_b = res_blocks;
result->plane_res_l = res_lines;
+ /* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
+ result->min_ddb_alloc = max(min_ddb_alloc, res_blocks) + 1;
result->plane_en = true;
-
- return 0;
}
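
A hedged worked example of the icl+ Y-tiled min_ddb_alloc rounding above (invented numbers; plane_blocks_per_line is a 16.16 fixed-point value in the driver, modelled as a float here):

#include <stdio.h>

int main(void)
{
	/* invented: 10 result lines, 4-line Y-tile granularity, 2.5 blocks/line */
	unsigned int res_lines = 10, y_min_scanlines = 4;
	double blocks_per_line = 2.5;
	unsigned int extra_lines, min_ddb_alloc;

	if (res_lines % y_min_scanlines == 0)
		extra_lines = y_min_scanlines;
	else
		extra_lines = y_min_scanlines * 2 -
			      res_lines % y_min_scanlines;

	/* mul_round_up_u32_fixed16() modelled as a ceiling multiply */
	min_ddb_alloc = (unsigned int)((res_lines + extra_lines) *
				       blocks_per_line + 0.999999);
	printf("extra=%u min_ddb_alloc=%u\n", extra_lines, min_ddb_alloc);
	/* extra = 8 - 2 = 6 lines, so (10 + 6) * 2.5 = 40 blocks */
	return 0;
}
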
-static int
+static void
skl_compute_wm_levels(const struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate,
- uint16_t ddb_blocks,
const struct skl_wm_params *wm_params,
struct skl_wm_level *levels)
{
@@ -4874,45 +4813,30 @@ skl_compute_wm_levels(const struct intel_crtc_state *cstate,
to_i915(intel_pstate->base.plane->dev);
int level, max_level = ilk_wm_max_level(dev_priv);
struct skl_wm_level *result_prev = &levels[0];
- int ret;
for (level = 0; level <= max_level; level++) {
struct skl_wm_level *result = &levels[level];
- ret = skl_compute_plane_wm(cstate,
- intel_pstate,
- ddb_blocks,
- level,
- wm_params,
- result_prev,
- result);
- if (ret)
- return ret;
+ skl_compute_plane_wm(cstate, intel_pstate, level, wm_params,
+ result_prev, result);
result_prev = result;
}
-
- return 0;
}
-static uint32_t
+static u32
skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
{
struct drm_atomic_state *state = cstate->base.state;
struct drm_i915_private *dev_priv = to_i915(state->dev);
uint_fixed_16_16_t linetime_us;
- uint32_t linetime_wm;
+ u32 linetime_wm;
linetime_us = intel_get_linetime_us(cstate);
-
- if (is_fixed16_zero(linetime_us))
- return 0;
-
linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));
- /* Display WA #1135: bxt:ALL GLK:ALL */
- if ((IS_BROXTON(dev_priv) || IS_GEMINILAKE(dev_priv)) &&
- dev_priv->ipc_enabled)
+ /* Display WA #1135: BXT:ALL GLK:ALL */
+ if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
linetime_wm /= 2;
return linetime_wm;
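
Numerically (a hedged sketch with an invented 2200-pixel htotal at a 148500 kHz pixel clock; intel_get_linetime_us() computes htotal * 1000 / pixel_rate in 16.16 fixed point, modelled as a float here):

#include <stdio.h>

int main(void)
{
	/* invented mode: htotal 2200 pixels, 148500 kHz pixel clock */
	unsigned int htotal = 2200, pixel_rate_khz = 148500;
	double linetime_us = htotal * 1000.0 / pixel_rate_khz;   /* ~14.81 us */
	unsigned int linetime_wm = (unsigned int)(8 * linetime_us + 0.999999);

	printf("linetime=%.2fus wm=%u\n", linetime_us, linetime_wm); /* wm = 119 */
	/* Display WA #1135 (gen9 LP with IPC) would then halve linetime_wm */
	return 0;
}
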
@@ -4920,14 +4844,13 @@ skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
const struct skl_wm_params *wp,
- struct skl_plane_wm *wm,
- uint16_t ddb_allocation)
+ struct skl_plane_wm *wm)
{
struct drm_device *dev = cstate->base.crtc->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
- uint16_t trans_min, trans_y_tile_min;
- const uint16_t trans_amount = 10; /* This is configurable amount */
- uint16_t wm0_sel_res_b, trans_offset_b, res_blocks;
+ u16 trans_min, trans_y_tile_min;
+	const u16 trans_amount = 10; /* This is a configurable amount */
+ u16 wm0_sel_res_b, trans_offset_b, res_blocks;
/* Transition WM are not recommended by HW team for GEN9 */
if (INTEL_GEN(dev_priv) <= 9)
@@ -4956,8 +4879,8 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
wm0_sel_res_b = wm->wm[0].plane_res_b - 1;
if (wp->y_tiled) {
- trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2,
- wp->y_tile_minimum);
+ trans_y_tile_min =
+ (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
res_blocks = max(wm0_sel_res_b, trans_y_tile_min) +
trans_offset_b;
} else {
@@ -4969,12 +4892,13 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
}
- res_blocks += 1;
-
- if (res_blocks < ddb_allocation) {
- wm->trans_wm.plane_res_b = res_blocks;
- wm->trans_wm.plane_en = true;
- }
+ /*
+ * Just assume we can enable the transition watermark. After
+ * computing the DDB we'll come back and disable it if that
+ * assumption turns out to be false.
+ */
+ wm->trans_wm.plane_res_b = res_blocks + 1;
+ wm->trans_wm.plane_en = true;
}
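
A hedged sketch of the transition watermark arithmetic above; trans_min is device-dependent and not visible in this hunk, so the 14 below is purely an assumption:

#include <stdio.h>

int main(void)
{
	unsigned int trans_min = 14, trans_amount = 10; /* trans_min assumed */
	unsigned int wm0_sel_res_b = 40;        /* wm[0].plane_res_b - 1 */
	unsigned int trans_y_tile_min = 2 * 16; /* 2 * y_tile_minimum, rounded up */
	unsigned int trans_offset_b = trans_min + trans_amount;
	unsigned int res_blocks;

	/* Y-tiled case from the code above */
	res_blocks = (wm0_sel_res_b > trans_y_tile_min ?
		      wm0_sel_res_b : trans_y_tile_min) + trans_offset_b;
	printf("trans_wm = %u blocks\n", res_blocks + 1); /* 40 + 24 + 1 = 65 */
	return 0;
}
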
static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
@@ -4982,7 +4906,6 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
enum plane_id plane_id, int color_plane)
{
struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
- u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_y[plane_id]);
struct skl_wm_params wm_params;
int ret;
@@ -4991,12 +4914,8 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- ret = skl_compute_wm_levels(crtc_state, plane_state,
- ddb_blocks, &wm_params, wm->wm);
- if (ret)
- return ret;
-
- skl_compute_transition_wm(crtc_state, &wm_params, wm, ddb_blocks);
+ skl_compute_wm_levels(crtc_state, plane_state, &wm_params, wm->wm);
+ skl_compute_transition_wm(crtc_state, &wm_params, wm);
return 0;
}
@@ -5006,7 +4925,6 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
enum plane_id plane_id)
{
struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
- u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_uv[plane_id]);
struct skl_wm_params wm_params;
int ret;
@@ -5018,10 +4936,7 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- ret = skl_compute_wm_levels(crtc_state, plane_state,
- ddb_blocks, &wm_params, wm->uv_wm);
- if (ret)
- return ret;
+ skl_compute_wm_levels(crtc_state, plane_state, &wm_params, wm->uv_wm);
return 0;
}
@@ -5139,7 +5054,7 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
i915_reg_t reg,
const struct skl_wm_level *level)
{
- uint32_t val = 0;
+ u32 val = 0;
if (level->plane_en) {
val |= PLANE_WM_EN;
@@ -5230,6 +5145,23 @@ static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
}
+static bool skl_pipe_wm_equals(struct intel_crtc *crtc,
+ const struct skl_pipe_wm *wm1,
+ const struct skl_pipe_wm *wm2)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ if (!skl_plane_wm_equals(dev_priv,
+ &wm1->planes[plane_id],
+ &wm2->planes[plane_id]))
+ return false;
+ }
+
+ return wm1->linetime == wm2->linetime;
+}
+
static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
const struct skl_ddb_entry *b)
{
@@ -5251,35 +5183,32 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
return false;
}
-static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
+static int skl_update_pipe_wm(struct intel_crtc_state *cstate,
const struct skl_pipe_wm *old_pipe_wm,
struct skl_pipe_wm *pipe_wm, /* out */
bool *changed /* out */)
{
- struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
+ struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
int ret;
- ret = skl_build_pipe_wm(intel_cstate, pipe_wm);
+ ret = skl_build_pipe_wm(cstate, pipe_wm);
if (ret)
return ret;
- if (!memcmp(old_pipe_wm, pipe_wm, sizeof(*pipe_wm)))
- *changed = false;
- else
- *changed = true;
+ *changed = !skl_pipe_wm_equals(crtc, old_pipe_wm, pipe_wm);
return 0;
}
-static uint32_t
-pipes_modified(struct drm_atomic_state *state)
+static u32
+pipes_modified(struct intel_atomic_state *state)
{
- struct drm_crtc *crtc;
- struct drm_crtc_state *cstate;
- uint32_t i, ret = 0;
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *cstate;
+ u32 i, ret = 0;
- for_each_new_crtc_in_state(state, crtc, cstate, i)
- ret |= drm_crtc_mask(crtc);
+ for_each_new_intel_crtc_in_state(state, crtc, cstate, i)
+ ret |= drm_crtc_mask(&crtc->base);
return ret;
}
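
The return value is simply a bitmask of modified CRTC indices (drm_crtc_mask() is 1 << drm_crtc_index(crtc)); a tiny illustration with invented pipes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* invented commit touching pipes A and C (indices 0 and 2) */
	uint32_t ret = 0;
	int modified[] = { 0, 2 };
	unsigned int i;

	for (i = 0; i < sizeof(modified) / sizeof(modified[0]); i++)
		ret |= 1u << modified[i]; /* drm_crtc_mask() equivalent */

	printf("pipes_modified = 0x%x\n", ret); /* 0x5 */
	return 0;
}
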
@@ -5314,11 +5243,10 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
}
static int
-skl_compute_ddb(struct drm_atomic_state *state)
+skl_compute_ddb(struct intel_atomic_state *state)
{
- const struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
+ const struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
struct intel_crtc_state *old_crtc_state;
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
@@ -5326,7 +5254,7 @@ skl_compute_ddb(struct drm_atomic_state *state)
memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
- for_each_oldnew_intel_crtc_in_state(intel_state, crtc, old_crtc_state,
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
if (ret)
@@ -5372,15 +5300,13 @@ skl_print_wm_changes(struct intel_atomic_state *state)
}
static int
-skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
+skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
{
- struct drm_device *dev = state->dev;
+ struct drm_device *dev = state->base.dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
- const struct drm_crtc *crtc;
- const struct drm_crtc_state *cstate;
- struct intel_crtc *intel_crtc;
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- uint32_t realloc_pipes = pipes_modified(state);
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
+ u32 realloc_pipes = pipes_modified(state);
int ret, i;
/*
@@ -5398,7 +5324,7 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
* since any racing commits that want to update them would need to
* hold _all_ CRTC state mutexes.
*/
- for_each_new_crtc_in_state(state, crtc, cstate, i)
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
(*changed) = true;
if (!*changed)
@@ -5412,20 +5338,20 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
*/
if (dev_priv->wm.distrust_bios_wm) {
ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
- state->acquire_ctx);
+ state->base.acquire_ctx);
if (ret)
return ret;
- intel_state->active_pipe_changes = ~0;
+ state->active_pipe_changes = ~0;
/*
- * We usually only initialize intel_state->active_crtcs if we
+ * We usually only initialize state->active_crtcs if
* we're doing a modeset; make sure this field is always
* initialized during the sanitization process that happens
* on the first commit too.
*/
- if (!intel_state->modeset)
- intel_state->active_crtcs = dev_priv->active_crtcs;
+ if (!state->modeset)
+ state->active_crtcs = dev_priv->active_crtcs;
}
/*
@@ -5441,21 +5367,19 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
* any other display updates race with this transaction, so we need
* to grab the lock on *all* CRTC's.
*/
- if (intel_state->active_pipe_changes || intel_state->modeset) {
+ if (state->active_pipe_changes || state->modeset) {
realloc_pipes = ~0;
- intel_state->wm_results.dirty_pipes = ~0;
+ state->wm_results.dirty_pipes = ~0;
}
/*
* We're not recomputing for the pipes not included in the commit, so
* make sure we start with the current state.
*/
- for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
- struct intel_crtc_state *cstate;
-
- cstate = intel_atomic_get_crtc_state(state, intel_crtc);
- if (IS_ERR(cstate))
- return PTR_ERR(cstate);
+ for_each_intel_crtc_mask(dev, crtc, realloc_pipes) {
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
}
return 0;
@@ -5522,12 +5446,12 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
}
static int
-skl_compute_wm(struct drm_atomic_state *state)
+skl_compute_wm(struct intel_atomic_state *state)
{
- struct drm_crtc *crtc;
- struct drm_crtc_state *cstate;
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct skl_ddb_values *results = &intel_state->wm_results;
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *cstate;
+ struct intel_crtc_state *old_crtc_state;
+ struct skl_ddb_values *results = &state->wm_results;
struct skl_pipe_wm *pipe_wm;
bool changed = false;
int ret, i;
@@ -5539,47 +5463,35 @@ skl_compute_wm(struct drm_atomic_state *state)
if (ret || !changed)
return ret;
- ret = skl_compute_ddb(state);
- if (ret)
- return ret;
-
/*
* Calculate WM's for all pipes that are part of this transaction.
- * Note that the DDB allocation above may have added more CRTC's that
+ * Note that skl_ddb_add_affected_pipes may have added more CRTCs that
* weren't otherwise being modified (and set bits in dirty_pipes) if
* pipe allocations had to change.
- *
- * FIXME: Now that we're doing this in the atomic check phase, we
- * should allow skl_update_pipe_wm() to return failure in cases where
- * no suitable watermark values can be found.
*/
- for_each_new_crtc_in_state(state, crtc, cstate, i) {
- struct intel_crtc_state *intel_cstate =
- to_intel_crtc_state(cstate);
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ cstate, i) {
const struct skl_pipe_wm *old_pipe_wm =
- &to_intel_crtc_state(crtc->state)->wm.skl.optimal;
+ &old_crtc_state->wm.skl.optimal;
- pipe_wm = &intel_cstate->wm.skl.optimal;
+ pipe_wm = &cstate->wm.skl.optimal;
ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, &changed);
if (ret)
return ret;
- ret = skl_wm_add_affected_planes(intel_state,
- to_intel_crtc(crtc));
+ ret = skl_wm_add_affected_planes(state, crtc);
if (ret)
return ret;
if (changed)
- results->dirty_pipes |= drm_crtc_mask(crtc);
-
- if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
- /* This pipe's WM's did not change */
- continue;
-
- intel_cstate->update_wm_pre = true;
+ results->dirty_pipes |= drm_crtc_mask(&crtc->base);
}
- skl_print_wm_changes(intel_state);
+ ret = skl_compute_ddb(state);
+ if (ret)
+ return ret;
+
+ skl_print_wm_changes(state);
return 0;
}
@@ -5617,13 +5529,13 @@ static void skl_initial_wm(struct intel_atomic_state *state,
mutex_unlock(&dev_priv->wm.wm_mutex);
}
-static void ilk_compute_wm_config(struct drm_device *dev,
+static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
struct intel_wm_config *config)
{
struct intel_crtc *crtc;
/* Compute the currently _active_ config */
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
if (!wm->pipe_enabled)
@@ -5637,25 +5549,24 @@ static void ilk_compute_wm_config(struct drm_device *dev,
static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct ilk_wm_maximums max;
struct intel_wm_config config = {};
struct ilk_wm_values results = {};
enum intel_ddb_partitioning partitioning;
- ilk_compute_wm_config(dev, &config);
+ ilk_compute_wm_config(dev_priv, &config);
- ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
- ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
+ ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
+ ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
/* 5/6 split only in single pipe config on IVB+ */
if (INTEL_GEN(dev_priv) >= 7 &&
config.num_pipes_active == 1 && config.sprites_enabled) {
- ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
- ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
+ ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
+ ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
- best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
+ best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
} else {
best_lp_wm = &lp_wm_1_2;
}
@@ -5663,7 +5574,7 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
partitioning = (best_lp_wm == &lp_wm_1_2) ?
INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
- ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
+ ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
ilk_write_wm_values(dev_priv, &results);
}
@@ -5694,7 +5605,7 @@ static void ilk_optimize_watermarks(struct intel_atomic_state *state,
mutex_unlock(&dev_priv->wm.wm_mutex);
}
-static inline void skl_wm_level_from_reg_val(uint32_t val,
+static inline void skl_wm_level_from_reg_val(u32 val,
struct skl_wm_level *level)
{
level->plane_en = val & PLANE_WM_EN;
@@ -5703,19 +5614,18 @@ static inline void skl_wm_level_from_reg_val(uint32_t val,
PLANE_WM_LINES_MASK;
}
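skl_wm_level_from_reg_val above unpacks a watermark register value into enable/blocks/lines fields. A standalone sketch of the same mask-and-shift decode; the field layout below is illustrative only (the real PLANE_WM_* masks and shifts live in i915_reg.h):

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout, not the i915 register definitions. */
#define WM_EN          (1u << 31)
#define WM_LINES_SHIFT 14
#define WM_LINES_MASK  0x1f
#define WM_BLOCKS_MASK 0x7ff

struct wm_level { int en; unsigned blocks, lines; };

static void wm_level_from_val(uint32_t val, struct wm_level *l)
{
	l->en = !!(val & WM_EN);
	l->blocks = val & WM_BLOCKS_MASK;
	l->lines = (val >> WM_LINES_SHIFT) & WM_LINES_MASK;
}

int main(void)
{
	struct wm_level l;

	wm_level_from_val(WM_EN | (3u << WM_LINES_SHIFT) | 64, &l);
	printf("en=%d blocks=%u lines=%u\n", l.en, l.blocks, l.lines);
	return 0;
}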
-void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
+void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
struct skl_pipe_wm *out)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
int level, max_level;
enum plane_id plane_id;
- uint32_t val;
+ u32 val;
max_level = ilk_wm_max_level(dev_priv);
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_plane_wm *wm = &out->planes[plane_id];
for (level = 0; level <= max_level; level++) {
@@ -5735,30 +5645,27 @@ void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
skl_wm_level_from_reg_val(val, &wm->trans_wm);
}
- if (!intel_crtc->active)
+ if (!crtc->active)
return;
out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
}
-void skl_wm_get_hw_state(struct drm_device *dev)
+void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct skl_ddb_values *hw = &dev_priv->wm.skl_hw;
struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
- struct drm_crtc *crtc;
- struct intel_crtc *intel_crtc;
+ struct intel_crtc *crtc;
struct intel_crtc_state *cstate;
skl_ddb_get_hw_state(dev_priv, ddb);
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- intel_crtc = to_intel_crtc(crtc);
- cstate = to_intel_crtc_state(crtc->state);
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ cstate = to_intel_crtc_state(crtc->base.state);
skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);
- if (intel_crtc->active)
- hw->dirty_pipes |= drm_crtc_mask(crtc);
+ if (crtc->active)
+ hw->dirty_pipes |= drm_crtc_mask(&crtc->base);
}
if (dev_priv->active_crtcs) {
@@ -5767,15 +5674,14 @@ void skl_wm_get_hw_state(struct drm_device *dev)
}
}
-static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
+static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct ilk_wm_values *hw = &dev_priv->wm.hw;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
+ struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->base.state);
struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
- enum pipe pipe = intel_crtc->pipe;
+ enum pipe pipe = crtc->pipe;
static const i915_reg_t wm0_pipe_reg[] = {
[PIPE_A] = WM0_PIPEA_ILK,
[PIPE_B] = WM0_PIPEB_ILK,
@@ -5788,7 +5694,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
memset(active, 0, sizeof(*active));
- active->pipe_enabled = intel_crtc->active;
+ active->pipe_enabled = crtc->active;
if (active->pipe_enabled) {
u32 tmp = hw->wm_pipe[pipe];
@@ -5816,7 +5722,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
active->wm[level].enable = true;
}
- intel_crtc->wm.active.ilk = *active;
+ crtc->wm.active.ilk = *active;
}
#define _FW_WM(value, plane) \
@@ -5827,7 +5733,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
struct g4x_wm_values *wm)
{
- uint32_t tmp;
+ u32 tmp;
tmp = I915_READ(DSPFW1);
wm->sr.plane = _FW_WM(tmp, SR);
@@ -5854,7 +5760,7 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
struct vlv_wm_values *wm)
{
enum pipe pipe;
- uint32_t tmp;
+ u32 tmp;
for_each_pipe(dev_priv, pipe) {
tmp = I915_READ(VLV_DDL(pipe));
@@ -5926,9 +5832,8 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
#undef _FW_WM
#undef _FW_WM_VLV
-void g4x_wm_get_hw_state(struct drm_device *dev)
+void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct g4x_wm_values *wm = &dev_priv->wm.g4x;
struct intel_crtc *crtc;
@@ -5936,7 +5841,7 @@ void g4x_wm_get_hw_state(struct drm_device *dev)
wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct g4x_wm_state *active = &crtc->wm.active.g4x;
@@ -6067,9 +5972,8 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->wm.wm_mutex);
}
-void vlv_wm_get_hw_state(struct drm_device *dev)
+void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct vlv_wm_values *wm = &dev_priv->wm.vlv;
struct intel_crtc *crtc;
u32 val;
@@ -6113,7 +6017,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
mutex_unlock(&dev_priv->pcu_lock);
}
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct vlv_wm_state *active = &crtc->wm.active.vlv;
@@ -6230,15 +6134,14 @@ static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
*/
}
-void ilk_wm_get_hw_state(struct drm_device *dev)
+void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct ilk_wm_values *hw = &dev_priv->wm.hw;
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
ilk_init_lp_watermarks(dev_priv);
- for_each_crtc(dev, crtc)
+ for_each_intel_crtc(&dev_priv->drm, crtc)
ilk_pipe_wm_get_hw_state(crtc);
hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
@@ -6339,10 +6242,6 @@ void intel_init_ipc(struct drm_i915_private *dev_priv)
*/
DEFINE_SPINLOCK(mchdev_lock);
-/* Global for IPS driver to get at the current i915 device. Protected by
- * mchdev_lock. */
-static struct drm_i915_private *i915_mch_dev;
-
bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
{
u16 rgvswctl;
@@ -6805,7 +6704,7 @@ void gen6_rps_boost(struct i915_request *rq,
if (!rps->enabled)
return;
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+ if (i915_request_signaled(rq))
return;
/* Serializes with i915_request_retire() */
@@ -7049,7 +6948,7 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* Program defaults and thresholds for RPS */
- if (IS_GEN9(dev_priv))
+ if (IS_GEN(dev_priv, 9))
I915_WRITE(GEN6_RC_VIDEO_FREQ,
GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq));
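The hunks that follow convert the boolean IS_GENx(dev_priv) macros to IS_GEN(dev_priv, n) with the generation as an argument. A standalone analogue of a mask-based generation test, assuming an i915-like precomputed generation bitmask; the names below are illustrative, not the real i915 definitions:

#include <stdio.h>

struct device_info { unsigned int gen_mask; };

#define GEN_MASK(n)      (1u << ((n) - 1))
#define DEV_IS_GEN(d, n) (!!((d)->gen_mask & GEN_MASK(n)))

int main(void)
{
	struct device_info snb = { .gen_mask = GEN_MASK(6) };

	printf("gen6? %d\n", DEV_IS_GEN(&snb, 6)); /* 1 */
	printf("gen5? %d\n", DEV_IS_GEN(&snb, 5)); /* 0 */
	return 0;
}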
@@ -7285,9 +7184,9 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
rc6vids = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
- if (IS_GEN6(dev_priv) && ret) {
+ if (IS_GEN(dev_priv, 6) && ret) {
DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
- } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+ } else if (IS_GEN(dev_priv, 6) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
rc6vids &= 0xffff00;
@@ -7412,7 +7311,7 @@ static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
- switch (INTEL_INFO(dev_priv)->sseu.eu_total) {
+ switch (RUNTIME_INFO(dev_priv)->sseu.eu_total) {
case 8:
/* (2 * 4) config */
rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
@@ -7985,16 +7884,17 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
- unsigned long val;
+ intel_wakeref_t wakeref;
+ unsigned long val = 0;
- if (!IS_GEN5(dev_priv))
+ if (!IS_GEN(dev_priv, 5))
return 0;
- spin_lock_irq(&mchdev_lock);
-
- val = __i915_chipset_val(dev_priv);
-
- spin_unlock_irq(&mchdev_lock);
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ spin_lock_irq(&mchdev_lock);
+ val = __i915_chipset_val(dev_priv);
+ spin_unlock_irq(&mchdev_lock);
+ }
return val;
}
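Here the explicit wakeref handling is wrapped in with_intel_runtime_pm(), which scopes a runtime-PM reference to the following block. Macros of this shape are conventionally built on a degenerate for-statement so the release expression runs when the block finishes. A standalone sketch of the idiom with dummy acquire/release functions, not the actual intel_runtime_pm implementation:

#include <stdio.h>

typedef int wakeref_t;

static wakeref_t resource_get(void)         { puts("get"); return 1; }
static void      resource_put(wakeref_t wf) { (void)wf; puts("put"); }

/* Body runs once with the resource held; the release runs as the
 * "increment" expression before the loop condition fails. */
#define with_resource(wf) \
	for ((wf) = resource_get(); (wf); resource_put(wf), (wf) = 0)

int main(void)
{
	wakeref_t wakeref;

	with_resource(wakeref) {
		puts("critical section runs with the resource held");
	}
	return 0;
}

One sharp edge of the idiom: a break or return inside the block skips the for-statement's increment expression, and with it the release, so callers must leave the block normally.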
@@ -8071,14 +7971,16 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
- if (!IS_GEN5(dev_priv))
- return;
+ intel_wakeref_t wakeref;
- spin_lock_irq(&mchdev_lock);
-
- __i915_update_gfx_val(dev_priv);
+ if (!IS_GEN(dev_priv, 5))
+ return;
- spin_unlock_irq(&mchdev_lock);
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ spin_lock_irq(&mchdev_lock);
+ __i915_update_gfx_val(dev_priv);
+ spin_unlock_irq(&mchdev_lock);
+ }
}
static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
@@ -8120,18 +8022,34 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
- unsigned long val;
+ intel_wakeref_t wakeref;
+ unsigned long val = 0;
- if (!IS_GEN5(dev_priv))
+ if (!IS_GEN(dev_priv, 5))
return 0;
- spin_lock_irq(&mchdev_lock);
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ spin_lock_irq(&mchdev_lock);
+ val = __i915_gfx_val(dev_priv);
+ spin_unlock_irq(&mchdev_lock);
+ }
- val = __i915_gfx_val(dev_priv);
+ return val;
+}
- spin_unlock_irq(&mchdev_lock);
+static struct drm_i915_private __rcu *i915_mch_dev;
- return val;
+static struct drm_i915_private *mchdev_get(void)
+{
+ struct drm_i915_private *i915;
+
+ rcu_read_lock();
+ i915 = rcu_dereference(i915_mch_dev);
+ if (i915 && !kref_get_unless_zero(&i915->drm.ref))
+ i915 = NULL;
+ rcu_read_unlock();
+
+ return i915;
}
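mchdev_get() above pairs an RCU-protected load of i915_mch_dev with a kref_get_unless_zero() on the drm device, so a lookup succeeds only while the device's refcount is still nonzero; the exported i915_gpu_* entry points then follow get, use, drm_dev_put(). The RCU half needs kernel (or liburcu) support, but the try-get half can be modelled in standalone C11 atomics. A sketch, with illustrative names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct object { atomic_uint refcount; };

/* Take a reference only if the object is still live (refcount > 0),
 * mirroring kref_get_unless_zero(). */
static bool get_unless_zero(struct object *obj)
{
	unsigned int old = atomic_load(&obj->refcount);

	do {
		if (old == 0)
			return false; /* already dying; lookup must fail */
	} while (!atomic_compare_exchange_weak(&obj->refcount,
					       &old, old + 1));

	return true;
}

int main(void)
{
	struct object live = { 1 }, dead = { 0 };

	printf("live: %d\n", get_unless_zero(&live)); /* 1 */
	printf("dead: %d\n", get_unless_zero(&dead)); /* 0 */
	return 0;
}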
/**
@@ -8142,23 +8060,24 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
*/
unsigned long i915_read_mch_val(void)
{
- struct drm_i915_private *dev_priv;
- unsigned long chipset_val, graphics_val, ret = 0;
-
- spin_lock_irq(&mchdev_lock);
- if (!i915_mch_dev)
- goto out_unlock;
- dev_priv = i915_mch_dev;
-
- chipset_val = __i915_chipset_val(dev_priv);
- graphics_val = __i915_gfx_val(dev_priv);
+ struct drm_i915_private *i915;
+ unsigned long chipset_val = 0;
+ unsigned long graphics_val = 0;
+ intel_wakeref_t wakeref;
- ret = chipset_val + graphics_val;
+ i915 = mchdev_get();
+ if (!i915)
+ return 0;
-out_unlock:
- spin_unlock_irq(&mchdev_lock);
+ with_intel_runtime_pm(i915, wakeref) {
+ spin_lock_irq(&mchdev_lock);
+ chipset_val = __i915_chipset_val(i915);
+ graphics_val = __i915_gfx_val(i915);
+ spin_unlock_irq(&mchdev_lock);
+ }
- return ret;
+ drm_dev_put(&i915->drm);
+ return chipset_val + graphics_val;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
@@ -8169,23 +8088,19 @@ EXPORT_SYMBOL_GPL(i915_read_mch_val);
*/
bool i915_gpu_raise(void)
{
- struct drm_i915_private *dev_priv;
- bool ret = true;
+ struct drm_i915_private *i915;
- spin_lock_irq(&mchdev_lock);
- if (!i915_mch_dev) {
- ret = false;
- goto out_unlock;
- }
- dev_priv = i915_mch_dev;
-
- if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
- dev_priv->ips.max_delay--;
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
-out_unlock:
+ spin_lock_irq(&mchdev_lock);
+ if (i915->ips.max_delay > i915->ips.fmax)
+ i915->ips.max_delay--;
spin_unlock_irq(&mchdev_lock);
- return ret;
+ drm_dev_put(&i915->drm);
+ return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);
@@ -8197,23 +8112,19 @@ EXPORT_SYMBOL_GPL(i915_gpu_raise);
*/
bool i915_gpu_lower(void)
{
- struct drm_i915_private *dev_priv;
- bool ret = true;
+ struct drm_i915_private *i915;
- spin_lock_irq(&mchdev_lock);
- if (!i915_mch_dev) {
- ret = false;
- goto out_unlock;
- }
- dev_priv = i915_mch_dev;
-
- if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
- dev_priv->ips.max_delay++;
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
-out_unlock:
+ spin_lock_irq(&mchdev_lock);
+ if (i915->ips.max_delay < i915->ips.min_delay)
+ i915->ips.max_delay++;
spin_unlock_irq(&mchdev_lock);
- return ret;
+ drm_dev_put(&i915->drm);
+ return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
@@ -8224,13 +8135,16 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
*/
bool i915_gpu_busy(void)
{
- bool ret = false;
+ struct drm_i915_private *i915;
+ bool ret;
- spin_lock_irq(&mchdev_lock);
- if (i915_mch_dev)
- ret = i915_mch_dev->gt.awake;
- spin_unlock_irq(&mchdev_lock);
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
+ ret = i915->gt.awake;
+
+ drm_dev_put(&i915->drm);
return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);
@@ -8243,24 +8157,19 @@ EXPORT_SYMBOL_GPL(i915_gpu_busy);
*/
bool i915_gpu_turbo_disable(void)
{
- struct drm_i915_private *dev_priv;
- bool ret = true;
-
- spin_lock_irq(&mchdev_lock);
- if (!i915_mch_dev) {
- ret = false;
- goto out_unlock;
- }
- dev_priv = i915_mch_dev;
-
- dev_priv->ips.max_delay = dev_priv->ips.fstart;
+ struct drm_i915_private *i915;
+ bool ret;
- if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
- ret = false;
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
-out_unlock:
+ spin_lock_irq(&mchdev_lock);
+ i915->ips.max_delay = i915->ips.fstart;
+ ret = ironlake_set_drps(i915, i915->ips.fstart);
spin_unlock_irq(&mchdev_lock);
+ drm_dev_put(&i915->drm);
return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
@@ -8289,18 +8198,14 @@ void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
/* We only register the i915 ips part with intel-ips once everything is
* set up, to avoid intel-ips sneaking in and reading bogus values. */
- spin_lock_irq(&mchdev_lock);
- i915_mch_dev = dev_priv;
- spin_unlock_irq(&mchdev_lock);
+ rcu_assign_pointer(i915_mch_dev, dev_priv);
ips_ping_for_i915_load();
}
void intel_gpu_ips_teardown(void)
{
- spin_lock_irq(&mchdev_lock);
- i915_mch_dev = NULL;
- spin_unlock_irq(&mchdev_lock);
+ rcu_assign_pointer(i915_mch_dev, NULL);
}
static void intel_init_emon(struct drm_i915_private *dev_priv)
@@ -8410,7 +8315,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
intel_freq_opcode(dev_priv, 450));
/* After setting max-softlimit, find the overclock max freq */
- if (IS_GEN6(dev_priv) ||
+ if (IS_GEN(dev_priv, 6) ||
IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
u32 params = 0;
@@ -8639,7 +8544,7 @@ static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
{
- uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
+ u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
/*
* Required for FBC
@@ -8711,7 +8616,7 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
int pipe;
- uint32_t val;
+ u32 val;
/*
* On Ibex Peak and Cougar Point, we need to disable clock
@@ -8746,7 +8651,7 @@ static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
{
- uint32_t tmp;
+ u32 tmp;
tmp = I915_READ(MCH_SSKPD);
if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
@@ -8756,7 +8661,7 @@ static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
{
- uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
+ u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
@@ -8850,7 +8755,7 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
- uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
+ u32 reg = I915_READ(GEN7_FF_THREAD_MODE);
/*
* WaVSThreadDispatchOverride:ivb,vlv
@@ -8886,7 +8791,7 @@ static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
{
if (HAS_PCH_LPT_LP(dev_priv)) {
- uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ u32 val = I915_READ(SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
@@ -9124,7 +9029,7 @@ static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
{
- uint32_t snpcr;
+ u32 snpcr;
I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
@@ -9333,7 +9238,7 @@ static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
{
- uint32_t dspclk_gate;
+ u32 dspclk_gate;
I915_WRITE(RENCLK_GATE_D1, 0);
I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
@@ -9480,9 +9385,9 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.init_clock_gating = ivb_init_clock_gating;
else if (IS_VALLEYVIEW(dev_priv))
dev_priv->display.init_clock_gating = vlv_init_clock_gating;
- else if (IS_GEN6(dev_priv))
+ else if (IS_GEN(dev_priv, 6))
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
- else if (IS_GEN5(dev_priv))
+ else if (IS_GEN(dev_priv, 5))
dev_priv->display.init_clock_gating = ilk_init_clock_gating;
else if (IS_G4X(dev_priv))
dev_priv->display.init_clock_gating = g4x_init_clock_gating;
@@ -9490,11 +9395,11 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.init_clock_gating = i965gm_init_clock_gating;
else if (IS_I965G(dev_priv))
dev_priv->display.init_clock_gating = i965g_init_clock_gating;
- else if (IS_GEN3(dev_priv))
+ else if (IS_GEN(dev_priv, 3))
dev_priv->display.init_clock_gating = gen3_init_clock_gating;
else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
dev_priv->display.init_clock_gating = i85x_init_clock_gating;
- else if (IS_GEN2(dev_priv))
+ else if (IS_GEN(dev_priv, 2))
dev_priv->display.init_clock_gating = i830_init_clock_gating;
else {
MISSING_CASE(INTEL_DEVID(dev_priv));
@@ -9508,7 +9413,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
/* For cxsr */
if (IS_PINEVIEW(dev_priv))
i915_pineview_get_mem_freq(dev_priv);
- else if (IS_GEN5(dev_priv))
+ else if (IS_GEN(dev_priv, 5))
i915_ironlake_get_mem_freq(dev_priv);
/* For FIFO watermark updates */
@@ -9520,9 +9425,9 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
} else if (HAS_PCH_SPLIT(dev_priv)) {
ilk_setup_wm_latency(dev_priv);
- if ((IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[1] &&
+ if ((IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[1] &&
dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
- (!IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[0] &&
+ (!IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[0] &&
dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
dev_priv->display.compute_intermediate_wm =
@@ -9563,12 +9468,12 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->display.update_wm = NULL;
} else
dev_priv->display.update_wm = pineview_update_wm;
- } else if (IS_GEN4(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 4)) {
dev_priv->display.update_wm = i965_update_wm;
- } else if (IS_GEN3(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 3)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
- } else if (IS_GEN2(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 2)) {
if (INTEL_INFO(dev_priv)->num_pipes == 1) {
dev_priv->display.update_wm = i845_update_wm;
dev_priv->display.get_fifo_size = i845_get_fifo_size;
@@ -9583,7 +9488,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
{
- uint32_t flags =
+ u32 flags =
I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
switch (flags) {
@@ -9606,7 +9511,7 @@ static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
{
- uint32_t flags =
+ u32 flags =
I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
switch (flags) {