Diffstat (limited to 'drivers/gpu/drm/i915/display/intel_display.c'):
 drivers/gpu/drm/i915/display/intel_display.c | 2529 +++++---------------------
 1 file changed, 489 insertions(+), 2040 deletions(-)
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 134a6acbd8fb..ff598b6cd953 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -52,6 +52,7 @@
#include "display/intel_dp_mst.h"
#include "display/intel_dpll.h"
#include "display/intel_dpll_mgr.h"
+#include "display/intel_drrs.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_fb.h"
@@ -67,9 +68,10 @@
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"
-#include "gt/intel_rps.h"
#include "gt/gen8_ppgtt.h"
+#include "pxp/intel_pxp.h"
+
#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "i915_drv.h"
@@ -84,35 +86,37 @@
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp_link_training.h"
+#include "intel_dpt.h"
#include "intel_fbc.h"
-#include "intel_fdi.h"
#include "intel_fbdev.h"
+#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_overlay.h"
+#include "intel_panel.h"
+#include "intel_pcode.h"
#include "intel_pipe_crc.h"
+#include "intel_plane_initial.h"
#include "intel_pm.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
-#include "intel_sideband.h"
+#include "intel_sbi.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"
#include "i9xx_plane.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
+#include "vlv_sideband.h"
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
static void ilk_pch_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
-static int intel_framebuffer_init(struct intel_framebuffer *ifb,
- struct drm_i915_gem_object *obj,
- struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
@@ -120,186 +124,105 @@ static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta
const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
-static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
-struct i915_dpt {
- struct i915_address_space vm;
-
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- void __iomem *iomem;
-};
-
-#define i915_is_dpt(vm) ((vm)->is_dpt)
-
-static inline struct i915_dpt *
-i915_vm_to_dpt(struct i915_address_space *vm)
-{
- BUILD_BUG_ON(offsetof(struct i915_dpt, vm));
- GEM_BUG_ON(!i915_is_dpt(vm));
- return container_of(vm, struct i915_dpt, vm);
-}
-
-#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)
-
-static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
-{
- writeq(pte, addr);
-}
-
-static void dpt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 flags)
+/**
+ * intel_update_watermarks - update FIFO watermark values based on current modes
+ * @dev_priv: i915 device
+ *
+ * Calculate watermark values for the various WM regs based on current mode
+ * and plane configuration.
+ *
+ * There are several cases to deal with here:
+ * - normal (i.e. non-self-refresh)
+ * - self-refresh (SR) mode
+ * - lines are large relative to FIFO size (buffer can hold up to 2)
+ * - lines are small relative to FIFO size (buffer can hold more than 2
+ * lines), so need to account for TLB latency
+ *
+ * The normal calculation is:
+ * watermark = dotclock * bytes per pixel * latency
+ * where latency is platform & configuration dependent (we assume pessimal
+ * values here).
+ *
+ * The SR calculation is:
+ * watermark = (trunc(latency/line time)+1) * surface width *
+ * bytes per pixel
+ * where
+ * line time = htotal / dotclock
+ * surface width = hdisplay for normal plane and 64 for cursor
+ * and latency is assumed to be high, as above.
+ *
+ * The final value programmed to the register should always be rounded up,
+ * and include an extra 2 entries to account for clock crossings.
+ *
+ * We don't use the sprite, so we can ignore that. And on Crestline we have
+ * to set the non-SR watermarks to 8.
+ */
+static void intel_update_watermarks(struct drm_i915_private *dev_priv)
{
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
- gen8_pte_t __iomem *base = dpt->iomem;
-
- gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
- vm->pte_encode(addr, level, flags));
+ if (dev_priv->wm_disp->update_wm)
+ dev_priv->wm_disp->update_wm(dev_priv);
}
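
As a rough worked example of the "normal" formula documented above (a sketch only: the dotclock, bytes per pixel, latency and 64-byte FIFO entry size are assumed illustration values, not taken from any platform table in this diff):

	/*
	 * Hypothetical numbers: 1920x1080@60 (~148500 kHz dotclock),
	 * 4 bytes per pixel, 12 us of pessimal memory latency,
	 * 64-byte FIFO entries.
	 *
	 * Bytes scanned out during the latency window:
	 *   148500 kHz * 4 B * 12 us = 148500000 * 4 * 0.000012 ~= 7128 B
	 *
	 * FIFO entries, rounded up, plus 2 for clock crossings:
	 *   DIV_ROUND_UP(7128, 64) + 2 = 112 + 2 = 114 entries
	 */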
-static void dpt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level level,
- u32 flags)
-{
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
- gen8_pte_t __iomem *base = dpt->iomem;
- const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags);
- struct sgt_iter sgt_iter;
- dma_addr_t addr;
- int i;
-
- /*
- * Note that we ignore PTE_READ_ONLY here. The caller must be careful
- * not to allow the user to override access to a read only page.
- */
-
- i = vma->node.start / I915_GTT_PAGE_SIZE;
- for_each_sgt_daddr(addr, sgt_iter, vma->pages)
- gen8_set_pte(&base[i++], pte_encode | addr);
-}
-
-static void dpt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
+static int intel_compute_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ if (dev_priv->wm_disp->compute_pipe_wm)
+ return dev_priv->wm_disp->compute_pipe_wm(state, crtc);
+ return 0;
}
-static void dpt_bind_vma(struct i915_address_space *vm,
- struct i915_vm_pt_stash *stash,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
+static int intel_compute_intermediate_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_i915_gem_object *obj = vma->obj;
- u32 pte_flags;
-
- /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
- pte_flags = 0;
- if (vma->vm->has_read_only && i915_gem_object_is_readonly(obj))
- pte_flags |= PTE_READ_ONLY;
- if (i915_gem_object_is_lmem(obj))
- pte_flags |= PTE_LM;
-
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
-
- vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
-
- /*
- * Without aliasing PPGTT there's no difference between
- * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
- * upgrade to both bound if we bind either to avoid double-binding.
- */
- atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ if (!dev_priv->wm_disp->compute_intermediate_wm)
+ return 0;
+ if (drm_WARN_ON(&dev_priv->drm,
+ !dev_priv->wm_disp->compute_pipe_wm))
+ return 0;
+ return dev_priv->wm_disp->compute_intermediate_wm(state, crtc);
}
-static void dpt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
+static bool intel_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- vm->clear_range(vm, vma->node.start, vma->size);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ if (dev_priv->wm_disp->initial_watermarks) {
+ dev_priv->wm_disp->initial_watermarks(state, crtc);
+ return true;
+ }
+ return false;
}
-static void dpt_cleanup(struct i915_address_space *vm)
+static void intel_atomic_update_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
-
- i915_gem_object_put(dpt->obj);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ if (dev_priv->wm_disp->atomic_update_watermarks)
+ dev_priv->wm_disp->atomic_update_watermarks(state, crtc);
}
-static struct i915_address_space *
-intel_dpt_create(struct intel_framebuffer *fb)
+static void intel_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base;
- struct drm_i915_private *i915 = to_i915(obj->dev);
- struct drm_i915_gem_object *dpt_obj;
- struct i915_address_space *vm;
- struct i915_dpt *dpt;
- size_t size;
- int ret;
-
- if (intel_fb_needs_pot_stride_remap(fb))
- size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped);
- else
- size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE);
-
- size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);
-
- if (HAS_LMEM(i915))
- dpt_obj = i915_gem_object_create_lmem(i915, size, 0);
- else
- dpt_obj = i915_gem_object_create_stolen(i915, size);
- if (IS_ERR(dpt_obj))
- return ERR_CAST(dpt_obj);
-
- ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
- if (ret) {
- i915_gem_object_put(dpt_obj);
- return ERR_PTR(ret);
- }
-
- dpt = kzalloc(sizeof(*dpt), GFP_KERNEL);
- if (!dpt) {
- i915_gem_object_put(dpt_obj);
- return ERR_PTR(-ENOMEM);
- }
-
- vm = &dpt->vm;
-
- vm->gt = &i915->gt;
- vm->i915 = i915;
- vm->dma = i915->drm.dev;
- vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
- vm->is_dpt = true;
-
- i915_address_space_init(vm, VM_CLASS_DPT);
-
- vm->insert_page = dpt_insert_page;
- vm->clear_range = dpt_clear_range;
- vm->insert_entries = dpt_insert_entries;
- vm->cleanup = dpt_cleanup;
-
- vm->vma_ops.bind_vma = dpt_bind_vma;
- vm->vma_ops.unbind_vma = dpt_unbind_vma;
- vm->vma_ops.set_pages = ggtt_set_pages;
- vm->vma_ops.clear_pages = clear_pages;
-
- vm->pte_encode = gen8_ggtt_pte_encode;
-
- dpt->obj = dpt_obj;
-
- return &dpt->vm;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ if (dev_priv->wm_disp->optimize_watermarks)
+ dev_priv->wm_disp->optimize_watermarks(state, crtc);
}
-static void intel_dpt_destroy(struct i915_address_space *vm)
+static int intel_compute_global_watermarks(struct intel_atomic_state *state)
{
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
-
- i915_vm_close(&dpt->vm);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ if (dev_priv->wm_disp->compute_global_watermarks)
+ return dev_priv->wm_disp->compute_global_watermarks(state);
+ return 0;
}
/* returns HPLL frequency in kHz */
@@ -359,6 +282,12 @@ static void intel_update_czclk(struct drm_i915_private *dev_priv)
dev_priv->czclk_freq);
}
+static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
+{
+ return (crtc_state->active_planes &
+ ~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
+}
+
/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
@@ -384,6 +313,15 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}
+/* Wa_1604331009:icl,jsl,ehl */
+static void
+icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
+ bool enable)
+{
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS,
+ enable ? CURSOR_GATING_DIS : 0);
+}
+
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
@@ -464,168 +402,8 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
}
}
-/* Only for pre-ILK configs */
-void assert_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
-{
- u32 val;
- bool cur_state;
-
- val = intel_de_read(dev_priv, DPLL(pipe));
- cur_state = !!(val & DPLL_VCO_ENABLE);
- I915_STATE_WARN(cur_state != state,
- "PLL state assertion failure (expected %s, current %s)\n",
- onoff(state), onoff(cur_state));
-}
-
-/* XXX: the dsi pll is shared between MIPI DSI ports */
-void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
-{
- u32 val;
- bool cur_state;
-
- vlv_cck_get(dev_priv);
- val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
- vlv_cck_put(dev_priv);
-
- cur_state = val & DSI_PLL_VCO_EN;
- I915_STATE_WARN(cur_state != state,
- "DSI PLL state assertion failure (expected %s, current %s)\n",
- onoff(state), onoff(cur_state));
-}
-
-static void assert_fdi_tx(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
-{
- bool cur_state;
-
- if (HAS_DDI(dev_priv)) {
- /*
- * DDI does not have a specific FDI_TX register.
- *
- * FDI is never fed from EDP transcoder
- * so pipe->transcoder cast is fine here.
- */
- enum transcoder cpu_transcoder = (enum transcoder)pipe;
- u32 val = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(cpu_transcoder));
- cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
- } else {
- u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
- cur_state = !!(val & FDI_TX_ENABLE);
- }
- I915_STATE_WARN(cur_state != state,
- "FDI TX state assertion failure (expected %s, current %s)\n",
- onoff(state), onoff(cur_state));
-}
-#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
-#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
-
-static void assert_fdi_rx(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
-{
- u32 val;
- bool cur_state;
-
- val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
- cur_state = !!(val & FDI_RX_ENABLE);
- I915_STATE_WARN(cur_state != state,
- "FDI RX state assertion failure (expected %s, current %s)\n",
- onoff(state), onoff(cur_state));
-}
-#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
-#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
-
-static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- u32 val;
-
- /* ILK FDI PLL is always enabled */
- if (IS_IRONLAKE(dev_priv))
- return;
-
- /* On Haswell, DDI ports are responsible for the FDI PLL setup */
- if (HAS_DDI(dev_priv))
- return;
-
- val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
- I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
-}
-
-void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
-{
- u32 val;
- bool cur_state;
-
- val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
- cur_state = !!(val & FDI_RX_PLL_ENABLE);
- I915_STATE_WARN(cur_state != state,
- "FDI RX PLL assertion failure (expected %s, current %s)\n",
- onoff(state), onoff(cur_state));
-}
-
-void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
- i915_reg_t pp_reg;
- u32 val;
- enum pipe panel_pipe = INVALID_PIPE;
- bool locked = true;
-
- if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
- return;
-
- if (HAS_PCH_SPLIT(dev_priv)) {
- u32 port_sel;
-
- pp_reg = PP_CONTROL(0);
- port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
-
- switch (port_sel) {
- case PANEL_PORT_SELECT_LVDS:
- intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
- break;
- case PANEL_PORT_SELECT_DPA:
- g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
- break;
- case PANEL_PORT_SELECT_DPC:
- g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
- break;
- case PANEL_PORT_SELECT_DPD:
- g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
- break;
- default:
- MISSING_CASE(port_sel);
- break;
- }
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- /* presumably write lock depends on pipe, not port select */
- pp_reg = PP_CONTROL(pipe);
- panel_pipe = pipe;
- } else {
- u32 port_sel;
-
- pp_reg = PP_CONTROL(0);
- port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
-
- drm_WARN_ON(&dev_priv->drm,
- port_sel != PANEL_PORT_SELECT_LVDS);
- intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
- }
-
- val = intel_de_read(dev_priv, pp_reg);
- if (!(val & PANEL_POWER_ON) ||
- ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
- locked = false;
-
- I915_STATE_WARN(panel_pipe == pipe && locked,
- "panel assertion failure, pipe %c regs locked\n",
- pipe_name(pipe));
-}
-
-void assert_pipe(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder, bool state)
+void assert_transcoder(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, bool state)
{
bool cur_state;
enum intel_display_power_domain power_domain;
@@ -942,7 +720,7 @@ enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
return crtc->pipe;
}
-void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
+void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1003,7 +781,7 @@ void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
intel_wait_for_pipe_scanline_moving(crtc);
}
-void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
+void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1053,69 +831,6 @@ intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
}
-unsigned int
-intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
-{
- struct drm_i915_private *dev_priv = to_i915(fb->dev);
- unsigned int cpp = fb->format->cpp[color_plane];
-
- switch (fb->modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- return intel_tile_size(dev_priv);
- case I915_FORMAT_MOD_X_TILED:
- if (DISPLAY_VER(dev_priv) == 2)
- return 128;
- else
- return 512;
- case I915_FORMAT_MOD_Y_TILED_CCS:
- if (is_ccs_plane(fb, color_plane))
- return 128;
- fallthrough;
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
- case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
- if (is_ccs_plane(fb, color_plane))
- return 64;
- fallthrough;
- case I915_FORMAT_MOD_Y_TILED:
- if (DISPLAY_VER(dev_priv) == 2 || HAS_128_BYTE_Y_TILING(dev_priv))
- return 128;
- else
- return 512;
- case I915_FORMAT_MOD_Yf_TILED_CCS:
- if (is_ccs_plane(fb, color_plane))
- return 128;
- fallthrough;
- case I915_FORMAT_MOD_Yf_TILED:
- switch (cpp) {
- case 1:
- return 64;
- case 2:
- case 4:
- return 128;
- case 8:
- case 16:
- return 256;
- default:
- MISSING_CASE(cpp);
- return cpp;
- }
- break;
- default:
- MISSING_CASE(fb->modifier);
- return cpp;
- }
-}
-
-unsigned int
-intel_fb_align_height(const struct drm_framebuffer *fb,
- int color_plane, unsigned int height)
-{
- unsigned int tile_height = intel_tile_height(fb, color_plane);
-
- return ALIGN(height, tile_height);
-}
-
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
unsigned int size = 0;
@@ -1132,82 +847,16 @@ unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info
unsigned int size = 0;
int i;
- for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
+ for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
+ if (rem_info->plane_alignment)
+ size = ALIGN(size, rem_info->plane_alignment);
size += rem_info->plane[i].dst_stride * rem_info->plane[i].height;
-
- return size;
-}
-
-static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
-{
- if (DISPLAY_VER(dev_priv) >= 9)
- return 256 * 1024;
- else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return 128 * 1024;
- else if (DISPLAY_VER(dev_priv) >= 4)
- return 4 * 1024;
- else
- return 0;
-}
-
-static bool has_async_flips(struct drm_i915_private *i915)
-{
- return DISPLAY_VER(i915) >= 5;
-}
-
-unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
- int color_plane)
-{
- struct drm_i915_private *dev_priv = to_i915(fb->dev);
-
- if (intel_fb_uses_dpt(fb))
- return 512 * 4096;
-
- /* AUX_DIST needs only 4K alignment */
- if (is_ccs_plane(fb, color_plane))
- return 4096;
-
- if (is_semiplanar_uv_plane(fb, color_plane)) {
- /*
- * TODO: cross-check wrt. the bspec stride in bytes * 64 bytes
- * alignment for linear UV planes on all platforms.
- */
- if (DISPLAY_VER(dev_priv) >= 12) {
- if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
- return intel_linear_alignment(dev_priv);
-
- return intel_tile_row_size(fb, color_plane);
- }
-
- return 4096;
}
- drm_WARN_ON(&dev_priv->drm, color_plane != 0);
-
- switch (fb->modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- return intel_linear_alignment(dev_priv);
- case I915_FORMAT_MOD_X_TILED:
- if (has_async_flips(dev_priv))
- return 256 * 1024;
- return 0;
- case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
- return 16 * 1024;
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Yf_TILED_CCS:
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Yf_TILED:
- return 1 * 1024 * 1024;
- default:
- MISSING_CASE(fb->modifier);
- return 0;
- }
+ return size;
}
-static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
+bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -1217,198 +866,6 @@ static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
}
-static struct i915_vma *
-intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
- const struct i915_ggtt_view *view,
- bool uses_fence,
- unsigned long *out_flags,
- struct i915_address_space *vm)
-{
- struct drm_device *dev = fb->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct i915_vma *vma;
- u32 alignment;
- int ret;
-
- if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
- return ERR_PTR(-EINVAL);
-
- alignment = 4096 * 512;
-
- atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
-
- ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
- if (ret) {
- vma = ERR_PTR(ret);
- goto err;
- }
-
- vma = i915_vma_instance(obj, vm, view);
- if (IS_ERR(vma))
- goto err;
-
- if (i915_vma_misplaced(vma, 0, alignment, 0)) {
- ret = i915_vma_unbind(vma);
- if (ret) {
- vma = ERR_PTR(ret);
- goto err;
- }
- }
-
- ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
- if (ret) {
- vma = ERR_PTR(ret);
- goto err;
- }
-
- vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
-
- i915_gem_object_flush_if_display(obj);
-
- i915_vma_get(vma);
-err:
- atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
-
- return vma;
-}
-
-struct i915_vma *
-intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
- bool phys_cursor,
- const struct i915_ggtt_view *view,
- bool uses_fence,
- unsigned long *out_flags)
-{
- struct drm_device *dev = fb->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- intel_wakeref_t wakeref;
- struct i915_gem_ww_ctx ww;
- struct i915_vma *vma;
- unsigned int pinctl;
- u32 alignment;
- int ret;
-
- if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
- return ERR_PTR(-EINVAL);
-
- if (phys_cursor)
- alignment = intel_cursor_alignment(dev_priv);
- else
- alignment = intel_surf_alignment(fb, 0);
- if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
- return ERR_PTR(-EINVAL);
-
- /* Note that the w/a also requires 64 PTE of padding following the
- * bo. We currently fill all unused PTE with the shadow page and so
- * we should always have valid PTE following the scanout preventing
- * the VT-d warning.
- */
- if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
- alignment = 256 * 1024;
-
- /*
- * Global gtt pte registers are special registers which actually forward
- * writes to a chunk of system memory. Which means that there is no risk
- * that the register values disappear as soon as we call
- * intel_runtime_pm_put(), so it is correct to wrap only the
- * pin/unpin/fence and not more.
- */
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
-
- /*
- * Valleyview is definitely limited to scanning out the first
- * 512MiB. Lets presume this behaviour was inherited from the
- * g4x display engine and that all earlier gen are similarly
- * limited. Testing suggests that it is a little more
- * complicated than this. For example, Cherryview appears quite
- * happy to scanout from anywhere within its global aperture.
- */
- pinctl = 0;
- if (HAS_GMCH(dev_priv))
- pinctl |= PIN_MAPPABLE;
-
- i915_gem_ww_ctx_init(&ww, true);
-retry:
- ret = i915_gem_object_lock(obj, &ww);
- if (!ret && phys_cursor)
- ret = i915_gem_object_attach_phys(obj, alignment);
- else if (!ret && HAS_LMEM(dev_priv))
- ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM);
- /* TODO: Do we need to sync when migration becomes async? */
- if (!ret)
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- goto err;
-
- if (!ret) {
- vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment,
- view, pinctl);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err_unpin;
- }
- }
-
- if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
- /*
- * Install a fence for tiled scan-out. Pre-i965 always needs a
- * fence, whereas 965+ only requires a fence if using
- * framebuffer compression. For simplicity, we always, when
- * possible, install a fence as the cost is not that onerous.
- *
- * If we fail to fence the tiled scanout, then either the
- * modeset will reject the change (which is highly unlikely as
- * the affected systems, all but one, do not have unmappable
- * space) or we will not be able to enable full powersaving
- * techniques (also likely not to apply due to various limits
- * FBC and the like impose on the size of the buffer, which
- * presumably we violated anyway with this unmappable buffer).
- * Anyway, it is presumably better to stumble onwards with
- * something and try to run the system in a "less than optimal"
- * mode that matches the user configuration.
- */
- ret = i915_vma_pin_fence(vma);
- if (ret != 0 && DISPLAY_VER(dev_priv) < 4) {
- i915_vma_unpin(vma);
- goto err_unpin;
- }
- ret = 0;
-
- if (vma->fence)
- *out_flags |= PLANE_HAS_FENCE;
- }
-
- i915_vma_get(vma);
-
-err_unpin:
- i915_gem_object_unpin_pages(obj);
-err:
- if (ret == -EDEADLK) {
- ret = i915_gem_ww_ctx_backoff(&ww);
- if (!ret)
- goto retry;
- }
- i915_gem_ww_ctx_fini(&ww);
- if (ret)
- vma = ERR_PTR(ret);
-
- atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- return vma;
-}
-
-void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
-{
- if (flags & PLANE_HAS_FENCE)
- i915_vma_unpin_fence(vma);
- i915_vma_unpin(vma);
- i915_vma_put(vma);
-}
-
/*
* Convert the x/y offsets into a linear offset.
* Only valid with 0/180 degree rotation, which is fine since linear
@@ -1440,22 +897,6 @@ void intel_add_fb_offsets(int *x, int *y,
*y += state->view.color_plane[color_plane].y;
}
-static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
-{
- switch (fb_modifier) {
- case I915_FORMAT_MOD_X_TILED:
- return I915_TILING_X;
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
- case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
- return I915_TILING_Y;
- default:
- return I915_TILING_NONE;
- }
-}
-
/*
* From the Sky Lake PRM:
* "The Color Control Surface (CCS) contains the compression status of
@@ -1586,12 +1027,6 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
}
}
-static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
-{
- return DIV_ROUND_UP(fb->pitches[skl_ccs_to_main_plane(fb, ccs_plane)],
- 512) * 64;
-}
-
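
For reference, the arithmetic encoded by the gen12_ccs_aux_stride() helper removed above, worked through with an assumed main-surface pitch (illustration value only, not taken from this diff):

	/*
	 * Assume a main-surface pitch of 8192 bytes:
	 *   DIV_ROUND_UP(8192, 512) * 64 = 16 * 64 = 1024 bytes of AUX stride,
	 * i.e. 64 bytes of CCS stride per 512 bytes of main-surface stride.
	 */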
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier)
{
@@ -1616,188 +1051,6 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
DRM_MODE_ROTATE_0);
}
-static
-u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
- u32 pixel_format, u64 modifier)
-{
- /*
- * Arbitrary limit for gen4+ chosen to match the
- * render engine max stride.
- *
- * The new CCS hash mode makes remapping impossible
- */
- if (DISPLAY_VER(dev_priv) < 4 || is_ccs_modifier(modifier) ||
- intel_modifier_uses_dpt(dev_priv, modifier))
- return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
- else if (DISPLAY_VER(dev_priv) >= 7)
- return 256 * 1024;
- else
- return 128 * 1024;
-}
-
-static u32
-intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
-{
- struct drm_i915_private *dev_priv = to_i915(fb->dev);
- u32 tile_width;
-
- if (is_surface_linear(fb, color_plane)) {
- u32 max_stride = intel_plane_fb_max_stride(dev_priv,
- fb->format->format,
- fb->modifier);
-
- /*
- * To make remapping with linear generally feasible
- * we need the stride to be page aligned.
- */
- if (fb->pitches[color_plane] > max_stride &&
- !is_ccs_modifier(fb->modifier))
- return intel_tile_size(dev_priv);
- else
- return 64;
- }
-
- tile_width = intel_tile_width_bytes(fb, color_plane);
- if (is_ccs_modifier(fb->modifier)) {
- /*
- * Display WA #0531: skl,bxt,kbl,glk
- *
- * Render decompression and plane width > 3840
- * combined with horizontal panning requires the
- * plane stride to be a multiple of 4. We'll just
- * require the entire fb to accommodate that to avoid
- * potential runtime errors at plane configuration time.
- */
- if ((DISPLAY_VER(dev_priv) == 9 || IS_GEMINILAKE(dev_priv)) &&
- color_plane == 0 && fb->width > 3840)
- tile_width *= 4;
- /*
- * The main surface pitch must be padded to a multiple of four
- * tile widths.
- */
- else if (DISPLAY_VER(dev_priv) >= 12)
- tile_width *= 4;
- }
- return tile_width;
-}
-
-static struct i915_vma *
-initial_plane_vma(struct drm_i915_private *i915,
- struct intel_initial_plane_config *plane_config)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- u32 base, size;
-
- if (plane_config->size == 0)
- return NULL;
-
- base = round_down(plane_config->base,
- I915_GTT_MIN_ALIGNMENT);
- size = round_up(plane_config->base + plane_config->size,
- I915_GTT_MIN_ALIGNMENT);
- size -= base;
-
- /*
- * If the FB is too big, just don't use it since fbdev is not very
- * important and we should probably use that space with FBC or other
- * features.
- */
- if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
- size * 2 > i915->stolen_usable_size)
- return NULL;
-
- obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
- if (IS_ERR(obj))
- return NULL;
-
- /*
- * Mark it WT ahead of time to avoid changing the
- * cache_level during fbdev initialization. The
- * unbind there would get stuck waiting for rcu.
- */
- i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
- I915_CACHE_WT : I915_CACHE_NONE);
-
- switch (plane_config->tiling) {
- case I915_TILING_NONE:
- break;
- case I915_TILING_X:
- case I915_TILING_Y:
- obj->tiling_and_stride =
- plane_config->fb->base.pitches[0] |
- plane_config->tiling;
- break;
- default:
- MISSING_CASE(plane_config->tiling);
- goto err_obj;
- }
-
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
- if (IS_ERR(vma))
- goto err_obj;
-
- if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
- goto err_obj;
-
- if (i915_gem_object_is_tiled(obj) &&
- !i915_vma_is_map_and_fenceable(vma))
- goto err_obj;
-
- return vma;
-
-err_obj:
- i915_gem_object_put(obj);
- return NULL;
-}
-
-static bool
-intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
- struct intel_initial_plane_config *plane_config)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_mode_fb_cmd2 mode_cmd = { 0 };
- struct drm_framebuffer *fb = &plane_config->fb->base;
- struct i915_vma *vma;
-
- switch (fb->modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- case I915_FORMAT_MOD_Y_TILED:
- break;
- default:
- drm_dbg(&dev_priv->drm,
- "Unsupported modifier for initial FB: 0x%llx\n",
- fb->modifier);
- return false;
- }
-
- vma = initial_plane_vma(dev_priv, plane_config);
- if (!vma)
- return false;
-
- mode_cmd.pixel_format = fb->format->format;
- mode_cmd.width = fb->width;
- mode_cmd.height = fb->height;
- mode_cmd.pitches[0] = fb->pitches[0];
- mode_cmd.modifier[0] = fb->modifier;
- mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
-
- if (intel_framebuffer_init(to_intel_framebuffer(fb),
- vma->obj, &mode_cmd)) {
- drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
- goto err_vma;
- }
-
- plane_config->vma = vma;
- return true;
-
-err_vma:
- i915_vma_put(vma);
- return false;
-}
-
static void
intel_set_plane_visible(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state,
@@ -1833,8 +1086,8 @@ static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
}
}
-static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
- struct intel_plane *plane)
+void intel_plane_disable_noatomic(struct intel_crtc *crtc,
+ struct intel_plane *plane)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
@@ -1879,168 +1132,6 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
intel_wait_for_vblank(dev_priv, crtc->pipe);
}
-static struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
-{
- struct drm_i915_private *i915 = vm->i915;
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
- intel_wakeref_t wakeref;
- struct i915_vma *vma;
- void __iomem *iomem;
-
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- atomic_inc(&i915->gpu_error.pending_fb_pin);
-
- vma = i915_gem_object_ggtt_pin(dpt->obj, NULL, 0, 4096,
- HAS_LMEM(i915) ? 0 : PIN_MAPPABLE);
- if (IS_ERR(vma))
- goto err;
-
- iomem = i915_vma_pin_iomap(vma);
- i915_vma_unpin(vma);
- if (IS_ERR(iomem)) {
- vma = iomem;
- goto err;
- }
-
- dpt->vma = vma;
- dpt->iomem = iomem;
-
- i915_vma_get(vma);
-
-err:
- atomic_dec(&i915->gpu_error.pending_fb_pin);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-
- return vma;
-}
-
-static void intel_dpt_unpin(struct i915_address_space *vm)
-{
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
-
- i915_vma_unpin_iomap(dpt->vma);
- i915_vma_put(dpt->vma);
-}
-
-static bool
-intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
- const struct intel_initial_plane_config *plane_config,
- struct drm_framebuffer **fb,
- struct i915_vma **vma)
-{
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(&i915->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_plane *plane =
- to_intel_plane(crtc->base.primary);
- struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
-
- if (!crtc_state->uapi.active)
- continue;
-
- if (!plane_state->ggtt_vma)
- continue;
-
- if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
- *fb = plane_state->hw.fb;
- *vma = plane_state->ggtt_vma;
- return true;
- }
- }
-
- return false;
-}
-
-static void
-intel_find_initial_plane_obj(struct intel_crtc *crtc,
- struct intel_initial_plane_config *plane_config)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_plane *plane =
- to_intel_plane(crtc->base.primary);
- struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
- struct drm_framebuffer *fb;
- struct i915_vma *vma;
-
- /*
- * TODO:
- * Disable planes if get_initial_plane_config() failed.
- * Make sure things work if the surface base is not page aligned.
- */
- if (!plane_config->fb)
- return;
-
- if (intel_alloc_initial_plane_obj(crtc, plane_config)) {
- fb = &plane_config->fb->base;
- vma = plane_config->vma;
- goto valid_fb;
- }
-
- /*
- * Failed to alloc the obj, check to see if we should share
- * an fb with another CRTC instead
- */
- if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma))
- goto valid_fb;
-
- /*
- * We've failed to reconstruct the BIOS FB. Current display state
- * indicates that the primary plane is visible, but has a NULL FB,
- * which will lead to problems later if we don't fix it up. The
- * simplest solution is to just disable the primary plane now and
- * pretend the BIOS never had it enabled.
- */
- intel_plane_disable_noatomic(crtc, plane);
- if (crtc_state->bigjoiner) {
- struct intel_crtc *slave =
- crtc_state->bigjoiner_linked_crtc;
- intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
- }
-
- return;
-
-valid_fb:
- plane_state->uapi.rotation = plane_config->rotation;
- intel_fb_fill_view(to_intel_framebuffer(fb),
- plane_state->uapi.rotation, &plane_state->view);
-
- __i915_vma_pin(vma);
- plane_state->ggtt_vma = i915_vma_get(vma);
- if (intel_plane_uses_fence(plane_state) &&
- i915_vma_pin_fence(vma) == 0 && vma->fence)
- plane_state->flags |= PLANE_HAS_FENCE;
-
- plane_state->uapi.src_x = 0;
- plane_state->uapi.src_y = 0;
- plane_state->uapi.src_w = fb->width << 16;
- plane_state->uapi.src_h = fb->height << 16;
-
- plane_state->uapi.crtc_x = 0;
- plane_state->uapi.crtc_y = 0;
- plane_state->uapi.crtc_w = fb->width;
- plane_state->uapi.crtc_h = fb->height;
-
- if (plane_config->tiling)
- dev_priv->preserve_bios_swizzle = true;
-
- plane_state->uapi.fb = fb;
- drm_framebuffer_get(fb);
-
- plane_state->uapi.crtc = &crtc->base;
- intel_plane_copy_uapi_to_hw_state(plane_state, plane_state, crtc);
-
- intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
-
- atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
-}
-
unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
@@ -2449,55 +1540,6 @@ static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_s
intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
}
-static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
-{
- u32 temp;
-
- temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
- if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
- return;
-
- drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
- FDI_RX_ENABLE);
- drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
- FDI_RX_ENABLE);
-
- temp &= ~FDI_BC_BIFURCATION_SELECT;
- if (enable)
- temp |= FDI_BC_BIFURCATION_SELECT;
-
- drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
- enable ? "en" : "dis");
- intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
- intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
-}
-
-static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- switch (crtc->pipe) {
- case PIPE_A:
- break;
- case PIPE_B:
- if (crtc_state->fdi_lanes > 2)
- cpt_set_fdi_bc_bifurcation(dev_priv, false);
- else
- cpt_set_fdi_bc_bifurcation(dev_priv, true);
-
- break;
- case PIPE_C:
- cpt_set_fdi_bc_bifurcation(dev_priv, true);
-
- break;
- default:
- BUG();
- }
-}
-
/*
* Finds the encoder associated with the given CRTC. This can only be
* used when we know that the CRTC isn't feeding multiple encoders!
@@ -2547,16 +1589,8 @@ static void ilk_pch_enable(const struct intel_atomic_state *state,
assert_pch_transcoder_disabled(dev_priv, pipe);
- if (IS_IVYBRIDGE(dev_priv))
- ivb_update_fdi_bc_bifurcation(crtc_state);
-
- /* Write the TU size bits before fdi link training, so that error
- * detection works. */
- intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
- intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
-
/* For PCH output, training FDI link */
- dev_priv->display.fdi_link_train(crtc, crtc_state);
+ intel_fdi_link_train(crtc, crtc_state);
/* We need to program the right clock selection before writing the pixel
* multiplier into the DPLL. */
@@ -2584,7 +1618,7 @@ static void ilk_pch_enable(const struct intel_atomic_state *state,
intel_enable_shared_dpll(crtc_state);
/* set transcoder timing, panel must allow it */
- assert_panel_unlocked(dev_priv, pipe);
+ assert_pps_unlocked(dev_priv, pipe);
ilk_pch_transcoder_set_timings(crtc_state, pipe);
intel_fdi_normal_train(crtc);
@@ -2842,6 +1876,46 @@ static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
return false;
}
+static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ /* Wa_1604331009:icl,jsl,ehl */
+ if (is_hdr_mode(crtc_state) &&
+ crtc_state->active_planes & BIT(PLANE_CURSOR) &&
+ DISPLAY_VER(dev_priv) == 11)
+ return true;
+
+ return false;
+}
+
+static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
+ enum pipe pipe, bool enable)
+{
+ if (DISPLAY_VER(i915) == 9) {
+ /*
+ * "Plane N strech max must be programmed to 11b (x1)
+ * when Async flips are enabled on that plane."
+ */
+ intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
+ SKL_PLANE1_STRETCH_MAX_MASK,
+ enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
+ } else {
+ /* Also needed on HSW/BDW albeit undocumented */
+ intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
+ HSW_PRI_STRETCH_MAX_MASK,
+ enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
+ }
+}
+
+static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ return crtc_state->uapi.async_flip && intel_vtd_active() &&
+ (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
+}
+
static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
@@ -2869,12 +1943,17 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
- intel_update_watermarks(crtc);
+ intel_update_watermarks(dev_priv);
if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
hsw_enable_ips(new_crtc_state);
intel_fbc_post_update(state, crtc);
+ intel_drrs_page_flip(state, crtc);
+
+ if (needs_async_flip_vtd_wa(old_crtc_state) &&
+ !needs_async_flip_vtd_wa(new_crtc_state))
+ intel_async_flip_vtd_wa(dev_priv, pipe, false);
if (needs_nv12_wa(old_crtc_state) &&
!needs_nv12_wa(new_crtc_state))
@@ -2883,6 +1962,11 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (needs_scalerclk_wa(old_crtc_state) &&
!needs_scalerclk_wa(new_crtc_state))
icl_wa_scalerclkgating(dev_priv, pipe, false);
+
+ if (needs_cursorclk_wa(old_crtc_state) &&
+ !needs_cursorclk_wa(new_crtc_state))
+ icl_wa_cursorclkgating(dev_priv, pipe, false);
+
}
static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
@@ -2969,6 +2053,10 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
if (intel_fbc_pre_update(state, crtc))
intel_wait_for_vblank(dev_priv, pipe);
+ if (!needs_async_flip_vtd_wa(old_crtc_state) &&
+ needs_async_flip_vtd_wa(new_crtc_state))
+ intel_async_flip_vtd_wa(dev_priv, pipe, true);
+
/* Display WA 827 */
if (!needs_nv12_wa(old_crtc_state) &&
needs_nv12_wa(new_crtc_state))
@@ -2979,6 +2067,11 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
needs_scalerclk_wa(new_crtc_state))
icl_wa_scalerclkgating(dev_priv, pipe, true);
+ /* Wa_1604331009:icl,jsl,ehl */
+ if (!needs_cursorclk_wa(old_crtc_state) &&
+ needs_cursorclk_wa(new_crtc_state))
+ icl_wa_cursorclkgating(dev_priv, pipe, true);
+
/*
* Vblank time updates from the shadow to live plane control register
* are blocked if the memory self-refresh mode is active at that
@@ -3022,10 +2115,9 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* we'll continue to update watermarks the old way, if flags tell
* us to.
*/
- if (dev_priv->display.initial_watermarks)
- dev_priv->display.initial_watermarks(state, crtc);
- else if (new_crtc_state->update_wm_pre)
- intel_update_watermarks(crtc);
+ if (!intel_initial_watermarks(state, crtc))
+ if (new_crtc_state->update_wm_pre)
+ intel_update_watermarks(dev_priv);
}
/*
@@ -3360,9 +2452,6 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
- if (new_crtc_state->has_pch_encoder)
- intel_prepare_shared_dpll(new_crtc_state);
-
if (intel_crtc_has_dp_encoder(new_crtc_state))
intel_dp_set_m_n(new_crtc_state, M1_N1);
@@ -3400,9 +2489,8 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
/* update DSPCNTR to configure gamma for pipe bottom color */
intel_disable_primary_plane(new_crtc_state);
- if (dev_priv->display.initial_watermarks)
- dev_priv->display.initial_watermarks(state, crtc);
- intel_enable_pipe(new_crtc_state);
+ intel_initial_watermarks(state, crtc);
+ intel_enable_transcoder(new_crtc_state);
if (new_crtc_state->has_pch_encoder)
ilk_pch_enable(state, new_crtc_state);
@@ -3578,10 +2666,9 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
&new_crtc_state->fdi_m_n, NULL);
hsw_set_frame_start_delay(new_crtc_state);
- }
- if (!transcoder_is_dsi(cpu_transcoder))
- hsw_set_pipeconf(new_crtc_state);
+ hsw_set_transconf(new_crtc_state);
+ }
crtc->active = true;
@@ -3611,8 +2698,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (DISPLAY_VER(dev_priv) >= 11)
icl_set_pipe_chicken(new_crtc_state);
- if (dev_priv->display.initial_watermarks)
- dev_priv->display.initial_watermarks(state, crtc);
+ intel_initial_watermarks(state, crtc);
if (DISPLAY_VER(dev_priv) >= 11) {
const struct intel_dbuf_state *dbuf_state =
@@ -3676,7 +2762,7 @@ static void ilk_crtc_disable(struct intel_atomic_state *state,
intel_crtc_vblank_off(old_crtc_state);
- intel_disable_pipe(old_crtc_state);
+ intel_disable_transcoder(old_crtc_state);
ilk_pfit_disable(old_crtc_state);
@@ -3738,7 +2824,7 @@ static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
*/
drm_WARN_ON(&dev_priv->drm,
intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
- assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
+ assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
intel_de_write(dev_priv, PFIT_PGM_RATIOS,
crtc_state->gmch_pfit.pgm_ratios);
@@ -3858,11 +2944,7 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port)
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
-
- if (intel_phy_is_tc(dev_priv, phy) &&
- dig_port->tc_mode == TC_PORT_TBT_ALT) {
+ if (intel_tc_port_in_tbt_alt_mode(dig_port)) {
switch (dig_port->aux_ch) {
case AUX_CH_C:
return POWER_DOMAIN_AUX_C_TBT;
@@ -3923,16 +3005,16 @@ static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
struct drm_encoder *encoder;
enum pipe pipe = crtc->pipe;
u64 mask;
- enum transcoder transcoder = crtc_state->cpu_transcoder;
if (!crtc_state->hw.active)
return 0;
mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
- mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
+ mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(cpu_transcoder));
if (crtc_state->pch_pfit.enabled ||
crtc_state->pch_pfit.force_thru)
mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
@@ -3951,7 +3033,7 @@ static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
if (crtc_state->dsc.compression_enable)
- mask |= BIT_ULL(intel_dsc_power_domain(crtc_state));
+ mask |= BIT_ULL(intel_dsc_power_domain(crtc, cpu_transcoder));
return mask;
}
@@ -4015,13 +3097,10 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state,
intel_encoders_pre_pll_enable(state, crtc);
- if (IS_CHERRYVIEW(dev_priv)) {
- chv_prepare_pll(crtc, new_crtc_state);
- chv_enable_pll(crtc, new_crtc_state);
- } else {
- vlv_prepare_pll(crtc, new_crtc_state);
- vlv_enable_pll(crtc, new_crtc_state);
- }
+ if (IS_CHERRYVIEW(dev_priv))
+ chv_enable_pll(new_crtc_state);
+ else
+ vlv_enable_pll(new_crtc_state);
intel_encoders_pre_enable(state, crtc);
@@ -4032,25 +3111,14 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state,
/* update DSPCNTR to configure gamma for pipe bottom color */
intel_disable_primary_plane(new_crtc_state);
- dev_priv->display.initial_watermarks(state, crtc);
- intel_enable_pipe(new_crtc_state);
+ intel_initial_watermarks(state, crtc);
+ intel_enable_transcoder(new_crtc_state);
intel_crtc_vblank_on(new_crtc_state);
intel_encoders_enable(state, crtc);
}
-static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- intel_de_write(dev_priv, FP0(crtc->pipe),
- crtc_state->dpll_hw_state.fp0);
- intel_de_write(dev_priv, FP1(crtc->pipe),
- crtc_state->dpll_hw_state.fp1);
-}
-
static void i9xx_crtc_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -4062,8 +3130,6 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
- i9xx_set_pll_dividers(new_crtc_state);
-
if (intel_crtc_has_dp_encoder(new_crtc_state))
intel_dp_set_m_n(new_crtc_state, M1_N1);
@@ -4079,7 +3145,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
intel_encoders_pre_enable(state, crtc);
- i9xx_enable_pll(crtc, new_crtc_state);
+ i9xx_enable_pll(new_crtc_state);
i9xx_pfit_enable(new_crtc_state);
@@ -4088,11 +3154,9 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
/* update DSPCNTR to configure gamma for pipe bottom color */
intel_disable_primary_plane(new_crtc_state);
- if (dev_priv->display.initial_watermarks)
- dev_priv->display.initial_watermarks(state, crtc);
- else
- intel_update_watermarks(crtc);
- intel_enable_pipe(new_crtc_state);
+ if (!intel_initial_watermarks(state, crtc))
+ intel_update_watermarks(dev_priv);
+ intel_enable_transcoder(new_crtc_state);
intel_crtc_vblank_on(new_crtc_state);
@@ -4111,7 +3175,7 @@ static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
if (!old_crtc_state->gmch_pfit.control)
return;
- assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
+ assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);
drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
intel_de_read(dev_priv, PFIT_CONTROL));
@@ -4137,7 +3201,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
intel_crtc_vblank_off(old_crtc_state);
- intel_disable_pipe(old_crtc_state);
+ intel_disable_transcoder(old_crtc_state);
i9xx_pfit_disable(old_crtc_state);
@@ -4157,8 +3221,8 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
if (DISPLAY_VER(dev_priv) != 2)
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
- if (!dev_priv->display.initial_watermarks)
- intel_update_watermarks(crtc);
+ if (!dev_priv->wm_disp->initial_watermarks)
+ intel_update_watermarks(dev_priv);
/* clock the pipe down to 640x480@60 to potentially save power */
if (IS_I830(dev_priv))
@@ -4211,7 +3275,7 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);
- dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);
+ dev_priv->display->crtc_disable(to_intel_atomic_state(state), crtc);
drm_atomic_state_put(state);
@@ -4234,12 +3298,11 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
encoder->base.crtc = NULL;
intel_fbc_disable(crtc);
- intel_update_watermarks(crtc);
+ intel_update_watermarks(dev_priv);
intel_disable_shared_dpll(crtc_state);
intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);
- dev_priv->active_pipes &= ~BIT(pipe);
cdclk_state->min_cdclk[pipe] = 0;
cdclk_state->min_voltage_level[pipe] = 0;
cdclk_state->active_pipes &= ~BIT(pipe);
@@ -4596,13 +3659,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
}
}
- /* Cantiga+ cannot handle modes with a hsync front porch of 0.
- * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
- */
- if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
- pipe_mode->crtc_hsync_start == pipe_mode->crtc_hdisplay)
- return -EINVAL;
-
intel_crtc_compute_pixel_rate(pipe_config);
if (pipe_config->has_pch_encoder)
@@ -5412,102 +4468,6 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
BUG_ON(val != final);
}
-static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
-{
- u32 tmp;
-
- tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
- tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
- intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
-
- if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
- FDI_MPHY_IOSFSB_RESET_STATUS, 100))
- drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
-
- tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
- tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
- intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
-
- if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
- FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
- drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
-}
-
-/* WaMPhyProgramming:hsw */
-static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
-{
- u32 tmp;
-
- tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
- tmp &= ~(0xFF << 24);
- tmp |= (0x12 << 24);
- intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
- tmp |= (1 << 11);
- intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
- tmp |= (1 << 11);
- intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
- tmp |= (1 << 24) | (1 << 21) | (1 << 18);
- intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
- tmp |= (1 << 24) | (1 << 21) | (1 << 18);
- intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
- tmp &= ~(7 << 13);
- tmp |= (5 << 13);
- intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
- tmp &= ~(7 << 13);
- tmp |= (5 << 13);
- intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
- tmp &= ~0xFF;
- tmp |= 0x1C;
- intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
- tmp &= ~0xFF;
- tmp |= 0x1C;
- intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
- tmp &= ~(0xFF << 16);
- tmp |= (0x1C << 16);
- intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
- tmp &= ~(0xFF << 16);
- tmp |= (0x1C << 16);
- intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
- tmp |= (1 << 27);
- intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
- tmp |= (1 << 27);
- intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
- tmp &= ~(0xF << 28);
- tmp |= (4 << 28);
- intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
- tmp &= ~(0xF << 28);
- tmp |= (4 << 28);
- intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
-}
-
/* Implements 3 different sequences from BSpec chapter "Display iCLK
* Programming" based on the parameters passed:
* - Sequence to enable CLKOUT_DP
@@ -5540,10 +4500,8 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
tmp &= ~SBI_SSCCTL_PATHALT;
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
- if (with_fdi) {
- lpt_reset_fdi_mphy(dev_priv);
- lpt_program_fdi_mphy(dev_priv);
- }
+ if (with_fdi)
+ lpt_fdi_program_mphy(dev_priv);
}
reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
@@ -5806,7 +4764,7 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
intel_de_posting_read(dev_priv, PIPECONF(pipe));
}
-static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
+static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -5870,9 +4828,7 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
val |= PIPEMISC_YUV420_ENABLE |
PIPEMISC_YUV420_MODE_FULL_BLEND;
- if (DISPLAY_VER(dev_priv) >= 11 &&
- (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
- BIT(PLANE_CURSOR))) == 0)
+ if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
val |= PIPEMISC_HDR_MODE_PRECISION;
if (DISPLAY_VER(dev_priv) >= 12)
@@ -6211,59 +5167,64 @@ out:
return ret;
}
-static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config,
- struct intel_display_power_domain_set *power_domain_set)
+static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
- unsigned long enabled_panel_transcoders = 0;
- enum transcoder panel_transcoder;
- u32 tmp;
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
+ u32 tmp = 0;
- if (DISPLAY_VER(dev_priv) >= 11)
- panel_transcoder_mask |=
- BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- /*
- * The pipe->transcoder mapping is fixed with the exception of the eDP
- * and DSI transcoders handled below.
- */
- pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
+ with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
+
+ return tmp & TRANS_DDI_FUNC_ENABLE;
+}
+
+static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
+{
+ u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);
+
+ if (DISPLAY_VER(i915) >= 11)
+ panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
+
+ return panel_transcoder_mask;
+}
+
+static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
+ enum transcoder cpu_transcoder;
+ u8 enabled_transcoders = 0;
/*
* XXX: Do intel_display_power_get_if_enabled before reading this (for
* consistency and less surprising code; it's in always on power).
*/
- for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
+ for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder,
panel_transcoder_mask) {
- bool force_thru = false;
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
enum pipe trans_pipe;
+ u32 tmp = 0;
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(panel_transcoder));
- if (!(tmp & TRANS_DDI_FUNC_ENABLE))
- continue;
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+ with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
- /*
- * Log all enabled ones, only use the first one.
- *
- * FIXME: This won't work for two separate DSI displays.
- */
- enabled_panel_transcoders |= BIT(panel_transcoder);
- if (enabled_panel_transcoders != BIT(panel_transcoder))
+ if (!(tmp & TRANS_DDI_FUNC_ENABLE))
continue;
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
drm_WARN(dev, 1,
"unknown pipe linked to transcoder %s\n",
- transcoder_name(panel_transcoder));
+ transcoder_name(cpu_transcoder));
fallthrough;
case TRANS_DDI_EDP_INPUT_A_ONOFF:
- force_thru = true;
- fallthrough;
case TRANS_DDI_EDP_INPUT_A_ON:
trans_pipe = PIPE_A;
break;
@@ -6278,22 +5239,83 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
break;
}
- if (trans_pipe == crtc->pipe) {
- pipe_config->cpu_transcoder = panel_transcoder;
- pipe_config->pch_pfit.force_thru = force_thru;
- }
+ if (trans_pipe == crtc->pipe)
+ enabled_transcoders |= BIT(cpu_transcoder);
}
+ cpu_transcoder = (enum transcoder) crtc->pipe;
+ if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
+ enabled_transcoders |= BIT(cpu_transcoder);
+
+ return enabled_transcoders;
+}
+
+static bool has_edp_transcoders(u8 enabled_transcoders)
+{
+ return enabled_transcoders & BIT(TRANSCODER_EDP);
+}
+
+static bool has_dsi_transcoders(u8 enabled_transcoders)
+{
+ return enabled_transcoders & (BIT(TRANSCODER_DSI_0) |
+ BIT(TRANSCODER_DSI_1));
+}
+
+static bool has_pipe_transcoders(u8 enabled_transcoders)
+{
+ return enabled_transcoders & ~(BIT(TRANSCODER_EDP) |
+ BIT(TRANSCODER_DSI_0) |
+ BIT(TRANSCODER_DSI_1));
+}
+
+static void assert_enabled_transcoders(struct drm_i915_private *i915,
+ u8 enabled_transcoders)
+{
+ /* Only one type of transcoder please */
+ drm_WARN_ON(&i915->drm,
+ has_edp_transcoders(enabled_transcoders) +
+ has_dsi_transcoders(enabled_transcoders) +
+ has_pipe_transcoders(enabled_transcoders) > 1);
+
+ /* Only DSI transcoders can be ganged */
+ drm_WARN_ON(&i915->drm,
+ !has_dsi_transcoders(enabled_transcoders) &&
+ !is_power_of_2(enabled_transcoders));
+}
+
+static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config,
+ struct intel_display_power_domain_set *power_domain_set)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ unsigned long enabled_transcoders;
+ u32 tmp;
+
+ enabled_transcoders = hsw_enabled_transcoders(crtc);
+ if (!enabled_transcoders)
+ return false;
+
+ assert_enabled_transcoders(dev_priv, enabled_transcoders);
+
/*
- * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
+ * With the exception of DSI we should only ever have
+ * a single enabled transcoder. With DSI let's just
+ * pick the first one.
*/
- drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
- enabled_panel_transcoders != BIT(TRANSCODER_EDP));
+ pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1;
if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
return false;
+ if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) {
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
+
+ if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
+ pipe_config->pch_pfit.force_thru = true;
+ }
+
tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
return tmp & PIPECONF_ENABLE;
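
The hunk above replaces the old one-pass readout with a three-step flow: collect every enabled transcoder into a bitmask, sanity-check the mask, then take the lowest set bit via ffs() as the CPU transcoder. The standalone userspace sketch below walks through the same bitmask logic; the TRANSCODER_* values, BIT() macro and is_power_of_2() helper are local stand-ins for the kernel versions, not the driver's definitions.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <strings.h>        /* ffs() */

/* Local stand-ins for the transcoder enum and kernel helpers. */
enum { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C, TRANSCODER_EDP,
       TRANSCODER_DSI_0, TRANSCODER_DSI_1 };
#define BIT(x) (1u << (x))

static bool is_power_of_2(unsigned int v) { return v && !(v & (v - 1)); }

static bool has_edp(unsigned int m)  { return m & BIT(TRANSCODER_EDP); }
static bool has_dsi(unsigned int m)  { return m & (BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1)); }
static bool has_pipe(unsigned int m)
{
        return m & ~(BIT(TRANSCODER_EDP) | BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1));
}

int main(void)
{
        unsigned int enabled = BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

        /* At most one kind of transcoder (eDP, DSI or pipe) may be enabled. */
        assert(has_edp(enabled) + has_dsi(enabled) + has_pipe(enabled) <= 1);
        /* Only DSI transcoders may be ganged. */
        assert(has_dsi(enabled) || is_power_of_2(enabled));

        /* As in the patch, the lowest set bit becomes the CPU transcoder. */
        printf("cpu_transcoder = %d\n", ffs((int)enabled) - 1);
        return 0;
}

With two DSI transcoders enabled the checks pass and the lowest one (TRANSCODER_DSI_0) is chosen, matching the "with DSI let's just pick the first one" comment.
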
@@ -6515,7 +5537,7 @@ static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- if (!i915->display.get_pipe_config(crtc, crtc_state))
+ if (!i915->display->get_pipe_config(crtc, crtc_state))
return false;
crtc_state->hw.active = true;
@@ -6531,28 +5553,6 @@ static const struct drm_display_mode load_detect_mode = {
704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
-struct drm_framebuffer *
-intel_framebuffer_create(struct drm_i915_gem_object *obj,
- struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct intel_framebuffer *intel_fb;
- int ret;
-
- intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
- if (!intel_fb)
- return ERR_PTR(-ENOMEM);
-
- ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
- if (ret)
- goto err;
-
- return &intel_fb->base;
-
-err:
- kfree(intel_fb);
- return ERR_PTR(ret);
-}
-
static int intel_modeset_disable_planes(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
@@ -6780,7 +5780,6 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
u32 dpll = pipe_config->dpll_hw_state.dpll;
u32 fp;
struct dpll clock;
@@ -6830,11 +5829,13 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
else
port_clock = i9xx_calc_dpll_params(refclk, &clock);
} else {
- u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
- LVDS);
- bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
+ enum pipe lvds_pipe;
+
+ if (IS_I85X(dev_priv) &&
+ intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
+ lvds_pipe == crtc->pipe) {
+ u32 lvds = intel_de_read(dev_priv, LVDS);
- if (is_lvds) {
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
DPLL_FPA01_P1_POST_DIV_SHIFT);
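
In the restored LVDS branch just above, the P1 post divider is recovered with ffs() because the hardware stores it one-hot (bit p1 - 1 of the field is set), so the 1-based index of the set bit is the divider itself. A minimal illustration of that decode, using an invented mask and shift rather than the real DPLL layout:

#include <stdint.h>
#include <stdio.h>
#include <strings.h>        /* ffs() */

/* Invented register layout: a one-hot 8-bit P1 field at bits 16..23. */
#define P1_POST_DIV_SHIFT  16
#define P1_POST_DIV_MASK   (0xffu << P1_POST_DIV_SHIFT)

/* Encode the way the hardware stores it: set bit (p1 - 1) of the field. */
static uint32_t encode_p1(int p1)
{
        return (1u << (p1 - 1)) << P1_POST_DIV_SHIFT;
}

/* Decode the way the driver reads it back: mask, shift, 1-based bit index. */
static int decode_p1(uint32_t dpll)
{
        return ffs((int)((dpll & P1_POST_DIV_MASK) >> P1_POST_DIV_SHIFT));
}

int main(void)
{
        uint32_t dpll = encode_p1(5);   /* pretend the PLL was programmed with p1 = 5 */

        printf("decoded p1 = %d\n", decode_p1(dpll));   /* prints 5 */
        return 0;
}
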
@@ -6985,27 +5986,27 @@ static bool needs_scaling(const struct intel_plane_state *state)
}
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *crtc_state,
+ struct intel_crtc_state *new_crtc_state,
const struct intel_plane_state *old_plane_state,
- struct intel_plane_state *plane_state)
+ struct intel_plane_state *new_plane_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- bool mode_changed = intel_crtc_needs_modeset(crtc_state);
+ bool mode_changed = intel_crtc_needs_modeset(new_crtc_state);
bool was_crtc_enabled = old_crtc_state->hw.active;
- bool is_crtc_enabled = crtc_state->hw.active;
+ bool is_crtc_enabled = new_crtc_state->hw.active;
bool turn_off, turn_on, visible, was_visible;
int ret;
if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
- ret = skl_update_scaler_plane(crtc_state, plane_state);
+ ret = skl_update_scaler_plane(new_crtc_state, new_plane_state);
if (ret)
return ret;
}
was_visible = old_plane_state->uapi.visible;
- visible = plane_state->uapi.visible;
+ visible = new_plane_state->uapi.visible;
if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
was_visible = false;
@@ -7021,7 +6022,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
* only combine the results from all planes in the current place?
*/
if (!is_crtc_enabled) {
- intel_plane_set_invisible(crtc_state, plane_state);
+ intel_plane_set_invisible(new_crtc_state, new_plane_state);
visible = false;
}
@@ -7040,28 +6041,28 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
if (turn_on) {
if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
- crtc_state->update_wm_pre = true;
+ new_crtc_state->update_wm_pre = true;
/* must disable cxsr around plane enable/disable */
if (plane->id != PLANE_CURSOR)
- crtc_state->disable_cxsr = true;
+ new_crtc_state->disable_cxsr = true;
} else if (turn_off) {
if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
- crtc_state->update_wm_post = true;
+ new_crtc_state->update_wm_post = true;
/* must disable cxsr around plane enable/disable */
if (plane->id != PLANE_CURSOR)
- crtc_state->disable_cxsr = true;
- } else if (intel_wm_need_update(old_plane_state, plane_state)) {
+ new_crtc_state->disable_cxsr = true;
+ } else if (intel_wm_need_update(old_plane_state, new_plane_state)) {
if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
/* FIXME bollocks */
- crtc_state->update_wm_pre = true;
- crtc_state->update_wm_post = true;
+ new_crtc_state->update_wm_pre = true;
+ new_crtc_state->update_wm_post = true;
}
}
if (visible || was_visible)
- crtc_state->fb_bits |= plane->frontbuffer_bit;
+ new_crtc_state->fb_bits |= plane->frontbuffer_bit;
/*
* ILK/SNB DVSACNTR/Sprite Enable
@@ -7100,8 +6101,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
(IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
IS_IVYBRIDGE(dev_priv)) &&
(turn_on || (!needs_scaling(old_plane_state) &&
- needs_scaling(plane_state))))
- crtc_state->disable_lp_wm = true;
+ needs_scaling(new_plane_state))))
+ new_crtc_state->disable_lp_wm = true;
return 0;
}
@@ -7363,10 +6364,10 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
crtc_state->update_wm_post = true;
if (mode_changed && crtc_state->hw.enable &&
- dev_priv->display.crtc_compute_clock &&
+ dev_priv->dpll_funcs &&
!crtc_state->bigjoiner_slave &&
!drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
- ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
+ ret = dev_priv->dpll_funcs->crtc_compute_clock(crtc_state);
if (ret)
return ret;
}
@@ -7385,32 +6386,23 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
return ret;
}
- if (dev_priv->display.compute_pipe_wm) {
- ret = dev_priv->display.compute_pipe_wm(state, crtc);
- if (ret) {
- drm_dbg_kms(&dev_priv->drm,
- "Target pipe watermarks are invalid\n");
- return ret;
- }
-
+ ret = intel_compute_pipe_wm(state, crtc);
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Target pipe watermarks are invalid\n");
+ return ret;
}
- if (dev_priv->display.compute_intermediate_wm) {
- if (drm_WARN_ON(&dev_priv->drm,
- !dev_priv->display.compute_pipe_wm))
- return 0;
-
- /*
- * Calculate 'intermediate' watermarks that satisfy both the
- * old state and the new state. We can program these
- * immediately.
- */
- ret = dev_priv->display.compute_intermediate_wm(state, crtc);
- if (ret) {
- drm_dbg_kms(&dev_priv->drm,
- "No valid intermediate pipe watermarks are possible\n");
- return ret;
- }
+ /*
+ * Calculate 'intermediate' watermarks that satisfy both the
+ * old state and the new state. We can program these
+ * immediately.
+ */
+ ret = intel_compute_intermediate_wm(state, crtc);
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm,
+ "No valid intermediate pipe watermarks are possible\n");
+ return ret;
}
if (DISPLAY_VER(dev_priv) >= 9) {
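
The restored comment is terse about what "intermediate" watermarks are: a set of values that is safe under both the old and the new configuration, so it can be programmed before the plane update lands. A toy sketch of that merge, with invented struct and field names (taking the per-field maximum is one conservative way to satisfy both states; the real per-platform rules are more involved):

#include <stdio.h>

/* Toy per-pipe watermark state: just two made-up level values. */
struct toy_wm {
        int pri;        /* primary plane watermark */
        int cur;        /* cursor watermark */
};

static int max_int(int a, int b) { return a > b ? a : b; }

/*
 * Intermediate watermarks must be safe under both the old and the new
 * configuration so they can be written before the plane update lands.
 * Taking the per-field maximum (the more conservative value) is one way
 * to satisfy both states at once.
 */
static struct toy_wm intermediate_wm(struct toy_wm old, struct toy_wm new)
{
        struct toy_wm wm = {
                .pri = max_int(old.pri, new.pri),
                .cur = max_int(old.cur, new.cur),
        };

        return wm;
}

int main(void)
{
        struct toy_wm old = { .pri = 8, .cur = 4 };
        struct toy_wm new = { .pri = 5, .cur = 6 };
        struct toy_wm mid = intermediate_wm(old, new);

        printf("intermediate: pri=%d cur=%d\n", mid.pri, mid.cur);      /* pri=8 cur=6 */
        return 0;
}
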
@@ -7439,11 +6431,9 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
}
- if (!mode_changed) {
- ret = intel_psr2_sel_fetch_update(state, crtc);
- if (ret)
- return ret;
- }
+ ret = intel_psr2_sel_fetch_update(state, crtc);
+ if (ret)
+ return ret;
return 0;
}
@@ -8153,11 +7143,10 @@ encoder_retry:
ret = encoder->compute_config(encoder, pipe_config,
connector_state);
+ if (ret == -EDEADLK)
+ return ret;
if (ret < 0) {
- if (ret != -EDEADLK)
- drm_dbg_kms(&i915->drm,
- "Encoder config failure: %d\n",
- ret);
+ drm_dbg_kms(&i915->drm, "Encoder config failure: %d\n", ret);
return ret;
}
}
@@ -8171,12 +7160,7 @@ encoder_retry:
ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
if (ret == -EDEADLK)
return ret;
- if (ret < 0) {
- drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
- return ret;
- }
-
- if (ret == I915_DISPLAY_CONFIG_RETRY) {
+ if (ret == -EAGAIN) {
if (drm_WARN(&i915->drm, !retry,
"loop in pipe configuration computation\n"))
return -EINVAL;
@@ -8185,6 +7169,10 @@ encoder_retry:
retry = false;
goto encoder_retry;
}
+ if (ret < 0) {
+ drm_dbg_kms(&i915->drm, "CRTC config failure: %d\n", ret);
+ return ret;
+ }
/* Dithering seems to not pass-through bits correctly when it should, so
* only enable it on 6bpc panels and when its not a compliance
@@ -8720,10 +7708,12 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (bp_gamma)
PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
- PIPE_CONF_CHECK_BOOL(has_psr);
- PIPE_CONF_CHECK_BOOL(has_psr2);
- PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
- PIPE_CONF_CHECK_I(dc3co_exitline);
+ if (current_config->active_planes) {
+ PIPE_CONF_CHECK_BOOL(has_psr);
+ PIPE_CONF_CHECK_BOOL(has_psr2);
+ PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
+ PIPE_CONF_CHECK_I(dc3co_exitline);
+ }
}
PIPE_CONF_CHECK_BOOL(double_wide);
@@ -8780,7 +7770,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(min_voltage_level);
}
- if (fastset && (current_config->has_psr || pipe_config->has_psr))
+ if (current_config->has_psr || pipe_config->has_psr)
PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
~intel_hdmi_infoframe_enable(DP_SDP_VSC));
else
@@ -9402,7 +8392,7 @@ static void intel_modeset_clear_plls(struct intel_atomic_state *state)
struct intel_crtc *crtc;
int i;
- if (!dev_priv->display.crtc_compute_clock)
+ if (!dev_priv->dpll_funcs)
return;
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -9503,23 +8493,6 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
return 0;
}
-/*
- * Handle calculation of various watermark data at the end of the atomic check
- * phase. The code here should be run after the per-crtc and per-plane 'check'
- * handlers to ensure that all derived state has been updated.
- */
-static int calc_watermark_data(struct intel_atomic_state *state)
-{
- struct drm_device *dev = state->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- /* Is there platform-specific watermark information to calculate? */
- if (dev_priv->display.compute_global_watermarks)
- return dev_priv->display.compute_global_watermarks(state);
-
- return 0;
-}
-
static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
@@ -9627,13 +8600,28 @@ static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
return 0;
}
+static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+ return intel_pxp_key_check(&i915->gt.pxp, obj, false) == 0;
+}
+
+static bool pxp_is_borked(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_object_is_protected(obj) && !bo_has_valid_encryption(obj);
+}
+
static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
struct intel_plane_state *plane_state;
struct intel_plane *plane;
+ struct intel_plane_state *new_plane_state;
+ struct intel_plane_state *old_plane_state;
struct intel_crtc *crtc;
+ const struct drm_framebuffer *fb;
int i, ret;
ret = icl_add_linked_planes(state);
@@ -9681,6 +8669,19 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state)
return ret;
}
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ new_plane_state = intel_atomic_get_new_plane_state(state, plane);
+ old_plane_state = intel_atomic_get_old_plane_state(state, plane);
+ fb = new_plane_state->hw.fb;
+ if (fb) {
+ new_plane_state->decrypt = bo_has_valid_encryption(intel_fb_obj(fb));
+ new_plane_state->force_black = pxp_is_borked(intel_fb_obj(fb));
+ } else {
+ new_plane_state->decrypt = old_plane_state->decrypt;
+ new_plane_state->force_black = old_plane_state->force_black;
+ }
+ }
+
return 0;
}
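
The new loop above derives two per-plane flags from the framebuffer object: decrypt when the PXP key check passes, and force_black when the object is protected but the key no longer checks out, with planes that keep their framebuffer inheriting the old values. A rough truth-table model of that decision (invented names, PXP return-code handling elided):

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the two per-plane flags computed in the hunk above. */
struct toy_plane_flags {
        bool decrypt;           /* scan out with decryption enabled */
        bool force_black;       /* protected content whose key is no longer valid */
};

/*
 * is_protected: the buffer was created as a protected (PXP) object.
 * key_valid:    the current PXP session key still checks out for it.
 */
static struct toy_plane_flags pxp_plane_flags(bool is_protected, bool key_valid)
{
        struct toy_plane_flags f = {
                .decrypt     = is_protected && key_valid,
                .force_black = is_protected && !key_valid,
        };

        return f;
}

int main(void)
{
        /* A protected buffer after its PXP session has been torn down. */
        struct toy_plane_flags f = pxp_plane_flags(true, false);

        printf("decrypt=%d force_black=%d\n", f.decrypt, f.force_black);        /* 0 1 */
        return 0;
}
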
@@ -9715,7 +8716,7 @@ static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
*need_cdclk_calc = true;
- ret = dev_priv->display.bw_calc_min_cdclk(state);
+ ret = intel_cdclk_bw_calc_min_cdclk(state);
if (ret)
return ret;
@@ -9967,6 +8968,10 @@ static int intel_atomic_check_async(struct intel_atomic_state *state)
drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
return -EINVAL;
}
+
+ /* plane decryption is allowed to change only in synchronous flips */
+ if (old_plane_state->decrypt != new_plane_state->decrypt)
+ return -EINVAL;
}
return 0;
@@ -10166,7 +9171,7 @@ static int intel_atomic_check(struct drm_device *dev,
goto fail;
intel_fbc_choose_crtc(dev_priv, state);
- ret = calc_watermark_data(state);
+ ret = intel_compute_global_watermarks(state);
if (ret)
goto fail;
@@ -10336,12 +9341,11 @@ static void commit_pipe_pre_planes(struct intel_atomic_state *state,
if (new_crtc_state->update_pipe)
intel_pipe_fastset(old_crtc_state, new_crtc_state);
-
- intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
}
- if (dev_priv->display.atomic_update_watermarks)
- dev_priv->display.atomic_update_watermarks(state, crtc);
+ intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
+
+ intel_atomic_update_watermarks(state, crtc);
}
static void commit_pipe_post_planes(struct intel_atomic_state *state,
@@ -10373,7 +9377,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
intel_crtc_update_active_timings(new_crtc_state);
- dev_priv->display.crtc_enable(state, crtc);
+ dev_priv->display->crtc_enable(state, crtc);
if (new_crtc_state->bigjoiner_slave)
return;
@@ -10404,10 +9408,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
intel_encoders_update_pipe(state, crtc);
}
- if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
- intel_fbc_disable(crtc);
- else
- intel_fbc_enable(state, crtc);
+ intel_fbc_update(state, crtc);
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(new_crtc_state);
@@ -10464,16 +9465,15 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
*/
intel_crtc_disable_pipe_crc(crtc);
- dev_priv->display.crtc_disable(state, crtc);
+ dev_priv->display->crtc_disable(state, crtc);
crtc->active = false;
intel_fbc_disable(crtc);
intel_disable_shared_dpll(old_crtc_state);
/* FIXME unify this for all platforms */
if (!new_crtc_state->hw.active &&
- !HAS_GMCH(dev_priv) &&
- dev_priv->display.initial_watermarks)
- dev_priv->display.initial_watermarks(state, crtc);
+ !HAS_GMCH(dev_priv))
+ intel_initial_watermarks(state, crtc);
}
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
@@ -10837,6 +9837,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_encoders_update_prepare(state);
intel_dbuf_pre_plane_update(state);
+ intel_psr_pre_plane_update(state);
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->uapi.async_flip)
@@ -10844,7 +9845,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
- dev_priv->display.commit_modeset_enables(state);
+ dev_priv->display->commit_modeset_enables(state);
if (state->modeset) {
intel_encoders_update_complete(state);
@@ -10895,11 +9896,11 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
- if (dev_priv->display.optimize_watermarks)
- dev_priv->display.optimize_watermarks(state, crtc);
+ intel_optimize_watermarks(state, crtc);
}
intel_dbuf_post_plane_update(state);
+ intel_psr_post_plane_update(state);
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
intel_post_plane_update(state, crtc);
@@ -11088,280 +10089,6 @@ static int intel_atomic_commit(struct drm_device *dev,
return 0;
}
-struct wait_rps_boost {
- struct wait_queue_entry wait;
-
- struct drm_crtc *crtc;
- struct i915_request *request;
-};
-
-static int do_rps_boost(struct wait_queue_entry *_wait,
- unsigned mode, int sync, void *key)
-{
- struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
- struct i915_request *rq = wait->request;
-
- /*
- * If we missed the vblank, but the request is already running it
- * is reasonable to assume that it will complete before the next
- * vblank without our intervention, so leave RPS alone.
- */
- if (!i915_request_started(rq))
- intel_rps_boost(rq);
- i915_request_put(rq);
-
- drm_crtc_vblank_put(wait->crtc);
-
- list_del(&wait->wait.entry);
- kfree(wait);
- return 1;
-}
-
-static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
- struct dma_fence *fence)
-{
- struct wait_rps_boost *wait;
-
- if (!dma_fence_is_i915(fence))
- return;
-
- if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
- return;
-
- if (drm_crtc_vblank_get(crtc))
- return;
-
- wait = kmalloc(sizeof(*wait), GFP_KERNEL);
- if (!wait) {
- drm_crtc_vblank_put(crtc);
- return;
- }
-
- wait->request = to_request(dma_fence_get(fence));
- wait->crtc = crtc;
-
- wait->wait.func = do_rps_boost;
- wait->wait.flags = 0;
-
- add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
-}
-
-int intel_plane_pin_fb(struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- struct drm_framebuffer *fb = plane_state->hw.fb;
- struct i915_vma *vma;
- bool phys_cursor =
- plane->id == PLANE_CURSOR &&
- INTEL_INFO(dev_priv)->display.cursor_needs_physical;
-
- if (!intel_fb_uses_dpt(fb)) {
- vma = intel_pin_and_fence_fb_obj(fb, phys_cursor,
- &plane_state->view.gtt,
- intel_plane_uses_fence(plane_state),
- &plane_state->flags);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- plane_state->ggtt_vma = vma;
- } else {
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-
- vma = intel_dpt_pin(intel_fb->dpt_vm);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- plane_state->ggtt_vma = vma;
-
- vma = intel_pin_fb_obj_dpt(fb, &plane_state->view.gtt, false,
- &plane_state->flags, intel_fb->dpt_vm);
- if (IS_ERR(vma)) {
- intel_dpt_unpin(intel_fb->dpt_vm);
- plane_state->ggtt_vma = NULL;
- return PTR_ERR(vma);
- }
-
- plane_state->dpt_vma = vma;
-
- WARN_ON(plane_state->ggtt_vma == plane_state->dpt_vma);
- }
-
- return 0;
-}
-
-void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
-{
- struct drm_framebuffer *fb = old_plane_state->hw.fb;
- struct i915_vma *vma;
-
- if (!intel_fb_uses_dpt(fb)) {
- vma = fetch_and_zero(&old_plane_state->ggtt_vma);
- if (vma)
- intel_unpin_fb_vma(vma, old_plane_state->flags);
- } else {
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-
- vma = fetch_and_zero(&old_plane_state->dpt_vma);
- if (vma)
- intel_unpin_fb_vma(vma, old_plane_state->flags);
-
- vma = fetch_and_zero(&old_plane_state->ggtt_vma);
- if (vma)
- intel_dpt_unpin(intel_fb->dpt_vm);
- }
-}
-
-/**
- * intel_prepare_plane_fb - Prepare fb for usage on plane
- * @_plane: drm plane to prepare for
- * @_new_plane_state: the plane state being prepared
- *
- * Prepares a framebuffer for usage on a display plane. Generally this
- * involves pinning the underlying object and updating the frontbuffer tracking
- * bits. Some older platforms need special physical address handling for
- * cursor planes.
- *
- * Returns 0 on success, negative error code on failure.
- */
-int
-intel_prepare_plane_fb(struct drm_plane *_plane,
- struct drm_plane_state *_new_plane_state)
-{
- struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
- struct intel_plane *plane = to_intel_plane(_plane);
- struct intel_plane_state *new_plane_state =
- to_intel_plane_state(_new_plane_state);
- struct intel_atomic_state *state =
- to_intel_atomic_state(new_plane_state->uapi.state);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct intel_plane_state *old_plane_state =
- intel_atomic_get_old_plane_state(state, plane);
- struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
- struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
- int ret;
-
- if (old_obj) {
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state,
- to_intel_crtc(old_plane_state->hw.crtc));
-
- /* Big Hammer, we also need to ensure that any pending
- * MI_WAIT_FOR_EVENT inside a user batch buffer on the
- * current scanout is retired before unpinning the old
- * framebuffer. Note that we rely on userspace rendering
- * into the buffer attached to the pipe they are waiting
- * on. If not, userspace generates a GPU hang with IPEHR
- * point to the MI_WAIT_FOR_EVENT.
- *
- * This should only fail upon a hung GPU, in which case we
- * can safely continue.
- */
- if (intel_crtc_needs_modeset(crtc_state)) {
- ret = i915_sw_fence_await_reservation(&state->commit_ready,
- old_obj->base.resv, NULL,
- false, 0,
- GFP_KERNEL);
- if (ret < 0)
- return ret;
- }
- }
-
- if (new_plane_state->uapi.fence) { /* explicit fencing */
- i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
- &attr);
- ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
- new_plane_state->uapi.fence,
- i915_fence_timeout(dev_priv),
- GFP_KERNEL);
- if (ret < 0)
- return ret;
- }
-
- if (!obj)
- return 0;
-
-
- ret = intel_plane_pin_fb(new_plane_state);
- if (ret)
- return ret;
-
- i915_gem_object_wait_priority(obj, 0, &attr);
- i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
-
- if (!new_plane_state->uapi.fence) { /* implicit fencing */
- struct dma_fence *fence;
-
- ret = i915_sw_fence_await_reservation(&state->commit_ready,
- obj->base.resv, NULL,
- false,
- i915_fence_timeout(dev_priv),
- GFP_KERNEL);
- if (ret < 0)
- goto unpin_fb;
-
- fence = dma_resv_get_excl_unlocked(obj->base.resv);
- if (fence) {
- add_rps_boost_after_vblank(new_plane_state->hw.crtc,
- fence);
- dma_fence_put(fence);
- }
- } else {
- add_rps_boost_after_vblank(new_plane_state->hw.crtc,
- new_plane_state->uapi.fence);
- }
-
- /*
- * We declare pageflips to be interactive and so merit a small bias
- * towards upclocking to deliver the frame on time. By only changing
- * the RPS thresholds to sample more regularly and aim for higher
- * clocks we can hopefully deliver low power workloads (like kodi)
- * that are not quite steady state without resorting to forcing
- * maximum clocks following a vblank miss (see do_rps_boost()).
- */
- if (!state->rps_interactive) {
- intel_rps_mark_interactive(&dev_priv->gt.rps, true);
- state->rps_interactive = true;
- }
-
- return 0;
-
-unpin_fb:
- intel_plane_unpin_fb(new_plane_state);
-
- return ret;
-}
-
-/**
- * intel_cleanup_plane_fb - Cleans up an fb after plane use
- * @plane: drm plane to clean up for
- * @_old_plane_state: the state from the previous modeset
- *
- * Cleans up a framebuffer that has just been removed from a plane.
- */
-void
-intel_cleanup_plane_fb(struct drm_plane *plane,
- struct drm_plane_state *_old_plane_state)
-{
- struct intel_plane_state *old_plane_state =
- to_intel_plane_state(_old_plane_state);
- struct intel_atomic_state *state =
- to_intel_atomic_state(old_plane_state->uapi.state);
- struct drm_i915_private *dev_priv = to_i915(plane->dev);
- struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
-
- if (!obj)
- return;
-
- if (state->rps_interactive) {
- intel_rps_mark_interactive(&dev_priv->gt.rps, false);
- state->rps_interactive = false;
- }
-
- /* Should only be called after a successful intel_prepare_plane_fb()! */
- intel_plane_unpin_fb(old_plane_state);
-}
-
/**
* intel_plane_destroy - destroy a plane
* @plane: plane to destroy
@@ -11491,6 +10218,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_ddi_init(dev_priv, PORT_TC2);
intel_ddi_init(dev_priv, PORT_TC3);
intel_ddi_init(dev_priv, PORT_TC4);
+ icl_dsi_init(dev_priv);
} else if (IS_ALDERLAKE_S(dev_priv)) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_TC1);
@@ -11708,249 +10436,6 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
-static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-
- drm_framebuffer_cleanup(fb);
-
- if (intel_fb_uses_dpt(fb))
- intel_dpt_destroy(intel_fb->dpt_vm);
-
- intel_frontbuffer_put(intel_fb->frontbuffer);
-
- kfree(intel_fb);
-}
-
-static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file,
- unsigned int *handle)
-{
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
-
- if (i915_gem_object_is_userptr(obj)) {
- drm_dbg(&i915->drm,
- "attempting to use a userptr for a framebuffer, denied\n");
- return -EINVAL;
- }
-
- return drm_gem_handle_create(file, &obj->base, handle);
-}
-
-static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
- struct drm_file *file,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips)
-{
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-
- i915_gem_object_flush_if_display(obj);
- intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
-
- return 0;
-}
-
-static const struct drm_framebuffer_funcs intel_fb_funcs = {
- .destroy = intel_user_framebuffer_destroy,
- .create_handle = intel_user_framebuffer_create_handle,
- .dirty = intel_user_framebuffer_dirty,
-};
-
-static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
- struct drm_i915_gem_object *obj,
- struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct drm_framebuffer *fb = &intel_fb->base;
- u32 max_stride;
- unsigned int tiling, stride;
- int ret = -EINVAL;
- int i;
-
- intel_fb->frontbuffer = intel_frontbuffer_get(obj);
- if (!intel_fb->frontbuffer)
- return -ENOMEM;
-
- i915_gem_object_lock(obj, NULL);
- tiling = i915_gem_object_get_tiling(obj);
- stride = i915_gem_object_get_stride(obj);
- i915_gem_object_unlock(obj);
-
- if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
- /*
- * If there's a fence, enforce that
- * the fb modifier and tiling mode match.
- */
- if (tiling != I915_TILING_NONE &&
- tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
- drm_dbg_kms(&dev_priv->drm,
- "tiling_mode doesn't match fb modifier\n");
- goto err;
- }
- } else {
- if (tiling == I915_TILING_X) {
- mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
- } else if (tiling == I915_TILING_Y) {
- drm_dbg_kms(&dev_priv->drm,
- "No Y tiling for legacy addfb\n");
- goto err;
- }
- }
-
- if (!drm_any_plane_has_format(&dev_priv->drm,
- mode_cmd->pixel_format,
- mode_cmd->modifier[0])) {
- drm_dbg_kms(&dev_priv->drm,
- "unsupported pixel format %p4cc / modifier 0x%llx\n",
- &mode_cmd->pixel_format, mode_cmd->modifier[0]);
- goto err;
- }
-
- /*
- * gen2/3 display engine uses the fence if present,
- * so the tiling mode must match the fb modifier exactly.
- */
- if (DISPLAY_VER(dev_priv) < 4 &&
- tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
- drm_dbg_kms(&dev_priv->drm,
- "tiling_mode must match fb modifier exactly on gen2/3\n");
- goto err;
- }
-
- max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
- mode_cmd->modifier[0]);
- if (mode_cmd->pitches[0] > max_stride) {
- drm_dbg_kms(&dev_priv->drm,
- "%s pitch (%u) must be at most %d\n",
- mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
- "tiled" : "linear",
- mode_cmd->pitches[0], max_stride);
- goto err;
- }
-
- /*
- * If there's a fence, enforce that
- * the fb pitch and fence stride match.
- */
- if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
- drm_dbg_kms(&dev_priv->drm,
- "pitch (%d) must match tiling stride (%d)\n",
- mode_cmd->pitches[0], stride);
- goto err;
- }
-
- /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
- if (mode_cmd->offsets[0] != 0) {
- drm_dbg_kms(&dev_priv->drm,
- "plane 0 offset (0x%08x) must be 0\n",
- mode_cmd->offsets[0]);
- goto err;
- }
-
- drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
-
- for (i = 0; i < fb->format->num_planes; i++) {
- u32 stride_alignment;
-
- if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
- drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
- i);
- goto err;
- }
-
- stride_alignment = intel_fb_stride_alignment(fb, i);
- if (fb->pitches[i] & (stride_alignment - 1)) {
- drm_dbg_kms(&dev_priv->drm,
- "plane %d pitch (%d) must be at least %u byte aligned\n",
- i, fb->pitches[i], stride_alignment);
- goto err;
- }
-
- if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) {
- int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);
-
- if (fb->pitches[i] != ccs_aux_stride) {
- drm_dbg_kms(&dev_priv->drm,
- "ccs aux plane %d pitch (%d) must be %d\n",
- i,
- fb->pitches[i], ccs_aux_stride);
- goto err;
- }
- }
-
- /* TODO: Add POT stride remapping support for CCS formats as well. */
- if (IS_ALDERLAKE_P(dev_priv) &&
- mode_cmd->modifier[i] != DRM_FORMAT_MOD_LINEAR &&
- !intel_fb_needs_pot_stride_remap(intel_fb) &&
- !is_power_of_2(mode_cmd->pitches[i])) {
- drm_dbg_kms(&dev_priv->drm,
- "plane %d pitch (%d) must be power of two for tiled buffers\n",
- i, mode_cmd->pitches[i]);
- goto err;
- }
-
- fb->obj[i] = &obj->base;
- }
-
- ret = intel_fill_fb_info(dev_priv, intel_fb);
- if (ret)
- goto err;
-
- if (intel_fb_uses_dpt(fb)) {
- struct i915_address_space *vm;
-
- vm = intel_dpt_create(intel_fb);
- if (IS_ERR(vm)) {
- ret = PTR_ERR(vm);
- goto err;
- }
-
- intel_fb->dpt_vm = vm;
- }
-
- ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
- if (ret) {
- drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
- goto err;
- }
-
- return 0;
-
-err:
- intel_frontbuffer_put(intel_fb->frontbuffer);
- return ret;
-}
-
-static struct drm_framebuffer *
-intel_user_framebuffer_create(struct drm_device *dev,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *user_mode_cmd)
-{
- struct drm_framebuffer *fb;
- struct drm_i915_gem_object *obj;
- struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
- struct drm_i915_private *i915;
-
- obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
- if (!obj)
- return ERR_PTR(-ENOENT);
-
- /* object is backed with LMEM for discrete */
- i915 = to_i915(obj->base.dev);
- if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM)) {
- /* object is "remote", not in local memory */
- i915_gem_object_put(obj);
- return ERR_PTR(-EREMOTE);
- }
-
- fb = intel_framebuffer_create(obj, &mode_cmd);
- i915_gem_object_put(obj);
-
- return fb;
-}
-
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
@@ -12039,6 +10524,14 @@ intel_mode_valid(struct drm_device *dev,
return MODE_V_ILLEGAL;
}
+ /*
+ * Cantiga+ cannot handle modes with a hsync front porch of 0.
+ * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
+ */
+ if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
+ mode->hsync_start == mode->hdisplay)
+ return MODE_H_ILLEGAL;
+
return MODE_OK;
}
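
The new check rejects modes whose horizontal sync starts exactly at the end of the active area, i.e. a zero hsync front porch, on the affected platforms. The platform gating is the condition shown above; the toy below only spells out what the front porch comparison means in terms of the mode timing fields (drm_display_mode-style names reused for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Only the horizontal timing fields the check cares about. */
struct toy_mode {
        int hdisplay;           /* last active pixel */
        int hsync_start;        /* front porch = hsync_start - hdisplay */
};

/* Mirrors the rejection above: a zero hsync front porch is not usable. */
static bool hsync_front_porch_ok(const struct toy_mode *mode)
{
        return mode->hsync_start - mode->hdisplay > 0;
}

int main(void)
{
        struct toy_mode bad  = { .hdisplay = 1920, .hsync_start = 1920 };
        struct toy_mode good = { .hdisplay = 1920, .hsync_start = 2008 };

        printf("bad: %s, good: %s\n",
               hsync_front_porch_ok(&bad)  ? "ok" : "rejected",
               hsync_front_porch_ok(&good) ? "ok" : "rejected");
        return 0;
}
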
@@ -12090,6 +10583,46 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
.atomic_state_free = intel_atomic_state_free,
};
+static const struct drm_i915_display_funcs skl_display_funcs = {
+ .get_pipe_config = hsw_get_pipe_config,
+ .crtc_enable = hsw_crtc_enable,
+ .crtc_disable = hsw_crtc_disable,
+ .commit_modeset_enables = skl_commit_modeset_enables,
+ .get_initial_plane_config = skl_get_initial_plane_config,
+};
+
+static const struct drm_i915_display_funcs ddi_display_funcs = {
+ .get_pipe_config = hsw_get_pipe_config,
+ .crtc_enable = hsw_crtc_enable,
+ .crtc_disable = hsw_crtc_disable,
+ .commit_modeset_enables = intel_commit_modeset_enables,
+ .get_initial_plane_config = i9xx_get_initial_plane_config,
+};
+
+static const struct drm_i915_display_funcs pch_split_display_funcs = {
+ .get_pipe_config = ilk_get_pipe_config,
+ .crtc_enable = ilk_crtc_enable,
+ .crtc_disable = ilk_crtc_disable,
+ .commit_modeset_enables = intel_commit_modeset_enables,
+ .get_initial_plane_config = i9xx_get_initial_plane_config,
+};
+
+static const struct drm_i915_display_funcs vlv_display_funcs = {
+ .get_pipe_config = i9xx_get_pipe_config,
+ .crtc_enable = valleyview_crtc_enable,
+ .crtc_disable = i9xx_crtc_disable,
+ .commit_modeset_enables = intel_commit_modeset_enables,
+ .get_initial_plane_config = i9xx_get_initial_plane_config,
+};
+
+static const struct drm_i915_display_funcs i9xx_display_funcs = {
+ .get_pipe_config = i9xx_get_pipe_config,
+ .crtc_enable = i9xx_crtc_enable,
+ .crtc_disable = i9xx_crtc_disable,
+ .commit_modeset_enables = intel_commit_modeset_enables,
+ .get_initial_plane_config = i9xx_get_initial_plane_config,
+};
+
/**
* intel_init_display_hooks - initialize the display modesetting hooks
* @dev_priv: device private
@@ -12105,38 +10638,19 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
intel_dpll_init_clock_hook(dev_priv);
if (DISPLAY_VER(dev_priv) >= 9) {
- dev_priv->display.get_pipe_config = hsw_get_pipe_config;
- dev_priv->display.crtc_enable = hsw_crtc_enable;
- dev_priv->display.crtc_disable = hsw_crtc_disable;
+ dev_priv->display = &skl_display_funcs;
} else if (HAS_DDI(dev_priv)) {
- dev_priv->display.get_pipe_config = hsw_get_pipe_config;
- dev_priv->display.crtc_enable = hsw_crtc_enable;
- dev_priv->display.crtc_disable = hsw_crtc_disable;
+ dev_priv->display = &ddi_display_funcs;
} else if (HAS_PCH_SPLIT(dev_priv)) {
- dev_priv->display.get_pipe_config = ilk_get_pipe_config;
- dev_priv->display.crtc_enable = ilk_crtc_enable;
- dev_priv->display.crtc_disable = ilk_crtc_disable;
+ dev_priv->display = &pch_split_display_funcs;
} else if (IS_CHERRYVIEW(dev_priv) ||
IS_VALLEYVIEW(dev_priv)) {
- dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.crtc_enable = valleyview_crtc_enable;
- dev_priv->display.crtc_disable = i9xx_crtc_disable;
+ dev_priv->display = &vlv_display_funcs;
} else {
- dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.crtc_enable = i9xx_crtc_enable;
- dev_priv->display.crtc_disable = i9xx_crtc_disable;
+ dev_priv->display = &i9xx_display_funcs;
}
intel_fdi_init_hook(dev_priv);
-
- if (DISPLAY_VER(dev_priv) >= 9) {
- dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
- dev_priv->display.get_initial_plane_config = skl_get_initial_plane_config;
- } else {
- dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
- dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config;
- }
-
}
void intel_modeset_init_hw(struct drm_i915_private *i915)
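
The preceding hunks convert the display hooks from individually assigned members of dev_priv->display into a single pointer at one of several const vtables, selected once in intel_init_display_hooks(). A minimal standalone sketch of that pattern with toy types (not the driver's actual structures):

#include <stdio.h>

/* Toy version of the new const vtable: all hooks picked together at init. */
struct toy_display_funcs {
        void (*crtc_enable)(int pipe);
        void (*crtc_disable)(int pipe);
};

static void skl_style_enable(int pipe)   { printf("skl-style enable, pipe %d\n", pipe); }
static void skl_style_disable(int pipe)  { printf("skl-style disable, pipe %d\n", pipe); }
static void i9xx_style_enable(int pipe)  { printf("i9xx-style enable, pipe %d\n", pipe); }
static void i9xx_style_disable(int pipe) { printf("i9xx-style disable, pipe %d\n", pipe); }

static const struct toy_display_funcs skl_funcs = {
        .crtc_enable  = skl_style_enable,
        .crtc_disable = skl_style_disable,
};

static const struct toy_display_funcs i9xx_funcs = {
        .crtc_enable  = i9xx_style_enable,
        .crtc_disable = i9xx_style_disable,
};

struct toy_i915 {
        int display_ver;
        const struct toy_display_funcs *display;        /* one pointer, not N hooks */
};

int main(void)
{
        struct toy_i915 i915 = { .display_ver = 12 };

        /* Init-time selection of a whole const table, as in the hunks above. */
        i915.display = i915.display_ver >= 9 ? &skl_funcs : &i9xx_funcs;

        /* Callers dispatch through the table. */
        i915.display->crtc_enable(0);
        i915.display->crtc_disable(0);
        return 0;
}

Keeping the tables const and choosing a whole table at once is the design point: callers such as dev_priv->display->crtc_enable() no longer care how the individual hooks were filled in.
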
@@ -12206,7 +10720,7 @@ static void sanitize_watermarks(struct drm_i915_private *dev_priv)
int i;
/* Only supported on platforms that use atomic watermark design */
- if (!dev_priv->display.optimize_watermarks)
+ if (!dev_priv->wm_disp->optimize_watermarks)
return;
state = drm_atomic_state_alloc(&dev_priv->drm);
@@ -12239,7 +10753,7 @@ retry:
/* Write calculated watermark values back */
for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
crtc_state->wm.need_postvbl_update = true;
- dev_priv->display.optimize_watermarks(intel_state, crtc);
+ intel_optimize_watermarks(intel_state, crtc);
to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
}
@@ -12271,22 +10785,6 @@ fail:
drm_modeset_acquire_fini(&ctx);
}
-static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
-{
- if (IS_IRONLAKE(dev_priv)) {
- u32 fdi_pll_clk =
- intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
-
- dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
- } else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
- dev_priv->fdi_pll_freq = 270000;
- } else {
- return;
- }
-
- drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
-}
-
static int intel_initial_commit(struct drm_device *dev)
{
struct drm_atomic_state *state = NULL;
@@ -12381,7 +10879,7 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
mode_config->funcs = &intel_mode_funcs;
- mode_config->async_page_flip = has_async_flips(i915);
+ mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);
/*
* Maximum framebuffer dimensions, chosen to match
@@ -12420,22 +10918,6 @@ static void intel_mode_config_cleanup(struct drm_i915_private *i915)
drm_mode_config_cleanup(&i915->drm);
}
-static void plane_config_fini(struct intel_initial_plane_config *plane_config)
-{
- if (plane_config->fb) {
- struct drm_framebuffer *fb = &plane_config->fb->base;
-
- /* We may only have the stub and not a full framebuffer */
- if (drm_framebuffer_read_refcount(fb))
- drm_framebuffer_put(fb);
- else
- kfree(fb);
- }
-
- if (plane_config->vma)
- i915_vma_put(plane_config->vma);
-}
-
/* part #1: call before irq install */
int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
@@ -12540,7 +11022,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
intel_plane_possible_crtcs_init(i915);
intel_shared_dpll_init(dev);
- intel_update_fdi_pll_freq(i915);
+ intel_fdi_pll_freq_update(i915);
intel_update_czclk(i915);
intel_modeset_init_hw(i915);
@@ -12564,30 +11046,13 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
+ intel_acpi_assign_connector_fwnodes(i915);
drm_modeset_unlock_all(dev);
for_each_intel_crtc(dev, crtc) {
- struct intel_initial_plane_config plane_config = {};
-
if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
continue;
-
- /*
- * Note that reserving the BIOS fb up front prevents us
- * from stuffing other stolen allocations like the ring
- * on top. This prevents some ugliness at boot time, and
- * can even allow for smooth boot transitions if the BIOS
- * fb is large enough for the active pipe configuration.
- */
- i915->display.get_initial_plane_config(crtc, &plane_config);
-
- /*
- * If the fb is shared between multiple heads, we'll
- * just get the first one.
- */
- intel_find_initial_plane_obj(crtc, &plane_config);
-
- plane_config_fini(&plane_config);
+ intel_crtc_initial_plane_config(crtc);
}
/*
@@ -12869,13 +11334,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
intel_plane_disable_noatomic(crtc, plane);
}
- /*
- * Disable any background color set by the BIOS, but enable the
- * gamma and CSC to match how we program our planes.
- */
- if (DISPLAY_VER(dev_priv) >= 9)
- intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
- SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
+ /* Disable any background color/etc. set by the BIOS */
+ intel_color_commit(crtc_state);
}
/* Adjust the state of the output pipe according to whether we
@@ -13076,24 +11536,21 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
enableddisabled(crtc_state->hw.active));
}
- dev_priv->active_pipes = cdclk_state->active_pipes =
- dbuf_state->active_pipes = active_pipes;
+ cdclk_state->active_pipes = dbuf_state->active_pipes = active_pipes;
readout_plane_state(dev_priv);
for_each_intel_encoder(dev, encoder) {
+ struct intel_crtc_state *crtc_state = NULL;
+
pipe = 0;
if (encoder->get_hw_state(encoder, &pipe)) {
- struct intel_crtc_state *crtc_state;
-
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
crtc_state = to_intel_crtc_state(crtc->base.state);
encoder->base.crtc = &crtc->base;
intel_encoder_get_config(encoder, crtc_state);
- if (encoder->sync_state)
- encoder->sync_state(encoder, crtc_state);
/* read out to slave crtc as well for bigjoiner */
if (crtc_state->bigjoiner) {
@@ -13108,6 +11565,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
encoder->base.crtc = NULL;
}
+ if (encoder->sync_state)
+ encoder->sync_state(encoder, crtc_state);
+
drm_dbg_kms(&dev_priv->drm,
"[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
encoder->base.base.id, encoder->base.name,
@@ -13390,17 +11850,6 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
intel_modeset_readout_hw_state(dev);
/* HW state is read out, now we need to sanitize this mess. */
-
- /* Sanitize the TypeC port mode upfront, encoders depend on this */
- for_each_intel_encoder(dev, encoder) {
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
-
- /* We need to sanitize only the MST primary port. */
- if (encoder->type != INTEL_OUTPUT_DP_MST &&
- intel_phy_is_tc(dev_priv, phy))
- intel_tc_port_sanitize(enc_to_dig_port(encoder));
- }
-
get_encoder_power_domains(dev_priv);
if (HAS_PCH_IBX(dev_priv))