Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 5
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_plane.c | 123
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_plane.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 28
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 166
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_combo_phy.c | 23
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc.c | 276
-rw-r--r--  drivers/gpu/drm/i915/display/intel_csr.c | 12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 2645
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.h | 23
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c | 1394
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h | 100
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 2623
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h | 48
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.c | 132
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 231
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.h | 32
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 111
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 134
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_hdcp.c | 31
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.c | 509
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.h | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 95
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fdi.c | 138
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fdi.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_frontbuffer.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_gmbus.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 67
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lpe_audio.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_opregion.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_overlay.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pps.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 611
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.h | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_quirks.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.c | 1640
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.h | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vbt_defs.h | 27
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vga.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/skl_scaler.c | 556
-rw-r--r--  drivers/gpu/drm/i915/display/skl_scaler.h | 29
-rw-r--r--  drivers/gpu/drm/i915/display/skl_universal_plane.c | 2266
-rw-r--r--  drivers/gpu/drm/i915/display/skl_universal_plane.h | 37
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_phys.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ppgtt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rc6.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_region_lmem.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 68
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/cfg_space.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/firmware.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 84
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 130
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_getparam.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 106
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 84
-rw-r--r--  drivers/gpu/drm/i915/i915_perf_types.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 53
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_switcheroo.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_dram.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_pch.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_pch.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 178
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.h | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 20
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gtt.c | 2
96 files changed, 8277 insertions, 6885 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 2385a7505f5d..921db06232c3 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -223,7 +223,9 @@ i915-y += \
display/intel_sprite.o \
display/intel_tc.o \
display/intel_vga.o \
- display/i9xx_plane.o
+ display/i9xx_plane.o \
+ display/skl_scaler.o \
+ display/skl_universal_plane.o
i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
display/intel_opregion.o
@@ -241,6 +243,7 @@ i915-y += \
display/icl_dsi.o \
display/intel_crt.o \
display/intel_ddi.o \
+ display/intel_ddi_buf_trans.o \
display/intel_dp.o \
display/intel_dp_aux.o \
display/intel_dp_aux_backlight.o \
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index e3e69e6cef65..8a52beaed2da 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -770,10 +770,6 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
int num_formats;
int ret, zpos;
- if (INTEL_GEN(dev_priv) >= 9)
- return skl_universal_plane_create(dev_priv, pipe,
- PLANE_PRIMARY);
-
plane = intel_plane_alloc();
if (IS_ERR(plane))
return plane;
@@ -924,3 +920,122 @@ fail:
return ERR_PTR(ret);
}
+static int i9xx_format_to_fourcc(int format)
+{
+ switch (format) {
+ case DISPPLANE_8BPP:
+ return DRM_FORMAT_C8;
+ case DISPPLANE_BGRA555:
+ return DRM_FORMAT_ARGB1555;
+ case DISPPLANE_BGRX555:
+ return DRM_FORMAT_XRGB1555;
+ case DISPPLANE_BGRX565:
+ return DRM_FORMAT_RGB565;
+ default:
+ case DISPPLANE_BGRX888:
+ return DRM_FORMAT_XRGB8888;
+ case DISPPLANE_RGBX888:
+ return DRM_FORMAT_XBGR8888;
+ case DISPPLANE_BGRA888:
+ return DRM_FORMAT_ARGB8888;
+ case DISPPLANE_RGBA888:
+ return DRM_FORMAT_ABGR8888;
+ case DISPPLANE_BGRX101010:
+ return DRM_FORMAT_XRGB2101010;
+ case DISPPLANE_RGBX101010:
+ return DRM_FORMAT_XBGR2101010;
+ case DISPPLANE_BGRA101010:
+ return DRM_FORMAT_ARGB2101010;
+ case DISPPLANE_RGBA101010:
+ return DRM_FORMAT_ABGR2101010;
+ case DISPPLANE_RGBX161616:
+ return DRM_FORMAT_XBGR16161616F;
+ }
+}
+
+void
+i9xx_get_initial_plane_config(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ enum pipe pipe;
+ u32 val, base, offset;
+ int fourcc, pixel_format;
+ unsigned int aligned_height;
+ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
+
+ if (!plane->get_hw_state(plane, &pipe))
+ return;
+
+ drm_WARN_ON(dev, pipe != crtc->pipe);
+
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb) {
+ drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
+ return;
+ }
+
+ fb = &intel_fb->base;
+
+ fb->dev = dev;
+
+ val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
+
+ if (INTEL_GEN(dev_priv) >= 4) {
+ if (val & DISPPLANE_TILED) {
+ plane_config->tiling = I915_TILING_X;
+ fb->modifier = I915_FORMAT_MOD_X_TILED;
+ }
+
+ if (val & DISPPLANE_ROTATE_180)
+ plane_config->rotation = DRM_MODE_ROTATE_180;
+ }
+
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
+ val & DISPPLANE_MIRROR)
+ plane_config->rotation |= DRM_MODE_REFLECT_X;
+
+ pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
+ fourcc = i9xx_format_to_fourcc(pixel_format);
+ fb->format = drm_format_info(fourcc);
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane));
+ base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
+ } else if (INTEL_GEN(dev_priv) >= 4) {
+ if (plane_config->tiling)
+ offset = intel_de_read(dev_priv,
+ DSPTILEOFF(i9xx_plane));
+ else
+ offset = intel_de_read(dev_priv,
+ DSPLINOFF(i9xx_plane));
+ base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
+ } else {
+ base = intel_de_read(dev_priv, DSPADDR(i9xx_plane));
+ }
+ plane_config->base = base;
+
+ val = intel_de_read(dev_priv, PIPESRC(pipe));
+ fb->width = ((val >> 16) & 0xfff) + 1;
+ fb->height = ((val >> 0) & 0xfff) + 1;
+
+ val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane));
+ fb->pitches[0] = val & 0xffffffc0;
+
+ aligned_height = intel_fb_align_height(fb, 0, fb->height);
+
+ plane_config->size = fb->pitches[0] * aligned_height;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ crtc->base.name, plane->base.name, fb->width, fb->height,
+ fb->format->cpp[0] * 8, base, fb->pitches[0],
+ plane_config->size);
+
+ plane_config->fb = intel_fb;
+}
+
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.h b/drivers/gpu/drm/i915/display/i9xx_plane.h
index ca963c2a8457..027b66053984 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.h
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.h
@@ -10,6 +10,8 @@
enum pipe;
struct drm_i915_private;
+struct intel_crtc;
+struct intel_initial_plane_config;
struct intel_plane;
struct intel_plane_state;
@@ -21,4 +23,6 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe);
+void i9xx_get_initial_plane_config(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config);
#endif
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 9d245a689323..7f2abc088a66 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -35,6 +35,8 @@
#include "intel_dsi.h"
#include "intel_panel.h"
#include "intel_vdsc.h"
+#include "skl_scaler.h"
+#include "skl_universal_plane.h"
static int header_credits_available(struct drm_i915_private *dev_priv,
enum transcoder dsi_trans)
@@ -653,6 +655,24 @@ static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpll.lock);
}
+static bool gen11_dsi_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ bool clock_enabled = false;
+ enum phy phy;
+ u32 tmp;
+
+ tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
+
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ if (!(tmp & ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)))
+ clock_enabled = true;
+ }
+
+ return clock_enabled;
+}
+
static void gen11_dsi_map_pll(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
@@ -1488,14 +1508,10 @@ static void gen11_dsi_get_cmd_mode_config(struct intel_dsi *intel_dsi,
static void gen11_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- /* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
- pipe_config->port_clock = intel_dpll_get_freq(i915,
- pipe_config->shared_dpll,
- &pipe_config->dpll_hw_state);
+ intel_ddi_get_clock(encoder, pipe_config, icl_ddi_combo_get_pll(encoder));
pipe_config->hw.adjusted_mode.crtc_clock = intel_dsi->pclk;
if (intel_dsi->dual_link)
@@ -1940,6 +1956,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
encoder->pipe_mask = ~0;
encoder->power_domain = POWER_DOMAIN_PORT_DSI;
encoder->get_power_domains = gen11_dsi_get_power_domains;
+ encoder->disable_clock = gen11_dsi_gate_clocks;
+ encoder->is_clock_enabled = gen11_dsi_is_clock_enabled;
/* register DSI connector with DRM subsystem */
drm_connector_init(dev, connector, &gen11_dsi_connector_funcs,
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index e00fdc47c0eb..27f7d7109ca3 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -40,7 +40,7 @@
#include "intel_global_state.h"
#include "intel_hdcp.h"
#include "intel_psr.h"
-#include "intel_sprite.h"
+#include "skl_universal_plane.h"
/**
* intel_digital_connector_atomic_get_property - hook for connector->atomic_get_property.
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 987cf509337f..f3fa1441ce16 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -1630,16 +1630,36 @@ static const u8 rkl_pch_tgp_ddc_pin_map[] = {
[RKL_DDC_BUS_DDI_E] = GMBUS_PIN_10_TC2_ICP,
};
+static const u8 adls_ddc_pin_map[] = {
+ [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+ [ADLS_DDC_BUS_PORT_TC1] = GMBUS_PIN_9_TC1_ICP,
+ [ADLS_DDC_BUS_PORT_TC2] = GMBUS_PIN_10_TC2_ICP,
+ [ADLS_DDC_BUS_PORT_TC3] = GMBUS_PIN_11_TC3_ICP,
+ [ADLS_DDC_BUS_PORT_TC4] = GMBUS_PIN_12_TC4_ICP,
+};
+
+static const u8 gen9bc_tgp_ddc_pin_map[] = {
+ [DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+ [DDC_BUS_DDI_C] = GMBUS_PIN_9_TC1_ICP,
+ [DDC_BUS_DDI_D] = GMBUS_PIN_10_TC2_ICP,
+};
+
static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
{
const u8 *ddc_pin_map;
int n_entries;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) {
+ if (HAS_PCH_ADP(dev_priv)) {
+ ddc_pin_map = adls_ddc_pin_map;
+ n_entries = ARRAY_SIZE(adls_ddc_pin_map);
+ } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) {
return vbt_pin;
} else if (IS_ROCKETLAKE(dev_priv) && INTEL_PCH_TYPE(dev_priv) == PCH_TGP) {
ddc_pin_map = rkl_pch_tgp_ddc_pin_map;
n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map);
+ } else if (HAS_PCH_TGP(dev_priv) && IS_GEN9_BC(dev_priv)) {
+ ddc_pin_map = gen9bc_tgp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(gen9bc_tgp_ddc_pin_map);
} else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) {
ddc_pin_map = icp_ddc_pin_map;
n_entries = ARRAY_SIZE(icp_ddc_pin_map);
@@ -1708,8 +1728,26 @@ static enum port dvo_port_to_port(struct drm_i915_private *dev_priv,
[PORT_TC1] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 },
[PORT_TC2] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 },
};
+ /*
+ * Alderlake S ports used in the driver are PORT_A, PORT_D, PORT_E,
+ * PORT_F and PORT_G, we need to map that to correct VBT sections.
+ */
+ static const int adls_port_mapping[][3] = {
+ [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1 },
+ [PORT_B] = { -1 },
+ [PORT_C] = { -1 },
+ [PORT_TC1] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1 },
+ [PORT_TC2] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 },
+ [PORT_TC3] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 },
+ [PORT_TC4] = { DVO_PORT_HDMIE, DVO_PORT_DPE, -1 },
+ };
- if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
+ if (IS_ALDERLAKE_S(dev_priv))
+ return __dvo_port_to_port(ARRAY_SIZE(adls_port_mapping),
+ ARRAY_SIZE(adls_port_mapping[0]),
+ adls_port_mapping,
+ dvo_port);
+ else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
return __dvo_port_to_port(ARRAY_SIZE(rkl_port_mapping),
ARRAY_SIZE(rkl_port_mapping[0]),
rkl_port_mapping,
@@ -1721,6 +1759,44 @@ static enum port dvo_port_to_port(struct drm_i915_private *dev_priv,
dvo_port);
}
+static int parse_bdb_230_dp_max_link_rate(const int vbt_max_link_rate)
+{
+ switch (vbt_max_link_rate) {
+ default:
+ case BDB_230_VBT_DP_MAX_LINK_RATE_DEF:
+ return 0;
+ case BDB_230_VBT_DP_MAX_LINK_RATE_UHBR20:
+ return 2000000;
+ case BDB_230_VBT_DP_MAX_LINK_RATE_UHBR13P5:
+ return 1350000;
+ case BDB_230_VBT_DP_MAX_LINK_RATE_UHBR10:
+ return 1000000;
+ case BDB_230_VBT_DP_MAX_LINK_RATE_HBR3:
+ return 810000;
+ case BDB_230_VBT_DP_MAX_LINK_RATE_HBR2:
+ return 540000;
+ case BDB_230_VBT_DP_MAX_LINK_RATE_HBR:
+ return 270000;
+ case BDB_230_VBT_DP_MAX_LINK_RATE_LBR:
+ return 162000;
+ }
+}
+
+static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate)
+{
+ switch (vbt_max_link_rate) {
+ default:
+ case BDB_216_VBT_DP_MAX_LINK_RATE_HBR3:
+ return 810000;
+ case BDB_216_VBT_DP_MAX_LINK_RATE_HBR2:
+ return 540000;
+ case BDB_216_VBT_DP_MAX_LINK_RATE_HBR:
+ return 270000;
+ case BDB_216_VBT_DP_MAX_LINK_RATE_LBR:
+ return 162000;
+ }
+}
+
static void parse_ddi_port(struct drm_i915_private *dev_priv,
struct display_device_data *devdata,
u8 bdb_version)
@@ -1800,7 +1876,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
/* The VBT HDMI level shift values match the table we have. */
u8 hdmi_level_shift = child->hdmi_level_shifter_value;
drm_dbg_kms(&dev_priv->drm,
- "VBT HDMI level shift for port %c: %d\n",
+ "Port %c VBT HDMI level shift: %d\n",
port_name(port),
hdmi_level_shift);
info->hdmi_level_shift = hdmi_level_shift;
@@ -1827,7 +1903,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
if (max_tmds_clock)
drm_dbg_kms(&dev_priv->drm,
- "VBT HDMI max TMDS clock for port %c: %d kHz\n",
+ "Port %c VBT HDMI max TMDS clock: %d kHz\n",
port_name(port), max_tmds_clock);
info->max_tmds_clock = max_tmds_clock;
}
@@ -1836,33 +1912,23 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
if (bdb_version >= 196 && child->iboost) {
info->dp_boost_level = translate_iboost(child->dp_iboost_level);
drm_dbg_kms(&dev_priv->drm,
- "VBT (e)DP boost level for port %c: %d\n",
+ "Port %c VBT (e)DP boost level: %d\n",
port_name(port), info->dp_boost_level);
info->hdmi_boost_level = translate_iboost(child->hdmi_iboost_level);
drm_dbg_kms(&dev_priv->drm,
- "VBT HDMI boost level for port %c: %d\n",
+ "Port %c VBT HDMI boost level: %d\n",
port_name(port), info->hdmi_boost_level);
}
/* DP max link rate for CNL+ */
if (bdb_version >= 216) {
- switch (child->dp_max_link_rate) {
- default:
- case VBT_DP_MAX_LINK_RATE_HBR3:
- info->dp_max_link_rate = 810000;
- break;
- case VBT_DP_MAX_LINK_RATE_HBR2:
- info->dp_max_link_rate = 540000;
- break;
- case VBT_DP_MAX_LINK_RATE_HBR:
- info->dp_max_link_rate = 270000;
- break;
- case VBT_DP_MAX_LINK_RATE_LBR:
- info->dp_max_link_rate = 162000;
- break;
- }
+ if (bdb_version >= 230)
+ info->dp_max_link_rate = parse_bdb_230_dp_max_link_rate(child->dp_max_link_rate);
+ else
+ info->dp_max_link_rate = parse_bdb_216_dp_max_link_rate(child->dp_max_link_rate);
+
drm_dbg_kms(&dev_priv->drm,
- "VBT DP max link rate for port %c: %d\n",
+ "Port %c VBT DP max link rate: %d\n",
port_name(port), info->dp_max_link_rate);
}
@@ -2098,7 +2164,7 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size)
static struct vbt_header *oprom_get_vbt(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
void __iomem *p = NULL, *oprom;
struct vbt_header *vbt;
u16 vbt_size;
@@ -2645,6 +2711,23 @@ intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
return HAS_LSPCON(i915) && child && child->lspcon;
}
+/**
+ * intel_bios_is_lane_reversal_needed - if lane reversal needed on port
+ * @i915: i915 device instance
+ * @port: port to check
+ *
+ * Return true if port requires lane reversal
+ */
+bool
+intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915,
+ enum port port)
+{
+ const struct child_device_config *child =
+ i915->vbt.ddi_port_info[port].child;
+
+ return child && child->lane_reversal;
+}
+
enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
enum port port)
{
@@ -2661,27 +2744,44 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
return aux_ch;
}
+ /*
+ * RKL/DG1 VBT uses PHY based mapping. Combo PHYs A,B,C,D
+ * map to DDI A,B,TC1,TC2 respectively.
+ *
+ * ADL-S VBT uses PHY based mapping. Combo PHYs A,B,C,D,E
+ * map to DDI A,TC1,TC2,TC3,TC4 respectively.
+ */
switch (info->alternate_aux_channel) {
case DP_AUX_A:
aux_ch = AUX_CH_A;
break;
case DP_AUX_B:
- aux_ch = AUX_CH_B;
+ if (IS_ALDERLAKE_S(dev_priv))
+ aux_ch = AUX_CH_USBC1;
+ else
+ aux_ch = AUX_CH_B;
break;
case DP_AUX_C:
- /*
- * RKL/DG1 VBT uses PHY based mapping. Combo PHYs A,B,C,D
- * map to DDI A,B,TC1,TC2 respectively.
- */
- aux_ch = (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) ?
- AUX_CH_USBC1 : AUX_CH_C;
+ if (IS_ALDERLAKE_S(dev_priv))
+ aux_ch = AUX_CH_USBC2;
+ else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
+ aux_ch = AUX_CH_USBC1;
+ else
+ aux_ch = AUX_CH_C;
break;
case DP_AUX_D:
- aux_ch = (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) ?
- AUX_CH_USBC2 : AUX_CH_D;
+ if (IS_ALDERLAKE_S(dev_priv))
+ aux_ch = AUX_CH_USBC3;
+ else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
+ aux_ch = AUX_CH_USBC2;
+ else
+ aux_ch = AUX_CH_D;
break;
case DP_AUX_E:
- aux_ch = AUX_CH_E;
+ if (IS_ALDERLAKE_S(dev_priv))
+ aux_ch = AUX_CH_USBC4;
+ else
+ aux_ch = AUX_CH_E;
break;
case DP_AUX_F:
aux_ch = AUX_CH_F;
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index e29e79faa01b..f25190ecfe97 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -241,6 +241,8 @@ bool intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
enum port port);
bool intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
enum port port);
+bool intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915,
+ enum port port);
enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 4b5a30ac84bc..d122b9965532 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -78,7 +78,17 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->num_points = dram_info->num_qgv_points;
if (IS_GEN(dev_priv, 12))
- qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 16;
+ switch (dram_info->type) {
+ case INTEL_DRAM_DDR4:
+ qi->t_bl = 4;
+ break;
+ case INTEL_DRAM_DDR5:
+ qi->t_bl = 8;
+ break;
+ default:
+ qi->t_bl = 16;
+ break;
+ }
else if (IS_GEN(dev_priv, 11))
qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8;
@@ -142,6 +152,12 @@ static const struct intel_sa_info rkl_sa_info = {
.displayrtids = 128,
};
+static const struct intel_sa_info adls_sa_info = {
+ .deburst = 16,
+ .deprogbwlimit = 38, /* GB/s */
+ .displayrtids = 256,
+};
+
static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
struct intel_qgv_info qi = {};
@@ -251,7 +267,9 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- if (IS_ROCKETLAKE(dev_priv))
+ if (IS_ALDERLAKE_S(dev_priv))
+ icl_get_bw_info(dev_priv, &adls_sa_info);
+ else if (IS_ROCKETLAKE(dev_priv))
icl_get_bw_info(dev_priv, &rkl_sa_info);
else if (IS_GEN(dev_priv, 12))
icl_get_bw_info(dev_priv, &tgl_sa_info);
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 2e878cc274b7..a9019287f7d5 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -96,7 +96,7 @@ static void fixed_450mhz_get_cdclk(struct drm_i915_private *dev_priv,
static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u16 hpllcc = 0;
/*
@@ -138,7 +138,7 @@ static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
static void i915gm_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u16 gcfgc = 0;
pci_read_config_word(pdev, GCFGC, &gcfgc);
@@ -162,7 +162,7 @@ static void i915gm_get_cdclk(struct drm_i915_private *dev_priv,
static void i945gm_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u16 gcfgc = 0;
pci_read_config_word(pdev, GCFGC, &gcfgc);
@@ -256,7 +256,7 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
static void g33_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
static const u8 div_3200[] = { 12, 10, 8, 7, 5, 16 };
static const u8 div_4000[] = { 14, 12, 10, 8, 6, 20 };
static const u8 div_4800[] = { 20, 14, 12, 10, 8, 24 };
@@ -305,7 +305,7 @@ fail:
static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u16 gcfgc = 0;
pci_read_config_word(pdev, GCFGC, &gcfgc);
@@ -339,7 +339,7 @@ static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
static const u8 div_3200[] = { 16, 10, 8 };
static const u8 div_4000[] = { 20, 12, 10 };
static const u8 div_5333[] = { 24, 16, 14 };
@@ -384,7 +384,7 @@ fail:
static void gm45_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
unsigned int cdclk_sel;
u16 tmp = 0;
@@ -2145,10 +2145,10 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
if (IS_ERR(bw_state))
return PTR_ERR(bw_state);
- if (cdclk_state->min_cdclk[i] == min_cdclk)
+ if (cdclk_state->min_cdclk[crtc->pipe] == min_cdclk)
continue;
- cdclk_state->min_cdclk[i] = min_cdclk;
+ cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
ret = intel_atomic_lock_global_state(&cdclk_state->base);
if (ret)
@@ -2199,10 +2199,10 @@ static int bxt_compute_min_voltage_level(struct intel_cdclk_state *cdclk_state)
else
min_voltage_level = 0;
- if (cdclk_state->min_voltage_level[i] == min_voltage_level)
+ if (cdclk_state->min_voltage_level[crtc->pipe] == min_voltage_level)
continue;
- cdclk_state->min_voltage_level[i] = min_voltage_level;
+ cdclk_state->min_voltage_level[crtc->pipe] = min_voltage_level;
ret = intel_atomic_lock_global_state(&cdclk_state->base);
if (ret)
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 996ae0608a62..c55813c6194a 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -187,10 +187,16 @@ static bool has_phy_misc(struct drm_i915_private *i915, enum phy phy)
* Some platforms only expect PHY_MISC to be programmed for PHY-A and
* PHY-B and may not even have instances of the register for the
* other combo PHY's.
+ *
+ * ADL-S technically has three instances of PHY_MISC, but only requires
+ * that we program it for PHY A.
*/
- if (IS_JSL_EHL(i915) ||
- IS_ROCKETLAKE(i915) ||
- IS_DG1(i915))
+
+ if (IS_ALDERLAKE_S(i915))
+ return phy == PHY_A;
+ else if (IS_JSL_EHL(i915) ||
+ IS_ROCKETLAKE(i915) ||
+ IS_DG1(i915))
return phy < PHY_C;
return true;
@@ -246,14 +252,21 @@ static bool phy_is_master(struct drm_i915_private *dev_priv, enum phy phy)
* RKL,DG1:
* A(master) -> B(slave)
* C(master) -> D(slave)
+ * ADL-S:
+ * A(master) -> B(slave), C(slave)
+ * D(master) -> E(slave)
*
* We must set the IREFGEN bit for any PHY acting as a master
* to another PHY.
*/
- if ((IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) && phy == PHY_C)
+ if (phy == PHY_A)
return true;
+ else if (IS_ALDERLAKE_S(dev_priv))
+ return phy == PHY_D;
+ else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
+ return phy == PHY_C;
- return phy == PHY_A;
+ return false;
}
static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 4934edd51cb0..7f3d11c5ce3e 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -38,6 +38,7 @@
#include "intel_crt.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
+#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hotplug.h"
@@ -141,7 +142,7 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- intel_ddi_get_config(encoder, pipe_config);
+ hsw_ddi_get_config(encoder, pipe_config);
pipe_config->hw.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
DRM_MODE_FLAG_NHSYNC |
@@ -1075,6 +1076,9 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
crt->base.enable = hsw_enable_crt;
crt->base.disable = hsw_disable_crt;
crt->base.post_disable = hsw_post_disable_crt;
+ crt->base.enable_clock = hsw_ddi_enable_clock;
+ crt->base.disable_clock = hsw_ddi_disable_clock;
+ crt->base.is_clock_enabled = hsw_ddi_is_clock_enabled;
} else {
if (HAS_PCH_SPLIT(dev_priv)) {
crt->base.compute_config = pch_crt_compute_config;
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 8e77ca7ddf11..3248f49999bb 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -10,6 +10,9 @@
#include <drm/drm_plane.h>
#include <drm/drm_plane_helper.h>
+#include "i915_trace.h"
+#include "i915_vgpu.h"
+
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_color.h"
@@ -17,9 +20,13 @@
#include "intel_cursor.h"
#include "intel_display_debugfs.h"
#include "intel_display_types.h"
+#include "intel_dsi.h"
#include "intel_pipe_crc.h"
+#include "intel_psr.h"
#include "intel_sprite.h"
+#include "intel_vrr.h"
#include "i9xx_plane.h"
+#include "skl_universal_plane.h"
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
@@ -32,6 +39,9 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
+ if (!crtc->active)
+ return 0;
+
if (!vblank->max_vblank_count)
return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
@@ -41,8 +51,6 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- u32 mode_flags = crtc->mode_flags;
/*
* From Gen 11, In case of dsi cmd mode, frame counter wouldnt
@@ -50,7 +58,8 @@ u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
* the hw counter, then we would find it updated in only
* the next TE, hence switching to sw counter.
*/
- if (mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 | I915_MODE_FLAG_DSI_USE_TE1))
+ if (crtc_state->mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 |
+ I915_MODE_FLAG_DSI_USE_TE1))
return 0;
/*
@@ -77,12 +86,26 @@ void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
drm_crtc_set_max_vblank_count(&crtc->base,
intel_crtc_max_vblank_count(crtc_state));
drm_crtc_vblank_on(&crtc->base);
+
+ /*
+ * Should really happen exactly when we enable the pipe
+ * but we want the frame counters in the trace, and that
+ * requires vblank support on some platforms/outputs.
+ */
+ trace_intel_pipe_enable(crtc);
}
void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ /*
+ * Should really happen exactly when we disable the pipe
+ * but we want the frame counters in the trace, and that
+ * requires vblank support on some platforms/outputs.
+ */
+ trace_intel_pipe_disable(crtc);
+
drm_crtc_vblank_off(&crtc->base);
assert_vblank_disabled(&crtc->base);
}
@@ -242,7 +265,11 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
crtc->pipe = pipe;
crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];
- primary = intel_primary_plane_create(dev_priv, pipe);
+ if (INTEL_GEN(dev_priv) >= 9)
+ primary = skl_universal_plane_create(dev_priv, pipe,
+ PLANE_PRIMARY);
+ else
+ primary = intel_primary_plane_create(dev_priv, pipe);
if (IS_ERR(primary)) {
ret = PTR_ERR(primary);
goto fail;
@@ -252,7 +279,11 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
for_each_sprite(dev_priv, pipe, sprite) {
struct intel_plane *plane;
- plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
+ if (INTEL_GEN(dev_priv) >= 9)
+ plane = skl_universal_plane_create(dev_priv, pipe,
+ PLANE_SPRITE0 + sprite);
+ else
+ plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
goto fail;
@@ -322,3 +353,238 @@ fail:
return ret;
}
+
+int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
+ int usecs)
+{
+ /* paranoia */
+ if (!adjusted_mode->crtc_htotal)
+ return 1;
+
+ return DIV_ROUND_UP(usecs * adjusted_mode->crtc_clock,
+ 1000 * adjusted_mode->crtc_htotal);
+}
+
+static int intel_mode_vblank_start(const struct drm_display_mode *mode)
+{
+ int vblank_start = mode->crtc_vblank_start;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vblank_start = DIV_ROUND_UP(vblank_start, 2);
+
+ return vblank_start;
+}
+
+/**
+ * intel_pipe_update_start() - start update of a set of display registers
+ * @new_crtc_state: the new crtc state
+ *
+ * Mark the start of an update to pipe registers that should be updated
+ * atomically regarding vblank. If the next vblank will happens within
+ * the next 100 us, this function waits until the vblank passes.
+ *
+ * After a successful call to this function, interrupts will be disabled
+ * until a subsequent call to intel_pipe_update_end(). That is done to
+ * avoid random delays.
+ */
+void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_display_mode *adjusted_mode = &new_crtc_state->hw.adjusted_mode;
+ long timeout = msecs_to_jiffies_timeout(1);
+ int scanline, min, max, vblank_start;
+ wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
+ bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
+ DEFINE_WAIT(wait);
+
+ if (new_crtc_state->uapi.async_flip)
+ return;
+
+ if (new_crtc_state->vrr.enable)
+ vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state);
+ else
+ vblank_start = intel_mode_vblank_start(adjusted_mode);
+
+ /* FIXME needs to be calibrated sensibly */
+ min = vblank_start - intel_usecs_to_scanlines(adjusted_mode,
+ VBLANK_EVASION_TIME_US);
+ max = vblank_start - 1;
+
+ if (min <= 0 || max <= 0)
+ goto irq_disable;
+
+ if (drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base)))
+ goto irq_disable;
+
+ /*
+ * Wait for psr to idle out after enabling the VBL interrupts
+ * VBL interrupts will start the PSR exit and prevent a PSR
+ * re-entry as well.
+ */
+ intel_psr_wait_for_idle(new_crtc_state);
+
+ local_irq_disable();
+
+ crtc->debug.min_vbl = min;
+ crtc->debug.max_vbl = max;
+ trace_intel_pipe_update_start(crtc);
+
+ for (;;) {
+ /*
+ * prepare_to_wait() has a memory barrier, which guarantees
+ * other CPUs can see the task state update by the time we
+ * read the scanline.
+ */
+ prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
+
+ scanline = intel_get_crtc_scanline(crtc);
+ if (scanline < min || scanline > max)
+ break;
+
+ if (!timeout) {
+ drm_err(&dev_priv->drm,
+ "Potential atomic update failure on pipe %c\n",
+ pipe_name(crtc->pipe));
+ break;
+ }
+
+ local_irq_enable();
+
+ timeout = schedule_timeout(timeout);
+
+ local_irq_disable();
+ }
+
+ finish_wait(wq, &wait);
+
+ drm_crtc_vblank_put(&crtc->base);
+
+ /*
+ * On VLV/CHV DSI the scanline counter would appear to
+ * increment approx. 1/3 of a scanline before start of vblank.
+ * The registers still get latched at start of vblank however.
+ * This means we must not write any registers on the first
+ * line of vblank (since not the whole line is actually in
+ * vblank). And unfortunately we can't use the interrupt to
+ * wait here since it will fire too soon. We could use the
+ * frame start interrupt instead since it will fire after the
+ * critical scanline, but that would require more changes
+ * in the interrupt code. So for now we'll just do the nasty
+ * thing and poll for the bad scanline to pass us by.
+ *
+ * FIXME figure out if BXT+ DSI suffers from this as well
+ */
+ while (need_vlv_dsi_wa && scanline == vblank_start)
+ scanline = intel_get_crtc_scanline(crtc);
+
+ crtc->debug.scanline_start = scanline;
+ crtc->debug.start_vbl_time = ktime_get();
+ crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
+
+ trace_intel_pipe_update_vblank_evaded(crtc);
+ return;
+
+irq_disable:
+ local_irq_disable();
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
+static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end)
+{
+ u64 delta = ktime_to_ns(ktime_sub(end, crtc->debug.start_vbl_time));
+ unsigned int h;
+
+ h = ilog2(delta >> 9);
+ if (h >= ARRAY_SIZE(crtc->debug.vbl.times))
+ h = ARRAY_SIZE(crtc->debug.vbl.times) - 1;
+ crtc->debug.vbl.times[h]++;
+
+ crtc->debug.vbl.sum += delta;
+ if (!crtc->debug.vbl.min || delta < crtc->debug.vbl.min)
+ crtc->debug.vbl.min = delta;
+ if (delta > crtc->debug.vbl.max)
+ crtc->debug.vbl.max = delta;
+
+ if (delta > 1000 * VBLANK_EVASION_TIME_US) {
+ drm_dbg_kms(crtc->base.dev,
+ "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
+ pipe_name(crtc->pipe),
+ div_u64(delta, 1000),
+ VBLANK_EVASION_TIME_US);
+ crtc->debug.vbl.over++;
+ }
+}
+#else
+static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {}
+#endif
+
+/**
+ * intel_pipe_update_end() - end update of a set of display registers
+ * @new_crtc_state: the new crtc state
+ *
+ * Mark the end of an update started with intel_pipe_update_start(). This
+ * re-enables interrupts and verifies the update was actually completed
+ * before a vblank.
+ */
+void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ enum pipe pipe = crtc->pipe;
+ int scanline_end = intel_get_crtc_scanline(crtc);
+ u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
+ ktime_t end_vbl_time = ktime_get();
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (new_crtc_state->uapi.async_flip)
+ return;
+
+ trace_intel_pipe_update_end(crtc, end_vbl_count, scanline_end);
+
+ /*
+ * Incase of mipi dsi command mode, we need to set frame update
+ * request for every commit.
+ */
+ if (INTEL_GEN(dev_priv) >= 11 &&
+ intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
+ icl_dsi_frame_update(new_crtc_state);
+
+ /* We're still in the vblank-evade critical section, this can't race.
+ * Would be slightly nice to just grab the vblank count and arm the
+ * event outside of the critical section - the spinlock might spin for a
+ * while ... */
+ if (new_crtc_state->uapi.event) {
+ drm_WARN_ON(&dev_priv->drm,
+ drm_crtc_vblank_get(&crtc->base) != 0);
+
+ spin_lock(&crtc->base.dev->event_lock);
+ drm_crtc_arm_vblank_event(&crtc->base,
+ new_crtc_state->uapi.event);
+ spin_unlock(&crtc->base.dev->event_lock);
+
+ new_crtc_state->uapi.event = NULL;
+ }
+
+ local_irq_enable();
+
+ /* Send VRR Push to terminate Vblank */
+ intel_vrr_send_push(new_crtc_state);
+
+ if (intel_vgpu_active(dev_priv))
+ return;
+
+ if (crtc->debug.start_vbl_count &&
+ crtc->debug.start_vbl_count != end_vbl_count) {
+ drm_err(&dev_priv->drm,
+ "Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
+ pipe_name(pipe), crtc->debug.start_vbl_count,
+ end_vbl_count,
+ ktime_us_delta(end_vbl_time,
+ crtc->debug.start_vbl_time),
+ crtc->debug.min_vbl, crtc->debug.max_vbl,
+ crtc->debug.scanline_start, scanline_end);
+ }
+
+ dbg_vblank_evade(crtc, end_vbl_time);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_csr.c b/drivers/gpu/drm/i915/display/intel_csr.c
index 67dc64df78a5..42005c1b5f0e 100644
--- a/drivers/gpu/drm/i915/display/intel_csr.c
+++ b/drivers/gpu/drm/i915/display/intel_csr.c
@@ -40,6 +40,10 @@
#define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE
+#define ADLS_CSR_PATH "i915/adls_dmc_ver2_01.bin"
+#define ADLS_CSR_VERSION_REQUIRED CSR_VERSION(2, 1)
+MODULE_FIRMWARE(ADLS_CSR_PATH);
+
#define DG1_CSR_PATH "i915/dg1_dmc_ver2_02.bin"
#define DG1_CSR_VERSION_REQUIRED CSR_VERSION(2, 2)
MODULE_FIRMWARE(DG1_CSR_PATH);
@@ -640,7 +644,7 @@ static void csr_load_work_fn(struct work_struct *work)
dev_priv = container_of(work, typeof(*dev_priv), csr.work);
csr = &dev_priv->csr;
- request_firmware(&fw, dev_priv->csr.fw_path, &dev_priv->drm.pdev->dev);
+ request_firmware(&fw, dev_priv->csr.fw_path, dev_priv->drm.dev);
parse_csr_fw(dev_priv, fw);
if (dev_priv->csr.dmc_payload) {
@@ -689,7 +693,11 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
*/
intel_csr_runtime_pm_get(dev_priv);
- if (IS_DG1(dev_priv)) {
+ if (IS_ALDERLAKE_S(dev_priv)) {
+ csr->fw_path = ADLS_CSR_PATH;
+ csr->required_version = ADLS_CSR_VERSION_REQUIRED;
+ csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
+ } else if (IS_DG1(dev_priv)) {
csr->fw_path = DG1_CSR_PATH;
csr->required_version = DG1_CSR_VERSION_REQUIRED;
csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 1bb40ec5fe5d..ba83682e1d3e 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -28,17 +28,18 @@
#include <drm/drm_scdc_helper.h>
#include "i915_drv.h"
-#include "i915_trace.h"
#include "intel_audio.h"
#include "intel_combo_phy.h"
#include "intel_connector.h"
#include "intel_ddi.h"
+#include "intel_ddi_buf_trans.h"
#include "intel_display_types.h"
#include "intel_dp.h"
-#include "intel_dp_mst.h"
#include "intel_dp_link_training.h"
+#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_dsi.h"
+#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
@@ -52,12 +53,8 @@
#include "intel_tc.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"
-
-struct ddi_buf_trans {
- u32 trans1; /* balance leg enable, de-emph level */
- u32 trans2; /* vref sel, vswing */
- u8 i_boost; /* SKL: I_boost; valid: 0x0, 0x1, 0x3, 0x7 */
-};
+#include "skl_scaler.h"
+#include "skl_universal_plane.h"
static const u8 index_to_dp_signal_levels[] = {
[0] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0,
@@ -72,1389 +69,15 @@ static const u8 index_to_dp_signal_levels[] = {
[9] = DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0,
};
-/* HDMI/DVI modes ignore everything but the last 2 items. So we share
- * them for both DP and FDI transports, allowing those ports to
- * automatically adapt to HDMI connections as well
- */
-static const struct ddi_buf_trans hsw_ddi_translations_dp[] = {
- { 0x00FFFFFF, 0x0006000E, 0x0 },
- { 0x00D75FFF, 0x0005000A, 0x0 },
- { 0x00C30FFF, 0x00040006, 0x0 },
- { 0x80AAAFFF, 0x000B0000, 0x0 },
- { 0x00FFFFFF, 0x0005000A, 0x0 },
- { 0x00D75FFF, 0x000C0004, 0x0 },
- { 0x80C30FFF, 0x000B0000, 0x0 },
- { 0x00FFFFFF, 0x00040006, 0x0 },
- { 0x80D75FFF, 0x000B0000, 0x0 },
-};
-
-static const struct ddi_buf_trans hsw_ddi_translations_fdi[] = {
- { 0x00FFFFFF, 0x0007000E, 0x0 },
- { 0x00D75FFF, 0x000F000A, 0x0 },
- { 0x00C30FFF, 0x00060006, 0x0 },
- { 0x00AAAFFF, 0x001E0000, 0x0 },
- { 0x00FFFFFF, 0x000F000A, 0x0 },
- { 0x00D75FFF, 0x00160004, 0x0 },
- { 0x00C30FFF, 0x001E0000, 0x0 },
- { 0x00FFFFFF, 0x00060006, 0x0 },
- { 0x00D75FFF, 0x001E0000, 0x0 },
-};
-
-static const struct ddi_buf_trans hsw_ddi_translations_hdmi[] = {
- /* Idx NT mV d T mV d db */
- { 0x00FFFFFF, 0x0006000E, 0x0 },/* 0: 400 400 0 */
- { 0x00E79FFF, 0x000E000C, 0x0 },/* 1: 400 500 2 */
- { 0x00D75FFF, 0x0005000A, 0x0 },/* 2: 400 600 3.5 */
- { 0x00FFFFFF, 0x0005000A, 0x0 },/* 3: 600 600 0 */
- { 0x00E79FFF, 0x001D0007, 0x0 },/* 4: 600 750 2 */
- { 0x00D75FFF, 0x000C0004, 0x0 },/* 5: 600 900 3.5 */
- { 0x00FFFFFF, 0x00040006, 0x0 },/* 6: 800 800 0 */
- { 0x80E79FFF, 0x00030002, 0x0 },/* 7: 800 1000 2 */
- { 0x00FFFFFF, 0x00140005, 0x0 },/* 8: 850 850 0 */
- { 0x00FFFFFF, 0x000C0004, 0x0 },/* 9: 900 900 0 */
- { 0x00FFFFFF, 0x001C0003, 0x0 },/* 10: 950 950 0 */
- { 0x80FFFFFF, 0x00030002, 0x0 },/* 11: 1000 1000 0 */
-};
-
-static const struct ddi_buf_trans bdw_ddi_translations_edp[] = {
- { 0x00FFFFFF, 0x00000012, 0x0 },
- { 0x00EBAFFF, 0x00020011, 0x0 },
- { 0x00C71FFF, 0x0006000F, 0x0 },
- { 0x00AAAFFF, 0x000E000A, 0x0 },
- { 0x00FFFFFF, 0x00020011, 0x0 },
- { 0x00DB6FFF, 0x0005000F, 0x0 },
- { 0x00BEEFFF, 0x000A000C, 0x0 },
- { 0x00FFFFFF, 0x0005000F, 0x0 },
- { 0x00DB6FFF, 0x000A000C, 0x0 },
-};
-
-static const struct ddi_buf_trans bdw_ddi_translations_dp[] = {
- { 0x00FFFFFF, 0x0007000E, 0x0 },
- { 0x00D75FFF, 0x000E000A, 0x0 },
- { 0x00BEFFFF, 0x00140006, 0x0 },
- { 0x80B2CFFF, 0x001B0002, 0x0 },
- { 0x00FFFFFF, 0x000E000A, 0x0 },
- { 0x00DB6FFF, 0x00160005, 0x0 },
- { 0x80C71FFF, 0x001A0002, 0x0 },
- { 0x00F7DFFF, 0x00180004, 0x0 },
- { 0x80D75FFF, 0x001B0002, 0x0 },
-};
-
-static const struct ddi_buf_trans bdw_ddi_translations_fdi[] = {
- { 0x00FFFFFF, 0x0001000E, 0x0 },
- { 0x00D75FFF, 0x0004000A, 0x0 },
- { 0x00C30FFF, 0x00070006, 0x0 },
- { 0x00AAAFFF, 0x000C0000, 0x0 },
- { 0x00FFFFFF, 0x0004000A, 0x0 },
- { 0x00D75FFF, 0x00090004, 0x0 },
- { 0x00C30FFF, 0x000C0000, 0x0 },
- { 0x00FFFFFF, 0x00070006, 0x0 },
- { 0x00D75FFF, 0x000C0000, 0x0 },
-};
-
-static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
- /* Idx NT mV d T mV df db */
- { 0x00FFFFFF, 0x0007000E, 0x0 },/* 0: 400 400 0 */
- { 0x00D75FFF, 0x000E000A, 0x0 },/* 1: 400 600 3.5 */
- { 0x00BEFFFF, 0x00140006, 0x0 },/* 2: 400 800 6 */
- { 0x00FFFFFF, 0x0009000D, 0x0 },/* 3: 450 450 0 */
- { 0x00FFFFFF, 0x000E000A, 0x0 },/* 4: 600 600 0 */
- { 0x00D7FFFF, 0x00140006, 0x0 },/* 5: 600 800 2.5 */
- { 0x80CB2FFF, 0x001B0002, 0x0 },/* 6: 600 1000 4.5 */
- { 0x00FFFFFF, 0x00140006, 0x0 },/* 7: 800 800 0 */
- { 0x80E79FFF, 0x001B0002, 0x0 },/* 8: 800 1000 2 */
- { 0x80FFFFFF, 0x001B0002, 0x0 },/* 9: 1000 1000 0 */
-};
-
-/* Skylake H and S */
-static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
- { 0x00002016, 0x000000A0, 0x0 },
- { 0x00005012, 0x0000009B, 0x0 },
- { 0x00007011, 0x00000088, 0x0 },
- { 0x80009010, 0x000000C0, 0x1 },
- { 0x00002016, 0x0000009B, 0x0 },
- { 0x00005012, 0x00000088, 0x0 },
- { 0x80007011, 0x000000C0, 0x1 },
- { 0x00002016, 0x000000DF, 0x0 },
- { 0x80005012, 0x000000C0, 0x1 },
-};
-
-/* Skylake U */
-static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
- { 0x0000201B, 0x000000A2, 0x0 },
- { 0x00005012, 0x00000088, 0x0 },
- { 0x80007011, 0x000000CD, 0x1 },
- { 0x80009010, 0x000000C0, 0x1 },
- { 0x0000201B, 0x0000009D, 0x0 },
- { 0x80005012, 0x000000C0, 0x1 },
- { 0x80007011, 0x000000C0, 0x1 },
- { 0x00002016, 0x00000088, 0x0 },
- { 0x80005012, 0x000000C0, 0x1 },
-};
-
-/* Skylake Y */
-static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
- { 0x00000018, 0x000000A2, 0x0 },
- { 0x00005012, 0x00000088, 0x0 },
- { 0x80007011, 0x000000CD, 0x3 },
- { 0x80009010, 0x000000C0, 0x3 },
- { 0x00000018, 0x0000009D, 0x0 },
- { 0x80005012, 0x000000C0, 0x3 },
- { 0x80007011, 0x000000C0, 0x3 },
- { 0x00000018, 0x00000088, 0x0 },
- { 0x80005012, 0x000000C0, 0x3 },
-};
-
-/* Kabylake H and S */
-static const struct ddi_buf_trans kbl_ddi_translations_dp[] = {
- { 0x00002016, 0x000000A0, 0x0 },
- { 0x00005012, 0x0000009B, 0x0 },
- { 0x00007011, 0x00000088, 0x0 },
- { 0x80009010, 0x000000C0, 0x1 },
- { 0x00002016, 0x0000009B, 0x0 },
- { 0x00005012, 0x00000088, 0x0 },
- { 0x80007011, 0x000000C0, 0x1 },
- { 0x00002016, 0x00000097, 0x0 },
- { 0x80005012, 0x000000C0, 0x1 },
-};
-
-/* Kabylake U */
-static const struct ddi_buf_trans kbl_u_ddi_translations_dp[] = {
- { 0x0000201B, 0x000000A1, 0x0 },
- { 0x00005012, 0x00000088, 0x0 },
- { 0x80007011, 0x000000CD, 0x3 },
- { 0x80009010, 0x000000C0, 0x3 },
- { 0x0000201B, 0x0000009D, 0x0 },
- { 0x80005012, 0x000000C0, 0x3 },
- { 0x80007011, 0x000000C0, 0x3 },
- { 0x00002016, 0x0000004F, 0x0 },
- { 0x80005012, 0x000000C0, 0x3 },
-};
-
-/* Kabylake Y */
-static const struct ddi_buf_trans kbl_y_ddi_translations_dp[] = {
- { 0x00001017, 0x000000A1, 0x0 },
- { 0x00005012, 0x00000088, 0x0 },
- { 0x80007011, 0x000000CD, 0x3 },
- { 0x8000800F, 0x000000C0, 0x3 },
- { 0x00001017, 0x0000009D, 0x0 },
- { 0x80005012, 0x000000C0, 0x3 },
- { 0x80007011, 0x000000C0, 0x3 },
- { 0x00001017, 0x0000004C, 0x0 },
- { 0x80005012, 0x000000C0, 0x3 },
-};
-
-/*
- * Skylake/Kabylake H and S
- * eDP 1.4 low vswing translation parameters
- */
-static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
- { 0x00000018, 0x000000A8, 0x0 },
- { 0x00004013, 0x000000A9, 0x0 },
- { 0x00007011, 0x000000A2, 0x0 },
- { 0x00009010, 0x0000009C, 0x0 },
- { 0x00000018, 0x000000A9, 0x0 },
- { 0x00006013, 0x000000A2, 0x0 },
- { 0x00007011, 0x000000A6, 0x0 },
- { 0x00000018, 0x000000AB, 0x0 },
- { 0x00007013, 0x0000009F, 0x0 },
- { 0x00000018, 0x000000DF, 0x0 },
-};
-
-/*
- * Skylake/Kabylake U
- * eDP 1.4 low vswing translation parameters
- */
-static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = {
- { 0x00000018, 0x000000A8, 0x0 },
- { 0x00004013, 0x000000A9, 0x0 },
- { 0x00007011, 0x000000A2, 0x0 },
- { 0x00009010, 0x0000009C, 0x0 },
- { 0x00000018, 0x000000A9, 0x0 },
- { 0x00006013, 0x000000A2, 0x0 },
- { 0x00007011, 0x000000A6, 0x0 },
- { 0x00002016, 0x000000AB, 0x0 },
- { 0x00005013, 0x0000009F, 0x0 },
- { 0x00000018, 0x000000DF, 0x0 },
-};
-
-/*
- * Skylake/Kabylake Y
- * eDP 1.4 low vswing translation parameters
- */
-static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = {
- { 0x00000018, 0x000000A8, 0x0 },
- { 0x00004013, 0x000000AB, 0x0 },
- { 0x00007011, 0x000000A4, 0x0 },
- { 0x00009010, 0x000000DF, 0x0 },
- { 0x00000018, 0x000000AA, 0x0 },
- { 0x00006013, 0x000000A4, 0x0 },
- { 0x00007011, 0x0000009D, 0x0 },
- { 0x00000018, 0x000000A0, 0x0 },
- { 0x00006012, 0x000000DF, 0x0 },
- { 0x00000018, 0x0000008A, 0x0 },
-};
-
-/* Skylake/Kabylake U, H and S */
-static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
- { 0x00000018, 0x000000AC, 0x0 },
- { 0x00005012, 0x0000009D, 0x0 },
- { 0x00007011, 0x00000088, 0x0 },
- { 0x00000018, 0x000000A1, 0x0 },
- { 0x00000018, 0x00000098, 0x0 },
- { 0x00004013, 0x00000088, 0x0 },
- { 0x80006012, 0x000000CD, 0x1 },
- { 0x00000018, 0x000000DF, 0x0 },
- { 0x80003015, 0x000000CD, 0x1 }, /* Default */
- { 0x80003015, 0x000000C0, 0x1 },
- { 0x80000018, 0x000000C0, 0x1 },
-};
-
-/* Skylake/Kabylake Y */
-static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
- { 0x00000018, 0x000000A1, 0x0 },
- { 0x00005012, 0x000000DF, 0x0 },
- { 0x80007011, 0x000000CB, 0x3 },
- { 0x00000018, 0x000000A4, 0x0 },
- { 0x00000018, 0x0000009D, 0x0 },
- { 0x00004013, 0x00000080, 0x0 },
- { 0x80006013, 0x000000C0, 0x3 },
- { 0x00000018, 0x0000008A, 0x0 },
- { 0x80003015, 0x000000C0, 0x3 }, /* Default */
- { 0x80003015, 0x000000C0, 0x3 },
- { 0x80000018, 0x000000C0, 0x3 },
-};
-
-struct bxt_ddi_buf_trans {
- u8 margin; /* swing value */
- u8 scale; /* scale value */
- u8 enable; /* scale enable */
- u8 deemphasis;
-};
-
-static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = {
- /* Idx NT mV diff db */
- { 52, 0x9A, 0, 128, }, /* 0: 400 0 */
- { 78, 0x9A, 0, 85, }, /* 1: 400 3.5 */
- { 104, 0x9A, 0, 64, }, /* 2: 400 6 */
- { 154, 0x9A, 0, 43, }, /* 3: 400 9.5 */
- { 77, 0x9A, 0, 128, }, /* 4: 600 0 */
- { 116, 0x9A, 0, 85, }, /* 5: 600 3.5 */
- { 154, 0x9A, 0, 64, }, /* 6: 600 6 */
- { 102, 0x9A, 0, 128, }, /* 7: 800 0 */
- { 154, 0x9A, 0, 85, }, /* 8: 800 3.5 */
- { 154, 0x9A, 1, 128, }, /* 9: 1200 0 */
-};
-
-static const struct bxt_ddi_buf_trans bxt_ddi_translations_edp[] = {
- /* Idx NT mV diff db */
- { 26, 0, 0, 128, }, /* 0: 200 0 */
- { 38, 0, 0, 112, }, /* 1: 200 1.5 */
- { 48, 0, 0, 96, }, /* 2: 200 4 */
- { 54, 0, 0, 69, }, /* 3: 200 6 */
- { 32, 0, 0, 128, }, /* 4: 250 0 */
- { 48, 0, 0, 104, }, /* 5: 250 1.5 */
- { 54, 0, 0, 85, }, /* 6: 250 4 */
- { 43, 0, 0, 128, }, /* 7: 300 0 */
- { 54, 0, 0, 101, }, /* 8: 300 1.5 */
- { 48, 0, 0, 128, }, /* 9: 300 0 */
-};
-
-/* BSpec has 2 recommended values - entries 0 and 8.
- * Using the entry with higher vswing.
- */
-static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
- /* Idx NT mV diff db */
- { 52, 0x9A, 0, 128, }, /* 0: 400 0 */
- { 52, 0x9A, 0, 85, }, /* 1: 400 3.5 */
- { 52, 0x9A, 0, 64, }, /* 2: 400 6 */
- { 42, 0x9A, 0, 43, }, /* 3: 400 9.5 */
- { 77, 0x9A, 0, 128, }, /* 4: 600 0 */
- { 77, 0x9A, 0, 85, }, /* 5: 600 3.5 */
- { 77, 0x9A, 0, 64, }, /* 6: 600 6 */
- { 102, 0x9A, 0, 128, }, /* 7: 800 0 */
- { 102, 0x9A, 0, 85, }, /* 8: 800 3.5 */
- { 154, 0x9A, 1, 128, }, /* 9: 1200 0 */
-};
-
-struct cnl_ddi_buf_trans {
- u8 dw2_swing_sel;
- u8 dw7_n_scalar;
- u8 dw4_cursor_coeff;
- u8 dw4_post_cursor_2;
- u8 dw4_post_cursor_1;
-};
-
-/* Voltage Swing Programming for VccIO 0.85V for DP */
-static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_0_85V[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x5D, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x6A, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */
- { 0xB, 0x7A, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */
- { 0x6, 0x7C, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */
- { 0xA, 0x69, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xB, 0x7A, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
- { 0x6, 0x7C, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
- { 0xB, 0x7D, 0x3C, 0x00, 0x03 }, /* 650 725 0.9 */
- { 0x6, 0x7C, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */
- { 0x6, 0x7B, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-/* Voltage Swing Programming for VccIO 0.85V for HDMI */
-static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_0_85V[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
- { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */
- { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */
- { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 */
- { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */
- { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
-};
-
-/* Voltage Swing Programming for VccIO 0.85V for eDP */
-static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_0_85V[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x66, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */
- { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */
- { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */
- { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */
- { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */
- { 0xA, 0x66, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */
- { 0xB, 0x70, 0x3C, 0x00, 0x03 }, /* 460 600 2.3 */
- { 0xC, 0x75, 0x3C, 0x00, 0x03 }, /* 537 700 2.3 */
- { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
-};
-
-/* Voltage Swing Programming for VccIO 0.95V for DP */
-static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_0_95V[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x5D, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x6A, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */
- { 0xB, 0x7A, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */
- { 0x6, 0x7C, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */
- { 0xA, 0x69, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xB, 0x7A, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
- { 0x6, 0x7C, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
- { 0xB, 0x7D, 0x3C, 0x00, 0x03 }, /* 650 725 0.9 */
- { 0x6, 0x7C, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */
- { 0x6, 0x7B, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-/* Voltage Swing Programming for VccIO 0.95V for HDMI */
-static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_0_95V[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x5C, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
- { 0xB, 0x69, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */
- { 0x5, 0x76, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */
- { 0xA, 0x5E, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
- { 0xB, 0x69, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */
- { 0xB, 0x79, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
- { 0x6, 0x7D, 0x32, 0x00, 0x0D }, /* 600 1000 4.4 */
- { 0x5, 0x76, 0x3F, 0x00, 0x00 }, /* 800 800 0.0 */
- { 0x6, 0x7D, 0x39, 0x00, 0x06 }, /* 800 1000 1.9 */
- { 0x6, 0x7F, 0x39, 0x00, 0x06 }, /* 850 1050 1.8 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */
-};
-
-/* Voltage Swing Programming for VccIO 0.95V for eDP */
-static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_0_95V[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x61, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */
- { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */
- { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */
- { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */
- { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */
- { 0xA, 0x61, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */
- { 0xB, 0x68, 0x39, 0x00, 0x06 }, /* 460 600 2.3 */
- { 0xC, 0x6E, 0x39, 0x00, 0x06 }, /* 537 700 2.3 */
- { 0x4, 0x7F, 0x3A, 0x00, 0x05 }, /* 460 600 2.3 */
- { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
-};
-
-/* Voltage Swing Programming for VccIO 1.05V for DP */
-static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_1_05V[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x58, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
- { 0xB, 0x64, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */
- { 0x5, 0x70, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */
- { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 400 1050 8.4 */
- { 0xB, 0x64, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */
- { 0x5, 0x73, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
- { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 550 1050 5.6 */
- { 0x5, 0x76, 0x3E, 0x00, 0x01 }, /* 850 900 0.5 */
- { 0x6, 0x7F, 0x36, 0x00, 0x09 }, /* 750 1050 2.9 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */
-};
-
-/* Voltage Swing Programming for VccIO 1.05V for HDMI */
-static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_1_05V[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x58, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
- { 0xB, 0x64, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */
- { 0x5, 0x70, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */
- { 0xA, 0x5B, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
- { 0xB, 0x64, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */
- { 0x5, 0x73, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
- { 0x6, 0x7C, 0x32, 0x00, 0x0D }, /* 600 1000 4.4 */
- { 0x5, 0x70, 0x3F, 0x00, 0x00 }, /* 800 800 0.0 */
- { 0x6, 0x7C, 0x39, 0x00, 0x06 }, /* 800 1000 1.9 */
- { 0x6, 0x7F, 0x39, 0x00, 0x06 }, /* 850 1050 1.8 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */
-};
-
-/* Voltage Swing Programming for VccIO 1.05V for eDP */
-static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x5E, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */
- { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */
- { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */
- { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */
- { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */
- { 0xA, 0x5E, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */
- { 0xB, 0x64, 0x39, 0x00, 0x06 }, /* 460 600 2.3 */
- { 0xE, 0x6A, 0x39, 0x00, 0x06 }, /* 537 700 2.3 */
- { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
-};
-
-/* icl_combo_phy_ddi_translations */
-static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
- { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
- { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
- { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
- { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
- { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2[] = {
- /* NT mV Trans mV db */
- { 0x0, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */
- { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */
- { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */
- { 0x9, 0x7F, 0x31, 0x00, 0x0E }, /* 200 350 4.9 */
- { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */
- { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */
- { 0x9, 0x7F, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */
- { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */
- { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */
- { 0x9, 0x7F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr3[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
- { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
- { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
- { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
- { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
- { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
- { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */
- { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */
- { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 ALS */
- { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */
- { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
-};
-
-static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_dp[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x33, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x47, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */
- { 0xC, 0x64, 0x34, 0x00, 0x0B }, /* 350 700 6.0 */
- { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 350 900 8.2 */
- { 0xA, 0x46, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x64, 0x38, 0x00, 0x07 }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x32, 0x00, 0x0D }, /* 500 900 5.1 */
- { 0xC, 0x61, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
- { 0x6, 0x7F, 0x38, 0x00, 0x07 }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr[] = {
- /* NT mV Trans mV db */
- { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */
- { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */
- { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */
- { 0xA, 0x35, 0x36, 0x00, 0x09 }, /* 200 350 4.9 */
- { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */
- { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */
- { 0xA, 0x35, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */
- { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */
- { 0xA, 0x35, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */
- { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr2[] = {
- /* NT mV Trans mV db */
- { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */
- { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 250 1.9 */
- { 0x1, 0x7F, 0x3D, 0x00, 0x02 }, /* 200 300 3.5 */
- { 0xA, 0x35, 0x38, 0x00, 0x07 }, /* 200 350 4.9 */
- { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */
- { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 300 1.6 */
- { 0xA, 0x35, 0x3A, 0x00, 0x05 }, /* 250 350 2.9 */
- { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */
- { 0xA, 0x35, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */
- { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_rbr_hbr[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */
- { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
- { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */
- { 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
- { 0xC, 0x60, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
- { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */
- { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
- { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */
- { 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
- { 0xC, 0x58, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
- { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-struct icl_mg_phy_ddi_buf_trans {
- u32 cri_txdeemph_override_11_6;
- u32 cri_txdeemph_override_5_0;
- u32 cri_txdeemph_override_17_12;
-};
-
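Aside: the MG (Type-C) tables are flattened lists of (voltage swing, pre-emphasis) pairs in the order shown in the comments; swing level v contributes 4 - v rows, so the flat index works out to p + 4*v - v*(v-1)/2. The formula is inferred from the comment layout, not taken from the driver; a small check against the documented ordering:

#include <assert.h>
#include <stdio.h>

/* Flat index of a (voltage swing, pre-emphasis) pair, assuming the
 * triangular layout shown in the table comments (v + p <= 3). */
static int mg_trans_index(int vswing, int preemph)
{
	return preemph + 4 * vswing - vswing * (vswing - 1) / 2;
}

int main(void)
{
	assert(mg_trans_index(0, 3) == 3);
	assert(mg_trans_index(1, 0) == 4);
	assert(mg_trans_index(2, 1) == 8);
	assert(mg_trans_index(3, 0) == 9);
	printf("ordering matches the table comments\n");
	return 0;
}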
-static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_rbr_hbr[] = {
- /* Voltage swing pre-emphasis */
- { 0x18, 0x00, 0x00 }, /* 0 0 */
- { 0x1D, 0x00, 0x05 }, /* 0 1 */
- { 0x24, 0x00, 0x0C }, /* 0 2 */
- { 0x2B, 0x00, 0x14 }, /* 0 3 */
- { 0x21, 0x00, 0x00 }, /* 1 0 */
- { 0x2B, 0x00, 0x08 }, /* 1 1 */
- { 0x30, 0x00, 0x0F }, /* 1 2 */
- { 0x31, 0x00, 0x03 }, /* 2 0 */
- { 0x34, 0x00, 0x0B }, /* 2 1 */
- { 0x3F, 0x00, 0x00 }, /* 3 0 */
-};
-
-static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hbr2_hbr3[] = {
- /* Voltage swing pre-emphasis */
- { 0x18, 0x00, 0x00 }, /* 0 0 */
- { 0x1D, 0x00, 0x05 }, /* 0 1 */
- { 0x24, 0x00, 0x0C }, /* 0 2 */
- { 0x2B, 0x00, 0x14 }, /* 0 3 */
- { 0x26, 0x00, 0x00 }, /* 1 0 */
- { 0x2C, 0x00, 0x07 }, /* 1 1 */
- { 0x33, 0x00, 0x0C }, /* 1 2 */
- { 0x2E, 0x00, 0x00 }, /* 2 0 */
- { 0x36, 0x00, 0x09 }, /* 2 1 */
- { 0x3F, 0x00, 0x00 }, /* 3 0 */
-};
-
-static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hdmi[] = {
- /* HDMI Preset VS Pre-emph */
- { 0x1A, 0x0, 0x0 }, /* 1 400mV 0dB */
- { 0x20, 0x0, 0x0 }, /* 2 500mV 0dB */
- { 0x29, 0x0, 0x0 }, /* 3 650mV 0dB */
- { 0x32, 0x0, 0x0 }, /* 4 800mV 0dB */
- { 0x3F, 0x0, 0x0 }, /* 5 1000mV 0dB */
- { 0x3A, 0x0, 0x5 }, /* 6 Full -1.5 dB */
- { 0x39, 0x0, 0x6 }, /* 7 Full -1.8 dB */
- { 0x38, 0x0, 0x7 }, /* 8 Full -2 dB */
- { 0x37, 0x0, 0x8 }, /* 9 Full -2.5 dB */
- { 0x36, 0x0, 0x9 }, /* 10 Full -3 dB */
-};
-
-struct tgl_dkl_phy_ddi_buf_trans {
- u32 dkl_vswing_control;
- u32 dkl_preshoot_control;
- u32 dkl_de_emphasis_control;
-};
-
-static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans[] = {
- /* VS pre-emp Non-trans mV Pre-emph dB */
- { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */
- { 0x5, 0x0, 0x05 }, /* 0 1 400mV 3.5 dB */
- { 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */
- { 0x0, 0x0, 0x18 }, /* 0 3 400mV 9.5 dB */
- { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */
- { 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */
- { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */
- { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */
- { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */
- { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */
-};
-
-static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans_hbr2[] = {
- /* VS pre-emp Non-trans mV Pre-emph dB */
- { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */
- { 0x5, 0x0, 0x05 }, /* 0 1 400mV 3.5 dB */
- { 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */
- { 0x0, 0x0, 0x19 }, /* 0 3 400mV 9.5 dB */
- { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */
- { 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */
- { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */
- { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */
- { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */
- { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */
-};
-
-static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_hdmi_ddi_trans[] = {
- /* HDMI Preset VS Pre-emph */
- { 0x7, 0x0, 0x0 }, /* 1 400mV 0dB */
- { 0x6, 0x0, 0x0 }, /* 2 500mV 0dB */
- { 0x4, 0x0, 0x0 }, /* 3 650mV 0dB */
- { 0x2, 0x0, 0x0 }, /* 4 800mV 0dB */
- { 0x0, 0x0, 0x0 }, /* 5 1000mV 0dB */
- { 0x0, 0x0, 0x5 }, /* 6 Full -1.5 dB */
- { 0x0, 0x0, 0x6 }, /* 7 Full -1.8 dB */
- { 0x0, 0x0, 0x7 }, /* 8 Full -2 dB */
- { 0x0, 0x0, 0x8 }, /* 9 Full -2.5 dB */
- { 0x0, 0x0, 0xA }, /* 10 Full -3 dB */
-};
-
-static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
- { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
- { 0x6, 0x7D, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
- { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
- { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
- { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
- { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
- { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
- { 0xA, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x63, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
- { 0xC, 0x61, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
- { 0x6, 0x7B, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans tgl_uy_combo_phy_ddi_translations_dp_hbr2[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x4F, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */
- { 0xC, 0x60, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */
- { 0xC, 0x7F, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */
- { 0xC, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x6F, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
- { 0x6, 0x7D, 0x32, 0x00, 0x0D }, /* 500 900 5.1 */
- { 0x6, 0x60, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
- { 0x6, 0x7F, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-/*
- * Cloned the HOBL entry to comply with the voltage and pre-emphasis entries
- * that DisplayPort specification requires
- */
-static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_edp_hbr2_hobl[] = {
- /* VS pre-emp */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 0 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 1 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 2 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 3 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 0 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 1 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 2 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 0 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 1 */
-};
-
-static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x2F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
- { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
- { 0x6, 0x7D, 0x2A, 0x00, 0x15 }, /* 350 900 8.2 */
- { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
- { 0xC, 0x6E, 0x3E, 0x00, 0x01 }, /* 650 700 0.6 */
- { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x50, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */
- { 0xC, 0x61, 0x33, 0x00, 0x0C }, /* 350 700 6.0 */
- { 0x6, 0x7F, 0x2E, 0x00, 0x11 }, /* 350 900 8.2 */
- { 0xA, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x5F, 0x38, 0x00, 0x07 }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
- { 0xC, 0x5F, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
- { 0x6, 0x7E, 0x36, 0x00, 0x09 }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-static bool is_hobl_buf_trans(const struct cnl_ddi_buf_trans *table)
-{
- return table == tgl_combo_phy_ddi_translations_edp_hbr2_hobl;
-}
-
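Aside: is_hobl_buf_trans() relies on pointer identity rather than content. The HOBL table exists only so that every voltage/pre-emphasis combination resolves to the same fixed entry, and comparing the table pointer is enough to tell the HOBL case apart later on. A stripped-down sketch of the same idea:

#include <stdbool.h>
#include <stdio.h>

struct buf_trans { int a, b; };

/* Every row is identical on purpose, mirroring the HOBL table above. */
static const struct buf_trans hobl_table[] = {
	{ 1, 2 }, { 1, 2 }, { 1, 2 },
};

static const struct buf_trans other_table[] = {
	{ 3, 4 }, { 5, 6 },
};

/* Detection by pointer identity, like is_hobl_buf_trans(). */
static bool is_hobl(const struct buf_trans *table)
{
	return table == hobl_table;
}

int main(void)
{
	printf("hobl: %d, other: %d\n", is_hobl(hobl_table), is_hobl(other_table));
	return 0;
}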
-static const struct ddi_buf_trans *
-bdw_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (dev_priv->vbt.edp.low_vswing) {
- *n_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
- return bdw_ddi_translations_edp;
- } else {
- *n_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
- return bdw_ddi_translations_dp;
- }
-}
-
-static const struct ddi_buf_trans *
-skl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (IS_SKL_ULX(dev_priv)) {
- *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
- return skl_y_ddi_translations_dp;
- } else if (IS_SKL_ULT(dev_priv)) {
- *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
- return skl_u_ddi_translations_dp;
- } else {
- *n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
- return skl_ddi_translations_dp;
- }
-}
-
-static const struct ddi_buf_trans *
-kbl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (IS_KBL_ULX(dev_priv) ||
- IS_CFL_ULX(dev_priv) ||
- IS_CML_ULX(dev_priv)) {
- *n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
- return kbl_y_ddi_translations_dp;
- } else if (IS_KBL_ULT(dev_priv) ||
- IS_CFL_ULT(dev_priv) ||
- IS_CML_ULT(dev_priv)) {
- *n_entries = ARRAY_SIZE(kbl_u_ddi_translations_dp);
- return kbl_u_ddi_translations_dp;
- } else {
- *n_entries = ARRAY_SIZE(kbl_ddi_translations_dp);
- return kbl_ddi_translations_dp;
- }
-}
-
-static const struct ddi_buf_trans *
-skl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (dev_priv->vbt.edp.low_vswing) {
- if (IS_SKL_ULX(dev_priv) ||
- IS_KBL_ULX(dev_priv) ||
- IS_CFL_ULX(dev_priv) ||
- IS_CML_ULX(dev_priv)) {
- *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
- return skl_y_ddi_translations_edp;
- } else if (IS_SKL_ULT(dev_priv) ||
- IS_KBL_ULT(dev_priv) ||
- IS_CFL_ULT(dev_priv) ||
- IS_CML_ULT(dev_priv)) {
- *n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
- return skl_u_ddi_translations_edp;
- } else {
- *n_entries = ARRAY_SIZE(skl_ddi_translations_edp);
- return skl_ddi_translations_edp;
- }
- }
-
- if (IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv) ||
- IS_COMETLAKE(dev_priv))
- return kbl_get_buf_trans_dp(encoder, n_entries);
- else
- return skl_get_buf_trans_dp(encoder, n_entries);
-}
-
-static const struct ddi_buf_trans *
-skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
-{
- if (IS_SKL_ULX(dev_priv) ||
- IS_KBL_ULX(dev_priv) ||
- IS_CFL_ULX(dev_priv) ||
- IS_CML_ULX(dev_priv)) {
- *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
- return skl_y_ddi_translations_hdmi;
- } else {
- *n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
- return skl_ddi_translations_hdmi;
- }
-}
-
-static int skl_buf_trans_num_entries(enum port port, int n_entries)
-{
- /* Only DDIA and DDIE can select the 10th register with DP */
- if (port == PORT_A || port == PORT_E)
- return min(n_entries, 10);
- else
- return min(n_entries, 9);
-}
-
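Aside: only DDI A and DDI E expose the 10th buffer-translation register for DP, so skl_buf_trans_num_entries() caps the reported table length per port before any level is validated against it. The same cap as a standalone sketch (PORT_A/PORT_E stand in for the driver's enum port values):

#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E };

static int min_int(int a, int b) { return a < b ? a : b; }

/* DDI A and DDI E can select entry 9 (the 10th register); the rest cannot. */
static int skl_num_entries(enum port port, int n_entries)
{
	if (port == PORT_A || port == PORT_E)
		return min_int(n_entries, 10);
	return min_int(n_entries, 9);
}

int main(void)
{
	printf("A:%d B:%d\n",
	       skl_num_entries(PORT_A, 10), skl_num_entries(PORT_B, 10));
	return 0;
}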
-static const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv) ||
- IS_COMETLAKE(dev_priv)) {
- const struct ddi_buf_trans *ddi_translations =
- kbl_get_buf_trans_dp(encoder, n_entries);
- *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
- return ddi_translations;
- } else if (IS_SKYLAKE(dev_priv)) {
- const struct ddi_buf_trans *ddi_translations =
- skl_get_buf_trans_dp(encoder, n_entries);
- *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
- return ddi_translations;
- } else if (IS_BROADWELL(dev_priv)) {
- *n_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
- return bdw_ddi_translations_dp;
- } else if (IS_HASWELL(dev_priv)) {
- *n_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
- return hsw_ddi_translations_dp;
- }
-
- *n_entries = 0;
- return NULL;
-}
-
-static const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (IS_GEN9_BC(dev_priv)) {
- const struct ddi_buf_trans *ddi_translations =
- skl_get_buf_trans_edp(encoder, n_entries);
- *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
- return ddi_translations;
- } else if (IS_BROADWELL(dev_priv)) {
- return bdw_get_buf_trans_edp(encoder, n_entries);
- } else if (IS_HASWELL(dev_priv)) {
- *n_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
- return hsw_ddi_translations_dp;
- }
-
- *n_entries = 0;
- return NULL;
-}
-
-static const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
- int *n_entries)
-{
- if (IS_BROADWELL(dev_priv)) {
- *n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi);
- return bdw_ddi_translations_fdi;
- } else if (IS_HASWELL(dev_priv)) {
- *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
- return hsw_ddi_translations_fdi;
- }
-
- *n_entries = 0;
- return NULL;
-}
-
-static const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_hdmi(struct intel_encoder *encoder,
- int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (IS_GEN9_BC(dev_priv)) {
- return skl_get_buf_trans_hdmi(dev_priv, n_entries);
- } else if (IS_BROADWELL(dev_priv)) {
- *n_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
- return bdw_ddi_translations_hdmi;
- } else if (IS_HASWELL(dev_priv)) {
- *n_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
- return hsw_ddi_translations_hdmi;
- }
-
- *n_entries = 0;
- return NULL;
-}
-
-static const struct bxt_ddi_buf_trans *
-bxt_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
-{
- *n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
- return bxt_ddi_translations_dp;
-}
-
-static const struct bxt_ddi_buf_trans *
-bxt_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (dev_priv->vbt.edp.low_vswing) {
- *n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
- return bxt_ddi_translations_edp;
- }
-
- return bxt_get_buf_trans_dp(encoder, n_entries);
-}
-
-static const struct bxt_ddi_buf_trans *
-bxt_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries)
-{
- *n_entries = ARRAY_SIZE(bxt_ddi_translations_hdmi);
- return bxt_ddi_translations_hdmi;
-}
-
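Aside: all of these getters follow the same convention: return a pointer to a static const table and report its length through an n_entries out-parameter, with the VBT low_vswing flag deciding between the eDP and the plain DP tables. A self-contained sketch of that shape; the table contents are placeholders and ARRAY_SIZE is spelled out here since this is not kernel code.

#include <stdbool.h>
#include <stdio.h>

#define ARRAY_SIZE(a) ((int)(sizeof(a) / sizeof((a)[0])))

struct buf_trans { int swing, deemph; };

static const struct buf_trans dp_table[]  = { { 400, 0 }, { 400, 35 }, { 600, 0 } };
static const struct buf_trans edp_table[] = { { 200, 0 }, { 250, 0 } };

/* Mirror of the bdw/skl/bxt eDP getters: low_vswing picks the eDP table,
 * otherwise fall back to the DP one. */
static const struct buf_trans *
get_buf_trans_edp(bool low_vswing, int *n_entries)
{
	if (low_vswing) {
		*n_entries = ARRAY_SIZE(edp_table);
		return edp_table;
	}
	*n_entries = ARRAY_SIZE(dp_table);
	return dp_table;
}

int main(void)
{
	int n;
	const struct buf_trans *t = get_buf_trans_edp(true, &n);

	printf("%d entries, first swing %d mV\n", n, t[0].swing);
	return 0;
}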
-static const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
-
- if (voltage == VOLTAGE_INFO_0_85V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_85V);
- return cnl_ddi_translations_hdmi_0_85V;
- } else if (voltage == VOLTAGE_INFO_0_95V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_95V);
- return cnl_ddi_translations_hdmi_0_95V;
- } else if (voltage == VOLTAGE_INFO_1_05V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_1_05V);
- return cnl_ddi_translations_hdmi_1_05V;
- } else {
- *n_entries = 1; /* shut up gcc */
- MISSING_CASE(voltage);
- }
- return NULL;
-}
-
-static const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
-
- if (voltage == VOLTAGE_INFO_0_85V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_85V);
- return cnl_ddi_translations_dp_0_85V;
- } else if (voltage == VOLTAGE_INFO_0_95V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_95V);
- return cnl_ddi_translations_dp_0_95V;
- } else if (voltage == VOLTAGE_INFO_1_05V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_1_05V);
- return cnl_ddi_translations_dp_1_05V;
- } else {
- *n_entries = 1; /* shut up gcc */
- MISSING_CASE(voltage);
- }
- return NULL;
-}
-
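Aside: on CNL the table is chosen by the VccIO voltage reported in CNL_PORT_COMP_DW3 (masked with VOLTAGE_INFO_MASK), with the three levels above (0.85V, 0.95V, 1.05V) each getting their own DP/HDMI/eDP sets and MISSING_CASE() firing for anything else. A sketch of just the selection step, modelled with an enum in place of the register read:

#include <stdio.h>

enum vccio { VCCIO_0_85V, VCCIO_0_95V, VCCIO_1_05V };

static const char *pick_dp_table(int voltage)
{
	if (voltage == VCCIO_0_85V)
		return "cnl_ddi_translations_dp_0_85V";
	else if (voltage == VCCIO_0_95V)
		return "cnl_ddi_translations_dp_0_95V";
	else if (voltage == VCCIO_1_05V)
		return "cnl_ddi_translations_dp_1_05V";
	/* unknown encoding: the driver calls MISSING_CASE() here */
	return NULL;
}

int main(void)
{
	printf("%s\n", pick_dp_table(VCCIO_0_95V));
	return 0;
}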
-static const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
-
- if (dev_priv->vbt.edp.low_vswing) {
- if (voltage == VOLTAGE_INFO_0_85V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V);
- return cnl_ddi_translations_edp_0_85V;
- } else if (voltage == VOLTAGE_INFO_0_95V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V);
- return cnl_ddi_translations_edp_0_95V;
- } else if (voltage == VOLTAGE_INFO_1_05V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_1_05V);
- return cnl_ddi_translations_edp_1_05V;
- } else {
- *n_entries = 1; /* shut up gcc */
- MISSING_CASE(voltage);
- }
- return NULL;
- } else {
- return cnl_get_buf_trans_dp(encoder, n_entries);
- }
-}
-
-static const struct cnl_ddi_buf_trans *
-icl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
- return icl_combo_phy_ddi_translations_hdmi;
-}
-
-static const struct cnl_ddi_buf_trans *
-icl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2);
- return icl_combo_phy_ddi_translations_dp_hbr2;
-}
-
-static const struct cnl_ddi_buf_trans *
-icl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (crtc_state->port_clock > 540000) {
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3);
- return icl_combo_phy_ddi_translations_edp_hbr3;
- } else if (dev_priv->vbt.edp.low_vswing) {
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
- return icl_combo_phy_ddi_translations_edp_hbr2;
- } else if (IS_DG1(dev_priv) && crtc_state->port_clock > 270000) {
- *n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_hbr2_hbr3);
- return dg1_combo_phy_ddi_translations_dp_hbr2_hbr3;
- } else if (IS_DG1(dev_priv)) {
- *n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_rbr_hbr);
- return dg1_combo_phy_ddi_translations_dp_rbr_hbr;
- }
-
- return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
-}
-
-static const struct cnl_ddi_buf_trans *
-icl_get_combo_buf_trans(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return icl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- return icl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
- else
- return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
-}
-
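Aside: icl_get_combo_buf_trans() and its ehl/jsl/tgl counterparts are thin dispatchers on the output type: HDMI and eDP get their own getters (eDP is checked before the DP fallback because it is a DP variant with its own tables), everything else is treated as external DP. The same three-way split in isolation:

#include <stdio.h>

enum output_type { OUTPUT_DP, OUTPUT_EDP, OUTPUT_HDMI };

static const char *combo_buf_trans_for(enum output_type type)
{
	if (type == OUTPUT_HDMI)
		return "hdmi table";
	else if (type == OUTPUT_EDP)
		return "edp table";
	else
		return "dp table";
}

int main(void)
{
	printf("%s\n", combo_buf_trans_for(OUTPUT_EDP));
	return 0;
}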
-static const struct icl_mg_phy_ddi_buf_trans *
-icl_get_mg_buf_trans_hdmi(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hdmi);
- return icl_mg_phy_ddi_translations_hdmi;
-}
-
-static const struct icl_mg_phy_ddi_buf_trans *
-icl_get_mg_buf_trans_dp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- if (crtc_state->port_clock > 270000) {
- *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hbr2_hbr3);
- return icl_mg_phy_ddi_translations_hbr2_hbr3;
- } else {
- *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_rbr_hbr);
- return icl_mg_phy_ddi_translations_rbr_hbr;
- }
-}
-
-static const struct icl_mg_phy_ddi_buf_trans *
-icl_get_mg_buf_trans(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return icl_get_mg_buf_trans_hdmi(encoder, crtc_state, n_entries);
- else
- return icl_get_mg_buf_trans_dp(encoder, crtc_state, n_entries);
-}
-
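Aside: the RBR/HBR versus HBR2/HBR3 split is made on crtc_state->port_clock, which for DP is the link symbol clock in kHz: 162000 (RBR) and 270000 (HBR) fall in the first bucket, 540000 (HBR2) and 810000 (HBR3) in the second, hence the "> 270000" test. A small sketch of the mapping:

#include <stdio.h>

/* DP link rates expressed as port_clock in kHz. */
static const char *mg_table_for(int port_clock)
{
	return port_clock > 270000 ? "hbr2_hbr3 table" : "rbr_hbr table";
}

int main(void)
{
	int rates[] = { 162000, 270000, 540000, 810000 };

	for (int i = 0; i < 4; i++)
		printf("%d kHz -> %s\n", rates[i], mg_table_for(rates[i]));
	return 0;
}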
-static const struct cnl_ddi_buf_trans *
-ehl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
- return icl_combo_phy_ddi_translations_hdmi;
-}
-
-static const struct cnl_ddi_buf_trans *
-ehl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- *n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_dp);
- return ehl_combo_phy_ddi_translations_dp;
-}
-
-static const struct cnl_ddi_buf_trans *
-ehl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (dev_priv->vbt.edp.low_vswing) {
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
- return icl_combo_phy_ddi_translations_edp_hbr2;
- }
-
- return ehl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
-}
-
-static const struct cnl_ddi_buf_trans *
-ehl_get_combo_buf_trans(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return ehl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- return ehl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
- else
- return ehl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
-}
-
-static const struct cnl_ddi_buf_trans *
-jsl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
- return icl_combo_phy_ddi_translations_hdmi;
-}
-
-static const struct cnl_ddi_buf_trans *
-jsl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2);
- return icl_combo_phy_ddi_translations_dp_hbr2;
-}
-
-static const struct cnl_ddi_buf_trans *
-jsl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (dev_priv->vbt.edp.low_vswing) {
- if (crtc_state->port_clock > 270000) {
- *n_entries = ARRAY_SIZE(jsl_combo_phy_ddi_translations_edp_hbr2);
- return jsl_combo_phy_ddi_translations_edp_hbr2;
- } else {
- *n_entries = ARRAY_SIZE(jsl_combo_phy_ddi_translations_edp_hbr);
- return jsl_combo_phy_ddi_translations_edp_hbr;
- }
- }
-
- return jsl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
-}
-
-static const struct cnl_ddi_buf_trans *
-jsl_get_combo_buf_trans(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return jsl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- return jsl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
- else
- return jsl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
-}
-
-static const struct cnl_ddi_buf_trans *
-tgl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
- return icl_combo_phy_ddi_translations_hdmi;
-}
-
-static const struct cnl_ddi_buf_trans *
-tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (crtc_state->port_clock > 270000) {
- if (IS_ROCKETLAKE(dev_priv)) {
- *n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr2_hbr3);
- return rkl_combo_phy_ddi_translations_dp_hbr2_hbr3;
- } else if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
- *n_entries = ARRAY_SIZE(tgl_uy_combo_phy_ddi_translations_dp_hbr2);
- return tgl_uy_combo_phy_ddi_translations_dp_hbr2;
- } else {
- *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2);
- return tgl_combo_phy_ddi_translations_dp_hbr2;
- }
- } else {
- if (IS_ROCKETLAKE(dev_priv)) {
- *n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr);
- return rkl_combo_phy_ddi_translations_dp_hbr;
- } else {
- *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr);
- return tgl_combo_phy_ddi_translations_dp_hbr;
- }
- }
-}
-
-static const struct cnl_ddi_buf_trans *
-tgl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- if (crtc_state->port_clock > 540000) {
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3);
- return icl_combo_phy_ddi_translations_edp_hbr3;
- } else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) {
- *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_edp_hbr2_hobl);
- return tgl_combo_phy_ddi_translations_edp_hbr2_hobl;
- } else if (dev_priv->vbt.edp.low_vswing) {
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
- return icl_combo_phy_ddi_translations_edp_hbr2;
- }
-
- return tgl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
-}
-
-static const struct cnl_ddi_buf_trans *
-tgl_get_combo_buf_trans(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return tgl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- return tgl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
- else
- return tgl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
-}
-
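Aside: for TGL eDP the checks above are ordered: anything above the HBR2 link rate must use the HBR3 table, then HOBL is preferred when the VBT advertises it and it has not already failed, then the low-vswing table, and only then the plain DP tables. A compact restatement of that priority chain (hobl_failed models intel_dp->hobl_failed):

#include <stdbool.h>
#include <stdio.h>

static const char *tgl_edp_table(int port_clock, bool vbt_hobl,
				 bool hobl_failed, bool low_vswing)
{
	if (port_clock > 540000)
		return "edp_hbr3";
	if (vbt_hobl && !hobl_failed)
		return "edp_hbr2_hobl";
	if (low_vswing)
		return "edp_hbr2";
	return "dp tables";
}

int main(void)
{
	/* HOBL advertised but already failed once: fall through to low vswing. */
	printf("%s\n", tgl_edp_table(270000, true, true, true));
	return 0;
}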
-static const struct tgl_dkl_phy_ddi_buf_trans *
-tgl_get_dkl_buf_trans_hdmi(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- *n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans);
- return tgl_dkl_phy_hdmi_ddi_trans;
-}
-
-static const struct tgl_dkl_phy_ddi_buf_trans *
-tgl_get_dkl_buf_trans_dp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- if (crtc_state->port_clock > 270000) {
- *n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans_hbr2);
- return tgl_dkl_phy_dp_ddi_trans_hbr2;
- } else {
- *n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans);
- return tgl_dkl_phy_dp_ddi_trans;
- }
-}
-
-static const struct tgl_dkl_phy_ddi_buf_trans *
-tgl_get_dkl_buf_trans(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return tgl_get_dkl_buf_trans_hdmi(encoder, crtc_state, n_entries);
- else
- return tgl_get_dkl_buf_trans_dp(encoder, crtc_state, n_entries);
-}
-
static int intel_ddi_hdmi_level(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int n_entries, level, default_entry;
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- if (INTEL_GEN(dev_priv) >= 12) {
- if (intel_phy_is_combo(dev_priv, phy))
- tgl_get_combo_buf_trans_hdmi(encoder, crtc_state, &n_entries);
- else
- tgl_get_dkl_buf_trans_hdmi(encoder, crtc_state, &n_entries);
- default_entry = n_entries - 1;
- } else if (INTEL_GEN(dev_priv) == 11) {
- if (intel_phy_is_combo(dev_priv, phy))
- icl_get_combo_buf_trans_hdmi(encoder, crtc_state, &n_entries);
- else
- icl_get_mg_buf_trans_hdmi(encoder, crtc_state, &n_entries);
- default_entry = n_entries - 1;
- } else if (IS_CANNONLAKE(dev_priv)) {
- cnl_get_buf_trans_hdmi(encoder, &n_entries);
- default_entry = n_entries - 1;
- } else if (IS_GEN9_LP(dev_priv)) {
- bxt_get_buf_trans_hdmi(encoder, &n_entries);
- default_entry = n_entries - 1;
- } else if (IS_GEN9_BC(dev_priv)) {
- intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
- default_entry = 8;
- } else if (IS_BROADWELL(dev_priv)) {
- intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
- default_entry = 7;
- } else if (IS_HASWELL(dev_priv)) {
- intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
- default_entry = 6;
- } else {
- drm_WARN(&dev_priv->drm, 1, "ddi translation table missing\n");
- return 0;
- }
-
- if (drm_WARN_ON_ONCE(&dev_priv->drm, n_entries == 0))
+ n_entries = intel_ddi_hdmi_num_entries(encoder, crtc_state, &default_entry);
+ if (n_entries == 0)
return 0;
-
level = intel_bios_hdmi_level_shift(encoder);
if (level < 0)
level = default_entry;
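Aside: the removed per-platform branches are what the new intel_ddi_hdmi_num_entries() helper presumably reproduces in intel_ddi_buf_trans.c: look up the HDMI table for the platform and report a default entry of n_entries - 1 on BXT/CNL/ICL/TGL, but the historical fixed indices 8/7/6 on SKL-class/BDW/HSW. A hedged sketch of that decision; the enum and helper name here are illustrative, not the driver's.

#include <stdio.h>

enum ddi_platform { PLAT_HSW, PLAT_BDW, PLAT_GEN9_BC, PLAT_GEN9_LP,
		    PLAT_CNL, PLAT_GEN11_PLUS };

/* Default HDMI level per platform class, matching the removed branches. */
static int hdmi_default_entry(enum ddi_platform plat, int n_entries)
{
	switch (plat) {
	case PLAT_HSW:     return 6;
	case PLAT_BDW:     return 7;
	case PLAT_GEN9_BC: return 8;
	default:           return n_entries - 1;
	}
}

int main(void)
{
	printf("hsw: %d, icl: %d\n",
	       hdmi_default_entry(PLAT_HSW, 12),
	       hdmi_default_entry(PLAT_GEN11_PLUS, 7));
	return 0;
}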
@@ -1470,8 +93,8 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder,
* values in advance. This function programs the correct values for
* DP/eDP/FDI use cases.
*/
-static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
@@ -1533,8 +156,8 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
ddi_translations[level].trans2);
}
-static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
- enum port port)
+void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
+ enum port port)
{
if (IS_BROXTON(dev_priv)) {
udelay(16);
@@ -1622,141 +245,6 @@ static u32 icl_pll_to_ddi_clk_sel(struct intel_encoder *encoder,
}
}
-/* Starting with Haswell, different DDI ports can work in FDI mode for
- * connection to the PCH-located connectors. For this, it is necessary to train
- * both the DDI port and PCH receiver for the desired DDI buffer settings.
- *
- * The recommended port to work in FDI mode is DDI E, which we use here. Also,
- * please note that when FDI mode is active on DDI E, it shares 2 lines with
- * DDI A (which is used for eDP)
- */
-
-void hsw_fdi_link_train(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 temp, i, rx_ctl_val, ddi_pll_sel;
-
- intel_prepare_dp_ddi_buffers(encoder, crtc_state);
-
- /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed in the
- * mode set "sequence for CRT port" document:
- * - TP1 to TP2 time with the default value
- * - FDI delay to 90h
- *
- * WaFDIAutoLinkSetTimingOverrride:hsw
- */
- intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
- FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
-
- /* Enable the PCH Receiver FDI PLL */
- rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
- FDI_RX_PLL_ENABLE |
- FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
- intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
- udelay(220);
-
- /* Switch from Rawclk to PCDclk */
- rx_ctl_val |= FDI_PCDCLK;
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
-
- /* Configure Port Clock Select */
- ddi_pll_sel = hsw_pll_to_ddi_pll_sel(crtc_state->shared_dpll);
- intel_de_write(dev_priv, PORT_CLK_SEL(PORT_E), ddi_pll_sel);
- drm_WARN_ON(&dev_priv->drm, ddi_pll_sel != PORT_CLK_SEL_SPLL);
-
- /* Start the training, iterating through the available voltage and
- * emphasis levels and testing each value twice. */
- for (i = 0; i < ARRAY_SIZE(hsw_ddi_translations_fdi) * 2; i++) {
- /* Configure DP_TP_CTL with auto-training */
- intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
- DP_TP_CTL_FDI_AUTOTRAIN |
- DP_TP_CTL_ENHANCED_FRAME_ENABLE |
- DP_TP_CTL_LINK_TRAIN_PAT1 |
- DP_TP_CTL_ENABLE);
-
- /* Configure and enable DDI_BUF_CTL for DDI E with the next voltage.
- * DDI E does not support port reversal; the functionality is
- * achieved on the PCH side in FDI_RX_CTL, so there is no need to
- * set the port reversal bit. */
- intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
- DDI_BUF_CTL_ENABLE | ((crtc_state->fdi_lanes - 1) << 1) | DDI_BUF_TRANS_SELECT(i / 2));
- intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
-
- udelay(600);
-
- /* Program PCH FDI Receiver TU */
- intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));
-
- /* Enable PCH FDI Receiver with auto-training */
- rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
- intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
-
- /* Wait for FDI receiver lane calibration */
- udelay(30);
-
- /* Unset FDI_RX_MISC pwrdn lanes */
- temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
- temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
- intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
- intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
-
- /* Wait for FDI auto training time */
- udelay(5);
-
- temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
- if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
- drm_dbg_kms(&dev_priv->drm,
- "FDI link training done on step %d\n", i);
- break;
- }
-
- /*
- * Leave things enabled even if we failed to train FDI.
- * Results in less fireworks from the state checker.
- */
- if (i == ARRAY_SIZE(hsw_ddi_translations_fdi) * 2 - 1) {
- drm_err(&dev_priv->drm, "FDI link training failed!\n");
- break;
- }
-
- rx_ctl_val &= ~FDI_RX_ENABLE;
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
- intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
-
- temp = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
- temp &= ~DDI_BUF_CTL_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), temp);
- intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
-
- /* Disable DP_TP_CTL and FDI_RX_CTL and retry */
- temp = intel_de_read(dev_priv, DP_TP_CTL(PORT_E));
- temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
- temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
- intel_de_write(dev_priv, DP_TP_CTL(PORT_E), temp);
- intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));
-
- intel_wait_ddi_buf_idle(dev_priv, PORT_E);
-
- /* Reset FDI_RX_MISC pwrdn lanes */
- temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
- temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
- temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
- intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
- intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
- }
-
- /* Enable normal pixel sending for FDI */
- intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
- DP_TP_CTL_FDI_AUTOTRAIN |
- DP_TP_CTL_LINK_TRAIN_NORMAL |
- DP_TP_CTL_ENHANCED_FRAME_ENABLE |
- DP_TP_CTL_ENABLE);
-}
-
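Aside: the training loop above walks the FDI buffer-translation table at half speed: i runs from 0 to 2 * ARRAY_SIZE(hsw_ddi_translations_fdi) - 1 and DDI_BUF_TRANS_SELECT(i / 2) is programmed each time, so every voltage/emphasis entry gets two attempts before the next one is tried. The indexing pattern in isolation (the entry count here is a stand-in, not the real table size):

#include <stdio.h>

int main(void)
{
	const int n_fdi_entries = 3;	/* stand-in for ARRAY_SIZE(hsw_ddi_translations_fdi) */

	/* Each translation entry is attempted twice, as in hsw_fdi_link_train(). */
	for (int i = 0; i < n_fdi_entries * 2; i++)
		printf("attempt %d -> BUF_TRANS_SELECT(%d)\n", i, i / 2);

	return 0;
}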
static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
@@ -1815,25 +303,6 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
pipe_config->hw.adjusted_mode.crtc_clock = dotclock;
}
-static void intel_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
-
- if (intel_phy_is_tc(dev_priv, phy) &&
- intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll) ==
- DPLL_ID_ICL_TBTPLL)
- pipe_config->port_clock = icl_calc_tbt_pll_link(dev_priv,
- encoder->port);
- else
- pipe_config->port_clock =
- intel_dpll_get_freq(dev_priv, pipe_config->shared_dpll,
- &pipe_config->dpll_hw_state);
-
- ddi_dotclock_get(pipe_config);
-}
-
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -2480,13 +949,7 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
enum port port = encoder->port;
int n_entries;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- ddi_translations = bxt_get_buf_trans_hdmi(encoder, &n_entries);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- ddi_translations = bxt_get_buf_trans_edp(encoder, &n_entries);
- else
- ddi_translations = bxt_get_buf_trans_dp(encoder, &n_entries);
-
+ ddi_translations = bxt_get_buf_trans(encoder, crtc_state, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
@@ -2523,15 +986,9 @@ static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp,
else
icl_get_mg_buf_trans(encoder, crtc_state, &n_entries);
} else if (IS_CANNONLAKE(dev_priv)) {
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- cnl_get_buf_trans_edp(encoder, &n_entries);
- else
- cnl_get_buf_trans_dp(encoder, &n_entries);
+ cnl_get_buf_trans(encoder, crtc_state, &n_entries);
} else if (IS_GEN9_LP(dev_priv)) {
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- bxt_get_buf_trans_edp(encoder, &n_entries);
- else
- bxt_get_buf_trans_dp(encoder, &n_entries);
+ bxt_get_buf_trans(encoder, crtc_state, &n_entries);
} else {
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
intel_ddi_get_buf_trans_edp(encoder, &n_entries);
@@ -2569,12 +1026,7 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
int n_entries, ln;
u32 val;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- ddi_translations = cnl_get_buf_trans_hdmi(encoder, &n_entries);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- ddi_translations = cnl_get_buf_trans_edp(encoder, &n_entries);
- else
- ddi_translations = cnl_get_buf_trans_dp(encoder, &n_entries);
+ ddi_translations = cnl_get_buf_trans(encoder, crtc_state, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
@@ -3110,196 +1562,580 @@ hsw_set_signal_levels(struct intel_dp *intel_dp,
intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
}
-static u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
- enum phy phy)
+static void _cnl_ddi_enable_clock(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 clk_sel_mask, u32 clk_sel, u32 clk_off)
{
- if (IS_ROCKETLAKE(dev_priv)) {
- return RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
- } else if (intel_phy_is_combo(dev_priv, phy)) {
- return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
- } else if (intel_phy_is_tc(dev_priv, phy)) {
- enum tc_port tc_port = intel_port_to_tc(dev_priv,
- (enum port)phy);
+ mutex_lock(&i915->dpll.lock);
- return ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port);
- }
+ intel_de_rmw(i915, reg, clk_sel_mask, clk_sel);
- return 0;
+ /*
+ * "This step and the step before must be
+ * done with separate register writes."
+ */
+ intel_de_rmw(i915, reg, clk_off, 0);
+
+ mutex_unlock(&i915->dpll.lock);
+}
+
+static void _cnl_ddi_disable_clock(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 clk_off)
+{
+ mutex_lock(&i915->dpll.lock);
+
+ intel_de_rmw(i915, reg, 0, clk_off);
+
+ mutex_unlock(&i915->dpll.lock);
}
-static void dg1_map_plls_to_ports(struct intel_encoder *encoder,
+static bool _cnl_ddi_is_clock_enabled(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 clk_off)
+{
+ return !(intel_de_read(i915, reg) & clk_off);
+}
+
+static struct intel_shared_dpll *
+_cnl_ddi_get_pll(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 clk_sel_mask, u32 clk_sel_shift)
+{
+ enum intel_dpll_id id;
+
+ id = (intel_de_read(i915, reg) & clk_sel_mask) >> clk_sel_shift;
+
+ return intel_get_shared_dpll_by_id(i915, id);
+}
+
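Aside: _cnl_ddi_enable_clock() encodes the requirement quoted in its comment: select the PLL in the per-PHY DPCLKA_CFGCR* register with one read-modify-write, then clear the clock-off bit with a second, separate write, all under the dpll lock. A lock-free user-space model of the register sequence (the two rmw() calls stay separate on purpose):

#include <stdint.h>
#include <stdio.h>

static uint32_t dpclka_cfgcr;	/* stands in for the DPCLKA_CFGCR register */

static void rmw(uint32_t *reg, uint32_t clear, uint32_t set)
{
	*reg = (*reg & ~clear) | set;
}

static void enable_clock(uint32_t clk_sel_mask, uint32_t clk_sel, uint32_t clk_off)
{
	/* Step 1: route the chosen PLL to the DDI. */
	rmw(&dpclka_cfgcr, clk_sel_mask, clk_sel);
	/* Step 2: ungate the DDI clock; must be a separate write. */
	rmw(&dpclka_cfgcr, clk_off, 0);
}

int main(void)
{
	dpclka_cfgcr = 1u << 10;		/* clock initially gated */
	enable_clock(0x3, 0x2, 1u << 10);
	printf("CFGCR = 0x%x\n", dpclka_cfgcr);
	return 0;
}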
+static void adls_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- u32 val;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
+
+ _cnl_ddi_enable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
+ ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy),
+ pll->info->id << ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy),
+ ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
+
+static void adls_ddi_disable_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ _cnl_ddi_disable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
+ ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
+
+static bool adls_ddi_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ return _cnl_ddi_is_clock_enabled(i915, ADLS_DPCLKA_CFGCR(phy),
+ ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
+
+static struct intel_shared_dpll *adls_ddi_get_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ return _cnl_ddi_get_pll(i915, ADLS_DPCLKA_CFGCR(phy),
+ ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy),
+ ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy));
+}
+
+static void rkl_ddi_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
+
+ _cnl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0,
+ RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
+ RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
+ RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
+
+static void rkl_ddi_disable_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ _cnl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
+ RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
+
+static bool rkl_ddi_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ return _cnl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
+ RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
+
+static struct intel_shared_dpll *rkl_ddi_get_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ return _cnl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0,
+ RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
+ RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy));
+}
+
+static void dg1_ddi_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
/*
* If we fail this, something went very wrong: the first 2 PLLs should be
* used by the first 2 PHYs and the last 2 PLLs by the last 2 PHYs
*/
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(&i915->drm,
(pll->info->id < DPLL_ID_DG1_DPLL2 && phy >= PHY_C) ||
(pll->info->id >= DPLL_ID_DG1_DPLL2 && phy < PHY_C)))
return;
- mutex_lock(&dev_priv->dpll.lock);
+ _cnl_ddi_enable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
+ DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
+ DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
+ DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
- val = intel_de_read(dev_priv, DG1_DPCLKA_CFGCR0(phy));
- drm_WARN_ON(&dev_priv->drm,
- (val & DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)) == 0);
+static void dg1_ddi_disable_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
- val &= ~DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
- val |= DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
- intel_de_write(dev_priv, DG1_DPCLKA_CFGCR0(phy), val);
- intel_de_posting_read(dev_priv, DG1_DPCLKA_CFGCR0(phy));
+ _cnl_ddi_disable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
+ DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
- val &= ~DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
- intel_de_write(dev_priv, DG1_DPCLKA_CFGCR0(phy), val);
+static bool dg1_ddi_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
- mutex_unlock(&dev_priv->dpll.lock);
+ return _cnl_ddi_is_clock_enabled(i915, DG1_DPCLKA_CFGCR0(phy),
+ DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
-static void icl_map_plls_to_ports(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static struct intel_shared_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- u32 val;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
- mutex_lock(&dev_priv->dpll.lock);
+ return _cnl_ddi_get_pll(i915, DG1_DPCLKA_CFGCR0(phy),
+ DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
+ DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy));
+}
- val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
- drm_WARN_ON(&dev_priv->drm,
- (val & icl_dpclka_cfgcr0_clk_off(dev_priv, phy)) == 0);
+static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
- if (intel_phy_is_combo(dev_priv, phy)) {
- u32 mask, sel;
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
- if (IS_ROCKETLAKE(dev_priv)) {
- mask = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
- sel = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
- } else {
- mask = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
- sel = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
- }
+ _cnl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0,
+ ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
+ ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
+ ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
- /*
- * Even though this register references DDIs, note that we
- * want to pass the PHY rather than the port (DDI). For
- * ICL, port=phy in all cases so it doesn't matter, but for
- * EHL the bspec notes the following:
- *
- * "DDID clock tied to DDIA clock, so DPCLKA_CFGCR0 DDIA
- * Clock Select chooses the PLL for both DDIA and DDID and
- * drives port A in all cases."
- */
- val &= ~mask;
- val |= sel;
- intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
- intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0);
- }
+static void icl_ddi_combo_disable_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
- val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
- intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
+ _cnl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
+ ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
- mutex_unlock(&dev_priv->dpll.lock);
+static bool icl_ddi_combo_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ return _cnl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
+ ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
-static void dg1_unmap_plls_to_ports(struct intel_encoder *encoder)
+struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
- mutex_lock(&dev_priv->dpll.lock);
+ return _cnl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0,
+ ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
+ ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy));
+}
- intel_de_rmw(dev_priv, DG1_DPCLKA_CFGCR0(phy), 0,
- DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+static void jsl_ddi_tc_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum port port = encoder->port;
- mutex_unlock(&dev_priv->dpll.lock);
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
+
+ /*
+ * "For DDIC and DDID, program DDI_CLK_SEL to map the MG clock to the port.
+ * MG does not exist, but the programming is required to ungate DDIC and DDID."
+ */
+ intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_MG);
+
+ icl_ddi_combo_enable_clock(encoder, crtc_state);
}
-static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
+static void jsl_ddi_tc_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- u32 val;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
+ icl_ddi_combo_disable_clock(encoder);
+
+ intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
+}
+
+static bool jsl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ u32 tmp;
- mutex_lock(&dev_priv->dpll.lock);
+ tmp = intel_de_read(i915, DDI_CLK_SEL(port));
- val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
- val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
- intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
+ if ((tmp & DDI_CLK_SEL_MASK) == DDI_CLK_SEL_NONE)
+ return false;
- mutex_unlock(&dev_priv->dpll.lock);
+ return icl_ddi_combo_is_clock_enabled(encoder);
}
-static void dg1_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
- u32 port_mask, bool ddi_clk_needed)
+static void icl_ddi_tc_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
- enum port port;
- u32 val;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ enum port port = encoder->port;
- for_each_port_masked(port, port_mask) {
- enum phy phy = intel_port_to_phy(dev_priv, port);
- bool ddi_clk_off;
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
- val = intel_de_read(dev_priv, DG1_DPCLKA_CFGCR0(phy));
- ddi_clk_off = val & DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
+ intel_de_write(i915, DDI_CLK_SEL(port),
+ icl_pll_to_ddi_clk_sel(encoder, crtc_state));
- if (ddi_clk_needed == !ddi_clk_off)
- continue;
+ mutex_lock(&i915->dpll.lock);
- /*
- * Punt on the case now where clock is gated, but it would
- * be needed by the port. Something else is really broken then.
- */
- if (drm_WARN_ON(&dev_priv->drm, ddi_clk_needed))
- continue;
+ intel_de_rmw(i915, ICL_DPCLKA_CFGCR0,
+ ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port), 0);
- drm_notice(&dev_priv->drm,
- "PHY %c is disabled with an ungated DDI clock, gate it\n",
- phy_name(phy));
- val |= DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
- intel_de_write(dev_priv, DG1_DPCLKA_CFGCR0(phy), val);
+ mutex_unlock(&i915->dpll.lock);
+}
+
+static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ enum port port = encoder->port;
+
+ mutex_lock(&i915->dpll.lock);
+
+ intel_de_rmw(i915, ICL_DPCLKA_CFGCR0,
+ 0, ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port));
+
+ mutex_unlock(&i915->dpll.lock);
+
+ intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
+}
+
+static bool icl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ enum port port = encoder->port;
+ u32 tmp;
+
+ tmp = intel_de_read(i915, DDI_CLK_SEL(port));
+
+ if ((tmp & DDI_CLK_SEL_MASK) == DDI_CLK_SEL_NONE)
+ return false;
+
+ tmp = intel_de_read(i915, ICL_DPCLKA_CFGCR0);
+
+ return !(tmp & ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port));
+}
+
+static struct intel_shared_dpll *icl_ddi_tc_get_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ enum port port = encoder->port;
+ enum intel_dpll_id id;
+ u32 tmp;
+
+ tmp = intel_de_read(i915, DDI_CLK_SEL(port));
+
+ switch (tmp & DDI_CLK_SEL_MASK) {
+ case DDI_CLK_SEL_TBT_162:
+ case DDI_CLK_SEL_TBT_270:
+ case DDI_CLK_SEL_TBT_540:
+ case DDI_CLK_SEL_TBT_810:
+ id = DPLL_ID_ICL_TBTPLL;
+ break;
+ case DDI_CLK_SEL_MG:
+ id = icl_tc_port_to_pll_id(tc_port);
+ break;
+ default:
+ MISSING_CASE(tmp);
+ fallthrough;
+ case DDI_CLK_SEL_NONE:
+ return NULL;
}
+
+ return intel_get_shared_dpll_by_id(i915, id);
}
-static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
- u32 port_mask, bool ddi_clk_needed)
+static void cnl_ddi_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
- enum port port;
- u32 val;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum port port = encoder->port;
- val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
- for_each_port_masked(port, port_mask) {
- enum phy phy = intel_port_to_phy(dev_priv, port);
- bool ddi_clk_off = val & icl_dpclka_cfgcr0_clk_off(dev_priv,
- phy);
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
- if (ddi_clk_needed == !ddi_clk_off)
- continue;
+ _cnl_ddi_enable_clock(i915, DPCLKA_CFGCR0,
+ DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port),
+ DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port),
+ DPCLKA_CFGCR0_DDI_CLK_OFF(port));
+}
- /*
- * Punt on the case now where clock is gated, but it would
- * be needed by the port. Something else is really broken then.
- */
- if (drm_WARN_ON(&dev_priv->drm, ddi_clk_needed))
- continue;
+static void cnl_ddi_disable_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
+ _cnl_ddi_disable_clock(i915, DPCLKA_CFGCR0,
+ DPCLKA_CFGCR0_DDI_CLK_OFF(port));
+}
+
+static bool cnl_ddi_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
+ return _cnl_ddi_is_clock_enabled(i915, DPCLKA_CFGCR0,
+ DPCLKA_CFGCR0_DDI_CLK_OFF(port));
+}
+
+static struct intel_shared_dpll *cnl_ddi_get_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
- drm_notice(&dev_priv->drm,
- "PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
- phy_name(phy));
- val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
- intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
+ return _cnl_ddi_get_pll(i915, DPCLKA_CFGCR0,
+ DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port),
+ DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port));
+}
+
+static struct intel_shared_dpll *bxt_ddi_get_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum intel_dpll_id id;
+
+ switch (encoder->port) {
+ case PORT_A:
+ id = DPLL_ID_SKL_DPLL0;
+ break;
+ case PORT_B:
+ id = DPLL_ID_SKL_DPLL1;
+ break;
+ case PORT_C:
+ id = DPLL_ID_SKL_DPLL2;
+ break;
+ default:
+ MISSING_CASE(encoder->port);
+ return NULL;
}
+
+ return intel_get_shared_dpll_by_id(i915, id);
}
-void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
+static void skl_ddi_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum port port = encoder->port;
+
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
+
+ mutex_lock(&i915->dpll.lock);
+
+ intel_de_rmw(i915, DPLL_CTRL2,
+ DPLL_CTRL2_DDI_CLK_OFF(port) |
+ DPLL_CTRL2_DDI_CLK_SEL_MASK(port),
+ DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) |
+ DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
+
+ mutex_unlock(&i915->dpll.lock);
+}
+
+static void skl_ddi_disable_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
+ mutex_lock(&i915->dpll.lock);
+
+ intel_de_rmw(i915, DPLL_CTRL2,
+ 0, DPLL_CTRL2_DDI_CLK_OFF(port));
+
+ mutex_unlock(&i915->dpll.lock);
+}
+
+static bool skl_ddi_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
+ /*
+ * FIXME Not sure if the override affects both
+ * the PLL selection and the CLK_OFF bit.
+ */
+ return !(intel_de_read(i915, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port));
+}
+
+static struct intel_shared_dpll *skl_ddi_get_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ enum intel_dpll_id id;
+ u32 tmp;
+
+ tmp = intel_de_read(i915, DPLL_CTRL2);
+
+ /*
+ * FIXME Not sure if the override affects both
+ * the PLL selection and the CLK_OFF bit.
+ */
+ if ((tmp & DPLL_CTRL2_DDI_SEL_OVERRIDE(port)) == 0)
+ return NULL;
+
+ id = (tmp & DPLL_CTRL2_DDI_CLK_SEL_MASK(port)) >>
+ DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port);
+
+ return intel_get_shared_dpll_by_id(i915, id);
+}
+
+void hsw_ddi_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum port port = encoder->port;
+
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
+
+ intel_de_write(i915, PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
+}
+
+void hsw_ddi_disable_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
+ intel_de_write(i915, PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+}
+
+bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
+ return intel_de_read(i915, PORT_CLK_SEL(port)) != PORT_CLK_SEL_NONE;
+}
+
+static struct intel_shared_dpll *hsw_ddi_get_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ enum intel_dpll_id id;
+ u32 tmp;
+
+ tmp = intel_de_read(i915, PORT_CLK_SEL(port));
+
+ switch (tmp & PORT_CLK_SEL_MASK) {
+ case PORT_CLK_SEL_WRPLL1:
+ id = DPLL_ID_WRPLL1;
+ break;
+ case PORT_CLK_SEL_WRPLL2:
+ id = DPLL_ID_WRPLL2;
+ break;
+ case PORT_CLK_SEL_SPLL:
+ id = DPLL_ID_SPLL;
+ break;
+ case PORT_CLK_SEL_LCPLL_810:
+ id = DPLL_ID_LCPLL_810;
+ break;
+ case PORT_CLK_SEL_LCPLL_1350:
+ id = DPLL_ID_LCPLL_1350;
+ break;
+ case PORT_CLK_SEL_LCPLL_2700:
+ id = DPLL_ID_LCPLL_2700;
+ break;
+ default:
+ MISSING_CASE(tmp);
+ fallthrough;
+ case PORT_CLK_SEL_NONE:
+ return NULL;
+ }
+
+ return intel_get_shared_dpll_by_id(i915, id);
+}
+
+void intel_ddi_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ if (encoder->enable_clock)
+ encoder->enable_clock(encoder, crtc_state);
+}
+
+static void intel_ddi_disable_clock(struct intel_encoder *encoder)
+{
+ if (encoder->disable_clock)
+ encoder->disable_clock(encoder);
+}
+
+void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u32 port_mask;
bool ddi_clk_needed;
@@ -3319,7 +2155,7 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
* In the unlikely case that BIOS enables DP in MST mode, just
* warn since our MST HW readout is incomplete.
*/
- if (drm_WARN_ON(&dev_priv->drm, is_mst))
+ if (drm_WARN_ON(&i915->drm, is_mst))
return;
}
@@ -3334,11 +2170,11 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
* Sanity check that we haven't incorrectly registered another
* encoder using any of the ports of this DSI encoder.
*/
- for_each_intel_encoder(&dev_priv->drm, other_encoder) {
+ for_each_intel_encoder(&i915->drm, other_encoder) {
if (other_encoder == encoder)
continue;
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(&i915->drm,
port_mask & BIT(other_encoder->port)))
return;
}
@@ -3349,92 +2185,15 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
ddi_clk_needed = false;
}
- if (IS_DG1(dev_priv))
- dg1_sanitize_port_clk_off(dev_priv, port_mask, ddi_clk_needed);
- else
- icl_sanitize_port_clk_off(dev_priv, port_mask, ddi_clk_needed);
-}
-
-static void intel_ddi_clk_select(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
- enum phy phy = intel_port_to_phy(dev_priv, port);
- u32 val;
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
-
- if (drm_WARN_ON(&dev_priv->drm, !pll))
+ if (ddi_clk_needed || !encoder->disable_clock ||
+ !encoder->is_clock_enabled(encoder))
return;
- mutex_lock(&dev_priv->dpll.lock);
-
- if (INTEL_GEN(dev_priv) >= 11) {
- if (!intel_phy_is_combo(dev_priv, phy))
- intel_de_write(dev_priv, DDI_CLK_SEL(port),
- icl_pll_to_ddi_clk_sel(encoder, crtc_state));
- else if (IS_JSL_EHL(dev_priv) && port >= PORT_C)
- /*
- * MG does not exist but the programming is required
- * to ungate DDIC and DDID
- */
- intel_de_write(dev_priv, DDI_CLK_SEL(port),
- DDI_CLK_SEL_MG);
- } else if (IS_CANNONLAKE(dev_priv)) {
- /* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
- val = intel_de_read(dev_priv, DPCLKA_CFGCR0);
- val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
- val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
- intel_de_write(dev_priv, DPCLKA_CFGCR0, val);
-
- /*
- * Configure DPCLKA_CFGCR0 to turn on the clock for the DDI.
- * This step and the step before must be done with separate
- * register writes.
- */
- val = intel_de_read(dev_priv, DPCLKA_CFGCR0);
- val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
- intel_de_write(dev_priv, DPCLKA_CFGCR0, val);
- } else if (IS_GEN9_BC(dev_priv)) {
- /* DDI -> PLL mapping */
- val = intel_de_read(dev_priv, DPLL_CTRL2);
-
- val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
- DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
- val |= (DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) |
- DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
-
- intel_de_write(dev_priv, DPLL_CTRL2, val);
+ drm_notice(&i915->drm,
+ "[ENCODER:%d:%s] is disabled/in DSI mode with an ungated DDI clock, gate it\n",
+ encoder->base.base.id, encoder->base.name);
- } else if (INTEL_GEN(dev_priv) < 9) {
- intel_de_write(dev_priv, PORT_CLK_SEL(port),
- hsw_pll_to_ddi_pll_sel(pll));
- }
-
- mutex_unlock(&dev_priv->dpll.lock);
-}
-
-static void intel_ddi_clk_disable(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
- enum phy phy = intel_port_to_phy(dev_priv, port);
-
- if (INTEL_GEN(dev_priv) >= 11) {
- if (!intel_phy_is_combo(dev_priv, phy) ||
- (IS_JSL_EHL(dev_priv) && port >= PORT_C))
- intel_de_write(dev_priv, DDI_CLK_SEL(port),
- DDI_CLK_SEL_NONE);
- } else if (IS_CANNONLAKE(dev_priv)) {
- intel_de_write(dev_priv, DPCLKA_CFGCR0,
- intel_de_read(dev_priv, DPCLKA_CFGCR0) | DPCLKA_CFGCR0_DDI_CLK_OFF(port));
- } else if (IS_GEN9_BC(dev_priv)) {
- intel_de_write(dev_priv, DPLL_CTRL2,
- intel_de_read(dev_priv, DPLL_CTRL2) | DPLL_CTRL2_DDI_CLK_OFF(port));
- } else if (INTEL_GEN(dev_priv) < 9) {
- intel_de_write(dev_priv, PORT_CLK_SEL(port),
- PORT_CLK_SEL_NONE);
- }
+ encoder->disable_clock(encoder);
}
static void
@@ -3443,10 +2202,12 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
{
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
+ enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
u32 ln0, ln1, pin_assignment;
u8 width;
- if (dig_port->tc_mode == TC_PORT_TBT_ALT)
+ if (!intel_phy_is_tc(dev_priv, phy) ||
+ dig_port->tc_mode == TC_PORT_TBT_ALT)
return;
if (INTEL_GEN(dev_priv) >= 12) {
@@ -3638,6 +2399,73 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
}
}
+static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 dss1;
+
+ if (!HAS_MSO(i915))
+ return;
+
+ dss1 = intel_de_read(i915, ICL_PIPE_DSS_CTL1(pipe));
+
+ pipe_config->splitter.enable = dss1 & SPLITTER_ENABLE;
+ if (!pipe_config->splitter.enable)
+ return;
+
+ /* Splitter enable is supported for pipe A only. */
+ if (drm_WARN_ON(&i915->drm, pipe != PIPE_A)) {
+ pipe_config->splitter.enable = false;
+ return;
+ }
+
+ switch (dss1 & SPLITTER_CONFIGURATION_MASK) {
+ default:
+ drm_WARN(&i915->drm, true,
+ "Invalid splitter configuration, dss1=0x%08x\n", dss1);
+ fallthrough;
+ case SPLITTER_CONFIGURATION_2_SEGMENT:
+ pipe_config->splitter.link_count = 2;
+ break;
+ case SPLITTER_CONFIGURATION_4_SEGMENT:
+ pipe_config->splitter.link_count = 4;
+ break;
+ }
+
+ pipe_config->splitter.pixel_overlap = REG_FIELD_GET(OVERLAP_PIXELS_MASK, dss1);
+}
+
+static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 dss1 = 0;
+
+ if (!HAS_MSO(i915))
+ return;
+
+ if (crtc_state->splitter.enable) {
+ /* Splitter enable is supported for pipe A only. */
+ if (drm_WARN_ON(&i915->drm, pipe != PIPE_A))
+ return;
+
+ dss1 |= SPLITTER_ENABLE;
+ dss1 |= OVERLAP_PIXELS(crtc_state->splitter.pixel_overlap);
+ if (crtc_state->splitter.link_count == 2)
+ dss1 |= SPLITTER_CONFIGURATION_2_SEGMENT;
+ else
+ dss1 |= SPLITTER_CONFIGURATION_4_SEGMENT;
+ }
+
+ intel_de_rmw(i915, ICL_PIPE_DSS_CTL1(pipe),
+ SPLITTER_ENABLE | SPLITTER_CONFIGURATION_MASK |
+ OVERLAP_PIXELS_MASK, dss1);
+}
+
static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -3679,7 +2507,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
* hsw_crtc_enable()->intel_enable_shared_dpll(). We need only
* configure the PLL to port mapping here.
*/
- intel_ddi_clk_select(encoder, crtc_state);
+ intel_ddi_enable_clock(encoder, crtc_state);
/* 5. If IO power is controlled through PWR_WELL_CTL, Enable IO Power */
if (!intel_phy_is_tc(dev_priv, phy) ||
@@ -3732,6 +2560,11 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_ddi_power_up_lanes(encoder, crtc_state);
/*
+ * 7.g Program CoG/MSO configuration bits in DSS_CTL1 if selected.
+ */
+ intel_ddi_mso_configure(crtc_state);
+
+ /*
* 7.g Configure and enable DDI_BUF_CTL
* 7.h Wait for DDI_BUF_CTL DDI Idle Status = 0b (Not Idle), timeout
* after 500 us.
@@ -3800,7 +2633,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_pps_on(intel_dp);
- intel_ddi_clk_select(encoder, crtc_state);
+ intel_ddi_enable_clock(encoder, crtc_state);
if (!intel_phy_is_tc(dev_priv, phy) ||
dig_port->tc_mode != TC_PORT_TBT_ALT) {
@@ -3873,10 +2706,9 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- int level = intel_ddi_hdmi_level(encoder, crtc_state);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
- intel_ddi_clk_select(encoder, crtc_state);
+ intel_ddi_enable_clock(encoder, crtc_state);
drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
@@ -3884,20 +2716,6 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
icl_program_mg_dp_mode(dig_port, crtc_state);
- if (INTEL_GEN(dev_priv) >= 12)
- tgl_ddi_vswing_sequence(encoder, crtc_state, level);
- else if (INTEL_GEN(dev_priv) == 11)
- icl_ddi_vswing_sequence(encoder, crtc_state, level);
- else if (IS_CANNONLAKE(dev_priv))
- cnl_ddi_vswing_sequence(encoder, crtc_state, level);
- else if (IS_GEN9_LP(dev_priv))
- bxt_ddi_vswing_sequence(encoder, crtc_state, level);
- else
- intel_prepare_hdmi_ddi_buffers(encoder, level);
-
- if (IS_GEN9_BC(dev_priv))
- skl_ddi_set_iboost(encoder, crtc_state, level);
-
intel_ddi_enable_pipe_clock(encoder, crtc_state);
dig_port->set_infoframes(encoder,
@@ -3929,11 +2747,6 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state,
drm_WARN_ON(&dev_priv->drm, crtc_state->has_pch_encoder);
- if (IS_DG1(dev_priv))
- dg1_map_plls_to_ports(encoder, crtc_state);
- else if (INTEL_GEN(dev_priv) >= 11)
- icl_map_plls_to_ports(encoder, crtc_state);
-
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
@@ -4042,7 +2855,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
dig_port->ddi_io_power_domain,
fetch_and_zero(&dig_port->ddi_io_wakeref));
- intel_ddi_clk_disable(encoder);
+ intel_ddi_disable_clock(encoder);
}
static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
@@ -4065,7 +2878,7 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
dig_port->ddi_io_power_domain,
fetch_and_zero(&dig_port->ddi_io_wakeref));
- intel_ddi_clk_disable(encoder);
+ intel_ddi_disable_clock(encoder);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
}
@@ -4106,7 +2919,6 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
intel_atomic_get_old_crtc_state(state, slave);
intel_crtc_vblank_off(old_slave_crtc_state);
- trace_intel_pipe_disable(slave);
intel_dsc_disable(old_slave_crtc_state);
skl_scaler_disable(old_slave_crtc_state);
@@ -4132,11 +2944,6 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
intel_ddi_post_disable_dp(state, encoder, old_crtc_state,
old_conn_state);
- if (IS_DG1(dev_priv))
- dg1_unmap_plls_to_ports(encoder);
- else if (INTEL_GEN(dev_priv) >= 11)
- icl_unmap_plls_to_ports(encoder);
-
if (intel_crtc_has_dp_encoder(old_crtc_state) || is_tc_port)
intel_display_power_put(dev_priv,
intel_ddi_main_link_aux_domain(dig_port),
@@ -4165,7 +2972,7 @@ void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
intel_disable_ddi_buf(encoder, old_crtc_state);
- intel_ddi_clk_disable(encoder);
+ intel_ddi_disable_clock(encoder);
val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
@@ -4273,6 +3080,7 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_connector *connector = conn_state->connector;
+ int level = intel_ddi_hdmi_level(encoder, crtc_state);
enum port port = encoder->port;
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
@@ -4282,6 +3090,20 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
"[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n",
connector->base.id, connector->name);
+ if (INTEL_GEN(dev_priv) >= 12)
+ tgl_ddi_vswing_sequence(encoder, crtc_state, level);
+ else if (INTEL_GEN(dev_priv) == 11)
+ icl_ddi_vswing_sequence(encoder, crtc_state, level);
+ else if (IS_CANNONLAKE(dev_priv))
+ cnl_ddi_vswing_sequence(encoder, crtc_state, level);
+ else if (IS_GEN9_LP(dev_priv))
+ bxt_ddi_vswing_sequence(encoder, crtc_state, level);
+ else
+ intel_prepare_hdmi_ddi_buffers(encoder, level);
+
+ if (IS_GEN9_BC(dev_priv))
+ skl_ddi_set_iboost(encoder, crtc_state, level);
+
/* Display WA #1143: skl,kbl,cfl */
if (IS_GEN9_BC(dev_priv)) {
/*
@@ -4805,8 +3627,8 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
}
}
-void intel_ddi_get_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+static void intel_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
@@ -4828,6 +3650,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
intel_ddi_read_func_ctl(encoder, pipe_config);
}
+ intel_ddi_mso_get_config(encoder, pipe_config);
+
pipe_config->has_audio =
intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
@@ -4853,7 +3677,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
}
if (!pipe_config->bigjoiner_slave)
- intel_ddi_clock_get(encoder, pipe_config);
+ ddi_dotclock_get(pipe_config);
if (IS_GEN9_LP(dev_priv))
pipe_config->lane_lat_optim_mask =
@@ -4883,6 +3707,114 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
intel_read_dp_sdp(encoder, pipe_config, DP_SDP_VSC);
}
+void intel_ddi_get_clock(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct intel_shared_dpll *pll)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
+ struct icl_port_dpll *port_dpll = &crtc_state->icl_port_dplls[port_dpll_id];
+ bool pll_active;
+
+ port_dpll->pll = pll;
+ pll_active = intel_dpll_get_hw_state(i915, pll, &port_dpll->hw_state);
+ drm_WARN_ON(&i915->drm, !pll_active);
+
+ icl_set_active_port_dpll(crtc_state, port_dpll_id);
+
+ crtc_state->port_clock = intel_dpll_get_freq(i915, crtc_state->shared_dpll,
+ &crtc_state->dpll_hw_state);
+}
+
+static void adls_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_ddi_get_clock(encoder, crtc_state, adls_ddi_get_pll(encoder));
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
+static void rkl_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_ddi_get_clock(encoder, crtc_state, rkl_ddi_get_pll(encoder));
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
+static void dg1_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_ddi_get_clock(encoder, crtc_state, dg1_ddi_get_pll(encoder));
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
+static void icl_ddi_combo_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_ddi_get_clock(encoder, crtc_state, icl_ddi_combo_get_pll(encoder));
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
+static void icl_ddi_tc_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum icl_port_dpll_id port_dpll_id;
+ struct icl_port_dpll *port_dpll;
+ struct intel_shared_dpll *pll;
+ bool pll_active;
+
+ pll = icl_ddi_tc_get_pll(encoder);
+
+ if (intel_get_shared_dpll_id(i915, pll) == DPLL_ID_ICL_TBTPLL)
+ port_dpll_id = ICL_PORT_DPLL_DEFAULT;
+ else
+ port_dpll_id = ICL_PORT_DPLL_MG_PHY;
+
+ port_dpll = &crtc_state->icl_port_dplls[port_dpll_id];
+
+ port_dpll->pll = pll;
+ pll_active = intel_dpll_get_hw_state(i915, pll, &port_dpll->hw_state);
+ drm_WARN_ON(&i915->drm, !pll_active);
+
+ icl_set_active_port_dpll(crtc_state, port_dpll_id);
+
+ if (intel_get_shared_dpll_id(i915, crtc_state->shared_dpll) == DPLL_ID_ICL_TBTPLL)
+ crtc_state->port_clock = icl_calc_tbt_pll_link(i915, encoder->port);
+ else
+ crtc_state->port_clock = intel_dpll_get_freq(i915, crtc_state->shared_dpll,
+ &crtc_state->dpll_hw_state);
+
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
+static void cnl_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_ddi_get_clock(encoder, crtc_state, cnl_ddi_get_pll(encoder));
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
+static void bxt_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_ddi_get_clock(encoder, crtc_state, bxt_ddi_get_pll(encoder));
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
+static void skl_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_ddi_get_clock(encoder, crtc_state, skl_ddi_get_pll(encoder));
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
+void hsw_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_ddi_get_clock(encoder, crtc_state, hsw_ddi_get_pll(encoder));
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
static void intel_ddi_sync_state(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
@@ -5460,6 +4392,24 @@ static enum hpd_pin cnl_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
+static enum hpd_pin skl_hpd_pin(struct drm_i915_private *dev_priv, enum port port)
+{
+ if (HAS_PCH_TGP(dev_priv))
+ return icl_hpd_pin(dev_priv, port);
+
+ return HPD_PORT_A + port - PORT_A;
+}
+
+static bool intel_ddi_is_tc(struct drm_i915_private *i915, enum port port)
+{
+ if (INTEL_GEN(i915) >= 12)
+ return port >= PORT_TC1;
+ else if (INTEL_GEN(i915) >= 11)
+ return port >= PORT_C;
+ else
+ return false;
+}
+
#define port_tc_name(port) ((port) - PORT_TC1 + '1')
#define tc_port_name(tc_port) ((tc_port) - TC_PORT_1 + '1')
@@ -5551,7 +4501,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->post_disable = intel_ddi_post_disable;
encoder->update_pipe = intel_ddi_update_pipe;
encoder->get_hw_state = intel_ddi_get_hw_state;
- encoder->get_config = intel_ddi_get_config;
encoder->sync_state = intel_ddi_sync_state;
encoder->initial_fastset_check = intel_ddi_initial_fastset_check;
encoder->suspend = intel_dp_encoder_suspend;
@@ -5564,6 +4513,65 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->cloneable = 0;
encoder->pipe_mask = ~0;
+ if (IS_ALDERLAKE_S(dev_priv)) {
+ encoder->enable_clock = adls_ddi_enable_clock;
+ encoder->disable_clock = adls_ddi_disable_clock;
+ encoder->is_clock_enabled = adls_ddi_is_clock_enabled;
+ encoder->get_config = adls_ddi_get_config;
+ } else if (IS_ROCKETLAKE(dev_priv)) {
+ encoder->enable_clock = rkl_ddi_enable_clock;
+ encoder->disable_clock = rkl_ddi_disable_clock;
+ encoder->is_clock_enabled = rkl_ddi_is_clock_enabled;
+ encoder->get_config = rkl_ddi_get_config;
+ } else if (IS_DG1(dev_priv)) {
+ encoder->enable_clock = dg1_ddi_enable_clock;
+ encoder->disable_clock = dg1_ddi_disable_clock;
+ encoder->is_clock_enabled = dg1_ddi_is_clock_enabled;
+ encoder->get_config = dg1_ddi_get_config;
+ } else if (IS_JSL_EHL(dev_priv)) {
+ if (intel_ddi_is_tc(dev_priv, port)) {
+ encoder->enable_clock = jsl_ddi_tc_enable_clock;
+ encoder->disable_clock = jsl_ddi_tc_disable_clock;
+ encoder->is_clock_enabled = jsl_ddi_tc_is_clock_enabled;
+ encoder->get_config = icl_ddi_combo_get_config;
+ } else {
+ encoder->enable_clock = icl_ddi_combo_enable_clock;
+ encoder->disable_clock = icl_ddi_combo_disable_clock;
+ encoder->is_clock_enabled = icl_ddi_combo_is_clock_enabled;
+ encoder->get_config = icl_ddi_combo_get_config;
+ }
+ } else if (INTEL_GEN(dev_priv) >= 11) {
+ if (intel_ddi_is_tc(dev_priv, port)) {
+ encoder->enable_clock = icl_ddi_tc_enable_clock;
+ encoder->disable_clock = icl_ddi_tc_disable_clock;
+ encoder->is_clock_enabled = icl_ddi_tc_is_clock_enabled;
+ encoder->get_config = icl_ddi_tc_get_config;
+ } else {
+ encoder->enable_clock = icl_ddi_combo_enable_clock;
+ encoder->disable_clock = icl_ddi_combo_disable_clock;
+ encoder->is_clock_enabled = icl_ddi_combo_is_clock_enabled;
+ encoder->get_config = icl_ddi_combo_get_config;
+ }
+ } else if (IS_CANNONLAKE(dev_priv)) {
+ encoder->enable_clock = cnl_ddi_enable_clock;
+ encoder->disable_clock = cnl_ddi_disable_clock;
+ encoder->is_clock_enabled = cnl_ddi_is_clock_enabled;
+ encoder->get_config = cnl_ddi_get_config;
+ } else if (IS_GEN9_LP(dev_priv)) {
+ /* BXT/GLK have fixed PLL->port mapping */
+ encoder->get_config = bxt_ddi_get_config;
+ } else if (IS_GEN9_BC(dev_priv)) {
+ encoder->enable_clock = skl_ddi_enable_clock;
+ encoder->disable_clock = skl_ddi_disable_clock;
+ encoder->is_clock_enabled = skl_ddi_is_clock_enabled;
+ encoder->get_config = skl_ddi_get_config;
+ } else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ encoder->enable_clock = hsw_ddi_enable_clock;
+ encoder->disable_clock = hsw_ddi_disable_clock;
+ encoder->is_clock_enabled = hsw_ddi_is_clock_enabled;
+ encoder->get_config = hsw_ddi_get_config;
+ }
+
if (IS_DG1(dev_priv))
encoder->hpd_pin = dg1_hpd_pin(dev_priv, port);
else if (IS_ROCKETLAKE(dev_priv))
@@ -5576,6 +4584,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->hpd_pin = icl_hpd_pin(dev_priv, port);
else if (IS_GEN(dev_priv, 10))
encoder->hpd_pin = cnl_hpd_pin(dev_priv, port);
+ else if (IS_GEN(dev_priv, 9))
+ encoder->hpd_pin = skl_hpd_pin(dev_priv, port);
else
encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
@@ -5588,6 +4598,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_de_read(dev_priv, DDI_BUF_CTL(port))
& (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
+ if (intel_bios_is_lane_reversal_needed(dev_priv, port))
+ dig_port->saved_port_bits |= DDI_BUF_PORT_REVERSAL;
+
dig_port->dp.output_reg = INVALID_MMIO_REG;
dig_port->max_lanes = intel_ddi_max_lanes(dig_port);
dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
@@ -5612,6 +4625,10 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
goto err;
dig_port->hpd_pulse = intel_dp_hpd_pulse;
+
+ /* Splitter enable for eDP MSO is supported for pipe A only. */
+ if (dig_port->dp.mso_link_count)
+ encoder->pipe_mask = BIT(PIPE_A);
}
/* In theory we don't need the encoder->type check, but leave it just in
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index a4dd815c0000..59c6b01d4199 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -17,6 +17,7 @@ struct intel_crtc_state;
struct intel_dp;
struct intel_dpll_hw_state;
struct intel_encoder;
+struct intel_shared_dpll;
enum transcoder;
i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
@@ -27,8 +28,22 @@ void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
struct intel_encoder *intel_encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state);
-void hsw_fdi_link_train(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
+void intel_ddi_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_ddi_get_clock(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct intel_shared_dpll *pll);
+void hsw_ddi_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void hsw_ddi_disable_clock(struct intel_encoder *encoder);
+bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder);
+void hsw_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
+struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder);
+void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
+ enum port port);
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
@@ -40,8 +55,6 @@ void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
-void intel_ddi_get_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config);
void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
bool state);
void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
@@ -53,6 +66,6 @@ u32 ddi_signal_levels(struct intel_dp *intel_dp,
int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
enum transcoder cpu_transcoder,
bool enable, u32 hdcp_mask);
-void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
+void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
#endif /* __INTEL_DDI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
new file mode 100644
index 000000000000..f65c2b35461c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -0,0 +1,1394 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_ddi.h"
+#include "intel_ddi_buf_trans.h"
+#include "intel_display_types.h"
+
+/* HDMI/DVI modes ignore everything but the last 2 items. So we share
+ * them for both DP and FDI transports, allowing those ports to
+ * automatically adapt to HDMI connections as well.
+ */
+static const struct ddi_buf_trans hsw_ddi_translations_dp[] = {
+ { 0x00FFFFFF, 0x0006000E, 0x0 },
+ { 0x00D75FFF, 0x0005000A, 0x0 },
+ { 0x00C30FFF, 0x00040006, 0x0 },
+ { 0x80AAAFFF, 0x000B0000, 0x0 },
+ { 0x00FFFFFF, 0x0005000A, 0x0 },
+ { 0x00D75FFF, 0x000C0004, 0x0 },
+ { 0x80C30FFF, 0x000B0000, 0x0 },
+ { 0x00FFFFFF, 0x00040006, 0x0 },
+ { 0x80D75FFF, 0x000B0000, 0x0 },
+};
+
+static const struct ddi_buf_trans hsw_ddi_translations_fdi[] = {
+ { 0x00FFFFFF, 0x0007000E, 0x0 },
+ { 0x00D75FFF, 0x000F000A, 0x0 },
+ { 0x00C30FFF, 0x00060006, 0x0 },
+ { 0x00AAAFFF, 0x001E0000, 0x0 },
+ { 0x00FFFFFF, 0x000F000A, 0x0 },
+ { 0x00D75FFF, 0x00160004, 0x0 },
+ { 0x00C30FFF, 0x001E0000, 0x0 },
+ { 0x00FFFFFF, 0x00060006, 0x0 },
+ { 0x00D75FFF, 0x001E0000, 0x0 },
+};
+
+static const struct ddi_buf_trans hsw_ddi_translations_hdmi[] = {
+ /* Idx NT mV d T mV d db */
+ { 0x00FFFFFF, 0x0006000E, 0x0 },/* 0: 400 400 0 */
+ { 0x00E79FFF, 0x000E000C, 0x0 },/* 1: 400 500 2 */
+ { 0x00D75FFF, 0x0005000A, 0x0 },/* 2: 400 600 3.5 */
+ { 0x00FFFFFF, 0x0005000A, 0x0 },/* 3: 600 600 0 */
+ { 0x00E79FFF, 0x001D0007, 0x0 },/* 4: 600 750 2 */
+ { 0x00D75FFF, 0x000C0004, 0x0 },/* 5: 600 900 3.5 */
+ { 0x00FFFFFF, 0x00040006, 0x0 },/* 6: 800 800 0 */
+ { 0x80E79FFF, 0x00030002, 0x0 },/* 7: 800 1000 2 */
+ { 0x00FFFFFF, 0x00140005, 0x0 },/* 8: 850 850 0 */
+ { 0x00FFFFFF, 0x000C0004, 0x0 },/* 9: 900 900 0 */
+ { 0x00FFFFFF, 0x001C0003, 0x0 },/* 10: 950 950 0 */
+ { 0x80FFFFFF, 0x00030002, 0x0 },/* 11: 1000 1000 0 */
+};
+
+static const struct ddi_buf_trans bdw_ddi_translations_edp[] = {
+ { 0x00FFFFFF, 0x00000012, 0x0 },
+ { 0x00EBAFFF, 0x00020011, 0x0 },
+ { 0x00C71FFF, 0x0006000F, 0x0 },
+ { 0x00AAAFFF, 0x000E000A, 0x0 },
+ { 0x00FFFFFF, 0x00020011, 0x0 },
+ { 0x00DB6FFF, 0x0005000F, 0x0 },
+ { 0x00BEEFFF, 0x000A000C, 0x0 },
+ { 0x00FFFFFF, 0x0005000F, 0x0 },
+ { 0x00DB6FFF, 0x000A000C, 0x0 },
+};
+
+static const struct ddi_buf_trans bdw_ddi_translations_dp[] = {
+ { 0x00FFFFFF, 0x0007000E, 0x0 },
+ { 0x00D75FFF, 0x000E000A, 0x0 },
+ { 0x00BEFFFF, 0x00140006, 0x0 },
+ { 0x80B2CFFF, 0x001B0002, 0x0 },
+ { 0x00FFFFFF, 0x000E000A, 0x0 },
+ { 0x00DB6FFF, 0x00160005, 0x0 },
+ { 0x80C71FFF, 0x001A0002, 0x0 },
+ { 0x00F7DFFF, 0x00180004, 0x0 },
+ { 0x80D75FFF, 0x001B0002, 0x0 },
+};
+
+static const struct ddi_buf_trans bdw_ddi_translations_fdi[] = {
+ { 0x00FFFFFF, 0x0001000E, 0x0 },
+ { 0x00D75FFF, 0x0004000A, 0x0 },
+ { 0x00C30FFF, 0x00070006, 0x0 },
+ { 0x00AAAFFF, 0x000C0000, 0x0 },
+ { 0x00FFFFFF, 0x0004000A, 0x0 },
+ { 0x00D75FFF, 0x00090004, 0x0 },
+ { 0x00C30FFF, 0x000C0000, 0x0 },
+ { 0x00FFFFFF, 0x00070006, 0x0 },
+ { 0x00D75FFF, 0x000C0000, 0x0 },
+};
+
+static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
+ /* Idx NT mV d T mV df db */
+ { 0x00FFFFFF, 0x0007000E, 0x0 },/* 0: 400 400 0 */
+ { 0x00D75FFF, 0x000E000A, 0x0 },/* 1: 400 600 3.5 */
+ { 0x00BEFFFF, 0x00140006, 0x0 },/* 2: 400 800 6 */
+ { 0x00FFFFFF, 0x0009000D, 0x0 },/* 3: 450 450 0 */
+ { 0x00FFFFFF, 0x000E000A, 0x0 },/* 4: 600 600 0 */
+ { 0x00D7FFFF, 0x00140006, 0x0 },/* 5: 600 800 2.5 */
+ { 0x80CB2FFF, 0x001B0002, 0x0 },/* 6: 600 1000 4.5 */
+ { 0x00FFFFFF, 0x00140006, 0x0 },/* 7: 800 800 0 */
+ { 0x80E79FFF, 0x001B0002, 0x0 },/* 8: 800 1000 2 */
+ { 0x80FFFFFF, 0x001B0002, 0x0 },/* 9: 1000 1000 0 */
+};
+
+/* Skylake H and S */
+static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
+ { 0x00002016, 0x000000A0, 0x0 },
+ { 0x00005012, 0x0000009B, 0x0 },
+ { 0x00007011, 0x00000088, 0x0 },
+ { 0x80009010, 0x000000C0, 0x1 },
+ { 0x00002016, 0x0000009B, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x80007011, 0x000000C0, 0x1 },
+ { 0x00002016, 0x000000DF, 0x0 },
+ { 0x80005012, 0x000000C0, 0x1 },
+};
+
+/* Skylake U */
+static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
+ { 0x0000201B, 0x000000A2, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x80007011, 0x000000CD, 0x1 },
+ { 0x80009010, 0x000000C0, 0x1 },
+ { 0x0000201B, 0x0000009D, 0x0 },
+ { 0x80005012, 0x000000C0, 0x1 },
+ { 0x80007011, 0x000000C0, 0x1 },
+ { 0x00002016, 0x00000088, 0x0 },
+ { 0x80005012, 0x000000C0, 0x1 },
+};
+
+/* Skylake Y */
+static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
+ { 0x00000018, 0x000000A2, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x80007011, 0x000000CD, 0x3 },
+ { 0x80009010, 0x000000C0, 0x3 },
+ { 0x00000018, 0x0000009D, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+ { 0x80007011, 0x000000C0, 0x3 },
+ { 0x00000018, 0x00000088, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+};
+
+/* Kabylake H and S */
+static const struct ddi_buf_trans kbl_ddi_translations_dp[] = {
+ { 0x00002016, 0x000000A0, 0x0 },
+ { 0x00005012, 0x0000009B, 0x0 },
+ { 0x00007011, 0x00000088, 0x0 },
+ { 0x80009010, 0x000000C0, 0x1 },
+ { 0x00002016, 0x0000009B, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x80007011, 0x000000C0, 0x1 },
+ { 0x00002016, 0x00000097, 0x0 },
+ { 0x80005012, 0x000000C0, 0x1 },
+};
+
+/* Kabylake U */
+static const struct ddi_buf_trans kbl_u_ddi_translations_dp[] = {
+ { 0x0000201B, 0x000000A1, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x80007011, 0x000000CD, 0x3 },
+ { 0x80009010, 0x000000C0, 0x3 },
+ { 0x0000201B, 0x0000009D, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+ { 0x80007011, 0x000000C0, 0x3 },
+ { 0x00002016, 0x0000004F, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+};
+
+/* Kabylake Y */
+static const struct ddi_buf_trans kbl_y_ddi_translations_dp[] = {
+ { 0x00001017, 0x000000A1, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x80007011, 0x000000CD, 0x3 },
+ { 0x8000800F, 0x000000C0, 0x3 },
+ { 0x00001017, 0x0000009D, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+ { 0x80007011, 0x000000C0, 0x3 },
+ { 0x00001017, 0x0000004C, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+};
+
+/*
+ * Skylake/Kabylake H and S
+ * eDP 1.4 low vswing translation parameters
+ */
+static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
+ { 0x00000018, 0x000000A8, 0x0 },
+ { 0x00004013, 0x000000A9, 0x0 },
+ { 0x00007011, 0x000000A2, 0x0 },
+ { 0x00009010, 0x0000009C, 0x0 },
+ { 0x00000018, 0x000000A9, 0x0 },
+ { 0x00006013, 0x000000A2, 0x0 },
+ { 0x00007011, 0x000000A6, 0x0 },
+ { 0x00000018, 0x000000AB, 0x0 },
+ { 0x00007013, 0x0000009F, 0x0 },
+ { 0x00000018, 0x000000DF, 0x0 },
+};
+
+/*
+ * Skylake/Kabylake U
+ * eDP 1.4 low vswing translation parameters
+ */
+static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = {
+ { 0x00000018, 0x000000A8, 0x0 },
+ { 0x00004013, 0x000000A9, 0x0 },
+ { 0x00007011, 0x000000A2, 0x0 },
+ { 0x00009010, 0x0000009C, 0x0 },
+ { 0x00000018, 0x000000A9, 0x0 },
+ { 0x00006013, 0x000000A2, 0x0 },
+ { 0x00007011, 0x000000A6, 0x0 },
+ { 0x00002016, 0x000000AB, 0x0 },
+ { 0x00005013, 0x0000009F, 0x0 },
+ { 0x00000018, 0x000000DF, 0x0 },
+};
+
+/*
+ * Skylake/Kabylake Y
+ * eDP 1.4 low vswing translation parameters
+ */
+static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = {
+ { 0x00000018, 0x000000A8, 0x0 },
+ { 0x00004013, 0x000000AB, 0x0 },
+ { 0x00007011, 0x000000A4, 0x0 },
+ { 0x00009010, 0x000000DF, 0x0 },
+ { 0x00000018, 0x000000AA, 0x0 },
+ { 0x00006013, 0x000000A4, 0x0 },
+ { 0x00007011, 0x0000009D, 0x0 },
+ { 0x00000018, 0x000000A0, 0x0 },
+ { 0x00006012, 0x000000DF, 0x0 },
+ { 0x00000018, 0x0000008A, 0x0 },
+};
+
+/* Skylake/Kabylake U, H and S */
+static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
+ { 0x00000018, 0x000000AC, 0x0 },
+ { 0x00005012, 0x0000009D, 0x0 },
+ { 0x00007011, 0x00000088, 0x0 },
+ { 0x00000018, 0x000000A1, 0x0 },
+ { 0x00000018, 0x00000098, 0x0 },
+ { 0x00004013, 0x00000088, 0x0 },
+ { 0x80006012, 0x000000CD, 0x1 },
+ { 0x00000018, 0x000000DF, 0x0 },
+ { 0x80003015, 0x000000CD, 0x1 }, /* Default */
+ { 0x80003015, 0x000000C0, 0x1 },
+ { 0x80000018, 0x000000C0, 0x1 },
+};
+
+/* Skylake/Kabylake Y */
+static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
+ { 0x00000018, 0x000000A1, 0x0 },
+ { 0x00005012, 0x000000DF, 0x0 },
+ { 0x80007011, 0x000000CB, 0x3 },
+ { 0x00000018, 0x000000A4, 0x0 },
+ { 0x00000018, 0x0000009D, 0x0 },
+ { 0x00004013, 0x00000080, 0x0 },
+ { 0x80006013, 0x000000C0, 0x3 },
+ { 0x00000018, 0x0000008A, 0x0 },
+ { 0x80003015, 0x000000C0, 0x3 }, /* Default */
+ { 0x80003015, 0x000000C0, 0x3 },
+ { 0x80000018, 0x000000C0, 0x3 },
+};
+
+
+static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = {
+ /* Idx NT mV diff db */
+ { 52, 0x9A, 0, 128, }, /* 0: 400 0 */
+ { 78, 0x9A, 0, 85, }, /* 1: 400 3.5 */
+ { 104, 0x9A, 0, 64, }, /* 2: 400 6 */
+ { 154, 0x9A, 0, 43, }, /* 3: 400 9.5 */
+ { 77, 0x9A, 0, 128, }, /* 4: 600 0 */
+ { 116, 0x9A, 0, 85, }, /* 5: 600 3.5 */
+ { 154, 0x9A, 0, 64, }, /* 6: 600 6 */
+ { 102, 0x9A, 0, 128, }, /* 7: 800 0 */
+ { 154, 0x9A, 0, 85, }, /* 8: 800 3.5 */
+ { 154, 0x9A, 1, 128, }, /* 9: 1200 0 */
+};
+
+static const struct bxt_ddi_buf_trans bxt_ddi_translations_edp[] = {
+ /* Idx NT mV diff db */
+ { 26, 0, 0, 128, }, /* 0: 200 0 */
+ { 38, 0, 0, 112, }, /* 1: 200 1.5 */
+ { 48, 0, 0, 96, }, /* 2: 200 4 */
+ { 54, 0, 0, 69, }, /* 3: 200 6 */
+ { 32, 0, 0, 128, }, /* 4: 250 0 */
+ { 48, 0, 0, 104, }, /* 5: 250 1.5 */
+ { 54, 0, 0, 85, }, /* 6: 250 4 */
+ { 43, 0, 0, 128, }, /* 7: 300 0 */
+ { 54, 0, 0, 101, }, /* 8: 300 1.5 */
+ { 48, 0, 0, 128, }, /* 9: 300 0 */
+};
+
+/* BSpec has 2 recommended values - entries 0 and 8.
+ * Use the entry with the higher vswing.
+ */
+static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
+ /* Idx NT mV diff db */
+ { 52, 0x9A, 0, 128, }, /* 0: 400 0 */
+ { 52, 0x9A, 0, 85, }, /* 1: 400 3.5 */
+ { 52, 0x9A, 0, 64, }, /* 2: 400 6 */
+ { 42, 0x9A, 0, 43, }, /* 3: 400 9.5 */
+ { 77, 0x9A, 0, 128, }, /* 4: 600 0 */
+ { 77, 0x9A, 0, 85, }, /* 5: 600 3.5 */
+ { 77, 0x9A, 0, 64, }, /* 6: 600 6 */
+ { 102, 0x9A, 0, 128, }, /* 7: 800 0 */
+ { 102, 0x9A, 0, 85, }, /* 8: 800 3.5 */
+ { 154, 0x9A, 1, 128, }, /* 9: 1200 0 */
+};
+
+/* Voltage Swing Programming for VccIO 0.85V for DP */
+static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_0_85V[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x5D, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x6A, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */
+ { 0xB, 0x7A, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */
+ { 0x6, 0x7C, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */
+ { 0xA, 0x69, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xB, 0x7A, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
+ { 0x6, 0x7C, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
+ { 0xB, 0x7D, 0x3C, 0x00, 0x03 }, /* 650 725 0.9 */
+ { 0x6, 0x7C, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */
+ { 0x6, 0x7B, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+/* Voltage Swing Programming for VccIO 0.85V for HDMI */
+static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_0_85V[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
+ { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */
+ { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */
+ { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 */
+ { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
+};
+
+/* Voltage Swing Programming for VccIO 0.85V for eDP */
+static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_0_85V[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x66, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */
+ { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */
+ { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */
+ { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */
+ { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */
+ { 0xA, 0x66, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */
+ { 0xB, 0x70, 0x3C, 0x00, 0x03 }, /* 460 600 2.3 */
+ { 0xC, 0x75, 0x3C, 0x00, 0x03 }, /* 537 700 2.3 */
+ { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
+};
+
+/* Voltage Swing Programming for VccIO 0.95V for DP */
+static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_0_95V[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x5D, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x6A, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */
+ { 0xB, 0x7A, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */
+ { 0x6, 0x7C, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */
+ { 0xA, 0x69, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xB, 0x7A, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
+ { 0x6, 0x7C, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
+ { 0xB, 0x7D, 0x3C, 0x00, 0x03 }, /* 650 725 0.9 */
+ { 0x6, 0x7C, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */
+ { 0x6, 0x7B, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+/* Voltage Swing Programming for VccIO 0.95V for HDMI */
+static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_0_95V[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x5C, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
+ { 0xB, 0x69, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */
+ { 0x5, 0x76, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */
+ { 0xA, 0x5E, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
+ { 0xB, 0x69, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */
+ { 0xB, 0x79, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
+ { 0x6, 0x7D, 0x32, 0x00, 0x0D }, /* 600 1000 4.4 */
+ { 0x5, 0x76, 0x3F, 0x00, 0x00 }, /* 800 800 0.0 */
+ { 0x6, 0x7D, 0x39, 0x00, 0x06 }, /* 800 1000 1.9 */
+ { 0x6, 0x7F, 0x39, 0x00, 0x06 }, /* 850 1050 1.8 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */
+};
+
+/* Voltage Swing Programming for VccIO 0.95V for eDP */
+static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_0_95V[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x61, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */
+ { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */
+ { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */
+ { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */
+ { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */
+ { 0xA, 0x61, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */
+ { 0xB, 0x68, 0x39, 0x00, 0x06 }, /* 460 600 2.3 */
+ { 0xC, 0x6E, 0x39, 0x00, 0x06 }, /* 537 700 2.3 */
+ { 0x4, 0x7F, 0x3A, 0x00, 0x05 }, /* 460 600 2.3 */
+ { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
+};
+
+/* Voltage Swing Programming for VccIO 1.05V for DP */
+static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_1_05V[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x58, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
+ { 0xB, 0x64, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */
+ { 0x5, 0x70, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */
+ { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 400 1050 8.4 */
+ { 0xB, 0x64, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */
+ { 0x5, 0x73, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
+ { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 550 1050 5.6 */
+ { 0x5, 0x76, 0x3E, 0x00, 0x01 }, /* 850 900 0.5 */
+ { 0x6, 0x7F, 0x36, 0x00, 0x09 }, /* 750 1050 2.9 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */
+};
+
+/* Voltage Swing Programming for VccIO 1.05V for HDMI */
+static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_1_05V[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x58, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
+ { 0xB, 0x64, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */
+ { 0x5, 0x70, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */
+ { 0xA, 0x5B, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
+ { 0xB, 0x64, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */
+ { 0x5, 0x73, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
+ { 0x6, 0x7C, 0x32, 0x00, 0x0D }, /* 600 1000 4.4 */
+ { 0x5, 0x70, 0x3F, 0x00, 0x00 }, /* 800 800 0.0 */
+ { 0x6, 0x7C, 0x39, 0x00, 0x06 }, /* 800 1000 1.9 */
+ { 0x6, 0x7F, 0x39, 0x00, 0x06 }, /* 850 1050 1.8 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */
+};
+
+/* Voltage Swing Programming for VccIO 1.05V for eDP */
+static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x5E, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */
+ { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */
+ { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */
+ { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */
+ { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */
+ { 0xA, 0x5E, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */
+ { 0xB, 0x64, 0x39, 0x00, 0x06 }, /* 460 600 2.3 */
+ { 0xE, 0x6A, 0x39, 0x00, 0x06 }, /* 537 700 2.3 */
+ { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
+};
+
+/* icl_combo_phy_ddi_translations */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
+ { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
+ { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { 0x0, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */
+ { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */
+ { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */
+ { 0x9, 0x7F, 0x31, 0x00, 0x0E }, /* 200 350 4.9 */
+ { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */
+ { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */
+ { 0x9, 0x7F, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */
+ { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */
+ { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */
+ { 0x9, 0x7F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr3[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
+ { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
+ { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
+ { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */
+ { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */
+ { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 ALS */
+ { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
+};
+
+static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_dp[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x33, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x47, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */
+ { 0xC, 0x64, 0x34, 0x00, 0x0B }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 350 900 8.2 */
+ { 0xA, 0x46, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x64, 0x38, 0x00, 0x07 }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x32, 0x00, 0x0D }, /* 500 900 5.1 */
+ { 0xC, 0x61, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x38, 0x00, 0x07 }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr[] = {
+ /* NT mV Trans mV db */
+ { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */
+ { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */
+ { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */
+ { 0xA, 0x35, 0x36, 0x00, 0x09 }, /* 200 350 4.9 */
+ { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */
+ { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */
+ { 0xA, 0x35, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */
+ { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */
+ { 0xA, 0x35, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */
+ { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */
+ { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 250 1.9 */
+ { 0x1, 0x7F, 0x3D, 0x00, 0x02 }, /* 200 300 3.5 */
+ { 0xA, 0x35, 0x38, 0x00, 0x07 }, /* 200 350 4.9 */
+ { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */
+ { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 300 1.6 */
+ { 0xA, 0x35, 0x3A, 0x00, 0x05 }, /* 250 350 2.9 */
+ { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */
+ { 0xA, 0x35, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */
+ { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_rbr_hbr[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */
+ { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */
+ { 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
+ { 0xC, 0x60, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */
+ { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */
+ { 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
+ { 0xC, 0x58, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_rbr_hbr[] = {
+ /* Voltage swing pre-emphasis */
+ { 0x18, 0x00, 0x00 }, /* 0 0 */
+ { 0x1D, 0x00, 0x05 }, /* 0 1 */
+ { 0x24, 0x00, 0x0C }, /* 0 2 */
+ { 0x2B, 0x00, 0x14 }, /* 0 3 */
+ { 0x21, 0x00, 0x00 }, /* 1 0 */
+ { 0x2B, 0x00, 0x08 }, /* 1 1 */
+ { 0x30, 0x00, 0x0F }, /* 1 2 */
+ { 0x31, 0x00, 0x03 }, /* 2 0 */
+ { 0x34, 0x00, 0x0B }, /* 2 1 */
+ { 0x3F, 0x00, 0x00 }, /* 3 0 */
+};
+
+static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hbr2_hbr3[] = {
+ /* Voltage swing pre-emphasis */
+ { 0x18, 0x00, 0x00 }, /* 0 0 */
+ { 0x1D, 0x00, 0x05 }, /* 0 1 */
+ { 0x24, 0x00, 0x0C }, /* 0 2 */
+ { 0x2B, 0x00, 0x14 }, /* 0 3 */
+ { 0x26, 0x00, 0x00 }, /* 1 0 */
+ { 0x2C, 0x00, 0x07 }, /* 1 1 */
+ { 0x33, 0x00, 0x0C }, /* 1 2 */
+ { 0x2E, 0x00, 0x00 }, /* 2 0 */
+ { 0x36, 0x00, 0x09 }, /* 2 1 */
+ { 0x3F, 0x00, 0x00 }, /* 3 0 */
+};
+
+static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hdmi[] = {
+ /* HDMI Preset VS Pre-emph */
+ { 0x1A, 0x0, 0x0 }, /* 1 400mV 0dB */
+ { 0x20, 0x0, 0x0 }, /* 2 500mV 0dB */
+ { 0x29, 0x0, 0x0 }, /* 3 650mV 0dB */
+ { 0x32, 0x0, 0x0 }, /* 4 800mV 0dB */
+ { 0x3F, 0x0, 0x0 }, /* 5 1000mV 0dB */
+ { 0x3A, 0x0, 0x5 }, /* 6 Full -1.5 dB */
+ { 0x39, 0x0, 0x6 }, /* 7 Full -1.8 dB */
+ { 0x38, 0x0, 0x7 }, /* 8 Full -2 dB */
+ { 0x37, 0x0, 0x8 }, /* 9 Full -2.5 dB */
+ { 0x36, 0x0, 0x9 }, /* 10 Full -3 dB */
+};
+
+static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans[] = {
+ /* VS pre-emp Non-trans mV Pre-emph dB */
+ { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */
+ { 0x5, 0x0, 0x05 }, /* 0 1 400mV 3.5 dB */
+ { 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */
+ { 0x0, 0x0, 0x18 }, /* 0 3 400mV 9.5 dB */
+ { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */
+ { 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */
+ { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */
+ { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */
+ { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */
+ { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */
+};
+
+static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans_hbr2[] = {
+ /* VS pre-emp Non-trans mV Pre-emph dB */
+ { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */
+ { 0x5, 0x0, 0x05 }, /* 0 1 400mV 3.5 dB */
+ { 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */
+ { 0x0, 0x0, 0x19 }, /* 0 3 400mV 9.5 dB */
+ { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */
+ { 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */
+ { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */
+ { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */
+ { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */
+ { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */
+};
+
+static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_hdmi_ddi_trans[] = {
+ /* HDMI Preset VS Pre-emph */
+ { 0x7, 0x0, 0x0 }, /* 1 400mV 0dB */
+ { 0x6, 0x0, 0x0 }, /* 2 500mV 0dB */
+ { 0x4, 0x0, 0x0 }, /* 3 650mV 0dB */
+ { 0x2, 0x0, 0x0 }, /* 4 800mV 0dB */
+ { 0x0, 0x0, 0x0 }, /* 5 1000mV 0dB */
+ { 0x0, 0x0, 0x5 }, /* 6 Full -1.5 dB */
+ { 0x0, 0x0, 0x6 }, /* 7 Full -1.8 dB */
+ { 0x0, 0x0, 0x7 }, /* 8 Full -2 dB */
+ { 0x0, 0x0, 0x8 }, /* 9 Full -2.5 dB */
+ { 0x0, 0x0, 0xA }, /* 10 Full -3 dB */
+};
+
+static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
+ { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7D, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
+ { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
+ { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
+ { 0xA, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x63, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x61, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
+ { 0x6, 0x7B, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans tgl_uy_combo_phy_ddi_translations_dp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x4F, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */
+ { 0xC, 0x60, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */
+ { 0xC, 0x7F, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */
+ { 0xC, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x6F, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
+ { 0x6, 0x7D, 0x32, 0x00, 0x0D }, /* 500 900 5.1 */
+ { 0x6, 0x60, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+/*
+ * Cloned the HOBL entry to comply with the voltage and pre-emphasis entries
+ * that the DisplayPort specification requires
+ */
+static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_edp_hbr2_hobl[] = {
+ /* VS pre-emp */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 0 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 1 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 2 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 3 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 0 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 1 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 2 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 0 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 1 */
+};
+
+static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x2F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
+ { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7D, 0x2A, 0x00, 0x15 }, /* 350 900 8.2 */
+ { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x6E, 0x3E, 0x00, 0x01 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x50, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */
+ { 0xC, 0x61, 0x33, 0x00, 0x0C }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2E, 0x00, 0x11 }, /* 350 900 8.2 */
+ { 0xA, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x5F, 0x38, 0x00, 0x07 }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x5F, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
+ { 0x6, 0x7E, 0x36, 0x00, 0x09 }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+bool is_hobl_buf_trans(const struct cnl_ddi_buf_trans *table)
+{
+ return table == tgl_combo_phy_ddi_translations_edp_hbr2_hobl;
+}
+
+static const struct ddi_buf_trans *
+bdw_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (dev_priv->vbt.edp.low_vswing) {
+ *n_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+ return bdw_ddi_translations_edp;
+ } else {
+ *n_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
+ return bdw_ddi_translations_dp;
+ }
+}
+
+static const struct ddi_buf_trans *
+skl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (IS_SKL_ULX(dev_priv)) {
+ *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
+ return skl_y_ddi_translations_dp;
+ } else if (IS_SKL_ULT(dev_priv)) {
+ *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
+ return skl_u_ddi_translations_dp;
+ } else {
+ *n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+ return skl_ddi_translations_dp;
+ }
+}
+
+static const struct ddi_buf_trans *
+kbl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (IS_KBL_ULX(dev_priv) ||
+ IS_CFL_ULX(dev_priv) ||
+ IS_CML_ULX(dev_priv)) {
+ *n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
+ return kbl_y_ddi_translations_dp;
+ } else if (IS_KBL_ULT(dev_priv) ||
+ IS_CFL_ULT(dev_priv) ||
+ IS_CML_ULT(dev_priv)) {
+ *n_entries = ARRAY_SIZE(kbl_u_ddi_translations_dp);
+ return kbl_u_ddi_translations_dp;
+ } else {
+ *n_entries = ARRAY_SIZE(kbl_ddi_translations_dp);
+ return kbl_ddi_translations_dp;
+ }
+}
+
+static const struct ddi_buf_trans *
+skl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (dev_priv->vbt.edp.low_vswing) {
+ if (IS_SKL_ULX(dev_priv) ||
+ IS_KBL_ULX(dev_priv) ||
+ IS_CFL_ULX(dev_priv) ||
+ IS_CML_ULX(dev_priv)) {
+ *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
+ return skl_y_ddi_translations_edp;
+ } else if (IS_SKL_ULT(dev_priv) ||
+ IS_KBL_ULT(dev_priv) ||
+ IS_CFL_ULT(dev_priv) ||
+ IS_CML_ULT(dev_priv)) {
+ *n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
+ return skl_u_ddi_translations_edp;
+ } else {
+ *n_entries = ARRAY_SIZE(skl_ddi_translations_edp);
+ return skl_ddi_translations_edp;
+ }
+ }
+
+ if (IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv))
+ return kbl_get_buf_trans_dp(encoder, n_entries);
+ else
+ return skl_get_buf_trans_dp(encoder, n_entries);
+}
+
+static const struct ddi_buf_trans *
+skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
+{
+ if (IS_SKL_ULX(dev_priv) ||
+ IS_KBL_ULX(dev_priv) ||
+ IS_CFL_ULX(dev_priv) ||
+ IS_CML_ULX(dev_priv)) {
+ *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
+ return skl_y_ddi_translations_hdmi;
+ } else {
+ *n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
+ return skl_ddi_translations_hdmi;
+ }
+}
+
+static int skl_buf_trans_num_entries(enum port port, int n_entries)
+{
+ /* Only DDIA and DDIE can select the 10th register with DP */
+ if (port == PORT_A || port == PORT_E)
+ return min(n_entries, 10);
+ else
+ return min(n_entries, 9);
+}
+
+const struct ddi_buf_trans *
+intel_ddi_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv)) {
+ const struct ddi_buf_trans *ddi_translations =
+ kbl_get_buf_trans_dp(encoder, n_entries);
+ *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
+ return ddi_translations;
+ } else if (IS_SKYLAKE(dev_priv)) {
+ const struct ddi_buf_trans *ddi_translations =
+ skl_get_buf_trans_dp(encoder, n_entries);
+ *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
+ return ddi_translations;
+ } else if (IS_BROADWELL(dev_priv)) {
+ *n_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
+ return bdw_ddi_translations_dp;
+ } else if (IS_HASWELL(dev_priv)) {
+ *n_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
+ return hsw_ddi_translations_dp;
+ }
+
+ *n_entries = 0;
+ return NULL;
+}
+
+const struct ddi_buf_trans *
+intel_ddi_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (IS_GEN9_BC(dev_priv)) {
+ const struct ddi_buf_trans *ddi_translations =
+ skl_get_buf_trans_edp(encoder, n_entries);
+ *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
+ return ddi_translations;
+ } else if (IS_BROADWELL(dev_priv)) {
+ return bdw_get_buf_trans_edp(encoder, n_entries);
+ } else if (IS_HASWELL(dev_priv)) {
+ *n_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
+ return hsw_ddi_translations_dp;
+ }
+
+ *n_entries = 0;
+ return NULL;
+}
+
+const struct ddi_buf_trans *
+intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
+ int *n_entries)
+{
+ if (IS_BROADWELL(dev_priv)) {
+ *n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi);
+ return bdw_ddi_translations_fdi;
+ } else if (IS_HASWELL(dev_priv)) {
+ *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
+ return hsw_ddi_translations_fdi;
+ }
+
+ *n_entries = 0;
+ return NULL;
+}
+
+const struct ddi_buf_trans *
+intel_ddi_get_buf_trans_hdmi(struct intel_encoder *encoder,
+ int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (IS_GEN9_BC(dev_priv)) {
+ return skl_get_buf_trans_hdmi(dev_priv, n_entries);
+ } else if (IS_BROADWELL(dev_priv)) {
+ *n_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+ return bdw_ddi_translations_hdmi;
+ } else if (IS_HASWELL(dev_priv)) {
+ *n_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
+ return hsw_ddi_translations_hdmi;
+ }
+
+ *n_entries = 0;
+ return NULL;
+}
+
+static const struct bxt_ddi_buf_trans *
+bxt_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
+{
+ *n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
+ return bxt_ddi_translations_dp;
+}
+
+static const struct bxt_ddi_buf_trans *
+bxt_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (dev_priv->vbt.edp.low_vswing) {
+ *n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
+ return bxt_ddi_translations_edp;
+ }
+
+ return bxt_get_buf_trans_dp(encoder, n_entries);
+}
+
+static const struct bxt_ddi_buf_trans *
+bxt_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries)
+{
+ *n_entries = ARRAY_SIZE(bxt_ddi_translations_hdmi);
+ return bxt_ddi_translations_hdmi;
+}
+
+const struct bxt_ddi_buf_trans *
+bxt_get_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return bxt_get_buf_trans_hdmi(encoder, n_entries);
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return bxt_get_buf_trans_edp(encoder, n_entries);
+ return bxt_get_buf_trans_dp(encoder, n_entries);
+}
+
+static const struct cnl_ddi_buf_trans *
+cnl_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+
+ if (voltage == VOLTAGE_INFO_0_85V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_85V);
+ return cnl_ddi_translations_hdmi_0_85V;
+ } else if (voltage == VOLTAGE_INFO_0_95V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_95V);
+ return cnl_ddi_translations_hdmi_0_95V;
+ } else if (voltage == VOLTAGE_INFO_1_05V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_1_05V);
+ return cnl_ddi_translations_hdmi_1_05V;
+ } else {
+ *n_entries = 1; /* shut up gcc */
+ MISSING_CASE(voltage);
+ }
+ return NULL;
+}
+
+static const struct cnl_ddi_buf_trans *
+cnl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+
+ if (voltage == VOLTAGE_INFO_0_85V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_85V);
+ return cnl_ddi_translations_dp_0_85V;
+ } else if (voltage == VOLTAGE_INFO_0_95V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_95V);
+ return cnl_ddi_translations_dp_0_95V;
+ } else if (voltage == VOLTAGE_INFO_1_05V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_1_05V);
+ return cnl_ddi_translations_dp_1_05V;
+ } else {
+ *n_entries = 1; /* shut up gcc */
+ MISSING_CASE(voltage);
+ }
+ return NULL;
+}
+
+static const struct cnl_ddi_buf_trans *
+cnl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+
+ if (dev_priv->vbt.edp.low_vswing) {
+ if (voltage == VOLTAGE_INFO_0_85V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V);
+ return cnl_ddi_translations_edp_0_85V;
+ } else if (voltage == VOLTAGE_INFO_0_95V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V);
+ return cnl_ddi_translations_edp_0_95V;
+ } else if (voltage == VOLTAGE_INFO_1_05V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_1_05V);
+ return cnl_ddi_translations_edp_1_05V;
+ } else {
+ *n_entries = 1; /* shut up gcc */
+ MISSING_CASE(voltage);
+ }
+ return NULL;
+ } else {
+ return cnl_get_buf_trans_dp(encoder, n_entries);
+ }
+}
+
+const struct cnl_ddi_buf_trans *
+cnl_get_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return cnl_get_buf_trans_hdmi(encoder, n_entries);
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return cnl_get_buf_trans_edp(encoder, n_entries);
+ return cnl_get_buf_trans_dp(encoder, n_entries);
+}
+
+static const struct cnl_ddi_buf_trans *
+icl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
+ return icl_combo_phy_ddi_translations_hdmi;
+}
+
+static const struct cnl_ddi_buf_trans *
+icl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2);
+ return icl_combo_phy_ddi_translations_dp_hbr2;
+}
+
+static const struct cnl_ddi_buf_trans *
+icl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (crtc_state->port_clock > 540000) {
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3);
+ return icl_combo_phy_ddi_translations_edp_hbr3;
+ } else if (dev_priv->vbt.edp.low_vswing) {
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
+ return icl_combo_phy_ddi_translations_edp_hbr2;
+ } else if (IS_DG1(dev_priv) && crtc_state->port_clock > 270000) {
+ *n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_hbr2_hbr3);
+ return dg1_combo_phy_ddi_translations_dp_hbr2_hbr3;
+ } else if (IS_DG1(dev_priv)) {
+ *n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_rbr_hbr);
+ return dg1_combo_phy_ddi_translations_dp_rbr_hbr;
+ }
+
+ return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+const struct cnl_ddi_buf_trans *
+icl_get_combo_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return icl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return icl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
+ else
+ return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+static const struct icl_mg_phy_ddi_buf_trans *
+icl_get_mg_buf_trans_hdmi(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hdmi);
+ return icl_mg_phy_ddi_translations_hdmi;
+}
+
+static const struct icl_mg_phy_ddi_buf_trans *
+icl_get_mg_buf_trans_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (crtc_state->port_clock > 270000) {
+ *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hbr2_hbr3);
+ return icl_mg_phy_ddi_translations_hbr2_hbr3;
+ } else {
+ *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_rbr_hbr);
+ return icl_mg_phy_ddi_translations_rbr_hbr;
+ }
+}
+
+const struct icl_mg_phy_ddi_buf_trans *
+icl_get_mg_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return icl_get_mg_buf_trans_hdmi(encoder, crtc_state, n_entries);
+ else
+ return icl_get_mg_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+static const struct cnl_ddi_buf_trans *
+ehl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
+ return icl_combo_phy_ddi_translations_hdmi;
+}
+
+static const struct cnl_ddi_buf_trans *
+ehl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ *n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_dp);
+ return ehl_combo_phy_ddi_translations_dp;
+}
+
+static const struct cnl_ddi_buf_trans *
+ehl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (dev_priv->vbt.edp.low_vswing) {
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
+ return icl_combo_phy_ddi_translations_edp_hbr2;
+ }
+
+ return ehl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+const struct cnl_ddi_buf_trans *
+ehl_get_combo_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return ehl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return ehl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
+ else
+ return ehl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+static const struct cnl_ddi_buf_trans *
+jsl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
+ return icl_combo_phy_ddi_translations_hdmi;
+}
+
+static const struct cnl_ddi_buf_trans *
+jsl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2);
+ return icl_combo_phy_ddi_translations_dp_hbr2;
+}
+
+static const struct cnl_ddi_buf_trans *
+jsl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (dev_priv->vbt.edp.low_vswing) {
+ if (crtc_state->port_clock > 270000) {
+ *n_entries = ARRAY_SIZE(jsl_combo_phy_ddi_translations_edp_hbr2);
+ return jsl_combo_phy_ddi_translations_edp_hbr2;
+ } else {
+ *n_entries = ARRAY_SIZE(jsl_combo_phy_ddi_translations_edp_hbr);
+ return jsl_combo_phy_ddi_translations_edp_hbr;
+ }
+ }
+
+ return jsl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+const struct cnl_ddi_buf_trans *
+jsl_get_combo_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return jsl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return jsl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
+ else
+ return jsl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+static const struct cnl_ddi_buf_trans *
+tgl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
+ return icl_combo_phy_ddi_translations_hdmi;
+}
+
+static const struct cnl_ddi_buf_trans *
+tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (crtc_state->port_clock > 270000) {
+ if (IS_ROCKETLAKE(dev_priv)) {
+ *n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr2_hbr3);
+ return rkl_combo_phy_ddi_translations_dp_hbr2_hbr3;
+ } else if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
+ *n_entries = ARRAY_SIZE(tgl_uy_combo_phy_ddi_translations_dp_hbr2);
+ return tgl_uy_combo_phy_ddi_translations_dp_hbr2;
+ } else {
+ *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2);
+ return tgl_combo_phy_ddi_translations_dp_hbr2;
+ }
+ } else {
+ if (IS_ROCKETLAKE(dev_priv)) {
+ *n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr);
+ return rkl_combo_phy_ddi_translations_dp_hbr;
+ } else {
+ *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr);
+ return tgl_combo_phy_ddi_translations_dp_hbr;
+ }
+ }
+}
+
+static const struct cnl_ddi_buf_trans *
+tgl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (crtc_state->port_clock > 540000) {
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3);
+ return icl_combo_phy_ddi_translations_edp_hbr3;
+ } else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) {
+ *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_edp_hbr2_hobl);
+ return tgl_combo_phy_ddi_translations_edp_hbr2_hobl;
+ } else if (dev_priv->vbt.edp.low_vswing) {
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
+ return icl_combo_phy_ddi_translations_edp_hbr2;
+ }
+
+ return tgl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+const struct cnl_ddi_buf_trans *
+tgl_get_combo_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return tgl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return tgl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
+ else
+ return tgl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+static const struct tgl_dkl_phy_ddi_buf_trans *
+tgl_get_dkl_buf_trans_hdmi(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ *n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans);
+ return tgl_dkl_phy_hdmi_ddi_trans;
+}
+
+static const struct tgl_dkl_phy_ddi_buf_trans *
+tgl_get_dkl_buf_trans_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (crtc_state->port_clock > 270000) {
+ *n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans_hbr2);
+ return tgl_dkl_phy_dp_ddi_trans_hbr2;
+ } else {
+ *n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans);
+ return tgl_dkl_phy_dp_ddi_trans;
+ }
+}
+
+const struct tgl_dkl_phy_ddi_buf_trans *
+tgl_get_dkl_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return tgl_get_dkl_buf_trans_hdmi(encoder, crtc_state, n_entries);
+ else
+ return tgl_get_dkl_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+int intel_ddi_hdmi_num_entries(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *default_entry)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ int n_entries;
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ if (intel_phy_is_combo(dev_priv, phy))
+ tgl_get_combo_buf_trans_hdmi(encoder, crtc_state, &n_entries);
+ else
+ tgl_get_dkl_buf_trans_hdmi(encoder, crtc_state, &n_entries);
+ *default_entry = n_entries - 1;
+ } else if (INTEL_GEN(dev_priv) == 11) {
+ if (intel_phy_is_combo(dev_priv, phy))
+ icl_get_combo_buf_trans_hdmi(encoder, crtc_state, &n_entries);
+ else
+ icl_get_mg_buf_trans_hdmi(encoder, crtc_state, &n_entries);
+ *default_entry = n_entries - 1;
+ } else if (IS_CANNONLAKE(dev_priv)) {
+ cnl_get_buf_trans_hdmi(encoder, &n_entries);
+ *default_entry = n_entries - 1;
+ } else if (IS_GEN9_LP(dev_priv)) {
+ bxt_get_buf_trans_hdmi(encoder, &n_entries);
+ *default_entry = n_entries - 1;
+ } else if (IS_GEN9_BC(dev_priv)) {
+ intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
+ *default_entry = 8;
+ } else if (IS_BROADWELL(dev_priv)) {
+ intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
+ *default_entry = 7;
+ } else if (IS_HASWELL(dev_priv)) {
+ intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
+ *default_entry = 6;
+ } else {
+ drm_WARN(&dev_priv->drm, 1, "ddi translation table missing\n");
+ return 0;
+ }
+
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, n_entries == 0))
+ return 0;
+
+ return n_entries;
+}
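
For illustration only, not part of the patch: callers are expected to fetch the table and its size from the per-platform getter, clamp the requested vswing/pre-emphasis level, and index the entry. A minimal sketch against the TGL combo getter added above; the wrapper name and the level parameter are hypothetical, and the usual i915 display headers are assumed:

static const struct cnl_ddi_buf_trans *
example_pick_tgl_combo_entry(struct intel_encoder *encoder,
			     const struct intel_crtc_state *crtc_state,
			     int level)
{
	int n_entries;
	const struct cnl_ddi_buf_trans *trans =
		tgl_get_combo_buf_trans(encoder, crtc_state, &n_entries);

	/* Clamp so a bogus level can never index past the table. */
	if (level >= n_entries)
		level = n_entries - 1;

	return &trans[level];
}
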
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
new file mode 100644
index 000000000000..f8f0ef87e977
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _INTEL_DDI_BUF_TRANS_H_
+#define _INTEL_DDI_BUF_TRANS_H_
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_encoder;
+struct intel_crtc_state;
+
+struct ddi_buf_trans {
+ u32 trans1; /* balance leg enable, de-emph level */
+ u32 trans2; /* vref sel, vswing */
+ u8 i_boost; /* SKL: I_boost; valid: 0x0, 0x1, 0x3, 0x7 */
+};
+
+struct bxt_ddi_buf_trans {
+ u8 margin; /* swing value */
+ u8 scale; /* scale value */
+ u8 enable; /* scale enable */
+ u8 deemphasis;
+};
+
+struct cnl_ddi_buf_trans {
+ u8 dw2_swing_sel;
+ u8 dw7_n_scalar;
+ u8 dw4_cursor_coeff;
+ u8 dw4_post_cursor_2;
+ u8 dw4_post_cursor_1;
+};
+
+struct icl_mg_phy_ddi_buf_trans {
+ u32 cri_txdeemph_override_11_6;
+ u32 cri_txdeemph_override_5_0;
+ u32 cri_txdeemph_override_17_12;
+};
+
+struct tgl_dkl_phy_ddi_buf_trans {
+ u32 dkl_vswing_control;
+ u32 dkl_preshoot_control;
+ u32 dkl_de_emphasis_control;
+};
+
+bool is_hobl_buf_trans(const struct cnl_ddi_buf_trans *table);
+
+int intel_ddi_hdmi_num_entries(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *default_entry);
+
+const struct ddi_buf_trans *
+intel_ddi_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries);
+const struct ddi_buf_trans *
+intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
+ int *n_entries);
+const struct ddi_buf_trans *
+intel_ddi_get_buf_trans_hdmi(struct intel_encoder *encoder,
+ int *n_entries);
+const struct ddi_buf_trans *
+intel_ddi_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries);
+
+const struct bxt_ddi_buf_trans *
+bxt_get_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries);
+
+const struct cnl_ddi_buf_trans *
+tgl_get_combo_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries);
+const struct tgl_dkl_phy_ddi_buf_trans *
+tgl_get_dkl_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries);
+const struct cnl_ddi_buf_trans *
+jsl_get_combo_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries);
+const struct cnl_ddi_buf_trans *
+ehl_get_combo_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries);
+const struct cnl_ddi_buf_trans *
+icl_get_combo_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries);
+const struct icl_mg_phy_ddi_buf_trans *
+icl_get_mg_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries);
+
+const struct cnl_ddi_buf_trans *
+cnl_get_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries);
+
+#endif
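
For reference only, a hypothetical sketch of how the HOBL table above is meant to be consumed (the function name is invented; the helpers and the hobl_failed flag are the ones referenced in this series, and the usual display headers are assumed): if link training fails while the HOBL entries are in use, the DP code can flag the failure so the next attempt falls back to the regular vswing tables.

static void example_note_hobl_failure(struct intel_encoder *encoder,
				      const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	int n_entries;

	/* If the active table was the HOBL clone, stop using it from now on. */
	if (is_hobl_buf_trans(tgl_get_combo_buf_trans(encoder, crtc_state,
						      &n_entries)))
		intel_dp->hobl_failed = true;
}
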
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 8d7aaa68c6f6..1a4e255dc43c 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -24,6 +24,7 @@
* Eric Anholt <eric@anholt.net>
*/
+#include <acpi/video.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
@@ -43,6 +44,7 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
+#include "display/intel_audio.h"
#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_display_debugfs.h"
@@ -65,7 +67,6 @@
#include "gt/intel_rps.h"
#include "i915_drv.h"
-#include "i915_trace.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
@@ -94,6 +95,8 @@
#include "intel_tc.h"
#include "intel_vga.h"
#include "i9xx_plane.h"
+#include "skl_scaler.h"
+#include "skl_universal_plane.h"
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
@@ -112,11 +115,6 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
-static void vlv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config);
-static void chv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config);
-static void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
@@ -569,224 +567,6 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
-static void _vlv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
-
- intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
- intel_de_posting_read(dev_priv, DPLL(pipe));
- udelay(150);
-
- if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
- drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
-}
-
-static void vlv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
-
- assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
-
- /* PLL is protected by panel, make sure we can write it */
- assert_panel_unlocked(dev_priv, pipe);
-
- if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
- _vlv_enable_pll(crtc, pipe_config);
-
- intel_de_write(dev_priv, DPLL_MD(pipe),
- pipe_config->dpll_hw_state.dpll_md);
- intel_de_posting_read(dev_priv, DPLL_MD(pipe));
-}
-
-
-static void _chv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- enum dpio_channel port = vlv_pipe_to_channel(pipe);
- u32 tmp;
-
- vlv_dpio_get(dev_priv);
-
- /* Enable back the 10bit clock to display controller */
- tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
- tmp |= DPIO_DCLKP_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
-
- vlv_dpio_put(dev_priv);
-
- /*
- * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
- */
- udelay(1);
-
- /* Enable PLL */
- intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
-
- /* Check PLL is locked */
- if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
- drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
-}
-
-static void chv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
-
- assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
-
- /* PLL is protected by panel, make sure we can write it */
- assert_panel_unlocked(dev_priv, pipe);
-
- if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
- _chv_enable_pll(crtc, pipe_config);
-
- if (pipe != PIPE_A) {
- /*
- * WaPixelRepeatModeFixForC0:chv
- *
- * DPLLCMD is AWOL. Use chicken bits to propagate
- * the value from DPLLBMD to either pipe B or C.
- */
- intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
- intel_de_write(dev_priv, DPLL_MD(PIPE_B),
- pipe_config->dpll_hw_state.dpll_md);
- intel_de_write(dev_priv, CBR4_VLV, 0);
- dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
-
- /*
- * DPLLB VGA mode also seems to cause problems.
- * We should always have it disabled.
- */
- drm_WARN_ON(&dev_priv->drm,
- (intel_de_read(dev_priv, DPLL(PIPE_B)) &
- DPLL_VGA_MODE_DIS) == 0);
- } else {
- intel_de_write(dev_priv, DPLL_MD(pipe),
- pipe_config->dpll_hw_state.dpll_md);
- intel_de_posting_read(dev_priv, DPLL_MD(pipe));
- }
-}
-
-static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
-{
- if (IS_I830(dev_priv))
- return false;
-
- return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
-}
-
-static void i9xx_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- i915_reg_t reg = DPLL(crtc->pipe);
- u32 dpll = crtc_state->dpll_hw_state.dpll;
- int i;
-
- assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
-
- /* PLL is protected by panel, make sure we can write it */
- if (i9xx_has_pps(dev_priv))
- assert_panel_unlocked(dev_priv, crtc->pipe);
-
- /*
- * Apparently we need to have VGA mode enabled prior to changing
- * the P1/P2 dividers. Otherwise the DPLL will keep using the old
- * dividers, even though the register value does change.
- */
- intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS);
- intel_de_write(dev_priv, reg, dpll);
-
- /* Wait for the clocks to stabilize. */
- intel_de_posting_read(dev_priv, reg);
- udelay(150);
-
- if (INTEL_GEN(dev_priv) >= 4) {
- intel_de_write(dev_priv, DPLL_MD(crtc->pipe),
- crtc_state->dpll_hw_state.dpll_md);
- } else {
- /* The pixel multiplier can only be updated once the
- * DPLL is enabled and the clocks are stable.
- *
- * So write it again.
- */
- intel_de_write(dev_priv, reg, dpll);
- }
-
- /* We do this three times for luck */
- for (i = 0; i < 3; i++) {
- intel_de_write(dev_priv, reg, dpll);
- intel_de_posting_read(dev_priv, reg);
- udelay(150); /* wait for warmup */
- }
-}
-
-static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
-
- /* Don't disable pipe or pipe PLLs if needed */
- if (IS_I830(dev_priv))
- return;
-
- /* Make sure the pipe isn't still relying on us */
- assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
-
- intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
- intel_de_posting_read(dev_priv, DPLL(pipe));
-}
-
-static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
- u32 val;
-
- /* Make sure the pipe isn't still relying on us */
- assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
-
- val = DPLL_INTEGRATED_REF_CLK_VLV |
- DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
- if (pipe != PIPE_A)
- val |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
- intel_de_write(dev_priv, DPLL(pipe), val);
- intel_de_posting_read(dev_priv, DPLL(pipe));
-}
-
-static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
- enum dpio_channel port = vlv_pipe_to_channel(pipe);
- u32 val;
-
- /* Make sure the pipe isn't still relying on us */
- assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
-
- val = DPLL_SSC_REF_CLK_CHV |
- DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
- if (pipe != PIPE_A)
- val |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
- intel_de_write(dev_priv, DPLL(pipe), val);
- intel_de_posting_read(dev_priv, DPLL(pipe));
-
- vlv_dpio_get(dev_priv);
-
- /* Disable 10bit clock to display controller */
- val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
- val &= ~DPIO_DCLKP_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
-
- vlv_dpio_put(dev_priv);
-}
-
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dig_port,
unsigned int expected_mask)
@@ -1013,8 +793,6 @@ void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
/* FIXME: assert CPU port conditions for SNB+ */
}
- trace_intel_pipe_enable(crtc);
-
reg = PIPECONF(cpu_transcoder);
val = intel_de_read(dev_priv, reg);
if (val & PIPECONF_ENABLE) {
@@ -1054,8 +832,6 @@ void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
*/
assert_planes_disabled(crtc);
- trace_intel_pipe_disable(crtc);
-
reg = PIPECONF(cpu_transcoder);
val = intel_de_read(dev_priv, reg);
if ((val & PIPECONF_ENABLE) == 0)
@@ -1082,32 +858,6 @@ static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
return IS_GEN(dev_priv, 2) ? 2048 : 4096;
}
-static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
-{
- if (!is_ccs_modifier(fb->modifier))
- return false;
-
- return plane >= fb->format->num_planes / 2;
-}
-
-static bool is_gen12_ccs_modifier(u64 modifier)
-{
- return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
- modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
- modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
-}
-
-static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
-{
- return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
-}
-
-static bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane)
-{
- return fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC &&
- plane == 2;
-}
-
static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
{
if (is_ccs_modifier(fb->modifier))
@@ -1116,38 +866,6 @@ static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
return plane == 1;
}
-static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
-{
- drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
- (main_plane && main_plane >= fb->format->num_planes / 2));
-
- return fb->format->num_planes / 2 + main_plane;
-}
-
-static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
-{
- drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
- ccs_plane < fb->format->num_planes / 2);
-
- if (is_gen12_ccs_cc_plane(fb, ccs_plane))
- return 0;
-
- return ccs_plane - fb->format->num_planes / 2;
-}
-
-int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
-{
- struct drm_i915_private *i915 = to_i915(fb->dev);
-
- if (is_ccs_modifier(fb->modifier))
- return main_to_ccs_plane(fb, main_plane);
- else if (INTEL_GEN(i915) < 11 &&
- intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
- return 1;
- else
- return 0;
-}
-
bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
u64 modifier)
@@ -1163,7 +881,7 @@ static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb,
color_plane == 1;
}
-static unsigned int
+unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(fb->dev);
@@ -1217,7 +935,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
}
}
-static unsigned int
+unsigned int
intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
{
if (is_gen12_ccs_plane(fb, color_plane))
@@ -1881,18 +1599,9 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
}
}
-bool is_ccs_modifier(u64 modifier)
-{
- return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
- modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
- modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
- modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
-}
-
static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
{
- return DIV_ROUND_UP(fb->pitches[ccs_to_main_plane(fb, ccs_plane)],
+ return DIV_ROUND_UP(fb->pitches[skl_ccs_to_main_plane(fb, ccs_plane)],
512) * 64;
}
@@ -2050,7 +1759,7 @@ static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
return stride > max_stride;
}
-static void
+void
intel_fb_plane_get_subsampling(int *hsub, int *vsub,
const struct drm_framebuffer *fb,
int color_plane)
@@ -2075,7 +1784,7 @@ intel_fb_plane_get_subsampling(int *hsub, int *vsub,
return;
}
- main_plane = ccs_to_main_plane(fb, color_plane);
+ main_plane = skl_ccs_to_main_plane(fb, color_plane);
*hsub = drm_format_info_block_width(fb->format, color_plane) /
drm_format_info_block_width(fb->format, main_plane);
@@ -2115,7 +1824,7 @@ intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
ccs_x = (x * hsub) % tile_width;
ccs_y = (y * vsub) % tile_height;
- main_plane = ccs_to_main_plane(fb, ccs_plane);
+ main_plane = skl_ccs_to_main_plane(fb, ccs_plane);
main_x = intel_fb->normal[main_plane].x % tile_width;
main_y = intel_fb->normal[main_plane].y % tile_height;
@@ -2141,7 +1850,7 @@ static void
intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
{
int main_plane = is_ccs_plane(fb, color_plane) ?
- ccs_to_main_plane(fb, color_plane) : 0;
+ skl_ccs_to_main_plane(fb, color_plane) : 0;
int main_hsub, main_vsub;
int hsub, vsub;
@@ -2495,106 +2204,6 @@ intel_plane_compute_gtt(struct intel_plane_state *plane_state)
return intel_plane_check_stride(plane_state);
}
-static int i9xx_format_to_fourcc(int format)
-{
- switch (format) {
- case DISPPLANE_8BPP:
- return DRM_FORMAT_C8;
- case DISPPLANE_BGRA555:
- return DRM_FORMAT_ARGB1555;
- case DISPPLANE_BGRX555:
- return DRM_FORMAT_XRGB1555;
- case DISPPLANE_BGRX565:
- return DRM_FORMAT_RGB565;
- default:
- case DISPPLANE_BGRX888:
- return DRM_FORMAT_XRGB8888;
- case DISPPLANE_RGBX888:
- return DRM_FORMAT_XBGR8888;
- case DISPPLANE_BGRA888:
- return DRM_FORMAT_ARGB8888;
- case DISPPLANE_RGBA888:
- return DRM_FORMAT_ABGR8888;
- case DISPPLANE_BGRX101010:
- return DRM_FORMAT_XRGB2101010;
- case DISPPLANE_RGBX101010:
- return DRM_FORMAT_XBGR2101010;
- case DISPPLANE_BGRA101010:
- return DRM_FORMAT_ARGB2101010;
- case DISPPLANE_RGBA101010:
- return DRM_FORMAT_ABGR2101010;
- case DISPPLANE_RGBX161616:
- return DRM_FORMAT_XBGR16161616F;
- }
-}
-
-int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
-{
- switch (format) {
- case PLANE_CTL_FORMAT_RGB_565:
- return DRM_FORMAT_RGB565;
- case PLANE_CTL_FORMAT_NV12:
- return DRM_FORMAT_NV12;
- case PLANE_CTL_FORMAT_XYUV:
- return DRM_FORMAT_XYUV8888;
- case PLANE_CTL_FORMAT_P010:
- return DRM_FORMAT_P010;
- case PLANE_CTL_FORMAT_P012:
- return DRM_FORMAT_P012;
- case PLANE_CTL_FORMAT_P016:
- return DRM_FORMAT_P016;
- case PLANE_CTL_FORMAT_Y210:
- return DRM_FORMAT_Y210;
- case PLANE_CTL_FORMAT_Y212:
- return DRM_FORMAT_Y212;
- case PLANE_CTL_FORMAT_Y216:
- return DRM_FORMAT_Y216;
- case PLANE_CTL_FORMAT_Y410:
- return DRM_FORMAT_XVYU2101010;
- case PLANE_CTL_FORMAT_Y412:
- return DRM_FORMAT_XVYU12_16161616;
- case PLANE_CTL_FORMAT_Y416:
- return DRM_FORMAT_XVYU16161616;
- default:
- case PLANE_CTL_FORMAT_XRGB_8888:
- if (rgb_order) {
- if (alpha)
- return DRM_FORMAT_ABGR8888;
- else
- return DRM_FORMAT_XBGR8888;
- } else {
- if (alpha)
- return DRM_FORMAT_ARGB8888;
- else
- return DRM_FORMAT_XRGB8888;
- }
- case PLANE_CTL_FORMAT_XRGB_2101010:
- if (rgb_order) {
- if (alpha)
- return DRM_FORMAT_ABGR2101010;
- else
- return DRM_FORMAT_XBGR2101010;
- } else {
- if (alpha)
- return DRM_FORMAT_ARGB2101010;
- else
- return DRM_FORMAT_XRGB2101010;
- }
- case PLANE_CTL_FORMAT_XRGB_16161616F:
- if (rgb_order) {
- if (alpha)
- return DRM_FORMAT_ABGR16161616F;
- else
- return DRM_FORMAT_XBGR16161616F;
- } else {
- if (alpha)
- return DRM_FORMAT_ARGB16161616F;
- else
- return DRM_FORMAT_XRGB16161616F;
- }
- }
-}
-
static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
struct intel_initial_plane_config *plane_config)
@@ -2789,6 +2398,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
intel_disable_plane(plane, crtc_state);
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
}
static void
@@ -2899,52 +2509,6 @@ valid_fb:
&to_intel_frontbuffer(fb)->bits);
}
-
-static bool
-skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
- int main_x, int main_y, u32 main_offset,
- int ccs_plane)
-{
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- int aux_x = plane_state->color_plane[ccs_plane].x;
- int aux_y = plane_state->color_plane[ccs_plane].y;
- u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
- u32 alignment = intel_surf_alignment(fb, ccs_plane);
- int hsub;
- int vsub;
-
- intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
- while (aux_offset >= main_offset && aux_y <= main_y) {
- int x, y;
-
- if (aux_x == main_x && aux_y == main_y)
- break;
-
- if (aux_offset == 0)
- break;
-
- x = aux_x / hsub;
- y = aux_y / vsub;
- aux_offset = intel_plane_adjust_aligned_offset(&x, &y,
- plane_state,
- ccs_plane,
- aux_offset,
- aux_offset -
- alignment);
- aux_x = x * hsub + aux_x % hsub;
- aux_y = y * vsub + aux_y % vsub;
- }
-
- if (aux_x != main_x || aux_y != main_y)
- return false;
-
- plane_state->color_plane[ccs_plane].offset = aux_offset;
- plane_state->color_plane[ccs_plane].x = aux_x;
- plane_state->color_plane[ccs_plane].y = aux_y;
-
- return true;
-}
-
unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
@@ -2956,643 +2520,6 @@ intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
return y;
}
-static int intel_plane_min_width(struct intel_plane *plane,
- const struct drm_framebuffer *fb,
- int color_plane,
- unsigned int rotation)
-{
- if (plane->min_width)
- return plane->min_width(fb, color_plane, rotation);
- else
- return 1;
-}
-
-static int intel_plane_max_width(struct intel_plane *plane,
- const struct drm_framebuffer *fb,
- int color_plane,
- unsigned int rotation)
-{
- if (plane->max_width)
- return plane->max_width(fb, color_plane, rotation);
- else
- return INT_MAX;
-}
-
-static int intel_plane_max_height(struct intel_plane *plane,
- const struct drm_framebuffer *fb,
- int color_plane,
- unsigned int rotation)
-{
- if (plane->max_height)
- return plane->max_height(fb, color_plane, rotation);
- else
- return INT_MAX;
-}
-
-int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
- int *x, int *y, u32 *offset)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- const int aux_plane = intel_main_to_aux_plane(fb, 0);
- const u32 aux_offset = plane_state->color_plane[aux_plane].offset;
- const u32 alignment = intel_surf_alignment(fb, 0);
- const int w = drm_rect_width(&plane_state->uapi.src) >> 16;
-
- intel_add_fb_offsets(x, y, plane_state, 0);
- *offset = intel_plane_compute_aligned_offset(x, y, plane_state, 0);
- if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment)))
- return -EINVAL;
-
- /*
- * AUX surface offset is specified as the distance from the
- * main surface offset, and it must be non-negative. Make
- * sure that is what we will get.
- */
- if (aux_plane && *offset > aux_offset)
- *offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0,
- *offset,
- aux_offset & ~(alignment - 1));
-
- /*
- * When using an X-tiled surface, the plane blows up
- * if the x offset + width exceed the stride.
- *
- * TODO: linear and Y-tiled seem fine, Yf untested,
- */
- if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
- int cpp = fb->format->cpp[0];
-
- while ((*x + w) * cpp > plane_state->color_plane[0].stride) {
- if (*offset == 0) {
- drm_dbg_kms(&dev_priv->drm,
- "Unable to find suitable display surface offset due to X-tiling\n");
- return -EINVAL;
- }
-
- *offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0,
- *offset,
- *offset - alignment);
- }
- }
-
- return 0;
-}
-
-static int skl_check_main_surface(struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- const unsigned int rotation = plane_state->hw.rotation;
- int x = plane_state->uapi.src.x1 >> 16;
- int y = plane_state->uapi.src.y1 >> 16;
- const int w = drm_rect_width(&plane_state->uapi.src) >> 16;
- const int h = drm_rect_height(&plane_state->uapi.src) >> 16;
- const int min_width = intel_plane_min_width(plane, fb, 0, rotation);
- const int max_width = intel_plane_max_width(plane, fb, 0, rotation);
- const int max_height = intel_plane_max_height(plane, fb, 0, rotation);
- const int aux_plane = intel_main_to_aux_plane(fb, 0);
- const u32 alignment = intel_surf_alignment(fb, 0);
- u32 offset;
- int ret;
-
- if (w > max_width || w < min_width || h > max_height) {
- drm_dbg_kms(&dev_priv->drm,
- "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
- w, h, min_width, max_width, max_height);
- return -EINVAL;
- }
-
- ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
- if (ret)
- return ret;
-
- /*
- * CCS AUX surface doesn't have its own x/y offsets, we must make sure
- * they match with the main surface x/y offsets.
- */
- if (is_ccs_modifier(fb->modifier)) {
- while (!skl_check_main_ccs_coordinates(plane_state, x, y,
- offset, aux_plane)) {
- if (offset == 0)
- break;
-
- offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
- offset, offset - alignment);
- }
-
- if (x != plane_state->color_plane[aux_plane].x ||
- y != plane_state->color_plane[aux_plane].y) {
- drm_dbg_kms(&dev_priv->drm,
- "Unable to find suitable display surface offset due to CCS\n");
- return -EINVAL;
- }
- }
-
- drm_WARN_ON(&dev_priv->drm, x > 8191 || y > 8191);
-
- plane_state->color_plane[0].offset = offset;
- plane_state->color_plane[0].x = x;
- plane_state->color_plane[0].y = y;
-
- /*
- * Put the final coordinates back so that the src
- * coordinate checks will see the right values.
- */
- drm_rect_translate_to(&plane_state->uapi.src,
- x << 16, y << 16);
-
- return 0;
-}
-
-static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- unsigned int rotation = plane_state->hw.rotation;
- int uv_plane = 1;
- int max_width = intel_plane_max_width(plane, fb, uv_plane, rotation);
- int max_height = intel_plane_max_height(plane, fb, uv_plane, rotation);
- int x = plane_state->uapi.src.x1 >> 17;
- int y = plane_state->uapi.src.y1 >> 17;
- int w = drm_rect_width(&plane_state->uapi.src) >> 17;
- int h = drm_rect_height(&plane_state->uapi.src) >> 17;
- u32 offset;
-
- /* FIXME not quite sure how/if these apply to the chroma plane */
- if (w > max_width || h > max_height) {
- drm_dbg_kms(&i915->drm,
- "CbCr source size %dx%d too big (limit %dx%d)\n",
- w, h, max_width, max_height);
- return -EINVAL;
- }
-
- intel_add_fb_offsets(&x, &y, plane_state, uv_plane);
- offset = intel_plane_compute_aligned_offset(&x, &y,
- plane_state, uv_plane);
-
- if (is_ccs_modifier(fb->modifier)) {
- int ccs_plane = main_to_ccs_plane(fb, uv_plane);
- u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
- u32 alignment = intel_surf_alignment(fb, uv_plane);
-
- if (offset > aux_offset)
- offset = intel_plane_adjust_aligned_offset(&x, &y,
- plane_state,
- uv_plane,
- offset,
- aux_offset & ~(alignment - 1));
-
- while (!skl_check_main_ccs_coordinates(plane_state, x, y,
- offset, ccs_plane)) {
- if (offset == 0)
- break;
-
- offset = intel_plane_adjust_aligned_offset(&x, &y,
- plane_state,
- uv_plane,
- offset, offset - alignment);
- }
-
- if (x != plane_state->color_plane[ccs_plane].x ||
- y != plane_state->color_plane[ccs_plane].y) {
- drm_dbg_kms(&i915->drm,
- "Unable to find suitable display surface offset due to CCS\n");
- return -EINVAL;
- }
- }
-
- drm_WARN_ON(&i915->drm, x > 8191 || y > 8191);
-
- plane_state->color_plane[uv_plane].offset = offset;
- plane_state->color_plane[uv_plane].x = x;
- plane_state->color_plane[uv_plane].y = y;
-
- return 0;
-}
-
-static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
-{
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- int src_x = plane_state->uapi.src.x1 >> 16;
- int src_y = plane_state->uapi.src.y1 >> 16;
- u32 offset;
- int ccs_plane;
-
- for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) {
- int main_hsub, main_vsub;
- int hsub, vsub;
- int x, y;
-
- if (!is_ccs_plane(fb, ccs_plane) ||
- is_gen12_ccs_cc_plane(fb, ccs_plane))
- continue;
-
- intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
- ccs_to_main_plane(fb, ccs_plane));
- intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
-
- hsub *= main_hsub;
- vsub *= main_vsub;
- x = src_x / hsub;
- y = src_y / vsub;
-
- intel_add_fb_offsets(&x, &y, plane_state, ccs_plane);
-
- offset = intel_plane_compute_aligned_offset(&x, &y,
- plane_state,
- ccs_plane);
-
- plane_state->color_plane[ccs_plane].offset = offset;
- plane_state->color_plane[ccs_plane].x = (x * hsub +
- src_x % hsub) /
- main_hsub;
- plane_state->color_plane[ccs_plane].y = (y * vsub +
- src_y % vsub) /
- main_vsub;
- }
-
- return 0;
-}
-
-int skl_check_plane_surface(struct intel_plane_state *plane_state)
-{
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- int ret, i;
-
- ret = intel_plane_compute_gtt(plane_state);
- if (ret)
- return ret;
-
- if (!plane_state->uapi.visible)
- return 0;
-
- /*
- * Handle the AUX surface first since the main surface setup depends on
- * it.
- */
- if (is_ccs_modifier(fb->modifier)) {
- ret = skl_check_ccs_aux_surface(plane_state);
- if (ret)
- return ret;
- }
-
- if (intel_format_info_is_yuv_semiplanar(fb->format,
- fb->modifier)) {
- ret = skl_check_nv12_aux_surface(plane_state);
- if (ret)
- return ret;
- }
-
- for (i = fb->format->num_planes; i < ARRAY_SIZE(plane_state->color_plane); i++) {
- plane_state->color_plane[i].offset = 0;
- plane_state->color_plane[i].x = 0;
- plane_state->color_plane[i].y = 0;
- }
-
- ret = skl_check_main_surface(plane_state);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
-{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
- intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
- intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-/*
- * This function detaches (aka. unbinds) unused scalers in hardware
- */
-static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
- const struct intel_crtc_scaler_state *scaler_state =
- &crtc_state->scaler_state;
- int i;
-
- /* loop through and disable scalers that aren't in use */
- for (i = 0; i < intel_crtc->num_scalers; i++) {
- if (!scaler_state->scalers[i].in_use)
- skl_detach_scaler(intel_crtc, i);
- }
-}
-
-static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
- int color_plane, unsigned int rotation)
-{
- /*
-	 * The stride is expressed either as a multiple of 64 byte chunks for
-	 * linear buffers, or in number of tiles for tiled buffers.
- */
- if (is_surface_linear(fb, color_plane))
- return 64;
- else if (drm_rotation_90_or_270(rotation))
- return intel_tile_height(fb, color_plane);
- else
- return intel_tile_width_bytes(fb, color_plane);
-}
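/*
 * A quick sketch of the units above, assuming the usual 512 byte wide X
 * tile: a framebuffer with a 4096 byte stride would be expressed as
 * 4096 / 64 = 64 chunks when linear, or 4096 / 512 = 8 tiles when
 * X-tiled, which is the value skl_plane_stride() below derives for the
 * PLANE_STRIDE register.
 */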
-
-u32 skl_plane_stride(const struct intel_plane_state *plane_state,
- int color_plane)
-{
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- unsigned int rotation = plane_state->hw.rotation;
- u32 stride = plane_state->color_plane[color_plane].stride;
-
- if (color_plane >= fb->format->num_planes)
- return 0;
-
- return stride / skl_plane_stride_mult(fb, color_plane, rotation);
-}
-
-static u32 skl_plane_ctl_format(u32 pixel_format)
-{
- switch (pixel_format) {
- case DRM_FORMAT_C8:
- return PLANE_CTL_FORMAT_INDEXED;
- case DRM_FORMAT_RGB565:
- return PLANE_CTL_FORMAT_RGB_565;
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ABGR8888:
- return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- return PLANE_CTL_FORMAT_XRGB_8888;
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ABGR2101010:
- return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_ARGB2101010:
- return PLANE_CTL_FORMAT_XRGB_2101010;
- case DRM_FORMAT_XBGR16161616F:
- case DRM_FORMAT_ABGR16161616F:
- return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
- case DRM_FORMAT_XRGB16161616F:
- case DRM_FORMAT_ARGB16161616F:
- return PLANE_CTL_FORMAT_XRGB_16161616F;
- case DRM_FORMAT_XYUV8888:
- return PLANE_CTL_FORMAT_XYUV;
- case DRM_FORMAT_YUYV:
- return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
- case DRM_FORMAT_YVYU:
- return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
- case DRM_FORMAT_UYVY:
- return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
- case DRM_FORMAT_VYUY:
- return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
- case DRM_FORMAT_NV12:
- return PLANE_CTL_FORMAT_NV12;
- case DRM_FORMAT_P010:
- return PLANE_CTL_FORMAT_P010;
- case DRM_FORMAT_P012:
- return PLANE_CTL_FORMAT_P012;
- case DRM_FORMAT_P016:
- return PLANE_CTL_FORMAT_P016;
- case DRM_FORMAT_Y210:
- return PLANE_CTL_FORMAT_Y210;
- case DRM_FORMAT_Y212:
- return PLANE_CTL_FORMAT_Y212;
- case DRM_FORMAT_Y216:
- return PLANE_CTL_FORMAT_Y216;
- case DRM_FORMAT_XVYU2101010:
- return PLANE_CTL_FORMAT_Y410;
- case DRM_FORMAT_XVYU12_16161616:
- return PLANE_CTL_FORMAT_Y412;
- case DRM_FORMAT_XVYU16161616:
- return PLANE_CTL_FORMAT_Y416;
- default:
- MISSING_CASE(pixel_format);
- }
-
- return 0;
-}
-
-static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
-{
- if (!plane_state->hw.fb->format->has_alpha)
- return PLANE_CTL_ALPHA_DISABLE;
-
- switch (plane_state->hw.pixel_blend_mode) {
- case DRM_MODE_BLEND_PIXEL_NONE:
- return PLANE_CTL_ALPHA_DISABLE;
- case DRM_MODE_BLEND_PREMULTI:
- return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
- case DRM_MODE_BLEND_COVERAGE:
- return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
- default:
- MISSING_CASE(plane_state->hw.pixel_blend_mode);
- return PLANE_CTL_ALPHA_DISABLE;
- }
-}
-
-static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
-{
- if (!plane_state->hw.fb->format->has_alpha)
- return PLANE_COLOR_ALPHA_DISABLE;
-
- switch (plane_state->hw.pixel_blend_mode) {
- case DRM_MODE_BLEND_PIXEL_NONE:
- return PLANE_COLOR_ALPHA_DISABLE;
- case DRM_MODE_BLEND_PREMULTI:
- return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
- case DRM_MODE_BLEND_COVERAGE:
- return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
- default:
- MISSING_CASE(plane_state->hw.pixel_blend_mode);
- return PLANE_COLOR_ALPHA_DISABLE;
- }
-}
-
-static u32 skl_plane_ctl_tiling(u64 fb_modifier)
-{
- switch (fb_modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- break;
- case I915_FORMAT_MOD_X_TILED:
- return PLANE_CTL_TILED_X;
- case I915_FORMAT_MOD_Y_TILED:
- return PLANE_CTL_TILED_Y;
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
- return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
- return PLANE_CTL_TILED_Y |
- PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
- PLANE_CTL_CLEAR_COLOR_DISABLE;
- case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
- return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE;
- case I915_FORMAT_MOD_Yf_TILED:
- return PLANE_CTL_TILED_YF;
- case I915_FORMAT_MOD_Yf_TILED_CCS:
- return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
- default:
- MISSING_CASE(fb_modifier);
- }
-
- return 0;
-}
-
-static u32 skl_plane_ctl_rotate(unsigned int rotate)
-{
- switch (rotate) {
- case DRM_MODE_ROTATE_0:
- break;
- /*
-	 * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
-	 * while i915 HW rotation is clockwise; that's why the values are swapped.
- */
- case DRM_MODE_ROTATE_90:
- return PLANE_CTL_ROTATE_270;
- case DRM_MODE_ROTATE_180:
- return PLANE_CTL_ROTATE_180;
- case DRM_MODE_ROTATE_270:
- return PLANE_CTL_ROTATE_90;
- default:
- MISSING_CASE(rotate);
- }
-
- return 0;
-}
-
-static u32 cnl_plane_ctl_flip(unsigned int reflect)
-{
- switch (reflect) {
- case 0:
- break;
- case DRM_MODE_REFLECT_X:
- return PLANE_CTL_FLIP_HORIZONTAL;
- case DRM_MODE_REFLECT_Y:
- default:
- MISSING_CASE(reflect);
- }
-
- return 0;
-}
-
-u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- u32 plane_ctl = 0;
-
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- return plane_ctl;
-
- if (crtc_state->gamma_enable)
- plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
-
- if (crtc_state->csc_enable)
- plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
-
- return plane_ctl;
-}
-
-u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- unsigned int rotation = plane_state->hw.rotation;
- const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- u32 plane_ctl;
-
- plane_ctl = PLANE_CTL_ENABLE;
-
- if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
- plane_ctl |= skl_plane_ctl_alpha(plane_state);
- plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
-
- if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
- plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
-
- if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
- plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
- }
-
- plane_ctl |= skl_plane_ctl_format(fb->format->format);
- plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
- plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
-
- if (INTEL_GEN(dev_priv) >= 10)
- plane_ctl |= cnl_plane_ctl_flip(rotation &
- DRM_MODE_REFLECT_MASK);
-
- if (key->flags & I915_SET_COLORKEY_DESTINATION)
- plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
- else if (key->flags & I915_SET_COLORKEY_SOURCE)
- plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
-
- return plane_ctl;
-}
-
-u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- u32 plane_color_ctl = 0;
-
- if (INTEL_GEN(dev_priv) >= 11)
- return plane_color_ctl;
-
- if (crtc_state->gamma_enable)
- plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
-
- if (crtc_state->csc_enable)
- plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
-
- return plane_color_ctl;
-}
-
-u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- u32 plane_color_ctl = 0;
-
- plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
- plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
-
- if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
- switch (plane_state->hw.color_encoding) {
- case DRM_COLOR_YCBCR_BT709:
- plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
- break;
- case DRM_COLOR_YCBCR_BT2020:
- plane_color_ctl |=
- PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020;
- break;
- default:
- plane_color_ctl |=
- PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601;
- }
- if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
- plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
- } else if (fb->format->is_yuv) {
- plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
- if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
- plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
- }
-
- return plane_color_ctl;
-}
-
static int
__intel_display_resume(struct drm_device *dev,
struct drm_atomic_state *state,
@@ -4157,461 +3084,6 @@ static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
}
}
-/*
- * The hardware phase 0.0 refers to the center of the pixel.
- * We want to start from the top/left edge which is phase
- * -0.5. That matches how the hardware calculates the scaling
- * factors (from top-left of the first pixel to bottom-right
- * of the last pixel, as opposed to the pixel centers).
- *
- * For 4:2:0 subsampled chroma planes we obviously have to
- * adjust that so that the chroma sample position lands in
- * the right spot.
- *
- * Note that for packed YCbCr 4:2:2 formats there is no way to
- * control chroma siting. The hardware simply replicates the
- * chroma samples for both of the luma samples, and thus we don't
- * actually get the expected MPEG2 chroma siting convention :(
- * The same behaviour is observed on pre-SKL platforms as well.
- *
- * Theory behind the formula (note that we ignore sub-pixel
- * source coordinates):
- * s = source sample position
- * d = destination sample position
- *
- * Downscaling 4:1:
- * -0.5
- * | 0.0
- * | | 1.5 (initial phase)
- * | | |
- * v v v
- * | s | s | s | s |
- * | d |
- *
- * Upscaling 1:4:
- * -0.5
- * | -0.375 (initial phase)
- * | | 0.0
- * | | |
- * v v v
- * | s |
- * | d | d | d | d |
- */
-u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
-{
- int phase = -0x8000;
- u16 trip = 0;
-
- if (chroma_cosited)
- phase += (sub - 1) * 0x8000 / sub;
-
- phase += scale / (2 * sub);
-
- /*
-	 * The hardware initial phase is limited to [-0.5:1.5].
-	 * Since the max hardware scale factor is 3.0, we
-	 * should never actually exceed 1.0 here.
- */
- WARN_ON(phase < -0x8000 || phase > 0x18000);
-
- if (phase < 0)
- phase = 0x10000 + phase;
- else
- trip = PS_PHASE_TRIP;
-
- return ((phase >> 2) & PS_PHASE_MASK) | trip;
-}
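/*
 * Plugging the two diagrams above into skl_scaler_calc_phase() as a
 * sanity check (a sketch; scale is .16 fixed point, sub = 1 and
 * chroma_cosited = false for the luma/RGB case):
 *
 *   4:1 downscale: scale = 4.0  = 0x40000
 *                  phase = -0x8000 + 0x40000 / 2 = 0x18000 = +1.5
 *   1:4 upscale:   scale = 0.25 = 0x4000
 *                  phase = -0x8000 + 0x4000 / 2  = -0x6000 = -0.375
 *
 * matching the "initial phase" marks in the diagrams, and both values
 * stay inside the [-0.5:1.5] range the hardware accepts.
 */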
-
-#define SKL_MIN_SRC_W 8
-#define SKL_MAX_SRC_W 4096
-#define SKL_MIN_SRC_H 8
-#define SKL_MAX_SRC_H 4096
-#define SKL_MIN_DST_W 8
-#define SKL_MAX_DST_W 4096
-#define SKL_MIN_DST_H 8
-#define SKL_MAX_DST_H 4096
-#define ICL_MAX_SRC_W 5120
-#define ICL_MAX_SRC_H 4096
-#define ICL_MAX_DST_W 5120
-#define ICL_MAX_DST_H 4096
-#define SKL_MIN_YUV_420_SRC_W 16
-#define SKL_MIN_YUV_420_SRC_H 16
-
-static int
-skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
- unsigned int scaler_user, int *scaler_id,
- int src_w, int src_h, int dst_w, int dst_h,
- const struct drm_format_info *format,
- u64 modifier, bool need_scaler)
-{
- struct intel_crtc_scaler_state *scaler_state =
- &crtc_state->scaler_state;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
-
- /*
- * Src coordinates are already rotated by 270 degrees for
- * the 90/270 degree plane rotation cases (to match the
- * GTT mapping), hence no need to account for rotation here.
- */
- if (src_w != dst_w || src_h != dst_h)
- need_scaler = true;
-
- /*
- * Scaling/fitting not supported in IF-ID mode in GEN9+
- * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
- * Once NV12 is enabled, handle it here while allocating scaler
- * for NV12.
- */
- if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
- need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- drm_dbg_kms(&dev_priv->drm,
- "Pipe/Plane scaling not supported with IF-ID mode\n");
- return -EINVAL;
- }
-
- /*
-	 * If the plane is being disabled, the scaler is no longer required,
-	 * or a force detach was requested:
-	 *  - free the scaler bound to this plane/crtc
-	 *  - in order to do this, update crtc_state->scaler_state.scaler_users
-	 *
-	 * Here the scaler state in crtc_state is marked free so that the
-	 * scaler can be assigned to another user. The actual register
-	 * update to free the scaler is done in plane/panel-fit programming.
-	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
- */
- if (force_detach || !need_scaler) {
- if (*scaler_id >= 0) {
- scaler_state->scaler_users &= ~(1 << scaler_user);
- scaler_state->scalers[*scaler_id].in_use = 0;
-
- drm_dbg_kms(&dev_priv->drm,
- "scaler_user index %u.%u: "
- "Staged freeing scaler id %d scaler_users = 0x%x\n",
- intel_crtc->pipe, scaler_user, *scaler_id,
- scaler_state->scaler_users);
- *scaler_id = -1;
- }
- return 0;
- }
-
- if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
- (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
- drm_dbg_kms(&dev_priv->drm,
- "Planar YUV: src dimensions not met\n");
- return -EINVAL;
- }
-
- /* range checks */
- if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
- dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
- (INTEL_GEN(dev_priv) >= 11 &&
- (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
- dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
- (INTEL_GEN(dev_priv) < 11 &&
- (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
- dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
- drm_dbg_kms(&dev_priv->drm,
- "scaler_user index %u.%u: src %ux%u dst %ux%u "
- "size is out of scaler range\n",
- intel_crtc->pipe, scaler_user, src_w, src_h,
- dst_w, dst_h);
- return -EINVAL;
- }
-
- /* mark this plane as a scaler user in crtc_state */
- scaler_state->scaler_users |= (1 << scaler_user);
- drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
- "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
- intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
- scaler_state->scaler_users);
-
- return 0;
-}
-
-static int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
-{
- const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
- int width, height;
-
- if (crtc_state->pch_pfit.enabled) {
- width = drm_rect_width(&crtc_state->pch_pfit.dst);
- height = drm_rect_height(&crtc_state->pch_pfit.dst);
- } else {
- width = pipe_mode->crtc_hdisplay;
- height = pipe_mode->crtc_vdisplay;
- }
- return skl_update_scaler(crtc_state, !crtc_state->hw.active,
- SKL_CRTC_INDEX,
- &crtc_state->scaler_state.scaler_id,
- crtc_state->pipe_src_w, crtc_state->pipe_src_h,
- width, height, NULL, 0,
- crtc_state->pch_pfit.enabled);
-}
-
-/**
- * skl_update_scaler_plane - Stages update to scaler state for a given plane.
- * @crtc_state: crtc's scaler state
- * @plane_state: atomic plane state to update
- *
- * Return
- * 0 - scaler_usage updated successfully
- * error - requested scaling cannot be supported or other error condition
- */
-static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
-{
- struct intel_plane *intel_plane =
- to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
- struct drm_framebuffer *fb = plane_state->hw.fb;
- int ret;
- bool force_detach = !fb || !plane_state->uapi.visible;
- bool need_scaler = false;
-
- /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
- if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
- fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
- need_scaler = true;
-
- ret = skl_update_scaler(crtc_state, force_detach,
- drm_plane_index(&intel_plane->base),
- &plane_state->scaler_id,
- drm_rect_width(&plane_state->uapi.src) >> 16,
- drm_rect_height(&plane_state->uapi.src) >> 16,
- drm_rect_width(&plane_state->uapi.dst),
- drm_rect_height(&plane_state->uapi.dst),
- fb ? fb->format : NULL,
- fb ? fb->modifier : 0,
- need_scaler);
-
- if (ret || plane_state->scaler_id < 0)
- return ret;
-
- /* check colorkey */
- if (plane_state->ckey.flags) {
- drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] scaling with color key not allowed",
- intel_plane->base.base.id,
- intel_plane->base.name);
- return -EINVAL;
- }
-
- /* Check src format */
- switch (fb->format->format) {
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_XYUV8888:
- case DRM_FORMAT_P010:
- case DRM_FORMAT_P012:
- case DRM_FORMAT_P016:
- case DRM_FORMAT_Y210:
- case DRM_FORMAT_Y212:
- case DRM_FORMAT_Y216:
- case DRM_FORMAT_XVYU2101010:
- case DRM_FORMAT_XVYU12_16161616:
- case DRM_FORMAT_XVYU16161616:
- break;
- case DRM_FORMAT_XBGR16161616F:
- case DRM_FORMAT_ABGR16161616F:
- case DRM_FORMAT_XRGB16161616F:
- case DRM_FORMAT_ARGB16161616F:
- if (INTEL_GEN(dev_priv) >= 11)
- break;
- fallthrough;
- default:
- drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
- intel_plane->base.base.id, intel_plane->base.name,
- fb->base.id, fb->format->format);
- return -EINVAL;
- }
-
- return 0;
-}
-
-void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- int i;
-
- for (i = 0; i < crtc->num_scalers; i++)
- skl_detach_scaler(crtc, i);
-}
-
-static int cnl_coef_tap(int i)
-{
- return i % 7;
-}
-
-static u16 cnl_nearest_filter_coef(int t)
-{
- return t == 3 ? 0x0800 : 0x3000;
-}
-
-/*
- * Theory behind setting nearest-neighbor integer scaling:
- *
- * 17 phases of 7 taps require 119 coefficients in 60 dwords per set.
- * The letter represents the filter tap (D is the center tap) and the number
- * represents the coefficient set for a phase (0-16).
- *
- * +------------+------------------------+------------------------+
- * |Index value |Data value coefficient 1|Data value coefficient 2|
- * +------------+------------------------+------------------------+
- * | 00h | B0 | A0 |
- * +------------+------------------------+------------------------+
- * | 01h | D0 | C0 |
- * +------------+------------------------+------------------------+
- * | 02h | F0 | E0 |
- * +------------+------------------------+------------------------+
- * | 03h | A1 | G0 |
- * +------------+------------------------+------------------------+
- * | 04h | C1 | B1 |
- * +------------+------------------------+------------------------+
- * | ... | ... | ... |
- * +------------+------------------------+------------------------+
- * | 38h | B16 | A16 |
- * +------------+------------------------+------------------------+
- * | 39h | D16 | C16 |
- * +------------+------------------------+------------------------+
- * | 3Ah        | F16                    | E16                    |
- * +------------+------------------------+------------------------+
- * | 3Bh | Reserved | G16 |
- * +------------+------------------------+------------------------+
- *
- * To enable nearest-neighbor scaling: program the scaler coefficients with
- * the center tap (Dxx) values set to 1 and all other values set to 0, as per
- * SCALER_COEFFICIENT_FORMAT.
- *
- */
-
-static void cnl_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
- enum pipe pipe, int id, int set)
-{
- int i;
-
- intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set),
- PS_COEE_INDEX_AUTO_INC);
-
- for (i = 0; i < 17 * 7; i += 2) {
- u32 tmp;
- int t;
-
- t = cnl_coef_tap(i);
- tmp = cnl_nearest_filter_coef(t);
-
- t = cnl_coef_tap(i + 1);
- tmp |= cnl_nearest_filter_coef(t) << 16;
-
- intel_de_write_fw(dev_priv, CNL_PS_COEF_DATA_SET(pipe, id, set),
- tmp);
- }
-
- intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set), 0);
-}
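/*
 * For reference, a sketch of what the loop above ends up writing:
 * cnl_nearest_filter_coef() returns 0x0800 (the encoding of 1.0 used
 * here) only for tap index 3, i.e. the D/center tap, and 0x3000 (0.0)
 * for every other tap, so data dword 00h (A0/B0) comes out as
 * 0x30003000 and dword 01h (C0/D0) as 0x08003000, with D0 in the upper
 * half per the "<< 16" packing above.
 */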
-
-u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
-{
- if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR) {
- return (PS_FILTER_PROGRAMMED |
- PS_Y_VERT_FILTER_SELECT(set) |
- PS_Y_HORZ_FILTER_SELECT(set) |
- PS_UV_VERT_FILTER_SELECT(set) |
- PS_UV_HORZ_FILTER_SELECT(set));
- }
-
- return PS_FILTER_MEDIUM;
-}
-
-void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe pipe,
- int id, int set, enum drm_scaling_filter filter)
-{
- switch (filter) {
- case DRM_SCALING_FILTER_DEFAULT:
- break;
- case DRM_SCALING_FILTER_NEAREST_NEIGHBOR:
- cnl_program_nearest_filter_coefs(dev_priv, pipe, id, set);
- break;
- default:
- MISSING_CASE(filter);
- }
-}
-
-static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_scaler_state *scaler_state =
- &crtc_state->scaler_state;
- struct drm_rect src = {
- .x2 = crtc_state->pipe_src_w << 16,
- .y2 = crtc_state->pipe_src_h << 16,
- };
- const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
- u16 uv_rgb_hphase, uv_rgb_vphase;
- enum pipe pipe = crtc->pipe;
- int width = drm_rect_width(dst);
- int height = drm_rect_height(dst);
- int x = dst->x1;
- int y = dst->y1;
- int hscale, vscale;
- unsigned long irqflags;
- int id;
- u32 ps_ctrl;
-
- if (!crtc_state->pch_pfit.enabled)
- return;
-
- if (drm_WARN_ON(&dev_priv->drm,
- crtc_state->scaler_state.scaler_id < 0))
- return;
-
- hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX);
- vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX);
-
- uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
- uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
-
- id = scaler_state->scaler_id;
-
- ps_ctrl = skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0);
- ps_ctrl |= PS_SCALER_EN | scaler_state->scalers[id].mode;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- skl_scaler_setup_filter(dev_priv, pipe, id, 0,
- crtc_state->hw.scaling_filter);
-
- intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), ps_ctrl);
-
- intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
- PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
- intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
- PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
- intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
- x << 16 | y);
- intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
- width << 16 | height);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -5546,10 +4018,8 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (INTEL_GEN(dev_priv) >= 11)
icl_pipe_mbus_enable(crtc);
- if (new_crtc_state->bigjoiner_slave) {
- trace_intel_pipe_enable(crtc);
+ if (new_crtc_state->bigjoiner_slave)
intel_crtc_vblank_on(new_crtc_state);
- }
intel_encoders_enable(state, crtc);
@@ -5680,6 +4150,8 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
{
if (phy == PHY_NONE)
return false;
+ else if (IS_ALDERLAKE_S(dev_priv))
+ return phy <= PHY_E;
else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
return phy <= PHY_D;
else if (IS_JSL_EHL(dev_priv))
@@ -5692,11 +4164,9 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
{
- if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
- return false;
- else if (INTEL_GEN(dev_priv) >= 12)
+ if (IS_TIGERLAKE(dev_priv))
return phy >= PHY_D && phy <= PHY_I;
- else if (INTEL_GEN(dev_priv) >= 11 && !IS_JSL_EHL(dev_priv))
+ else if (IS_ICELAKE(dev_priv))
return phy >= PHY_C && phy <= PHY_F;
else
return false;
@@ -5704,7 +4174,9 @@ bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
{
- if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
+ if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
+ return PHY_B + port - PORT_TC1;
+ else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
return PHY_C + port - PORT_TC1;
else if (IS_JSL_EHL(i915) && port == PORT_D)
return PHY_A;
@@ -6380,8 +4852,30 @@ static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state
pipe_mode->crtc_clock /= 2;
}
- intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
- intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
+ if (crtc_state->splitter.enable) {
+ int n = crtc_state->splitter.link_count;
+ int overlap = crtc_state->splitter.pixel_overlap;
+
+ /*
+ * eDP MSO uses segment timings from EDID for transcoder
+ * timings, but full mode for everything else.
+ *
+ * h_full = (h_segment - pixel_overlap) * link_count
+ */
+ pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
+ pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
+ pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
+ pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
+ pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
+ pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
+ pipe_mode->crtc_clock *= n;
+
+ intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
+ intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
+ } else {
+ intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
+ intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
+ }
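	/*
	 * Worked example of the h_full formula above, with assumed
	 * numbers: a 2-link MSO panel whose EDID reports 1920 pixel
	 * wide segments and no pixel overlap gives
	 * h_full = (1920 - 0) * 2 = 3840, so the transcoder keeps the
	 * 1920 wide segment timings while pipe_mode (and everything
	 * derived from it) sees the full 3840 wide mode.
	 */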
intel_crtc_compute_pixel_rate(crtc_state);
@@ -6419,6 +4913,19 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
pipe_config->pipe_src_w /= 2;
}
+ if (pipe_config->splitter.enable) {
+ int n = pipe_config->splitter.link_count;
+ int overlap = pipe_config->splitter.pixel_overlap;
+
+ pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
+ pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
+ pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
+ pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
+ pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
+ pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
+ pipe_mode->crtc_clock *= n;
+ }
+
intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
if (INTEL_GEN(dev_priv) < 4) {
@@ -6554,35 +5061,6 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
}
}
-static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
- pipe)
-{
- u32 reg_val;
-
- /*
- * PLLB opamp always calibrates to max value of 0x3f, force enable it
- * and set it to a reasonable value instead.
- */
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
- reg_val &= 0xffffff00;
- reg_val |= 0x00000030;
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
-
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
- reg_val &= 0x00ffffff;
- reg_val |= 0x8c000000;
- vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
-
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
- reg_val &= 0xffffff00;
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
-
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
- reg_val &= 0x00ffffff;
- reg_val |= 0xb0000000;
- vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
-}
-
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
const struct intel_link_m_n *m_n)
{
@@ -6678,267 +5156,6 @@ void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_s
intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
-static void vlv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- u32 mdiv;
- u32 bestn, bestm1, bestm2, bestp1, bestp2;
- u32 coreclk, reg_val;
-
- /* Enable Refclk */
- intel_de_write(dev_priv, DPLL(pipe),
- pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
-
- /* No need to actually set up the DPLL with DSI */
- if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
- return;
-
- vlv_dpio_get(dev_priv);
-
- bestn = pipe_config->dpll.n;
- bestm1 = pipe_config->dpll.m1;
- bestm2 = pipe_config->dpll.m2;
- bestp1 = pipe_config->dpll.p1;
- bestp2 = pipe_config->dpll.p2;
-
- /* See eDP HDMI DPIO driver vbios notes doc */
-
- /* PLL B needs special handling */
- if (pipe == PIPE_B)
- vlv_pllb_recal_opamp(dev_priv, pipe);
-
- /* Set up Tx target for periodic Rcomp update */
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
-
- /* Disable target IRef on PLL */
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
- reg_val &= 0x00ffffff;
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
-
- /* Disable fast lock */
- vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
-
- /* Set idtafcrecal before PLL is enabled */
- mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
- mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
- mdiv |= ((bestn << DPIO_N_SHIFT));
- mdiv |= (1 << DPIO_K_SHIFT);
-
- /*
- * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
- * but we don't support that).
- * Note: don't use the DAC post divider as it seems unstable.
- */
- mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
-
- mdiv |= DPIO_ENABLE_CALIBRATION;
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
-
- /* Set HBR and RBR LPF coefficients */
- if (pipe_config->port_clock == 162000 ||
- intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
- intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
- 0x009f0003);
- else
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
- 0x00d0000f);
-
- if (intel_crtc_has_dp_encoder(pipe_config)) {
- /* Use SSC source */
- if (pipe == PIPE_A)
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
- 0x0df40000);
- else
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
- 0x0df70000);
- } else { /* HDMI or VGA */
- /* Use bend source */
- if (pipe == PIPE_A)
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
- 0x0df70000);
- else
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
- 0x0df40000);
- }
-
- coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
- coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
- if (intel_crtc_has_dp_encoder(pipe_config))
- coreclk |= 0x01000000;
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
-
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
-
- vlv_dpio_put(dev_priv);
-}
-
-static void chv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- enum dpio_channel port = vlv_pipe_to_channel(pipe);
- u32 loopfilter, tribuf_calcntr;
- u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
- u32 dpio_val;
- int vco;
-
- /* Enable Refclk and SSC */
- intel_de_write(dev_priv, DPLL(pipe),
- pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
-
- /* No need to actually set up the DPLL with DSI */
- if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
- return;
-
- bestn = pipe_config->dpll.n;
- bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
- bestm1 = pipe_config->dpll.m1;
- bestm2 = pipe_config->dpll.m2 >> 22;
- bestp1 = pipe_config->dpll.p1;
- bestp2 = pipe_config->dpll.p2;
- vco = pipe_config->dpll.vco;
- dpio_val = 0;
- loopfilter = 0;
-
- vlv_dpio_get(dev_priv);
-
- /* p1 and p2 divider */
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
- 5 << DPIO_CHV_S1_DIV_SHIFT |
- bestp1 << DPIO_CHV_P1_DIV_SHIFT |
- bestp2 << DPIO_CHV_P2_DIV_SHIFT |
- 1 << DPIO_CHV_K_DIV_SHIFT);
-
- /* Feedback post-divider - m2 */
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
-
- /* Feedback refclk divider - n and m1 */
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
- DPIO_CHV_M1_DIV_BY_2 |
- 1 << DPIO_CHV_N_DIV_SHIFT);
-
- /* M2 fraction division */
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
-
- /* M2 fraction division enable */
- dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
- dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
- dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
- if (bestm2_frac)
- dpio_val |= DPIO_CHV_FRAC_DIV_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
-
- /* Program digital lock detect threshold */
- dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
- dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
- DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
- dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
- if (!bestm2_frac)
- dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
-
- /* Loop filter */
- if (vco == 5400000) {
- loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
- loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
- loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
- tribuf_calcntr = 0x9;
- } else if (vco <= 6200000) {
- loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
- loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
- loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
- tribuf_calcntr = 0x9;
- } else if (vco <= 6480000) {
- loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
- loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
- loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
- tribuf_calcntr = 0x8;
- } else {
- /* Not supported. Apply the same limits as in the max case */
- loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
- loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
- loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
- tribuf_calcntr = 0;
- }
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
-
- dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
- dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
- dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
-
- /* AFC Recal */
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
- vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
- DPIO_AFC_RECAL);
-
- vlv_dpio_put(dev_priv);
-}
-
-/**
- * vlv_force_pll_on - forcibly enable just the PLL
- * @dev_priv: i915 private structure
- * @pipe: pipe PLL to enable
- * @dpll: PLL configuration
- *
- * Enable the PLL for @pipe using the supplied @dpll config. To be used
- * in cases where we need the PLL enabled even when @pipe is not going to
- * be enabled.
- */
-int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
- const struct dpll *dpll)
-{
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- struct intel_crtc_state *pipe_config;
-
- pipe_config = intel_crtc_state_alloc(crtc);
- if (!pipe_config)
- return -ENOMEM;
-
- pipe_config->cpu_transcoder = (enum transcoder)pipe;
- pipe_config->pixel_multiplier = 1;
- pipe_config->dpll = *dpll;
-
- if (IS_CHERRYVIEW(dev_priv)) {
- chv_compute_dpll(crtc, pipe_config);
- chv_prepare_pll(crtc, pipe_config);
- chv_enable_pll(crtc, pipe_config);
- } else {
- vlv_compute_dpll(crtc, pipe_config);
- vlv_prepare_pll(crtc, pipe_config);
- vlv_enable_pll(crtc, pipe_config);
- }
-
- kfree(pipe_config);
-
- return 0;
-}
-
-/**
- * vlv_force_pll_off - forcibly disable just the PLL
- * @dev_priv: i915 private structure
- * @pipe: pipe PLL to disable
- *
- * Disable the PLL for @pipe. To be used in cases where the PLL was
- * previously force-enabled even though @pipe was not going to be enabled.
- */
-void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
- if (IS_CHERRYVIEW(dev_priv))
- chv_disable_pll(dev_priv, pipe);
- else
- vlv_disable_pll(dev_priv, pipe);
-}
-
-
-
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -7206,92 +5423,6 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
-static void
-i9xx_get_initial_plane_config(struct intel_crtc *crtc,
- struct intel_initial_plane_config *plane_config)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *plane = to_intel_plane(crtc->base.primary);
- enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- enum pipe pipe;
- u32 val, base, offset;
- int fourcc, pixel_format;
- unsigned int aligned_height;
- struct drm_framebuffer *fb;
- struct intel_framebuffer *intel_fb;
-
- if (!plane->get_hw_state(plane, &pipe))
- return;
-
- drm_WARN_ON(dev, pipe != crtc->pipe);
-
- intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
- if (!intel_fb) {
- drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
- return;
- }
-
- fb = &intel_fb->base;
-
- fb->dev = dev;
-
- val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
-
- if (INTEL_GEN(dev_priv) >= 4) {
- if (val & DISPPLANE_TILED) {
- plane_config->tiling = I915_TILING_X;
- fb->modifier = I915_FORMAT_MOD_X_TILED;
- }
-
- if (val & DISPPLANE_ROTATE_180)
- plane_config->rotation = DRM_MODE_ROTATE_180;
- }
-
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
- val & DISPPLANE_MIRROR)
- plane_config->rotation |= DRM_MODE_REFLECT_X;
-
- pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
- fourcc = i9xx_format_to_fourcc(pixel_format);
- fb->format = drm_format_info(fourcc);
-
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane));
- base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
- } else if (INTEL_GEN(dev_priv) >= 4) {
- if (plane_config->tiling)
- offset = intel_de_read(dev_priv,
- DSPTILEOFF(i9xx_plane));
- else
- offset = intel_de_read(dev_priv,
- DSPLINOFF(i9xx_plane));
- base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
- } else {
- base = intel_de_read(dev_priv, DSPADDR(i9xx_plane));
- }
- plane_config->base = base;
-
- val = intel_de_read(dev_priv, PIPESRC(pipe));
- fb->width = ((val >> 16) & 0xfff) + 1;
- fb->height = ((val >> 0) & 0xfff) + 1;
-
- val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane));
- fb->pitches[0] = val & 0xffffffc0;
-
- aligned_height = intel_fb_align_height(fb, 0, fb->height);
-
- plane_config->size = fb->pitches[0] * aligned_height;
-
- drm_dbg_kms(&dev_priv->drm,
- "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
- crtc->base.name, plane->base.name, fb->width, fb->height,
- fb->format->cpp[0] * 8, base, fb->pitches[0],
- plane_config->size);
-
- plane_config->fb = intel_fb;
-}
-
static void chv_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
@@ -8274,150 +6405,6 @@ static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
-static void
-skl_get_initial_plane_config(struct intel_crtc *crtc,
- struct intel_initial_plane_config *plane_config)
-{
- struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *plane = to_intel_plane(crtc->base.primary);
- enum plane_id plane_id = plane->id;
- enum pipe pipe;
- u32 val, base, offset, stride_mult, tiling, alpha;
- int fourcc, pixel_format;
- unsigned int aligned_height;
- struct drm_framebuffer *fb;
- struct intel_framebuffer *intel_fb;
-
- if (!plane->get_hw_state(plane, &pipe))
- return;
-
- drm_WARN_ON(dev, pipe != crtc->pipe);
-
- if (crtc_state->bigjoiner) {
- drm_dbg_kms(&dev_priv->drm,
- "Unsupported bigjoiner configuration for initial FB\n");
- return;
- }
-
- intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
- if (!intel_fb) {
- drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
- return;
- }
-
- fb = &intel_fb->base;
-
- fb->dev = dev;
-
- val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id));
-
- if (INTEL_GEN(dev_priv) >= 11)
- pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
- else
- pixel_format = val & PLANE_CTL_FORMAT_MASK;
-
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
- alpha = intel_de_read(dev_priv,
- PLANE_COLOR_CTL(pipe, plane_id));
- alpha &= PLANE_COLOR_ALPHA_MASK;
- } else {
- alpha = val & PLANE_CTL_ALPHA_MASK;
- }
-
- fourcc = skl_format_to_fourcc(pixel_format,
- val & PLANE_CTL_ORDER_RGBX, alpha);
- fb->format = drm_format_info(fourcc);
-
- tiling = val & PLANE_CTL_TILED_MASK;
- switch (tiling) {
- case PLANE_CTL_TILED_LINEAR:
- fb->modifier = DRM_FORMAT_MOD_LINEAR;
- break;
- case PLANE_CTL_TILED_X:
- plane_config->tiling = I915_TILING_X;
- fb->modifier = I915_FORMAT_MOD_X_TILED;
- break;
- case PLANE_CTL_TILED_Y:
- plane_config->tiling = I915_TILING_Y;
- if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
- fb->modifier = INTEL_GEN(dev_priv) >= 12 ?
- I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS :
- I915_FORMAT_MOD_Y_TILED_CCS;
- else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE)
- fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
- else
- fb->modifier = I915_FORMAT_MOD_Y_TILED;
- break;
- case PLANE_CTL_TILED_YF:
- if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
- fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
- else
- fb->modifier = I915_FORMAT_MOD_Yf_TILED;
- break;
- default:
- MISSING_CASE(tiling);
- goto error;
- }
-
- /*
-	 * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
-	 * while i915 HW rotation is clockwise; that's why the values are swapped.
- */
- switch (val & PLANE_CTL_ROTATE_MASK) {
- case PLANE_CTL_ROTATE_0:
- plane_config->rotation = DRM_MODE_ROTATE_0;
- break;
- case PLANE_CTL_ROTATE_90:
- plane_config->rotation = DRM_MODE_ROTATE_270;
- break;
- case PLANE_CTL_ROTATE_180:
- plane_config->rotation = DRM_MODE_ROTATE_180;
- break;
- case PLANE_CTL_ROTATE_270:
- plane_config->rotation = DRM_MODE_ROTATE_90;
- break;
- }
-
- if (INTEL_GEN(dev_priv) >= 10 &&
- val & PLANE_CTL_FLIP_HORIZONTAL)
- plane_config->rotation |= DRM_MODE_REFLECT_X;
-
- /* 90/270 degree rotation would require extra work */
- if (drm_rotation_90_or_270(plane_config->rotation))
- goto error;
-
- base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000;
- plane_config->base = base;
-
- offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id));
-
- val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id));
- fb->height = ((val >> 16) & 0xffff) + 1;
- fb->width = ((val >> 0) & 0xffff) + 1;
-
- val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id));
- stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
- fb->pitches[0] = (val & 0x3ff) * stride_mult;
-
- aligned_height = intel_fb_align_height(fb, 0, fb->height);
-
- plane_config->size = fb->pitches[0] * aligned_height;
-
- drm_dbg_kms(&dev_priv->drm,
- "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
- crtc->base.name, plane->base.name, fb->width, fb->height,
- fb->format->cpp[0] * 8, base, fb->pitches[0],
- plane_config->size);
-
- plane_config->fb = intel_fb;
- return;
-
-error:
- kfree(intel_fb);
-}
-
static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -8564,205 +6551,6 @@ out:
return ret;
}
-static void dg1_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
- struct intel_crtc_state *pipe_config)
-{
- enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
- enum phy phy = intel_port_to_phy(dev_priv, port);
- struct icl_port_dpll *port_dpll;
- struct intel_shared_dpll *pll;
- enum intel_dpll_id id;
- bool pll_active;
- u32 clk_sel;
-
- clk_sel = intel_de_read(dev_priv, DG1_DPCLKA_CFGCR0(phy)) & DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
- id = DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_DPLL_MAP(clk_sel, phy);
-
- if (WARN_ON(id > DPLL_ID_DG1_DPLL3))
- return;
-
- pll = intel_get_shared_dpll_by_id(dev_priv, id);
- port_dpll = &pipe_config->icl_port_dplls[port_dpll_id];
-
- port_dpll->pll = pll;
- pll_active = intel_dpll_get_hw_state(dev_priv, pll,
- &port_dpll->hw_state);
- drm_WARN_ON(&dev_priv->drm, !pll_active);
-
- icl_set_active_port_dpll(pipe_config, port_dpll_id);
-}
-
-static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
- struct intel_crtc_state *pipe_config)
-{
- enum phy phy = intel_port_to_phy(dev_priv, port);
- enum icl_port_dpll_id port_dpll_id;
- struct icl_port_dpll *port_dpll;
- struct intel_shared_dpll *pll;
- enum intel_dpll_id id;
- bool pll_active;
- u32 temp;
-
- if (intel_phy_is_combo(dev_priv, phy)) {
- u32 mask, shift;
-
- if (IS_ROCKETLAKE(dev_priv)) {
- mask = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
- shift = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
- } else {
- mask = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
- shift = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
- }
-
- temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) & mask;
- id = temp >> shift;
- port_dpll_id = ICL_PORT_DPLL_DEFAULT;
- } else if (intel_phy_is_tc(dev_priv, phy)) {
- u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
-
- if (clk_sel == DDI_CLK_SEL_MG) {
- id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
- port));
- port_dpll_id = ICL_PORT_DPLL_MG_PHY;
- } else {
- drm_WARN_ON(&dev_priv->drm,
- clk_sel < DDI_CLK_SEL_TBT_162);
- id = DPLL_ID_ICL_TBTPLL;
- port_dpll_id = ICL_PORT_DPLL_DEFAULT;
- }
- } else {
- drm_WARN(&dev_priv->drm, 1, "Invalid port %x\n", port);
- return;
- }
-
- pll = intel_get_shared_dpll_by_id(dev_priv, id);
- port_dpll = &pipe_config->icl_port_dplls[port_dpll_id];
-
- port_dpll->pll = pll;
- pll_active = intel_dpll_get_hw_state(dev_priv, pll,
- &port_dpll->hw_state);
- drm_WARN_ON(&dev_priv->drm, !pll_active);
-
- icl_set_active_port_dpll(pipe_config, port_dpll_id);
-}
-
-static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
- struct intel_crtc_state *pipe_config)
-{
- struct intel_shared_dpll *pll;
- enum intel_dpll_id id;
- bool pll_active;
- u32 temp;
-
- temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
- id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
-
- if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL2))
- return;
-
- pll = intel_get_shared_dpll_by_id(dev_priv, id);
-
- pipe_config->shared_dpll = pll;
- pll_active = intel_dpll_get_hw_state(dev_priv, pll,
- &pipe_config->dpll_hw_state);
- drm_WARN_ON(&dev_priv->drm, !pll_active);
-}
-
-static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
- enum port port,
- struct intel_crtc_state *pipe_config)
-{
- struct intel_shared_dpll *pll;
- enum intel_dpll_id id;
- bool pll_active;
-
- switch (port) {
- case PORT_A:
- id = DPLL_ID_SKL_DPLL0;
- break;
- case PORT_B:
- id = DPLL_ID_SKL_DPLL1;
- break;
- case PORT_C:
- id = DPLL_ID_SKL_DPLL2;
- break;
- default:
- drm_err(&dev_priv->drm, "Incorrect port type\n");
- return;
- }
-
- pll = intel_get_shared_dpll_by_id(dev_priv, id);
-
- pipe_config->shared_dpll = pll;
- pll_active = intel_dpll_get_hw_state(dev_priv, pll,
- &pipe_config->dpll_hw_state);
- drm_WARN_ON(&dev_priv->drm, !pll_active);
-}
-
-static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
- struct intel_crtc_state *pipe_config)
-{
- struct intel_shared_dpll *pll;
- enum intel_dpll_id id;
- bool pll_active;
- u32 temp;
-
- temp = intel_de_read(dev_priv, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
- id = temp >> (port * 3 + 1);
-
- if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL3))
- return;
-
- pll = intel_get_shared_dpll_by_id(dev_priv, id);
-
- pipe_config->shared_dpll = pll;
- pll_active = intel_dpll_get_hw_state(dev_priv, pll,
- &pipe_config->dpll_hw_state);
- drm_WARN_ON(&dev_priv->drm, !pll_active);
-}
-
-static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
- struct intel_crtc_state *pipe_config)
-{
- struct intel_shared_dpll *pll;
- enum intel_dpll_id id;
- u32 ddi_pll_sel = intel_de_read(dev_priv, PORT_CLK_SEL(port));
- bool pll_active;
-
- switch (ddi_pll_sel) {
- case PORT_CLK_SEL_WRPLL1:
- id = DPLL_ID_WRPLL1;
- break;
- case PORT_CLK_SEL_WRPLL2:
- id = DPLL_ID_WRPLL2;
- break;
- case PORT_CLK_SEL_SPLL:
- id = DPLL_ID_SPLL;
- break;
- case PORT_CLK_SEL_LCPLL_810:
- id = DPLL_ID_LCPLL_810;
- break;
- case PORT_CLK_SEL_LCPLL_1350:
- id = DPLL_ID_LCPLL_1350;
- break;
- case PORT_CLK_SEL_LCPLL_2700:
- id = DPLL_ID_LCPLL_2700;
- break;
- default:
- MISSING_CASE(ddi_pll_sel);
- fallthrough;
- case PORT_CLK_SEL_NONE:
- return;
- }
-
- pll = intel_get_shared_dpll_by_id(dev_priv, id);
-
- pipe_config->shared_dpll = pll;
- pll_active = intel_dpll_get_hw_state(dev_priv, pll,
- &pipe_config->dpll_hw_state);
- drm_WARN_ON(&dev_priv->drm, !pll_active);
-}
-
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
struct intel_display_power_domain_set *power_domain_set)
@@ -8919,19 +6707,6 @@ static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
}
- if (IS_DG1(dev_priv))
- dg1_get_ddi_pll(dev_priv, port, pipe_config);
- else if (INTEL_GEN(dev_priv) >= 11)
- icl_get_ddi_pll(dev_priv, port, pipe_config);
- else if (IS_CANNONLAKE(dev_priv))
- cnl_get_ddi_pll(dev_priv, port, pipe_config);
- else if (IS_GEN9_LP(dev_priv))
- bxt_get_ddi_pll(dev_priv, port, pipe_config);
- else if (IS_GEN9_BC(dev_priv))
- skl_get_ddi_pll(dev_priv, port, pipe_config);
- else
- hsw_get_ddi_pll(dev_priv, port, pipe_config);
-
/*
	 * Haswell has only FDI/PCH transcoder A, which is connected to
* DDI E. So just check whether this pipe is wired to DDI E and whether
@@ -10022,19 +7797,27 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
drm_connector_list_iter_begin(dev, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
- if (connector->base.state->crtc)
+ struct drm_connector_state *conn_state = connector->base.state;
+ struct intel_encoder *encoder =
+ to_intel_encoder(connector->base.encoder);
+
+ if (conn_state->crtc)
drm_connector_put(&connector->base);
- if (connector->base.encoder) {
- connector->base.state->best_encoder =
- connector->base.encoder;
- connector->base.state->crtc =
- connector->base.encoder->crtc;
+ if (encoder) {
+ struct intel_crtc *crtc =
+ to_intel_crtc(encoder->base.crtc);
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ conn_state->best_encoder = &encoder->base;
+ conn_state->crtc = &crtc->base;
+ conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;
drm_connector_get(&connector->base);
} else {
- connector->base.state->best_encoder = NULL;
- connector->base.state->crtc = NULL;
+ conn_state->best_encoder = NULL;
+ conn_state->crtc = NULL;
}
}
drm_connector_list_iter_end(&conn_iter);
@@ -10295,6 +8078,11 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
pipe_config->bigjoiner_slave ? "slave" :
pipe_config->bigjoiner ? "master" : "no");
+ drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
+ enableddisabled(pipe_config->splitter.enable),
+ pipe_config->splitter.link_count,
+ pipe_config->splitter.pixel_overlap);
+
if (pipe_config->has_pch_encoder)
intel_dump_m_n_config(pipe_config, "fdi",
pipe_config->fdi_lanes,
@@ -11335,6 +9123,10 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(dsc.dsc_split);
PIPE_CONF_CHECK_I(dsc.compressed_bpp);
+ PIPE_CONF_CHECK_BOOL(splitter.enable);
+ PIPE_CONF_CHECK_I(splitter.link_count);
+ PIPE_CONF_CHECK_I(splitter.pixel_overlap);
+
PIPE_CONF_CHECK_I(mst_master_transcoder);
PIPE_CONF_CHECK_BOOL(vrr.enable);
@@ -11384,11 +9176,10 @@ static void verify_wm_state(struct intel_crtc *crtc,
struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
struct skl_pipe_wm wm;
} *hw;
- struct skl_pipe_wm *sw_wm;
- struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+ const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
+ int level, max_level = ilk_wm_max_level(dev_priv);
+ struct intel_plane *plane;
u8 hw_enabled_slices;
- const enum pipe pipe = crtc->pipe;
- int plane, level, max_level = ilk_wm_max_level(dev_priv);
if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
return;
@@ -11398,7 +9189,6 @@ static void verify_wm_state(struct intel_crtc *crtc,
return;
skl_pipe_wm_get_hw_state(crtc, &hw->wm);
- sw_wm = &new_crtc_state->wm.skl.optimal;
skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
@@ -11411,110 +9201,52 @@ static void verify_wm_state(struct intel_crtc *crtc,
dev_priv->dbuf.enabled_slices,
hw_enabled_slices);
- /* planes */
- for_each_universal_plane(dev_priv, pipe, plane) {
- struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
-
- hw_plane_wm = &hw->wm.planes[plane];
- sw_plane_wm = &sw_wm->planes[plane];
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+ const struct skl_wm_level *hw_wm_level, *sw_wm_level;
/* Watermarks */
for (level = 0; level <= max_level; level++) {
- if (skl_wm_level_equals(&hw_plane_wm->wm[level],
- &sw_plane_wm->wm[level]) ||
- (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
- &sw_plane_wm->sagv_wm0)))
- continue;
+ hw_wm_level = &hw->wm.planes[plane->id].wm[level];
+ sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
- drm_err(&dev_priv->drm,
- "mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- pipe_name(pipe), plane + 1, level,
- sw_plane_wm->wm[level].plane_en,
- sw_plane_wm->wm[level].plane_res_b,
- sw_plane_wm->wm[level].plane_res_l,
- hw_plane_wm->wm[level].plane_en,
- hw_plane_wm->wm[level].plane_res_b,
- hw_plane_wm->wm[level].plane_res_l);
- }
-
- if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
- &sw_plane_wm->trans_wm)) {
- drm_err(&dev_priv->drm,
- "mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- pipe_name(pipe), plane + 1,
- sw_plane_wm->trans_wm.plane_en,
- sw_plane_wm->trans_wm.plane_res_b,
- sw_plane_wm->trans_wm.plane_res_l,
- hw_plane_wm->trans_wm.plane_en,
- hw_plane_wm->trans_wm.plane_res_b,
- hw_plane_wm->trans_wm.plane_res_l);
- }
-
- /* DDB */
- hw_ddb_entry = &hw->ddb_y[plane];
- sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];
-
- if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
- drm_err(&dev_priv->drm,
- "mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
- pipe_name(pipe), plane + 1,
- sw_ddb_entry->start, sw_ddb_entry->end,
- hw_ddb_entry->start, hw_ddb_entry->end);
- }
- }
-
- /*
- * cursor
- * If the cursor plane isn't active, we may not have updated it's ddb
- * allocation. In that case since the ddb allocation will be updated
- * once the plane becomes visible, we can skip this check
- */
- if (1) {
- struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
-
- hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
- sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
-
- /* Watermarks */
- for (level = 0; level <= max_level; level++) {
- if (skl_wm_level_equals(&hw_plane_wm->wm[level],
- &sw_plane_wm->wm[level]) ||
- (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
- &sw_plane_wm->sagv_wm0)))
+ if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
continue;
drm_err(&dev_priv->drm,
- "mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- pipe_name(pipe), level,
- sw_plane_wm->wm[level].plane_en,
- sw_plane_wm->wm[level].plane_res_b,
- sw_plane_wm->wm[level].plane_res_l,
- hw_plane_wm->wm[level].plane_en,
- hw_plane_wm->wm[level].plane_res_b,
- hw_plane_wm->wm[level].plane_res_l);
+ "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name, level,
+ sw_wm_level->plane_en,
+ sw_wm_level->plane_res_b,
+ sw_wm_level->plane_res_l,
+ hw_wm_level->plane_en,
+ hw_wm_level->plane_res_b,
+ hw_wm_level->plane_res_l);
}
- if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
- &sw_plane_wm->trans_wm)) {
+ hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
+ sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
+
+ if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
drm_err(&dev_priv->drm,
- "mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- pipe_name(pipe),
- sw_plane_wm->trans_wm.plane_en,
- sw_plane_wm->trans_wm.plane_res_b,
- sw_plane_wm->trans_wm.plane_res_l,
- hw_plane_wm->trans_wm.plane_en,
- hw_plane_wm->trans_wm.plane_res_b,
- hw_plane_wm->trans_wm.plane_res_l);
+ "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->plane_en,
+ sw_wm_level->plane_res_b,
+ sw_wm_level->plane_res_l,
+ hw_wm_level->plane_en,
+ hw_wm_level->plane_res_b,
+ hw_wm_level->plane_res_l);
}
/* DDB */
- hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
- sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
+ hw_ddb_entry = &hw->ddb_y[plane->id];
+ sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane->id];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
drm_err(&dev_priv->drm,
- "mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
- pipe_name(pipe),
+ "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
+ plane->base.base.id, plane->base.name,
sw_ddb_entry->start, sw_ddb_entry->end,
hw_ddb_entry->start, hw_ddb_entry->end);
}
@@ -11689,7 +9421,7 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
struct intel_crtc_state *new_crtc_state)
{
struct intel_dpll_hw_state dpll_hw_state;
- unsigned int crtc_mask;
+ u8 pipe_mask;
bool active;
memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
@@ -11702,34 +9434,34 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
I915_STATE_WARN(!pll->on && pll->active_mask,
"pll in active use but not on in sw tracking\n");
I915_STATE_WARN(pll->on && !pll->active_mask,
- "pll is on but not used by any active crtc\n");
+ "pll is on but not used by any active pipe\n");
I915_STATE_WARN(pll->on != active,
"pll on state mismatch (expected %i, found %i)\n",
pll->on, active);
}
if (!crtc) {
- I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
- "more active pll users than references: %x vs %x\n",
- pll->active_mask, pll->state.crtc_mask);
+ I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
+ "more active pll users than references: 0x%x vs 0x%x\n",
+ pll->active_mask, pll->state.pipe_mask);
return;
}
- crtc_mask = drm_crtc_mask(&crtc->base);
+ pipe_mask = BIT(crtc->pipe);
if (new_crtc_state->hw.active)
- I915_STATE_WARN(!(pll->active_mask & crtc_mask),
- "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
+ I915_STATE_WARN(!(pll->active_mask & pipe_mask),
+ "pll active mismatch (expected pipe %c in active mask 0x%x)\n",
pipe_name(crtc->pipe), pll->active_mask);
else
- I915_STATE_WARN(pll->active_mask & crtc_mask,
- "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
+ I915_STATE_WARN(pll->active_mask & pipe_mask,
+ "pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
pipe_name(crtc->pipe), pll->active_mask);
- I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
- "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
- crtc_mask, pll->state.crtc_mask);
+ I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
+ "pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
+ pipe_mask, pll->state.pipe_mask);
I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
&dpll_hw_state,
@@ -11749,15 +9481,15 @@ verify_shared_dpll_state(struct intel_crtc *crtc,
if (old_crtc_state->shared_dpll &&
old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
- unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
+ u8 pipe_mask = BIT(crtc->pipe);
struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
- I915_STATE_WARN(pll->active_mask & crtc_mask,
- "pll active mismatch (didn't expect pipe %c in active mask)\n",
- pipe_name(crtc->pipe));
- I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
- "pll enabled crtcs mismatch (found %x in enabled mask)\n",
- pipe_name(crtc->pipe));
+ I915_STATE_WARN(pll->active_mask & pipe_mask,
+ "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
+ pipe_name(crtc->pipe), pll->active_mask);
+ I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
+ "pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
+ pipe_name(crtc->pipe), pll->state.pipe_mask);
}
}
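
The verifier hunks above switch the shared-DPLL bookkeeping from a drm_crtc index mask to a plain pipe mask built with BIT(crtc->pipe). A small sketch of the pattern, assuming only a local BIT() macro and made-up pipe enumerators:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define BIT(n) (1u << (n))

enum pipe { PIPE_A, PIPE_B, PIPE_C };	/* made-up pipe enumerators */

/* Does a (hypothetical) shared PLL's pipe_mask reference this pipe? */
static bool pll_used_by_pipe(uint8_t pipe_mask, enum pipe pipe)
{
	return pipe_mask & BIT(pipe);
}

int main(void)
{
	uint8_t pipe_mask = BIT(PIPE_A) | BIT(PIPE_C);

	printf("pipe_mask=0x%x  PIPE_B:%d  PIPE_C:%d\n", pipe_mask,
	       pll_used_by_pipe(pipe_mask, PIPE_B),
	       pll_used_by_pipe(pipe_mask, PIPE_C));
	return 0;
}
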
@@ -13910,7 +11642,13 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
+ if (IS_ALDERLAKE_S(dev_priv)) {
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_TC1);
+ intel_ddi_init(dev_priv, PORT_TC2);
+ intel_ddi_init(dev_priv, PORT_TC3);
+ intel_ddi_init(dev_priv, PORT_TC4);
+ } else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_TC1);
@@ -13966,8 +11704,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
/*
* Haswell uses DDI functions to detect digital outputs.
- * On SKL pre-D0 the strap isn't connected, so we assume
- * it's there.
+	 * On SKL pre-D0 the strap isn't connected. Later SKUs may or
+	 * may not have it - it was supposed to be fixed by the time
+	 * we stopped using straps. Assume it's there.
*/
found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
/* WaIgnoreDDIAStrap: skl */
@@ -13976,7 +11715,14 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
* register */
- found = intel_de_read(dev_priv, SFUSE_STRAP);
+ if (HAS_PCH_TGP(dev_priv)) {
+			/* W/A due to lack of STRAP config on TGP PCH */
+ found = (SFUSE_STRAP_DDIB_DETECTED |
+ SFUSE_STRAP_DDIC_DETECTED |
+ SFUSE_STRAP_DDID_DETECTED);
+ } else {
+ found = intel_de_read(dev_priv, SFUSE_STRAP);
+ }
if (found & SFUSE_STRAP_DDIB_DETECTED)
intel_ddi_init(dev_priv, PORT_B);
@@ -14132,8 +11878,6 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_dvo_init(dev_priv);
}
- intel_psr_init(dev_priv);
-
for_each_intel_encoder(&dev_priv->drm, encoder) {
encoder->base.possible_crtcs =
intel_encoder_possible_crtcs(encoder);
@@ -14503,6 +12247,7 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
intel_init_cdclk_hooks(dev_priv);
+ intel_init_audio_hooks(dev_priv);
intel_dpll_init_clock_hook(dev_priv);
@@ -14938,6 +12683,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
intel_update_czclk(i915);
intel_modeset_init_hw(i915);
+ intel_dpll_update_ref_clks(i915);
intel_hdcp_component_init(i915);
@@ -15393,8 +13139,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
/* notify opregion of the sanitized encoder state */
intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
- if (INTEL_GEN(dev_priv) >= 11)
- icl_sanitize_encoder_pll_mapping(encoder);
+ if (HAS_DDI(dev_priv))
+ intel_ddi_sanitize_encoder_pll_mapping(encoder);
}
/* FIXME read out full plane state for all planes */
@@ -15474,8 +13220,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
readout_plane_state(dev_priv);
- intel_dpll_readout_hw_state(dev_priv);
-
for_each_intel_encoder(dev, encoder) {
pipe = 0;
@@ -15510,6 +13254,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
pipe_name(pipe));
}
+ intel_dpll_readout_hw_state(dev_priv);
+
drm_connector_list_iter_begin(dev, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->get_hw_state(connector)) {
@@ -15971,6 +13717,57 @@ void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
intel_bios_driver_remove(i915);
}
+void intel_display_driver_register(struct drm_i915_private *i915)
+{
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ intel_display_debugfs_register(i915);
+
+ /* Must be done after probing outputs */
+ intel_opregion_register(i915);
+ acpi_video_register();
+
+ intel_audio_init(i915);
+
+ /*
+ * Some ports require correctly set-up hpd registers for
+ * detection to work properly (leading to ghost connected
+ * connector status), e.g. VGA on gm45. Hence we can only set
+ * up the initial fbdev config after hpd irqs are fully
+ * enabled. We do it last so that the async config cannot run
+ * before the connectors are registered.
+ */
+ intel_fbdev_initial_config_async(&i915->drm);
+
+ /*
+ * We need to coordinate the hotplugs with the asynchronous
+ * fbdev configuration, for which we use the
+ * fbdev->async_cookie.
+ */
+ drm_kms_helper_poll_init(&i915->drm);
+}
+
+void intel_display_driver_unregister(struct drm_i915_private *i915)
+{
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ intel_fbdev_unregister(i915);
+ intel_audio_deinit(i915);
+
+ /*
+	 * After flushing the fbdev (incl. a late async config which
+	 * will have delayed queuing of a hotplug event), flush the
+	 * hotplug events.
+ */
+ drm_kms_helper_poll_fini(&i915->drm);
+ drm_atomic_helper_shutdown(&i915->drm);
+
+ acpi_video_unregister();
+ intel_opregion_unregister(i915);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
struct intel_display_error_state {
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 76f8a805b0a3..431770eeadb4 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -52,6 +52,7 @@ struct intel_crtc_state;
struct intel_digital_port;
struct intel_dp;
struct intel_encoder;
+struct intel_initial_plane_config;
struct intel_load_detect_pipe;
struct intel_plane;
struct intel_plane_state;
@@ -352,11 +353,6 @@ enum phy_fia {
for_each_cpu_transcoder(__dev_priv, __t) \
for_each_if ((__mask) & BIT(__t))
-#define for_each_universal_plane(__dev_priv, __pipe, __p) \
- for ((__p) = 0; \
- (__p) < RUNTIME_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
- (__p)++)
-
#define for_each_sprite(__dev_priv, __p, __s) \
for ((__s) = 0; \
(__s) < RUNTIME_INFO(__dev_priv)->num_sprites[(__p)]; \
@@ -417,10 +413,19 @@ enum phy_fia {
for_each_if((encoder_mask) & \
drm_encoder_mask(&intel_encoder->base))
+#define for_each_intel_encoder_mask_with_psr(dev, intel_encoder, encoder_mask) \
+ list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
+ for_each_if(((encoder_mask) & drm_encoder_mask(&(intel_encoder)->base)) && \
+ intel_encoder_can_psr(intel_encoder))
+
#define for_each_intel_dp(dev, intel_encoder) \
for_each_intel_encoder(dev, intel_encoder) \
for_each_if(intel_encoder_is_dp(intel_encoder))
+#define for_each_intel_encoder_with_psr(dev, intel_encoder) \
+ for_each_intel_encoder((dev), (intel_encoder)) \
+ for_each_if(intel_encoder_can_psr(intel_encoder))
+
#define for_each_intel_connector_iter(intel_connector, iter) \
while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))
@@ -507,8 +512,6 @@ void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
bool constant_n, bool fec_enable);
-bool is_ccs_modifier(u64 modifier);
-int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane);
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier);
@@ -586,9 +589,6 @@ void intel_cleanup_plane_fb(struct drm_plane *plane,
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe);
-int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
- const struct dpll *dpll);
-void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
int lpt_get_iclkip(struct drm_i915_private *dev_priv);
bool intel_fuzzy_clock_check(int clock1, int clock2);
@@ -613,25 +613,8 @@ enum intel_display_power_domain
intel_legacy_aux_to_power_domain(enum aux_ch aux_ch);
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
-
-u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
-void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
-u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set);
-void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe pipe,
- int id, int set, enum drm_scaling_filter filter);
void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
-u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
-u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state);
-u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
-u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state);
-u32 skl_plane_stride(const struct intel_plane_state *plane_state,
- int plane);
-int skl_check_plane_surface(struct intel_plane_state *plane_state);
-int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
- int *x, int *y, u32 *offset);
-int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
+
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);
@@ -653,12 +636,21 @@ void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state);
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state);
+
unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
int color_plane);
+void intel_fb_plane_get_subsampling(int *hsub, int *vsub,
+ const struct drm_framebuffer *fb,
+ int color_plane);
u32 intel_plane_adjust_aligned_offset(int *x, int *y,
const struct intel_plane_state *state,
int color_plane,
u32 old_offset, u32 new_offset);
+unsigned int intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane);
+unsigned int intel_tile_height(const struct drm_framebuffer *fb, int color_plane);
+
+void intel_display_driver_register(struct drm_i915_private *i915);
+void intel_display_driver_unregister(struct drm_i915_private *i915);
/* modesetting */
void intel_modeset_init_hw(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index d62b18d5ecd8..0c5b7600d847 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -249,12 +249,11 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
"sink internal error",
};
struct drm_connector *connector = m->private;
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_dp *intel_dp =
intel_attached_dp(to_intel_connector(connector));
int ret;
- if (!CAN_PSR(dev_priv)) {
+ if (!CAN_PSR(intel_dp)) {
seq_puts(m, "PSR Unsupported\n");
return -ENODEV;
}
@@ -280,12 +279,13 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
static void
-psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
+psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
{
- u32 val, status_val;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
const char *status = "unknown";
+ u32 val, status_val;
- if (dev_priv->psr.psr2_enabled) {
+ if (intel_dp->psr.psr2_enabled) {
static const char * const live_status[] = {
"IDLE",
"CAPTURE",
@@ -300,7 +300,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
"TG_ON"
};
val = intel_de_read(dev_priv,
- EDP_PSR2_STATUS(dev_priv->psr.transcoder));
+ EDP_PSR2_STATUS(intel_dp->psr.transcoder));
status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
EDP_PSR2_STATUS_STATE_SHIFT;
if (status_val < ARRAY_SIZE(live_status))
@@ -317,7 +317,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
"SRDENT_ON",
};
val = intel_de_read(dev_priv,
- EDP_PSR_STATUS(dev_priv->psr.transcoder));
+ EDP_PSR_STATUS(intel_dp->psr.transcoder));
status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
EDP_PSR_STATUS_STATE_SHIFT;
if (status_val < ARRAY_SIZE(live_status))
@@ -327,21 +327,18 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}
-static int i915_edp_psr_status(struct seq_file *m, void *data)
+static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct i915_psr *psr = &dev_priv->psr;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_psr *psr = &intel_dp->psr;
intel_wakeref_t wakeref;
const char *status;
bool enabled;
u32 val;
- if (!HAS_PSR(dev_priv))
- return -ENODEV;
-
seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
- if (psr->dp)
- seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
+ if (psr->sink_support)
+ seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
seq_puts(m, "\n");
if (!psr->sink_support)
@@ -365,16 +362,16 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
if (psr->psr2_enabled) {
val = intel_de_read(dev_priv,
- EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ EDP_PSR2_CTL(intel_dp->psr.transcoder));
enabled = val & EDP_PSR2_ENABLE;
} else {
val = intel_de_read(dev_priv,
- EDP_PSR_CTL(dev_priv->psr.transcoder));
+ EDP_PSR_CTL(intel_dp->psr.transcoder));
enabled = val & EDP_PSR_ENABLE;
}
seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
enableddisabled(enabled), val);
- psr_source_status(dev_priv, m);
+ psr_source_status(intel_dp, m);
seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
psr->busy_frontbuffer_bits);
@@ -383,7 +380,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
*/
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
val = intel_de_read(dev_priv,
- EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
+ EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
val &= EDP_PSR_PERF_CNT_MASK;
seq_printf(m, "Performance counter: %u\n", val);
}
@@ -404,7 +401,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
*/
for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
val = intel_de_read(dev_priv,
- PSR2_SU_STATUS(dev_priv->psr.transcoder, frame));
+ PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
su_frames_val[frame / 3] = val;
}
@@ -430,23 +427,50 @@ unlock:
return 0;
}
+static int i915_edp_psr_status(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_dp *intel_dp = NULL;
+ struct intel_encoder *encoder;
+
+ if (!HAS_PSR(dev_priv))
+ return -ENODEV;
+
+	/* Find the first eDP encoder that supports PSR */
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ intel_dp = enc_to_intel_dp(encoder);
+ break;
+ }
+
+ if (!intel_dp)
+ return -ENODEV;
+
+ return intel_psr_status(m, intel_dp);
+}
+
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
+ struct intel_encoder *encoder;
intel_wakeref_t wakeref;
- int ret;
+ int ret = -ENODEV;
- if (!CAN_PSR(dev_priv))
- return -ENODEV;
+ if (!HAS_PSR(dev_priv))
+ return ret;
- drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
- ret = intel_psr_debug_set(dev_priv, val);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+		// TODO: split into each transcoder's PSR debug state
+ ret = intel_psr_debug_set(intel_dp, val);
+
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ }
return ret;
}
@@ -455,12 +479,20 @@ static int
i915_edp_psr_debug_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
+ struct intel_encoder *encoder;
- if (!CAN_PSR(dev_priv))
+ if (!HAS_PSR(dev_priv))
return -ENODEV;
- *val = READ_ONCE(dev_priv->psr.debug);
- return 0;
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+		// TODO: split into each transcoder's PSR debug state
+ *val = READ_ONCE(intel_dp->psr.debug);
+ return 0;
+ }
+
+ return -ENODEV;
}
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
@@ -1066,8 +1098,8 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
pll->info->id);
- seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
- pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
+ seq_printf(m, " pipe_mask: 0x%x, active: 0x%x, on: %s\n",
+ pll->state.pipe_mask, pll->active_mask, yesno(pll->on));
seq_printf(m, " tracked hardware state:\n");
seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
seq_printf(m, " dpll_md: 0x%08x\n",
@@ -1233,9 +1265,6 @@ static void drrs_status_per_crtc(struct seq_file *m,
/* disable_drrs() will make drrs->dp NULL */
if (!drrs->dp) {
seq_puts(m, "Idleness DRRS: Disabled\n");
- if (dev_priv->psr.enabled)
- seq_puts(m,
- "\tAs PSR is enabled, DRRS is not enabled\n");
mutex_unlock(&drrs->mutex);
return;
}
@@ -2169,19 +2198,40 @@ DEFINE_SHOW_ATTRIBUTE(i915_panel);
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
+ struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
+ int ret;
- if (connector->status != connector_status_connected)
- return -ENODEV;
+ ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ if (ret)
+ return ret;
+
+ if (!connector->encoder || connector->status != connector_status_connected) {
+ ret = -ENODEV;
+ goto out;
+ }
seq_printf(m, "%s:%d HDCP version: ", connector->name,
connector->base.id);
intel_hdcp_info(m, intel_connector);
- return 0;
+out:
+ drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+
+ return ret;
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
+static int i915_psr_status_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct intel_dp *intel_dp =
+ intel_attached_dp(to_intel_connector(connector));
+
+ return intel_psr_status(m, intel_dp);
+}
+DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
+
#define LPSP_CAPABLE(COND) (COND ? seq_puts(m, "LPSP: capable\n") : \
seq_puts(m, "LPSP: incapable\n"))
@@ -2357,6 +2407,12 @@ int intel_connector_debugfs_add(struct drm_connector *connector)
connector, &i915_psr_sink_status_fops);
}
+ if (HAS_PSR(dev_priv) &&
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ debugfs_create_file("i915_psr_status", 0444, root,
+ connector, &i915_psr_status_fops);
+ }
+
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index c11c37c65d86..7e0eaa872350 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -2886,24 +2886,24 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
BIT_ULL(POWER_DOMAIN_PIPE_B) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_AUX_E) | \
- BIT_ULL(POWER_DOMAIN_AUX_F) | \
- BIT_ULL(POWER_DOMAIN_AUX_G) | \
- BIT_ULL(POWER_DOMAIN_AUX_H) | \
- BIT_ULL(POWER_DOMAIN_AUX_I) | \
- BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
- BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
- BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
- BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \
- BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \
- BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC5) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC6) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \
BIT_ULL(POWER_DOMAIN_VGA) | \
BIT_ULL(POWER_DOMAIN_AUDIO) | \
BIT_ULL(POWER_DOMAIN_INIT))
@@ -2921,18 +2921,12 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_INIT))
-#define TGL_DDI_IO_D_TC1_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
-#define TGL_DDI_IO_E_TC2_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
-#define TGL_DDI_IO_F_TC3_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
-#define TGL_DDI_IO_G_TC4_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO))
-#define TGL_DDI_IO_H_TC5_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO))
-#define TGL_DDI_IO_I_TC6_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO))
+#define TGL_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1)
+#define TGL_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2)
+#define TGL_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3)
+#define TGL_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4)
+#define TGL_DDI_IO_TC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC5)
+#define TGL_DDI_IO_TC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC6)
#define TGL_AUX_A_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
@@ -2941,44 +2935,34 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
BIT_ULL(POWER_DOMAIN_AUX_B))
#define TGL_AUX_C_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_C))
-#define TGL_AUX_D_TC1_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_D))
-#define TGL_AUX_E_TC2_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_E))
-#define TGL_AUX_F_TC3_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_F))
-#define TGL_AUX_G_TC4_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_G))
-#define TGL_AUX_H_TC5_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_H))
-#define TGL_AUX_I_TC6_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_I))
-#define TGL_AUX_D_TBT1_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
-#define TGL_AUX_E_TBT2_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
-#define TGL_AUX_F_TBT3_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
-#define TGL_AUX_G_TBT4_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_G_TBT))
-#define TGL_AUX_H_TBT5_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_H_TBT))
-#define TGL_AUX_I_TBT6_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_I_TBT))
+
+#define TGL_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1)
+#define TGL_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2)
+#define TGL_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3)
+#define TGL_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4)
+#define TGL_AUX_IO_USBC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC5)
+#define TGL_AUX_IO_USBC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC6)
+
+#define TGL_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1)
+#define TGL_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2)
+#define TGL_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3)
+#define TGL_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4)
+#define TGL_AUX_IO_TBT5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT5)
+#define TGL_AUX_IO_TBT6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT6)
#define TGL_TC_COLD_OFF_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_AUX_E) | \
- BIT_ULL(POWER_DOMAIN_AUX_F) | \
- BIT_ULL(POWER_DOMAIN_AUX_G) | \
- BIT_ULL(POWER_DOMAIN_AUX_H) | \
- BIT_ULL(POWER_DOMAIN_AUX_I) | \
- BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
- BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
- BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
- BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \
- BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \
- BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \
BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))
#define RKL_PW_4_POWER_DOMAINS ( \
@@ -2994,10 +2978,10 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
BIT_ULL(POWER_DOMAIN_AUDIO) | \
BIT_ULL(POWER_DOMAIN_VGA) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_AUX_E) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
BIT_ULL(POWER_DOMAIN_INIT))
/*
@@ -4145,8 +4129,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
}
},
{
- .name = "DDI D TC1 IO",
- .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
+ .name = "DDI IO TC1",
+ .domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4155,8 +4139,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "DDI E TC2 IO",
- .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
+ .name = "DDI IO TC2",
+ .domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4165,8 +4149,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "DDI F TC3 IO",
- .domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS,
+ .name = "DDI IO TC3",
+ .domains = TGL_DDI_IO_TC3_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4175,8 +4159,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "DDI G TC4 IO",
- .domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS,
+ .name = "DDI IO TC4",
+ .domains = TGL_DDI_IO_TC4_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4185,8 +4169,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "DDI H TC5 IO",
- .domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS,
+ .name = "DDI IO TC5",
+ .domains = TGL_DDI_IO_TC5_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4195,8 +4179,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "DDI I TC6 IO",
- .domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS,
+ .name = "DDI IO TC6",
+ .domains = TGL_DDI_IO_TC6_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4241,8 +4225,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX D TC1",
- .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
+ .name = "AUX USBC1",
+ .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4252,8 +4236,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX E TC2",
- .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
+ .name = "AUX USBC2",
+ .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4263,8 +4247,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX F TC3",
- .domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
+ .name = "AUX USBC3",
+ .domains = TGL_AUX_IO_USBC3_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4274,8 +4258,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX G TC4",
- .domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
+ .name = "AUX USBC4",
+ .domains = TGL_AUX_IO_USBC4_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4285,8 +4269,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX H TC5",
- .domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
+ .name = "AUX USBC5",
+ .domains = TGL_AUX_IO_USBC5_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4296,8 +4280,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX I TC6",
- .domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
+ .name = "AUX USBC6",
+ .domains = TGL_AUX_IO_USBC6_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4307,8 +4291,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX D TBT1",
- .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
+ .name = "AUX TBT1",
+ .domains = TGL_AUX_IO_TBT1_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4318,8 +4302,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX E TBT2",
- .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
+ .name = "AUX TBT2",
+ .domains = TGL_AUX_IO_TBT2_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4329,8 +4313,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX F TBT3",
- .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
+ .name = "AUX TBT3",
+ .domains = TGL_AUX_IO_TBT3_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4340,8 +4324,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX G TBT4",
- .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
+ .name = "AUX TBT4",
+ .domains = TGL_AUX_IO_TBT4_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4351,8 +4335,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX H TBT5",
- .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
+ .name = "AUX TBT5",
+ .domains = TGL_AUX_IO_TBT5_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4362,8 +4346,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX I TBT6",
- .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
+ .name = "AUX TBT6",
+ .domains = TGL_AUX_IO_TBT6_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4471,8 +4455,8 @@ static const struct i915_power_well_desc rkl_power_wells[] = {
}
},
{
- .name = "DDI D TC1 IO",
- .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
+ .name = "DDI IO TC1",
+ .domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4481,8 +4465,8 @@ static const struct i915_power_well_desc rkl_power_wells[] = {
},
},
{
- .name = "DDI E TC2 IO",
- .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
+ .name = "DDI IO TC2",
+ .domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4511,8 +4495,8 @@ static const struct i915_power_well_desc rkl_power_wells[] = {
},
},
{
- .name = "AUX D TC1",
- .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
+ .name = "AUX USBC1",
+ .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4521,8 +4505,8 @@ static const struct i915_power_well_desc rkl_power_wells[] = {
},
},
{
- .name = "AUX E TC2",
- .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
+ .name = "AUX USBC2",
+ .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4689,7 +4673,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
- if (IS_DG1(dev_priv)) {
+ if (IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv)) {
err = set_power_wells_mask(power_domains, tgl_power_wells,
BIT_ULL(TGL_DISP_PW_TC_COLD_OFF));
} else if (IS_ROCKETLAKE(dev_priv)) {
@@ -5317,17 +5301,25 @@ struct buddy_page_mask {
static const struct buddy_page_mask tgl_buddy_page_masks[] = {
{ .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF },
+ { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0xF },
{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
+ { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
{ .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
+ { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x1E },
{ .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
+ { .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
{}
};
static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
{ .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 },
+ { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0x1 },
+ { .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
{ .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 },
+ { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x3 },
+ { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
{}
};
@@ -5339,9 +5331,10 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask;
int config, i;
- if (IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0) ||
- IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0))
- /* Wa_1409767108:tgl,dg1 */
+ if (IS_ALDERLAKE_S(dev_priv) ||
+ IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0) ||
+ IS_TGL_DISP_STEPPING(dev_priv, STEP_A0, STEP_B0))
+ /* Wa_1409767108:tgl,dg1,adl-s */
table = wa_1409767108_buddy_page_masks;
else
table = tgl_buddy_page_masks;
@@ -5379,7 +5372,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
- /* Wa_14011294188:ehl,jsl,tgl,rkl */
+ /* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
if (INTEL_PCH_TYPE(dev_priv) >= PCH_JSP &&
INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index bc30c479be53..f3ca5d5c9778 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -41,6 +41,14 @@ enum intel_display_power_domain {
POWER_DOMAIN_PORT_DDI_G_LANES,
POWER_DOMAIN_PORT_DDI_H_LANES,
POWER_DOMAIN_PORT_DDI_I_LANES,
+
+ POWER_DOMAIN_PORT_DDI_LANES_TC1 = POWER_DOMAIN_PORT_DDI_D_LANES, /* tgl+ */
+ POWER_DOMAIN_PORT_DDI_LANES_TC2,
+ POWER_DOMAIN_PORT_DDI_LANES_TC3,
+ POWER_DOMAIN_PORT_DDI_LANES_TC4,
+ POWER_DOMAIN_PORT_DDI_LANES_TC5,
+ POWER_DOMAIN_PORT_DDI_LANES_TC6,
+
POWER_DOMAIN_PORT_DDI_A_IO,
POWER_DOMAIN_PORT_DDI_B_IO,
POWER_DOMAIN_PORT_DDI_C_IO,
@@ -50,6 +58,14 @@ enum intel_display_power_domain {
POWER_DOMAIN_PORT_DDI_G_IO,
POWER_DOMAIN_PORT_DDI_H_IO,
POWER_DOMAIN_PORT_DDI_I_IO,
+
+ POWER_DOMAIN_PORT_DDI_IO_TC1 = POWER_DOMAIN_PORT_DDI_D_IO, /* tgl+ */
+ POWER_DOMAIN_PORT_DDI_IO_TC2,
+ POWER_DOMAIN_PORT_DDI_IO_TC3,
+ POWER_DOMAIN_PORT_DDI_IO_TC4,
+ POWER_DOMAIN_PORT_DDI_IO_TC5,
+ POWER_DOMAIN_PORT_DDI_IO_TC6,
+
POWER_DOMAIN_PORT_DSI,
POWER_DOMAIN_PORT_CRT,
POWER_DOMAIN_PORT_OTHER,
@@ -64,6 +80,14 @@ enum intel_display_power_domain {
POWER_DOMAIN_AUX_G,
POWER_DOMAIN_AUX_H,
POWER_DOMAIN_AUX_I,
+
+ POWER_DOMAIN_AUX_USBC1 = POWER_DOMAIN_AUX_D, /* tgl+ */
+ POWER_DOMAIN_AUX_USBC2,
+ POWER_DOMAIN_AUX_USBC3,
+ POWER_DOMAIN_AUX_USBC4,
+ POWER_DOMAIN_AUX_USBC5,
+ POWER_DOMAIN_AUX_USBC6,
+
POWER_DOMAIN_AUX_IO_A,
POWER_DOMAIN_AUX_C_TBT,
POWER_DOMAIN_AUX_D_TBT,
@@ -72,6 +96,14 @@ enum intel_display_power_domain {
POWER_DOMAIN_AUX_G_TBT,
POWER_DOMAIN_AUX_H_TBT,
POWER_DOMAIN_AUX_I_TBT,
+
+ POWER_DOMAIN_AUX_TBT1 = POWER_DOMAIN_AUX_D_TBT, /* tgl+ */
+ POWER_DOMAIN_AUX_TBT2,
+ POWER_DOMAIN_AUX_TBT3,
+ POWER_DOMAIN_AUX_TBT4,
+ POWER_DOMAIN_AUX_TBT5,
+ POWER_DOMAIN_AUX_TBT6,
+
POWER_DOMAIN_GMBUS,
POWER_DOMAIN_MODESET,
POWER_DOMAIN_GT_IRQ,
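
The tgl+ TC/USBC/TBT names added above are aliases rather than new entries: pinning TC1 to the legacy D value makes the enumerators that follow line up one-to-one with E..I, and the enum keeps counting from the last alias so nothing after it shifts. A standalone sketch of that C enum-aliasing pattern with hypothetical names:

#include <stdio.h>

/* hypothetical names, not the real i915 power-domain list */
enum demo_power_domain {
	DOMAIN_PORT_D_LANES,
	DOMAIN_PORT_E_LANES,
	DOMAIN_PORT_F_LANES,

	/* tgl+ style aliases: same values under newer names */
	DOMAIN_PORT_LANES_TC1 = DOMAIN_PORT_D_LANES,
	DOMAIN_PORT_LANES_TC2,
	DOMAIN_PORT_LANES_TC3,

	/* counting resumes from the last alias, so nothing shifts */
	DOMAIN_NEXT,
};

int main(void)
{
	printf("TC2 == E_LANES: %d\n", DOMAIN_PORT_LANES_TC2 == DOMAIN_PORT_E_LANES);	/* 1 */
	printf("DOMAIN_NEXT = %d\n", DOMAIN_NEXT);					/* 3 */
	return 0;
}
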
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 184ecbbcec99..f159dce0f744 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -37,6 +37,7 @@
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
@@ -219,6 +220,16 @@ struct intel_encoder {
* encoders have been disabled and suspended.
*/
void (*shutdown)(struct intel_encoder *encoder);
+ /*
+ * Enable/disable the clock to the port.
+ */
+ void (*enable_clock)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+ void (*disable_clock)(struct intel_encoder *encoder);
+ /*
+ * Returns whether the port clock is enabled or not.
+ */
+ bool (*is_clock_enabled)(struct intel_encoder *encoder);
enum hpd_pin hpd_pin;
enum intel_display_power_domain power_domain;
/* for communication with audio component; protected by av_mutex */
@@ -725,7 +736,10 @@ struct skl_plane_wm {
struct skl_wm_level wm[8];
struct skl_wm_level uv_wm[8];
struct skl_wm_level trans_wm;
- struct skl_wm_level sagv_wm0;
+ struct {
+ struct skl_wm_level wm0;
+ struct skl_wm_level trans_wm;
+ } sagv;
bool is_planar;
};
@@ -1159,6 +1173,13 @@ struct intel_crtc_state {
u8 pipeline_full;
u16 flipline, vmin, vmax;
} vrr;
+
+ /* Stream Splitter for eDP MSO */
+ struct {
+ bool enable;
+ u8 link_count;
+ u8 pixel_overlap;
+ } splitter;
};
enum intel_pipe_crc_source {
@@ -1414,6 +1435,44 @@ struct intel_pps {
struct edp_power_seq pps_delays;
};
+struct intel_psr {
+ /* Mutex for PSR state of the transcoder */
+ struct mutex lock;
+
+#define I915_PSR_DEBUG_MODE_MASK 0x0f
+#define I915_PSR_DEBUG_DEFAULT 0x00
+#define I915_PSR_DEBUG_DISABLE 0x01
+#define I915_PSR_DEBUG_ENABLE 0x02
+#define I915_PSR_DEBUG_FORCE_PSR1 0x03
+#define I915_PSR_DEBUG_ENABLE_SEL_FETCH 0x4
+#define I915_PSR_DEBUG_IRQ 0x10
+
+ u32 debug;
+ bool sink_support;
+ bool source_support;
+ bool enabled;
+ enum pipe pipe;
+ enum transcoder transcoder;
+ bool active;
+ struct work_struct work;
+ unsigned int busy_frontbuffer_bits;
+ bool sink_psr2_support;
+ bool link_standby;
+ bool colorimetry_support;
+ bool psr2_enabled;
+ bool psr2_sel_fetch_enabled;
+ u8 sink_sync_latency;
+ ktime_t last_entry_attempt;
+ ktime_t last_exit;
+ bool sink_not_reliable;
+ bool irq_aux_error;
+ u16 su_x_granularity;
+ bool dc3co_enabled;
+ u32 dc3co_exit_delay;
+ struct delayed_work dc3co_work;
+ struct drm_dp_vsc_sdp vsc;
+};
+
struct intel_dp {
i915_reg_t output_reg;
u32 DP;
@@ -1448,6 +1507,8 @@ struct intel_dp {
int max_link_lane_count;
/* Max rate for the current link */
int max_link_rate;
+ int mso_link_count;
+ int mso_pixel_overlap;
/* sink or branch descriptor */
struct drm_dp_desc desc;
struct drm_dp_aux aux;
@@ -1516,6 +1577,8 @@ struct intel_dp {
bool hobl_active;
struct intel_dp_pcon_frl frl;
+
+ struct intel_psr psr;
};
enum lspcon_vendor {
@@ -1752,6 +1815,17 @@ dp_to_i915(struct intel_dp *intel_dp)
return to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
}
+#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
+ (intel_dp)->psr.source_support)
+
+static inline bool intel_encoder_can_psr(struct intel_encoder *encoder)
+{
+ if (!intel_encoder_is_dp(encoder))
+ return false;
+
+ return CAN_PSR(enc_to_intel_dp(encoder));
+}
+
static inline struct intel_digital_port *
hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
{
@@ -1893,4 +1967,39 @@ static inline u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
return dev_priv->fdi_pll_freq;
}
+static inline bool is_ccs_modifier(u64 modifier)
+{
+ return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+}
+
+static inline bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
+{
+ if (!is_ccs_modifier(fb->modifier))
+ return false;
+
+ return plane >= fb->format->num_planes / 2;
+}
+
+static inline bool is_gen12_ccs_modifier(u64 modifier)
+{
+ return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
+}
+
+static inline bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
+{
+ return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
+}
+
+static inline bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane)
+{
+ return fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC &&
+ plane == 2;
+}
+
#endif /* __INTEL_DISPLAY_TYPES_H__ */
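
The helpers moved into the header above encode a plane-index convention: with a CCS modifier, the first half of a framebuffer's planes hold color data and the second half hold the compression control surfaces, while plane 2 is the clear-color plane for the RC_CCS_CC modifier. A minimal sketch of that index math using a stand-in struct (not the real drm_framebuffer):

#include <stdio.h>
#include <stdbool.h>

/* Stand-in for drm_framebuffer: just what the index math needs. */
struct demo_fb {
	int num_planes;		/* color planes + CCS planes */
	bool ccs_modifier;	/* framebuffer uses a CCS-style modifier? */
};

static bool demo_is_ccs_plane(const struct demo_fb *fb, int plane)
{
	if (!fb->ccs_modifier)
		return false;

	/* first half: color data, second half: compression control surfaces */
	return plane >= fb->num_planes / 2;
}

int main(void)
{
	struct demo_fb fb = { .num_planes = 4, .ccs_modifier = true };	/* e.g. 2 color + 2 CCS */
	int plane;

	for (plane = 0; plane < fb.num_planes; plane++)
		printf("plane %d: %s\n", plane,
		       demo_is_ccs_plane(&fb, plane) ? "CCS" : "color");
	return 0;
}
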
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 8c12d5375607..b6b5776f5a66 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -50,6 +50,7 @@
#include "intel_dp_aux.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
+#include "intel_dpll.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
@@ -788,10 +789,10 @@ intel_dp_mode_valid(struct drm_connector *connector,
return MODE_H_ILLEGAL;
if (intel_dp_is_edp(intel_dp) && fixed_mode) {
- if (mode->hdisplay > fixed_mode->hdisplay)
+ if (mode->hdisplay != fixed_mode->hdisplay)
return MODE_PANEL;
- if (mode->vdisplay > fixed_mode->vdisplay)
+ if (mode->vdisplay != fixed_mode->vdisplay)
return MODE_PANEL;
target_clock = fixed_mode->clock;
@@ -1663,12 +1664,10 @@ void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
const struct drm_connector_state *conn_state,
struct drm_dp_vsc_sdp *vsc)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
vsc->sdp_type = DP_SDP_VSC;
- if (dev_priv->psr.psr2_enabled) {
- if (dev_priv->psr.colorimetry_support &&
+ if (intel_dp->psr.psr2_enabled) {
+ if (intel_dp->psr.colorimetry_support &&
intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
/* [PSR2, +Colorimetry] */
intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
@@ -1724,6 +1723,7 @@ intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
{
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ int pixel_clock;
if (pipe_config->vrr.enable)
return;
@@ -1742,10 +1742,18 @@ intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
return;
pipe_config->has_drrs = true;
- intel_link_compute_m_n(output_bpp, pipe_config->lane_count,
- intel_connector->panel.downclock_mode->clock,
+
+ pixel_clock = intel_connector->panel.downclock_mode->clock;
+ if (pipe_config->splitter.enable)
+ pixel_clock /= pipe_config->splitter.link_count;
+
+ intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock,
pipe_config->port_clock, &pipe_config->dp_m2_n2,
constant_n, pipe_config->fec_enable);
+
+ /* FIXME: abstract this better */
+ if (pipe_config->splitter.enable)
+ pipe_config->dp_m2_n2.gmch_m *= pipe_config->splitter.link_count;
}
int
@@ -1820,6 +1828,26 @@ intel_dp_compute_config(struct intel_encoder *encoder,
output_bpp = intel_dp_output_bpp(pipe_config->output_format,
pipe_config->pipe_bpp);
+ if (intel_dp->mso_link_count) {
+ int n = intel_dp->mso_link_count;
+ int overlap = intel_dp->mso_pixel_overlap;
+
+ pipe_config->splitter.enable = true;
+ pipe_config->splitter.link_count = n;
+ pipe_config->splitter.pixel_overlap = overlap;
+
+ drm_dbg_kms(&dev_priv->drm, "MSO link count %d, pixel overlap %d\n",
+ n, overlap);
+
+ adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap;
+ adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hblank_start / n + overlap;
+ adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_end / n + overlap;
+ adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hsync_start / n + overlap;
+ adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_end / n + overlap;
+ adjusted_mode->crtc_htotal = adjusted_mode->crtc_htotal / n + overlap;
+ adjusted_mode->crtc_clock /= n;
+ }
+
intel_link_compute_m_n(output_bpp,
pipe_config->lane_count,
adjusted_mode->crtc_clock,
@@ -1827,6 +1855,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
&pipe_config->dp_m_n,
constant_n, pipe_config->fec_enable);
+ /* FIXME: abstract this better */
+ if (pipe_config->splitter.enable)
+ pipe_config->dp_m_n.gmch_m *= pipe_config->splitter.link_count;
+
if (!HAS_DDI(dev_priv))
intel_dp_set_clock(encoder, pipe_config);
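
The eDP MSO branch above rewrites the adjusted mode so that each of the n splitter links transmits one vertical slice of the panel - roughly 1/n of the horizontal timings plus the pixel overlap, at 1/n of the pixel clock - and the data M/N is then scaled back up because the DP link still carries all n streams. A standalone sketch of the split arithmetic with made-up panel numbers:

#include <stdio.h>

struct demo_timings {
	int hdisplay, htotal, clock;	/* clock in kHz */
};

/* Split full-panel timings into what one of n MSO segments transmits. */
static struct demo_timings mso_split(struct demo_timings full, int n, int overlap)
{
	struct demo_timings seg = {
		.hdisplay = full.hdisplay / n + overlap,
		.htotal   = full.htotal / n + overlap,
		.clock    = full.clock / n,
	};

	return seg;
}

int main(void)
{
	/* hypothetical wide panel driven as MSO 2x1 with no pixel overlap */
	struct demo_timings full = { .hdisplay = 5120, .htotal = 5500, .clock = 522000 };
	struct demo_timings seg = mso_split(full, 2, 0);

	printf("per-segment: hdisplay=%d htotal=%d clock=%d kHz\n",
	       seg.hdisplay, seg.htotal, seg.clock);
	return 0;
}
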
@@ -2359,7 +2391,7 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
return false;
}
- if (CAN_PSR(i915) && intel_dp_is_edp(intel_dp)) {
+ if (CAN_PSR(intel_dp)) {
drm_dbg_kms(&i915->drm, "Forcing full modeset to compute PSR state\n");
crtc_state->uapi.mode_changed = true;
return false;
@@ -2650,7 +2682,7 @@ void intel_dp_check_frl_training(struct intel_dp *intel_dp)
if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
int ret, mode;
- drm_dbg(&dev_priv->drm, "Couldnt set FRL mode, continuing with TMDS mode\n");
+ drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n");
ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);
@@ -3517,6 +3549,64 @@ static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
}
}
+static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ int n = intel_dp->mso_link_count;
+ int overlap = intel_dp->mso_pixel_overlap;
+
+ if (!mode || !n)
+ return;
+
+ mode->hdisplay = (mode->hdisplay - overlap) * n;
+ mode->hsync_start = (mode->hsync_start - overlap) * n;
+ mode->hsync_end = (mode->hsync_end - overlap) * n;
+ mode->htotal = (mode->htotal - overlap) * n;
+ mode->clock *= n;
+
+ drm_mode_set_name(mode);
+
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] using generated MSO mode: ",
+ connector->base.base.id, connector->base.name);
+ drm_mode_debug_printmodeline(mode);
+}
+
+static void intel_edp_mso_init(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 mso;
+
+ if (intel_dp->edp_dpcd[0] < DP_EDP_14)
+ return;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, &mso) != 1) {
+ drm_err(&i915->drm, "Failed to read MSO cap\n");
+ return;
+ }
+
+ /* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */
+ mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK;
+ if (mso % 2 || mso > drm_dp_max_lane_count(intel_dp->dpcd)) {
+ drm_err(&i915->drm, "Invalid MSO link count cap %u\n", mso);
+ mso = 0;
+ }
+
+ if (mso) {
+ drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration\n",
+ mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso);
+ if (!HAS_MSO(i915)) {
+ drm_err(&i915->drm, "No source MSO support, disabling\n");
+ mso = 0;
+ }
+ }
+
+ intel_dp->mso_link_count = mso;
+ intel_dp->mso_pixel_overlap = 0; /* FIXME: read from DisplayID v2.0 */
+}
+
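
intel_edp_mso_init() above only accepts an MSO link count of 0 (plain SST), 2 or 4, and never more links than the sink exposes lanes, per the "SST or MSO 2x1, 2x2, 4x1" rule in its comment. A small sketch of just that validation step (the DPCD read is omitted; the values are illustrative):

#include <stdio.h>

/* Returns the accepted MSO link count, or 0 to fall back to SST. */
static int validate_mso_link_count(int mso, int max_lane_count)
{
	/* valid configurations: SST (0) or MSO 2x1, 2x2, 4x1 */
	if (mso % 2 || mso > max_lane_count)
		return 0;

	return mso;
}

int main(void)
{
	printf("mso=2 lanes=4 -> %d\n", validate_mso_link_count(2, 4));	/* 2 */
	printf("mso=3 lanes=4 -> %d\n", validate_mso_link_count(3, 4));	/* 0: odd count */
	printf("mso=4 lanes=2 -> %d\n", validate_mso_link_count(4, 2));	/* 0: more links than lanes */
	return 0;
}
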
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
@@ -3600,6 +3690,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
*/
intel_edp_init_source_oui(intel_dp, true);
+ intel_edp_mso_init(intel_dp);
+
return true;
}
@@ -5548,19 +5640,18 @@ static int intel_dp_get_modes(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct edid *edid;
+ int num_modes = 0;
edid = intel_connector->detect_edid;
if (edid) {
- int ret = intel_connector_update_modes(connector, edid);
+ num_modes = intel_connector_update_modes(connector, edid);
if (intel_vrr_is_capable(connector))
drm_connector_set_vrr_capable_property(connector,
true);
- if (ret)
- return ret;
}
- /* if eDP has no EDID, fall back to fixed mode */
+ /* Also add fixed mode, which may or may not be present in EDID */
if (intel_dp_is_edp(intel_attached_dp(intel_connector)) &&
intel_connector->panel.fixed_mode) {
struct drm_display_mode *mode;
@@ -5569,10 +5660,13 @@ static int intel_dp_get_modes(struct drm_connector *connector)
intel_connector->panel.fixed_mode);
if (mode) {
drm_mode_probed_add(connector, mode);
- return 1;
+ num_modes++;
}
}
+ if (num_modes)
+ return num_modes;
+
if (!edid) {
struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
struct drm_display_mode *mode;
@@ -5582,11 +5676,11 @@ static int intel_dp_get_modes(struct drm_connector *connector)
intel_dp->downstream_ports);
if (mode) {
drm_mode_probed_add(connector, mode);
- return 1;
+ num_modes++;
}
}
- return 0;
+ return num_modes;
}
static int
@@ -6459,6 +6553,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (fixed_mode)
downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);
+ /* multiply the mode clock and horizontal timings for MSO */
+ intel_edp_mso_mode_fixup(intel_connector, fixed_mode);
+ intel_edp_mso_mode_fixup(intel_connector, downclock_mode);
+
/* fallback to VBT if available for eDP */
if (!fixed_mode)
fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
@@ -6641,6 +6739,8 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_dp->frl.is_trained = false;
intel_dp->frl.trained_rate_gbps = 0;
+ intel_psr_init(intel_dp);
+
return true;
fail:
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index 4dba5bb15af5..40c516e90193 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -698,30 +698,6 @@ intel_dp_mst_hdcp_stream_encryption(struct intel_connector *connector,
return 0;
}
-static bool intel_dp_mst_get_qses_status(struct intel_digital_port *dig_port,
- struct intel_connector *connector)
-{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct drm_dp_query_stream_enc_status_ack_reply reply;
- struct intel_dp *intel_dp = &dig_port->dp;
- int ret;
-
- ret = drm_dp_send_query_stream_enc_status(&intel_dp->mst_mgr,
- connector->port, &reply);
- if (ret) {
- drm_dbg_kms(&i915->drm,
- "[%s:%d] failed QSES ret=%d\n",
- connector->base.name, connector->base.base.id, ret);
- return false;
- }
-
- drm_dbg_kms(&i915->drm, "[%s:%d] QSES stream auth: %d stream enc: %d\n",
- connector->base.name, connector->base.base.id,
- reply.auth_completed, reply.encryption_enabled);
-
- return reply.auth_completed && reply.encryption_enabled;
-}
-
static int
intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
bool enable)
@@ -757,11 +733,6 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
return 0;
}
-/*
- * DP v2.0 I.3.3 ignore the stream signature L' in QSES reply msg reply.
- * I.3.5 MST source device may use a QSES msg to query downstream status
- * for a particular stream.
- */
static
int intel_dp_mst_hdcp2_check_link(struct intel_digital_port *dig_port,
struct intel_connector *connector)
@@ -781,7 +752,7 @@ int intel_dp_mst_hdcp2_check_link(struct intel_digital_port *dig_port,
return ret;
}
- return intel_dp_mst_get_qses_status(dig_port, connector) ? 0 : -EINVAL;
+ return 0;
}
static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = {
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index b4621ed0127e..906860ad8eb8 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -39,6 +39,7 @@
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_hdcp.h"
+#include "skl_scaler.h"
static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
@@ -590,7 +591,7 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *dig_port = intel_mst->primary;
- intel_ddi_get_config(&dig_port->base, pipe_config);
+ dig_port->base.get_config(&dig_port->base, pipe_config);
}
static bool intel_dp_mst_initial_fastset_check(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 7ba7f315aaee..166e9a3a8c09 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -3,11 +3,13 @@
* Copyright © 2020 Intel Corporation
*/
#include <linux/kernel.h>
+#include "intel_crtc.h"
#include "intel_display_types.h"
#include "intel_display.h"
#include "intel_dpll.h"
#include "intel_lvds.h"
#include "intel_panel.h"
+#include "intel_sideband.h"
struct intel_limit {
struct {
@@ -1361,3 +1363,510 @@ intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
else
dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
}
+
+static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
+{
+ if (IS_I830(dev_priv))
+ return false;
+
+ return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
+}
+
+void i9xx_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ i915_reg_t reg = DPLL(crtc->pipe);
+ u32 dpll = crtc_state->dpll_hw_state.dpll;
+ int i;
+
+ assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
+
+ /* PLL is protected by panel, make sure we can write it */
+ if (i9xx_has_pps(dev_priv))
+ assert_panel_unlocked(dev_priv, crtc->pipe);
+
+ /*
+ * Apparently we need to have VGA mode enabled prior to changing
+ * the P1/P2 dividers. Otherwise the DPLL will keep using the old
+ * dividers, even though the register value does change.
+ */
+ intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS);
+ intel_de_write(dev_priv, reg, dpll);
+
+ /* Wait for the clocks to stabilize. */
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ if (INTEL_GEN(dev_priv) >= 4) {
+ intel_de_write(dev_priv, DPLL_MD(crtc->pipe),
+ crtc_state->dpll_hw_state.dpll_md);
+ } else {
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ intel_de_write(dev_priv, reg, dpll);
+ }
+
+ /* We do this three times for luck */
+ for (i = 0; i < 3; i++) {
+ intel_de_write(dev_priv, reg, dpll);
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150); /* wait for warmup */
+ }
+}
+
+static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ u32 reg_val;
+
+ /*
+ * The PLLB opamp always calibrates to the max value of 0x3f; force
+ * enable it and set it to a reasonable value instead.
+ */
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
+ reg_val &= 0xffffff00;
+ reg_val |= 0x00000030;
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
+
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
+ reg_val &= 0x00ffffff;
+ reg_val |= 0x8c000000;
+ vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
+
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
+ reg_val &= 0xffffff00;
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
+
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
+ reg_val &= 0x00ffffff;
+ reg_val |= 0xb0000000;
+ vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
+}
+
+static void _vlv_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
+ udelay(150);
+
+ if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
+ drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
+}
+
+void vlv_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
+
+ /* PLL is protected by panel, make sure we can write it */
+ assert_panel_unlocked(dev_priv, pipe);
+
+ if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
+ _vlv_enable_pll(crtc, pipe_config);
+
+ intel_de_write(dev_priv, DPLL_MD(pipe),
+ pipe_config->dpll_hw_state.dpll_md);
+ intel_de_posting_read(dev_priv, DPLL_MD(pipe));
+}
+
+
+static void _chv_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ u32 tmp;
+
+ vlv_dpio_get(dev_priv);
+
+ /* Enable back the 10bit clock to display controller */
+ tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
+ tmp |= DPIO_DCLKP_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
+
+ vlv_dpio_put(dev_priv);
+
+ /*
+ * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
+ */
+ udelay(1);
+
+ /* Enable PLL */
+ intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
+
+ /* Check PLL is locked */
+ if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
+ drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
+}
+
+void chv_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
+
+ /* PLL is protected by panel, make sure we can write it */
+ assert_panel_unlocked(dev_priv, pipe);
+
+ if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
+ _chv_enable_pll(crtc, pipe_config);
+
+ if (pipe != PIPE_A) {
+ /*
+ * WaPixelRepeatModeFixForC0:chv
+ *
+ * DPLLCMD is AWOL. Use chicken bits to propagate
+ * the value from DPLLBMD to either pipe B or C.
+ */
+ intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
+ intel_de_write(dev_priv, DPLL_MD(PIPE_B),
+ pipe_config->dpll_hw_state.dpll_md);
+ intel_de_write(dev_priv, CBR4_VLV, 0);
+ dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
+
+ /*
+ * DPLLB VGA mode also seems to cause problems.
+ * We should always have it disabled.
+ */
+ drm_WARN_ON(&dev_priv->drm,
+ (intel_de_read(dev_priv, DPLL(PIPE_B)) &
+ DPLL_VGA_MODE_DIS) == 0);
+ } else {
+ intel_de_write(dev_priv, DPLL_MD(pipe),
+ pipe_config->dpll_hw_state.dpll_md);
+ intel_de_posting_read(dev_priv, DPLL_MD(pipe));
+ }
+}
+
+void vlv_prepare_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ u32 mdiv;
+ u32 bestn, bestm1, bestm2, bestp1, bestp2;
+ u32 coreclk, reg_val;
+
+ /* Enable Refclk */
+ intel_de_write(dev_priv, DPLL(pipe),
+ pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
+
+ /* No need to actually set up the DPLL with DSI */
+ if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+ return;
+
+ vlv_dpio_get(dev_priv);
+
+ bestn = pipe_config->dpll.n;
+ bestm1 = pipe_config->dpll.m1;
+ bestm2 = pipe_config->dpll.m2;
+ bestp1 = pipe_config->dpll.p1;
+ bestp2 = pipe_config->dpll.p2;
+
+ /* See eDP HDMI DPIO driver vbios notes doc */
+
+ /* PLL B needs special handling */
+ if (pipe == PIPE_B)
+ vlv_pllb_recal_opamp(dev_priv, pipe);
+
+ /* Set up Tx target for periodic Rcomp update */
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
+
+ /* Disable target IRef on PLL */
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
+ reg_val &= 0x00ffffff;
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
+
+ /* Disable fast lock */
+ vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
+
+ /* Set idtafcrecal before PLL is enabled */
+ mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
+ mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
+ mdiv |= ((bestn << DPIO_N_SHIFT));
+ mdiv |= (1 << DPIO_K_SHIFT);
+
+ /*
+ * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
+ * but we don't support that).
+ * Note: don't use the DAC post divider as it seems unstable.
+ */
+ mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
+
+ mdiv |= DPIO_ENABLE_CALIBRATION;
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
+
+ /* Set HBR and RBR LPF coefficients */
+ if (pipe_config->port_clock == 162000 ||
+ intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
+ intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
+ 0x009f0003);
+ else
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
+ 0x00d0000f);
+
+ if (intel_crtc_has_dp_encoder(pipe_config)) {
+ /* Use SSC source */
+ if (pipe == PIPE_A)
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ 0x0df40000);
+ else
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ 0x0df70000);
+ } else { /* HDMI or VGA */
+ /* Use bend source */
+ if (pipe == PIPE_A)
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ 0x0df70000);
+ else
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ 0x0df40000);
+ }
+
+ coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
+ coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
+ if (intel_crtc_has_dp_encoder(pipe_config))
+ coreclk |= 0x01000000;
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
+
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
+
+ vlv_dpio_put(dev_priv);
+}
+
+void chv_prepare_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ u32 loopfilter, tribuf_calcntr;
+ u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
+ u32 dpio_val;
+ int vco;
+
+ /* Enable Refclk and SSC */
+ intel_de_write(dev_priv, DPLL(pipe),
+ pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
+
+ /* No need to actually set up the DPLL with DSI */
+ if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+ return;
+
+ bestn = pipe_config->dpll.n;
+ bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
+ bestm1 = pipe_config->dpll.m1;
+ bestm2 = pipe_config->dpll.m2 >> 22;
+ bestp1 = pipe_config->dpll.p1;
+ bestp2 = pipe_config->dpll.p2;
+ vco = pipe_config->dpll.vco;
+ dpio_val = 0;
+ loopfilter = 0;
+
+ vlv_dpio_get(dev_priv);
+
+ /* p1 and p2 divider */
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
+ 5 << DPIO_CHV_S1_DIV_SHIFT |
+ bestp1 << DPIO_CHV_P1_DIV_SHIFT |
+ bestp2 << DPIO_CHV_P2_DIV_SHIFT |
+ 1 << DPIO_CHV_K_DIV_SHIFT);
+
+ /* Feedback post-divider - m2 */
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
+
+ /* Feedback refclk divider - n and m1 */
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
+ DPIO_CHV_M1_DIV_BY_2 |
+ 1 << DPIO_CHV_N_DIV_SHIFT);
+
+ /* M2 fraction division */
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
+
+ /* M2 fraction division enable */
+ dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
+ dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
+ dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
+ if (bestm2_frac)
+ dpio_val |= DPIO_CHV_FRAC_DIV_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
+
+ /* Program digital lock detect threshold */
+ dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
+ dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
+ DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
+ dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
+ if (!bestm2_frac)
+ dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
+
+ /* Loop filter */
+ if (vco == 5400000) {
+ loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0x9;
+ } else if (vco <= 6200000) {
+ loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0x9;
+ } else if (vco <= 6480000) {
+ loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0x8;
+ } else {
+ /* Not supported. Apply the same limits as in the max case */
+ loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0;
+ }
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
+
+ dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
+ dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
+ dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
+
+ /* AFC Recal */
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
+ vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
+ DPIO_AFC_RECAL);
+
+ vlv_dpio_put(dev_priv);
+}
+
+/**
+ * vlv_force_pll_on - forcibly enable just the PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to enable
+ * @dpll: PLL configuration
+ *
+ * Enable the PLL for @pipe using the supplied @dpll config. To be used
+ * in cases where we need the PLL enabled even when @pipe is not going to
+ * be enabled.
+ */
+int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
+ const struct dpll *dpll)
+{
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ struct intel_crtc_state *pipe_config;
+
+ pipe_config = intel_crtc_state_alloc(crtc);
+ if (!pipe_config)
+ return -ENOMEM;
+
+ pipe_config->cpu_transcoder = (enum transcoder)pipe;
+ pipe_config->pixel_multiplier = 1;
+ pipe_config->dpll = *dpll;
+
+ if (IS_CHERRYVIEW(dev_priv)) {
+ chv_compute_dpll(crtc, pipe_config);
+ chv_prepare_pll(crtc, pipe_config);
+ chv_enable_pll(crtc, pipe_config);
+ } else {
+ vlv_compute_dpll(crtc, pipe_config);
+ vlv_prepare_pll(crtc, pipe_config);
+ vlv_enable_pll(crtc, pipe_config);
+ }
+
+ kfree(pipe_config);
+
+ return 0;
+}
+
+void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ u32 val;
+
+ /* Make sure the pipe isn't still relying on us */
+ assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
+
+ val = DPLL_INTEGRATED_REF_CLK_VLV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (pipe != PIPE_A)
+ val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ intel_de_write(dev_priv, DPLL(pipe), val);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
+}
+
+void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ u32 val;
+
+ /* Make sure the pipe isn't still relying on us */
+ assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
+
+ val = DPLL_SSC_REF_CLK_CHV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (pipe != PIPE_A)
+ val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ intel_de_write(dev_priv, DPLL(pipe), val);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
+
+ vlv_dpio_get(dev_priv);
+
+ /* Disable 10bit clock to display controller */
+ val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
+ val &= ~DPIO_DCLKP_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
+
+ vlv_dpio_put(dev_priv);
+}
+
+void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ /* The i830 keeps its pipe PLLs enabled, so don't disable them here */
+ if (IS_I830(dev_priv))
+ return;
+
+ /* Make sure the pipe isn't still relying on us */
+ assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
+
+ intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
+}
+
+
+/**
+ * vlv_force_pll_off - forcibly disable just the PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to disable
+ *
+ * Disable the PLL for @pipe. Counterpart to vlv_force_pll_on(), for
+ * cases where the PLL was enabled without @pipe itself being enabled.
+ */
+void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ if (IS_CHERRYVIEW(dev_priv))
+ chv_disable_pll(dev_priv, pipe);
+ else
+ vlv_disable_pll(dev_priv, pipe);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
index caf4615092e1..7ff4b0d29ed1 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -10,6 +10,7 @@ struct dpll;
struct drm_i915_private;
struct intel_crtc;
struct intel_crtc_state;
+enum pipe;
void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv);
int vlv_calc_dpll_params(int refclk, struct dpll *clock);
@@ -20,4 +21,21 @@ void vlv_compute_dpll(struct intel_crtc *crtc,
void chv_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
+int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
+ const struct dpll *dpll);
+void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
+void i9xx_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state);
+void vlv_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *pipe_config);
+void chv_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *pipe_config);
+void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe);
+void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe);
+void i9xx_disable_pll(const struct intel_crtc_state *crtc_state);
+void vlv_prepare_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *pipe_config);
+void chv_prepare_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *pipe_config);
+
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index f6ad257a260e..22ee8e13b518 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -176,7 +176,7 @@ void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
return;
mutex_lock(&dev_priv->dpll.lock);
- drm_WARN_ON(&dev_priv->drm, !pll->state.crtc_mask);
+ drm_WARN_ON(&dev_priv->drm, !pll->state.pipe_mask);
if (!pll->active_mask) {
drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name);
drm_WARN_ON(&dev_priv->drm, pll->on);
@@ -198,7 +198,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
+ unsigned int pipe_mask = BIT(crtc->pipe);
unsigned int old_mask;
if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
@@ -207,16 +207,16 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
mutex_lock(&dev_priv->dpll.lock);
old_mask = pll->active_mask;
- if (drm_WARN_ON(&dev_priv->drm, !(pll->state.crtc_mask & crtc_mask)) ||
- drm_WARN_ON(&dev_priv->drm, pll->active_mask & crtc_mask))
+ if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
+ drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
goto out;
- pll->active_mask |= crtc_mask;
+ pll->active_mask |= pipe_mask;
drm_dbg_kms(&dev_priv->drm,
- "enable %s (active %x, on? %d) for crtc %d\n",
+ "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
pll->info->name, pll->active_mask, pll->on,
- crtc->base.base.id);
+ crtc->base.base.id, crtc->base.name);
if (old_mask) {
drm_WARN_ON(&dev_priv->drm, !pll->on);
@@ -244,7 +244,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
+ unsigned int pipe_mask = BIT(crtc->pipe);
/* PCH only available on ILK+ */
if (INTEL_GEN(dev_priv) < 5)
@@ -254,18 +254,20 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
return;
mutex_lock(&dev_priv->dpll.lock);
- if (drm_WARN_ON(&dev_priv->drm, !(pll->active_mask & crtc_mask)))
+ if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
+ "%s not used by [CRTC:%d:%s]\n", pll->info->name,
+ crtc->base.base.id, crtc->base.name))
goto out;
drm_dbg_kms(&dev_priv->drm,
- "disable %s (active %x, on? %d) for crtc %d\n",
+ "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
pll->info->name, pll->active_mask, pll->on,
- crtc->base.base.id);
+ crtc->base.base.id, crtc->base.name);
assert_shared_dpll_enabled(dev_priv, pll);
drm_WARN_ON(&dev_priv->drm, !pll->on);
- pll->active_mask &= ~crtc_mask;
+ pll->active_mask &= ~pipe_mask;
if (pll->active_mask)
goto out;
@@ -296,7 +298,7 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
pll = &dev_priv->dpll.shared_dplls[i];
/* Only want to check enabled timings first */
- if (shared_dpll[i].crtc_mask == 0) {
+ if (shared_dpll[i].pipe_mask == 0) {
if (!unused_pll)
unused_pll = pll;
continue;
@@ -306,10 +308,10 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
&shared_dpll[i].hw_state,
sizeof(*pll_state)) == 0) {
drm_dbg_kms(&dev_priv->drm,
- "[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
+ "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
crtc->base.base.id, crtc->base.name,
pll->info->name,
- shared_dpll[i].crtc_mask,
+ shared_dpll[i].pipe_mask,
pll->active_mask);
return pll;
}
@@ -338,13 +340,13 @@ intel_reference_shared_dpll(struct intel_atomic_state *state,
shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
- if (shared_dpll[id].crtc_mask == 0)
+ if (shared_dpll[id].pipe_mask == 0)
shared_dpll[id].hw_state = *pll_state;
drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
pipe_name(crtc->pipe));
- shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
+ shared_dpll[id].pipe_mask |= BIT(crtc->pipe);
}
static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
@@ -354,7 +356,7 @@ static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
struct intel_shared_dpll_state *shared_dpll;
shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
- shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe);
+ shared_dpll[pll->info->id].pipe_mask &= ~BIT(crtc->pipe);
}
static void intel_put_dpll(struct intel_atomic_state *state,
@@ -3559,7 +3561,13 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
- if (IS_DG1(dev_priv)) {
+ if (IS_ALDERLAKE_S(dev_priv)) {
+ dpll_mask =
+ BIT(DPLL_ID_DG1_DPLL3) |
+ BIT(DPLL_ID_DG1_DPLL2) |
+ BIT(DPLL_ID_ICL_DPLL1) |
+ BIT(DPLL_ID_ICL_DPLL0);
+ } else if (IS_DG1(dev_priv)) {
if (port == PORT_D || port == PORT_E) {
dpll_mask =
BIT(DPLL_ID_DG1_DPLL2) |
@@ -3865,7 +3873,10 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!(val & PLL_ENABLE))
goto out;
- if (IS_DG1(dev_priv)) {
+ if (IS_ALDERLAKE_S(dev_priv)) {
+ hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
+ hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
+ } else if (IS_DG1(dev_priv)) {
hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
} else if (IS_ROCKETLAKE(dev_priv)) {
@@ -3921,7 +3932,10 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
const enum intel_dpll_id id = pll->info->id;
i915_reg_t cfgcr0_reg, cfgcr1_reg;
- if (IS_DG1(dev_priv)) {
+ if (IS_ALDERLAKE_S(dev_priv)) {
+ cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
+ cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
+ } else if (IS_DG1(dev_priv)) {
cfgcr0_reg = DG1_DPLL_CFGCR0(id);
cfgcr1_reg = DG1_DPLL_CFGCR1(id);
} else if (IS_ROCKETLAKE(dev_priv)) {
@@ -4384,6 +4398,22 @@ static const struct intel_dpll_mgr dg1_pll_mgr = {
.dump_hw_state = icl_dump_hw_state,
};
+static const struct dpll_info adls_plls[] = {
+ { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
+ { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+ { "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
+ { "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr adls_pll_mgr = {
+ .dpll_info = adls_plls,
+ .get_dplls = icl_get_dplls,
+ .put_dplls = icl_put_dplls,
+ .update_ref_clks = icl_update_dpll_ref_clks,
+ .dump_hw_state = icl_dump_hw_state,
+};
+
/**
* intel_shared_dpll_init - Initialize shared DPLLs
* @dev: drm device
@@ -4397,7 +4427,9 @@ void intel_shared_dpll_init(struct drm_device *dev)
const struct dpll_info *dpll_info;
int i;
- if (IS_DG1(dev_priv))
+ if (IS_ALDERLAKE_S(dev_priv))
+ dpll_mgr = &adls_pll_mgr;
+ else if (IS_DG1(dev_priv))
dpll_mgr = &dg1_pll_mgr;
else if (IS_ROCKETLAKE(dev_priv))
dpll_mgr = &rkl_pll_mgr;
@@ -4567,27 +4599,30 @@ static void readout_dpll_hw_state(struct drm_i915_private *i915,
POWER_DOMAIN_DPLL_DC_OFF);
}
- pll->state.crtc_mask = 0;
+ pll->state.pipe_mask = 0;
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
- pll->state.crtc_mask |= 1 << crtc->pipe;
+ pll->state.pipe_mask |= BIT(crtc->pipe);
}
- pll->active_mask = pll->state.crtc_mask;
+ pll->active_mask = pll->state.pipe_mask;
drm_dbg_kms(&i915->drm,
- "%s hw state readout: crtc_mask 0x%08x, on %i\n",
- pll->info->name, pll->state.crtc_mask, pll->on);
+ "%s hw state readout: pipe_mask 0x%x, on %i\n",
+ pll->info->name, pll->state.pipe_mask, pll->on);
}
-void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
+void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
{
- int i;
-
if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
i915->dpll.mgr->update_ref_clks(i915);
+}
+
+void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
+{
+ int i;
for (i = 0; i < i915->dpll.num_shared_dpll; i++)
readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 2eb7618ef957..7fd031a70cfd 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -241,9 +241,9 @@ struct intel_dpll_hw_state {
*/
struct intel_shared_dpll_state {
/**
- * @crtc_mask: mask of CRTC using this DPLL, active or not
+ * @pipe_mask: mask of pipes using this DPLL, active or not
*/
- unsigned crtc_mask;
+ u8 pipe_mask;
/**
* @hw_state: hardware configuration for the DPLL stored in
@@ -351,9 +351,9 @@ struct intel_shared_dpll {
struct intel_shared_dpll_state state;
/**
- * @active_mask: mask of active CRTCs (i.e. DPMS on) using this DPLL
+ * @active_mask: mask of active pipes (i.e. DPMS on) using this DPLL
*/
- unsigned active_mask;
+ u8 active_mask;
/**
* @on: is the PLL actually active? Disabled during modeset
@@ -410,6 +410,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_shared_dpll_swap_state(struct intel_atomic_state *state);
void intel_shared_dpll_init(struct drm_device *dev);
+void intel_dpll_update_ref_clks(struct drm_i915_private *dev_priv);
void intel_dpll_readout_hw_state(struct drm_i915_private *dev_priv);
void intel_dpll_sanitize_state(struct drm_i915_private *dev_priv);
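The crtc_mask to pipe_mask conversion above keeps the same reference-counting idea: each pipe using a shared DPLL sets its bit, and the PLL is only switched off once no bit remains set. A toy model of that bookkeeping (names and structure are illustrative, not the kernel's):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct toy_pll {
	uint8_t pipe_mask;	/* pipes referencing the PLL, active or not */
	int on;
};

static void toy_pll_enable(struct toy_pll *pll, int pipe)
{
	if (!pll->pipe_mask)
		pll->on = 1;		/* first user powers the PLL up */
	pll->pipe_mask |= 1u << pipe;
}

static void toy_pll_disable(struct toy_pll *pll, int pipe)
{
	pll->pipe_mask &= ~(1u << pipe);
	if (!pll->pipe_mask)
		pll->on = 0;		/* last user powers the PLL down */
}

int main(void)
{
	struct toy_pll pll = { 0, 0 };

	toy_pll_enable(&pll, 0);	/* pipe A */
	toy_pll_enable(&pll, 2);	/* pipe C */
	toy_pll_disable(&pll, 0);
	assert(pll.on);			/* pipe C still holds a reference */
	toy_pll_disable(&pll, 2);
	assert(!pll.on);
	printf("final pipe_mask 0x%x\n", pll.pipe_mask);
	return 0;
}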
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index eed037ec0b29..e349caef1926 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -425,7 +425,7 @@ static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
const u16 slave_addr)
{
struct drm_device *drm_dev = intel_dsi->base.base.dev;
- struct device *dev = &drm_dev->pdev->dev;
+ struct device *dev = drm_dev->dev;
struct acpi_device *acpi_dev;
struct list_head resource_list;
struct i2c_adapter_lookup lookup;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 84f853f113b9..07db8e83f98e 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -167,7 +167,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct intel_framebuffer *intel_fb = ifbdev->fb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
const struct i915_ggtt_view view = {
.type = I915_GGTT_VIEW_NORMAL,
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index b2eb96ae10a2..60b29110099a 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -3,6 +3,8 @@
* Copyright © 2020 Intel Corporation
*/
#include "intel_atomic.h"
+#include "intel_ddi.h"
+#include "intel_ddi_buf_trans.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
@@ -550,6 +552,142 @@ train_done:
drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}
+/* Starting with Haswell, different DDI ports can work in FDI mode for
+ * connection to the PCH-located connectors. For this, it is necessary to train
+ * both the DDI port and PCH receiver for the desired DDI buffer settings.
+ *
+ * The recommended port to work in FDI mode is DDI E, which we use here. Also,
+ * please note that when FDI mode is active on DDI E, it shares 2 lines with
+ * DDI A (which is used for eDP)
+ */
+void hsw_fdi_link_train(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 temp, i, rx_ctl_val;
+ int n_entries;
+
+ intel_ddi_get_buf_trans_fdi(dev_priv, &n_entries);
+
+ intel_prepare_dp_ddi_buffers(encoder, crtc_state);
+
+ /* Set the FDI_RX_MISC pwrdn lanes and apply the 2 workarounds listed in
+ * the mode set "sequence for CRT port" document:
+ * - TP1 to TP2 time with the default value
+ * - FDI delay to 90h
+ *
+ * WaFDIAutoLinkSetTimingOverrride:hsw
+ */
+ intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
+ FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+ /* Enable the PCH Receiver FDI PLL */
+ rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
+ FDI_RX_PLL_ENABLE |
+ FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
+ udelay(220);
+
+ /* Switch from Rawclk to PCDclk */
+ rx_ctl_val |= FDI_PCDCLK;
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+
+ /* Configure Port Clock Select */
+ drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
+ intel_ddi_enable_clock(encoder, crtc_state);
+
+ /* Start the training iterating through available voltages and emphasis,
+ * testing each value twice. */
+ for (i = 0; i < n_entries * 2; i++) {
+ /* Configure DP_TP_CTL with auto-training */
+ intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_LINK_TRAIN_PAT1 |
+ DP_TP_CTL_ENABLE);
+
+ /* Configure and enable DDI_BUF_CTL for DDI E with the next voltage.
+ * DDI E does not support port reversal; that functionality is
+ * achieved on the PCH side in FDI_RX_CTL, so there is no need to set
+ * the port reversal bit. */
+ intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
+ DDI_BUF_CTL_ENABLE | ((crtc_state->fdi_lanes - 1) << 1) | DDI_BUF_TRANS_SELECT(i / 2));
+ intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
+
+ udelay(600);
+
+ /* Program PCH FDI Receiver TU */
+ intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));
+
+ /* Enable PCH FDI Receiver with auto-training */
+ rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
+
+ /* Wait for FDI receiver lane calibration */
+ udelay(30);
+
+ /* Unset FDI_RX_MISC pwrdn lanes */
+ temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
+ temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
+ intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
+
+ /* Wait for FDI auto training time */
+ udelay(5);
+
+ temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
+ if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI link training done on step %d\n", i);
+ break;
+ }
+
+ /*
+ * Leave things enabled even if we failed to train FDI.
+ * Results in less fireworks from the state checker.
+ */
+ if (i == n_entries * 2 - 1) {
+ drm_err(&dev_priv->drm, "FDI link training failed!\n");
+ break;
+ }
+
+ rx_ctl_val &= ~FDI_RX_ENABLE;
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
+
+ temp = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
+ temp &= ~DDI_BUF_CTL_ENABLE;
+ intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), temp);
+ intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
+
+ /* Disable DP_TP_CTL and FDI_RX_CTL and retry */
+ temp = intel_de_read(dev_priv, DP_TP_CTL(PORT_E));
+ temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ intel_de_write(dev_priv, DP_TP_CTL(PORT_E), temp);
+ intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));
+
+ intel_wait_ddi_buf_idle(dev_priv, PORT_E);
+
+ /* Reset FDI_RX_MISC pwrdn lanes */
+ temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
+ temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+ intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
+ intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
+ }
+
+ /* Enable normal pixel sending for FDI */
+ intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_LINK_TRAIN_NORMAL |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_ENABLE);
+}
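The training loop above tries every entry of the DDI buffer translation table twice, which is why it iterates up to n_entries * 2 and selects the entry with i / 2. A tiny standalone illustration of that indexing (the n_entries value is made up; the real count comes from intel_ddi_get_buf_trans_fdi()):

#include <stdio.h>

int main(void)
{
	int n_entries = 3;	/* illustrative only */

	for (int i = 0; i < n_entries * 2; i++)
		printf("attempt %d -> buffer translation entry %d\n", i, i / 2);
	return 0;
}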
+
void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.h b/drivers/gpu/drm/i915/display/intel_fdi.h
index a9cd21663eb8..af01d2c173a8 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.h
+++ b/drivers/gpu/drm/i915/display/intel_fdi.h
@@ -9,6 +9,7 @@
struct drm_i915_private;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_encoder;
#define I915_DISPLAY_CONFIG_RETRY 1
int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
@@ -18,5 +19,7 @@ void ilk_fdi_disable(struct intel_crtc *crtc);
void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc);
void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state);
void intel_fdi_init_hook(struct drm_i915_private *dev_priv);
+void hsw_fdi_link_train(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 7b38eee9980f..6fc6965b6133 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -224,6 +224,8 @@ static void frontbuffer_release(struct kref *ref)
struct drm_i915_gem_object *obj = front->obj;
struct i915_vma *vma;
+ drm_WARN_ON(obj->base.dev, atomic_read(&front->bits));
+
spin_lock(&obj->vma.lock);
for_each_ggtt_vma(vma, obj) {
i915_vma_clear_scanout(vma);
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index b0d71bbbf2ad..0c952e1d720e 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -840,7 +840,7 @@ static const struct i2c_lock_operations gmbus_lock_ops = {
*/
int intel_gmbus_setup(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct intel_gmbus *bus;
unsigned int pin;
int ret;
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 95919d325b0b..7f384f259fc8 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -2233,6 +2233,16 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
return MODE_OK;
}
+static int intel_hdmi_port_clock(int clock, int bpc)
+{
+ /*
+ * Need to adjust the port link by:
+ * 1.5x for 12bpc
+ * 1.25x for 10bpc
+ */
+ return clock * bpc / 8;
+}
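A quick worked example of intel_hdmi_port_clock() above, using an illustrative 1080p60 pixel clock of 148500 kHz: 8bpc leaves the port clock unchanged, while 10bpc and 12bpc scale it by 1.25x and 1.5x respectively.

#include <stdio.h>

/* Same scaling as intel_hdmi_port_clock(): clock * bpc / 8. */
static int port_clock(int clock, int bpc)
{
	return clock * bpc / 8;
}

int main(void)
{
	int clock = 148500;	/* kHz, illustrative 1080p60 pixel clock */

	printf("8bpc:  %d kHz\n", port_clock(clock, 8));	/* 148500 */
	printf("10bpc: %d kHz\n", port_clock(clock, 10));	/* 185625 */
	printf("12bpc: %d kHz\n", port_clock(clock, 12));	/* 222750 */
	return 0;
}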
+
static enum drm_mode_status
intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -2264,17 +2274,18 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
clock /= 2;
/* check if we can do 8bpc */
- status = hdmi_port_clock_valid(hdmi, clock, true, has_hdmi_sink);
+ status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 8),
+ true, has_hdmi_sink);
if (has_hdmi_sink) {
/* if we can't do 8bpc we may still be able to do 12bpc */
if (status != MODE_OK && !HAS_GMCH(dev_priv))
- status = hdmi_port_clock_valid(hdmi, clock * 3 / 2,
+ status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 12),
true, has_hdmi_sink);
/* if we can't do 8,12bpc we may still be able to do 10bpc */
if (status != MODE_OK && INTEL_GEN(dev_priv) >= 11)
- status = hdmi_port_clock_valid(hdmi, clock * 5 / 4,
+ status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 10),
true, has_hdmi_sink);
}
if (status != MODE_OK)
@@ -2382,16 +2393,6 @@ intel_hdmi_ycbcr420_config(struct intel_crtc_state *crtc_state,
return intel_pch_panel_fitting(crtc_state, conn_state);
}
-static int intel_hdmi_port_clock(int clock, int bpc)
-{
- /*
- * Need to adjust the port link by:
- * 1.5x for 12bpc
- * 1.25x for 10bpc
- */
- return clock * bpc / 8;
-}
-
static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int clock)
@@ -3137,11 +3138,45 @@ static u8 rkl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
return GMBUS_PIN_1_BXT + phy;
}
+static u8 gen9bc_tgp_port_to_ddc_pin(struct drm_i915_private *i915, enum port port)
+{
+ enum phy phy = intel_port_to_phy(i915, port);
+
+ drm_WARN_ON(&i915->drm, port == PORT_A);
+
+ /*
+ * Pin mapping for GEN9 BC depends on which PCH is present. With TGP,
+ * the final two outputs use type-c pins, even though they're actually
+ * combo outputs. With CMP, the traditional DDI A-D pins are used for
+ * all outputs.
+ */
+ if (INTEL_PCH_TYPE(i915) >= PCH_TGP && phy >= PHY_C)
+ return GMBUS_PIN_9_TC1_ICP + phy - PHY_C;
+
+ return GMBUS_PIN_1_BXT + phy;
+}
+
static u8 dg1_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
{
return intel_port_to_phy(dev_priv, port) + 1;
}
+static u8 adls_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+{
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+
+ WARN_ON(port == PORT_B || port == PORT_C);
+
+ /*
+ * Pin mapping for ADL-S requires TC pins for all combo phy outputs
+ * except the first combo output.
+ */
+ if (phy == PHY_A)
+ return GMBUS_PIN_1_BXT;
+
+ return GMBUS_PIN_9_TC1_ICP + phy - PHY_B;
+}
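The ADL-S helper above maps the first combo phy to the legacy pin and offsets every later phy into the type-c pin range. A standalone sketch of that mapping, with symbolic pin numbers standing in for the real GMBUS_PIN_* constants:

#include <stdio.h>

enum { PIN_COMBO_A = 1, PIN_TC1 = 9 };	/* stand-ins, not the i915_reg.h values */

/* phy: 0 = PHY_A, 1 = PHY_B, ... as in adls_port_to_ddc_pin() above. */
static int adls_ddc_pin(int phy)
{
	return phy == 0 ? PIN_COMBO_A : PIN_TC1 + (phy - 1);
}

int main(void)
{
	for (int phy = 0; phy < 5; phy++)
		printf("PHY %c -> ddc pin %d\n", 'A' + phy, adls_ddc_pin(phy));
	return 0;
}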
+
static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
enum port port)
{
@@ -3179,10 +3214,14 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
return ddc_pin;
}
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
+ if (HAS_PCH_ADP(dev_priv))
+ ddc_pin = adls_port_to_ddc_pin(dev_priv, port);
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
ddc_pin = dg1_port_to_ddc_pin(dev_priv, port);
else if (IS_ROCKETLAKE(dev_priv))
ddc_pin = rkl_port_to_ddc_pin(dev_priv, port);
+ else if (IS_GEN9_BC(dev_priv) && HAS_PCH_TGP(dev_priv))
+ ddc_pin = gen9bc_tgp_port_to_ddc_pin(dev_priv, port);
else if (HAS_PCH_MCC(dev_priv))
ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index 1c939f9c9bc9..7f3c638c8950 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -80,6 +80,7 @@ static struct platform_device *
lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct platform_device_info pinfo = {};
struct resource *rsc;
struct platform_device *platdev;
@@ -99,9 +100,9 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
rsc[0].flags = IORESOURCE_IRQ;
rsc[0].name = "hdmi-lpe-audio-irq";
- rsc[1].start = pci_resource_start(dev->pdev, 0) +
+ rsc[1].start = pci_resource_start(pdev, 0) +
I915_HDMI_LPE_AUDIO_BASE;
- rsc[1].end = pci_resource_start(dev->pdev, 0) +
+ rsc[1].end = pci_resource_start(pdev, 0) +
I915_HDMI_LPE_AUDIO_BASE + I915_HDMI_LPE_AUDIO_SIZE - 1;
rsc[1].flags = IORESOURCE_MEM;
rsc[1].name = "hdmi-lpe-audio-mmio";
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 4f77cf849171..dfd724e506b5 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -247,7 +247,7 @@ static int swsci(struct drm_i915_private *dev_priv,
u32 function, u32 parm, u32 *parm_out)
{
struct opregion_swsci *swsci = dev_priv->opregion.swsci;
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u32 main_function, sub_function, scic;
u16 swsci_val;
u32 dslp;
@@ -807,7 +807,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
if (!name || !*name)
return -ENOENT;
- ret = request_firmware(&fw, name, &dev_priv->drm.pdev->dev);
+ ret = request_firmware(&fw, name, dev_priv->drm.dev);
if (ret) {
drm_err(&dev_priv->drm,
"Requesting VBT firmware \"%s\" failed (%d)\n",
@@ -840,7 +840,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
int intel_opregion_setup(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->opregion;
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u32 asls, mboxes;
char buf[sizeof(OPREGION_SIGNATURE)];
int err = 0;
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index f455040fa989..ef8f44f5e751 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -203,7 +203,7 @@ struct intel_overlay {
static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
bool enable)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u8 val;
/* WA_OVERLAY_CLKGATE:alm */
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 5fdf52643150..4653b5ef382f 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -596,7 +596,7 @@ static u32 i9xx_get_backlight(struct intel_connector *connector, enum pipe unuse
if (panel->backlight.combination_mode) {
u8 lbpc;
- pci_read_config_byte(dev_priv->drm.pdev, LBPC, &lbpc);
+ pci_read_config_byte(to_pci_dev(dev_priv->drm.dev), LBPC, &lbpc);
val *= lbpc;
}
@@ -664,7 +664,7 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32
lbpc = level * 0xfe / panel->backlight.pwm_level_max + 1;
level /= lbpc;
- pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc);
+ pci_write_config_byte(to_pci_dev(dev_priv->drm.dev), LBPC, lbpc);
}
if (IS_GEN(dev_priv, 4)) {
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index c4867a8020a5..f20ba71f4307 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dp.h"
+#include "intel_dpll.h"
#include "intel_pps.h"
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 850cb7f5b332..cd434285e3b7 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -32,6 +32,7 @@
#include "intel_hdmi.h"
#include "intel_psr.h"
#include "intel_sprite.h"
+#include "skl_universal_plane.h"
/**
* DOC: Panel Self Refresh (PSR/SRD)
@@ -80,9 +81,11 @@
* use page flips.
*/
-static bool psr_global_enabled(struct drm_i915_private *i915)
+static bool psr_global_enabled(struct intel_dp *intel_dp)
{
- switch (i915->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DEFAULT:
return i915->params.enable_psr;
case I915_PSR_DEBUG_DISABLE:
@@ -92,9 +95,9 @@ static bool psr_global_enabled(struct drm_i915_private *i915)
}
}
-static bool psr2_global_enabled(struct drm_i915_private *dev_priv)
+static bool psr2_global_enabled(struct intel_dp *intel_dp)
{
- switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
+ switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DISABLE:
case I915_PSR_DEBUG_FORCE_PSR1:
return false;
@@ -103,11 +106,12 @@ static bool psr2_global_enabled(struct drm_i915_private *dev_priv)
}
}
-static void psr_irq_control(struct drm_i915_private *dev_priv)
+static void psr_irq_control(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum transcoder trans_shift;
- u32 mask, val;
i915_reg_t imr_reg;
+ u32 mask, val;
/*
* gen12+ has registers relative to transcoder and one per transcoder
@@ -116,14 +120,14 @@ static void psr_irq_control(struct drm_i915_private *dev_priv)
*/
if (INTEL_GEN(dev_priv) >= 12) {
trans_shift = 0;
- imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
+ imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
} else {
- trans_shift = dev_priv->psr.transcoder;
+ trans_shift = intel_dp->psr.transcoder;
imr_reg = EDP_PSR_IMR;
}
mask = EDP_PSR_ERROR(trans_shift);
- if (dev_priv->psr.debug & I915_PSR_DEBUG_IRQ)
+ if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
mask |= EDP_PSR_POST_EXIT(trans_shift) |
EDP_PSR_PRE_ENTRY(trans_shift);
@@ -172,30 +176,31 @@ static void psr_event_print(struct drm_i915_private *i915,
drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
}
-void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
+void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
{
- enum transcoder cpu_transcoder = dev_priv->psr.transcoder;
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ ktime_t time_ns = ktime_get();
enum transcoder trans_shift;
i915_reg_t imr_reg;
- ktime_t time_ns = ktime_get();
if (INTEL_GEN(dev_priv) >= 12) {
trans_shift = 0;
- imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
+ imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
} else {
- trans_shift = dev_priv->psr.transcoder;
+ trans_shift = intel_dp->psr.transcoder;
imr_reg = EDP_PSR_IMR;
}
if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
- dev_priv->psr.last_entry_attempt = time_ns;
+ intel_dp->psr.last_entry_attempt = time_ns;
drm_dbg_kms(&dev_priv->drm,
"[transcoder %s] PSR entry attempt in 2 vblanks\n",
transcoder_name(cpu_transcoder));
}
if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
- dev_priv->psr.last_exit = time_ns;
+ intel_dp->psr.last_exit = time_ns;
drm_dbg_kms(&dev_priv->drm,
"[transcoder %s] PSR exit completed\n",
transcoder_name(cpu_transcoder));
@@ -203,7 +208,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
if (INTEL_GEN(dev_priv) >= 9) {
u32 val = intel_de_read(dev_priv,
PSR_EVENT(cpu_transcoder));
- bool psr2_enabled = dev_priv->psr.psr2_enabled;
+ bool psr2_enabled = intel_dp->psr.psr2_enabled;
intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
val);
@@ -217,7 +222,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
transcoder_name(cpu_transcoder));
- dev_priv->psr.irq_aux_error = true;
+ intel_dp->psr.irq_aux_error = true;
/*
* If this interruption is not masked it will keep
@@ -231,7 +236,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
val |= EDP_PSR_ERROR(trans_shift);
intel_de_write(dev_priv, imr_reg, val);
- schedule_work(&dev_priv->psr.work);
+ schedule_work(&intel_dp->psr.work);
}
}
@@ -292,12 +297,6 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv =
to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
- if (dev_priv->psr.dp) {
- drm_warn(&dev_priv->drm,
- "More than one eDP panel found, PSR support should be extended\n");
- return;
- }
-
drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
sizeof(intel_dp->psr_dpcd));
@@ -318,12 +317,10 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
return;
}
- dev_priv->psr.sink_support = true;
- dev_priv->psr.sink_sync_latency =
+ intel_dp->psr.sink_support = true;
+ intel_dp->psr.sink_sync_latency =
intel_dp_get_sink_sync_latency(intel_dp);
- dev_priv->psr.dp = intel_dp;
-
if (INTEL_GEN(dev_priv) >= 9 &&
(intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
bool y_req = intel_dp->psr_dpcd[1] &
@@ -341,14 +338,14 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
* Y-coordinate requirement panels we would need to enable
* GTC first.
*/
- dev_priv->psr.sink_psr2_support = y_req && alpm;
+ intel_dp->psr.sink_psr2_support = y_req && alpm;
drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
- dev_priv->psr.sink_psr2_support ? "" : "not ");
+ intel_dp->psr.sink_psr2_support ? "" : "not ");
- if (dev_priv->psr.sink_psr2_support) {
- dev_priv->psr.colorimetry_support =
+ if (intel_dp->psr.sink_psr2_support) {
+ intel_dp->psr.colorimetry_support =
intel_dp_get_colorimetry_status(intel_dp);
- dev_priv->psr.su_x_granularity =
+ intel_dp->psr.su_x_granularity =
intel_dp_get_su_x_granulartiy(intel_dp);
}
}
@@ -374,7 +371,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
BUILD_BUG_ON(sizeof(aux_msg) > 20);
for (i = 0; i < sizeof(aux_msg); i += 4)
intel_de_write(dev_priv,
- EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
+ EDP_PSR_AUX_DATA(intel_dp->psr.transcoder, i >> 2),
intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
@@ -385,7 +382,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
/* Select only valid bits for SRD_AUX_CTL */
aux_ctl &= psr_aux_mask;
- intel_de_write(dev_priv, EDP_PSR_AUX_CTL(dev_priv->psr.transcoder),
+ intel_de_write(dev_priv, EDP_PSR_AUX_CTL(intel_dp->psr.transcoder),
aux_ctl);
}
@@ -395,14 +392,14 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
u8 dpcd_val = DP_PSR_ENABLE;
/* Enable ALPM at sink for psr2 */
- if (dev_priv->psr.psr2_enabled) {
+ if (intel_dp->psr.psr2_enabled) {
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
DP_ALPM_ENABLE |
DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
} else {
- if (dev_priv->psr.link_standby)
+ if (intel_dp->psr.link_standby)
dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
if (INTEL_GEN(dev_priv) >= 8)
@@ -465,7 +462,7 @@ static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
* off-by-one issue that HW has in some cases.
*/
idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
- idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
+ idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
idle_frames = 0xf;
@@ -485,7 +482,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
if (IS_HASWELL(dev_priv))
val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
- if (dev_priv->psr.link_standby)
+ if (intel_dp->psr.link_standby)
val |= EDP_PSR_LINK_STANDBY;
val |= intel_psr1_get_tp_time(intel_dp);
@@ -493,9 +490,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
if (INTEL_GEN(dev_priv) >= 8)
val |= EDP_PSR_CRC_ENABLE;
- val |= (intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) &
+ val |= (intel_de_read(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder)) &
EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
- intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), val);
+ intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), val);
}
static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
@@ -530,7 +527,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
val |= EDP_Y_COORDINATE_ENABLE;
- val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
+ val |= EDP_PSR2_FRAME_BEFORE_SU(intel_dp->psr.sink_sync_latency + 1);
val |= intel_psr2_get_tp_time(intel_dp);
if (INTEL_GEN(dev_priv) >= 12) {
@@ -549,29 +546,29 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
val |= EDP_PSR2_FAST_WAKE(7);
}
- if (dev_priv->psr.psr2_sel_fetch_enabled) {
+ if (intel_dp->psr.psr2_sel_fetch_enabled) {
/* WA 1408330847 */
- if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) ||
+ if (IS_TGL_DISP_STEPPING(dev_priv, STEP_A0, STEP_A0) ||
IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0))
intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
DIS_RAM_BYPASS_PSR2_MAN_TRACK,
DIS_RAM_BYPASS_PSR2_MAN_TRACK);
intel_de_write(dev_priv,
- PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder),
+ PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
PSR2_MAN_TRK_CTL_ENABLE);
} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
intel_de_write(dev_priv,
- PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder), 0);
+ PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0);
}
/*
* PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
* recommending keep this bit unset while PSR2 is enabled.
*/
- intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), 0);
+ intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), 0);
- intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+ intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
}
static bool
@@ -594,55 +591,58 @@ static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
drm_mode_vrefresh(&cstate->hw.adjusted_mode));
}
-static void psr2_program_idle_frames(struct drm_i915_private *dev_priv,
+static void psr2_program_idle_frames(struct intel_dp *intel_dp,
u32 idle_frames)
{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 val;
idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
- val = intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder));
val &= ~EDP_PSR2_IDLE_FRAME_MASK;
val |= idle_frames;
- intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+ intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
}
-static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv)
+static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
{
- psr2_program_idle_frames(dev_priv, 0);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ psr2_program_idle_frames(intel_dp, 0);
intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}
-static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv)
+static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
{
- struct intel_dp *intel_dp = dev_priv->psr.dp;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
- psr2_program_idle_frames(dev_priv, psr_compute_idle_frames(intel_dp));
+ psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
}
static void tgl_dc3co_disable_work(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), psr.dc3co_work.work);
+ struct intel_dp *intel_dp =
+ container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
- mutex_lock(&dev_priv->psr.lock);
+ mutex_lock(&intel_dp->psr.lock);
/* If delayed work is pending, it is not idle */
- if (delayed_work_pending(&dev_priv->psr.dc3co_work))
+ if (delayed_work_pending(&intel_dp->psr.dc3co_work))
goto unlock;
- tgl_psr2_disable_dc3co(dev_priv);
+ tgl_psr2_disable_dc3co(intel_dp);
unlock:
- mutex_unlock(&dev_priv->psr.lock);
+ mutex_unlock(&intel_dp->psr.lock);
}
-static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv)
+static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
{
- if (!dev_priv->psr.dc3co_enabled)
+ if (!intel_dp->psr.dc3co_enabled)
return;
- cancel_delayed_work(&dev_priv->psr.dc3co_work);
+ cancel_delayed_work(&intel_dp->psr.dc3co_work);
/* Before PSR2 exit disallow dc3co*/
- tgl_psr2_disable_dc3co(dev_priv);
+ tgl_psr2_disable_dc3co(intel_dp);
}
static void
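
psr2_program_idle_frames(), tgl_psr2_enable_dc3co() and tgl_psr2_disable_dc3co() above now take the intel_dp directly: entering DC3CO programs zero PSR2 idle frames and retargets the DC state, leaving it restores DC6 and the computed idle-frame count. A compact standalone model of that enable/disable pairing follows; the names and the DC-state representation are invented for illustration.

    #include <stdio.h>

    enum dc_state { DC_UPTO_DC6, DC_DC3CO };

    struct model_psr {
            unsigned int idle_frames;
            unsigned int sink_sync_latency;
            enum dc_state target_dc;
    };

    /* entering DC3CO: no idle-frame based entry, the DMC drives it instead */
    static void model_enable_dc3co(struct model_psr *psr)
    {
            psr->idle_frames = 0;
            psr->target_dc = DC_DC3CO;
    }

    /* leaving DC3CO: fall back to DC6 and the normal idle-frame threshold */
    static void model_disable_dc3co(struct model_psr *psr)
    {
            psr->target_dc = DC_UPTO_DC6;
            psr->idle_frames = psr->sink_sync_latency + 1 > 6 ?
                               psr->sink_sync_latency + 1 : 6;
    }

    int main(void)
    {
            struct model_psr psr = { .sink_sync_latency = 3 };

            model_enable_dc3co(&psr);
            printf("dc3co: idle_frames=%u\n", psr.idle_frames);
            model_disable_dc3co(&psr);
            printf("dc6:   idle_frames=%u\n", psr.idle_frames);
            return 0;
    }
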
@@ -654,6 +654,13 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 exit_scanlines;
+ /*
+ * DMC's DC3CO exit mechanism has an issue with Selective Fetch.
+ * TODO: when the issue is addressed, this restriction should be removed.
+ */
+ if (crtc_state->enable_psr2_sel_fetch)
+ return;
+
if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO))
return;
@@ -684,7 +691,8 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
struct intel_plane *plane;
int i;
- if (!dev_priv->params.enable_psr2_sel_fetch) {
+ if (!dev_priv->params.enable_psr2_sel_fetch &&
+ intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 sel fetch not enabled, disabled by parameter\n");
return false;
@@ -715,9 +723,15 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
- if (!dev_priv->psr.sink_psr2_support)
+ if (!intel_dp->psr.sink_psr2_support)
return false;
+ /* JSL and EHL only support eDP 1.3 */
+ if (IS_JSL_EHL(dev_priv)) {
+ drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
+ return false;
+ }
+
if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not supported in transcoder %s\n",
@@ -725,7 +739,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
- if (!psr2_global_enabled(dev_priv)) {
+ if (!psr2_global_enabled(intel_dp)) {
drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
return false;
}
@@ -774,10 +788,10 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
* only need to validate the SU block width is a multiple of
* x granularity.
*/
- if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
+ if (crtc_hdisplay % intel_dp->psr.su_x_granularity) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
- crtc_hdisplay, dev_priv->psr.su_x_granularity);
+ crtc_hdisplay, intel_dp->psr.su_x_granularity);
return false;
}
@@ -806,7 +820,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
@@ -819,30 +832,15 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
if (crtc_state->vrr.enable)
return;
- if (!CAN_PSR(dev_priv))
+ if (!CAN_PSR(intel_dp))
return;
- if (intel_dp != dev_priv->psr.dp)
- return;
-
- if (!psr_global_enabled(dev_priv)) {
+ if (!psr_global_enabled(intel_dp)) {
drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
return;
}
- /*
- * HSW spec explicitly says PSR is tied to port A.
- * BDW+ platforms have a instance of PSR registers per transcoder but
- * for now it only supports one instance of PSR, so lets keep it
- * hardcoded to PORT_A
- */
- if (dig_port->base.port != PORT_A) {
- drm_dbg_kms(&dev_priv->drm,
- "PSR condition failed: Port not supported\n");
- return;
- }
-
- if (dev_priv->psr.sink_not_reliable) {
+ if (intel_dp->psr.sink_not_reliable) {
drm_dbg_kms(&dev_priv->drm,
"PSR sink implementation is not reliable\n");
return;
@@ -878,23 +876,24 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
static void intel_psr_activate(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum transcoder transcoder = intel_dp->psr.transcoder;
- if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder))
+ if (transcoder_has_psr2(dev_priv, transcoder))
drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);
+ intel_de_read(dev_priv, EDP_PSR2_CTL(transcoder)) & EDP_PSR2_ENABLE);
drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
- drm_WARN_ON(&dev_priv->drm, dev_priv->psr.active);
- lockdep_assert_held(&dev_priv->psr.lock);
+ intel_de_read(dev_priv, EDP_PSR_CTL(transcoder)) & EDP_PSR_ENABLE);
+ drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
+ lockdep_assert_held(&intel_dp->psr.lock);
/* psr1 and psr2 are mutually exclusive.*/
- if (dev_priv->psr.psr2_enabled)
+ if (intel_dp->psr.psr2_enabled)
hsw_activate_psr2(intel_dp);
else
hsw_activate_psr1(intel_dp);
- dev_priv->psr.active = true;
+ intel_dp->psr.active = true;
}
static void intel_psr_enable_source(struct intel_dp *intel_dp,
@@ -910,7 +909,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_psr_setup_aux(intel_dp);
- if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
+ if (intel_dp->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
!IS_GEMINILAKE(dev_priv))) {
i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
u32 chicken = intel_de_read(dev_priv, reg);
@@ -934,10 +933,10 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (INTEL_GEN(dev_priv) < 11)
mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
- intel_de_write(dev_priv, EDP_PSR_DEBUG(dev_priv->psr.transcoder),
+ intel_de_write(dev_priv, EDP_PSR_DEBUG(intel_dp->psr.transcoder),
mask);
- psr_irq_control(dev_priv);
+ psr_irq_control(intel_dp);
if (crtc_state->dc3co_exitline) {
u32 val;
@@ -955,30 +954,30 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
- dev_priv->psr.psr2_sel_fetch_enabled ?
+ intel_dp->psr.psr2_sel_fetch_enabled ?
IGNORE_PSR2_HW_TRACKING : 0);
}
-static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
+static void intel_psr_enable_locked(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_dp *intel_dp = dev_priv->psr.dp;
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
u32 val;
- drm_WARN_ON(&dev_priv->drm, dev_priv->psr.enabled);
+ drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
- dev_priv->psr.psr2_enabled = crtc_state->has_psr2;
- dev_priv->psr.busy_frontbuffer_bits = 0;
- dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
- dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
- dev_priv->psr.transcoder = crtc_state->cpu_transcoder;
+ intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
+ intel_dp->psr.busy_frontbuffer_bits = 0;
+ intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
+ intel_dp->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
+ intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
/* DC5/DC6 requires at least 6 idle frames */
val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
- dev_priv->psr.dc3co_exit_delay = val;
- dev_priv->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
+ intel_dp->psr.dc3co_exit_delay = val;
+ intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
/*
* If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
@@ -990,27 +989,27 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
*/
if (INTEL_GEN(dev_priv) >= 12) {
val = intel_de_read(dev_priv,
- TRANS_PSR_IIR(dev_priv->psr.transcoder));
+ TRANS_PSR_IIR(intel_dp->psr.transcoder));
val &= EDP_PSR_ERROR(0);
} else {
val = intel_de_read(dev_priv, EDP_PSR_IIR);
- val &= EDP_PSR_ERROR(dev_priv->psr.transcoder);
+ val &= EDP_PSR_ERROR(intel_dp->psr.transcoder);
}
if (val) {
- dev_priv->psr.sink_not_reliable = true;
+ intel_dp->psr.sink_not_reliable = true;
drm_dbg_kms(&dev_priv->drm,
"PSR interruption error set, not enabling PSR\n");
return;
}
drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
- dev_priv->psr.psr2_enabled ? "2" : "1");
+ intel_dp->psr.psr2_enabled ? "2" : "1");
intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
- &dev_priv->psr.vsc);
- intel_write_dp_vsc_sdp(encoder, crtc_state, &dev_priv->psr.vsc);
+ &intel_dp->psr.vsc);
+ intel_write_dp_vsc_sdp(encoder, crtc_state, &intel_dp->psr.vsc);
intel_psr_enable_sink(intel_dp);
intel_psr_enable_source(intel_dp, crtc_state);
- dev_priv->psr.enabled = true;
+ intel_dp->psr.enabled = true;
intel_psr_activate(intel_dp);
}
@@ -1029,7 +1028,7 @@ void intel_psr_enable(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (!CAN_PSR(dev_priv) || dev_priv->psr.dp != intel_dp)
+ if (!CAN_PSR(intel_dp))
return;
if (!crtc_state->has_psr)
@@ -1037,46 +1036,47 @@ void intel_psr_enable(struct intel_dp *intel_dp,
drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp);
- mutex_lock(&dev_priv->psr.lock);
- intel_psr_enable_locked(dev_priv, crtc_state, conn_state);
- mutex_unlock(&dev_priv->psr.lock);
+ mutex_lock(&intel_dp->psr.lock);
+ intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
+ mutex_unlock(&intel_dp->psr.lock);
}
-static void intel_psr_exit(struct drm_i915_private *dev_priv)
+static void intel_psr_exit(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 val;
- if (!dev_priv->psr.active) {
- if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) {
+ if (!intel_dp->psr.active) {
+ if (transcoder_has_psr2(dev_priv, intel_dp->psr.transcoder)) {
val = intel_de_read(dev_priv,
- EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ EDP_PSR2_CTL(intel_dp->psr.transcoder));
drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
}
val = intel_de_read(dev_priv,
- EDP_PSR_CTL(dev_priv->psr.transcoder));
+ EDP_PSR_CTL(intel_dp->psr.transcoder));
drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
return;
}
- if (dev_priv->psr.psr2_enabled) {
- tgl_disallow_dc3co_on_psr2_exit(dev_priv);
+ if (intel_dp->psr.psr2_enabled) {
+ tgl_disallow_dc3co_on_psr2_exit(intel_dp);
val = intel_de_read(dev_priv,
- EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ EDP_PSR2_CTL(intel_dp->psr.transcoder));
drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
val &= ~EDP_PSR2_ENABLE;
intel_de_write(dev_priv,
- EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+ EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
} else {
val = intel_de_read(dev_priv,
- EDP_PSR_CTL(dev_priv->psr.transcoder));
+ EDP_PSR_CTL(intel_dp->psr.transcoder));
drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
val &= ~EDP_PSR_ENABLE;
intel_de_write(dev_priv,
- EDP_PSR_CTL(dev_priv->psr.transcoder), val);
+ EDP_PSR_CTL(intel_dp->psr.transcoder), val);
}
- dev_priv->psr.active = false;
+ intel_dp->psr.active = false;
}
static void intel_psr_disable_locked(struct intel_dp *intel_dp)
@@ -1085,21 +1085,21 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
i915_reg_t psr_status;
u32 psr_status_mask;
- lockdep_assert_held(&dev_priv->psr.lock);
+ lockdep_assert_held(&intel_dp->psr.lock);
- if (!dev_priv->psr.enabled)
+ if (!intel_dp->psr.enabled)
return;
drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
- dev_priv->psr.psr2_enabled ? "2" : "1");
+ intel_dp->psr.psr2_enabled ? "2" : "1");
- intel_psr_exit(dev_priv);
+ intel_psr_exit(intel_dp);
- if (dev_priv->psr.psr2_enabled) {
- psr_status = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
+ if (intel_dp->psr.psr2_enabled) {
+ psr_status = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- psr_status = EDP_PSR_STATUS(dev_priv->psr.transcoder);
+ psr_status = EDP_PSR_STATUS(intel_dp->psr.transcoder);
psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
}
@@ -1109,8 +1109,8 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
/* WA 1408330847 */
- if (dev_priv->psr.psr2_sel_fetch_enabled &&
- (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) ||
+ if (intel_dp->psr.psr2_sel_fetch_enabled &&
+ (IS_TGL_DISP_STEPPING(dev_priv, STEP_A0, STEP_A0) ||
IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0)))
intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);
@@ -1118,10 +1118,10 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
/* Disable PSR on Sink */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
- if (dev_priv->psr.psr2_enabled)
+ if (intel_dp->psr.psr2_enabled)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
- dev_priv->psr.enabled = false;
+ intel_dp->psr.enabled = false;
}
/**
@@ -1139,20 +1139,22 @@ void intel_psr_disable(struct intel_dp *intel_dp,
if (!old_crtc_state->has_psr)
return;
- if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(dev_priv)))
+ if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
return;
- mutex_lock(&dev_priv->psr.lock);
+ mutex_lock(&intel_dp->psr.lock);
intel_psr_disable_locked(intel_dp);
- mutex_unlock(&dev_priv->psr.lock);
- cancel_work_sync(&dev_priv->psr.work);
- cancel_delayed_work_sync(&dev_priv->psr.dc3co_work);
+ mutex_unlock(&intel_dp->psr.lock);
+ cancel_work_sync(&intel_dp->psr.work);
+ cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
}
-static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
+static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
if (IS_TIGERLAKE(dev_priv))
/*
* Writes to CURSURFLIVE in TGL are causing IOMMU errors and
@@ -1166,7 +1168,7 @@ static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
* So using this workaround until this issue is root caused
* and a better fix is found.
*/
- intel_psr_exit(dev_priv);
+ intel_psr_exit(intel_dp);
else if (INTEL_GEN(dev_priv) >= 9)
/*
* Display WA #0884: skl+
@@ -1177,13 +1179,13 @@ static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
* but it makes more sense write to the current active
* pipe.
*/
- intel_de_write(dev_priv, CURSURFLIVE(dev_priv->psr.pipe), 0);
+ intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
else
/*
* A write to CURSURFLIVE do not cause HW tracking to exit PSR
* on older gens so doing the manual exit instead.
*/
- intel_psr_exit(dev_priv);
+ intel_psr_exit(intel_dp);
}
void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
@@ -1231,15 +1233,13 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct i915_psr *psr = &dev_priv->psr;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
if (!HAS_PSR2_SEL_FETCH(dev_priv) ||
!crtc_state->enable_psr2_sel_fetch)
return;
- intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(psr->transcoder),
+ intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder),
crtc_state->psr2_man_track_ctl);
}
@@ -1435,29 +1435,30 @@ void intel_psr_update(struct intel_dp *intel_dp,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct i915_psr *psr = &dev_priv->psr;
+ struct intel_psr *psr = &intel_dp->psr;
bool enable, psr2_enable;
- if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
+ if (!CAN_PSR(intel_dp))
return;
- mutex_lock(&dev_priv->psr.lock);
+ mutex_lock(&intel_dp->psr.lock);
enable = crtc_state->has_psr;
psr2_enable = crtc_state->has_psr2;
- if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
+ if (enable == psr->enabled && psr2_enable == psr->psr2_enabled &&
+ crtc_state->enable_psr2_sel_fetch == psr->psr2_sel_fetch_enabled) {
/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
if (crtc_state->crc_enabled && psr->enabled)
- psr_force_hw_tracking_exit(dev_priv);
+ psr_force_hw_tracking_exit(intel_dp);
else if (INTEL_GEN(dev_priv) < 9 && psr->enabled) {
/*
* Activate PSR again after a force exit when enabling
* CRC in older gens
*/
- if (!dev_priv->psr.active &&
- !dev_priv->psr.busy_frontbuffer_bits)
- schedule_work(&dev_priv->psr.work);
+ if (!intel_dp->psr.active &&
+ !intel_dp->psr.busy_frontbuffer_bits)
+ schedule_work(&intel_dp->psr.work);
}
goto unlock;
@@ -1467,34 +1468,23 @@ void intel_psr_update(struct intel_dp *intel_dp,
intel_psr_disable_locked(intel_dp);
if (enable)
- intel_psr_enable_locked(dev_priv, crtc_state, conn_state);
+ intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
unlock:
- mutex_unlock(&dev_priv->psr.lock);
+ mutex_unlock(&intel_dp->psr.lock);
}
/**
- * intel_psr_wait_for_idle - wait for PSR1 to idle
- * @new_crtc_state: new CRTC state
+ * psr_wait_for_idle - wait for PSR1 to idle
+ * @intel_dp: Intel DP
* @out_value: PSR status in case of failure
*
- * This function is expected to be called from pipe_update_start() where it is
- * not expected to race with PSR enable or disable.
- *
* Returns: 0 on success or -ETIMEOUT if PSR status does not idle.
+ *
*/
-int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
- u32 *out_value)
+static int psr_wait_for_idle(struct intel_dp *intel_dp, u32 *out_value)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
- return 0;
-
- /* FIXME: Update this for PSR2 if we need to wait for idle */
- if (READ_ONCE(dev_priv->psr.psr2_enabled))
- return 0;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
/*
* From bspec: Panel Self Refresh (BDW+)
@@ -1502,32 +1492,68 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
* exit training time + 1.5 ms of aux channel handshake. 50 ms is
* defensive enough to cover everything.
*/
-
return __intel_wait_for_register(&dev_priv->uncore,
- EDP_PSR_STATUS(dev_priv->psr.transcoder),
+ EDP_PSR_STATUS(intel_dp->psr.transcoder),
EDP_PSR_STATUS_STATE_MASK,
EDP_PSR_STATUS_STATE_IDLE, 2, 50,
out_value);
}
-static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
+/**
+ * intel_psr_wait_for_idle - wait for PSR1 to idle
+ * @new_crtc_state: new CRTC state
+ *
+ * This function is expected to be called from pipe_update_start() where it is
+ * not expected to race with PSR enable or disable.
+ */
+void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
+ struct intel_encoder *encoder;
+
+ if (!new_crtc_state->has_psr)
+ return;
+
+ for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
+ new_crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ u32 psr_status;
+
+ mutex_lock(&intel_dp->psr.lock);
+ if (!intel_dp->psr.enabled ||
+ (intel_dp->psr.enabled && intel_dp->psr.psr2_enabled)) {
+ mutex_unlock(&intel_dp->psr.lock);
+ continue;
+ }
+
+ /* at this point only PSR1 can be enabled */
+ if (psr_wait_for_idle(intel_dp, &psr_status))
+ drm_err(&dev_priv->drm,
+ "PSR idle timed out 0x%x, atomic update may fail\n",
+ psr_status);
+ mutex_unlock(&intel_dp->psr.lock);
+ }
+}
+
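
The new intel_psr_wait_for_idle() walks every PSR-capable encoder on the CRTC via encoder_mask, takes each encoder's own psr.lock, skips encoders running PSR2, and only then polls for PSR1 idle. The loop below is a simplified standalone illustration of that per-encoder filtering; the structures are stand-ins, not the i915 ones, and locking is omitted.

    #include <stdbool.h>
    #include <stdio.h>

    struct model_dp {
            const char *name;
            bool psr_enabled;
            bool psr2_enabled;
    };

    /* stand-in for the encoder_mask walk: wait only on encoders that
     * currently run PSR1 */
    static void wait_for_psr1_idle(struct model_dp *encoders, int n)
    {
            for (int i = 0; i < n; i++) {
                    struct model_dp *dp = &encoders[i];

                    if (!dp->psr_enabled || dp->psr2_enabled)
                            continue;       /* nothing to wait for */

                    printf("waiting for PSR1 idle on %s\n", dp->name);
            }
    }

    int main(void)
    {
            struct model_dp encoders[] = {
                    { "eDP-1", true,  false },      /* PSR1 -> wait */
                    { "eDP-2", true,  true  },      /* PSR2 -> skip */
                    { "DP-1",  false, false },      /* off  -> skip */
            };

            wait_for_psr1_idle(encoders, 3);
            return 0;
    }
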
+static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
i915_reg_t reg;
u32 mask;
int err;
- if (!dev_priv->psr.enabled)
+ if (!intel_dp->psr.enabled)
return false;
- if (dev_priv->psr.psr2_enabled) {
- reg = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
+ if (intel_dp->psr.psr2_enabled) {
+ reg = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- reg = EDP_PSR_STATUS(dev_priv->psr.transcoder);
+ reg = EDP_PSR_STATUS(intel_dp->psr.transcoder);
mask = EDP_PSR_STATUS_STATE_MASK;
}
- mutex_unlock(&dev_priv->psr.lock);
+ mutex_unlock(&intel_dp->psr.lock);
err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
if (err)
@@ -1535,8 +1561,8 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
"Timed out waiting for PSR Idle for re-enable\n");
/* After the unlocked wait, verify that PSR is still wanted! */
- mutex_lock(&dev_priv->psr.lock);
- return err == 0 && dev_priv->psr.enabled;
+ mutex_lock(&intel_dp->psr.lock);
+ return err == 0 && intel_dp->psr.enabled;
}
static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
@@ -1602,33 +1628,34 @@ retry:
return err;
}
-int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
+int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
u32 old_mode;
int ret;
if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
- mode > I915_PSR_DEBUG_FORCE_PSR1) {
+ mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
return -EINVAL;
}
- ret = mutex_lock_interruptible(&dev_priv->psr.lock);
+ ret = mutex_lock_interruptible(&intel_dp->psr.lock);
if (ret)
return ret;
- old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
- dev_priv->psr.debug = val;
+ old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
+ intel_dp->psr.debug = val;
/*
* Do it right away if it's already enabled, otherwise it will be done
* when enabling the source.
*/
- if (dev_priv->psr.enabled)
- psr_irq_control(dev_priv);
+ if (intel_dp->psr.enabled)
+ psr_irq_control(intel_dp);
- mutex_unlock(&dev_priv->psr.lock);
+ mutex_unlock(&intel_dp->psr.lock);
if (old_mode != mode)
ret = intel_psr_fastset_force(dev_priv);
@@ -1636,28 +1663,28 @@ int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
return ret;
}
-static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
+static void intel_psr_handle_irq(struct intel_dp *intel_dp)
{
- struct i915_psr *psr = &dev_priv->psr;
+ struct intel_psr *psr = &intel_dp->psr;
- intel_psr_disable_locked(psr->dp);
+ intel_psr_disable_locked(intel_dp);
psr->sink_not_reliable = true;
/* let's make sure that sink is awaken */
- drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
static void intel_psr_work(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), psr.work);
+ struct intel_dp *intel_dp =
+ container_of(work, typeof(*intel_dp), psr.work);
- mutex_lock(&dev_priv->psr.lock);
+ mutex_lock(&intel_dp->psr.lock);
- if (!dev_priv->psr.enabled)
+ if (!intel_dp->psr.enabled)
goto unlock;
- if (READ_ONCE(dev_priv->psr.irq_aux_error))
- intel_psr_handle_irq(dev_priv);
+ if (READ_ONCE(intel_dp->psr.irq_aux_error))
+ intel_psr_handle_irq(intel_dp);
/*
* We have to make sure PSR is ready for re-enable
@@ -1665,7 +1692,7 @@ static void intel_psr_work(struct work_struct *work)
* PSR might take some time to get fully disabled
* and be ready for re-enable.
*/
- if (!__psr_wait_for_idle_locked(dev_priv))
+ if (!__psr_wait_for_idle_locked(intel_dp))
goto unlock;
/*
@@ -1673,12 +1700,12 @@ static void intel_psr_work(struct work_struct *work)
* recheck. Since psr_flush first clears this and then reschedules we
* won't ever miss a flush when bailing out here.
*/
- if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
+ if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
goto unlock;
- intel_psr_activate(dev_priv->psr.dp);
+ intel_psr_activate(intel_dp);
unlock:
- mutex_unlock(&dev_priv->psr.lock);
+ mutex_unlock(&intel_dp->psr.lock);
}
/**
@@ -1697,27 +1724,31 @@ unlock:
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits, enum fb_op_origin origin)
{
- if (!CAN_PSR(dev_priv))
- return;
+ struct intel_encoder *encoder;
if (origin == ORIGIN_FLIP)
return;
- mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.enabled) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_lock(&intel_dp->psr.lock);
+ if (!intel_dp->psr.enabled) {
+ mutex_unlock(&intel_dp->psr.lock);
+ continue;
+ }
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
- dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+ pipe_frontbuffer_bits &=
+ INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
+ intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
- if (frontbuffer_bits)
- intel_psr_exit(dev_priv);
+ if (pipe_frontbuffer_bits)
+ intel_psr_exit(intel_dp);
- mutex_unlock(&dev_priv->psr.lock);
+ mutex_unlock(&intel_dp->psr.lock);
+ }
}
-
/*
* When we will be completely rely on PSR2 S/W tracking in future,
* intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
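
intel_psr_invalidate() above no longer returns early for a single global PSR instance; it loops over every PSR-capable encoder and masks the frontbuffer bits against that encoder's pipe before accumulating busy bits. The standalone example below illustrates the per-pipe masking; the bit layout is invented for the example and does not match the real INTEL_FRONTBUFFER encoding.

    #include <stdio.h>

    /* invented layout: 8 frontbuffer bits per pipe, pipe A in bits 0-7,
     * pipe B in bits 8-15, and so on */
    #define FB_BITS_PER_PIPE        8
    #define FB_ALL_MASK(pipe)       (0xffu << ((pipe) * FB_BITS_PER_PIPE))

    struct model_psr {
            int pipe;
            unsigned int busy_frontbuffer_bits;
    };

    static void model_invalidate(struct model_psr *psr, unsigned int fb_bits)
    {
            unsigned int pipe_bits = fb_bits & FB_ALL_MASK(psr->pipe);

            psr->busy_frontbuffer_bits |= pipe_bits;
            if (pipe_bits)
                    printf("pipe %d: exiting PSR (bits 0x%x)\n",
                           psr->pipe, pipe_bits);
    }

    int main(void)
    {
            struct model_psr psr_a = { .pipe = 0 };  /* eDP on pipe A */
            struct model_psr psr_b = { .pipe = 1 };  /* second eDP on pipe B */
            unsigned int dirty = 0x00000103;         /* pipes A and B dirty */

            model_invalidate(&psr_a, dirty);
            model_invalidate(&psr_b, dirty);
            return 0;
    }
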
@@ -1725,15 +1756,15 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
* accordingly in future.
*/
static void
-tgl_dc3co_flush(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits, enum fb_op_origin origin)
+tgl_dc3co_flush(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
+ enum fb_op_origin origin)
{
- mutex_lock(&dev_priv->psr.lock);
+ mutex_lock(&intel_dp->psr.lock);
- if (!dev_priv->psr.dc3co_enabled)
+ if (!intel_dp->psr.dc3co_enabled)
goto unlock;
- if (!dev_priv->psr.psr2_enabled || !dev_priv->psr.active)
+ if (!intel_dp->psr.psr2_enabled || !intel_dp->psr.active)
goto unlock;
/*
@@ -1741,15 +1772,15 @@ tgl_dc3co_flush(struct drm_i915_private *dev_priv,
* when delayed work schedules that means display has been idle.
*/
if (!(frontbuffer_bits &
- INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe)))
+ INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
goto unlock;
- tgl_psr2_enable_dc3co(dev_priv);
- mod_delayed_work(system_wq, &dev_priv->psr.dc3co_work,
- dev_priv->psr.dc3co_exit_delay);
+ tgl_psr2_enable_dc3co(intel_dp);
+ mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
+ intel_dp->psr.dc3co_exit_delay);
unlock:
- mutex_unlock(&dev_priv->psr.lock);
+ mutex_unlock(&intel_dp->psr.lock);
}
/**
@@ -1768,46 +1799,69 @@ unlock:
void intel_psr_flush(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits, enum fb_op_origin origin)
{
- if (!CAN_PSR(dev_priv))
- return;
+ struct intel_encoder *encoder;
- if (origin == ORIGIN_FLIP) {
- tgl_dc3co_flush(dev_priv, frontbuffer_bits, origin);
- return;
- }
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.enabled) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
+ if (origin == ORIGIN_FLIP) {
+ tgl_dc3co_flush(intel_dp, frontbuffer_bits, origin);
+ continue;
+ }
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
- dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
+ mutex_lock(&intel_dp->psr.lock);
+ if (!intel_dp->psr.enabled) {
+ mutex_unlock(&intel_dp->psr.lock);
+ continue;
+ }
+
+ pipe_frontbuffer_bits &=
+ INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
+ intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
- /* By definition flush = invalidate + flush */
- if (frontbuffer_bits)
- psr_force_hw_tracking_exit(dev_priv);
+ /* By definition flush = invalidate + flush */
+ if (pipe_frontbuffer_bits)
+ psr_force_hw_tracking_exit(intel_dp);
- if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
- schedule_work(&dev_priv->psr.work);
- mutex_unlock(&dev_priv->psr.lock);
+ if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
+ schedule_work(&intel_dp->psr.work);
+ mutex_unlock(&intel_dp->psr.lock);
+ }
}
/**
* intel_psr_init - Init basic PSR work and mutex.
- * @dev_priv: i915 device private
+ * @intel_dp: Intel DP
*
- * This function is called only once at driver load to initialize basic
- * PSR stuff.
+ * This function is called after the connector has been initialized
+ * (connector initialization handles the connector capabilities) and
+ * sets up basic PSR state for each DP encoder.
*/
-void intel_psr_init(struct drm_i915_private *dev_priv)
+void intel_psr_init(struct intel_dp *intel_dp)
{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
if (!HAS_PSR(dev_priv))
return;
- if (!dev_priv->psr.sink_support)
+ /*
+ * HSW spec explicitly says PSR is tied to port A.
+ * BDW+ platforms have an instance of PSR registers per transcoder, but
+ * on BDW, GEN9 and GEN11 the HW team has only validated the eDP
+ * transcoder. For now only one instance of PSR is supported on BDW,
+ * GEN9 and GEN11, so let's keep it hardcoded to PORT_A for those.
+ * GEN12, however, supports an instance of PSR registers per transcoder.
+ */
+ if (INTEL_GEN(dev_priv) < 12 && dig_port->base.port != PORT_A) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR condition failed: Port not supported\n");
return;
+ }
+
+ intel_dp->psr.source_support = true;
if (IS_HASWELL(dev_priv))
/*
@@ -1824,14 +1878,14 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
/* Set link_standby x link_off defaults */
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
/* HSW and BDW require workarounds that we don't implement. */
- dev_priv->psr.link_standby = false;
+ intel_dp->psr.link_standby = false;
else if (INTEL_GEN(dev_priv) < 12)
/* For new platforms up to TGL let's respect VBT back again */
- dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
+ intel_dp->psr.link_standby = dev_priv->vbt.psr.full_link;
- INIT_WORK(&dev_priv->psr.work, intel_psr_work);
- INIT_DELAYED_WORK(&dev_priv->psr.dc3co_work, tgl_dc3co_disable_work);
- mutex_init(&dev_priv->psr.lock);
+ INIT_WORK(&intel_dp->psr.work, intel_psr_work);
+ INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
+ mutex_init(&intel_dp->psr.lock);
}
static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
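
intel_psr_init() now runs once per DP encoder and gates source support on the port: before GEN12 only PORT_A has validated PSR, while GEN12 has a PSR register instance per transcoder. A tiny standalone predicate capturing that gate is sketched below, with simplified parameters in place of the real platform checks.

    #include <stdbool.h>
    #include <stdio.h>

    enum port { PORT_A, PORT_B, PORT_C };

    /* simplified version of the port gate added in intel_psr_init() above:
     * pre-GEN12 hardware only has validated PSR on port A */
    static bool psr_source_supported(int gen, enum port port)
    {
            if (gen < 12 && port != PORT_A)
                    return false;
            return true;
    }

    int main(void)
    {
            printf("gen11 port B: %d\n", psr_source_supported(11, PORT_B)); /* 0 */
            printf("gen12 port B: %d\n", psr_source_supported(12, PORT_B)); /* 1 */
            return 0;
    }
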
@@ -1857,7 +1911,7 @@ static void psr_alpm_check(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct drm_dp_aux *aux = &intel_dp->aux;
- struct i915_psr *psr = &dev_priv->psr;
+ struct intel_psr *psr = &intel_dp->psr;
u8 val;
int r;
@@ -1884,7 +1938,7 @@ static void psr_alpm_check(struct intel_dp *intel_dp)
static void psr_capability_changed_check(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct i915_psr *psr = &dev_priv->psr;
+ struct intel_psr *psr = &intel_dp->psr;
u8 val;
int r;
@@ -1908,18 +1962,18 @@ static void psr_capability_changed_check(struct intel_dp *intel_dp)
void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct i915_psr *psr = &dev_priv->psr;
+ struct intel_psr *psr = &intel_dp->psr;
u8 status, error_status;
const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
DP_PSR_LINK_CRC_ERROR;
- if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
+ if (!CAN_PSR(intel_dp))
return;
mutex_lock(&psr->lock);
- if (!psr->enabled || psr->dp != intel_dp)
+ if (!psr->enabled)
goto exit;
if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
@@ -1962,15 +2016,14 @@ exit:
bool intel_psr_enabled(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
bool ret;
- if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
+ if (!CAN_PSR(intel_dp))
return false;
- mutex_lock(&dev_priv->psr.lock);
- ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
- mutex_unlock(&dev_priv->psr.lock);
+ mutex_lock(&intel_dp->psr.lock);
+ ret = intel_dp->psr.enabled;
+ mutex_unlock(&intel_dp->psr.lock);
return ret;
}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 0a517978e8af..0491a49ffd50 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -18,7 +18,6 @@ struct intel_atomic_state;
struct intel_plane_state;
struct intel_plane;
-#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
void intel_psr_enable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
@@ -28,20 +27,19 @@ void intel_psr_disable(struct intel_dp *intel_dp,
void intel_psr_update(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
-int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 value);
+int intel_psr_debug_set(struct intel_dp *intel_dp, u64 value);
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits,
enum fb_op_origin origin);
void intel_psr_flush(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits,
enum fb_op_origin origin);
-void intel_psr_init(struct drm_i915_private *dev_priv);
+void intel_psr_init(struct intel_dp *intel_dp);
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state);
-void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
+void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir);
void intel_psr_short_pulse(struct intel_dp *intel_dp);
-int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
- u32 *out_value);
+void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state);
bool intel_psr_enabled(struct intel_dp *intel_dp);
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
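
The device-wide CAN_PSR(dev_priv) macro is removed from this header while callers in intel_psr.c switch to CAN_PSR(intel_dp). The new definition is not part of this section; given the source_support and sink_support fields set earlier in the patch, it presumably checks both on the encoder. The sketch below is purely hypothetical and only illustrates that assumed shape.

    #include <stdbool.h>

    struct model_psr {
            bool source_support;    /* set when the port/platform can do PSR */
            bool sink_support;      /* set from the panel's DPCD */
    };

    struct model_dp {
            struct model_psr psr;
    };

    /* hypothetical per-encoder replacement for the old device-wide macro;
     * the real definition lives outside this section */
    static inline bool model_can_psr(const struct model_dp *dp)
    {
            return dp->psr.source_support && dp->psr.sink_support;
    }

    int main(void)
    {
            struct model_dp dp = { .psr = { true, true } };
            return model_can_psr(&dp) ? 0 : 1;
    }
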
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 46beb155d835..98dd787b00e3 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -160,7 +160,7 @@ static struct intel_quirk intel_quirks[] = {
void intel_init_quirks(struct drm_i915_private *i915)
{
- struct pci_dev *d = i915->drm.pdev;
+ struct pci_dev *d = to_pci_dev(i915->drm.dev);
int i;
for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 4eaa4aa86ecd..3fac60899d8e 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -3281,7 +3281,7 @@ static bool
intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
sdvo->ddc.owner = THIS_MODULE;
sdvo->ddc.class = I2C_CLASS_DDC;
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 993543334a1e..4cbdb8fd4bb1 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -45,252 +45,10 @@
#include "intel_atomic_plane.h"
#include "intel_display_types.h"
#include "intel_frontbuffer.h"
-#include "intel_pm.h"
-#include "intel_psr.h"
-#include "intel_dsi.h"
#include "intel_sprite.h"
#include "i9xx_plane.h"
#include "intel_vrr.h"
-int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
- int usecs)
-{
- /* paranoia */
- if (!adjusted_mode->crtc_htotal)
- return 1;
-
- return DIV_ROUND_UP(usecs * adjusted_mode->crtc_clock,
- 1000 * adjusted_mode->crtc_htotal);
-}
-
-static int intel_mode_vblank_start(const struct drm_display_mode *mode)
-{
- int vblank_start = mode->crtc_vblank_start;
-
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- vblank_start = DIV_ROUND_UP(vblank_start, 2);
-
- return vblank_start;
-}
-
-/**
- * intel_pipe_update_start() - start update of a set of display registers
- * @new_crtc_state: the new crtc state
- *
- * Mark the start of an update to pipe registers that should be updated
- * atomically regarding vblank. If the next vblank will happens within
- * the next 100 us, this function waits until the vblank passes.
- *
- * After a successful call to this function, interrupts will be disabled
- * until a subsequent call to intel_pipe_update_end(). That is done to
- * avoid random delays.
- */
-void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct drm_display_mode *adjusted_mode = &new_crtc_state->hw.adjusted_mode;
- long timeout = msecs_to_jiffies_timeout(1);
- int scanline, min, max, vblank_start;
- wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
- bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
- DEFINE_WAIT(wait);
- u32 psr_status;
-
- if (new_crtc_state->uapi.async_flip)
- return;
-
- if (new_crtc_state->vrr.enable)
- vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state);
- else
- vblank_start = intel_mode_vblank_start(adjusted_mode);
-
- /* FIXME needs to be calibrated sensibly */
- min = vblank_start - intel_usecs_to_scanlines(adjusted_mode,
- VBLANK_EVASION_TIME_US);
- max = vblank_start - 1;
-
- if (min <= 0 || max <= 0)
- goto irq_disable;
-
- if (drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base)))
- goto irq_disable;
-
- /*
- * Wait for psr to idle out after enabling the VBL interrupts
- * VBL interrupts will start the PSR exit and prevent a PSR
- * re-entry as well.
- */
- if (intel_psr_wait_for_idle(new_crtc_state, &psr_status))
- drm_err(&dev_priv->drm,
- "PSR idle timed out 0x%x, atomic update may fail\n",
- psr_status);
-
- local_irq_disable();
-
- crtc->debug.min_vbl = min;
- crtc->debug.max_vbl = max;
- trace_intel_pipe_update_start(crtc);
-
- for (;;) {
- /*
- * prepare_to_wait() has a memory barrier, which guarantees
- * other CPUs can see the task state update by the time we
- * read the scanline.
- */
- prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
-
- scanline = intel_get_crtc_scanline(crtc);
- if (scanline < min || scanline > max)
- break;
-
- if (!timeout) {
- drm_err(&dev_priv->drm,
- "Potential atomic update failure on pipe %c\n",
- pipe_name(crtc->pipe));
- break;
- }
-
- local_irq_enable();
-
- timeout = schedule_timeout(timeout);
-
- local_irq_disable();
- }
-
- finish_wait(wq, &wait);
-
- drm_crtc_vblank_put(&crtc->base);
-
- /*
- * On VLV/CHV DSI the scanline counter would appear to
- * increment approx. 1/3 of a scanline before start of vblank.
- * The registers still get latched at start of vblank however.
- * This means we must not write any registers on the first
- * line of vblank (since not the whole line is actually in
- * vblank). And unfortunately we can't use the interrupt to
- * wait here since it will fire too soon. We could use the
- * frame start interrupt instead since it will fire after the
- * critical scanline, but that would require more changes
- * in the interrupt code. So for now we'll just do the nasty
- * thing and poll for the bad scanline to pass us by.
- *
- * FIXME figure out if BXT+ DSI suffers from this as well
- */
- while (need_vlv_dsi_wa && scanline == vblank_start)
- scanline = intel_get_crtc_scanline(crtc);
-
- crtc->debug.scanline_start = scanline;
- crtc->debug.start_vbl_time = ktime_get();
- crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
-
- trace_intel_pipe_update_vblank_evaded(crtc);
- return;
-
-irq_disable:
- local_irq_disable();
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
-static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end)
-{
- u64 delta = ktime_to_ns(ktime_sub(end, crtc->debug.start_vbl_time));
- unsigned int h;
-
- h = ilog2(delta >> 9);
- if (h >= ARRAY_SIZE(crtc->debug.vbl.times))
- h = ARRAY_SIZE(crtc->debug.vbl.times) - 1;
- crtc->debug.vbl.times[h]++;
-
- crtc->debug.vbl.sum += delta;
- if (!crtc->debug.vbl.min || delta < crtc->debug.vbl.min)
- crtc->debug.vbl.min = delta;
- if (delta > crtc->debug.vbl.max)
- crtc->debug.vbl.max = delta;
-
- if (delta > 1000 * VBLANK_EVASION_TIME_US) {
- drm_dbg_kms(crtc->base.dev,
- "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
- pipe_name(crtc->pipe),
- div_u64(delta, 1000),
- VBLANK_EVASION_TIME_US);
- crtc->debug.vbl.over++;
- }
-}
-#else
-static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {}
-#endif
-
-/**
- * intel_pipe_update_end() - end update of a set of display registers
- * @new_crtc_state: the new crtc state
- *
- * Mark the end of an update started with intel_pipe_update_start(). This
- * re-enables interrupts and verifies the update was actually completed
- * before a vblank.
- */
-void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- enum pipe pipe = crtc->pipe;
- int scanline_end = intel_get_crtc_scanline(crtc);
- u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
- ktime_t end_vbl_time = ktime_get();
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (new_crtc_state->uapi.async_flip)
- return;
-
- trace_intel_pipe_update_end(crtc, end_vbl_count, scanline_end);
-
- /*
- * Incase of mipi dsi command mode, we need to set frame update
- * request for every commit.
- */
- if (INTEL_GEN(dev_priv) >= 11 &&
- intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
- icl_dsi_frame_update(new_crtc_state);
-
- /* We're still in the vblank-evade critical section, this can't race.
- * Would be slightly nice to just grab the vblank count and arm the
- * event outside of the critical section - the spinlock might spin for a
- * while ... */
- if (new_crtc_state->uapi.event) {
- drm_WARN_ON(&dev_priv->drm,
- drm_crtc_vblank_get(&crtc->base) != 0);
-
- spin_lock(&crtc->base.dev->event_lock);
- drm_crtc_arm_vblank_event(&crtc->base,
- new_crtc_state->uapi.event);
- spin_unlock(&crtc->base.dev->event_lock);
-
- new_crtc_state->uapi.event = NULL;
- }
-
- local_irq_enable();
-
- /* Send VRR Push to terminate Vblank */
- intel_vrr_send_push(new_crtc_state);
-
- if (intel_vgpu_active(dev_priv))
- return;
-
- if (crtc->debug.start_vbl_count &&
- crtc->debug.start_vbl_count != end_vbl_count) {
- drm_err(&dev_priv->drm,
- "Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
- pipe_name(pipe), crtc->debug.start_vbl_count,
- end_vbl_count,
- ktime_us_delta(end_vbl_time,
- crtc->debug.start_vbl_time),
- crtc->debug.min_vbl, crtc->debug.max_vbl,
- crtc->debug.scanline_start, scanline_end);
- }
-
- dbg_vblank_evade(crtc, end_vbl_time);
-}
-
int intel_plane_check_stride(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
@@ -380,584 +138,6 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
return 0;
}
-static u8 icl_nv12_y_plane_mask(struct drm_i915_private *i915)
-{
- if (IS_ROCKETLAKE(i915))
- return BIT(PLANE_SPRITE2) | BIT(PLANE_SPRITE3);
- else
- return BIT(PLANE_SPRITE4) | BIT(PLANE_SPRITE5);
-}
-
-bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv,
- enum plane_id plane_id)
-{
- return INTEL_GEN(dev_priv) >= 11 &&
- icl_nv12_y_plane_mask(dev_priv) & BIT(plane_id);
-}
-
-bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id)
-{
- return INTEL_GEN(dev_priv) >= 11 &&
- icl_hdr_plane_mask() & BIT(plane_id);
-}
-
-static void
-skl_plane_ratio(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- unsigned int *num, unsigned int *den)
-{
- struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
-
- if (fb->format->cpp[0] == 8) {
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
- *num = 10;
- *den = 8;
- } else {
- *num = 9;
- *den = 8;
- }
- } else {
- *num = 1;
- *den = 1;
- }
-}
-
-static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
- unsigned int num, den;
- unsigned int pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state);
-
- skl_plane_ratio(crtc_state, plane_state, &num, &den);
-
- /* two pixels per clock on glk+ */
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- den *= 2;
-
- return DIV_ROUND_UP(pixel_rate * num, den);
-}
-
-static int skl_plane_max_width(const struct drm_framebuffer *fb,
- int color_plane,
- unsigned int rotation)
-{
- int cpp = fb->format->cpp[color_plane];
-
- switch (fb->modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- /*
- * Validated limit is 4k, but has 5k should
- * work apart from the following features:
- * - Ytile (already limited to 4k)
- * - FP16 (already limited to 4k)
- * - render compression (already limited to 4k)
- * - KVMR sprite and cursor (don't care)
- * - horizontal panning (TODO verify this)
- * - pipe and plane scaling (TODO verify this)
- */
- if (cpp == 8)
- return 4096;
- else
- return 5120;
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Yf_TILED_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
- /* FIXME AUX plane? */
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Yf_TILED:
- if (cpp == 8)
- return 2048;
- else
- return 4096;
- default:
- MISSING_CASE(fb->modifier);
- return 2048;
- }
-}
-
-static int glk_plane_max_width(const struct drm_framebuffer *fb,
- int color_plane,
- unsigned int rotation)
-{
- int cpp = fb->format->cpp[color_plane];
-
- switch (fb->modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- if (cpp == 8)
- return 4096;
- else
- return 5120;
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Yf_TILED_CCS:
- /* FIXME AUX plane? */
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Yf_TILED:
- if (cpp == 8)
- return 2048;
- else
- return 5120;
- default:
- MISSING_CASE(fb->modifier);
- return 2048;
- }
-}
-
-static int icl_plane_min_width(const struct drm_framebuffer *fb,
- int color_plane,
- unsigned int rotation)
-{
- /* Wa_14011264657, Wa_14011050563: gen11+ */
- switch (fb->format->format) {
- case DRM_FORMAT_C8:
- return 18;
- case DRM_FORMAT_RGB565:
- return 10;
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_XVYU2101010:
- case DRM_FORMAT_Y212:
- case DRM_FORMAT_Y216:
- return 6;
- case DRM_FORMAT_NV12:
- return 20;
- case DRM_FORMAT_P010:
- case DRM_FORMAT_P012:
- case DRM_FORMAT_P016:
- return 12;
- case DRM_FORMAT_XRGB16161616F:
- case DRM_FORMAT_XBGR16161616F:
- case DRM_FORMAT_ARGB16161616F:
- case DRM_FORMAT_ABGR16161616F:
- case DRM_FORMAT_XVYU12_16161616:
- case DRM_FORMAT_XVYU16161616:
- return 4;
- default:
- return 1;
- }
-}
-
-static int icl_plane_max_width(const struct drm_framebuffer *fb,
- int color_plane,
- unsigned int rotation)
-{
- return 5120;
-}
-
-static int skl_plane_max_height(const struct drm_framebuffer *fb,
- int color_plane,
- unsigned int rotation)
-{
- return 4096;
-}
-
-static int icl_plane_max_height(const struct drm_framebuffer *fb,
- int color_plane,
- unsigned int rotation)
-{
- return 4320;
-}
-
-static unsigned int
-skl_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
-{
- const struct drm_format_info *info = drm_format_info(pixel_format);
- int cpp = info->cpp[0];
-
- /*
- * "The stride in bytes must not exceed the
- * of the size of 8K pixels and 32K bytes."
- */
- if (drm_rotation_90_or_270(rotation))
- return min(8192, 32768 / cpp);
- else
- return min(8192 * cpp, 32768);
-}
-
-static void
-skl_program_scaler(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- enum pipe pipe = plane->pipe;
- int scaler_id = plane_state->scaler_id;
- const struct intel_scaler *scaler =
- &crtc_state->scaler_state.scalers[scaler_id];
- int crtc_x = plane_state->uapi.dst.x1;
- int crtc_y = plane_state->uapi.dst.y1;
- u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
- u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
- u16 y_hphase, uv_rgb_hphase;
- u16 y_vphase, uv_rgb_vphase;
- int hscale, vscale;
- u32 ps_ctrl;
-
- hscale = drm_rect_calc_hscale(&plane_state->uapi.src,
- &plane_state->uapi.dst,
- 0, INT_MAX);
- vscale = drm_rect_calc_vscale(&plane_state->uapi.src,
- &plane_state->uapi.dst,
- 0, INT_MAX);
-
- /* TODO: handle sub-pixel coordinates */
- if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
- !icl_is_hdr_plane(dev_priv, plane->id)) {
- y_hphase = skl_scaler_calc_phase(1, hscale, false);
- y_vphase = skl_scaler_calc_phase(1, vscale, false);
-
- /* MPEG2 chroma siting convention */
- uv_rgb_hphase = skl_scaler_calc_phase(2, hscale, true);
- uv_rgb_vphase = skl_scaler_calc_phase(2, vscale, false);
- } else {
- /* not used */
- y_hphase = 0;
- y_vphase = 0;
-
- uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
- uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
- }
-
- ps_ctrl = skl_scaler_get_filter_select(plane_state->hw.scaling_filter, 0);
- ps_ctrl |= PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode;
-
- skl_scaler_setup_filter(dev_priv, pipe, scaler_id, 0,
- plane_state->hw.scaling_filter);
-
- intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
- intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, scaler_id),
- PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
- intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, scaler_id),
- PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
- intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, scaler_id),
- (crtc_x << 16) | crtc_y);
- intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, scaler_id),
- (crtc_w << 16) | crtc_h);
-}
-
-/* Preoffset values for YUV to RGB Conversion */
-#define PREOFF_YUV_TO_RGB_HI 0x1800
-#define PREOFF_YUV_TO_RGB_ME 0x0000
-#define PREOFF_YUV_TO_RGB_LO 0x1800
-
-#define ROFF(x) (((x) & 0xffff) << 16)
-#define GOFF(x) (((x) & 0xffff) << 0)
-#define BOFF(x) (((x) & 0xffff) << 16)
-
-/*
- * Programs the input color space conversion stage for ICL HDR planes.
- * Note that it is assumed that this stage always happens after YUV
- * range correction. Thus, the input to this stage is assumed to be
- * in full-range YCbCr.
- */
-static void
-icl_program_input_csc(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum pipe pipe = plane->pipe;
- enum plane_id plane_id = plane->id;
-
- static const u16 input_csc_matrix[][9] = {
- /*
- * BT.601 full range YCbCr -> full range RGB
- * The matrix required is :
- * [1.000, 0.000, 1.371,
- * 1.000, -0.336, -0.698,
- * 1.000, 1.732, 0.0000]
- */
- [DRM_COLOR_YCBCR_BT601] = {
- 0x7AF8, 0x7800, 0x0,
- 0x8B28, 0x7800, 0x9AC0,
- 0x0, 0x7800, 0x7DD8,
- },
- /*
- * BT.709 full range YCbCr -> full range RGB
- * The matrix required is :
- * [1.000, 0.000, 1.574,
- * 1.000, -0.187, -0.468,
- * 1.000, 1.855, 0.0000]
- */
- [DRM_COLOR_YCBCR_BT709] = {
- 0x7C98, 0x7800, 0x0,
- 0x9EF8, 0x7800, 0xAC00,
- 0x0, 0x7800, 0x7ED8,
- },
- /*
- * BT.2020 full range YCbCr -> full range RGB
- * The matrix required is :
- * [1.000, 0.000, 1.474,
- * 1.000, -0.1645, -0.5713,
- * 1.000, 1.8814, 0.0000]
- */
- [DRM_COLOR_YCBCR_BT2020] = {
- 0x7BC8, 0x7800, 0x0,
- 0x8928, 0x7800, 0xAA88,
- 0x0, 0x7800, 0x7F10,
- },
- };
- const u16 *csc = input_csc_matrix[plane_state->hw.color_encoding];
-
- intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0),
- ROFF(csc[0]) | GOFF(csc[1]));
- intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1),
- BOFF(csc[2]));
- intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2),
- ROFF(csc[3]) | GOFF(csc[4]));
- intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3),
- BOFF(csc[5]));
- intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4),
- ROFF(csc[6]) | GOFF(csc[7]));
- intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5),
- BOFF(csc[8]));
-
- intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
- PREOFF_YUV_TO_RGB_HI);
- intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
- PREOFF_YUV_TO_RGB_ME);
- intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
- PREOFF_YUV_TO_RGB_LO);
- intel_de_write_fw(dev_priv,
- PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
- intel_de_write_fw(dev_priv,
- PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0);
- intel_de_write_fw(dev_priv,
- PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0);
-}
-
-static void
-skl_plane_async_flip(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- bool async_flip)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- unsigned long irqflags;
- enum plane_id plane_id = plane->id;
- enum pipe pipe = plane->pipe;
- u32 surf_addr = plane_state->color_plane[0].offset;
- u32 plane_ctl = plane_state->ctl;
-
- plane_ctl |= skl_plane_ctl_crtc(crtc_state);
-
- if (async_flip)
- plane_ctl |= PLANE_CTL_ASYNC_FLIP;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
- intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
- intel_plane_ggtt_offset(plane_state) + surf_addr);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void
-skl_program_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int color_plane)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum plane_id plane_id = plane->id;
- enum pipe pipe = plane->pipe;
- const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- u32 surf_addr = plane_state->color_plane[color_plane].offset;
- u32 stride = skl_plane_stride(plane_state, color_plane);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- int aux_plane = intel_main_to_aux_plane(fb, color_plane);
- int crtc_x = plane_state->uapi.dst.x1;
- int crtc_y = plane_state->uapi.dst.y1;
- u32 x = plane_state->color_plane[color_plane].x;
- u32 y = plane_state->color_plane[color_plane].y;
- u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
- u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
- u8 alpha = plane_state->hw.alpha >> 8;
- u32 plane_color_ctl = 0, aux_dist = 0;
- unsigned long irqflags;
- u32 keymsk, keymax;
- u32 plane_ctl = plane_state->ctl;
-
- plane_ctl |= skl_plane_ctl_crtc(crtc_state);
-
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- plane_color_ctl = plane_state->color_ctl |
- glk_plane_color_ctl_crtc(crtc_state);
-
- /* Sizes are 0 based */
- src_w--;
- src_h--;
-
- keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
-
- keymsk = key->channel_mask & 0x7ffffff;
- if (alpha < 0xff)
- keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
-
- /* The scaler will handle the output position */
- if (plane_state->scaler_id >= 0) {
- crtc_x = 0;
- crtc_y = 0;
- }
-
- if (aux_plane) {
- aux_dist = plane_state->color_plane[aux_plane].offset - surf_addr;
-
- if (INTEL_GEN(dev_priv) < 12)
- aux_dist |= skl_plane_stride(plane_state, aux_plane);
- }
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), stride);
- intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id),
- (crtc_y << 16) | crtc_x);
- intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id),
- (src_h << 16) | src_w);
-
- intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist);
-
- if (icl_is_hdr_plane(dev_priv, plane_id))
- intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id),
- plane_state->cus_ctl);
-
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id),
- plane_color_ctl);
-
- if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
- icl_program_input_csc(plane, crtc_state, plane_state);
-
- if (fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
- intel_uncore_write64_fw(&dev_priv->uncore,
- PLANE_CC_VAL(pipe, plane_id), plane_state->ccval);
-
- skl_write_plane_wm(plane, crtc_state);
-
- intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id),
- key->min_value);
- intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), keymsk);
- intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), keymax);
-
- intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id),
- (y << 16) | x);
-
- if (INTEL_GEN(dev_priv) < 11)
- intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id),
- (plane_state->color_plane[1].y << 16) | plane_state->color_plane[1].x);
-
- if (!drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
- intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane);
-
- /*
- * The control register self-arms if the plane was previously
- * disabled. Try to make the plane enable atomic by writing
- * the control register just before the surface register.
- */
- intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
- intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
- intel_plane_ggtt_offset(plane_state) + surf_addr);
-
- if (plane_state->scaler_id >= 0)
- skl_program_scaler(plane, crtc_state, plane_state);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void
-skl_update_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- int color_plane = 0;
-
- if (plane_state->planar_linked_plane && !plane_state->planar_slave)
- /* Program the UV plane on planar master */
- color_plane = 1;
-
- skl_program_plane(plane, crtc_state, plane_state, color_plane);
-}
-static void
-skl_disable_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum plane_id plane_id = plane->id;
- enum pipe pipe = plane->pipe;
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- if (icl_is_hdr_plane(dev_priv, plane_id))
- intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), 0);
-
- skl_write_plane_wm(plane, crtc_state);
-
- intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0);
- intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static bool
-skl_plane_get_hw_state(struct intel_plane *plane,
- enum pipe *pipe)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum intel_display_power_domain power_domain;
- enum plane_id plane_id = plane->id;
- intel_wakeref_t wakeref;
- bool ret;
-
- power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wakeref)
- return false;
-
- ret = intel_de_read(dev_priv, PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE;
-
- *pipe = plane->pipe;
-
- intel_display_power_put(dev_priv, power_domain, wakeref);
-
- return ret;
-}
-
-static void
-skl_plane_enable_flip_done(struct intel_plane *plane)
-{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
- enum pipe pipe = plane->pipe;
-
- spin_lock_irq(&i915->irq_lock);
- bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
- spin_unlock_irq(&i915->irq_lock);
-}
-
-static void
-skl_plane_disable_flip_done(struct intel_plane *plane)
-{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
- enum pipe pipe = plane->pipe;
-
- spin_lock_irq(&i915->irq_lock);
- bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
- spin_unlock_irq(&i915->irq_lock);
-}
-
static void i9xx_plane_linear_gamma(u16 gamma[8])
{
/* The points are not evenly spaced. */
@@ -2123,19 +1303,18 @@ g4x_plane_get_hw_state(struct intel_plane *plane,
return ret;
}
-static bool intel_fb_scalable(const struct drm_framebuffer *fb)
+static bool g4x_fb_scalable(const struct drm_framebuffer *fb)
{
if (!fb)
return false;
switch (fb->format->format) {
case DRM_FORMAT_C8:
- return false;
case DRM_FORMAT_XRGB16161616F:
case DRM_FORMAT_ARGB16161616F:
case DRM_FORMAT_XBGR16161616F:
case DRM_FORMAT_ABGR16161616F:
- return INTEL_GEN(to_i915(fb->dev)) >= 11;
+ return false;
default:
return true;
}
@@ -2212,7 +1391,7 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
int max_scale = DRM_PLANE_HELPER_NO_SCALING;
int ret;
- if (intel_fb_scalable(plane_state->hw.fb)) {
+ if (g4x_fb_scalable(plane_state->hw.fb)) {
if (INTEL_GEN(dev_priv) < 7) {
min_scale = 1;
max_scale = 16 << 16;
@@ -2301,240 +1480,6 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
return 0;
}
-static bool intel_format_is_p01x(u32 format)
-{
- switch (format) {
- case DRM_FORMAT_P010:
- case DRM_FORMAT_P012:
- case DRM_FORMAT_P016:
- return true;
- default:
- return false;
- }
-}
-
-static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- unsigned int rotation = plane_state->hw.rotation;
- struct drm_format_name_buf format_name;
-
- if (!fb)
- return 0;
-
- if (rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180) &&
- is_ccs_modifier(fb->modifier)) {
- drm_dbg_kms(&dev_priv->drm,
- "RC support only with 0/180 degree rotation (%x)\n",
- rotation);
- return -EINVAL;
- }
-
- if (rotation & DRM_MODE_REFLECT_X &&
- fb->modifier == DRM_FORMAT_MOD_LINEAR) {
- drm_dbg_kms(&dev_priv->drm,
- "horizontal flip is not supported with linear surface formats\n");
- return -EINVAL;
- }
-
- if (drm_rotation_90_or_270(rotation)) {
- if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
- fb->modifier != I915_FORMAT_MOD_Yf_TILED) {
- drm_dbg_kms(&dev_priv->drm,
- "Y/Yf tiling required for 90/270!\n");
- return -EINVAL;
- }
-
- /*
- * 90/270 is not allowed with RGB64 16:16:16:16 and
- * Indexed 8-bit. RGB 16-bit 5:6:5 is allowed gen11 onwards.
- */
- switch (fb->format->format) {
- case DRM_FORMAT_RGB565:
- if (INTEL_GEN(dev_priv) >= 11)
- break;
- fallthrough;
- case DRM_FORMAT_C8:
- case DRM_FORMAT_XRGB16161616F:
- case DRM_FORMAT_XBGR16161616F:
- case DRM_FORMAT_ARGB16161616F:
- case DRM_FORMAT_ABGR16161616F:
- case DRM_FORMAT_Y210:
- case DRM_FORMAT_Y212:
- case DRM_FORMAT_Y216:
- case DRM_FORMAT_XVYU12_16161616:
- case DRM_FORMAT_XVYU16161616:
- drm_dbg_kms(&dev_priv->drm,
- "Unsupported pixel format %s for 90/270!\n",
- drm_get_format_name(fb->format->format,
- &format_name));
- return -EINVAL;
- default:
- break;
- }
- }
-
- /* Y-tiling is not supported in IF-ID Interlace mode */
- if (crtc_state->hw.enable &&
- crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
- (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
- fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
- fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
- fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)) {
- drm_dbg_kms(&dev_priv->drm,
- "Y/Yf tiling not supported in IF-ID mode\n");
- return -EINVAL;
- }
-
- /* Wa_1606054188:tgl */
- if (IS_TIGERLAKE(dev_priv) &&
- plane_state->ckey.flags & I915_SET_COLORKEY_SOURCE &&
- intel_format_is_p01x(fb->format->format)) {
- drm_dbg_kms(&dev_priv->drm,
- "Source color keying not supported with P01x formats\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- int crtc_x = plane_state->uapi.dst.x1;
- int crtc_w = drm_rect_width(&plane_state->uapi.dst);
- int pipe_src_w = crtc_state->pipe_src_w;
-
- /*
- * Display WA #1175: cnl,glk
- * Planes other than the cursor may cause FIFO underflow and display
- * corruption if starting less than 4 pixels from the right edge of
- * the screen.
- * Besides the above WA fix the similar problem, where planes other
- * than the cursor ending less than 4 pixels from the left edge of the
- * screen may cause FIFO underflow and display corruption.
- */
- if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
- (crtc_x + crtc_w < 4 || crtc_x > pipe_src_w - 4)) {
- drm_dbg_kms(&dev_priv->drm,
- "requested plane X %s position %d invalid (valid range %d-%d)\n",
- crtc_x + crtc_w < 4 ? "end" : "start",
- crtc_x + crtc_w < 4 ? crtc_x + crtc_w : crtc_x,
- 4, pipe_src_w - 4);
- return -ERANGE;
- }
-
- return 0;
-}
-
-static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_state)
-{
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- unsigned int rotation = plane_state->hw.rotation;
- int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
-
- /* Display WA #1106 */
- if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
- src_w & 3 &&
- (rotation == DRM_MODE_ROTATE_270 ||
- rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) {
- DRM_DEBUG_KMS("src width must be multiple of 4 for rotated planar YUV\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int skl_plane_max_scale(struct drm_i915_private *dev_priv,
- const struct drm_framebuffer *fb)
-{
- /*
- * We don't yet know the final source width nor
- * whether we can use the HQ scaler mode. Assume
- * the best case.
- * FIXME need to properly check this later.
- */
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
- !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
- return 0x30000 - 1;
- else
- return 0x20000 - 1;
-}
-
-static int skl_plane_check(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- int min_scale = DRM_PLANE_HELPER_NO_SCALING;
- int max_scale = DRM_PLANE_HELPER_NO_SCALING;
- int ret;
-
- ret = skl_plane_check_fb(crtc_state, plane_state);
- if (ret)
- return ret;
-
- /* use scaler when colorkey is not required */
- if (!plane_state->ckey.flags && intel_fb_scalable(fb)) {
- min_scale = 1;
- max_scale = skl_plane_max_scale(dev_priv, fb);
- }
-
- ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- min_scale, max_scale, true);
- if (ret)
- return ret;
-
- ret = skl_check_plane_surface(plane_state);
- if (ret)
- return ret;
-
- if (!plane_state->uapi.visible)
- return 0;
-
- ret = skl_plane_check_dst_coordinates(crtc_state, plane_state);
- if (ret)
- return ret;
-
- ret = intel_plane_check_src_coordinates(plane_state);
- if (ret)
- return ret;
-
- ret = skl_plane_check_nv12_rotation(plane_state);
- if (ret)
- return ret;
-
- /* HW only has 8 bits pixel precision, disable plane if invisible */
- if (!(plane_state->hw.alpha >> 8))
- plane_state->uapi.visible = false;
-
- plane_state->ctl = skl_plane_ctl(crtc_state, plane_state);
-
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- plane_state->color_ctl = glk_plane_color_ctl(crtc_state,
- plane_state);
-
- if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
- icl_is_hdr_plane(dev_priv, plane->id))
- /* Enable and use MPEG-2 chroma siting */
- plane_state->cus_ctl = PLANE_CUS_ENABLE |
- PLANE_CUS_HPHASE_0 |
- PLANE_CUS_VPHASE_SIGN_NEGATIVE | PLANE_CUS_VPHASE_0_25;
- else
- plane_state->cus_ctl = 0;
-
- return 0;
-}
-
static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
{
return INTEL_GEN(dev_priv) >= 9;
@@ -2712,186 +1657,6 @@ static const u32 chv_pipe_b_sprite_formats[] = {
DRM_FORMAT_VYUY,
};
-static const u32 skl_plane_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_XRGB16161616F,
- DRM_FORMAT_XBGR16161616F,
- DRM_FORMAT_YUYV,
- DRM_FORMAT_YVYU,
- DRM_FORMAT_UYVY,
- DRM_FORMAT_VYUY,
- DRM_FORMAT_XYUV8888,
-};
-
-static const u32 skl_planar_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_XRGB16161616F,
- DRM_FORMAT_XBGR16161616F,
- DRM_FORMAT_YUYV,
- DRM_FORMAT_YVYU,
- DRM_FORMAT_UYVY,
- DRM_FORMAT_VYUY,
- DRM_FORMAT_NV12,
- DRM_FORMAT_XYUV8888,
-};
-
-static const u32 glk_planar_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_XRGB16161616F,
- DRM_FORMAT_XBGR16161616F,
- DRM_FORMAT_YUYV,
- DRM_FORMAT_YVYU,
- DRM_FORMAT_UYVY,
- DRM_FORMAT_VYUY,
- DRM_FORMAT_NV12,
- DRM_FORMAT_XYUV8888,
- DRM_FORMAT_P010,
- DRM_FORMAT_P012,
- DRM_FORMAT_P016,
-};
-
-static const u32 icl_sdr_y_plane_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_ARGB2101010,
- DRM_FORMAT_ABGR2101010,
- DRM_FORMAT_YUYV,
- DRM_FORMAT_YVYU,
- DRM_FORMAT_UYVY,
- DRM_FORMAT_VYUY,
- DRM_FORMAT_Y210,
- DRM_FORMAT_Y212,
- DRM_FORMAT_Y216,
- DRM_FORMAT_XYUV8888,
- DRM_FORMAT_XVYU2101010,
- DRM_FORMAT_XVYU12_16161616,
- DRM_FORMAT_XVYU16161616,
-};
-
-static const u32 icl_sdr_uv_plane_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_ARGB2101010,
- DRM_FORMAT_ABGR2101010,
- DRM_FORMAT_YUYV,
- DRM_FORMAT_YVYU,
- DRM_FORMAT_UYVY,
- DRM_FORMAT_VYUY,
- DRM_FORMAT_NV12,
- DRM_FORMAT_P010,
- DRM_FORMAT_P012,
- DRM_FORMAT_P016,
- DRM_FORMAT_Y210,
- DRM_FORMAT_Y212,
- DRM_FORMAT_Y216,
- DRM_FORMAT_XYUV8888,
- DRM_FORMAT_XVYU2101010,
- DRM_FORMAT_XVYU12_16161616,
- DRM_FORMAT_XVYU16161616,
-};
-
-static const u32 icl_hdr_plane_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_ARGB2101010,
- DRM_FORMAT_ABGR2101010,
- DRM_FORMAT_XRGB16161616F,
- DRM_FORMAT_XBGR16161616F,
- DRM_FORMAT_ARGB16161616F,
- DRM_FORMAT_ABGR16161616F,
- DRM_FORMAT_YUYV,
- DRM_FORMAT_YVYU,
- DRM_FORMAT_UYVY,
- DRM_FORMAT_VYUY,
- DRM_FORMAT_NV12,
- DRM_FORMAT_P010,
- DRM_FORMAT_P012,
- DRM_FORMAT_P016,
- DRM_FORMAT_Y210,
- DRM_FORMAT_Y212,
- DRM_FORMAT_Y216,
- DRM_FORMAT_XYUV8888,
- DRM_FORMAT_XVYU2101010,
- DRM_FORMAT_XVYU12_16161616,
- DRM_FORMAT_XVYU16161616,
-};
-
-static const u64 skl_plane_format_modifiers_noccs[] = {
- I915_FORMAT_MOD_Yf_TILED,
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
-static const u64 skl_plane_format_modifiers_ccs[] = {
- I915_FORMAT_MOD_Yf_TILED_CCS,
- I915_FORMAT_MOD_Y_TILED_CCS,
- I915_FORMAT_MOD_Yf_TILED,
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
-static const u64 gen12_plane_format_modifiers_mc_ccs[] = {
- I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS,
- I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
- I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
-static const u64 gen12_plane_format_modifiers_rc_ccs[] = {
- I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
- I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
@@ -2984,150 +1749,6 @@ static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane,
}
}
-static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
- u32 format, u64 modifier)
-{
- struct intel_plane *plane = to_intel_plane(_plane);
-
- switch (modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Yf_TILED:
- break;
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Yf_TILED_CCS:
- if (!plane->has_ccs)
- return false;
- break;
- default:
- return false;
- }
-
- switch (format) {
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- if (is_ccs_modifier(modifier))
- return true;
- fallthrough;
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_XYUV8888:
- case DRM_FORMAT_P010:
- case DRM_FORMAT_P012:
- case DRM_FORMAT_P016:
- case DRM_FORMAT_XVYU2101010:
- if (modifier == I915_FORMAT_MOD_Yf_TILED)
- return true;
- fallthrough;
- case DRM_FORMAT_C8:
- case DRM_FORMAT_XBGR16161616F:
- case DRM_FORMAT_ABGR16161616F:
- case DRM_FORMAT_XRGB16161616F:
- case DRM_FORMAT_ARGB16161616F:
- case DRM_FORMAT_Y210:
- case DRM_FORMAT_Y212:
- case DRM_FORMAT_Y216:
- case DRM_FORMAT_XVYU12_16161616:
- case DRM_FORMAT_XVYU16161616:
- if (modifier == DRM_FORMAT_MOD_LINEAR ||
- modifier == I915_FORMAT_MOD_X_TILED ||
- modifier == I915_FORMAT_MOD_Y_TILED)
- return true;
- fallthrough;
- default:
- return false;
- }
-}
-
-static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv,
- enum plane_id plane_id)
-{
- /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */
- if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv) ||
- IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_C0))
- return false;
-
- return plane_id < PLANE_SPRITE4;
-}
-
-static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
- u32 format, u64 modifier)
-{
- struct drm_i915_private *dev_priv = to_i915(_plane->dev);
- struct intel_plane *plane = to_intel_plane(_plane);
-
- switch (modifier) {
- case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
- if (!gen12_plane_supports_mc_ccs(dev_priv, plane->id))
- return false;
- fallthrough;
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
- break;
- default:
- return false;
- }
-
- switch (format) {
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- if (is_ccs_modifier(modifier))
- return true;
- fallthrough;
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_XYUV8888:
- case DRM_FORMAT_P010:
- case DRM_FORMAT_P012:
- case DRM_FORMAT_P016:
- if (modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)
- return true;
- fallthrough;
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_XVYU2101010:
- case DRM_FORMAT_C8:
- case DRM_FORMAT_XBGR16161616F:
- case DRM_FORMAT_ABGR16161616F:
- case DRM_FORMAT_XRGB16161616F:
- case DRM_FORMAT_ARGB16161616F:
- case DRM_FORMAT_Y210:
- case DRM_FORMAT_Y212:
- case DRM_FORMAT_Y216:
- case DRM_FORMAT_XVYU12_16161616:
- case DRM_FORMAT_XVYU16161616:
- if (modifier == DRM_FORMAT_MOD_LINEAR ||
- modifier == I915_FORMAT_MOD_X_TILED ||
- modifier == I915_FORMAT_MOD_Y_TILED)
- return true;
- fallthrough;
- default:
- return false;
- }
-}
-
static const struct drm_plane_funcs g4x_sprite_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -3155,257 +1776,6 @@ static const struct drm_plane_funcs vlv_sprite_funcs = {
.format_mod_supported = vlv_sprite_format_mod_supported,
};
-static const struct drm_plane_funcs skl_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = intel_plane_destroy,
- .atomic_duplicate_state = intel_plane_duplicate_state,
- .atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = skl_plane_format_mod_supported,
-};
-
-static const struct drm_plane_funcs gen12_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = intel_plane_destroy,
- .atomic_duplicate_state = intel_plane_duplicate_state,
- .atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = gen12_plane_format_mod_supported,
-};
-
-static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id)
-{
- if (!HAS_FBC(dev_priv))
- return false;
-
- return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
-}
-
-static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id)
-{
- /* Display WA #0870: skl, bxt */
- if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
- return false;
-
- if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
- return false;
-
- if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
- return false;
-
- return true;
-}
-
-static const u32 *skl_get_plane_formats(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id,
- int *num_formats)
-{
- if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
- *num_formats = ARRAY_SIZE(skl_planar_formats);
- return skl_planar_formats;
- } else {
- *num_formats = ARRAY_SIZE(skl_plane_formats);
- return skl_plane_formats;
- }
-}
-
-static const u32 *glk_get_plane_formats(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id,
- int *num_formats)
-{
- if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
- *num_formats = ARRAY_SIZE(glk_planar_formats);
- return glk_planar_formats;
- } else {
- *num_formats = ARRAY_SIZE(skl_plane_formats);
- return skl_plane_formats;
- }
-}
-
-static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id,
- int *num_formats)
-{
- if (icl_is_hdr_plane(dev_priv, plane_id)) {
- *num_formats = ARRAY_SIZE(icl_hdr_plane_formats);
- return icl_hdr_plane_formats;
- } else if (icl_is_nv12_y_plane(dev_priv, plane_id)) {
- *num_formats = ARRAY_SIZE(icl_sdr_y_plane_formats);
- return icl_sdr_y_plane_formats;
- } else {
- *num_formats = ARRAY_SIZE(icl_sdr_uv_plane_formats);
- return icl_sdr_uv_plane_formats;
- }
-}
-
-static const u64 *gen12_get_plane_modifiers(struct drm_i915_private *dev_priv,
- enum plane_id plane_id)
-{
- if (gen12_plane_supports_mc_ccs(dev_priv, plane_id))
- return gen12_plane_format_modifiers_mc_ccs;
- else
- return gen12_plane_format_modifiers_rc_ccs;
-}
-
-static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id)
-{
- if (plane_id == PLANE_CURSOR)
- return false;
-
- if (INTEL_GEN(dev_priv) >= 10)
- return true;
-
- if (IS_GEMINILAKE(dev_priv))
- return pipe != PIPE_C;
-
- return pipe != PIPE_C &&
- (plane_id == PLANE_PRIMARY ||
- plane_id == PLANE_SPRITE0);
-}
-
-struct intel_plane *
-skl_universal_plane_create(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id)
-{
- const struct drm_plane_funcs *plane_funcs;
- struct intel_plane *plane;
- enum drm_plane_type plane_type;
- unsigned int supported_rotations;
- unsigned int supported_csc;
- const u64 *modifiers;
- const u32 *formats;
- int num_formats;
- int ret;
-
- plane = intel_plane_alloc();
- if (IS_ERR(plane))
- return plane;
-
- plane->pipe = pipe;
- plane->id = plane_id;
- plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id);
-
- plane->has_fbc = skl_plane_has_fbc(dev_priv, pipe, plane_id);
- if (plane->has_fbc) {
- struct intel_fbc *fbc = &dev_priv->fbc;
-
- fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
- }
-
- if (INTEL_GEN(dev_priv) >= 11) {
- plane->min_width = icl_plane_min_width;
- plane->max_width = icl_plane_max_width;
- plane->max_height = icl_plane_max_height;
- } else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
- plane->max_width = glk_plane_max_width;
- plane->max_height = skl_plane_max_height;
- } else {
- plane->max_width = skl_plane_max_width;
- plane->max_height = skl_plane_max_height;
- }
-
- plane->max_stride = skl_plane_max_stride;
- plane->update_plane = skl_update_plane;
- plane->disable_plane = skl_disable_plane;
- plane->get_hw_state = skl_plane_get_hw_state;
- plane->check_plane = skl_plane_check;
- plane->min_cdclk = skl_plane_min_cdclk;
-
- if (plane_id == PLANE_PRIMARY) {
- plane->need_async_flip_disable_wa = IS_GEN_RANGE(dev_priv, 9, 10);
- plane->async_flip = skl_plane_async_flip;
- plane->enable_flip_done = skl_plane_enable_flip_done;
- plane->disable_flip_done = skl_plane_disable_flip_done;
- }
-
- if (INTEL_GEN(dev_priv) >= 11)
- formats = icl_get_plane_formats(dev_priv, pipe,
- plane_id, &num_formats);
- else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- formats = glk_get_plane_formats(dev_priv, pipe,
- plane_id, &num_formats);
- else
- formats = skl_get_plane_formats(dev_priv, pipe,
- plane_id, &num_formats);
-
- plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
- if (INTEL_GEN(dev_priv) >= 12) {
- modifiers = gen12_get_plane_modifiers(dev_priv, plane_id);
- plane_funcs = &gen12_plane_funcs;
- } else {
- if (plane->has_ccs)
- modifiers = skl_plane_format_modifiers_ccs;
- else
- modifiers = skl_plane_format_modifiers_noccs;
- plane_funcs = &skl_plane_funcs;
- }
-
- if (plane_id == PLANE_PRIMARY)
- plane_type = DRM_PLANE_TYPE_PRIMARY;
- else
- plane_type = DRM_PLANE_TYPE_OVERLAY;
-
- ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- 0, plane_funcs,
- formats, num_formats, modifiers,
- plane_type,
- "plane %d%c", plane_id + 1,
- pipe_name(pipe));
- if (ret)
- goto fail;
-
- supported_rotations =
- DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
- DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
-
- if (INTEL_GEN(dev_priv) >= 10)
- supported_rotations |= DRM_MODE_REFLECT_X;
-
- drm_plane_create_rotation_property(&plane->base,
- DRM_MODE_ROTATE_0,
- supported_rotations);
-
- supported_csc = BIT(DRM_COLOR_YCBCR_BT601) | BIT(DRM_COLOR_YCBCR_BT709);
-
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- supported_csc |= BIT(DRM_COLOR_YCBCR_BT2020);
-
- drm_plane_create_color_properties(&plane->base,
- supported_csc,
- BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
- BIT(DRM_COLOR_YCBCR_FULL_RANGE),
- DRM_COLOR_YCBCR_BT709,
- DRM_COLOR_YCBCR_LIMITED_RANGE);
-
- drm_plane_create_alpha_property(&plane->base);
- drm_plane_create_blend_mode_property(&plane->base,
- BIT(DRM_MODE_BLEND_PIXEL_NONE) |
- BIT(DRM_MODE_BLEND_PREMULTI) |
- BIT(DRM_MODE_BLEND_COVERAGE));
-
- drm_plane_create_zpos_immutable_property(&plane->base, plane_id);
-
- if (INTEL_GEN(dev_priv) >= 12)
- drm_plane_enable_fb_damage_clips(&plane->base);
-
- if (INTEL_GEN(dev_priv) >= 10)
- drm_plane_create_scaling_filter_property(&plane->base,
- BIT(DRM_SCALING_FILTER_DEFAULT) |
- BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
-
- drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
-
- return plane;
-
-fail:
- intel_plane_free(plane);
-
- return ERR_PTR(ret);
-}
-
struct intel_plane *
intel_sprite_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, int sprite)
@@ -3418,10 +1788,6 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
int num_formats;
int ret, zpos;
- if (INTEL_GEN(dev_priv) >= 9)
- return skl_universal_plane_create(dev_priv, pipe,
- PLANE_SPRITE0 + sprite);
-
plane = intel_plane_alloc();
if (IS_ERR(plane))
return plane;
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h
index 76126dd8d584..f6989da2dc4b 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.h
+++ b/drivers/gpu/drm/i915/display/intel_sprite.h
@@ -38,9 +38,6 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
int intel_plane_check_stride(const struct intel_plane_state *plane_state);
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
-struct intel_plane *
-skl_universal_plane_create(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id);
static inline u8 icl_hdr_plane_mask(void)
{
@@ -48,10 +45,6 @@ static inline u8 icl_hdr_plane_mask(void)
BIT(PLANE_SPRITE0) | BIT(PLANE_SPRITE1);
}
-bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv,
- enum plane_id plane_id);
-bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id);
-
int ivb_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
int hsw_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 187ec573de59..dbe24d7e7375 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -327,6 +327,10 @@ enum vbt_gmbus_ddi {
ICL_DDC_BUS_PORT_4,
TGL_DDC_BUS_PORT_5,
TGL_DDC_BUS_PORT_6,
+ ADLS_DDC_BUS_PORT_TC1 = 0x2,
+ ADLS_DDC_BUS_PORT_TC2,
+ ADLS_DDC_BUS_PORT_TC3,
+ ADLS_DDC_BUS_PORT_TC4
};
#define DP_AUX_A 0x40
@@ -339,10 +343,21 @@ enum vbt_gmbus_ddi {
#define DP_AUX_H 0x80
#define DP_AUX_I 0x90
-#define VBT_DP_MAX_LINK_RATE_HBR3 0
-#define VBT_DP_MAX_LINK_RATE_HBR2 1
-#define VBT_DP_MAX_LINK_RATE_HBR 2
-#define VBT_DP_MAX_LINK_RATE_LBR 3
+/* DP max link rate, BDB version 216+ */
+#define BDB_216_VBT_DP_MAX_LINK_RATE_HBR3 0
+#define BDB_216_VBT_DP_MAX_LINK_RATE_HBR2 1
+#define BDB_216_VBT_DP_MAX_LINK_RATE_HBR 2
+#define BDB_216_VBT_DP_MAX_LINK_RATE_LBR 3
+
+/* DP max link rate, BDB version 230+ */
+#define BDB_230_VBT_DP_MAX_LINK_RATE_DEF 0
+#define BDB_230_VBT_DP_MAX_LINK_RATE_LBR 1
+#define BDB_230_VBT_DP_MAX_LINK_RATE_HBR 2
+#define BDB_230_VBT_DP_MAX_LINK_RATE_HBR2 3
+#define BDB_230_VBT_DP_MAX_LINK_RATE_HBR3 4
+#define BDB_230_VBT_DP_MAX_LINK_RATE_UHBR10 5
+#define BDB_230_VBT_DP_MAX_LINK_RATE_UHBR13P5 6
+#define BDB_230_VBT_DP_MAX_LINK_RATE_UHBR20 7
/*
* The child device config, aka the display device data structure, provides a
@@ -441,8 +456,8 @@ struct child_device_config {
u16 dp_gpio_pin_num; /* 195 */
u8 dp_iboost_level:4; /* 196 */
u8 hdmi_iboost_level:4; /* 196 */
- u8 dp_max_link_rate:2; /* 216 CNL+ */
- u8 dp_max_link_rate_reserved:6; /* 216 */
+ u8 dp_max_link_rate:3; /* 216/230 CNL+ */
+ u8 dp_max_link_rate_reserved:5; /* 216/230 */
} __packed;
struct bdb_general_definitions {
diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c
index be333699c515..5f8e4f53649d 100644
--- a/drivers/gpu/drm/i915/display/intel_vga.c
+++ b/drivers/gpu/drm/i915/display/intel_vga.c
@@ -25,7 +25,7 @@ static i915_reg_t intel_vga_cntrl_reg(struct drm_i915_private *i915)
/* Disable the VGA plane that we never use */
void intel_vga_disable(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv);
u8 sr1;
@@ -76,7 +76,7 @@ void intel_vga_redisable(struct drm_i915_private *i915)
void intel_vga_reset_io_mem(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
/*
* After we re-enable the power well, if we touch VGA register 0x3d5
@@ -136,7 +136,7 @@ intel_vga_set_decode(void *cookie, bool enable_decode)
int intel_vga_register(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
int ret;
/*
@@ -156,7 +156,7 @@ int intel_vga_register(struct drm_i915_private *i915)
void intel_vga_unregister(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
vga_client_register(pdev, NULL, NULL, NULL);
}
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
new file mode 100644
index 000000000000..b37a87bb190f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -0,0 +1,556 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include "intel_display_types.h"
+#include "skl_scaler.h"
+#include "skl_universal_plane.h"
+
+/*
+ * The hardware phase 0.0 refers to the center of the pixel.
+ * We want to start from the top/left edge which is phase
+ * -0.5. That matches how the hardware calculates the scaling
+ * factors (from top-left of the first pixel to bottom-right
+ * of the last pixel, as opposed to the pixel centers).
+ *
+ * For 4:2:0 subsampled chroma planes we obviously have to
+ * adjust that so that the chroma sample position lands in
+ * the right spot.
+ *
+ * Note that for packed YCbCr 4:2:2 formats there is no way to
+ * control chroma siting. The hardware simply replicates the
+ * chroma samples for both of the luma samples, and thus we don't
+ * actually get the expected MPEG2 chroma siting convention :(
+ * The same behaviour is observed on pre-SKL platforms as well.
+ *
+ * Theory behind the formula (note that we ignore sub-pixel
+ * source coordinates):
+ * s = source sample position
+ * d = destination sample position
+ *
+ * Downscaling 4:1:
+ * -0.5
+ * | 0.0
+ * | | 1.5 (initial phase)
+ * | | |
+ * v v v
+ * | s | s | s | s |
+ * | d |
+ *
+ * Upscaling 1:4:
+ * -0.5
+ * | -0.375 (initial phase)
+ * | | 0.0
+ * | | |
+ * v v v
+ * | s |
+ * | d | d | d | d |
+ */
+static u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
+{
+ int phase = -0x8000;
+ u16 trip = 0;
+
+ if (chroma_cosited)
+ phase += (sub - 1) * 0x8000 / sub;
+
+ phase += scale / (2 * sub);
+
+ /*
+ * Hardware initial phase limited to [-0.5:1.5].
+ * Since the max hardware scale factor is 3.0, we
+ * should never actually exceed 1.0 here.
+ */
+ WARN_ON(phase < -0x8000 || phase > 0x18000);
+
+ if (phase < 0)
+ phase = 0x10000 + phase;
+ else
+ trip = PS_PHASE_TRIP;
+
+ return ((phase >> 2) & PS_PHASE_MASK) | trip;
+}
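A worked example of the phase math above (an illustrative sketch, not part of the patch; the only register fields referenced are the PS_PHASE_MASK/PS_PHASE_TRIP values already used by skl_scaler_calc_phase()):

	/* 2:1 downscale: scale = 0x20000 (.16 fixed point), sub = 1, not cosited */
	int phase = -0x8000;            /* start at -0.5 */
	phase += 0x20000 / (2 * 1);     /* +1.0 -> phase = 0x8000, i.e. +0.5 */
	/* phase >= 0, so the result is ((0x8000 >> 2) & PS_PHASE_MASK) | PS_PHASE_TRIP */

For a 1:4 upscale (scale = 0x4000) the same math gives -0x6000, i.e. the -0.375 initial phase shown in the upscaling diagram; being negative, it is wrapped to 0x10000 - 0x6000 = 0xA000 with PS_PHASE_TRIP left clear.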
+
+#define SKL_MIN_SRC_W 8
+#define SKL_MAX_SRC_W 4096
+#define SKL_MIN_SRC_H 8
+#define SKL_MAX_SRC_H 4096
+#define SKL_MIN_DST_W 8
+#define SKL_MAX_DST_W 4096
+#define SKL_MIN_DST_H 8
+#define SKL_MAX_DST_H 4096
+#define ICL_MAX_SRC_W 5120
+#define ICL_MAX_SRC_H 4096
+#define ICL_MAX_DST_W 5120
+#define ICL_MAX_DST_H 4096
+#define SKL_MIN_YUV_420_SRC_W 16
+#define SKL_MIN_YUV_420_SRC_H 16
+
+static int
+skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
+ unsigned int scaler_user, int *scaler_id,
+ int src_w, int src_h, int dst_w, int dst_h,
+ const struct drm_format_info *format,
+ u64 modifier, bool need_scaler)
+{
+ struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
+ if (src_w != dst_w || src_h != dst_h)
+ need_scaler = true;
+
+ /*
+ * Scaling/fitting not supported in IF-ID mode in GEN9+
+ * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
+ * Once NV12 is enabled, handle it here while allocating scaler
+ * for NV12.
+ */
+ if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
+ need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Pipe/Plane scaling not supported with IF-ID mode\n");
+ return -EINVAL;
+ }
+
+ /*
+ * If the plane is being disabled, the scaler is no longer required,
+ * or a force detach is requested:
+ * - free the scaler bound to this plane/crtc
+ * - in order to do this, update crtc->scaler_usage
+ *
+ * Here the scaler state in crtc_state is marked free so that the
+ * scaler can be assigned to another user. The actual register
+ * update to free the scaler is done in plane/panel-fit programming.
+ * For this purpose crtc/plane_state->scaler_id isn't reset here.
+ */
+ if (force_detach || !need_scaler) {
+ if (*scaler_id >= 0) {
+ scaler_state->scaler_users &= ~(1 << scaler_user);
+ scaler_state->scalers[*scaler_id].in_use = 0;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "scaler_user index %u.%u: "
+ "Staged freeing scaler id %d scaler_users = 0x%x\n",
+ intel_crtc->pipe, scaler_user, *scaler_id,
+ scaler_state->scaler_users);
+ *scaler_id = -1;
+ }
+ return 0;
+ }
+
+ if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
+ (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Planar YUV: src dimensions not met\n");
+ return -EINVAL;
+ }
+
+ /* range checks */
+ if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
+ dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
+ (INTEL_GEN(dev_priv) >= 11 &&
+ (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
+ dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
+ (INTEL_GEN(dev_priv) < 11 &&
+ (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
+ dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
+ drm_dbg_kms(&dev_priv->drm,
+ "scaler_user index %u.%u: src %ux%u dst %ux%u "
+ "size is out of scaler range\n",
+ intel_crtc->pipe, scaler_user, src_w, src_h,
+ dst_w, dst_h);
+ return -EINVAL;
+ }
+
+ /* mark this plane as a scaler user in crtc_state */
+ scaler_state->scaler_users |= (1 << scaler_user);
+ drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
+ "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
+ intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
+ scaler_state->scaler_users);
+
+ return 0;
+}
+
+int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+ int width, height;
+
+ if (crtc_state->pch_pfit.enabled) {
+ width = drm_rect_width(&crtc_state->pch_pfit.dst);
+ height = drm_rect_height(&crtc_state->pch_pfit.dst);
+ } else {
+ width = pipe_mode->crtc_hdisplay;
+ height = pipe_mode->crtc_vdisplay;
+ }
+ return skl_update_scaler(crtc_state, !crtc_state->hw.active,
+ SKL_CRTC_INDEX,
+ &crtc_state->scaler_state.scaler_id,
+ crtc_state->pipe_src_w, crtc_state->pipe_src_h,
+ width, height, NULL, 0,
+ crtc_state->pch_pfit.enabled);
+}
+
+/**
+ * skl_update_scaler_plane - Stages update to scaler state for a given plane.
+ * @crtc_state: crtc's scaler state
+ * @plane_state: atomic plane state to update
+ *
+ * Return:
+ * 0 - scaler_usage updated successfully
+ * error - requested scaling cannot be supported or other error condition
+ */
+int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ struct intel_plane *intel_plane =
+ to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
+ struct drm_framebuffer *fb = plane_state->hw.fb;
+ int ret;
+ bool force_detach = !fb || !plane_state->uapi.visible;
+ bool need_scaler = false;
+
+ /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
+ if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
+ fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
+ need_scaler = true;
+
+ ret = skl_update_scaler(crtc_state, force_detach,
+ drm_plane_index(&intel_plane->base),
+ &plane_state->scaler_id,
+ drm_rect_width(&plane_state->uapi.src) >> 16,
+ drm_rect_height(&plane_state->uapi.src) >> 16,
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst),
+ fb ? fb->format : NULL,
+ fb ? fb->modifier : 0,
+ need_scaler);
+
+ if (ret || plane_state->scaler_id < 0)
+ return ret;
+
+ /* check colorkey */
+ if (plane_state->ckey.flags) {
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] scaling with color key not allowed",
+ intel_plane->base.base.id,
+ intel_plane->base.name);
+ return -EINVAL;
+ }
+
+ /* Check src format */
+ switch (fb->format->format) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_XYUV8888:
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ case DRM_FORMAT_Y210:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ case DRM_FORMAT_XVYU2101010:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
+ break;
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ if (INTEL_GEN(dev_priv) >= 11)
+ break;
+ fallthrough;
+ default:
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
+ intel_plane->base.base.id, intel_plane->base.name,
+ fb->base.id, fb->format->format);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cnl_coef_tap(int i)
+{
+ return i % 7;
+}
+
+static u16 cnl_nearest_filter_coef(int t)
+{
+ return t == 3 ? 0x0800 : 0x3000;
+}
+
+/*
+ * Theory behind setting nearest-neighbor integer scaling:
+ *
+ * 17 phases of 7 taps require 119 coefficients in 60 dwords per set.
+ * The letter represents the filter tap (D is the center tap) and the
+ * number represents the phase (0-16).
+ *
+ * +------------+------------------------+------------------------+
+ * |Index value | Data value coefficient 1 | Data value coefficient 2 |
+ * +------------+------------------------+------------------------+
+ * | 00h | B0 | A0 |
+ * +------------+------------------------+------------------------+
+ * | 01h | D0 | C0 |
+ * +------------+------------------------+------------------------+
+ * | 02h | F0 | E0 |
+ * +------------+------------------------+------------------------+
+ * | 03h | A1 | G0 |
+ * +------------+------------------------+------------------------+
+ * | 04h | C1 | B1 |
+ * +------------+------------------------+------------------------+
+ * | ... | ... | ... |
+ * +------------+------------------------+------------------------+
+ * | 38h | B16 | A16 |
+ * +------------+------------------------+------------------------+
+ * | 39h | D16 | C16 |
+ * +------------+------------------------+------------------------+
+ * | 3Ah | F16 | E16 |
+ * +------------+------------------------+------------------------+
+ * | 3Bh | Reserved | G16 |
+ * +------------+------------------------+------------------------+
+ *
+ * To enable nearest-neighbor scaling: program the scaler coefficients with
+ * the center tap (Dxx) values set to 1 and all other values set to 0, as per
+ * SCALER_COEFFICIENT_FORMAT.
+ *
+ */
+
+static void cnl_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int id, int set)
+{
+ int i;
+
+ intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set),
+ PS_COEE_INDEX_AUTO_INC);
+
+ for (i = 0; i < 17 * 7; i += 2) {
+ u32 tmp;
+ int t;
+
+ t = cnl_coef_tap(i);
+ tmp = cnl_nearest_filter_coef(t);
+
+ t = cnl_coef_tap(i + 1);
+ tmp |= cnl_nearest_filter_coef(t) << 16;
+
+ intel_de_write_fw(dev_priv, CNL_PS_COEF_DATA_SET(pipe, id, set),
+ tmp);
+ }
+
+ intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set), 0);
+}
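A quick illustration of the packing done by the loop above (not part of the diff): two 16-bit coefficients go into each data dword, so the dword written for i == 2 covers taps C0 and D0 and carries the center-tap value in its upper half:

	/* i == 2: cnl_coef_tap(2) == 2 -> 0x3000 (C0)
	 *         cnl_coef_tap(3) == 3 -> 0x0800 (D0, the center tap)
	 */
	u32 tmp = 0x3000 | (0x0800 << 16);	/* written as 0x08003000 */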
+
+static u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
+{
+ if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR) {
+ return (PS_FILTER_PROGRAMMED |
+ PS_Y_VERT_FILTER_SELECT(set) |
+ PS_Y_HORZ_FILTER_SELECT(set) |
+ PS_UV_VERT_FILTER_SELECT(set) |
+ PS_UV_HORZ_FILTER_SELECT(set));
+ }
+
+ return PS_FILTER_MEDIUM;
+}
+
+static void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe pipe,
+ int id, int set, enum drm_scaling_filter filter)
+{
+ switch (filter) {
+ case DRM_SCALING_FILTER_DEFAULT:
+ break;
+ case DRM_SCALING_FILTER_NEAREST_NEIGHBOR:
+ cnl_program_nearest_filter_coefs(dev_priv, pipe, id, set);
+ break;
+ default:
+ MISSING_CASE(filter);
+ }
+}
+
+void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ struct drm_rect src = {
+ .x2 = crtc_state->pipe_src_w << 16,
+ .y2 = crtc_state->pipe_src_h << 16,
+ };
+ const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
+ u16 uv_rgb_hphase, uv_rgb_vphase;
+ enum pipe pipe = crtc->pipe;
+ int width = drm_rect_width(dst);
+ int height = drm_rect_height(dst);
+ int x = dst->x1;
+ int y = dst->y1;
+ int hscale, vscale;
+ unsigned long irqflags;
+ int id;
+ u32 ps_ctrl;
+
+ if (!crtc_state->pch_pfit.enabled)
+ return;
+
+ if (drm_WARN_ON(&dev_priv->drm,
+ crtc_state->scaler_state.scaler_id < 0))
+ return;
+
+ hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX);
+ vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX);
+
+ uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
+ uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
+
+ id = scaler_state->scaler_id;
+
+ ps_ctrl = skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0);
+ ps_ctrl |= PS_SCALER_EN | scaler_state->scalers[id].mode;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ skl_scaler_setup_filter(dev_priv, pipe, id, 0,
+ crtc_state->hw.scaling_filter);
+
+ intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), ps_ctrl);
+
+ intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
+ PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+ intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
+ PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
+ x << 16 | y);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
+ width << 16 | height);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+void
+skl_program_plane_scaler(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ enum pipe pipe = plane->pipe;
+ int scaler_id = plane_state->scaler_id;
+ const struct intel_scaler *scaler =
+ &crtc_state->scaler_state.scalers[scaler_id];
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
+ u16 y_hphase, uv_rgb_hphase;
+ u16 y_vphase, uv_rgb_vphase;
+ int hscale, vscale;
+ u32 ps_ctrl;
+
+ hscale = drm_rect_calc_hscale(&plane_state->uapi.src,
+ &plane_state->uapi.dst,
+ 0, INT_MAX);
+ vscale = drm_rect_calc_vscale(&plane_state->uapi.src,
+ &plane_state->uapi.dst,
+ 0, INT_MAX);
+
+ /* TODO: handle sub-pixel coordinates */
+ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
+ !icl_is_hdr_plane(dev_priv, plane->id)) {
+ y_hphase = skl_scaler_calc_phase(1, hscale, false);
+ y_vphase = skl_scaler_calc_phase(1, vscale, false);
+
+ /* MPEG2 chroma siting convention */
+ uv_rgb_hphase = skl_scaler_calc_phase(2, hscale, true);
+ uv_rgb_vphase = skl_scaler_calc_phase(2, vscale, false);
+ } else {
+ /* not used */
+ y_hphase = 0;
+ y_vphase = 0;
+
+ uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
+ uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
+ }
+
+ ps_ctrl = skl_scaler_get_filter_select(plane_state->hw.scaling_filter, 0);
+ ps_ctrl |= PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode;
+
+ skl_scaler_setup_filter(dev_priv, pipe, scaler_id, 0,
+ plane_state->hw.scaling_filter);
+
+ intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
+ intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+ intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, scaler_id),
+ (crtc_x << 16) | crtc_y);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, scaler_id),
+ (crtc_w << 16) | crtc_h);
+}
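For reference, a hedged worked example (not part of the patch) of the chroma phases the semi-planar branch above produces at a 1:1 scale:

	/* hscale == vscale == 0x10000 (1:1), semi-planar YUV on a non-HDR plane */
	uv_rgb_hphase = skl_scaler_calc_phase(2, 0x10000, true);  /* -0.5 + 0.25 + 0.25 =  0.0 (co-sited) */
	uv_rgb_vphase = skl_scaler_calc_phase(2, 0x10000, false); /* -0.5 + 0.25        = -0.25 vertically */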
+
+static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+/*
+ * This function detaches (i.e. unbinds) unused scalers in hardware.
+ */
+void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ int i;
+
+ /* loop through and disable scalers that aren't in use */
+ for (i = 0; i < intel_crtc->num_scalers; i++) {
+ if (!scaler_state->scalers[i].in_use)
+ skl_detach_scaler(intel_crtc, i);
+ }
+}
+
+void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ int i;
+
+ for (i = 0; i < crtc->num_scalers; i++)
+ skl_detach_scaler(crtc, i);
+}
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.h b/drivers/gpu/drm/i915/display/skl_scaler.h
new file mode 100644
index 000000000000..0097d5d08e10
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/skl_scaler.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#ifndef INTEL_SCALER_H
+#define INTEL_SCALER_H
+
+#include <linux/types.h>
+
+enum drm_scaling_filter;
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_plane_state;
+struct intel_plane;
+enum pipe;
+
+int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
+
+int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state);
+
+void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
+
+void skl_program_plane_scaler(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+void skl_detach_scalers(const struct intel_crtc_state *crtc_state);
+void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
+#endif
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
new file mode 100644
index 000000000000..1f335cb09149
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -0,0 +1,2266 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+
+#include "i915_drv.h"
+#include "intel_atomic_plane.h"
+#include "intel_display_types.h"
+#include "intel_pm.h"
+#include "intel_psr.h"
+#include "intel_sprite.h"
+#include "skl_scaler.h"
+#include "skl_universal_plane.h"
+
+static const u32 skl_plane_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_XYUV8888,
+};
+
+static const u32 skl_planar_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_XYUV8888,
+};
+
+static const u32 glk_planar_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_XYUV8888,
+ DRM_FORMAT_P010,
+ DRM_FORMAT_P012,
+ DRM_FORMAT_P016,
+};
+
+static const u32 icl_sdr_y_plane_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_Y210,
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_Y216,
+ DRM_FORMAT_XYUV8888,
+ DRM_FORMAT_XVYU2101010,
+ DRM_FORMAT_XVYU12_16161616,
+ DRM_FORMAT_XVYU16161616,
+};
+
+static const u32 icl_sdr_uv_plane_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_P010,
+ DRM_FORMAT_P012,
+ DRM_FORMAT_P016,
+ DRM_FORMAT_Y210,
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_Y216,
+ DRM_FORMAT_XYUV8888,
+ DRM_FORMAT_XVYU2101010,
+ DRM_FORMAT_XVYU12_16161616,
+ DRM_FORMAT_XVYU16161616,
+};
+
+static const u32 icl_hdr_plane_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_ARGB16161616F,
+ DRM_FORMAT_ABGR16161616F,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_P010,
+ DRM_FORMAT_P012,
+ DRM_FORMAT_P016,
+ DRM_FORMAT_Y210,
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_Y216,
+ DRM_FORMAT_XYUV8888,
+ DRM_FORMAT_XVYU2101010,
+ DRM_FORMAT_XVYU12_16161616,
+ DRM_FORMAT_XVYU16161616,
+};
+
+static const u64 skl_plane_format_modifiers_noccs[] = {
+ I915_FORMAT_MOD_Yf_TILED,
+ I915_FORMAT_MOD_Y_TILED,
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static const u64 skl_plane_format_modifiers_ccs[] = {
+ I915_FORMAT_MOD_Yf_TILED_CCS,
+ I915_FORMAT_MOD_Y_TILED_CCS,
+ I915_FORMAT_MOD_Yf_TILED,
+ I915_FORMAT_MOD_Y_TILED,
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static const u64 gen12_plane_format_modifiers_mc_ccs[] = {
+ I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS,
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
+ I915_FORMAT_MOD_Y_TILED,
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static const u64 gen12_plane_format_modifiers_rc_ccs[] = {
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
+ I915_FORMAT_MOD_Y_TILED,
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
+{
+ switch (format) {
+ case PLANE_CTL_FORMAT_RGB_565:
+ return DRM_FORMAT_RGB565;
+ case PLANE_CTL_FORMAT_NV12:
+ return DRM_FORMAT_NV12;
+ case PLANE_CTL_FORMAT_XYUV:
+ return DRM_FORMAT_XYUV8888;
+ case PLANE_CTL_FORMAT_P010:
+ return DRM_FORMAT_P010;
+ case PLANE_CTL_FORMAT_P012:
+ return DRM_FORMAT_P012;
+ case PLANE_CTL_FORMAT_P016:
+ return DRM_FORMAT_P016;
+ case PLANE_CTL_FORMAT_Y210:
+ return DRM_FORMAT_Y210;
+ case PLANE_CTL_FORMAT_Y212:
+ return DRM_FORMAT_Y212;
+ case PLANE_CTL_FORMAT_Y216:
+ return DRM_FORMAT_Y216;
+ case PLANE_CTL_FORMAT_Y410:
+ return DRM_FORMAT_XVYU2101010;
+ case PLANE_CTL_FORMAT_Y412:
+ return DRM_FORMAT_XVYU12_16161616;
+ case PLANE_CTL_FORMAT_Y416:
+ return DRM_FORMAT_XVYU16161616;
+ default:
+ case PLANE_CTL_FORMAT_XRGB_8888:
+ if (rgb_order) {
+ if (alpha)
+ return DRM_FORMAT_ABGR8888;
+ else
+ return DRM_FORMAT_XBGR8888;
+ } else {
+ if (alpha)
+ return DRM_FORMAT_ARGB8888;
+ else
+ return DRM_FORMAT_XRGB8888;
+ }
+ case PLANE_CTL_FORMAT_XRGB_2101010:
+ if (rgb_order) {
+ if (alpha)
+ return DRM_FORMAT_ABGR2101010;
+ else
+ return DRM_FORMAT_XBGR2101010;
+ } else {
+ if (alpha)
+ return DRM_FORMAT_ARGB2101010;
+ else
+ return DRM_FORMAT_XRGB2101010;
+ }
+ case PLANE_CTL_FORMAT_XRGB_16161616F:
+ if (rgb_order) {
+ if (alpha)
+ return DRM_FORMAT_ABGR16161616F;
+ else
+ return DRM_FORMAT_XBGR16161616F;
+ } else {
+ if (alpha)
+ return DRM_FORMAT_ARGB16161616F;
+ else
+ return DRM_FORMAT_XRGB16161616F;
+ }
+ }
+}
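Illustration (not part of the patch): this helper is the readout-side inverse of skl_plane_ctl_format() below, and skl_get_initial_plane_config() at the end of this file uses it to turn the PLANE_CTL format, RGBX-order and alpha bits back into a fourcc. Per the switch above, skl_format_to_fourcc(PLANE_CTL_FORMAT_XRGB_8888, true, false) gives DRM_FORMAT_XBGR8888, and the same call with alpha set gives DRM_FORMAT_ABGR8888.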
+
+static u8 icl_nv12_y_plane_mask(struct drm_i915_private *i915)
+{
+ if (HAS_D12_PLANE_MINIMIZATION(i915))
+ return BIT(PLANE_SPRITE2) | BIT(PLANE_SPRITE3);
+ else
+ return BIT(PLANE_SPRITE4) | BIT(PLANE_SPRITE5);
+}
+
+bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv,
+ enum plane_id plane_id)
+{
+ return INTEL_GEN(dev_priv) >= 11 &&
+ icl_nv12_y_plane_mask(dev_priv) & BIT(plane_id);
+}
+
+bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id)
+{
+ return INTEL_GEN(dev_priv) >= 11 &&
+ icl_hdr_plane_mask() & BIT(plane_id);
+}
+
+static void
+skl_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ if (fb->format->cpp[0] == 8) {
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ *num = 10;
+ *den = 8;
+ } else {
+ *num = 9;
+ *den = 8;
+ }
+ } else {
+ *num = 1;
+ *den = 1;
+ }
+}
+
+static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
+ unsigned int num, den;
+ unsigned int pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state);
+
+ skl_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ /* two pixels per clock on glk+ */
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ den *= 2;
+
+ return DIV_ROUND_UP(pixel_rate * num, den);
+}
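A rough standalone sketch of the arithmetic above (the pixel rate is an arbitrary example value, not taken from the patch): on glk+ a 64bpp (cpp == 8) plane combines the 10/8 ratio with the two-pixels-per-clock divider, so the cdclk floor works out to pixel_rate * 10 / 16.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int pixel_rate = 300000;	/* kHz, arbitrary example */
	unsigned int num = 10, den = 8;		/* cpp == 8 case on glk+ */

	den *= 2;	/* two pixels per clock on glk+ */

	/* mirrors skl_plane_min_cdclk() for this case: prints 187500 kHz */
	printf("min cdclk: %u kHz\n", DIV_ROUND_UP(pixel_rate * num, den));
	return 0;
}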
+
+static int skl_plane_max_width(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ int cpp = fb->format->cpp[color_plane];
+
+ switch (fb->modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ /*
+		 * Validated limit is 4k, but 5k should
+		 * work apart from the following features:
+ * - Ytile (already limited to 4k)
+ * - FP16 (already limited to 4k)
+ * - render compression (already limited to 4k)
+ * - KVMR sprite and cursor (don't care)
+ * - horizontal panning (TODO verify this)
+ * - pipe and plane scaling (TODO verify this)
+ */
+ if (cpp == 8)
+ return 4096;
+ else
+ return 5120;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ /* FIXME AUX plane? */
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ if (cpp == 8)
+ return 2048;
+ else
+ return 4096;
+ default:
+ MISSING_CASE(fb->modifier);
+ return 2048;
+ }
+}
+
+static int glk_plane_max_width(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ int cpp = fb->format->cpp[color_plane];
+
+ switch (fb->modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ if (cpp == 8)
+ return 4096;
+ else
+ return 5120;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ /* FIXME AUX plane? */
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ if (cpp == 8)
+ return 2048;
+ else
+ return 5120;
+ default:
+ MISSING_CASE(fb->modifier);
+ return 2048;
+ }
+}
+
+static int icl_plane_min_width(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ /* Wa_14011264657, Wa_14011050563: gen11+ */
+ switch (fb->format->format) {
+ case DRM_FORMAT_C8:
+ return 18;
+ case DRM_FORMAT_RGB565:
+ return 10;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_XVYU2101010:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ return 6;
+ case DRM_FORMAT_NV12:
+ return 20;
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ return 12;
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
+ return 4;
+ default:
+ return 1;
+ }
+}
+
+static int icl_plane_max_width(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ return 5120;
+}
+
+static int skl_plane_max_height(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ return 4096;
+}
+
+static int icl_plane_max_height(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ return 4320;
+}
+
+static unsigned int
+skl_plane_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+
+ /*
+	 * "The stride in bytes must not exceed the lesser
+	 * of the size of 8K pixels and 32K bytes."
+ */
+ if (drm_rotation_90_or_270(rotation))
+ return min(8192, 32768 / cpp);
+ else
+ return min(8192 * cpp, 32768);
+}
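Working the quoted limit through for a couple of unrotated cases (illustrative only): RGB565 (cpp == 2) gives min(8192 * 2, 32768) = 16384 bytes, so the 8K-pixel limit dominates, while a 64bpp format (cpp == 8) gives min(8192 * 8, 32768) = 32768 bytes, so the 32K-byte limit dominates. The rotated branch appears to express the same pair of limits in pixels, i.e. min(8192, 32768 / cpp).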
+
+
+/* Preoffset values for YUV to RGB Conversion */
+#define PREOFF_YUV_TO_RGB_HI 0x1800
+#define PREOFF_YUV_TO_RGB_ME 0x0000
+#define PREOFF_YUV_TO_RGB_LO 0x1800
+
+#define ROFF(x) (((x) & 0xffff) << 16)
+#define GOFF(x) (((x) & 0xffff) << 0)
+#define BOFF(x) (((x) & 0xffff) << 16)
+
+/*
+ * Programs the input color space conversion stage for ICL HDR planes.
+ * Note that it is assumed that this stage always happens after YUV
+ * range correction. Thus, the input to this stage is assumed to be
+ * in full-range YCbCr.
+ */
+static void
+icl_program_input_csc(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ enum plane_id plane_id = plane->id;
+
+ static const u16 input_csc_matrix[][9] = {
+ /*
+ * BT.601 full range YCbCr -> full range RGB
+		 * The matrix required is:
+ * [1.000, 0.000, 1.371,
+ * 1.000, -0.336, -0.698,
+ * 1.000, 1.732, 0.0000]
+ */
+ [DRM_COLOR_YCBCR_BT601] = {
+ 0x7AF8, 0x7800, 0x0,
+ 0x8B28, 0x7800, 0x9AC0,
+ 0x0, 0x7800, 0x7DD8,
+ },
+ /*
+ * BT.709 full range YCbCr -> full range RGB
+		 * The matrix required is:
+ * [1.000, 0.000, 1.574,
+ * 1.000, -0.187, -0.468,
+ * 1.000, 1.855, 0.0000]
+ */
+ [DRM_COLOR_YCBCR_BT709] = {
+ 0x7C98, 0x7800, 0x0,
+ 0x9EF8, 0x7800, 0xAC00,
+ 0x0, 0x7800, 0x7ED8,
+ },
+ /*
+ * BT.2020 full range YCbCr -> full range RGB
+		 * The matrix required is:
+ * [1.000, 0.000, 1.474,
+ * 1.000, -0.1645, -0.5713,
+ * 1.000, 1.8814, 0.0000]
+ */
+ [DRM_COLOR_YCBCR_BT2020] = {
+ 0x7BC8, 0x7800, 0x0,
+ 0x8928, 0x7800, 0xAA88,
+ 0x0, 0x7800, 0x7F10,
+ },
+ };
+ const u16 *csc = input_csc_matrix[plane_state->hw.color_encoding];
+
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0),
+ ROFF(csc[0]) | GOFF(csc[1]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1),
+ BOFF(csc[2]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2),
+ ROFF(csc[3]) | GOFF(csc[4]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3),
+ BOFF(csc[5]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4),
+ ROFF(csc[6]) | GOFF(csc[7]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5),
+ BOFF(csc[8]));
+
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
+ PREOFF_YUV_TO_RGB_HI);
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+ PREOFF_YUV_TO_RGB_ME);
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
+ PREOFF_YUV_TO_RGB_LO);
+ intel_de_write_fw(dev_priv,
+ PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
+ intel_de_write_fw(dev_priv,
+ PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0);
+ intel_de_write_fw(dev_priv,
+ PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0);
+}
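A minimal userspace sketch of the colour math the BT.709 table above encodes, applied to one full-range YCbCr pixel. The float matrix is copied from the comment in the table; treating the 0x1800 PLANE_INPUT_CSC_PREOFF values as a -0.5 chroma pre-offset is an assumption made for this illustration, not something stated in the patch.

#include <stdio.h>

int main(void)
{
	/* full-range YCbCr input, normalised to [0, 1]; arbitrary sample */
	double y = 0.5, cb = 0.6, cr = 0.7;

	/* assumed chroma pre-offset: centre Cb/Cr around zero */
	cb -= 0.5;
	cr -= 0.5;

	/* BT.709 full-range YCbCr -> RGB, matrix from the comment above */
	double r = 1.000 * y + 0.000 * cb + 1.574 * cr;
	double g = 1.000 * y - 0.187 * cb - 0.468 * cr;
	double b = 1.000 * y + 1.855 * cb + 0.000 * cr;

	printf("R=%.3f G=%.3f B=%.3f\n", r, g, b);
	return 0;
}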
+
+static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
+{
+ return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
+ is_gen12_ccs_plane(fb, color_plane);
+}
+
+static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
+ int color_plane, unsigned int rotation)
+{
+ /*
+	 * The stride is expressed either as a number of 64-byte chunks for
+	 * linear buffers or as a number of tiles for tiled buffers.
+ */
+ if (is_surface_linear(fb, color_plane))
+ return 64;
+ else if (drm_rotation_90_or_270(rotation))
+ return intel_tile_height(fb, color_plane);
+ else
+ return intel_tile_width_bytes(fb, color_plane);
+}
+
+static u32 skl_plane_stride(const struct intel_plane_state *plane_state,
+ int color_plane)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ u32 stride = plane_state->color_plane[color_plane].stride;
+
+ if (color_plane >= fb->format->num_planes)
+ return 0;
+
+ return stride / skl_plane_stride_mult(fb, color_plane, rotation);
+}
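For example (illustrative numbers): a linear framebuffer with a byte stride of 16384 is programmed as 16384 / 64 = 256, while the same byte stride on an X-tiled surface, assuming the usual 512-byte X-tile row width on these platforms, is programmed as 16384 / 512 = 32 tiles.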
+
+static void
+skl_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ if (icl_is_hdr_plane(dev_priv, plane_id))
+ intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), 0);
+
+ skl_write_plane_wm(plane, crtc_state);
+
+ intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0);
+ intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static bool
+skl_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum plane_id plane_id = plane->id;
+ intel_wakeref_t wakeref;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
+ return false;
+
+ ret = intel_de_read(dev_priv, PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE;
+
+ *pipe = plane->pipe;
+
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+
+ return ret;
+}
+
+static u32 skl_plane_ctl_format(u32 pixel_format)
+{
+ switch (pixel_format) {
+ case DRM_FORMAT_C8:
+ return PLANE_CTL_FORMAT_INDEXED;
+ case DRM_FORMAT_RGB565:
+ return PLANE_CTL_FORMAT_RGB_565;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ return PLANE_CTL_FORMAT_XRGB_8888;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ return PLANE_CTL_FORMAT_XRGB_2101010;
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ return PLANE_CTL_FORMAT_XRGB_16161616F;
+ case DRM_FORMAT_XYUV8888:
+ return PLANE_CTL_FORMAT_XYUV;
+ case DRM_FORMAT_YUYV:
+ return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
+ case DRM_FORMAT_YVYU:
+ return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
+ case DRM_FORMAT_UYVY:
+ return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
+ case DRM_FORMAT_VYUY:
+ return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
+ case DRM_FORMAT_NV12:
+ return PLANE_CTL_FORMAT_NV12;
+ case DRM_FORMAT_P010:
+ return PLANE_CTL_FORMAT_P010;
+ case DRM_FORMAT_P012:
+ return PLANE_CTL_FORMAT_P012;
+ case DRM_FORMAT_P016:
+ return PLANE_CTL_FORMAT_P016;
+ case DRM_FORMAT_Y210:
+ return PLANE_CTL_FORMAT_Y210;
+ case DRM_FORMAT_Y212:
+ return PLANE_CTL_FORMAT_Y212;
+ case DRM_FORMAT_Y216:
+ return PLANE_CTL_FORMAT_Y216;
+ case DRM_FORMAT_XVYU2101010:
+ return PLANE_CTL_FORMAT_Y410;
+ case DRM_FORMAT_XVYU12_16161616:
+ return PLANE_CTL_FORMAT_Y412;
+ case DRM_FORMAT_XVYU16161616:
+ return PLANE_CTL_FORMAT_Y416;
+ default:
+ MISSING_CASE(pixel_format);
+ }
+
+ return 0;
+}
+
+static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
+{
+ if (!plane_state->hw.fb->format->has_alpha)
+ return PLANE_CTL_ALPHA_DISABLE;
+
+ switch (plane_state->hw.pixel_blend_mode) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ return PLANE_CTL_ALPHA_DISABLE;
+ case DRM_MODE_BLEND_PREMULTI:
+ return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+ case DRM_MODE_BLEND_COVERAGE:
+ return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
+ default:
+ MISSING_CASE(plane_state->hw.pixel_blend_mode);
+ return PLANE_CTL_ALPHA_DISABLE;
+ }
+}
+
+static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
+{
+ if (!plane_state->hw.fb->format->has_alpha)
+ return PLANE_COLOR_ALPHA_DISABLE;
+
+ switch (plane_state->hw.pixel_blend_mode) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ return PLANE_COLOR_ALPHA_DISABLE;
+ case DRM_MODE_BLEND_PREMULTI:
+ return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
+ case DRM_MODE_BLEND_COVERAGE:
+ return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
+ default:
+ MISSING_CASE(plane_state->hw.pixel_blend_mode);
+ return PLANE_COLOR_ALPHA_DISABLE;
+ }
+}
+
+static u32 skl_plane_ctl_tiling(u64 fb_modifier)
+{
+ switch (fb_modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ break;
+ case I915_FORMAT_MOD_X_TILED:
+ return PLANE_CTL_TILED_X;
+ case I915_FORMAT_MOD_Y_TILED:
+ return PLANE_CTL_TILED_Y;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+ return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ return PLANE_CTL_TILED_Y |
+ PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
+ PLANE_CTL_CLEAR_COLOR_DISABLE;
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE;
+ case I915_FORMAT_MOD_Yf_TILED:
+ return PLANE_CTL_TILED_YF;
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
+ default:
+ MISSING_CASE(fb_modifier);
+ }
+
+ return 0;
+}
+
+static u32 skl_plane_ctl_rotate(unsigned int rotate)
+{
+ switch (rotate) {
+ case DRM_MODE_ROTATE_0:
+ break;
+ /*
+	 * DRM_MODE_ROTATE_* is counter-clockwise to stay compatible with Xrandr,
+	 * while i915 HW rotation is clockwise; that's why the values are swapped.
+ */
+ case DRM_MODE_ROTATE_90:
+ return PLANE_CTL_ROTATE_270;
+ case DRM_MODE_ROTATE_180:
+ return PLANE_CTL_ROTATE_180;
+ case DRM_MODE_ROTATE_270:
+ return PLANE_CTL_ROTATE_90;
+ default:
+ MISSING_CASE(rotate);
+ }
+
+ return 0;
+}
+
+static u32 cnl_plane_ctl_flip(unsigned int reflect)
+{
+ switch (reflect) {
+ case 0:
+ break;
+ case DRM_MODE_REFLECT_X:
+ return PLANE_CTL_FLIP_HORIZONTAL;
+ case DRM_MODE_REFLECT_Y:
+ default:
+ MISSING_CASE(reflect);
+ }
+
+ return 0;
+}
+
+static u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ u32 plane_ctl = 0;
+
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ return plane_ctl;
+
+ if (crtc_state->gamma_enable)
+ plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
+
+ if (crtc_state->csc_enable)
+ plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
+
+ return plane_ctl;
+}
+
+static u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ u32 plane_ctl;
+
+ plane_ctl = PLANE_CTL_ENABLE;
+
+ if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
+ plane_ctl |= skl_plane_ctl_alpha(plane_state);
+ plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
+
+ if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
+ plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
+
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
+ }
+
+ plane_ctl |= skl_plane_ctl_format(fb->format->format);
+ plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
+ plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
+
+ if (INTEL_GEN(dev_priv) >= 10)
+ plane_ctl |= cnl_plane_ctl_flip(rotation &
+ DRM_MODE_REFLECT_MASK);
+
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
+
+ return plane_ctl;
+}
+
+static u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ u32 plane_color_ctl = 0;
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ return plane_color_ctl;
+
+ if (crtc_state->gamma_enable)
+ plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
+
+ if (crtc_state->csc_enable)
+ plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
+
+ return plane_color_ctl;
+}
+
+static u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ u32 plane_color_ctl = 0;
+
+ plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
+ plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
+
+ if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
+ switch (plane_state->hw.color_encoding) {
+ case DRM_COLOR_YCBCR_BT709:
+ plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
+ break;
+ case DRM_COLOR_YCBCR_BT2020:
+ plane_color_ctl |=
+ PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020;
+ break;
+ default:
+ plane_color_ctl |=
+ PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601;
+ }
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
+ } else if (fb->format->is_yuv) {
+ plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
+ }
+
+ return plane_color_ctl;
+}
+
+static int
+main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
+{
+ drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
+ (main_plane && main_plane >= fb->format->num_planes / 2));
+
+ return fb->format->num_planes / 2 + main_plane;
+}
+
+int skl_ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
+{
+ drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
+ ccs_plane < fb->format->num_planes / 2);
+
+ if (is_gen12_ccs_cc_plane(fb, ccs_plane))
+ return 0;
+
+ return ccs_plane - fb->format->num_planes / 2;
+}
+
+static int
+skl_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
+{
+ struct drm_i915_private *i915 = to_i915(fb->dev);
+
+ if (is_ccs_modifier(fb->modifier))
+ return main_to_ccs_plane(fb, main_plane);
+ else if (INTEL_GEN(i915) < 11 &&
+ intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
+ return 1;
+ else
+ return 0;
+}
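To make the AUX mapping concrete (derived from the two helpers above): for a two-plane render-compressed fb (main + CCS), main plane 0 maps to CCS plane 2/2 + 0 = 1; for a four-plane media-compressed NV12 fb (Y, UV, plus their CCS planes), plane 0 maps to 2 and plane 1 to 3; and for plain NV12 on pre-gen11 hardware the AUX plane is simply plane 1, the UV plane. On gen11+ the non-CCS NV12 case returns 0, presumably because the chroma data goes through a separate linked Y/UV hw plane there rather than an AUX surface.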
+
+static void
+skl_program_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int color_plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ u32 surf_addr = plane_state->color_plane[color_plane].offset;
+ u32 stride = skl_plane_stride(plane_state, color_plane);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int aux_plane = skl_main_to_aux_plane(fb, color_plane);
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ u32 x = plane_state->color_plane[color_plane].x;
+ u32 y = plane_state->color_plane[color_plane].y;
+ u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ u8 alpha = plane_state->hw.alpha >> 8;
+ u32 plane_color_ctl = 0, aux_dist = 0;
+ unsigned long irqflags;
+ u32 keymsk, keymax;
+ u32 plane_ctl = plane_state->ctl;
+
+ plane_ctl |= skl_plane_ctl_crtc(crtc_state);
+
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ plane_color_ctl = plane_state->color_ctl |
+ glk_plane_color_ctl_crtc(crtc_state);
+
+ /* Sizes are 0 based */
+ src_w--;
+ src_h--;
+
+ keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
+
+ keymsk = key->channel_mask & 0x7ffffff;
+ if (alpha < 0xff)
+ keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
+
+ /* The scaler will handle the output position */
+ if (plane_state->scaler_id >= 0) {
+ crtc_x = 0;
+ crtc_y = 0;
+ }
+
+ if (aux_plane) {
+ aux_dist = plane_state->color_plane[aux_plane].offset - surf_addr;
+
+ if (INTEL_GEN(dev_priv) < 12)
+ aux_dist |= skl_plane_stride(plane_state, aux_plane);
+ }
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), stride);
+ intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id),
+ (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id),
+ (src_h << 16) | src_w);
+
+ intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist);
+
+ if (icl_is_hdr_plane(dev_priv, plane_id))
+ intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id),
+ plane_state->cus_ctl);
+
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id),
+ plane_color_ctl);
+
+ if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
+ icl_program_input_csc(plane, crtc_state, plane_state);
+
+ if (fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
+ intel_uncore_write64_fw(&dev_priv->uncore,
+ PLANE_CC_VAL(pipe, plane_id), plane_state->ccval);
+
+ skl_write_plane_wm(plane, crtc_state);
+
+ intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id),
+ key->min_value);
+ intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), keymsk);
+ intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), keymax);
+
+ intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id),
+ (y << 16) | x);
+
+ if (INTEL_GEN(dev_priv) < 11)
+ intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id),
+ (plane_state->color_plane[1].y << 16) | plane_state->color_plane[1].x);
+
+ if (!drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
+ intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane);
+
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
+ intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
+ intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
+ intel_plane_ggtt_offset(plane_state) + surf_addr);
+
+ if (plane_state->scaler_id >= 0)
+ skl_program_plane_scaler(plane, crtc_state, plane_state);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+skl_plane_async_flip(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ bool async_flip)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ unsigned long irqflags;
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ u32 surf_addr = plane_state->color_plane[0].offset;
+ u32 plane_ctl = plane_state->ctl;
+
+ plane_ctl |= skl_plane_ctl_crtc(crtc_state);
+
+ if (async_flip)
+ plane_ctl |= PLANE_CTL_ASYNC_FLIP;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
+ intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
+ intel_plane_ggtt_offset(plane_state) + surf_addr);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+skl_update_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ int color_plane = 0;
+
+ if (plane_state->planar_linked_plane && !plane_state->planar_slave)
+ /* Program the UV plane on planar master */
+ color_plane = 1;
+
+ skl_program_plane(plane, crtc_state, plane_state, color_plane);
+}
+
+static bool intel_format_is_p01x(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ struct drm_format_name_buf format_name;
+
+ if (!fb)
+ return 0;
+
+ if (rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180) &&
+ is_ccs_modifier(fb->modifier)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "RC support only with 0/180 degree rotation (%x)\n",
+ rotation);
+ return -EINVAL;
+ }
+
+ if (rotation & DRM_MODE_REFLECT_X &&
+ fb->modifier == DRM_FORMAT_MOD_LINEAR) {
+ drm_dbg_kms(&dev_priv->drm,
+ "horizontal flip is not supported with linear surface formats\n");
+ return -EINVAL;
+ }
+
+ if (drm_rotation_90_or_270(rotation)) {
+ if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
+ fb->modifier != I915_FORMAT_MOD_Yf_TILED) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Y/Yf tiling required for 90/270!\n");
+ return -EINVAL;
+ }
+
+ /*
+ * 90/270 is not allowed with RGB64 16:16:16:16 and
+		 * Indexed 8-bit. RGB 16-bit 5:6:5 is allowed from gen11 onwards.
+ */
+ switch (fb->format->format) {
+ case DRM_FORMAT_RGB565:
+ if (INTEL_GEN(dev_priv) >= 11)
+ break;
+ fallthrough;
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_Y210:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
+ drm_dbg_kms(&dev_priv->drm,
+ "Unsupported pixel format %s for 90/270!\n",
+ drm_get_format_name(fb->format->format,
+ &format_name));
+ return -EINVAL;
+ default:
+ break;
+ }
+ }
+
+ /* Y-tiling is not supported in IF-ID Interlace mode */
+ if (crtc_state->hw.enable &&
+ crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
+ (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
+ fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Y/Yf tiling not supported in IF-ID mode\n");
+ return -EINVAL;
+ }
+
+ /* Wa_1606054188:tgl,adl-s */
+ if ((IS_ALDERLAKE_S(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
+ plane_state->ckey.flags & I915_SET_COLORKEY_SOURCE &&
+ intel_format_is_p01x(fb->format->format)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Source color keying not supported with P01x formats\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ int pipe_src_w = crtc_state->pipe_src_w;
+
+ /*
+ * Display WA #1175: cnl,glk
+ * Planes other than the cursor may cause FIFO underflow and display
+ * corruption if starting less than 4 pixels from the right edge of
+ * the screen.
+	 * Besides the above WA, this also rejects the similar case where
+	 * planes other than the cursor end less than 4 pixels from the left
+	 * edge of the screen, which may likewise cause FIFO underflow and
+	 * display corruption.
+ */
+ if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
+ (crtc_x + crtc_w < 4 || crtc_x > pipe_src_w - 4)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "requested plane X %s position %d invalid (valid range %d-%d)\n",
+ crtc_x + crtc_w < 4 ? "end" : "start",
+ crtc_x + crtc_w < 4 ? crtc_x + crtc_w : crtc_x,
+ 4, pipe_src_w - 4);
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+
+ /* Display WA #1106 */
+ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
+ src_w & 3 &&
+ (rotation == DRM_MODE_ROTATE_270 ||
+ rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) {
+ DRM_DEBUG_KMS("src width must be multiple of 4 for rotated planar YUV\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int skl_plane_max_scale(struct drm_i915_private *dev_priv,
+ const struct drm_framebuffer *fb)
+{
+ /*
+ * We don't yet know the final source width nor
+ * whether we can use the HQ scaler mode. Assume
+ * the best case.
+ * FIXME need to properly check this later.
+ */
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
+ !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
+ return 0x30000 - 1;
+ else
+ return 0x20000 - 1;
+}
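These values appear to be in the .16 fixed-point scaling-factor format used by intel_atomic_plane_check_clipping() (DRM_PLANE_HELPER_NO_SCALING is 1 << 16), so 0x30000 - 1 permits downscaling by just under 3x, and 0x20000 - 1, used for planar YUV on pre-glk hardware, by just under 2x.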
+
+static int intel_plane_min_width(struct intel_plane *plane,
+ const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ if (plane->min_width)
+ return plane->min_width(fb, color_plane, rotation);
+ else
+ return 1;
+}
+
+static int intel_plane_max_width(struct intel_plane *plane,
+ const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ if (plane->max_width)
+ return plane->max_width(fb, color_plane, rotation);
+ else
+ return INT_MAX;
+}
+
+static int intel_plane_max_height(struct intel_plane *plane,
+ const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ if (plane->max_height)
+ return plane->max_height(fb, color_plane, rotation);
+ else
+ return INT_MAX;
+}
+
+static bool
+skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
+ int main_x, int main_y, u32 main_offset,
+ int ccs_plane)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int aux_x = plane_state->color_plane[ccs_plane].x;
+ int aux_y = plane_state->color_plane[ccs_plane].y;
+ u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
+ u32 alignment = intel_surf_alignment(fb, ccs_plane);
+ int hsub;
+ int vsub;
+
+ intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
+ while (aux_offset >= main_offset && aux_y <= main_y) {
+ int x, y;
+
+ if (aux_x == main_x && aux_y == main_y)
+ break;
+
+ if (aux_offset == 0)
+ break;
+
+ x = aux_x / hsub;
+ y = aux_y / vsub;
+ aux_offset = intel_plane_adjust_aligned_offset(&x, &y,
+ plane_state,
+ ccs_plane,
+ aux_offset,
+ aux_offset -
+ alignment);
+ aux_x = x * hsub + aux_x % hsub;
+ aux_y = y * vsub + aux_y % vsub;
+ }
+
+ if (aux_x != main_x || aux_y != main_y)
+ return false;
+
+ plane_state->color_plane[ccs_plane].offset = aux_offset;
+ plane_state->color_plane[ccs_plane].x = aux_x;
+ plane_state->color_plane[ccs_plane].y = aux_y;
+
+ return true;
+}
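In short, the loop above walks the previously computed AUX offset back one alignment unit at a time, recomputing the implied x/y on each step, until the AUX x/y coincide with the main surface x/y or the offset reaches zero; if no match is found the function returns false and the callers report that no suitable surface offset exists.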
+
+
+int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
+ int *x, int *y, u32 *offset)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ const int aux_plane = skl_main_to_aux_plane(fb, 0);
+ const u32 aux_offset = plane_state->color_plane[aux_plane].offset;
+ const u32 alignment = intel_surf_alignment(fb, 0);
+ const int w = drm_rect_width(&plane_state->uapi.src) >> 16;
+
+ intel_add_fb_offsets(x, y, plane_state, 0);
+ *offset = intel_plane_compute_aligned_offset(x, y, plane_state, 0);
+ if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment)))
+ return -EINVAL;
+
+ /*
+ * AUX surface offset is specified as the distance from the
+ * main surface offset, and it must be non-negative. Make
+ * sure that is what we will get.
+ */
+ if (aux_plane && *offset > aux_offset)
+ *offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0,
+ *offset,
+ aux_offset & ~(alignment - 1));
+
+ /*
+ * When using an X-tiled surface, the plane blows up
+	 * if the x offset + width exceeds the stride.
+	 *
+	 * TODO: linear and Y-tiled seem fine, Yf untested.
+ */
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
+ int cpp = fb->format->cpp[0];
+
+ while ((*x + w) * cpp > plane_state->color_plane[0].stride) {
+ if (*offset == 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Unable to find suitable display surface offset due to X-tiling\n");
+ return -EINVAL;
+ }
+
+ *offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0,
+ *offset,
+ *offset - alignment);
+ }
+ }
+
+ return 0;
+}
+
+static int skl_check_main_surface(struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ const unsigned int rotation = plane_state->hw.rotation;
+ int x = plane_state->uapi.src.x1 >> 16;
+ int y = plane_state->uapi.src.y1 >> 16;
+ const int w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ const int h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ const int min_width = intel_plane_min_width(plane, fb, 0, rotation);
+ const int max_width = intel_plane_max_width(plane, fb, 0, rotation);
+ const int max_height = intel_plane_max_height(plane, fb, 0, rotation);
+ const int aux_plane = skl_main_to_aux_plane(fb, 0);
+ const u32 alignment = intel_surf_alignment(fb, 0);
+ u32 offset;
+ int ret;
+
+ if (w > max_width || w < min_width || h > max_height) {
+ drm_dbg_kms(&dev_priv->drm,
+ "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
+ w, h, min_width, max_width, max_height);
+ return -EINVAL;
+ }
+
+ ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
+ if (ret)
+ return ret;
+
+ /*
+	 * The CCS AUX surface doesn't have its own x/y offsets, so we must
+	 * make sure they match the main surface x/y offsets.
+ */
+ if (is_ccs_modifier(fb->modifier)) {
+ while (!skl_check_main_ccs_coordinates(plane_state, x, y,
+ offset, aux_plane)) {
+ if (offset == 0)
+ break;
+
+ offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
+ offset, offset - alignment);
+ }
+
+ if (x != plane_state->color_plane[aux_plane].x ||
+ y != plane_state->color_plane[aux_plane].y) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Unable to find suitable display surface offset due to CCS\n");
+ return -EINVAL;
+ }
+ }
+
+ drm_WARN_ON(&dev_priv->drm, x > 8191 || y > 8191);
+
+ plane_state->color_plane[0].offset = offset;
+ plane_state->color_plane[0].x = x;
+ plane_state->color_plane[0].y = y;
+
+ /*
+ * Put the final coordinates back so that the src
+ * coordinate checks will see the right values.
+ */
+ drm_rect_translate_to(&plane_state->uapi.src,
+ x << 16, y << 16);
+
+ return 0;
+}
+
+static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ int uv_plane = 1;
+ int max_width = intel_plane_max_width(plane, fb, uv_plane, rotation);
+ int max_height = intel_plane_max_height(plane, fb, uv_plane, rotation);
+ int x = plane_state->uapi.src.x1 >> 17;
+ int y = plane_state->uapi.src.y1 >> 17;
+ int w = drm_rect_width(&plane_state->uapi.src) >> 17;
+ int h = drm_rect_height(&plane_state->uapi.src) >> 17;
+ u32 offset;
+
+ /* FIXME not quite sure how/if these apply to the chroma plane */
+ if (w > max_width || h > max_height) {
+ drm_dbg_kms(&i915->drm,
+ "CbCr source size %dx%d too big (limit %dx%d)\n",
+ w, h, max_width, max_height);
+ return -EINVAL;
+ }
+
+ intel_add_fb_offsets(&x, &y, plane_state, uv_plane);
+ offset = intel_plane_compute_aligned_offset(&x, &y,
+ plane_state, uv_plane);
+
+ if (is_ccs_modifier(fb->modifier)) {
+ int ccs_plane = main_to_ccs_plane(fb, uv_plane);
+ u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
+ u32 alignment = intel_surf_alignment(fb, uv_plane);
+
+ if (offset > aux_offset)
+ offset = intel_plane_adjust_aligned_offset(&x, &y,
+ plane_state,
+ uv_plane,
+ offset,
+ aux_offset & ~(alignment - 1));
+
+ while (!skl_check_main_ccs_coordinates(plane_state, x, y,
+ offset, ccs_plane)) {
+ if (offset == 0)
+ break;
+
+ offset = intel_plane_adjust_aligned_offset(&x, &y,
+ plane_state,
+ uv_plane,
+ offset, offset - alignment);
+ }
+
+ if (x != plane_state->color_plane[ccs_plane].x ||
+ y != plane_state->color_plane[ccs_plane].y) {
+ drm_dbg_kms(&i915->drm,
+ "Unable to find suitable display surface offset due to CCS\n");
+ return -EINVAL;
+ }
+ }
+
+ drm_WARN_ON(&i915->drm, x > 8191 || y > 8191);
+
+ plane_state->color_plane[uv_plane].offset = offset;
+ plane_state->color_plane[uv_plane].x = x;
+ plane_state->color_plane[uv_plane].y = y;
+
+ return 0;
+}
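Note on the >> 17 shifts above: they fold the usual >> 16 fixed-point-to-pixel conversion together with a divide by two for the 4:2:0 chroma plane, so e.g. a src.x1 of 200 << 16 becomes x = 100 on the UV plane.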
+
+static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int src_x = plane_state->uapi.src.x1 >> 16;
+ int src_y = plane_state->uapi.src.y1 >> 16;
+ u32 offset;
+ int ccs_plane;
+
+ for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) {
+ int main_hsub, main_vsub;
+ int hsub, vsub;
+ int x, y;
+
+ if (!is_ccs_plane(fb, ccs_plane) ||
+ is_gen12_ccs_cc_plane(fb, ccs_plane))
+ continue;
+
+ intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
+ skl_ccs_to_main_plane(fb, ccs_plane));
+ intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
+
+ hsub *= main_hsub;
+ vsub *= main_vsub;
+ x = src_x / hsub;
+ y = src_y / vsub;
+
+ intel_add_fb_offsets(&x, &y, plane_state, ccs_plane);
+
+ offset = intel_plane_compute_aligned_offset(&x, &y,
+ plane_state,
+ ccs_plane);
+
+ plane_state->color_plane[ccs_plane].offset = offset;
+ plane_state->color_plane[ccs_plane].x = (x * hsub +
+ src_x % hsub) /
+ main_hsub;
+ plane_state->color_plane[ccs_plane].y = (y * vsub +
+ src_y % vsub) /
+ main_vsub;
+ }
+
+ return 0;
+}
+
+static int skl_check_plane_surface(struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int ret, i;
+
+ ret = intel_plane_compute_gtt(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ /*
+ * Handle the AUX surface first since the main surface setup depends on
+ * it.
+ */
+ if (is_ccs_modifier(fb->modifier)) {
+ ret = skl_check_ccs_aux_surface(plane_state);
+ if (ret)
+ return ret;
+ }
+
+ if (intel_format_info_is_yuv_semiplanar(fb->format,
+ fb->modifier)) {
+ ret = skl_check_nv12_aux_surface(plane_state);
+ if (ret)
+ return ret;
+ }
+
+ for (i = fb->format->num_planes; i < ARRAY_SIZE(plane_state->color_plane); i++) {
+ plane_state->color_plane[i].offset = 0;
+ plane_state->color_plane[i].x = 0;
+ plane_state->color_plane[i].y = 0;
+ }
+
+ ret = skl_check_main_surface(plane_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static bool skl_fb_scalable(const struct drm_framebuffer *fb)
+{
+ if (!fb)
+ return false;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_C8:
+ return false;
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ return INTEL_GEN(to_i915(fb->dev)) >= 11;
+ default:
+ return true;
+ }
+}
+
+static int skl_plane_check(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int min_scale = DRM_PLANE_HELPER_NO_SCALING;
+ int max_scale = DRM_PLANE_HELPER_NO_SCALING;
+ int ret;
+
+ ret = skl_plane_check_fb(crtc_state, plane_state);
+ if (ret)
+ return ret;
+
+ /* use scaler when colorkey is not required */
+ if (!plane_state->ckey.flags && skl_fb_scalable(fb)) {
+ min_scale = 1;
+ max_scale = skl_plane_max_scale(dev_priv, fb);
+ }
+
+ ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
+ min_scale, max_scale, true);
+ if (ret)
+ return ret;
+
+ ret = skl_check_plane_surface(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ ret = skl_plane_check_dst_coordinates(crtc_state, plane_state);
+ if (ret)
+ return ret;
+
+ ret = intel_plane_check_src_coordinates(plane_state);
+ if (ret)
+ return ret;
+
+ ret = skl_plane_check_nv12_rotation(plane_state);
+ if (ret)
+ return ret;
+
+	/* HW only has 8 bits of alpha precision, disable plane if invisible */
+ if (!(plane_state->hw.alpha >> 8))
+ plane_state->uapi.visible = false;
+
+ plane_state->ctl = skl_plane_ctl(crtc_state, plane_state);
+
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ plane_state->color_ctl = glk_plane_color_ctl(crtc_state,
+ plane_state);
+
+ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
+ icl_is_hdr_plane(dev_priv, plane->id))
+ /* Enable and use MPEG-2 chroma siting */
+ plane_state->cus_ctl = PLANE_CUS_ENABLE |
+ PLANE_CUS_HPHASE_0 |
+ PLANE_CUS_VPHASE_SIGN_NEGATIVE | PLANE_CUS_VPHASE_0_25;
+ else
+ plane_state->cus_ctl = 0;
+
+ return 0;
+}
+
+static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ if (!HAS_FBC(dev_priv))
+ return false;
+
+ return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
+}
+
+static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ /* Display WA #0870: skl, bxt */
+ if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+ return false;
+
+ if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
+ return false;
+
+ if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
+ return false;
+
+ return true;
+}
+
+static const u32 *skl_get_plane_formats(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id,
+ int *num_formats)
+{
+ if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
+ *num_formats = ARRAY_SIZE(skl_planar_formats);
+ return skl_planar_formats;
+ } else {
+ *num_formats = ARRAY_SIZE(skl_plane_formats);
+ return skl_plane_formats;
+ }
+}
+
+static const u32 *glk_get_plane_formats(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id,
+ int *num_formats)
+{
+ if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
+ *num_formats = ARRAY_SIZE(glk_planar_formats);
+ return glk_planar_formats;
+ } else {
+ *num_formats = ARRAY_SIZE(skl_plane_formats);
+ return skl_plane_formats;
+ }
+}
+
+static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id,
+ int *num_formats)
+{
+ if (icl_is_hdr_plane(dev_priv, plane_id)) {
+ *num_formats = ARRAY_SIZE(icl_hdr_plane_formats);
+ return icl_hdr_plane_formats;
+ } else if (icl_is_nv12_y_plane(dev_priv, plane_id)) {
+ *num_formats = ARRAY_SIZE(icl_sdr_y_plane_formats);
+ return icl_sdr_y_plane_formats;
+ } else {
+ *num_formats = ARRAY_SIZE(icl_sdr_uv_plane_formats);
+ return icl_sdr_uv_plane_formats;
+ }
+}
+
+static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ if (plane_id == PLANE_CURSOR)
+ return false;
+
+ if (INTEL_GEN(dev_priv) >= 10)
+ return true;
+
+ if (IS_GEMINILAKE(dev_priv))
+ return pipe != PIPE_C;
+
+ return pipe != PIPE_C &&
+ (plane_id == PLANE_PRIMARY ||
+ plane_id == PLANE_SPRITE0);
+}
+
+static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ struct intel_plane *plane = to_intel_plane(_plane);
+
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ break;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ if (!plane->has_ccs)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ if (is_ccs_modifier(modifier))
+ return true;
+ fallthrough;
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_XYUV8888:
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ case DRM_FORMAT_XVYU2101010:
+ if (modifier == I915_FORMAT_MOD_Yf_TILED)
+ return true;
+ fallthrough;
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_Y210:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
+ if (modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED ||
+ modifier == I915_FORMAT_MOD_Y_TILED)
+ return true;
+ fallthrough;
+ default:
+ return false;
+ }
+}
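The cascading fallthroughs above implement "this group supports the listed modifiers plus everything the groups below it support": e.g. the 8888 formats additionally accept the CCS modifiers, the 10-bit/YUV group additionally accepts Yf tiling, and the final group (C8, FP16, Y21x, ...) is limited to linear, X and Y tiling.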
+
+static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv,
+ enum plane_id plane_id)
+{
+ /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */
+ if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv) ||
+ IS_TGL_DISP_STEPPING(dev_priv, STEP_A0, STEP_C0))
+ return false;
+
+ return plane_id < PLANE_SPRITE4;
+}
+
+static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ struct drm_i915_private *dev_priv = to_i915(_plane->dev);
+ struct intel_plane *plane = to_intel_plane(_plane);
+
+ switch (modifier) {
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ if (!gen12_plane_supports_mc_ccs(dev_priv, plane->id))
+ return false;
+ fallthrough;
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+ break;
+ default:
+ return false;
+ }
+
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ if (is_ccs_modifier(modifier))
+ return true;
+ fallthrough;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_XYUV8888:
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ if (modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)
+ return true;
+ fallthrough;
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_XVYU2101010:
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_Y210:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
+ if (modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED ||
+ modifier == I915_FORMAT_MOD_Y_TILED)
+ return true;
+ fallthrough;
+ default:
+ return false;
+ }
+}
+
+static const u64 *gen12_get_plane_modifiers(struct drm_i915_private *dev_priv,
+ enum plane_id plane_id)
+{
+ if (gen12_plane_supports_mc_ccs(dev_priv, plane_id))
+ return gen12_plane_format_modifiers_mc_ccs;
+ else
+ return gen12_plane_format_modifiers_rc_ccs;
+}
+
+static const struct drm_plane_funcs skl_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = skl_plane_format_mod_supported,
+};
+
+static const struct drm_plane_funcs gen12_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = gen12_plane_format_mod_supported,
+};
+
+static void
+skl_plane_enable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+skl_plane_disable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+struct intel_plane *
+skl_universal_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ const struct drm_plane_funcs *plane_funcs;
+ struct intel_plane *plane;
+ enum drm_plane_type plane_type;
+ unsigned int supported_rotations;
+ unsigned int supported_csc;
+ const u64 *modifiers;
+ const u32 *formats;
+ int num_formats;
+ int ret;
+
+ plane = intel_plane_alloc();
+ if (IS_ERR(plane))
+ return plane;
+
+ plane->pipe = pipe;
+ plane->id = plane_id;
+ plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id);
+
+ plane->has_fbc = skl_plane_has_fbc(dev_priv, pipe, plane_id);
+ if (plane->has_fbc) {
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 11) {
+ plane->min_width = icl_plane_min_width;
+ plane->max_width = icl_plane_max_width;
+ plane->max_height = icl_plane_max_height;
+ } else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ plane->max_width = glk_plane_max_width;
+ plane->max_height = skl_plane_max_height;
+ } else {
+ plane->max_width = skl_plane_max_width;
+ plane->max_height = skl_plane_max_height;
+ }
+
+ plane->max_stride = skl_plane_max_stride;
+ plane->update_plane = skl_update_plane;
+ plane->disable_plane = skl_disable_plane;
+ plane->get_hw_state = skl_plane_get_hw_state;
+ plane->check_plane = skl_plane_check;
+ plane->min_cdclk = skl_plane_min_cdclk;
+
+ if (plane_id == PLANE_PRIMARY) {
+ plane->need_async_flip_disable_wa = IS_GEN_RANGE(dev_priv, 9, 10);
+ plane->async_flip = skl_plane_async_flip;
+ plane->enable_flip_done = skl_plane_enable_flip_done;
+ plane->disable_flip_done = skl_plane_disable_flip_done;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ formats = icl_get_plane_formats(dev_priv, pipe,
+ plane_id, &num_formats);
+ else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ formats = glk_get_plane_formats(dev_priv, pipe,
+ plane_id, &num_formats);
+ else
+ formats = skl_get_plane_formats(dev_priv, pipe,
+ plane_id, &num_formats);
+
+ plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
+ if (INTEL_GEN(dev_priv) >= 12) {
+ modifiers = gen12_get_plane_modifiers(dev_priv, plane_id);
+ plane_funcs = &gen12_plane_funcs;
+ } else {
+ if (plane->has_ccs)
+ modifiers = skl_plane_format_modifiers_ccs;
+ else
+ modifiers = skl_plane_format_modifiers_noccs;
+ plane_funcs = &skl_plane_funcs;
+ }
+
+ if (plane_id == PLANE_PRIMARY)
+ plane_type = DRM_PLANE_TYPE_PRIMARY;
+ else
+ plane_type = DRM_PLANE_TYPE_OVERLAY;
+
+ ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ 0, plane_funcs,
+ formats, num_formats, modifiers,
+ plane_type,
+ "plane %d%c", plane_id + 1,
+ pipe_name(pipe));
+ if (ret)
+ goto fail;
+
+ supported_rotations =
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
+ DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
+
+ if (INTEL_GEN(dev_priv) >= 10)
+ supported_rotations |= DRM_MODE_REFLECT_X;
+
+ drm_plane_create_rotation_property(&plane->base,
+ DRM_MODE_ROTATE_0,
+ supported_rotations);
+
+ supported_csc = BIT(DRM_COLOR_YCBCR_BT601) | BIT(DRM_COLOR_YCBCR_BT709);
+
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ supported_csc |= BIT(DRM_COLOR_YCBCR_BT2020);
+
+ drm_plane_create_color_properties(&plane->base,
+ supported_csc,
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+ DRM_COLOR_YCBCR_BT709,
+ DRM_COLOR_YCBCR_LIMITED_RANGE);
+
+ drm_plane_create_alpha_property(&plane->base);
+ drm_plane_create_blend_mode_property(&plane->base,
+ BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
+
+ drm_plane_create_zpos_immutable_property(&plane->base, plane_id);
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ drm_plane_enable_fb_damage_clips(&plane->base);
+
+ if (INTEL_GEN(dev_priv) >= 10)
+ drm_plane_create_scaling_filter_property(&plane->base,
+ BIT(DRM_SCALING_FILTER_DEFAULT) |
+ BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
+
+ drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
+
+ return plane;
+
+fail:
+ intel_plane_free(plane);
+
+ return ERR_PTR(ret);
+}
+
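For context, a hypothetical caller sketch (not part of this patch): the CRTC init path would create the primary plane for a pipe roughly like this, using the declarations added in skl_universal_plane.h further down. Any name not visible in this diff is an assumption.

	/* sketch only: create and check the primary plane for a pipe */
	struct intel_plane *primary;

	primary = skl_universal_plane_create(dev_priv, pipe, PLANE_PRIMARY);
	if (IS_ERR(primary))
		return PTR_ERR(primary);
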
+void
+skl_get_initial_plane_config(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe;
+ u32 val, base, offset, stride_mult, tiling, alpha;
+ int fourcc, pixel_format;
+ unsigned int aligned_height;
+ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
+
+ if (!plane->get_hw_state(plane, &pipe))
+ return;
+
+ drm_WARN_ON(dev, pipe != crtc->pipe);
+
+ if (crtc_state->bigjoiner) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Unsupported bigjoiner configuration for initial FB\n");
+ return;
+ }
+
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb) {
+ drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
+ return;
+ }
+
+ fb = &intel_fb->base;
+
+ fb->dev = dev;
+
+ val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id));
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
+ else
+ pixel_format = val & PLANE_CTL_FORMAT_MASK;
+
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ alpha = intel_de_read(dev_priv,
+ PLANE_COLOR_CTL(pipe, plane_id));
+ alpha &= PLANE_COLOR_ALPHA_MASK;
+ } else {
+ alpha = val & PLANE_CTL_ALPHA_MASK;
+ }
+
+ fourcc = skl_format_to_fourcc(pixel_format,
+ val & PLANE_CTL_ORDER_RGBX, alpha);
+ fb->format = drm_format_info(fourcc);
+
+ tiling = val & PLANE_CTL_TILED_MASK;
+ switch (tiling) {
+ case PLANE_CTL_TILED_LINEAR:
+ fb->modifier = DRM_FORMAT_MOD_LINEAR;
+ break;
+ case PLANE_CTL_TILED_X:
+ plane_config->tiling = I915_TILING_X;
+ fb->modifier = I915_FORMAT_MOD_X_TILED;
+ break;
+ case PLANE_CTL_TILED_Y:
+ plane_config->tiling = I915_TILING_Y;
+ if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
+ fb->modifier = INTEL_GEN(dev_priv) >= 12 ?
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS :
+ I915_FORMAT_MOD_Y_TILED_CCS;
+ else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE)
+ fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
+ else
+ fb->modifier = I915_FORMAT_MOD_Y_TILED;
+ break;
+ case PLANE_CTL_TILED_YF:
+ if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
+ fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
+ else
+ fb->modifier = I915_FORMAT_MOD_Yf_TILED;
+ break;
+ default:
+ MISSING_CASE(tiling);
+ goto error;
+ }
+
+ /*
+ * DRM_MODE_ROTATE_ values are counter-clockwise to stay compatible with
+ * Xrandr, while i915 HW rotation is clockwise; that's why the 90/270 cases
+ * are swapped below.
+ */
+ switch (val & PLANE_CTL_ROTATE_MASK) {
+ case PLANE_CTL_ROTATE_0:
+ plane_config->rotation = DRM_MODE_ROTATE_0;
+ break;
+ case PLANE_CTL_ROTATE_90:
+ plane_config->rotation = DRM_MODE_ROTATE_270;
+ break;
+ case PLANE_CTL_ROTATE_180:
+ plane_config->rotation = DRM_MODE_ROTATE_180;
+ break;
+ case PLANE_CTL_ROTATE_270:
+ plane_config->rotation = DRM_MODE_ROTATE_90;
+ break;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 10 &&
+ val & PLANE_CTL_FLIP_HORIZONTAL)
+ plane_config->rotation |= DRM_MODE_REFLECT_X;
+
+ /* 90/270 degree rotation would require extra work */
+ if (drm_rotation_90_or_270(plane_config->rotation))
+ goto error;
+
+ base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000;
+ plane_config->base = base;
+
+ offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id));
+
+ val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id));
+ fb->height = ((val >> 16) & 0xffff) + 1;
+ fb->width = ((val >> 0) & 0xffff) + 1;
+
+ val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id));
+ stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
+ fb->pitches[0] = (val & 0x3ff) * stride_mult;
+
+ aligned_height = intel_fb_align_height(fb, 0, fb->height);
+
+ plane_config->size = fb->pitches[0] * aligned_height;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ crtc->base.name, plane->base.name, fb->width, fb->height,
+ fb->format->cpp[0] * 8, base, fb->pitches[0],
+ plane_config->size);
+
+ plane_config->fb = intel_fb;
+ return;
+
+error:
+ kfree(intel_fb);
+}
+
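The readout above derives the framebuffer pitch from the low 10 bits of PLANE_STRIDE times a modifier-dependent multiplier, then sizes the BIOS framebuffer as pitch times the tile-aligned height. A standalone arithmetic sketch of that computation (the 512-byte X-tile row width and the sample values are assumptions, used only for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t stride_reg = 0x10;      /* PLANE_STRIDE low bits: 16 tiles per row (illustrative) */
	uint32_t stride_mult = 512;      /* assumed X-tile row width in bytes */
	uint32_t pitch = (stride_reg & 0x3ff) * stride_mult;
	uint32_t aligned_height = 1088;  /* 1080 rounded up to a tile-row multiple (illustrative) */

	printf("pitch=%u bytes, initial fb size=%u bytes\n", pitch, pitch * aligned_height);
	return 0;
}
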
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.h b/drivers/gpu/drm/i915/display/skl_universal_plane.h
new file mode 100644
index 000000000000..818266653630
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _SKL_UNIVERSAL_PLANE_H_
+#define _SKL_UNIVERSAL_PLANE_H_
+
+#include <linux/types.h>
+
+struct drm_framebuffer;
+struct drm_i915_private;
+struct intel_crtc;
+struct intel_initial_plane_config;
+struct intel_plane_state;
+
+enum pipe;
+enum plane_id;
+
+struct intel_plane *
+skl_universal_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id);
+
+void skl_get_initial_plane_config(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config);
+
+int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
+
+int skl_ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane);
+int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
+ int *x, int *y, u32 *offset);
+
+bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv,
+ enum plane_id plane_id);
+bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index f94025ec603a..1059a26c1f58 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -38,6 +38,7 @@
#include "intel_fifo_underrun.h"
#include "intel_panel.h"
#include "intel_sideband.h"
+#include "skl_scaler.h"
/* return pixels in terms of txbyteclkhs */
static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 3c0b157e2a35..01fe89afe8c0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -35,7 +35,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
* to handle all possible callers, and given typical object sizes,
* the alignment of the buddy allocation will naturally match.
*/
- vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev,
+ vaddr = dma_alloc_coherent(obj->base.dev->dev,
roundup_pow_of_two(obj->base.size),
&dma, GFP_KERNEL);
if (!vaddr)
@@ -83,7 +83,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
err_st:
kfree(st);
err_pci:
- dma_free_coherent(&obj->base.dev->pdev->dev,
+ dma_free_coherent(obj->base.dev->dev,
roundup_pow_of_two(obj->base.size),
vaddr, dma);
return -ENOMEM;
@@ -129,7 +129,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
sg_free_table(pages);
kfree(pages);
- dma_free_coherent(&obj->base.dev->pdev->dev,
+ dma_free_coherent(obj->base.dev->dev,
roundup_pow_of_two(obj->base.size),
vaddr, dma);
}
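This and the following hunks all apply the same conversion: drm_device::dev already points at the underlying struct device, so DMA and PM helpers can take it directly, and to_pci_dev() (a container_of() wrapper) is used only where PCI-specific fields are needed. A sketch of the pattern, not taken verbatim from the patch:

	struct device *dev = i915->drm.dev;        /* generic device: dma_*, pm_runtime_*, dev_warn() */
	struct pci_dev *pdev = to_pci_dev(dev);    /* PCI view: pdev->irq, pdev->revision, pci_*() */
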
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index cf83c208688c..680b370a8ef3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -172,7 +172,7 @@ rebuild_st:
max_segment = PAGE_SIZE;
goto rebuild_st;
} else {
- dev_warn(&i915->drm.pdev->dev,
+ dev_warn(i915->drm.dev,
"Failed to DMA remap %lu pages\n",
page_count);
goto err_pages;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index fb1b1d096975..376e82e17061 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1269,7 +1269,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
/* Waiting to drain ELSP? */
if (execlists_active(&engine->execlists)) {
- synchronize_hardirq(engine->i915->drm.pdev->irq);
+ synchronize_hardirq(to_pci_dev(engine->i915->drm.dev)->irq);
intel_engine_flush_submission(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 700588bc9d57..ec2bf963ced9 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -792,7 +792,7 @@ static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
phys_addr_t phys_addr;
int ret;
@@ -862,7 +862,7 @@ static struct resource pci_resource(struct pci_dev *pdev, int bar)
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
unsigned int size;
u16 snb_gmch_ctl;
@@ -1006,7 +1006,7 @@ static u64 iris_pte_encode(dma_addr_t addr,
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
unsigned int size;
u16 snb_gmch_ctl;
@@ -1069,7 +1069,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
phys_addr_t gmadr_base;
int ret;
- ret = intel_gmch_probe(i915->bridge_dev, i915->drm.pdev, NULL);
+ ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL);
if (!ret) {
drm_err(&i915->drm, "failed to set up gmch\n");
return -EIO;
@@ -1114,7 +1114,7 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
ggtt->vm.gt = gt;
ggtt->vm.i915 = i915;
- ggtt->vm.dma = &i915->drm.pdev->dev;
+ ggtt->vm.dma = i915->drm.dev;
if (INTEL_GEN(i915) <= 5)
ret = i915_gmch_probe(ggtt);
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index 96b85a10ef33..3f940ae27028 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -301,7 +301,7 @@ void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
ppgtt->vm.gt = gt;
ppgtt->vm.i915 = i915;
- ppgtt->vm.dma = &i915->drm.pdev->dev;
+ ppgtt->vm.dma = i915->drm.dev;
ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 35504c97f11d..9843e1d4327f 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -485,14 +485,14 @@ static bool rc6_supported(struct intel_rc6 *rc6)
static void rpm_get(struct intel_rc6 *rc6)
{
GEM_BUG_ON(rc6->wakeref);
- pm_runtime_get_sync(&rc6_to_i915(rc6)->drm.pdev->dev);
+ pm_runtime_get_sync(rc6_to_i915(rc6)->drm.dev);
rc6->wakeref = true;
}
static void rpm_put(struct intel_rc6 *rc6)
{
GEM_BUG_ON(!rc6->wakeref);
- pm_runtime_put(&rc6_to_i915(rc6)->drm.pdev->dev);
+ pm_runtime_put(rc6_to_i915(rc6)->drm.dev);
rc6->wakeref = false;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index 60393ce5614d..e326d3c0bc10 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -26,12 +26,12 @@ static int init_fake_lmem_bar(struct intel_memory_region *mem)
if (ret)
return ret;
- mem->remap_addr = dma_map_resource(&i915->drm.pdev->dev,
+ mem->remap_addr = dma_map_resource(i915->drm.dev,
mem->region.start,
mem->fake_mappable.size,
PCI_DMA_BIDIRECTIONAL,
DMA_ATTR_FORCE_CONTIGUOUS);
- if (dma_mapping_error(&i915->drm.pdev->dev, mem->remap_addr)) {
+ if (dma_mapping_error(i915->drm.dev, mem->remap_addr)) {
drm_mm_remove_node(&mem->fake_mappable);
return -EINVAL;
}
@@ -56,7 +56,7 @@ static void release_fake_lmem_bar(struct intel_memory_region *mem)
drm_mm_remove_node(&mem->fake_mappable);
- dma_unmap_resource(&mem->i915->drm.pdev->dev,
+ dma_unmap_resource(mem->i915->drm.dev,
mem->remap_addr,
mem->fake_mappable.size,
PCI_DMA_BIDIRECTIONAL,
@@ -104,7 +104,7 @@ static const struct intel_memory_region_ops intel_region_lmem_ops = {
struct intel_memory_region *
intel_setup_fake_lmem(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct intel_memory_region *mem;
resource_size_t mappable_end;
resource_size_t io_start;
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 61410cd62927..afe0342dcd47 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -178,7 +178,7 @@ static int i915_do_reset(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- struct pci_dev *pdev = gt->i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
int err;
/* Assert reset for at least 20 usec, and wait for acknowledgement. */
@@ -207,7 +207,7 @@ static int g33_do_reset(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- struct pci_dev *pdev = gt->i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
return wait_for_atomic(g4x_reset_complete(pdev), 50);
@@ -217,7 +217,7 @@ static int g4x_do_reset(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- struct pci_dev *pdev = gt->i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
struct intel_uncore *uncore = gt->uncore;
int ret;
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index ec366cf9ef56..8c0c050c4af9 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -71,17 +71,25 @@ const struct i915_rev_steppings kbl_revids[] = {
[7] = { .gt_stepping = KBL_REVID_G0, .disp_stepping = KBL_REVID_C0 },
};
-const struct i915_rev_steppings tgl_uy_revids[] = {
- [0] = { .gt_stepping = TGL_REVID_A0, .disp_stepping = TGL_REVID_A0 },
- [1] = { .gt_stepping = TGL_REVID_B0, .disp_stepping = TGL_REVID_C0 },
- [2] = { .gt_stepping = TGL_REVID_B1, .disp_stepping = TGL_REVID_C0 },
- [3] = { .gt_stepping = TGL_REVID_C0, .disp_stepping = TGL_REVID_D0 },
+const struct i915_rev_steppings tgl_uy_revid_step_tbl[] = {
+ [0] = { .gt_stepping = STEP_A0, .disp_stepping = STEP_A0 },
+ [1] = { .gt_stepping = STEP_B0, .disp_stepping = STEP_C0 },
+ [2] = { .gt_stepping = STEP_B1, .disp_stepping = STEP_C0 },
+ [3] = { .gt_stepping = STEP_C0, .disp_stepping = STEP_D0 },
};
/* Same GT stepping between tgl_uy_revids and tgl_revids doesn't mean the same HW */
-const struct i915_rev_steppings tgl_revids[] = {
- [0] = { .gt_stepping = TGL_REVID_A0, .disp_stepping = TGL_REVID_B0 },
- [1] = { .gt_stepping = TGL_REVID_B0, .disp_stepping = TGL_REVID_D0 },
+const struct i915_rev_steppings tgl_revid_step_tbl[] = {
+ [0] = { .gt_stepping = STEP_A0, .disp_stepping = STEP_B0 },
+ [1] = { .gt_stepping = STEP_B0, .disp_stepping = STEP_D0 },
+};
+
+const struct i915_rev_steppings adls_revid_step_tbl[] = {
+ [0x0] = { .gt_stepping = STEP_A0, .disp_stepping = STEP_A0 },
+ [0x1] = { .gt_stepping = STEP_A0, .disp_stepping = STEP_A2 },
+ [0x4] = { .gt_stepping = STEP_B0, .disp_stepping = STEP_B0 },
+ [0x8] = { .gt_stepping = STEP_C0, .disp_stepping = STEP_B0 },
+ [0xC] = { .gt_stepping = STEP_D0, .disp_stepping = STEP_C0 },
};
static void wa_init_start(struct i915_wa_list *wal, const char *name, const char *engine_name)
@@ -722,7 +730,8 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
if (IS_DG1(i915))
dg1_ctx_workarounds_init(engine, wal);
- else if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915))
+ else if (IS_ALDERLAKE_S(i915) || IS_ROCKETLAKE(i915) ||
+ IS_TIGERLAKE(i915))
tgl_ctx_workarounds_init(engine, wal);
else if (IS_GEN(i915, 12))
gen12_ctx_workarounds_init(engine, wal);
@@ -1123,19 +1132,19 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
gen12_gt_workarounds_init(i915, wal);
/* Wa_1409420604:tgl */
- if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
+ if (IS_TGL_UY_GT_STEPPING(i915, STEP_A0, STEP_A0))
wa_write_or(wal,
SUBSLICE_UNIT_LEVEL_CLKGATE2,
CPSSUNIT_CLKGATE_DIS);
/* Wa_1607087056:tgl also known as BUG:1409180338 */
- if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
+ if (IS_TGL_UY_GT_STEPPING(i915, STEP_A0, STEP_A0))
wa_write_or(wal,
SLICE_UNIT_LEVEL_CLKGATE,
L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
/* Wa_1408615072:tgl[a0] */
- if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
+ if (IS_TGL_UY_GT_STEPPING(i915, STEP_A0, STEP_A0))
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
VSUNIT_CLKGATE_DIS_TGL);
}
@@ -1613,7 +1622,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
struct drm_i915_private *i915 = engine->i915;
if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
- IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
+ IS_TGL_UY_GT_STEPPING(i915, STEP_A0, STEP_A0)) {
/*
* Wa_1607138336:tgl[a0],dg1[a0]
* Wa_1607063988:tgl[a0],dg1[a0]
@@ -1623,7 +1632,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
}
- if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
+ if (IS_TGL_UY_GT_STEPPING(i915, STEP_A0, STEP_A0)) {
/*
* Wa_1606679103:tgl
* (see also Wa_1606682166:icl)
@@ -1633,45 +1642,45 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN7_DISABLE_SAMPLER_PREFETCH);
}
- if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
- /* Wa_1606931601:tgl,rkl,dg1 */
+ if (IS_ALDERLAKE_S(i915) || IS_DG1(i915) ||
+ IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
+ /* Wa_1606931601:tgl,rkl,dg1,adl-s */
wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);
/*
* Wa_1407928979:tgl A*
* Wa_18011464164:tgl[B0+],dg1[B0+]
* Wa_22010931296:tgl[B0+],dg1[B0+]
- * Wa_14010919138:rkl, dg1
+ * Wa_14010919138:rkl,dg1,adl-s
*/
wa_write_or(wal, GEN7_FF_THREAD_MODE,
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
/*
* Wa_1606700617:tgl,dg1
- * Wa_22010271021:tgl,rkl,dg1
+ * Wa_22010271021:tgl,rkl,dg1,adl-s
*/
wa_masked_en(wal,
GEN9_CS_DEBUG_MODE1,
FF_DOP_CLOCK_GATE_DISABLE);
-
- /* Wa_1406941453:tgl,rkl,dg1 */
- wa_masked_en(wal,
- GEN10_SAMPLER_MODE,
- ENABLE_SMALLPL);
}
- if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
+ if (IS_ALDERLAKE_S(i915) || IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
- /* Wa_1409804808:tgl,rkl,dg1[a0] */
+ /* Wa_1409804808:tgl,rkl,dg1[a0],adl-s */
wa_masked_en(wal, GEN7_ROW_CHICKEN2,
GEN12_PUSH_CONST_DEREF_HOLD_DIS);
/*
* Wa_1409085225:tgl
- * Wa_14010229206:tgl,rkl,dg1[a0]
+ * Wa_14010229206:tgl,rkl,dg1[a0],adl-s
*/
wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
+ }
+
+ if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
+ IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
/*
* Wa_1607030317:tgl
* Wa_1607186500:tgl
@@ -1688,6 +1697,13 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN8_RC_SEMA_IDLE_MSG_DISABLE);
}
+ if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
+ /* Wa_1406941453:tgl,rkl,dg1 */
+ wa_masked_en(wal,
+ GEN10_SAMPLER_MODE,
+ ENABLE_SMALLPL);
+ }
+
if (IS_GEN(i915, 11)) {
/* This is not a Wa. Enable for better image quality */
wa_masked_en(wal,
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 67b06fde1225..984fa79e0fa7 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -44,9 +44,11 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
* List of required GuC and HuC binaries per-platform.
* Must be ordered based on platform + revid, from newer to older.
*
- * Note that RKL uses the same firmware as TGL.
+ * Note that RKL and ADL-S have the same GuC/HuC device IDs and use the same
+ * firmware as TGL.
*/
#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
+ fw_def(ALDERLAKE_S, 0, guc_def(tgl, 49, 0, 1), huc_def(tgl, 7, 5, 0)) \
fw_def(ROCKETLAKE, 0, guc_def(tgl, 49, 0, 1), huc_def(tgl, 7, 5, 0)) \
fw_def(TIGERLAKE, 0, guc_def(tgl, 49, 0, 1), huc_def(tgl, 7, 5, 0)) \
fw_def(JASPERLAKE, 0, guc_def(ehl, 49, 0, 1), huc_def(ehl, 9, 0, 0)) \
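With the entry above, ADL-S requests the same binaries as TGL and RKL. As an illustration of what the guc_def()/huc_def() tuples resolve to, assuming the "i915/<prefix>_{guc,huc}_<major>.<minor>.<patch>.bin" path scheme used by the existing macros in this file:

#include <stdio.h>

int main(void)
{
	/* assumed path scheme, shown for the ALDERLAKE_S entry above */
	printf("i915/%s_guc_%d.%d.%d.bin\n", "tgl", 49, 0, 1);
	printf("i915/%s_huc_%d.%d.%d.bin\n", "tgl", 7, 5, 0);
	return 0;
}
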
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index ad86c5eb5bba..b490e3db2e38 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -374,6 +374,7 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
bool primary)
{
struct intel_gvt *gvt = vgpu->gvt;
+ struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
const struct intel_gvt_device_info *info = &gvt->device_info;
u16 *gmch_ctl;
u8 next;
@@ -407,9 +408,9 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
- pci_resource_len(gvt->gt->i915->drm.pdev, 0);
+ pci_resource_len(pdev, 0);
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
- pci_resource_len(gvt->gt->i915->drm.pdev, 2);
+ pci_resource_len(pdev, 2);
memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 990a181094e3..1a8274a3f4b1 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -76,7 +76,7 @@ static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
static int expose_firmware_sysfs(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
struct gvt_firmware_header *h;
void *firmware;
void *p;
@@ -127,7 +127,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
static void clean_firmware_sysfs(struct intel_gvt *gvt)
{
- struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
device_remove_bin_file(&pdev->dev, &firmware_attr);
vfree(firmware_attr.private);
@@ -151,7 +151,7 @@ static int verify_firmware(struct intel_gvt *gvt,
const struct firmware *fw)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
struct gvt_firmware_header *h;
unsigned long id, crc32_start;
const void *mem;
@@ -205,7 +205,7 @@ invalid_firmware:
int intel_gvt_load_firmware(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
struct intel_gvt_firmware *firmware = &gvt->firmware;
struct gvt_firmware_header *h;
const struct firmware *fw;
@@ -240,7 +240,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
gvt_dbg_core("request hw state firmware %s...\n", path);
- ret = request_firmware(&fw, path, &gvt->gt->i915->drm.pdev->dev);
+ ret = request_firmware(&fw, path, gvt->gt->i915->drm.dev);
kfree(path);
if (ret)
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 897c007ea96a..6d12a5a401f6 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -746,7 +746,7 @@ static int detach_oos_page(struct intel_vgpu *vgpu,
static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
- struct device *kdev = &spt->vgpu->gvt->gt->i915->drm.pdev->dev;
+ struct device *kdev = spt->vgpu->gvt->gt->i915->drm.dev;
trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
@@ -831,7 +831,7 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
{
- struct device *kdev = &vgpu->gvt->gt->i915->drm.pdev->dev;
+ struct device *kdev = vgpu->gvt->gt->i915->drm.dev;
struct intel_vgpu_ppgtt_spt *spt = NULL;
dma_addr_t daddr;
int ret;
@@ -2402,7 +2402,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
vgpu->gvt->device_info.gtt_entry_size_shift;
void *scratch_pt;
int i;
- struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
+ struct device *dev = vgpu->gvt->gt->i915->drm.dev;
dma_addr_t daddr;
if (drm_WARN_ON(&i915->drm,
@@ -2460,7 +2460,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{
int i;
- struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
+ struct device *dev = vgpu->gvt->gt->i915->drm.dev;
dma_addr_t daddr;
for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
@@ -2732,7 +2732,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
int ret;
void *page;
- struct device *dev = &gvt->gt->i915->drm.pdev->dev;
+ struct device *dev = gvt->gt->i915->drm.dev;
dma_addr_t daddr;
gvt_dbg_core("init gtt\n");
@@ -2781,7 +2781,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
*/
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
- struct device *dev = &gvt->gt->i915->drm.pdev->dev;
+ struct device *dev = gvt->gt->i915->drm.dev;
dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
I915_GTT_PAGE_SHIFT);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index d1d8ee4a5f16..aa7fc0dd1db5 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -50,7 +50,7 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
const char *name)
{
const char *driver_name =
- dev_driver_string(&gvt->gt->i915->drm.pdev->dev);
+ dev_driver_string(gvt->gt->i915->drm.dev);
int i;
name += strlen(driver_name) + 1;
@@ -189,7 +189,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
static void init_device_info(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
info->max_support_vgpus = 8;
info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
@@ -376,7 +376,7 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
intel_gvt_debugfs_init(gvt);
gvt_dbg_core("gvt device initialization is done\n");
- intel_gvt_host.dev = &i915->drm.pdev->dev;
+ intel_gvt_host.dev = i915->drm.dev;
intel_gvt_host.initialized = true;
return 0;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index b4348256ae95..d089770795b8 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -221,7 +221,7 @@ err:
static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
dma_addr_t *dma_addr, unsigned long size)
{
- struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
+ struct device *dev = vgpu->gvt->gt->i915->drm.dev;
struct page *page = NULL;
int ret;
@@ -244,7 +244,7 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
dma_addr_t dma_addr, unsigned long size)
{
- struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
+ struct device *dev = vgpu->gvt->gt->i915->drm.dev;
dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
gvt_unpin_guest_page(vgpu, gfn, size);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 88336ff4bf09..51133b8fabb4 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -677,7 +677,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
if (!HAS_RUNTIME_PM(dev_priv))
seq_puts(m, "Runtime power management not supported\n");
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 8e9cb44e66e5..43ac73861a4c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -38,7 +38,6 @@
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
-#include <acpi/video.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
@@ -47,11 +46,9 @@
#include <drm/drm_probe_helper.h>
#include "display/intel_acpi.h"
-#include "display/intel_audio.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
#include "display/intel_csr.h"
-#include "display/intel_display_debugfs.h"
#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbdev.h"
@@ -93,7 +90,7 @@ static const struct drm_driver driver;
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
- int domain = pci_domain_nr(dev_priv->drm.pdev->bus);
+ int domain = pci_domain_nr(to_pci_dev(dev_priv->drm.dev)->bus);
dev_priv->bridge_dev =
pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
@@ -352,7 +349,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_irq_init(dev_priv);
intel_init_display_hooks(dev_priv);
intel_init_clock_gating_hooks(dev_priv);
- intel_init_audio_hooks(dev_priv);
intel_detect_preproduction_hw(dev_priv);
@@ -461,7 +457,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
*/
static int i915_set_dma_info(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = i915->drm.pdev;
unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size;
int ret;
@@ -471,9 +466,9 @@ static int i915_set_dma_info(struct drm_i915_private *i915)
* We don't have a max segment size, so set it to the max so sg's
* debugging layer doesn't complain
*/
- dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+ dma_set_max_seg_size(i915->drm.dev, UINT_MAX);
- ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(mask_size));
+ ret = dma_set_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
if (ret)
goto mask_err;
@@ -493,7 +488,7 @@ static int i915_set_dma_info(struct drm_i915_private *i915)
if (IS_I965G(i915) || IS_I965GM(i915))
mask_size = 32;
- ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(mask_size));
+ ret = dma_set_coherent_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
if (ret)
goto mask_err;
@@ -513,7 +508,7 @@ mask_err:
*/
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
int ret;
if (i915_inject_probe_failure(dev_priv))
@@ -641,7 +636,7 @@ err_perf:
*/
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
i915_perf_fini(dev_priv);
@@ -666,43 +661,21 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
intel_vgpu_register(dev_priv);
/* Reveal our presence to userspace */
- if (drm_dev_register(dev, 0) == 0) {
- i915_debugfs_register(dev_priv);
- if (HAS_DISPLAY(dev_priv))
- intel_display_debugfs_register(dev_priv);
- i915_setup_sysfs(dev_priv);
-
- /* Depends on sysfs having been initialized */
- i915_perf_register(dev_priv);
- } else
+ if (drm_dev_register(dev, 0)) {
drm_err(&dev_priv->drm,
"Failed to register driver for userspace access!\n");
-
- if (HAS_DISPLAY(dev_priv)) {
- /* Must be done after probing outputs */
- intel_opregion_register(dev_priv);
- acpi_video_register();
+ return;
}
- intel_gt_driver_register(&dev_priv->gt);
+ i915_debugfs_register(dev_priv);
+ i915_setup_sysfs(dev_priv);
- intel_audio_init(dev_priv);
+ /* Depends on sysfs having been initialized */
+ i915_perf_register(dev_priv);
- /*
- * Some ports require correctly set-up hpd registers for detection to
- * work properly (leading to ghost connected connector status), e.g. VGA
- * on gm45. Hence we can only set up the initial fbdev config after hpd
- * irqs are fully enabled. We do it last so that the async config
- * cannot run before the connectors are registered.
- */
- intel_fbdev_initial_config_async(dev);
+ intel_gt_driver_register(&dev_priv->gt);
- /*
- * We need to coordinate the hotplugs with the asynchronous fbdev
- * configuration, for which we use the fbdev->async_cookie.
- */
- if (HAS_DISPLAY(dev_priv))
- drm_kms_helper_poll_init(dev);
+ intel_display_driver_register(dev_priv);
intel_power_domains_enable(dev_priv);
intel_runtime_pm_enable(&dev_priv->runtime_pm);
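The display-specific registration steps removed above (display debugfs, opregion, ACPI video, audio, the async fbdev config and KMS polling) are consolidated behind intel_display_driver_register(), whose body is not shown in this section. A rough reconstruction from the deleted lines, offered only as a sketch of the expected shape rather than the actual implementation:

void intel_display_driver_register(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_display_debugfs_register(i915);

	/* Must be done after probing outputs */
	intel_opregion_register(i915);
	acpi_video_register();

	intel_audio_init(i915);

	/* fbdev config only after hpd irqs are fully enabled */
	intel_fbdev_initial_config_async(&i915->drm);

	/* coordinate hotplugs with the async fbdev configuration */
	drm_kms_helper_poll_init(&i915->drm);
}
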
@@ -726,20 +699,9 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
intel_runtime_pm_disable(&dev_priv->runtime_pm);
intel_power_domains_disable(dev_priv);
- intel_fbdev_unregister(dev_priv);
- intel_audio_deinit(dev_priv);
-
- /*
- * After flushing the fbdev (incl. a late async config which will
- * have delayed queuing of a hotplug event), then flush the hotplug
- * events.
- */
- drm_kms_helper_poll_fini(&dev_priv->drm);
- drm_atomic_helper_shutdown(&dev_priv->drm);
+ intel_display_driver_unregister(dev_priv);
intel_gt_driver_unregister(&dev_priv->gt);
- acpi_video_unregister();
- intel_opregion_unregister(dev_priv);
i915_perf_unregister(dev_priv);
i915_pmu_unregister(dev_priv);
@@ -1049,6 +1011,8 @@ static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
void i915_driver_shutdown(struct drm_i915_private *i915)
{
disable_rpm_wakeref_asserts(&i915->runtime_pm);
+ intel_runtime_pm_disable(&i915->runtime_pm);
+ intel_power_domains_disable(i915);
i915_gem_suspend(i915);
@@ -1064,7 +1028,15 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_suspend_encoders(i915);
intel_shutdown_encoders(i915);
+ /*
+ * The only requirement is to reboot with display DC states disabled,
+ * for now leaving all display power wells in the INIT power domain
+ * enabled matching the driver reload sequence.
+ */
+ intel_power_domains_driver_remove(i915);
enable_rpm_wakeref_asserts(&i915->runtime_pm);
+
+ intel_runtime_pm_driver_release(&i915->runtime_pm);
}
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
@@ -1094,7 +1066,7 @@ static int i915_drm_prepare(struct drm_device *dev)
static int i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
pci_power_t opregion_target_state;
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
@@ -1151,7 +1123,7 @@ get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
int ret;
@@ -1281,7 +1253,7 @@ static int i915_drm_resume(struct drm_device *dev)
static int i915_drm_resume_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
int ret;
/*
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 26d69d06aa6d..0c43e44d1722 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -475,42 +475,6 @@ struct i915_drrs {
enum drrs_support_type type;
};
-struct i915_psr {
- struct mutex lock;
-
-#define I915_PSR_DEBUG_MODE_MASK 0x0f
-#define I915_PSR_DEBUG_DEFAULT 0x00
-#define I915_PSR_DEBUG_DISABLE 0x01
-#define I915_PSR_DEBUG_ENABLE 0x02
-#define I915_PSR_DEBUG_FORCE_PSR1 0x03
-#define I915_PSR_DEBUG_IRQ 0x10
-
- u32 debug;
- bool sink_support;
- bool enabled;
- struct intel_dp *dp;
- enum pipe pipe;
- enum transcoder transcoder;
- bool active;
- struct work_struct work;
- unsigned busy_frontbuffer_bits;
- bool sink_psr2_support;
- bool link_standby;
- bool colorimetry_support;
- bool psr2_enabled;
- bool psr2_sel_fetch_enabled;
- u8 sink_sync_latency;
- ktime_t last_entry_attempt;
- ktime_t last_exit;
- bool sink_not_reliable;
- bool irq_aux_error;
- u16 su_x_granularity;
- bool dc3co_enabled;
- u32 dc3co_exit_delay;
- struct delayed_work dc3co_work;
- struct drm_dp_vsc_sdp vsc;
-};
-
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
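The single dev_priv->psr instance removed here moves to per-encoder state: the IRQ changes later in this section reference intel_dp->psr.transcoder, i.e. each (e)DP encoder now carries its own struct intel_psr. A hypothetical access sketch; field names beyond those visible in this diff are assumptions:

	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	if (intel_dp->psr.enabled)	/* assumed field, mirroring the old dev_priv->psr.enabled */
		drm_dbg_kms(&i915->drm, "PSR active on transcoder %d\n",
			    intel_dp->psr.transcoder);
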
@@ -1038,8 +1002,6 @@ struct drm_i915_private {
struct i915_power_domains power_domains;
- struct i915_psr psr;
-
struct i915_gpu_error gpu_error;
struct drm_i915_gem_object *vlv_pctx;
@@ -1133,7 +1095,9 @@ struct drm_i915_private {
INTEL_DRAM_DDR3,
INTEL_DRAM_DDR4,
INTEL_DRAM_LPDDR3,
- INTEL_DRAM_LPDDR4
+ INTEL_DRAM_LPDDR4,
+ INTEL_DRAM_DDR5,
+ INTEL_DRAM_LPDDR5,
} type;
u8 num_qgv_points;
} dram_info;
@@ -1280,7 +1244,7 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
#define INTEL_DEVID(dev_priv) (RUNTIME_INFO(dev_priv)->device_id)
#define REVID_FOREVER 0xff
-#define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision)
+#define INTEL_REVID(dev_priv) (to_pci_dev((dev_priv)->drm.dev)->revision)
#define INTEL_GEN_MASK(s, e) ( \
BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
@@ -1408,6 +1372,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
#define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE)
#define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, INTEL_DG1)
+#define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_S)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) \
@@ -1550,54 +1515,60 @@ extern const struct i915_rev_steppings kbl_revids[];
(IS_JSL_EHL(p) && IS_REVID(p, since, until))
enum {
- TGL_REVID_A0,
- TGL_REVID_B0,
- TGL_REVID_B1,
- TGL_REVID_C0,
- TGL_REVID_D0,
+ STEP_A0,
+ STEP_A2,
+ STEP_B0,
+ STEP_B1,
+ STEP_C0,
+ STEP_D0,
};
-#define TGL_UY_REVIDS_SIZE 4
-#define TGL_REVIDS_SIZE 2
+#define TGL_UY_REVID_STEP_TBL_SIZE 4
+#define TGL_REVID_STEP_TBL_SIZE 2
+#define ADLS_REVID_STEP_TBL_SIZE 13
-extern const struct i915_rev_steppings tgl_uy_revids[TGL_UY_REVIDS_SIZE];
-extern const struct i915_rev_steppings tgl_revids[TGL_REVIDS_SIZE];
+extern const struct i915_rev_steppings tgl_uy_revid_step_tbl[TGL_UY_REVID_STEP_TBL_SIZE];
+extern const struct i915_rev_steppings tgl_revid_step_tbl[TGL_REVID_STEP_TBL_SIZE];
+extern const struct i915_rev_steppings adls_revid_step_tbl[ADLS_REVID_STEP_TBL_SIZE];
static inline const struct i915_rev_steppings *
-tgl_revids_get(struct drm_i915_private *dev_priv)
+tgl_stepping_get(struct drm_i915_private *dev_priv)
{
u8 revid = INTEL_REVID(dev_priv);
u8 size;
- const struct i915_rev_steppings *tgl_revid_tbl;
-
- if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
- tgl_revid_tbl = tgl_uy_revids;
- size = ARRAY_SIZE(tgl_uy_revids);
+ const struct i915_rev_steppings *revid_step_tbl;
+
+ if (IS_ALDERLAKE_S(dev_priv)) {
+ revid_step_tbl = adls_revid_step_tbl;
+ size = ARRAY_SIZE(adls_revid_step_tbl);
+ } else if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
+ revid_step_tbl = tgl_uy_revid_step_tbl;
+ size = ARRAY_SIZE(tgl_uy_revid_step_tbl);
} else {
- tgl_revid_tbl = tgl_revids;
- size = ARRAY_SIZE(tgl_revids);
+ revid_step_tbl = tgl_revid_step_tbl;
+ size = ARRAY_SIZE(tgl_revid_step_tbl);
}
revid = min_t(u8, revid, size - 1);
- return &tgl_revid_tbl[revid];
+ return &revid_step_tbl[revid];
}
-#define IS_TGL_DISP_REVID(p, since, until) \
+#define IS_TGL_DISP_STEPPING(p, since, until) \
(IS_TIGERLAKE(p) && \
- tgl_revids_get(p)->disp_stepping >= (since) && \
- tgl_revids_get(p)->disp_stepping <= (until))
+ tgl_stepping_get(p)->disp_stepping >= (since) && \
+ tgl_stepping_get(p)->disp_stepping <= (until))
-#define IS_TGL_UY_GT_REVID(p, since, until) \
+#define IS_TGL_UY_GT_STEPPING(p, since, until) \
((IS_TGL_U(p) || IS_TGL_Y(p)) && \
- tgl_revids_get(p)->gt_stepping >= (since) && \
- tgl_revids_get(p)->gt_stepping <= (until))
+ tgl_stepping_get(p)->gt_stepping >= (since) && \
+ tgl_stepping_get(p)->gt_stepping <= (until))
-#define IS_TGL_GT_REVID(p, since, until) \
+#define IS_TGL_GT_STEPPING(p, since, until) \
(IS_TIGERLAKE(p) && \
!(IS_TGL_U(p) || IS_TGL_Y(p)) && \
- tgl_revids_get(p)->gt_stepping >= (since) && \
- tgl_revids_get(p)->gt_stepping <= (until))
+ tgl_stepping_get(p)->gt_stepping >= (since) && \
+ tgl_stepping_get(p)->gt_stepping <= (until))
#define RKL_REVID_A0 0x0
#define RKL_REVID_B0 0x1
@@ -1612,6 +1583,22 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
#define IS_DG1_REVID(p, since, until) \
(IS_DG1(p) && IS_REVID(p, since, until))
+#define ADLS_REVID_A0 0x0
+#define ADLS_REVID_A2 0x1
+#define ADLS_REVID_B0 0x4
+#define ADLS_REVID_G0 0x8
+#define ADLS_REVID_C0 0xC /* Same as H0 ADLS SOC stepping */
+
+#define IS_ADLS_DISP_STEPPING(p, since, until) \
+ (IS_ALDERLAKE_S(p) && \
+ tgl_stepping_get(p)->disp_stepping >= (since) && \
+ tgl_stepping_get(p)->disp_stepping <= (until))
+
+#define IS_ADLS_GT_STEPPING(p, since, until) \
+ (IS_ALDERLAKE_S(p) && \
+ tgl_stepping_get(p)->gt_stepping >= (since) && \
+ tgl_stepping_get(p)->gt_stepping <= (until))
+
#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
#define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
#define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
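The stepping lookup clamps the PCI revision to the last table entry and indexes a per-platform array; sparse initializers (as in adls_revid_step_tbl) leave unlisted revisions zero-filled, which happens to read back as STEP_A0. A standalone model of that lookup, with the table values copied from the workarounds file above:

#include <stdio.h>

enum { STEP_A0, STEP_A2, STEP_B0, STEP_B1, STEP_C0, STEP_D0 };

struct rev_steppings { int gt_stepping, disp_stepping; };

static const struct rev_steppings adls[13] = {
	[0x0] = { STEP_A0, STEP_A0 },
	[0x1] = { STEP_A0, STEP_A2 },
	[0x4] = { STEP_B0, STEP_B0 },
	[0x8] = { STEP_C0, STEP_B0 },
	[0xC] = { STEP_D0, STEP_C0 },
};

int main(void)
{
	unsigned int revid = 0x1f;	/* unknown future revision */

	if (revid > 12)			/* min_t(u8, revid, size - 1) in the driver */
		revid = 12;

	printf("gt stepping %d, display stepping %d\n",
	       adls[revid].gt_stepping, adls[revid].disp_stepping);
	return 0;
}
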
@@ -1703,7 +1690,7 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
#define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst)
#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
-#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
+#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->display.has_fpga_dbg)
#define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr)
#define HAS_PSR_HW_TRACKING(dev_priv) \
(INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
@@ -1718,6 +1705,8 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
#define HAS_CSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_csr)
+#define HAS_MSO(i915) (INTEL_GEN(i915) >= 12)
+
#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)
@@ -1735,7 +1724,7 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
-#define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)
+#define HAS_LSPCON(dev_priv) (IS_GEN_RANGE(dev_priv, 9, 10))
/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
@@ -1760,6 +1749,9 @@ static inline bool run_as_guest(void)
return !hypervisor_is_type(X86_HYPER_NATIVE);
}
+#define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \
+ IS_ALDERLAKE_S(dev_priv))
+
static inline bool intel_vtd_active(void)
{
#ifdef CONFIG_INTEL_IOMMU
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 3ee2f682eff6..486c9953e5b6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -28,7 +28,7 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
do {
- if (dma_map_sg_attrs(&obj->base.dev->pdev->dev,
+ if (dma_map_sg_attrs(obj->base.dev->dev,
pages->sgl, pages->nents,
PCI_DMA_BIDIRECTIONAL,
DMA_ATTR_SKIP_CPU_SYNC |
@@ -63,8 +63,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
/* Wait a bit, in the hope it avoids the hang */
usleep_range(100, 250);
- dma_unmap_sg(&i915->drm.pdev->dev,
- pages->sgl, pages->nents,
+ dma_unmap_sg(i915->drm.dev, pages->sgl, pages->nents,
PCI_DMA_BIDIRECTIONAL);
}
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index 75c3bfc2486e..24e18219eb50 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -12,6 +12,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *i915 = to_i915(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
const struct sseu_dev_info *sseu = &i915->gt.info.sseu;
drm_i915_getparam_t *param = data;
int value;
@@ -24,10 +25,10 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
/* Reject all old ums/dri params. */
return -ENODEV;
case I915_PARAM_CHIPSET_ID:
- value = i915->drm.pdev->device;
+ value = pdev->device;
break;
case I915_PARAM_REVISION:
- value = i915->drm.pdev->revision;
+ value = pdev->revision;
break;
case I915_PARAM_NUM_FENCES_AVAIL:
value = i915->ggtt.num_fences;
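From userspace nothing changes here: CHIPSET_ID and REVISION are still queried through the GETPARAM ioctl, the kernel just reads them from the PCI device obtained via to_pci_dev(dev->dev). A minimal query sketch with error handling omitted (the i915_drm.h include path may differ between kernel uapi and libdrm installs):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	int devid = 0, rev = 0;
	struct drm_i915_getparam gp = { .param = I915_PARAM_CHIPSET_ID, .value = &devid };

	ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
	gp.param = I915_PARAM_REVISION;
	gp.value = &rev;
	ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);

	printf("device 0x%04x revision 0x%02x\n", devid, rev);
	close(fd);
	return 0;
}
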
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index f962693404b7..bb181fe5d47e 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -644,7 +644,7 @@ static void err_print_params(struct drm_i915_error_state_buf *m,
static void err_print_pciid(struct drm_i915_error_state_buf *m,
struct drm_i915_private *i915)
{
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 1a701367a718..67c6d71f2675 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -209,8 +209,7 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
if (HAS_PCH_DG1(dev_priv))
hpd->pch_hpd = hpd_sde_dg1;
- else if (HAS_PCH_TGP(dev_priv) || HAS_PCH_JSP(dev_priv) ||
- HAS_PCH_ICP(dev_priv) || HAS_PCH_MCC(dev_priv))
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
hpd->pch_hpd = hpd_icp;
else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
hpd->pch_hpd = hpd_spt;
@@ -795,7 +794,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
int position, vtotal;
if (!crtc->active)
- return -1;
+ return 0;
vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
mode = &vblank->hwmode;
@@ -2095,10 +2094,19 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
ivb_err_int_handler(dev_priv);
if (de_iir & DE_EDP_PSR_INT_HSW) {
- u32 psr_iir = intel_uncore_read(&dev_priv->uncore, EDP_PSR_IIR);
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ u32 psr_iir = intel_uncore_read(&dev_priv->uncore,
+ EDP_PSR_IIR);
- intel_psr_irq_handler(dev_priv, psr_iir);
- intel_uncore_write(&dev_priv->uncore, EDP_PSR_IIR, psr_iir);
+ intel_psr_irq_handler(intel_dp, psr_iir);
+ intel_uncore_write(&dev_priv->uncore,
+ EDP_PSR_IIR, psr_iir);
+ break;
+ }
}
if (de_iir & DE_AUX_CHANNEL_A_IVB)
@@ -2290,7 +2298,7 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
{
- if (IS_ROCKETLAKE(dev_priv))
+ if (HAS_D12_PLANE_MINIMIZATION(dev_priv))
return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
else if (INTEL_GEN(dev_priv) >= 11)
return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
@@ -2311,21 +2319,30 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
}
if (iir & GEN8_DE_EDP_PSR) {
+ struct intel_encoder *encoder;
u32 psr_iir;
i915_reg_t iir_reg;
- if (INTEL_GEN(dev_priv) >= 12)
- iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder);
- else
- iir_reg = EDP_PSR_IIR;
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
- intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);
+ if (INTEL_GEN(dev_priv) >= 12)
+ iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder);
+ else
+ iir_reg = EDP_PSR_IIR;
+
+ psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
+ intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);
- if (psr_iir)
- found = true;
+ if (psr_iir)
+ found = true;
- intel_psr_irq_handler(dev_priv, psr_iir);
+ intel_psr_irq_handler(intel_dp, psr_iir);
+
+ /* prior to GEN12 there is only one EDP PSR instance */
+ if (INTEL_GEN(dev_priv) < 12)
+ break;
+ }
}
if (!found)
@@ -3023,6 +3040,24 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
}
+static void cnp_display_clock_wa(struct drm_i915_private *dev_priv)
+{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
+ /*
+ * Wa_14010685332:cnp/cmp,tgp,adp
+ * TODO: Clarify which platforms this applies to
+ * TODO: Figure out if this workaround can be applied in the s0ix suspend/resume handlers as
+ * on earlier platforms and whether the workaround is also needed for runtime suspend/resume
+ */
+ if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP ||
+ (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) {
+ intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS,
+ SBCLK_RUN_REFCLK_DIS);
+ intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
+ }
+}
+
static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
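cnp_display_clock_wa() pulses SBCLK_RUN_REFCLK_DIS in SOUTH_CHICKEN1 via two read-modify-write cycles; gen8_irq_reset() and gen11_display_irq_reset() now share it instead of open-coding the sequence. A standalone model of the intel_uncore_rmw() semantics (the bit position is an assumption, used only for illustration):

#include <stdint.h>
#include <stdio.h>

#define SBCLK_RUN_REFCLK_DIS	(1u << 7)	/* assumed bit position */

/* models intel_uncore_rmw(uncore, reg, clear, set) on a plain variable */
static uint32_t rmw(uint32_t val, uint32_t clear, uint32_t set)
{
	return (val & ~clear) | set;
}

int main(void)
{
	uint32_t south_chicken1 = 0;

	/* the workaround sets the bit and then clears it again */
	south_chicken1 = rmw(south_chicken1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
	printf("after set:   %#x\n", south_chicken1);
	south_chicken1 = rmw(south_chicken1, SBCLK_RUN_REFCLK_DIS, 0);
	printf("after clear: %#x\n", south_chicken1);
	return 0;
}
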
@@ -3046,6 +3081,8 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_reset(dev_priv);
+
+ cnp_display_clock_wa(dev_priv);
}
static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
@@ -3087,15 +3124,7 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
GEN3_IRQ_RESET(uncore, SDE);
- /* Wa_14010685332:cnp/cmp,tgp,adp */
- if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP ||
- (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
- INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) {
- intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
- SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
- intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
- SBCLK_RUN_REFCLK_DIS, 0);
- }
+ cnp_display_clock_wa(dev_priv);
}
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
@@ -3747,9 +3776,19 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
}
}
+static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+ u32 mask = SDE_GMBUS_ICP;
+
+ GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
+}
+
static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
- if (HAS_PCH_SPLIT(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ icp_irq_postinstall(dev_priv);
+ else if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_postinstall(dev_priv);
gen8_gt_irq_postinstall(&dev_priv->gt);
@@ -3758,13 +3797,6 @@ static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
gen8_master_intr_enable(dev_priv->uncore.regs);
}
-static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
- u32 mask = SDE_GMBUS_ICP;
-
- GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
-}
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
@@ -4287,6 +4319,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
else if (IS_GEN9_LP(dev_priv))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ dev_priv->display.hpd_irq_setup = icp_hpd_irq_setup;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
else
@@ -4392,7 +4426,7 @@ static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
*/
int intel_irq_install(struct drm_i915_private *dev_priv)
{
- int irq = dev_priv->drm.pdev->irq;
+ int irq = to_pci_dev(dev_priv->drm.dev)->irq;
int ret;
/*
@@ -4427,7 +4461,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
*/
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
- int irq = dev_priv->drm.pdev->irq;
+ int irq = to_pci_dev(dev_priv->drm.dev)->irq;
/*
* FIXME we can get called twice during driver probe
@@ -4487,5 +4521,5 @@ bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
void intel_synchronize_irq(struct drm_i915_private *i915)
{
- synchronize_irq(i915->drm.pdev->irq);
+ synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
}
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index f031966af5b7..48f47e44e848 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -54,8 +54,8 @@ struct drm_printer;
param(int, enable_dc, -1, 0400) \
param(int, enable_fbc, -1, 0600) \
param(int, enable_psr, -1, 0600) \
- param(bool, psr_safest_params, false, 0600) \
- param(bool, enable_psr2_sel_fetch, false, 0600) \
+ param(bool, psr_safest_params, false, 0400) \
+ param(bool, enable_psr2_sel_fetch, false, 0400) \
param(int, disable_power_well, -1, 0400) \
param(int, enable_ips, 1, 0600) \
param(int, invert_brightness, 0, 0600) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 020b5f561f07..9a481ad5a8f6 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -538,7 +538,7 @@ static const struct intel_device_info vlv_info = {
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \
.display.has_ddi = 1, \
- .has_fpga_dbg = 1, \
+ .display.has_fpga_dbg = 1, \
.display.has_psr = 1, \
.display.has_psr_hw_tracking = 1, \
.display.has_dp_mst = 1, \
@@ -689,7 +689,7 @@ static const struct intel_device_info skl_gt4_info = {
BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \
.has_64bit_reloc = 1, \
.display.has_ddi = 1, \
- .has_fpga_dbg = 1, \
+ .display.has_fpga_dbg = 1, \
.display.has_fbc = 1, \
.display.has_hdcp = 1, \
.display.has_psr = 1, \
@@ -897,7 +897,6 @@ static const struct intel_device_info rkl_info = {
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C),
- .require_force_probe = 1,
.display.has_hti = 1,
.display.has_psr_hw_tracking = 0,
.platform_engine_mask =
@@ -924,6 +923,18 @@ static const struct intel_device_info dg1_info __maybe_unused = {
.ppgtt_size = 47,
};
+static const struct intel_device_info adl_s_info = {
+ GEN12_FEATURES,
+ PLATFORM(INTEL_ALDERLAKE_S),
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .require_force_probe = 1,
+ .display.has_hti = 1,
+ .display.has_psr_hw_tracking = 0,
+ .platform_engine_mask =
+ BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
+ .dma_mask_size = 46,
+};
+
#undef GEN
#undef PLATFORM
@@ -1000,6 +1011,7 @@ static const struct pci_device_id pciidlist[] = {
INTEL_JSL_IDS(&jsl_info),
INTEL_TGL_12_IDS(&tgl_info),
INTEL_RKL_IDS(&rkl_info),
+ INTEL_ADLS_IDS(&adl_s_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 112ba5f2ce90..736c09891e24 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -302,7 +302,7 @@ static u32 i915_oa_max_sample_rate = 100000;
* code assumes all reports have a power-of-two size and ~(size - 1) can
* be used as a mask to align the OA tail pointer.
*/
-static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
+static const struct i915_oa_format oa_formats[I915_OA_FORMAT_MAX] = {
[I915_OA_FORMAT_A13] = { 0, 64 },
[I915_OA_FORMAT_A29] = { 1, 128 },
[I915_OA_FORMAT_A13_B8_C8] = { 2, 128 },
@@ -311,17 +311,9 @@ static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
[I915_OA_FORMAT_A45_B8_C8] = { 5, 256 },
[I915_OA_FORMAT_B4_C8_A16] = { 6, 128 },
[I915_OA_FORMAT_C4_B8] = { 7, 64 },
-};
-
-static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
[I915_OA_FORMAT_A12] = { 0, 64 },
[I915_OA_FORMAT_A12_B8_C8] = { 2, 128 },
[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
- [I915_OA_FORMAT_C4_B8] = { 7, 64 },
-};
-
-static const struct i915_oa_format gen12_oa_formats[I915_OA_FORMAT_MAX] = {
- [I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
};
#define SAMPLE_OA_REPORT (1<<0)
@@ -733,11 +725,6 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
(IS_GEN(stream->perf->i915, 12) ?
OAREPORT_REASON_MASK_EXTENDED :
OAREPORT_REASON_MASK));
- if (reason == 0) {
- if (__ratelimit(&stream->perf->spurious_report_rs))
- DRM_NOTE("Skipping spurious, invalid OA report\n");
- continue;
- }
ctx_id = report32[2] & stream->specific_ctx_id_mask;
@@ -3524,6 +3511,18 @@ static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
2ULL << exponent);
}
+static __always_inline bool
+oa_format_valid(struct i915_perf *perf, enum drm_i915_oa_format format)
+{
+ return test_bit(format, perf->format_mask);
+}
+
+static __always_inline void
+oa_format_add(struct i915_perf *perf, enum drm_i915_oa_format format)
+{
+ __set_bit(format, perf->format_mask);
+}
+
/**
* read_properties_unlocked - validate + copy userspace stream open properties
* @perf: i915 perf instance
@@ -3615,7 +3614,7 @@ static int read_properties_unlocked(struct i915_perf *perf,
value);
return -EINVAL;
}
- if (!perf->oa_formats[value].size) {
+ if (!oa_format_valid(perf, value)) {
DRM_DEBUG("Unsupported OA report format %llu\n",
value);
return -EINVAL;
@@ -4259,6 +4258,49 @@ static struct ctl_table dev_root[] = {
{}
};
+static void oa_init_supported_formats(struct i915_perf *perf)
+{
+ struct drm_i915_private *i915 = perf->i915;
+ enum intel_platform platform = INTEL_INFO(i915)->platform;
+
+ switch (platform) {
+ case INTEL_HASWELL:
+		oa_format_add(perf, I915_OA_FORMAT_A13);
+ oa_format_add(perf, I915_OA_FORMAT_A29);
+ oa_format_add(perf, I915_OA_FORMAT_A13_B8_C8);
+ oa_format_add(perf, I915_OA_FORMAT_B4_C8);
+ oa_format_add(perf, I915_OA_FORMAT_A45_B8_C8);
+ oa_format_add(perf, I915_OA_FORMAT_B4_C8_A16);
+ oa_format_add(perf, I915_OA_FORMAT_C4_B8);
+ break;
+
+ case INTEL_BROADWELL:
+ case INTEL_CHERRYVIEW:
+ case INTEL_SKYLAKE:
+ case INTEL_BROXTON:
+ case INTEL_KABYLAKE:
+ case INTEL_GEMINILAKE:
+ case INTEL_COFFEELAKE:
+ case INTEL_COMETLAKE:
+ case INTEL_CANNONLAKE:
+ case INTEL_ICELAKE:
+ case INTEL_ELKHARTLAKE:
+ case INTEL_JASPERLAKE:
+ case INTEL_TIGERLAKE:
+ case INTEL_ROCKETLAKE:
+ case INTEL_DG1:
+ case INTEL_ALDERLAKE_S:
+ oa_format_add(perf, I915_OA_FORMAT_A12);
+ oa_format_add(perf, I915_OA_FORMAT_A12_B8_C8);
+ oa_format_add(perf, I915_OA_FORMAT_A32u40_A4u32_B8_C8);
+ oa_format_add(perf, I915_OA_FORMAT_C4_B8);
+ break;
+
+ default:
+ MISSING_CASE(platform);
+ }
+}
+
/**
* i915_perf_init - initialize i915-perf state on module bind
* @i915: i915 device instance
@@ -4274,6 +4317,7 @@ void i915_perf_init(struct drm_i915_private *i915)
/* XXX const struct i915_perf_ops! */
+ perf->oa_formats = oa_formats;
if (IS_HASWELL(i915)) {
perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
@@ -4284,8 +4328,6 @@ void i915_perf_init(struct drm_i915_private *i915)
perf->ops.oa_disable = gen7_oa_disable;
perf->ops.read = gen7_oa_read;
perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;
-
- perf->oa_formats = hsw_oa_formats;
} else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
/* Note: that although we could theoretically also support the
* legacy ringbuffer mode on BDW (and earlier iterations of
@@ -4296,8 +4338,6 @@ void i915_perf_init(struct drm_i915_private *i915)
perf->ops.read = gen8_oa_read;
if (IS_GEN_RANGE(i915, 8, 9)) {
- perf->oa_formats = gen8_plus_oa_formats;
-
perf->ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
perf->ops.is_valid_mux_reg =
@@ -4328,8 +4368,6 @@ void i915_perf_init(struct drm_i915_private *i915)
perf->gen8_valid_ctx_bit = BIT(16);
}
} else if (IS_GEN_RANGE(i915, 10, 11)) {
- perf->oa_formats = gen8_plus_oa_formats;
-
perf->ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
perf->ops.is_valid_mux_reg =
@@ -4352,8 +4390,6 @@ void i915_perf_init(struct drm_i915_private *i915)
}
perf->gen8_valid_ctx_bit = BIT(16);
} else if (IS_GEN(i915, 12)) {
- perf->oa_formats = gen12_oa_formats;
-
perf->ops.is_valid_b_counter_reg =
gen12_is_valid_b_counter_addr;
perf->ops.is_valid_mux_reg =
@@ -4408,6 +4444,8 @@ void i915_perf_init(struct drm_i915_private *i915)
500 * 1000 /* 500us */);
perf->i915 = i915;
+
+ oa_init_supported_formats(perf);
}
}
diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h
index a36a455ae336..aa14354a5120 100644
--- a/drivers/gpu/drm/i915/i915_perf_types.h
+++ b/drivers/gpu/drm/i915/i915_perf_types.h
@@ -15,6 +15,7 @@
#include <linux/types.h>
#include <linux/uuid.h>
#include <linux/wait.h>
+#include <uapi/drm/i915_drm.h>
#include "gt/intel_sseu.h"
#include "i915_reg.h"
@@ -441,6 +442,13 @@ struct i915_perf {
struct i915_oa_ops ops;
const struct i915_oa_format *oa_formats;
+ /**
+ * Use a format mask to store the supported formats
+ * for a platform.
+ */
+#define FORMAT_MASK_SIZE DIV_ROUND_UP(I915_OA_FORMAT_MAX - 1, BITS_PER_LONG)
+ unsigned long format_mask[FORMAT_MASK_SIZE];
+
atomic64_t noa_programming_delay;
};
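The per-platform OA format tables are replaced here by a single table plus a bitmap: oa_format_add() sets one bit per supported format and oa_format_valid() reduces validation to a test_bit(). The standalone sketch below mimics that idea in plain C so it compiles outside the kernel; the enum values, helper names and mask sizing are illustrative only, not the driver's.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

enum sample_format { FMT_A12 = 1, FMT_A12_B8_C8, FMT_C4_B8, FMT_MAX };

#define BITS_PER_LONG_SAMPLE (CHAR_BIT * sizeof(unsigned long))
#define MASK_LONGS ((FMT_MAX + BITS_PER_LONG_SAMPLE - 1) / BITS_PER_LONG_SAMPLE)

static unsigned long format_mask[MASK_LONGS];

/* Equivalent of oa_format_add(): record one supported format as a bit. */
static void format_add(enum sample_format f)
{
	format_mask[f / BITS_PER_LONG_SAMPLE] |= 1UL << (f % BITS_PER_LONG_SAMPLE);
}

/* Equivalent of oa_format_valid(): a single bit test instead of a table walk. */
static bool format_valid(enum sample_format f)
{
	return format_mask[f / BITS_PER_LONG_SAMPLE] & (1UL << (f % BITS_PER_LONG_SAMPLE));
}

int main(void)
{
	format_add(FMT_A12);
	format_add(FMT_C4_B8);

	printf("A12: %d, A12_B8_C8: %d, C4_B8: %d\n",
	       format_valid(FMT_A12), format_valid(FMT_A12_B8_C8),
	       format_valid(FMT_C4_B8));
	return 0;
}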
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 2b88c0baa1bf..41651ac255fa 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -1124,7 +1124,7 @@ static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
static bool is_igp(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
/* IGP is 0000:00:02.0 */
return pci_domain_nr(pdev->bus) == 0 &&
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 7146cd0f3256..174dc15a0cf8 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1874,10 +1874,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _ICL_COMBOPHY_B 0x6C000
#define _EHL_COMBOPHY_C 0x160000
#define _RKL_COMBOPHY_D 0x161000
+#define _ADL_COMBOPHY_E 0x16B000
+
#define _ICL_COMBOPHY(phy) _PICK(phy, _ICL_COMBOPHY_A, \
_ICL_COMBOPHY_B, \
_EHL_COMBOPHY_C, \
- _RKL_COMBOPHY_D)
+ _RKL_COMBOPHY_D, \
+ _ADL_COMBOPHY_E)
/* CNL/ICL Port CL_DW registers */
#define _ICL_PORT_CL_DW(dw, phy) (_ICL_COMBOPHY(phy) + \
@@ -2927,7 +2930,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MBUS_BBOX_CTL_S2 _MMIO(0x45044)
#define HDPORT_STATE _MMIO(0x45050)
-#define HDPORT_DPLL_USED_MASK REG_GENMASK(14, 12)
+#define HDPORT_DPLL_USED_MASK REG_GENMASK(15, 12)
#define HDPORT_DDI_USED(phy) REG_BIT(2 * (phy) + 1)
#define HDPORT_ENABLED REG_BIT(0)
@@ -10357,7 +10360,7 @@ enum skl_power_gate {
/* ICL Clocks */
#define ICL_DPCLKA_CFGCR0 _MMIO(0x164280)
-#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24))
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24, 4, 5))
#define RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) REG_BIT((phy) + 10)
#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) < TC_PORT_4 ? \
(tc_port) + 12 : \
@@ -10392,14 +10395,38 @@ enum skl_power_gate {
#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_DPLL_MAP(clk_sel, phy) \
(((clk_sel) >> DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)) + _DG1_PHY_DPLL_MAP(phy))
+/* ADLS Clocks */
+#define _ADLS_DPCLKA_CFGCR0 0x164280
+#define _ADLS_DPCLKA_CFGCR1 0x1642BC
+#define ADLS_DPCLKA_CFGCR(phy) _MMIO_PHY((phy) / 3, \
+ _ADLS_DPCLKA_CFGCR0, \
+ _ADLS_DPCLKA_CFGCR1)
+#define ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy) (((phy) % 3) * 2)
+/* ADLS DPCLKA_CFGCR0 DDI mask */
+#define ADLS_DPCLKA_DDII_SEL_MASK REG_GENMASK(5, 4)
+#define ADLS_DPCLKA_DDIB_SEL_MASK REG_GENMASK(3, 2)
+#define ADLS_DPCLKA_DDIA_SEL_MASK REG_GENMASK(1, 0)
+/* ADLS DPCLKA_CFGCR1 DDI mask */
+#define ADLS_DPCLKA_DDIK_SEL_MASK REG_GENMASK(3, 2)
+#define ADLS_DPCLKA_DDIJ_SEL_MASK REG_GENMASK(1, 0)
+#define ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy) _PICK((phy), \
+ ADLS_DPCLKA_DDIA_SEL_MASK, \
+ ADLS_DPCLKA_DDIB_SEL_MASK, \
+ ADLS_DPCLKA_DDII_SEL_MASK, \
+ ADLS_DPCLKA_DDIJ_SEL_MASK, \
+ ADLS_DPCLKA_DDIK_SEL_MASK)
+
/* CNL PLL */
#define DPLL0_ENABLE 0x46010
#define DPLL1_ENABLE 0x46014
+#define _ADLS_DPLL2_ENABLE 0x46018
+#define _ADLS_DPLL3_ENABLE 0x46030
#define PLL_ENABLE (1 << 31)
#define PLL_LOCK (1 << 30)
#define PLL_POWER_ENABLE (1 << 27)
#define PLL_POWER_STATE (1 << 26)
-#define CNL_DPLL_ENABLE(pll) _MMIO_PLL(pll, DPLL0_ENABLE, DPLL1_ENABLE)
+#define CNL_DPLL_ENABLE(pll) _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \
+ _ADLS_DPLL2_ENABLE, _ADLS_DPLL3_ENABLE)
#define TBT_PLL_ENABLE _MMIO(0x46020)
@@ -10645,6 +10672,21 @@ enum skl_power_gate {
_DG1_DPLL2_CFGCR1, \
_DG1_DPLL3_CFGCR1)
+/* For ADL-S DPLL4_CFGCR0/1 are used to control DPLL2 */
+#define _ADLS_DPLL3_CFGCR0 0x1642C0
+#define _ADLS_DPLL4_CFGCR0 0x164294
+#define ADLS_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \
+ _TGL_DPLL1_CFGCR0, \
+ _ADLS_DPLL4_CFGCR0, \
+ _ADLS_DPLL3_CFGCR0)
+
+#define _ADLS_DPLL3_CFGCR1 0x1642C4
+#define _ADLS_DPLL4_CFGCR1 0x164298
+#define ADLS_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \
+ _TGL_DPLL1_CFGCR1, \
+ _ADLS_DPLL4_CFGCR1, \
+ _ADLS_DPLL3_CFGCR1)
+
#define _DKL_PHY1_BASE 0x168000
#define _DKL_PHY2_BASE 0x169000
#define _DKL_PHY3_BASE 0x16A000
@@ -11406,6 +11448,9 @@ enum skl_power_gate {
#define BIG_JOINER_ENABLE (1 << 29)
#define MASTER_BIG_JOINER_ENABLE (1 << 28)
#define VGA_CENTERING_ENABLE (1 << 27)
+#define SPLITTER_CONFIGURATION_MASK REG_GENMASK(26, 25)
+#define SPLITTER_CONFIGURATION_2_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 0)
+#define SPLITTER_CONFIGURATION_4_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 1)
#define _ICL_PIPE_DSS_CTL2_PB 0x78204
#define _ICL_PIPE_DSS_CTL2_PC 0x78404
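The ADLS_DPCLKA_CFGCR() and ADLS_DPCLKA_CFGCR_DDI_SHIFT() macros added above encode a simple split: combo PHYs 0-2 get 2-bit clock-select fields in DPCLKA_CFGCR0 and PHYs 3-4 get them in DPCLKA_CFGCR1. The small program below only walks that arithmetic to show which register and bit range each PHY index lands in; the register offsets are copied from the macros above, while the loop itself is purely an illustration.

#include <stdio.h>

#define CFGCR0 0x164280u
#define CFGCR1 0x1642BCu

int main(void)
{
	int phy;

	for (phy = 0; phy < 5; phy++) {
		/* (phy) / 3 picks the register, ((phy) % 3) * 2 the field offset. */
		unsigned int reg = (phy / 3) ? CFGCR1 : CFGCR0;
		unsigned int shift = (phy % 3) * 2;
		unsigned int mask = 0x3u << shift;

		printf("phy %d -> reg 0x%06X, bits [%u:%u], mask 0x%02X\n",
		       phy, reg, shift + 1, shift, mask);
	}
	return 0;
}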
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 63212df33c9e..0bc7b49f843c 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -85,7 +85,7 @@ static void intel_restore_swf(struct drm_i915_private *dev_priv)
void i915_save_display(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
/* Display arbitration control */
if (INTEL_GEN(dev_priv) <= 4)
@@ -100,7 +100,7 @@ void i915_save_display(struct drm_i915_private *dev_priv)
void i915_restore_display(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
intel_restore_swf(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c
index b3a24eac21f1..de0e224b56ce 100644
--- a/drivers/gpu/drm/i915/i915_switcheroo.c
+++ b/drivers/gpu/drm/i915/i915_switcheroo.c
@@ -54,14 +54,14 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
int i915_switcheroo_register(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
return vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
}
void i915_switcheroo_unregister(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
vga_switcheroo_unregister_client(pdev);
}
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 70fca72f5162..172799277dd5 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -61,7 +61,7 @@
*/
void intel_vgpu_detect(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u64 magic;
u16 version_major;
void __iomem *shared_area;
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index f2d5ae59081e..8aaa0f8f6cfd 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -66,6 +66,7 @@ static const char * const platform_names[] = {
PLATFORM_NAME(TIGERLAKE),
PLATFORM_NAME(ROCKETLAKE),
PLATFORM_NAME(DG1),
+ PLATFORM_NAME(ALDERLAKE_S),
};
#undef PLATFORM_NAME
@@ -204,7 +205,7 @@ void intel_device_info_subplatform_init(struct drm_i915_private *i915)
}
if (IS_TIGERLAKE(i915)) {
- struct pci_dev *root, *pdev = i915->drm.pdev;
+ struct pci_dev *root, *pdev = to_pci_dev(i915->drm.dev);
root = list_first_entry(&pdev->bus->devices, typeof(*root), bus_list);
@@ -249,7 +250,11 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
enum pipe pipe;
- if (INTEL_GEN(dev_priv) >= 10) {
+ /* Wa_14011765242: adl-s A0 */
+ if (IS_ADLS_DISP_STEPPING(dev_priv, STEP_A0, STEP_A0))
+ for_each_pipe(dev_priv, pipe)
+ runtime->num_scalers[pipe] = 0;
+ else if (INTEL_GEN(dev_priv) >= 10) {
for_each_pipe(dev_priv, pipe)
runtime->num_scalers[pipe] = 2;
} else if (IS_GEN(dev_priv, 9)) {
@@ -260,7 +265,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES);
- if (IS_ROCKETLAKE(dev_priv))
+ if (HAS_D12_PLANE_MINIMIZATION(dev_priv))
for_each_pipe(dev_priv, pipe)
runtime->num_sprites[pipe] = 4;
else if (INTEL_GEN(dev_priv) >= 11)
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index cf2d528c6e9b..efd138761e14 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -84,6 +84,7 @@ enum intel_platform {
INTEL_TIGERLAKE,
INTEL_ROCKETLAKE,
INTEL_DG1,
+ INTEL_ALDERLAKE_S,
INTEL_MAX_PLATFORMS
};
@@ -116,7 +117,6 @@ enum intel_ppgtt_type {
func(has_64bit_reloc); \
func(gpu_reset_clobbers_display); \
func(has_reset_engine); \
- func(has_fpga_dbg); \
func(has_global_mocs); \
func(has_gt_uc); \
func(has_l3_dpf); \
@@ -143,6 +143,7 @@ enum intel_ppgtt_type {
func(has_dsb); \
func(has_dsc); \
func(has_fbc); \
+ func(has_fpga_dbg); \
func(has_gmch); \
func(has_hdcp); \
func(has_hotplug); \
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
index 73d256fc6830..1e53c017c30d 100644
--- a/drivers/gpu/drm/i915/intel_dram.c
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -427,6 +427,12 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
case 0:
dram_info->type = INTEL_DRAM_DDR4;
break;
+ case 1:
+ dram_info->type = INTEL_DRAM_DDR5;
+ break;
+ case 2:
+ dram_info->type = INTEL_DRAM_LPDDR5;
+ break;
case 3:
dram_info->type = INTEL_DRAM_LPDDR4;
break;
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index ecaf314d60b6..7476f0e063c6 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -121,13 +121,18 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
case INTEL_PCH_TGP2_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n");
drm_WARN_ON(&dev_priv->drm, !IS_TIGERLAKE(dev_priv) &&
- !IS_ROCKETLAKE(dev_priv));
+ !IS_ROCKETLAKE(dev_priv) &&
+ !IS_GEN9_BC(dev_priv));
return PCH_TGP;
case INTEL_PCH_JSP_DEVICE_ID_TYPE:
case INTEL_PCH_JSP2_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv));
return PCH_JSP;
+ case INTEL_PCH_ADP_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found Alder Lake PCH\n");
+ drm_WARN_ON(&dev_priv->drm, !IS_ALDERLAKE_S(dev_priv));
+ return PCH_ADP;
default:
return PCH_NONE;
}
@@ -156,7 +161,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
* make an educated guess as to which PCH is really there.
*/
- if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv))
+ if (IS_ALDERLAKE_S(dev_priv))
+ id = INTEL_PCH_ADP_DEVICE_ID_TYPE;
+ else if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv))
id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
else if (IS_JSL_EHL(dev_priv))
id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h
index 06d2cd50af0b..7318377503b0 100644
--- a/drivers/gpu/drm/i915/intel_pch.h
+++ b/drivers/gpu/drm/i915/intel_pch.h
@@ -26,6 +26,7 @@ enum intel_pch {
PCH_JSP, /* Jasper Lake PCH */
PCH_MCC, /* Mule Creek Canyon PCH */
PCH_TGP, /* Tiger Lake PCH */
+ PCH_ADP, /* Alder Lake PCH */
/* Fake PCHs, functionality handled on the same PCI dev */
PCH_DG1 = 1024,
@@ -53,12 +54,14 @@ enum intel_pch {
#define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380
#define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80
#define INTEL_PCH_JSP2_DEVICE_ID_TYPE 0x3880
+#define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
+#define HAS_PCH_ADP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ADP)
#define HAS_PCH_DG1(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG1)
#define HAS_PCH_JSP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_JSP)
#define HAS_PCH_MCC(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MCC)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 0c3e63f27c29..854ffecd98d9 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -38,6 +38,7 @@
#include "display/intel_display_types.h"
#include "display/intel_fbc.h"
#include "display/intel_sprite.h"
+#include "display/skl_universal_plane.h"
#include "gt/intel_llc.h"
@@ -3920,12 +3921,10 @@ static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
return true;
for_each_plane_id_on_crtc(crtc, plane_id) {
- const struct skl_ddb_entry *plane_alloc =
- &crtc_state->wm.skl.plane_ddb_y[plane_id];
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- if (skl_ddb_entry_size(plane_alloc) < wm->sagv_wm0.min_ddb_alloc)
+ if (wm->wm[0].plane_en && !wm->sagv.wm0.plane_en)
return false;
}
@@ -4746,20 +4745,31 @@ icl_get_total_relative_data_rate(struct intel_atomic_state *state,
return total_data_rate;
}
-static const struct skl_wm_level *
-skl_plane_wm_level(const struct intel_crtc_state *crtc_state,
+const struct skl_wm_level *
+skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
enum plane_id plane_id,
int level)
{
- const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
if (level == 0 && pipe_wm->use_sagv_wm)
- return &wm->sagv_wm0;
+ return &wm->sagv.wm0;
return &wm->wm[level];
}
+const struct skl_wm_level *
+skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
+ enum plane_id plane_id)
+{
+ const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+
+ if (pipe_wm->use_sagv_wm)
+ return &wm->sagv.trans_wm;
+
+ return &wm->trans_wm;
+}
+
static int
skl_allocate_plane_ddb(struct intel_atomic_state *state,
struct intel_crtc *crtc)
@@ -4957,8 +4967,8 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
}
/*
- * Go back and disable the transition watermark if it turns out we
- * don't have enough DDB blocks for it.
+ * Go back and disable the transition and SAGV watermarks
+ * if it turns out we don't have enough DDB blocks for them.
*/
for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_plane_wm *wm =
@@ -4966,6 +4976,12 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
if (wm->trans_wm.plane_res_b >= total[plane_id])
memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
+
+ if (wm->sagv.wm0.plane_res_b >= total[plane_id])
+ memset(&wm->sagv.wm0, 0, sizeof(wm->sagv.wm0));
+
+ if (wm->sagv.trans_wm.plane_res_b >= total[plane_id])
+ memset(&wm->sagv.trans_wm, 0, sizeof(wm->sagv.trans_wm));
}
return 0;
@@ -5315,7 +5331,7 @@ static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
struct skl_plane_wm *plane_wm)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- struct skl_wm_level *sagv_wm = &plane_wm->sagv_wm0;
+ struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
struct skl_wm_level *levels = plane_wm->wm;
unsigned int latency = dev_priv->wm.skl_latency[0] + dev_priv->sagv_block_time_us;
@@ -5324,12 +5340,11 @@ static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
sagv_wm);
}
-static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
- const struct skl_wm_params *wp,
- struct skl_plane_wm *wm)
+static void skl_compute_transition_wm(struct drm_i915_private *dev_priv,
+ struct skl_wm_level *trans_wm,
+ const struct skl_wm_level *wm0,
+ const struct skl_wm_params *wp)
{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
- const struct drm_i915_private *dev_priv = to_i915(dev);
u16 trans_min, trans_amount, trans_y_tile_min;
u16 wm0_sel_res_b, trans_offset_b, res_blocks;
@@ -5367,7 +5382,7 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
* Result Blocks is Result Blocks minus 1 and it should work for the
* current platforms.
*/
- wm0_sel_res_b = wm->wm[0].plane_res_b - 1;
+ wm0_sel_res_b = wm0->plane_res_b - 1;
if (wp->y_tiled) {
trans_y_tile_min =
@@ -5383,8 +5398,8 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
* computing the DDB we'll come back and disable it if that
* assumption turns out to be false.
*/
- wm->trans_wm.plane_res_b = res_blocks + 1;
- wm->trans_wm.plane_en = true;
+ trans_wm->plane_res_b = res_blocks + 1;
+ trans_wm->plane_en = true;
}
static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
@@ -5404,10 +5419,15 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
skl_compute_wm_levels(crtc_state, &wm_params, wm->wm);
- if (INTEL_GEN(dev_priv) >= 12)
+ skl_compute_transition_wm(dev_priv, &wm->trans_wm,
+ &wm->wm[0], &wm_params);
+
+ if (INTEL_GEN(dev_priv) >= 12) {
tgl_compute_sagv_wm(crtc_state, &wm_params, wm);
- skl_compute_transition_wm(crtc_state, &wm_params, wm);
+ skl_compute_transition_wm(dev_priv, &wm->sagv.trans_wm,
+ &wm->sagv.wm0, &wm_params);
+ }
return 0;
}
@@ -5571,23 +5591,19 @@ void skl_write_plane_wm(struct intel_plane *plane,
int level, max_level = ilk_wm_max_level(dev_priv);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
- const struct skl_plane_wm *wm =
- &crtc_state->wm.skl.optimal.planes[plane_id];
+ const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
+ const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
const struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
const struct skl_ddb_entry *ddb_uv =
&crtc_state->wm.skl.plane_ddb_uv[plane_id];
- for (level = 0; level <= max_level; level++) {
- const struct skl_wm_level *wm_level;
-
- wm_level = skl_plane_wm_level(crtc_state, plane_id, level);
-
+ for (level = 0; level <= max_level; level++)
skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
- wm_level);
- }
+ skl_plane_wm_level(pipe_wm, plane_id, level));
+
skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
- &wm->trans_wm);
+ skl_plane_trans_wm(pipe_wm, plane_id));
if (INTEL_GEN(dev_priv) >= 11) {
skl_ddb_entry_write(dev_priv,
@@ -5611,20 +5627,16 @@ void skl_write_cursor_wm(struct intel_plane *plane,
int level, max_level = ilk_wm_max_level(dev_priv);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
- const struct skl_plane_wm *wm =
- &crtc_state->wm.skl.optimal.planes[plane_id];
+ const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
const struct skl_ddb_entry *ddb =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
- for (level = 0; level <= max_level; level++) {
- const struct skl_wm_level *wm_level;
-
- wm_level = skl_plane_wm_level(crtc_state, plane_id, level);
-
+ for (level = 0; level <= max_level; level++)
skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
- wm_level);
- }
- skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
+ skl_plane_wm_level(pipe_wm, plane_id, level));
+
+ skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe),
+ skl_plane_trans_wm(pipe_wm, plane_id));
skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
}
@@ -5654,7 +5666,9 @@ static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
return false;
}
- return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
+ return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm) &&
+ skl_wm_level_equals(&wm1->sagv.wm0, &wm2->sagv.wm0) &&
+ skl_wm_level_equals(&wm1->sagv.trans_wm, &wm2->sagv.trans_wm);
}
static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
@@ -5884,25 +5898,27 @@ skl_print_wm_changes(struct intel_atomic_state *state)
continue;
drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm"
- " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm\n",
+ "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
+ " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
plane->base.base.id, plane->base.name,
enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
enast(old_wm->trans_wm.plane_en),
- enast(old_wm->sagv_wm0.plane_en),
+ enast(old_wm->sagv.wm0.plane_en),
+ enast(old_wm->sagv.trans_wm.plane_en),
enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
enast(new_wm->trans_wm.plane_en),
- enast(new_wm->sagv_wm0.plane_en));
+ enast(new_wm->sagv.wm0.plane_en),
+ enast(new_wm->sagv.trans_wm.plane_en));
drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
- " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n",
+ "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
+ " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
plane->base.base.id, plane->base.name,
enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
@@ -5913,8 +5929,8 @@ skl_print_wm_changes(struct intel_atomic_state *state)
enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,
- enast(old_wm->sagv_wm0.ignore_lines), old_wm->sagv_wm0.plane_res_l,
-
+ enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.plane_res_l,
+ enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.plane_res_l,
enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l,
@@ -5924,45 +5940,72 @@ skl_print_wm_changes(struct intel_atomic_state *state)
enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l,
- enast(new_wm->sagv_wm0.ignore_lines), new_wm->sagv_wm0.plane_res_l);
+ enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.plane_res_l,
+ enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.plane_res_l);
drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
- " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
+ "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
plane->base.base.id, plane->base.name,
old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
old_wm->trans_wm.plane_res_b,
- old_wm->sagv_wm0.plane_res_b,
+ old_wm->sagv.wm0.plane_res_b,
+ old_wm->sagv.trans_wm.plane_res_b,
new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
new_wm->trans_wm.plane_res_b,
- new_wm->sagv_wm0.plane_res_b);
+ new_wm->sagv.wm0.plane_res_b,
+ new_wm->sagv.trans_wm.plane_res_b);
drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
- " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
+ "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
plane->base.base.id, plane->base.name,
old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
old_wm->trans_wm.min_ddb_alloc,
- old_wm->sagv_wm0.min_ddb_alloc,
+ old_wm->sagv.wm0.min_ddb_alloc,
+ old_wm->sagv.trans_wm.min_ddb_alloc,
new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
new_wm->trans_wm.min_ddb_alloc,
- new_wm->sagv_wm0.min_ddb_alloc);
+ new_wm->sagv.wm0.min_ddb_alloc,
+ new_wm->sagv.trans_wm.min_ddb_alloc);
}
}
}
+static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
+ const struct skl_pipe_wm *old_pipe_wm,
+ const struct skl_pipe_wm *new_pipe_wm)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ int level, max_level = ilk_wm_max_level(i915);
+
+ for (level = 0; level <= max_level; level++) {
+ /*
+ * We don't check uv_wm as the hardware doesn't actually
+ * use it. It only gets used for calculating the required
+ * ddb allocation.
+ */
+		if (!skl_wm_level_equals(skl_plane_wm_level(old_pipe_wm, plane->id, level),
+					 skl_plane_wm_level(new_pipe_wm, plane->id, level)))
+ return false;
+ }
+
+ return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id),
+ skl_plane_trans_wm(new_pipe_wm, plane->id));
+}
+
/*
* To make sure the cursor watermark registers are always consistent
* with our computed state the following scenario needs special
@@ -6008,9 +6051,9 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
* with the software state.
*/
if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) &&
- skl_plane_wm_equals(dev_priv,
- &old_crtc_state->wm.skl.optimal.planes[plane_id],
- &new_crtc_state->wm.skl.optimal.planes[plane_id]))
+ skl_plane_selected_wm_equals(plane,
+ &old_crtc_state->wm.skl.optimal,
+ &new_crtc_state->wm.skl.optimal))
continue;
plane_state = intel_atomic_get_plane_state(state, plane);
@@ -6171,19 +6214,18 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
skl_wm_level_from_reg_val(val, &wm->wm[level]);
}
- if (INTEL_GEN(dev_priv) >= 12)
- wm->sagv_wm0 = wm->wm[0];
-
if (plane_id != PLANE_CURSOR)
val = intel_uncore_read(&dev_priv->uncore, PLANE_WM_TRANS(pipe, plane_id));
else
val = intel_uncore_read(&dev_priv->uncore, CUR_WM_TRANS(pipe));
skl_wm_level_from_reg_val(val, &wm->trans_wm);
- }
- if (!crtc->active)
- return;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ wm->sagv.wm0 = wm->wm[0];
+ wm->sagv.trans_wm = wm->trans_wm;
+ }
+ }
}
void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
@@ -7072,7 +7114,7 @@ static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv)
ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
/* Wa_1409825376:tgl (pre-prod)*/
- if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B1))
+ if (IS_TGL_DISP_STEPPING(dev_priv, STEP_A0, STEP_B1))
intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) |
TGL_VRH_GATING_DIS);
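skl_plane_wm_level() and the new skl_plane_trans_wm() now centralize the choice between the regular and the SAGV watermarks: once the pipe has committed to SAGV (use_sagv_wm), level 0 and the transition watermark are taken from the sagv copies. The fragment below is a deliberately simplified, compilable model of that selection; the struct layout is trimmed down and the field names are not the driver's.

#include <stdbool.h>
#include <stdio.h>

struct wm_level { bool enable; int blocks; };

struct plane_wm {
	struct wm_level wm[8];
	struct wm_level trans_wm;
	struct { struct wm_level wm0, trans_wm; } sagv;
};

struct pipe_wm { struct plane_wm plane; bool use_sagv_wm; };

/* Level 0 comes from the SAGV copy when the pipe uses SAGV watermarks. */
static const struct wm_level *plane_wm_level(const struct pipe_wm *p, int level)
{
	if (level == 0 && p->use_sagv_wm)
		return &p->plane.sagv.wm0;
	return &p->plane.wm[level];
}

/* Same selection for the transition watermark. */
static const struct wm_level *plane_trans_wm(const struct pipe_wm *p)
{
	return p->use_sagv_wm ? &p->plane.sagv.trans_wm : &p->plane.trans_wm;
}

int main(void)
{
	struct pipe_wm p = { .use_sagv_wm = true };

	p.plane.wm[0].blocks = 32;
	p.plane.sagv.wm0.blocks = 48;	/* the SAGV WM0 typically needs more blocks */

	printf("selected level 0 blocks: %d, trans_wm enabled: %d\n",
	       plane_wm_level(&p, 0)->blocks, plane_trans_wm(&p)->enable);
	return 0;
}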
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 97550cf0b6df..669c8d505677 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -52,6 +52,11 @@ bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
const struct intel_bw_state *bw_state);
void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
void intel_sagv_post_plane_update(struct intel_atomic_state *state);
+const struct skl_wm_level *skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
+ enum plane_id plane_id,
+ int level);
+const struct skl_wm_level *skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
+ enum plane_id plane_id);
bool skl_wm_level_equals(const struct skl_wm_level *l1,
const struct skl_wm_level *l2);
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 153ca9e65382..4970ef0843dc 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -625,7 +625,7 @@ void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
{
struct drm_i915_private *i915 =
container_of(rpm, struct drm_i915_private, runtime_pm);
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct device *kdev = &pdev->dev;
rpm->kdev = kdev;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 9ac501bcfdad..661b50191f2b 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -465,6 +465,22 @@ fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
return false;
+ /*
+ * Bugs in PCI programming (or failing hardware) can occasionally cause
+ * us to lose access to the MMIO BAR. When this happens, register
+ * reads will come back with 0xFFFFFFFF for every register and things
+ * go bad very quickly. Let's try to detect that special case and at
+ * least try to print a more informative message about what has
+ * happened.
+ *
+ * During normal operation the FPGA_DBG register has several unused
+ * bits that will always read back as 0's so we can use them as canaries
+ * to recognize when MMIO accesses are just busted.
+ */
+ if (unlikely(dbg == ~0))
+ drm_err(&uncore->i915->drm,
+ "Lost access to MMIO BAR; all registers now read back as 0xFFFFFFFF!\n");
+
__raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
return true;
@@ -1780,7 +1796,7 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
static int uncore_mmio_setup(struct intel_uncore *uncore)
{
struct drm_i915_private *i915 = uncore->i915;
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
int mmio_bar;
int mmio_size;
@@ -1812,7 +1828,7 @@ static int uncore_mmio_setup(struct intel_uncore *uncore)
static void uncore_mmio_cleanup(struct intel_uncore *uncore)
{
- struct pci_dev *pdev = uncore->i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(uncore->i915->drm.dev);
pci_iounmap(pdev, uncore->regs);
}
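The new check in fpga_check_for_unclaimed_mmio() leans on the fact that a dead MMIO BAR returns 0xFFFFFFFF for every read, while FPGA_DBG normally has bits that must read as zero. The toy function below shows only that decision structure in standalone C; the bit chosen to stand in for FPGA_DBG_RM_NOCLAIM is arbitrary and does not reflect the real register layout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in bit for FPGA_DBG_RM_NOCLAIM; the real bit position is not implied. */
#define NOCLAIM_BIT (1u << 31)

static bool check_unclaimed(uint32_t dbg)
{
	if (!(dbg & NOCLAIM_BIT))
		return false;		/* no unclaimed access recorded */

	/*
	 * A lost MMIO BAR reads back all ones, so bits that should always be
	 * zero appear set too: use that as the canary for a dead BAR.
	 */
	if (dbg == ~0u)
		printf("MMIO BAR looks dead: every read returns 0xFFFFFFFF\n");

	printf("unclaimed MMIO access flagged (dbg=0x%08X)\n", dbg);
	return true;
}

int main(void)
{
	check_unclaimed(0x00000000);	/* clean read */
	check_unclaimed(NOCLAIM_BIT);	/* genuine unclaimed access */
	check_unclaimed(0xFFFFFFFF);	/* dead BAR */
	return 0;
}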
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index 7270fc8ca801..5c7ae40bba63 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -74,7 +74,7 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
ppgtt->vm.i915 = i915;
ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
ppgtt->vm.file = ERR_PTR(-ENODEV);
- ppgtt->vm.dma = &i915->drm.pdev->dev;
+ ppgtt->vm.dma = i915->drm.dev;
i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);