-rw-r--r--  sys/dev/pci/drm/i915/i915_devlist.h         |  22
-rw-r--r--  sys/dev/pci/drm/i915/i915_dma.c             |  34
-rw-r--r--  sys/dev/pci/drm/i915/i915_drv.c             |  53
-rw-r--r--  sys/dev/pci/drm/i915/i915_drv.h             |  49
-rw-r--r--  sys/dev/pci/drm/i915/i915_gem.c             |   2
-rw-r--r--  sys/dev/pci/drm/i915/i915_gem_stolen.c      |   6
-rw-r--r--  sys/dev/pci/drm/i915/i915_guc_submission.c  |   6
-rw-r--r--  sys/dev/pci/drm/i915/i915_irq.c             |   4
-rw-r--r--  sys/dev/pci/drm/i915/i915_reg.h             |  16
-rw-r--r--  sys/dev/pci/drm/i915/intel_csr.c            |  24
-rw-r--r--  sys/dev/pci/drm/i915/intel_ddi.c            |  29
-rw-r--r--  sys/dev/pci/drm/i915/intel_display.c        |  12
-rw-r--r--  sys/dev/pci/drm/i915/intel_dp.c             |  10
-rw-r--r--  sys/dev/pci/drm/i915/intel_fbc.c            |   3
-rw-r--r--  sys/dev/pci/drm/i915/intel_guc_loader.c     |  13
-rw-r--r--  sys/dev/pci/drm/i915/intel_hdmi.c           |   2
-rw-r--r--  sys/dev/pci/drm/i915/intel_i2c.c            |   6
-rw-r--r--  sys/dev/pci/drm/i915/intel_lrc.c            |  29
-rw-r--r--  sys/dev/pci/drm/i915/intel_mocs.c           |  12
-rw-r--r--  sys/dev/pci/drm/i915/intel_panel.c          |   3
-rw-r--r--  sys/dev/pci/drm/i915/intel_pm.c             |  32
-rw-r--r--  sys/dev/pci/drm/i915/intel_ringbuffer.c     | 186
-rw-r--r--  sys/dev/pci/drm/i915/intel_runtime_pm.c     |  14
-rw-r--r--  sys/dev/pci/drm/i915_pciids.h               |  36
24 files changed, 447 insertions, 156 deletions
diff --git a/sys/dev/pci/drm/i915/i915_devlist.h b/sys/dev/pci/drm/i915/i915_devlist.h
index 9c35ceecba5..684d3c4d39d 100644
--- a/sys/dev/pci/drm/i915/i915_devlist.h
+++ b/sys/dev/pci/drm/i915/i915_devlist.h
@@ -154,4 +154,26 @@ static const struct pci_matchid i915_devices[] = {
{ 0x8086, 0x1a85 },
{ 0x8086, 0x5a84 },
{ 0x8086, 0x5a85 },
+ { 0x8086, 0x5913 },
+ { 0x8086, 0x5915 },
+ { 0x8086, 0x5917 },
+ { 0x8086, 0x5906 },
+ { 0x8086, 0x590e },
+ { 0x8086, 0x5902 },
+ { 0x8086, 0x590b },
+ { 0x8086, 0x590a },
+ { 0x8086, 0x5916 },
+ { 0x8086, 0x5921 },
+ { 0x8086, 0x591e },
+ { 0x8086, 0x5912 },
+ { 0x8086, 0x591b },
+ { 0x8086, 0x591a },
+ { 0x8086, 0x591d },
+ { 0x8086, 0x5926 },
+ { 0x8086, 0x592b },
+ { 0x8086, 0x592a },
+ { 0x8086, 0x5932 },
+ { 0x8086, 0x593b },
+ { 0x8086, 0x593a },
+ { 0x8086, 0x593d },
};
diff --git a/sys/dev/pci/drm/i915/i915_dma.c b/sys/dev/pci/drm/i915/i915_dma.c
index 6cbee8b26b4..54225532eea 100644
--- a/sys/dev/pci/drm/i915/i915_dma.c
+++ b/sys/dev/pci/drm/i915/i915_dma.c
@@ -712,7 +712,8 @@ static void gen9_sseu_info_init(struct drm_device *dev)
* supports EU power gating on devices with more than one EU
* pair per subslice.
*/
- info->has_slice_pg = (IS_SKYLAKE(dev) && (info->slice_total > 1));
+ info->has_slice_pg = ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
+ (info->slice_total > 1));
info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
info->has_eu_pg = (info->eu_per_subslice > 2);
}
@@ -858,6 +859,37 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
DRM_INFO("Display fused off, disabling\n");
info->num_pipes = 0;
}
+ } else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) {
+ u32 dfsm = I915_READ(SKL_DFSM);
+ u8 disabled_mask = 0;
+ bool invalid;
+ int num_bits;
+
+ if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
+ disabled_mask |= BIT(PIPE_A);
+ if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
+ disabled_mask |= BIT(PIPE_B);
+ if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
+ disabled_mask |= BIT(PIPE_C);
+
+ num_bits = hweight8(disabled_mask);
+
+ switch (disabled_mask) {
+ case BIT(PIPE_A):
+ case BIT(PIPE_B):
+ case BIT(PIPE_A) | BIT(PIPE_B):
+ case BIT(PIPE_A) | BIT(PIPE_C):
+ invalid = true;
+ break;
+ default:
+ invalid = false;
+ }
+
+ if (num_bits > info->num_pipes || invalid)
+ DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
+ disabled_mask);
+ else
+ info->num_pipes -= num_bits;
}
/* Initialize slice/subslice/EU info */
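The new pipe-fuse handling in intel_device_info_runtime_init() only trusts a SKL_DFSM disable mask that matches a supported layout. The standalone C sketch below restates that rule outside the driver; the helper name and the assert-based demo are illustrative only, not part of the patch.

/*
 * Mirrors the fuse check above: these four disable masks are the ones the
 * driver rejects; everything else (e.g. only pipe C fused off, or pipes B
 * and C fused off) passes and simply reduces num_pipes.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define PIPE_A	0
#define PIPE_B	1
#define PIPE_C	2
#define BIT(n)	(1u << (n))

static bool gen9_pipe_fuse_mask_invalid(uint8_t disabled_mask)
{
	switch (disabled_mask) {
	case BIT(PIPE_A):
	case BIT(PIPE_B):
	case BIT(PIPE_A) | BIT(PIPE_B):
	case BIT(PIPE_A) | BIT(PIPE_C):
		return true;
	default:
		return false;
	}
}

int main(void)
{
	assert(!gen9_pipe_fuse_mask_invalid(BIT(PIPE_C)));		  /* C off: ok */
	assert(!gen9_pipe_fuse_mask_invalid(BIT(PIPE_B) | BIT(PIPE_C)));  /* B+C off: ok */
	assert(gen9_pipe_fuse_mask_invalid(BIT(PIPE_A)));		  /* A off: rejected */
	return 0;
}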
diff --git a/sys/dev/pci/drm/i915/i915_drv.c b/sys/dev/pci/drm/i915/i915_drv.c
index b1d20030b2f..4f352ea7dd0 100644
--- a/sys/dev/pci/drm/i915/i915_drv.c
+++ b/sys/dev/pci/drm/i915/i915_drv.c
@@ -388,6 +388,7 @@ static const struct intel_device_info intel_skylake_gt3_info = {
static const struct intel_device_info intel_broxton_info = {
.is_preliminary = 1,
+ .is_broxton = 1,
.gen = 9,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
@@ -399,6 +400,34 @@ static const struct intel_device_info intel_broxton_info = {
IVB_CURSOR_OFFSETS,
};
+static const struct intel_device_info intel_kabylake_info = {
+ .is_kabylake = 1,
+ .gen = 9,
+ .num_pipes = 3,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+ .has_llc = 1,
+ .has_ddi = 1,
+ .has_fpga_dbg = 1,
+ .has_fbc = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_kabylake_gt3_info = {
+ .is_kabylake = 1,
+ .gen = 9,
+ .num_pipes = 3,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+ .has_llc = 1,
+ .has_ddi = 1,
+ .has_fpga_dbg = 1,
+ .has_fbc = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
+};
+
/*
* Make sure any device matches here are from most specific to most
* general. For example, since the Quanta match is based on the subsystem
@@ -439,7 +468,11 @@ static const struct intel_device_info intel_broxton_info = {
INTEL_SKL_GT1_IDS(&intel_skylake_info), \
INTEL_SKL_GT2_IDS(&intel_skylake_info), \
INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), \
- INTEL_BXT_IDS(&intel_broxton_info)
+ INTEL_BXT_IDS(&intel_broxton_info), \
+ INTEL_KBL_GT1_IDS(&intel_kabylake_info), \
+ INTEL_KBL_GT2_IDS(&intel_kabylake_info), \
+ INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info), \
+ INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info)
static const struct drm_pcidev pciidlist[] = { /* aka */
INTEL_PCI_IDS,
@@ -469,7 +502,7 @@ static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
ret = PCH_LPT;
DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
- } else if (IS_SKYLAKE(dev)) {
+ } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
ret = PCH_SPT;
DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
}
@@ -543,11 +576,17 @@ void intel_detect_pch(struct drm_device *dev)
} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
- WARN_ON(!IS_SKYLAKE(dev));
+ WARN_ON(!IS_SKYLAKE(dev) &&
+ !IS_KABYLAKE(dev));
} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
- WARN_ON(!IS_SKYLAKE(dev));
+ WARN_ON(!IS_SKYLAKE(dev) &&
+ !IS_KABYLAKE(dev));
+ } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_KBP;
+ DRM_DEBUG_KMS("Found KabyPoint PCH\n");
+ WARN_ON(!IS_KABYLAKE(dev));
#ifdef notyet
} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
@@ -866,7 +905,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
if (IS_BROXTON(dev))
ret = bxt_resume_prepare(dev_priv);
- else if (IS_SKYLAKE(dev_priv))
+ else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
ret = skl_resume_prepare(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_disable_pc8(dev_priv);
@@ -1605,7 +1644,7 @@ static int intel_runtime_resume(struct device *device)
if (IS_BROXTON(dev))
ret = bxt_resume_prepare(dev_priv);
- else if (IS_SKYLAKE(dev))
+ else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
ret = skl_resume_prepare(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_disable_pc8(dev_priv);
@@ -1650,7 +1689,7 @@ static int intel_suspend_complete(struct drm_i915_private *dev_priv)
if (IS_BROXTON(dev_priv))
ret = bxt_suspend_complete(dev_priv);
- else if (IS_SKYLAKE(dev_priv))
+ else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
ret = skl_suspend_complete(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
ret = hsw_suspend_complete(dev_priv);
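intel_detect_pch() classifies the PCH by comparing the ISA bridge's PCI device ID, reduced to its upper byte, against the *_DEVICE_ID_TYPE constants, which is how the new 0xA200 KabyPoint entry is matched. A minimal sketch of that comparison, assuming the driver's usual 0xff00 device-ID mask (not shown in this diff); the device IDs in main() are made up for the demo:

#include <stdint.h>
#include <stdio.h>

#define INTEL_PCH_SPT_DEVICE_ID_TYPE	0xA100	/* SunrisePoint */
#define INTEL_PCH_KBP_DEVICE_ID_TYPE	0xA200	/* KabyPoint */

static int is_kbp_pch(uint16_t pci_device_id)
{
	/* 0xff00 mirrors the driver's PCH device-ID mask (assumed here):
	 * only the upper byte identifies the PCH family. */
	return (pci_device_id & 0xff00) == INTEL_PCH_KBP_DEVICE_ID_TYPE;
}

int main(void)
{
	printf("0xA2AB -> KBP? %d\n", is_kbp_pch(0xA2AB));	/* 1 */
	printf("0xA14E -> KBP? %d\n", is_kbp_pch(0xA14E));	/* 0 (SPT range) */
	return 0;
}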
diff --git a/sys/dev/pci/drm/i915/i915_drv.h b/sys/dev/pci/drm/i915/i915_drv.h
index 51479660f55..c1e4b6f905b 100644
--- a/sys/dev/pci/drm/i915/i915_drv.h
+++ b/sys/dev/pci/drm/i915/i915_drv.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: i915_drv.h,v 1.77 2017/07/19 22:05:58 kettenis Exp $ */
+/* $OpenBSD: i915_drv.h,v 1.78 2017/09/30 07:36:56 robert Exp $ */
/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
*/
/*
@@ -849,6 +849,8 @@ struct intel_csr {
func(is_valleyview) sep \
func(is_haswell) sep \
func(is_skylake) sep \
+ func(is_broxton) sep \
+ func(is_kabylake) sep \
func(is_preliminary) sep \
func(has_fbc) sep \
func(has_pipe_cxsr) sep \
@@ -1076,6 +1078,7 @@ enum intel_pch {
PCH_CPT, /* Cougarpoint PCH */
PCH_LPT, /* Lynxpoint PCH */
PCH_SPT, /* Sunrisepoint PCH */
+ PCH_KBP, /* Kabypoint PCH */
PCH_NOP,
};
@@ -2565,6 +2568,16 @@ struct drm_i915_cmd_table {
#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
+#define REVID_FOREVER (0xff)
+
+/*
+ * Return true if revision is in range [since,until] inclusive.
+ *
+ * Use 0 for open-ended since, and REVID_FOREVER for open-ended until.
+ */
+#define IS_REVID(p, since, until) \
+ (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
+
#define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577)
#define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562)
#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
@@ -2591,7 +2604,8 @@ struct drm_i915_cmd_table {
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
-#define IS_BROXTON(dev) (!INTEL_INFO(dev)->is_skylake && IS_GEN9(dev))
+#define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton)
+#define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
@@ -2619,6 +2633,14 @@ struct drm_i915_cmd_table {
#define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \
INTEL_DEVID(dev) == 0x1915 || \
INTEL_DEVID(dev) == 0x191E)
+#define IS_KBL_ULT(dev) (INTEL_DEVID(dev) == 0x5906 || \
+ INTEL_DEVID(dev) == 0x5913 || \
+ INTEL_DEVID(dev) == 0x5916 || \
+ INTEL_DEVID(dev) == 0x5921 || \
+ INTEL_DEVID(dev) == 0x5926)
+#define IS_KBL_ULX(dev) (INTEL_DEVID(dev) == 0x590E || \
+ INTEL_DEVID(dev) == 0x5915 || \
+ INTEL_DEVID(dev) == 0x591E)
#define IS_SKL_GT3(dev) (IS_SKYLAKE(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)
#define IS_SKL_GT4(dev) (IS_SKYLAKE(dev) && \
@@ -2633,10 +2655,23 @@ struct drm_i915_cmd_table {
#define SKL_REVID_E0 (0x4)
#define SKL_REVID_F0 (0x5)
+#define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
+
#define BXT_REVID_A0 (0x0)
+#define BXT_REVID_A1 (0x1)
#define BXT_REVID_B0 (0x3)
#define BXT_REVID_C0 (0x9)
+#define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
+
+#define KBL_REVID_A0 (0x0)
+#define KBL_REVID_B0 (0x1)
+#define KBL_REVID_C0 (0x2)
+#define KBL_REVID_D0 (0x3)
+#define KBL_REVID_E0 (0x4)
+
+#define IS_KBL_REVID(p, since, until) (IS_KABYLAKE(p) && IS_REVID(p, since, until))
+
/*
* The genX designation typically refers to the render engine, so render
* capability related checks should use IS_GEN, while display and other checks
@@ -2707,17 +2742,17 @@ struct drm_i915_cmd_table {
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \
IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
- IS_SKYLAKE(dev))
+ IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
- IS_SKYLAKE(dev))
+ IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
#define HAS_CSR(dev) (IS_GEN9(dev))
-#define HAS_GUC_UCODE(dev) (IS_GEN9(dev))
-#define HAS_GUC_SCHED(dev) (IS_GEN9(dev))
+#define HAS_GUC_UCODE(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev))
+#define HAS_GUC_SCHED(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev))
#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
INTEL_INFO(dev)->gen >= 8)
@@ -2733,10 +2768,12 @@ struct drm_i915_cmd_table {
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
+#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA200
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
+#define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
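The IS_REVID()/IS_SKL_REVID()/IS_BXT_REVID()/IS_KBL_REVID() helpers introduced above replace open-coded revision comparisons with inclusive [since, until] range checks. A self-contained demo of the semantics; the IS_SKYLAKE()/INTEL_REVID() stand-ins below are fakes for illustration only:

#include <stdio.h>

#define REVID_FOREVER	0xff
#define SKL_REVID_B0	0x1
#define SKL_REVID_C0	0x2

/* Stand-ins for driver state; in i915 these come from the PCI device. */
static int is_skylake = 1;
static int revid = 0x1;

#define IS_SKYLAKE(p)	(is_skylake)
#define INTEL_REVID(p)	(revid)
#define IS_REVID(p, since, until) \
	(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
#define IS_SKL_REVID(p, since, until) \
	(IS_SKYLAKE(p) && IS_REVID(p, since, until))

int main(void)
{
	void *dev = 0;
	(void)dev;	/* only used as a macro argument here */

	/* old open-coded style vs. the new inclusive-range helper */
	printf("old: %d\n", IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0);
	printf("new: %d\n", IS_SKL_REVID(dev, 0, SKL_REVID_B0));
	/* "C0 and newer" is spelled with an open-ended upper bound */
	printf("C0+: %d\n", IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER));
	return 0;
}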
diff --git a/sys/dev/pci/drm/i915/i915_gem.c b/sys/dev/pci/drm/i915/i915_gem.c
index 4caf49e5936..676a2e6ea04 100644
--- a/sys/dev/pci/drm/i915/i915_gem.c
+++ b/sys/dev/pci/drm/i915/i915_gem.c
@@ -4363,7 +4363,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
* cacheline, whereas normally such cachelines would get
* invalidated.
*/
- if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)
+ if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
return -ENODEV;
level = I915_CACHE_LLC;
diff --git a/sys/dev/pci/drm/i915/i915_gem_stolen.c b/sys/dev/pci/drm/i915/i915_gem_stolen.c
index e055296e485..c27b126886b 100644
--- a/sys/dev/pci/drm/i915/i915_gem_stolen.c
+++ b/sys/dev/pci/drm/i915/i915_gem_stolen.c
@@ -56,7 +56,8 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
/* See the comment at the drm_mm_init() call for more about this check.
* WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */
- if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096)
+ if (start < 4096 && (IS_GEN8(dev_priv) ||
+ IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)))
start = 4096;
mutex_lock(&dev_priv->mm.stolen_lock);
@@ -448,7 +449,8 @@ int i915_gem_init_stolen(struct drm_device *dev)
&reserved_size);
break;
default:
- if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+ if (IS_BROADWELL(dev_priv) ||
+ IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev))
bdw_get_stolen_reserved(dev_priv, &reserved_base,
&reserved_size);
else
diff --git a/sys/dev/pci/drm/i915/i915_guc_submission.c b/sys/dev/pci/drm/i915/i915_guc_submission.c
index 89fa5f222e3..2e88f88f281 100644
--- a/sys/dev/pci/drm/i915/i915_guc_submission.c
+++ b/sys/dev/pci/drm/i915/i915_guc_submission.c
@@ -165,9 +165,9 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
/* WaRsDisableCoarsePowerGating:skl,bxt */
if (!intel_enable_rc6(dev_priv->dev) ||
- (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
- (IS_SKL_GT3(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0)) ||
- (IS_SKL_GT4(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0)))
+ IS_BXT_REVID(dev, 0, BXT_REVID_A1) ||
+ (IS_SKL_GT3(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)) ||
+ (IS_SKL_GT4(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)))
data[1] = 0;
else
/* bit 0 and 1 are for Render and Media domain separately */
diff --git a/sys/dev/pci/drm/i915/i915_irq.c b/sys/dev/pci/drm/i915/i915_irq.c
index 5947d3e5060..7a6743d2fae 100644
--- a/sys/dev/pci/drm/i915/i915_irq.c
+++ b/sys/dev/pci/drm/i915/i915_irq.c
@@ -2349,7 +2349,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
I915_WRITE(SDEIIR, pch_iir);
ret = IRQ_HANDLED;
- if (HAS_PCH_SPT(dev_priv))
+ if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
spt_irq_handler(dev, pch_iir);
else
cpt_irq_handler(dev, pch_iir);
@@ -4457,7 +4457,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->disable_vblank = gen8_disable_vblank;
if (IS_BROXTON(dev))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
- else if (HAS_PCH_SPT(dev))
+ else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
else
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
diff --git a/sys/dev/pci/drm/i915/i915_reg.h b/sys/dev/pci/drm/i915/i915_reg.h
index cace154bbdc..adb3a297ce7 100644
--- a/sys/dev/pci/drm/i915/i915_reg.h
+++ b/sys/dev/pci/drm/i915/i915_reg.h
@@ -1585,6 +1585,12 @@ enum skl_disp_power_wells {
#define GEN7_TLB_RD_ADDR 0x4700
+#define GAMT_CHKN_BIT_REG 0x4ab8
+#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28)
+
+#define GEN9_GAMT_ECO_REG_RW_IA 0x4ab0
+#define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1<<18)
+
#if 0
#define PRB0_TAIL 0x02030
#define PRB0_HEAD 0x02034
@@ -1707,6 +1713,10 @@ enum skl_disp_power_wells {
#define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2))
#define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2))
+/* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */
+#define GEN9_CSFE_CHICKEN1_RCS 0x20D4
+#define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2)
+
#define GFX_MODE 0x02520
#define GFX_MODE_GEN7 0x0229c
#define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c)
@@ -5897,6 +5907,7 @@ enum skl_disp_power_wells {
#define CHICKEN_PIPESL_1(pipe) _PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
#define DISP_ARB_CTL 0x45000
+#define DISP_FBC_MEMORY_WAKE (1<<31)
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
#define DISP_FBC_WM_DIS (1<<15)
#define DISP_ARB_CTL2 0x45004
@@ -5916,6 +5927,9 @@ enum skl_disp_power_wells {
#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23)
#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23)
#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)
+#define SKL_DFSM_PIPE_A_DISABLE (1 << 30)
+#define SKL_DFSM_PIPE_B_DISABLE (1 << 21)
+#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)
#define FF_SLICE_CS_CHICKEN2 0x20e4
#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
@@ -5925,6 +5939,7 @@ enum skl_disp_power_wells {
# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14)
#define COMMON_SLICE_CHICKEN2 0x7014
+# define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8)
# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
#define HIZ_CHICKEN 0x7018
@@ -6761,6 +6776,7 @@ enum skl_disp_power_wells {
#define GEN7_UCGCTL4 0x940c
#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
+#define GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE (1<<14)
#define GEN6_RCGCTL1 0x9410
#define GEN6_RCGCTL2 0x9414
diff --git a/sys/dev/pci/drm/i915/intel_csr.c b/sys/dev/pci/drm/i915/intel_csr.c
index 0fb44be4074..ffb1e32cc77 100644
--- a/sys/dev/pci/drm/i915/intel_csr.c
+++ b/sys/dev/pci/drm/i915/intel_csr.c
@@ -43,9 +43,11 @@
* be moved to FW_FAILED.
*/
+#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_KBL);
MODULE_FIRMWARE(I915_CSR_SKL);
MODULE_FIRMWARE(I915_CSR_BXT);
@@ -180,6 +182,14 @@ struct stepping_info {
char substepping;
};
+/*
+ * Kabylake derivated from Skylake H0, so SKL H0
+ * is the right firmware for KBL A0 (revid 0).
+ */
+static const struct stepping_info kbl_stepping_info[] = {
+ {'H', '0'}, {'I', '0'}
+};
+
static const struct stepping_info skl_stepping_info[] = {
{'A', '0'}, {'B', '0'}, {'C', '0'},
{'D', '0'}, {'E', '0'}, {'F', '0'},
@@ -194,7 +204,10 @@ static struct stepping_info bxt_stepping_info[] = {
static char intel_get_stepping(struct drm_device *dev)
{
- if (IS_SKYLAKE(dev) && (dev->pdev->revision <
+ if (IS_KABYLAKE(dev) && (dev->pdev->revision <
+ ARRAY_SIZE(kbl_stepping_info)))
+ return kbl_stepping_info[dev->pdev->revision].stepping;
+ else if (IS_SKYLAKE(dev) && (dev->pdev->revision <
ARRAY_SIZE(skl_stepping_info)))
return skl_stepping_info[dev->pdev->revision].stepping;
else if (IS_BROXTON(dev) && (dev->pdev->revision <
@@ -206,7 +219,10 @@ static char intel_get_stepping(struct drm_device *dev)
static char intel_get_substepping(struct drm_device *dev)
{
- if (IS_SKYLAKE(dev) && (dev->pdev->revision <
+ if (IS_KABYLAKE(dev) && (dev->pdev->revision <
+ ARRAY_SIZE(kbl_stepping_info)))
+ return kbl_stepping_info[dev->pdev->revision].substepping;
+ else if (IS_SKYLAKE(dev) && (dev->pdev->revision <
ARRAY_SIZE(skl_stepping_info)))
return skl_stepping_info[dev->pdev->revision].substepping;
else if (IS_BROXTON(dev) && (dev->pdev->revision <
@@ -435,7 +451,9 @@ void intel_csr_ucode_init(struct drm_device *dev)
if (!HAS_CSR(dev))
return;
- if (IS_SKYLAKE(dev))
+ if (IS_KABYLAKE(dev))
+ csr->fw_path = I915_CSR_KBL;
+ else if (IS_SKYLAKE(dev))
csr->fw_path = I915_CSR_SKL;
else if (IS_BROXTON(dev_priv))
csr->fw_path = I915_CSR_BXT;
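The new kbl_stepping_info[] table maps the PCI revision directly to the stepping/substepping pair used to pick the matching DMC firmware section. A minimal sketch of that lookup, assuming the struct's first member is named 'stepping' (only 'substepping' is visible in the hunk above):

#include <stdio.h>

struct stepping_info {
	char stepping;		/* assumed first field */
	char substepping;	/* visible in the hunk above */
};

static const struct stepping_info kbl_stepping_info[] = {
	{'H', '0'}, {'I', '0'}
};

static char kbl_get_stepping(unsigned int revision)
{
	if (revision < sizeof(kbl_stepping_info) / sizeof(kbl_stepping_info[0]))
		return kbl_stepping_info[revision].stepping;
	return 0;	/* unknown revision; the driver has its own fallback */
}

int main(void)
{
	/* KBL A0 (revid 0) reports 'H''0', so it picks up the SKL H0 DMC blob. */
	printf("KBL revid 0 -> stepping %c%c\n",
	    kbl_get_stepping(0), kbl_stepping_info[0].substepping);
	return 0;
}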
diff --git a/sys/dev/pci/drm/i915/intel_ddi.c b/sys/dev/pci/drm/i915/intel_ddi.c
index 3c6b07683bd..6f66b10e82f 100644
--- a/sys/dev/pci/drm/i915/intel_ddi.c
+++ b/sys/dev/pci/drm/i915/intel_ddi.c
@@ -353,10 +353,10 @@ static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev,
{
const struct ddi_buf_trans *ddi_translations;
- if (IS_SKL_ULX(dev)) {
+ if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
ddi_translations = skl_y_ddi_translations_dp;
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
- } else if (IS_SKL_ULT(dev)) {
+ } else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) {
ddi_translations = skl_u_ddi_translations_dp;
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
} else {
@@ -373,7 +373,7 @@ static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
const struct ddi_buf_trans *ddi_translations;
- if (IS_SKL_ULX(dev)) {
+ if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
if (dev_priv->edp_low_vswing) {
ddi_translations = skl_y_ddi_translations_edp;
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
@@ -381,7 +381,7 @@ static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev,
ddi_translations = skl_y_ddi_translations_dp;
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
}
- } else if (IS_SKL_ULT(dev)) {
+ } else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) {
if (dev_priv->edp_low_vswing) {
ddi_translations = skl_u_ddi_translations_edp;
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
@@ -408,7 +408,7 @@ skl_get_buf_trans_hdmi(struct drm_device *dev,
{
const struct ddi_buf_trans *ddi_translations;
- if (IS_SKL_ULX(dev)) {
+ if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
ddi_translations = skl_y_ddi_translations_hdmi;
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
} else {
@@ -448,7 +448,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
bxt_ddi_vswing_sequence(dev, hdmi_level, port,
INTEL_OUTPUT_HDMI);
return;
- } else if (IS_SKYLAKE(dev)) {
+ } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
ddi_translations_fdi = NULL;
ddi_translations_dp =
skl_get_buf_trans_dp(dev, &n_dp_entries);
@@ -1192,7 +1192,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
if (INTEL_INFO(dev)->gen <= 8)
hsw_ddi_clock_get(encoder, pipe_config);
- else if (IS_SKYLAKE(dev))
+ else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
skl_ddi_clock_get(encoder, pipe_config);
else if (IS_BROXTON(dev))
bxt_ddi_clock_get(encoder, pipe_config);
@@ -1789,7 +1789,7 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_encoder *intel_encoder =
intel_ddi_get_crtc_new_encoder(crtc_state);
- if (IS_SKYLAKE(dev))
+ if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
return skl_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder);
else if (IS_BROXTON(dev))
@@ -2272,7 +2272,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
level = translate_signal_level(signal_levels);
- if (IS_SKYLAKE(dev))
+ if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
skl_ddi_set_iboost(dev, level, port, encoder->type);
else if (IS_BROXTON(dev))
bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
@@ -2295,7 +2295,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
intel_edp_panel_on(intel_dp);
}
- if (IS_SKYLAKE(dev)) {
+ if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
uint32_t dpll = crtc->config->ddi_pll_sel;
uint32_t val;
@@ -2390,7 +2390,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
intel_edp_panel_off(intel_dp);
}
- if (IS_SKYLAKE(dev))
+ if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
DPLL_CTRL2_DDI_CLK_OFF(port)));
else if (INTEL_INFO(dev)->gen < 9)
@@ -3001,14 +3001,14 @@ void intel_ddi_pll_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val = I915_READ(LCPLL_CTL);
- if (IS_SKYLAKE(dev))
+ if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
skl_shared_dplls_init(dev_priv);
else if (IS_BROXTON(dev))
bxt_shared_dplls_init(dev_priv);
else
hsw_shared_dplls_init(dev_priv);
- if (IS_SKYLAKE(dev)) {
+ if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
int cdclk_freq;
cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
@@ -3307,8 +3307,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
* On BXT A0/A1, sw needs to activate DDIA HPD logic and
* interrupts to check the external panel connection.
*/
- if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0)
- && port == PORT_B)
+ if (IS_BXT_REVID(dev, 0, BXT_REVID_A1) && port == PORT_B)
dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port;
else
dev_priv->hotplug.irq_port[port] = intel_dig_port;
diff --git a/sys/dev/pci/drm/i915/intel_display.c b/sys/dev/pci/drm/i915/intel_display.c
index 5e3a6f92dbf..67570ec28e1 100644
--- a/sys/dev/pci/drm/i915/intel_display.c
+++ b/sys/dev/pci/drm/i915/intel_display.c
@@ -5406,7 +5406,7 @@ static void intel_update_max_cdclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (IS_SKYLAKE(dev)) {
+ if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
if (limit == SKL_DFSM_CDCLK_LIMIT_675)
@@ -9846,7 +9846,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
- if (IS_SKYLAKE(dev))
+ if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
skylake_get_ddi_pll(dev_priv, port, pipe_config);
else if (IS_BROXTON(dev))
bxt_get_ddi_pll(dev_priv, port, pipe_config);
@@ -12101,7 +12101,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->dpll_hw_state.pll9,
pipe_config->dpll_hw_state.pll10,
pipe_config->dpll_hw_state.pcsdw12);
- } else if (IS_SKYLAKE(dev)) {
+ } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
"ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
pipe_config->ddi_pll_sel,
@@ -14127,7 +14127,7 @@ static void intel_setup_outputs(struct drm_device *dev)
*/
found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
/* WaIgnoreDDIAStrap: skl */
- if (found || IS_SKYLAKE(dev))
+ if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
intel_ddi_init(dev, PORT_A);
/* DDI B, C and D detection is indicated by the SFUSE_STRAP
@@ -14143,7 +14143,7 @@ static void intel_setup_outputs(struct drm_device *dev)
/*
* On SKL we don't have a way to detect DDI-E so we rely on VBT.
*/
- if (IS_SKYLAKE(dev) &&
+ if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
(dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
@@ -14601,7 +14601,7 @@ static void intel_init_display(struct drm_device *dev)
}
/* Returns the core display clock speed */
- if (IS_SKYLAKE(dev))
+ if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
dev_priv->display.get_display_clock_speed =
skylake_get_display_clock_speed;
else if (IS_BROXTON(dev))
diff --git a/sys/dev/pci/drm/i915/intel_dp.c b/sys/dev/pci/drm/i915/intel_dp.c
index 5140368f817..7779a6979f5 100644
--- a/sys/dev/pci/drm/i915/intel_dp.c
+++ b/sys/dev/pci/drm/i915/intel_dp.c
@@ -1024,7 +1024,7 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
/* On SKL we don't have Aux for port E so we rely on VBT to set
* a proper alternate aux channel.
*/
- if (IS_SKYLAKE(dev) && port == PORT_E) {
+ if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && port == PORT_E) {
switch (info->alternate_aux_channel) {
case DP_AUX_B:
porte_aux_ctl_reg = DPB_AUX_CH_CTL;
@@ -1199,7 +1199,7 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
{
/* WaDisableHBR2:skl */
- if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
+ if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
return false;
if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
@@ -1217,7 +1217,7 @@ intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
if (IS_BROXTON(dev)) {
*source_rates = bxt_rates;
size = ARRAY_SIZE(bxt_rates);
- } else if (IS_SKYLAKE(dev)) {
+ } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
*source_rates = skl_rates;
size = ARRAY_SIZE(skl_rates);
} else {
@@ -1537,7 +1537,7 @@ found:
&pipe_config->dp_m2_n2);
}
- if (IS_SKYLAKE(dev) && is_edp(intel_dp))
+ if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
skl_edp_set_pll_config(pipe_config);
else if (IS_BROXTON(dev))
/* handled in ddi */;
@@ -6056,7 +6056,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
break;
case PORT_B:
intel_encoder->hpd_pin = HPD_PORT_B;
- if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
+ if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
intel_encoder->hpd_pin = HPD_PORT_A;
break;
case PORT_C:
diff --git a/sys/dev/pci/drm/i915/intel_fbc.c b/sys/dev/pci/drm/i915/intel_fbc.c
index 00c4a79a64f..6c4dacf03ed 100644
--- a/sys/dev/pci/drm/i915/intel_fbc.c
+++ b/sys/dev/pci/drm/i915/intel_fbc.c
@@ -581,7 +581,8 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
* reserved range size, so it always assumes the maximum (8mb) is used.
* If we enable FBC using a CFB on that memory range we'll get FIFO
* underruns, even if that range is not reserved by the BIOS. */
- if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+ if (IS_BROADWELL(dev_priv) ||
+ IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024;
else
end = dev_priv->gtt.stolen_usable_size;
diff --git a/sys/dev/pci/drm/i915/intel_guc_loader.c b/sys/dev/pci/drm/i915/intel_guc_loader.c
index b46ee1f7253..a77968cfd1e 100644
--- a/sys/dev/pci/drm/i915/intel_guc_loader.c
+++ b/sys/dev/pci/drm/i915/intel_guc_loader.c
@@ -66,6 +66,9 @@
#define I915_SKL_GUC_UCODE "i915/skl_guc_ver4.bin"
MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
+#define I915_KBL_GUC_UCODE "i915/kbl_guc_ver9_14.bin"
+MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
+
/* User-friendly representation of an enum */
const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
{
@@ -326,8 +329,8 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);
/* WaDisableMinuteIaClockGating:skl,bxt */
- if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
- (IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) {
+ if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+ IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
~GUC_ENABLE_MIA_CLOCK_GATING));
}
@@ -565,7 +568,11 @@ void intel_guc_ucode_init(struct drm_device *dev)
fw_path = I915_SKL_GUC_UCODE;
guc_fw->guc_fw_major_wanted = 4;
guc_fw->guc_fw_minor_wanted = 3;
- } else {
+ } else if (IS_KABYLAKE(dev)) {
+ fw_path = I915_KBL_GUC_UCODE;
+ guc_fw->guc_fw_major_wanted = 9;
+ guc_fw->guc_fw_minor_wanted = 14;
+ } else {
i915.enable_guc_submission = false;
fw_path = ""; /* unknown device */
}
diff --git a/sys/dev/pci/drm/i915/intel_hdmi.c b/sys/dev/pci/drm/i915/intel_hdmi.c
index 000ef07f918..0113d841b45 100644
--- a/sys/dev/pci/drm/i915/intel_hdmi.c
+++ b/sys/dev/pci/drm/i915/intel_hdmi.c
@@ -2074,7 +2074,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
* On BXT A0/A1, sw needs to activate DDIA HPD logic and
* interrupts to check the external panel connection.
*/
- if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
intel_encoder->hpd_pin = HPD_PORT_A;
else
intel_encoder->hpd_pin = HPD_PORT_B;
diff --git a/sys/dev/pci/drm/i915/intel_i2c.c b/sys/dev/pci/drm/i915/intel_i2c.c
index c75fe66b70a..bfe2deb6a84 100644
--- a/sys/dev/pci/drm/i915/intel_i2c.c
+++ b/sys/dev/pci/drm/i915/intel_i2c.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: intel_i2c.c,v 1.11 2017/07/03 13:26:04 naddy Exp $ */
+/* $OpenBSD: intel_i2c.c,v 1.12 2017/09/30 07:36:56 robert Exp $ */
/*
* Copyright (c) 2012, 2013 Mark Kettenis <kettenis@openbsd.org>
*
@@ -112,7 +112,7 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
{
if (IS_BROXTON(dev_priv))
return &gmbus_pins_bxt[pin];
- else if (IS_SKYLAKE(dev_priv))
+ else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
return &gmbus_pins_skl[pin];
else if (IS_BROADWELL(dev_priv))
return &gmbus_pins_bdw[pin];
@@ -127,7 +127,7 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
if (IS_BROXTON(dev_priv))
size = ARRAY_SIZE(gmbus_pins_bxt);
- else if (IS_SKYLAKE(dev_priv))
+ else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
size = ARRAY_SIZE(gmbus_pins_skl);
else if (IS_BROADWELL(dev_priv))
size = ARRAY_SIZE(gmbus_pins_bdw);
diff --git a/sys/dev/pci/drm/i915/intel_lrc.c b/sys/dev/pci/drm/i915/intel_lrc.c
index 2c033f74ed9..6d5431e0cc9 100644
--- a/sys/dev/pci/drm/i915/intel_lrc.c
+++ b/sys/dev/pci/drm/i915/intel_lrc.c
@@ -284,8 +284,8 @@ static bool disable_lite_restore_wa(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
- return ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
- (IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) &&
+ return (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+ IS_BXT_REVID(dev, 0, BXT_REVID_A0)) &&
(ring->id == VCS || ring->id == VCS2);
}
@@ -1144,12 +1144,13 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
/*
- * WaDisableLSQCROPERFforOCL:skl
+ * WaDisableLSQCROPERFforOCL:skl,kbl
* This WA is implemented in skl_init_clock_gating() but since
* this batch updates GEN8_L3SQCREG4 with default value we need to
* set this bit here to retain the WA during flush.
*/
- if (IS_SKYLAKE(ring->dev) && INTEL_REVID(ring->dev) <= SKL_REVID_E0)
+ if (IS_SKL_REVID(ring->dev, 0, SKL_REVID_E0) ||
+ IS_KBL_REVID(ring->dev, 0, KBL_REVID_E0))
l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
@@ -1314,8 +1315,8 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
/* WaDisableCtxRestoreArbitration:skl,bxt */
- if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
- (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
+ if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
+ IS_BXT_REVID(dev, 0, BXT_REVID_A0))
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
@@ -1340,8 +1341,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
- if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_B0)) ||
- (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0))) {
+ if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+ IS_BXT_REVID(dev, 0, BXT_REVID_A0)) {
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
wa_ctx_emit(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
wa_ctx_emit(batch, index,
@@ -1350,8 +1351,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
}
/* WaDisableCtxRestoreArbitration:skl,bxt */
- if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
- (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
+ if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
+ IS_BXT_REVID(dev, 0, BXT_REVID_A0))
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
@@ -1973,7 +1974,7 @@ static int logical_render_ring_init(struct drm_device *dev)
ring->init_hw = gen8_init_render_ring;
ring->init_context = gen8_init_rcs_context;
ring->cleanup = intel_fini_pipe_control;
- if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+ if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
ring->get_seqno = bxt_a_get_seqno;
ring->set_seqno = bxt_a_set_seqno;
} else {
@@ -2025,7 +2026,7 @@ static int logical_bsd_ring_init(struct drm_device *dev)
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
ring->init_hw = gen8_init_common_ring;
- if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+ if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
ring->get_seqno = bxt_a_get_seqno;
ring->set_seqno = bxt_a_set_seqno;
} else {
@@ -2080,7 +2081,7 @@ static int logical_blt_ring_init(struct drm_device *dev)
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
ring->init_hw = gen8_init_common_ring;
- if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+ if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
ring->get_seqno = bxt_a_get_seqno;
ring->set_seqno = bxt_a_set_seqno;
} else {
@@ -2110,7 +2111,7 @@ static int logical_vebox_ring_init(struct drm_device *dev)
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
ring->init_hw = gen8_init_common_ring;
- if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+ if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
ring->get_seqno = bxt_a_get_seqno;
ring->set_seqno = bxt_a_set_seqno;
} else {
diff --git a/sys/dev/pci/drm/i915/intel_mocs.c b/sys/dev/pci/drm/i915/intel_mocs.c
index 6d3c6c0a5c6..f140e504880 100644
--- a/sys/dev/pci/drm/i915/intel_mocs.c
+++ b/sys/dev/pci/drm/i915/intel_mocs.c
@@ -143,7 +143,7 @@ static bool get_mocs_settings(struct drm_device *dev,
{
bool result = false;
- if (IS_SKYLAKE(dev)) {
+ if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
table->size = ARRAY_SIZE(skylake_mocs_table);
table->table = skylake_mocs_table;
result = true;
@@ -156,6 +156,16 @@ static bool get_mocs_settings(struct drm_device *dev,
"Platform that should have a MOCS table does not.\n");
}
+ /* WaDisableSkipCaching:skl,bxt,kbl */
+ if (IS_GEN9(dev)) {
+ int i;
+
+ for (i = 0; i < table->size; i++)
+ if (WARN_ON(table->table[i].l3cc_value &
+ (L3_ESC(1) | L3_SCC(0x7))))
+ return false;
+ }
+
return result;
}
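The WaDisableSkipCaching check added above refuses MOCS tables whose L3 control values enable ESC or any SCC bits. The small program below spells out the mask arithmetic; the L3_ESC()/L3_SCC() shift positions follow the upstream intel_mocs.c definitions and are an assumption here, since they sit outside this diff:

#include <assert.h>

#define L3_ESC(value)	((value) << 0)	/* enable skip caching, 1 bit */
#define L3_SCC(value)	((value) << 1)	/* skip caching control, 3 bits */

int main(void)
{
	/* bitwise OR of the two field masks: covers bits 0..3, i.e. 0x0f */
	unsigned int forbidden = L3_ESC(1) | L3_SCC(0x7);

	assert(forbidden == 0x0f);
	/* an l3cc value like 0x10 (cacheability bits only) passes the check */
	assert((0x10 & forbidden) == 0);
	return 0;
}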
diff --git a/sys/dev/pci/drm/i915/intel_panel.c b/sys/dev/pci/drm/i915/intel_panel.c
index 2ffd76489d6..263977047d2 100644
--- a/sys/dev/pci/drm/i915/intel_panel.c
+++ b/sys/dev/pci/drm/i915/intel_panel.c
@@ -1779,7 +1779,8 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
panel->backlight.disable = bxt_disable_backlight;
panel->backlight.set = bxt_set_backlight;
panel->backlight.get = bxt_get_backlight;
- } else if (HAS_PCH_LPT(dev) || HAS_PCH_SPT(dev)) {
+ } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv) ||
+ HAS_PCH_KBP(dev_priv)) {
panel->backlight.setup = lpt_setup_backlight;
panel->backlight.enable = lpt_enable_backlight;
panel->backlight.disable = lpt_disable_backlight;
diff --git a/sys/dev/pci/drm/i915/intel_pm.c b/sys/dev/pci/drm/i915/intel_pm.c
index 6b2079132ec..52b6dda7ecf 100644
--- a/sys/dev/pci/drm/i915/intel_pm.c
+++ b/sys/dev/pci/drm/i915/intel_pm.c
@@ -4436,7 +4436,7 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
struct drm_i915_private *dev_priv = dev->dev_private;
/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
- if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0))
+ if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
return;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -4712,7 +4712,8 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev) || IS_SKYLAKE(dev)) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev) ||
+ IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
ret = sandybridge_pcode_read(dev_priv,
HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
&ddcc_status);
@@ -4724,7 +4725,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
dev_priv->rps.max_freq);
}
- if (IS_SKYLAKE(dev)) {
+ if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
/* Store the frequency values in 16.66 MHZ units, which is
the natural hardware unit for SKL */
dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
@@ -4761,7 +4762,7 @@ static void gen9_enable_rps(struct drm_device *dev)
gen6_init_rps_frequencies(dev);
/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
- if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) {
+ if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return;
}
@@ -4829,8 +4830,8 @@ static void gen9_enable_rc6(struct drm_device *dev)
DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
"on" : "off");
/* WaRsUseTimeoutMode */
- if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_D0) ||
- (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_A0)) {
+ if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
+ IS_BXT_REVID(dev, 0, BXT_REVID_A0)) {
I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
GEN7_RC_CTL_TO_MODE |
@@ -4846,8 +4847,9 @@ static void gen9_enable_rc6(struct drm_device *dev)
* 3b: Enable Coarse Power Gating only when RC6 is enabled.
* WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
*/
- if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
- ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_F0)))
+ if (IS_BXT_REVID(dev, 0, BXT_REVID_A1) ||
+ ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) &&
+ IS_SKL_REVID(dev, 0, SKL_REVID_E0)))
I915_WRITE(GEN9_PG_ENABLE, 0);
else
I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
@@ -5085,7 +5087,7 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
/* convert DDR frequency from units of 266.6MHz to bandwidth */
min_ring_freq = mult_frac(min_ring_freq, 8, 3);
- if (IS_SKYLAKE(dev)) {
+ if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
/* Convert GT frequency to 50 HZ units */
min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
@@ -5103,7 +5105,7 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
int diff = max_gpu_freq - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0;
- if (IS_SKYLAKE(dev)) {
+ if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
/*
* ring_freq = 2 * GT. ring_freq is in 100MHz units
* No floor required for ring frequency on SKL.
@@ -6237,7 +6239,7 @@ static void intel_gen6_powersave_work(struct work_struct *work)
} else if (INTEL_INFO(dev)->gen >= 9) {
gen9_enable_rc6(dev);
gen9_enable_rps(dev);
- if (IS_SKYLAKE(dev))
+ if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
__gen6_update_ring_freq(dev);
} else if (IS_BROADWELL(dev)) {
gen8_enable_rps(dev);
@@ -6376,9 +6378,11 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
(I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL));
dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
- I915_WRITE(DISP_ARB_CTL,
- (I915_READ(DISP_ARB_CTL) |
- DISP_FBC_WM_DIS));
+
+ /* WaFbcWakeMemOn:skl,bxt,kbl */
+ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_WM_DIS |
+ DISP_FBC_MEMORY_WAKE);
ilk_init_lp_watermarks(dev);
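The gen6_init_rps_frequencies() hunk above stores SKL/KBL RPS values in 16.66 MHz units by multiplying with GEN9_FREQ_SCALER. Assuming the upstream value GEN9_FREQ_SCALER == 3 (pre-gen9 RPS code thinks in 50 MHz steps), the conversion works out as in this small example:

#include <stdio.h>

#define GEN9_FREQ_SCALER 3	/* assumed, as in upstream i915 */

int main(void)
{
	unsigned int rp0 = 23;				/* 23 x 50 MHz = 1150 MHz */
	unsigned int rp0_gen9 = rp0 * GEN9_FREQ_SCALER;	/* 69 x 16.66 MHz ~ 1150 MHz */

	printf("%u x 50 MHz    = %u MHz\n", rp0, rp0 * 50);
	printf("%u x 16.66 MHz = %u MHz\n", rp0_gen9, rp0_gen9 * 50 / GEN9_FREQ_SCALER);
	return 0;
}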
diff --git a/sys/dev/pci/drm/i915/intel_ringbuffer.c b/sys/dev/pci/drm/i915/intel_ringbuffer.c
index 07d03469d5d..42fe7779094 100644
--- a/sys/dev/pci/drm/i915/intel_ringbuffer.c
+++ b/sys/dev/pci/drm/i915/intel_ringbuffer.c
@@ -908,35 +908,33 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t tmp;
- /* WaEnableLbsSlaRetryTimerDecrement:skl */
+ /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
+ I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+
+ /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
- /* WaDisableKillLogic:bxt,skl */
+ /* WaDisableKillLogic:bxt,skl,kbl */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
ECOCHK_DIS_TLB);
- /* WaDisablePartialInstShootdown:skl,bxt */
+ /* WaDisablePartialInstShootdown:skl,bxt,kbl */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
- /* Syncing dependencies between camera and graphics:skl,bxt */
+ /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
- if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) == SKL_REVID_A0 ||
- INTEL_REVID(dev) == SKL_REVID_B0)) ||
- (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
- /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
- WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
- GEN9_DG_MIRROR_FIX_ENABLE);
- }
+ /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt,kbl */
+ WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
+ GEN9_DG_MIRROR_FIX_ENABLE);
- if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
- (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
- /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
+ /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
+ if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+ IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
GEN9_RHWO_OPTIMIZATION_DISABLE);
/*
@@ -946,43 +944,60 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
*/
}
- if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) ||
- IS_BROXTON(dev)) {
- /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
- WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
- GEN9_ENABLE_YV12_BUGFIX);
- }
+ /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
+ WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
+ GEN9_ENABLE_YV12_BUGFIX);
- /* Wa4x4STCOptimizationDisable:skl,bxt */
- /* WaDisablePartialResolveInVc:skl,bxt */
+ /* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
+ /* WaDisablePartialResolveInVc:skl,bxt,kbl */
WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
- /* WaCcsTlbPrefetchDisable:skl,bxt */
+ /* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_CCS_TLB_PREFETCH_ENABLE);
- /* WaDisableMaskBasedCammingInRCC:skl,bxt */
- if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_C0) ||
- (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0))
+ /* WaDisableMaskBasedCammingInRCC:skl,bxt,kbl */
+ if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) ||
+ IS_BXT_REVID(dev, 0, BXT_REVID_A1))
WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
PIXEL_MASK_CAMMING_DISABLE);
- /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
- tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
- if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_F0) ||
- (IS_BROXTON(dev) && INTEL_REVID(dev) >= BXT_REVID_B0))
- tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
- WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
+ /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+ HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
+
+ /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
+ * both tied to WaForceContextSaveRestoreNonCoherent
+ * in some hsds for skl. We keep the tie for all gen9. The
+ * documentation is a bit hazy and so we want to get common behaviour,
+ * even though there is no clear evidence we would need both on kbl/bxt.
+ * This area has been source of system hangs so we play it safe
+ * and mimic the skl regardless of what bspec says.
+ *
+ * Use Force Non-Coherent whenever executing a 3D context. This
+ * is a workaround for a possible hang in the unlikely event
+ * a TLB invalidation occurs during a PSD flush.
+ */
+
+ /* WaForceEnableNonCoherent:skl,bxt,kbl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_NON_COHERENT);
+
+ /* WaDisableHDCInvalidation:skl,bxt,kbl */
+ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+ BDW_DISABLE_HDC_INVALIDATION);
+
- /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
- if (IS_SKYLAKE(dev) ||
- (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_B0)) {
+ /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
+ if (IS_SKYLAKE(dev_priv) ||
+ IS_KABYLAKE(dev_priv) ||
+ IS_BXT_REVID(dev, 0, BXT_REVID_B0))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
- }
- /* WaDisableSTUnitPowerOptimization:skl,bxt */
+ /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
return 0;
@@ -1040,7 +1055,7 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
if (ret)
return ret;
- if (INTEL_REVID(dev) <= SKL_REVID_D0) {
+ if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
/* WaDisableHDCInvalidation:skl */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
BDW_DISABLE_HDC_INVALIDATION);
@@ -1053,46 +1068,33 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
* involving this register should also be added to WA batch as required.
*/
- if (INTEL_REVID(dev) <= SKL_REVID_E0)
+ if (IS_SKL_REVID(dev, 0, SKL_REVID_E0))
/* WaDisableLSQCROPERFforOCL:skl */
I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_RO_PERF_DIS);
/* WaEnableGapsTsvCreditFix:skl */
- if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) {
+ if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) {
I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
GEN9_GAPS_TSV_CREDIT_DISABLE));
}
/* WaDisablePowerCompilerClockGating:skl */
- if (INTEL_REVID(dev) == SKL_REVID_B0)
+ if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0))
WA_SET_BIT_MASKED(HIZ_CHICKEN,
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
- if (INTEL_REVID(dev) <= SKL_REVID_D0) {
- /*
- *Use Force Non-Coherent whenever executing a 3D context. This
- * is a workaround for a possible hang in the unlikely event
- * a TLB invalidation occurs during a PSD flush.
- */
- /* WaForceEnableNonCoherent:skl */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FORCE_NON_COHERENT);
- }
-
- if (INTEL_REVID(dev) == SKL_REVID_C0 ||
- INTEL_REVID(dev) == SKL_REVID_D0)
- /* WaBarrierPerformanceFixDisable:skl */
+ /* WaBarrierPerformanceFixDisable:skl */
+ if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FENCE_DEST_SLM_DISABLE |
HDC_BARRIER_PERFORMANCE_DISABLE);
/* WaDisableSbeCacheDispatchPortSharing:skl */
- if (INTEL_REVID(dev) <= SKL_REVID_F0) {
+ if (IS_SKL_REVID(dev, 0, SKL_REVID_F0))
WA_SET_BIT_MASKED(
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
- }
return skl_tune_iz_hashing(ring);
}
@@ -1109,11 +1111,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
/* WaStoreMultiplePTEenable:bxt */
/* This is a requirement according to Hardware specification */
- if (INTEL_REVID(dev) == BXT_REVID_A0)
+ if (IS_BXT_REVID(dev, 0, BXT_REVID_A0))
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
/* WaSetClckGatingDisableMedia:bxt */
- if (INTEL_REVID(dev) == BXT_REVID_A0) {
+ if (IS_BXT_REVID(dev, 0, BXT_REVID_A0)) {
I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
}
@@ -1123,7 +1125,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
STALL_DOP_GATING_DISABLE);
/* WaDisableSbeCacheDispatchPortSharing:bxt */
- if (INTEL_REVID(dev) <= BXT_REVID_B0) {
+ if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
WA_SET_BIT_MASKED(
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
@@ -1132,6 +1134,65 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
return 0;
}
+static int kbl_init_workarounds(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = gen9_init_workarounds(ring);
+ if (ret)
+ return ret;
+
+ /* WaEnableGapsTsvCreditFix:kbl */
+ I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+ GEN9_GAPS_TSV_CREDIT_DISABLE));
+
+ /* WaDisableDynamicCreditSharing:kbl */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ WA_SET_BIT(GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+
+ /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
+ if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FENCE_DEST_SLM_DISABLE);
+
+ /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
+ * involving this register should also be added to WA batch as required.
+ */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
+ /* WaDisableLSQCROPERFforOCL:kbl */
+ I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
+ GEN8_LQSC_RO_PERF_DIS);
+
+ /* WaToEnableHwFixForPushConstHWBug:kbl */
+ if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+ /* WaDisableGafsUnitClkGating:kbl */
+ WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaDisableSbeCacheDispatchPortSharing:kbl */
+ WA_SET_BIT_MASKED(
+ GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+
+ /* WaInPlaceDecompressionHang:kbl */
+ WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
+#ifdef notyet
+ /* WaDisableLSQCROPERFforOCL:kbl */
+ ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+ if (ret)
+ return ret;
+#endif
+
+ return 0;
+}
+
int init_workarounds_ring(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
@@ -1153,6 +1214,9 @@ int init_workarounds_ring(struct intel_engine_cs *ring)
if (IS_BROXTON(dev))
return bxt_init_workarounds(ring);
+ if (IS_KABYLAKE(dev))
+ return kbl_init_workarounds(ring);
+
return 0;
}
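Most of the gen9/kbl workarounds above are applied with WA_SET_BIT_MASKED()/_MASKED_BIT_ENABLE(), i.e. through "masked" registers: bits 31..16 of the written value select which bits the write affects, bits 15..0 carry the new values, so unrelated bits stay untouched without a read-modify-write. The standalone sketch below models that convention; the macro names are local stand-ins for the driver's _MASKED_BIT_* helpers and the write model is a simplification:

#include <assert.h>
#include <stdint.h>

#define MASKED_BIT_ENABLE(b)	(((b) << 16) | (b))
#define MASKED_BIT_DISABLE(b)	((b) << 16)

/* model of how the hardware applies a masked write */
static uint16_t masked_write(uint16_t reg, uint32_t val)
{
	uint16_t mask = val >> 16;

	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint16_t chicken = 0x0041;	/* some pre-existing bits */

	chicken = masked_write(chicken, MASKED_BIT_ENABLE(1 << 2));
	assert(chicken == 0x0045);	/* bit 2 set, rest kept */
	chicken = masked_write(chicken, MASKED_BIT_DISABLE(1 << 0));
	assert(chicken == 0x0044);	/* bit 0 cleared, rest kept */
	return 0;
}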
diff --git a/sys/dev/pci/drm/i915/intel_runtime_pm.c b/sys/dev/pci/drm/i915/intel_runtime_pm.c
index 1ec4fe7d1b3..cce038acbc7 100644
--- a/sys/dev/pci/drm/i915/intel_runtime_pm.c
+++ b/sys/dev/pci/drm/i915/intel_runtime_pm.c
@@ -52,7 +52,7 @@
*/
#define GEN9_ENABLE_DC5(dev) 0
-#define SKL_ENABLE_DC6(dev) IS_SKYLAKE(dev)
+#define SKL_ENABLE_DC6(dev) (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
#define for_each_power_well(i, power_well, domain_mask, power_domains) \
for (i = 0; \
@@ -475,7 +475,8 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
SKL_DISP_PW_2);
- WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n");
+ WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
+ "Platform doesn't support DC5.\n");
WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
@@ -538,7 +539,8 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n");
+ WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
+ "Platform doesn't support DC6.\n");
WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
"Backlight is not disabled.\n");
@@ -668,7 +670,7 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
}
} else {
if (enable_requested) {
- if (IS_SKYLAKE(dev) &&
+ if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
(power_well->data == SKL_DISP_PW_1) &&
(intel_csr_load_status_get(dev_priv) == FW_LOADED))
DRM_DEBUG_KMS("Not Disabling PW1, dmc will handle\n");
@@ -1830,7 +1832,7 @@ sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
if (disable_power_well >= 0)
return !!disable_power_well;
- if (IS_SKYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
DRM_DEBUG_KMS("Disabling display power well support\n");
return 0;
}
@@ -1869,7 +1871,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
set_power_wells(power_domains, hsw_power_wells);
} else if (IS_BROADWELL(dev_priv->dev)) {
set_power_wells(power_domains, bdw_power_wells);
- } else if (IS_SKYLAKE(dev_priv->dev)) {
+ } else if (IS_SKYLAKE(dev_priv->dev) || IS_KABYLAKE(dev_priv->dev)) {
set_power_wells(power_domains, skl_power_wells);
} else if (IS_BROXTON(dev_priv->dev)) {
set_power_wells(power_domains, bxt_power_wells);
diff --git a/sys/dev/pci/drm/i915_pciids.h b/sys/dev/pci/drm/i915_pciids.h
index 2cdc723d750..7887ca0f82d 100644
--- a/sys/dev/pci/drm/i915_pciids.h
+++ b/sys/dev/pci/drm/i915_pciids.h
@@ -295,4 +295,40 @@
INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \
INTEL_VGA_DEVICE(0x5A85, info) /* APL HD Graphics 500 */
+#define INTEL_KBL_GT1_IDS(info) \
+ INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \
+ INTEL_VGA_DEVICE(0x5915, info), /* ULX GT1.5 */ \
+ INTEL_VGA_DEVICE(0x5917, info), /* DT GT1.5 */ \
+ INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \
+ INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \
+ INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \
+ INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \
+ INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */
+
+#define INTEL_KBL_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \
+ INTEL_VGA_DEVICE(0x5921, info), /* ULT GT2F */ \
+ INTEL_VGA_DEVICE(0x591E, info), /* ULX GT2 */ \
+ INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \
+ INTEL_VGA_DEVICE(0x591B, info), /* Halo GT2 */ \
+ INTEL_VGA_DEVICE(0x591A, info), /* SRV GT2 */ \
+ INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */
+
+#define INTEL_KBL_GT3_IDS(info) \
+ INTEL_VGA_DEVICE(0x5926, info), /* ULT GT3 */ \
+ INTEL_VGA_DEVICE(0x592B, info), /* Halo GT3 */ \
+ INTEL_VGA_DEVICE(0x592A, info) /* SRV GT3 */
+
+#define INTEL_KBL_GT4_IDS(info) \
+ INTEL_VGA_DEVICE(0x5932, info), /* DT GT4 */ \
+ INTEL_VGA_DEVICE(0x593B, info), /* Halo GT4 */ \
+ INTEL_VGA_DEVICE(0x593A, info), /* SRV GT4 */ \
+ INTEL_VGA_DEVICE(0x593D, info) /* WKS GT4 */
+
+#define INTEL_KBL_IDS(info) \
+ INTEL_KBL_GT1_IDS(info), \
+ INTEL_KBL_GT2_IDS(info), \
+ INTEL_KBL_GT3_IDS(info), \
+ INTEL_KBL_GT4_IDS(info)
+
#endif /* _I915_PCIIDS_H */
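For context, the INTEL_KBL_*_IDS() macros above are consumed by expanding each entry through INTEL_VGA_DEVICE(id, info), so one list can be bound to different intel_device_info structs (GT1/GT2 vs GT3/GT4). The sketch below shows the expansion with a simplified stand-in for the PCI table entry; it is not the driver's real drm_pcidev layout, and only the GT3 list is reproduced for brevity.

#include <stdio.h>

struct fake_pcidev {
	unsigned short vendor, device;
	const void *driver_data;
};

#define INTEL_VGA_DEVICE(id, info)	{ 0x8086, (id), (info) }
#define INTEL_KBL_GT3_IDS(info) \
	INTEL_VGA_DEVICE(0x5926, info), /* ULT GT3 */ \
	INTEL_VGA_DEVICE(0x592B, info), /* Halo GT3 */ \
	INTEL_VGA_DEVICE(0x592A, info)  /* SRV GT3 */

static const int intel_kabylake_gt3_info = 3;	/* stand-in for the info struct */

static const struct fake_pcidev pciidlist[] = {
	INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
};

int main(void)
{
	printf("%zu KBL GT3 ids, first is 0x%04x\n",
	    sizeof(pciidlist) / sizeof(pciidlist[0]), pciidlist[0].device);
	return 0;
}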