Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 1
-rw-r--r--  drivers/gpu/drm/i915/dvo.h | 16
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7017.c | 21
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7xxx.c | 17
-rw-r--r--  drivers/gpu/drm/i915/dvo_ivch.c | 23
-rw-r--r--  drivers/gpu/drm/i915/dvo_ns2501.c | 588
-rw-r--r--  drivers/gpu/drm/i915/dvo_sil164.c | 20
-rw-r--r--  drivers/gpu/drm/i915/dvo_tfp410.c | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 251
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 73
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 61
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 239
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 1514
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 65
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 174
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 44
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 391
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 144
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 194
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 328
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 220
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 165
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 144
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 2129
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 401
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 147
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 115
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 221
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 99
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 60
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 420
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 152
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 210
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 70
39 files changed, 5729 insertions, 3091 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b0bacdba6d7e..0f2c5493242b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -40,6 +40,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
dvo_ivch.o \
dvo_tfp410.o \
dvo_sil164.o \
+ dvo_ns2501.o \
i915_gem_dmabuf.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 573de82c9f5a..33a62ad80100 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -57,13 +57,12 @@ struct intel_dvo_dev_ops {
void (*create_resources)(struct intel_dvo_device *dvo);
/*
- * Turn on/off output or set intermediate power levels if available.
+ * Turn on/off output.
*
- * Unsupported intermediate modes drop to the lower power setting.
- * If the mode is DPMSModeOff, the output must be disabled,
- * as the DPLL may be disabled afterwards.
+ * Because none of our dvo drivers support intermediate power levels,
+ * we don't expose this in the interface.
*/
- void (*dpms)(struct intel_dvo_device *dvo, int mode);
+ void (*dpms)(struct intel_dvo_device *dvo, bool enable);
/*
* Callback for testing a video mode for a given output.
@@ -114,6 +113,12 @@ struct intel_dvo_dev_ops {
*/
enum drm_connector_status (*detect)(struct intel_dvo_device *dvo);
+ /*
+ * Probe the current hw status, returning true if the connected output
+ * is active.
+ */
+ bool (*get_hw_state)(struct intel_dvo_device *dvo);
+
/**
* Query the device for the modes it provides.
*
@@ -139,5 +144,6 @@ extern struct intel_dvo_dev_ops ch7xxx_ops;
extern struct intel_dvo_dev_ops ivch_ops;
extern struct intel_dvo_dev_ops tfp410_ops;
extern struct intel_dvo_dev_ops ch7017_ops;
+extern struct intel_dvo_dev_ops ns2501_ops;
#endif /* _INTEL_DVO_H */
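The new get_hw_state() hook gives the common DVO encoder code a way to read back whether the external chip actually has its output enabled, instead of trusting cached software state. A minimal sketch of how the encoder side might consume it; the wrapper name and the intel_attached_dvo() lookup are assumptions here, since only the ops hook itself is defined by this header change:

static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
{
	struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);

	/* Ask the external DVO chip whether its output is currently lit. */
	return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
}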
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 1ca799a1e1fc..86b27d1d90c2 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -163,7 +163,7 @@ struct ch7017_priv {
};
static void ch7017_dump_regs(struct intel_dvo_device *dvo);
-static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
+static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable);
static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
{
@@ -309,7 +309,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED |
(mode->hdisplay & 0x0700) >> 8;
- ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
+ ch7017_dpms(dvo, false);
ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT,
horizontal_active_pixel_input);
ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT,
@@ -331,7 +331,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
}
/* set the CH7017 power state */
-static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
+static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
{
uint8_t val;
@@ -345,7 +345,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
CH7017_DAC3_POWER_DOWN |
CH7017_TV_POWER_DOWN_EN);
- if (mode == DRM_MODE_DPMS_ON) {
+ if (enable) {
/* Turn on the LVDS */
ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
val & ~CH7017_LVDS_POWER_DOWN_EN);
@@ -359,6 +359,18 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
msleep(20);
}
+static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
+{
+ uint8_t val;
+
+ ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
+
+ if (val & CH7017_LVDS_POWER_DOWN_EN)
+ return false;
+ else
+ return true;
+}
+
static void ch7017_dump_regs(struct intel_dvo_device *dvo)
{
uint8_t val;
@@ -396,6 +408,7 @@ struct intel_dvo_dev_ops ch7017_ops = {
.mode_valid = ch7017_mode_valid,
.mode_set = ch7017_mode_set,
.dpms = ch7017_dpms,
+ .get_hw_state = ch7017_get_hw_state,
.dump_regs = ch7017_dump_regs,
.destroy = ch7017_destroy,
};
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 4a036600e806..38f3a6cb8c7d 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -289,14 +289,26 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
}
/* set the CH7xxx power state */
-static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode)
+static void ch7xxx_dpms(struct intel_dvo_device *dvo, bool enable)
{
- if (mode == DRM_MODE_DPMS_ON)
+ if (enable)
ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP);
else
ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD);
}
+static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo)
+{
+ u8 val;
+
+ ch7xxx_readb(dvo, CH7xxx_PM, &val);
+
+ if (val & CH7xxx_PM_FPD)
+ return false;
+ else
+ return true;
+}
+
static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
{
int i;
@@ -326,6 +338,7 @@ struct intel_dvo_dev_ops ch7xxx_ops = {
.mode_valid = ch7xxx_mode_valid,
.mode_set = ch7xxx_mode_set,
.dpms = ch7xxx_dpms,
+ .get_hw_state = ch7xxx_get_hw_state,
.dump_regs = ch7xxx_dump_regs,
.destroy = ch7xxx_destroy,
};
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 04f2893d5e3c..baaf65bf0bdd 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -288,7 +288,7 @@ static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
}
/** Sets the power state of the panel connected to the ivch */
-static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
+static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
{
int i;
uint16_t vr01, vr30, backlight;
@@ -297,13 +297,13 @@ static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
if (!ivch_read(dvo, VR01, &vr01))
return;
- if (mode == DRM_MODE_DPMS_ON)
+ if (enable)
backlight = 1;
else
backlight = 0;
ivch_write(dvo, VR80, backlight);
- if (mode == DRM_MODE_DPMS_ON)
+ if (enable)
vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE;
else
vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE);
@@ -315,7 +315,7 @@ static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
if (!ivch_read(dvo, VR30, &vr30))
break;
- if (((vr30 & VR30_PANEL_ON) != 0) == (mode == DRM_MODE_DPMS_ON))
+ if (((vr30 & VR30_PANEL_ON) != 0) == enable)
break;
udelay(1000);
}
@@ -323,6 +323,20 @@ static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
udelay(16 * 1000);
}
+static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
+{
+ uint16_t vr01;
+
+ /* Query the current power state of the panel. */
+ if (!ivch_read(dvo, VR01, &vr01))
+ return false;
+
+ if (vr01 & VR01_LCD_ENABLE)
+ return true;
+ else
+ return false;
+}
+
static void ivch_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -413,6 +427,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
struct intel_dvo_dev_ops ivch_ops = {
.init = ivch_init,
.dpms = ivch_dpms,
+ .get_hw_state = ivch_get_hw_state,
.mode_valid = ivch_mode_valid,
.mode_set = ivch_mode_set,
.detect = ivch_detect,
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
new file mode 100644
index 000000000000..c4a255be6979
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -0,0 +1,588 @@
+/*
+ *
+ * Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "dvo.h"
+#include "i915_reg.h"
+#include "i915_drv.h"
+
+#define NS2501_VID 0x1305
+#define NS2501_DID 0x6726
+
+#define NS2501_VID_LO 0x00
+#define NS2501_VID_HI 0x01
+#define NS2501_DID_LO 0x02
+#define NS2501_DID_HI 0x03
+#define NS2501_REV 0x04
+#define NS2501_RSVD 0x05
+#define NS2501_FREQ_LO 0x06
+#define NS2501_FREQ_HI 0x07
+
+#define NS2501_REG8 0x08
+#define NS2501_8_VEN (1<<5)
+#define NS2501_8_HEN (1<<4)
+#define NS2501_8_DSEL (1<<3)
+#define NS2501_8_BPAS (1<<2)
+#define NS2501_8_RSVD (1<<1)
+#define NS2501_8_PD (1<<0)
+
+#define NS2501_REG9 0x09
+#define NS2501_9_VLOW (1<<7)
+#define NS2501_9_MSEL_MASK (0x7<<4)
+#define NS2501_9_TSEL (1<<3)
+#define NS2501_9_RSEN (1<<2)
+#define NS2501_9_RSVD (1<<1)
+#define NS2501_9_MDI (1<<0)
+
+#define NS2501_REGC 0x0c
+
+struct ns2501_priv {
+ //I2CDevRec d;
+ bool quiet;
+ int reg_8_shadow;
+ int reg_8_set;
+ // Shadow registers for i915
+ int dvoc;
+ int pll_a;
+ int srcdim;
+ int fw_blc;
+};
+
+#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr))
+
+/*
+ * For reasons unclear to me, the ns2501 (at least on the Fujitsu/Siemens
+ * laptops) does not respond on the i2c bus unless both the PLL is running
+ * and the display is configured in its native resolution.
+ * This function forces the DVO on, and stores the registers it touches,
+ * so that they can be restored to their regular values afterwards.
+ *
+ * This is pretty much a hack, though it works.
+ * Without it, ns2501_readb and ns2501_writeb fail
+ * when switching the resolution.
+ */
+
+static void enable_dvo(struct intel_dvo_device *dvo)
+{
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+
+ DRM_DEBUG_KMS("%s: Trying to re-enable the DVO\n", __FUNCTION__);
+
+ ns->dvoc = I915_READ(DVO_C);
+ ns->pll_a = I915_READ(_DPLL_A);
+ ns->srcdim = I915_READ(DVOC_SRCDIM);
+ ns->fw_blc = I915_READ(FW_BLC);
+
+ I915_WRITE(DVOC, 0x10004084);
+ I915_WRITE(_DPLL_A, 0xd0820000);
+ I915_WRITE(DVOC_SRCDIM, 0x400300); // 1024x768
+ I915_WRITE(FW_BLC, 0x1080304);
+
+ I915_WRITE(DVOC, 0x90004084);
+}
+
+/*
+ * Restore the I915 registers modified by the above
+ * trigger function.
+ */
+static void restore_dvo(struct intel_dvo_device *dvo)
+{
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+
+ I915_WRITE(DVOC, ns->dvoc);
+ I915_WRITE(_DPLL_A, ns->pll_a);
+ I915_WRITE(DVOC_SRCDIM, ns->srcdim);
+ I915_WRITE(FW_BLC, ns->fw_blc);
+}
+
+/*
+** Read a register from the ns2501.
+** Returns true if successful, false otherwise.
+** If it returns false, it might be wise to enable the
+** DVO with the above function.
+*/
+static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
+{
+ struct ns2501_priv *ns = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ u8 out_buf[2];
+ u8 in_buf[2];
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = dvo->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ if (i2c_transfer(adapter, msgs, 2) == 2) {
+ *ch = in_buf[0];
+ return true;
+ }
+
+ if (!ns->quiet) {
+ DRM_DEBUG_KMS
+ ("Unable to read register 0x%02x from %s:0x%02x.\n", addr,
+ adapter->name, dvo->slave_addr);
+ }
+
+ return false;
+}
+
+/*
+** Write a register to the ns2501.
+** Returns true if successful, false otherwise.
+** If it returns false, it might be wise to enable the
+** DVO with the above function.
+*/
+static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+ struct ns2501_priv *ns = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ uint8_t out_buf[2];
+
+ struct i2c_msg msg = {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = ch;
+
+ if (i2c_transfer(adapter, &msg, 1) == 1) {
+ return true;
+ }
+
+ if (!ns->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n",
+ addr, adapter->name, dvo->slave_addr);
+ }
+
+ return false;
+}
+
+/* National Semiconductor 2501 driver for chip on i2c bus
+ * scan for the chip on the bus.
+ * Hope the VBIOS initialized the PLL correctly so we can
+ * talk to it. If not, it will not be seen and not detected.
+ * Bummer!
+ */
+static bool ns2501_init(struct intel_dvo_device *dvo,
+ struct i2c_adapter *adapter)
+{
+ /* this will detect the NS2501 chip on the specified i2c bus */
+ struct ns2501_priv *ns;
+ unsigned char ch;
+
+ ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL);
+ if (ns == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = ns;
+ ns->quiet = true;
+
+ if (!ns2501_readb(dvo, NS2501_VID_LO, &ch))
+ goto out;
+
+ if (ch != (NS2501_VID & 0xff)) {
+ DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
+ ch, adapter->name, dvo->slave_addr);
+ goto out;
+ }
+
+ if (!ns2501_readb(dvo, NS2501_DID_LO, &ch))
+ goto out;
+
+ if (ch != (NS2501_DID & 0xff)) {
+ DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
+ ch, adapter->name, dvo->slave_addr);
+ goto out;
+ }
+ ns->quiet = false;
+ ns->reg_8_set = 0;
+ ns->reg_8_shadow =
+ NS2501_8_PD | NS2501_8_BPAS | NS2501_8_VEN | NS2501_8_HEN;
+
+ DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n");
+ return true;
+
+out:
+ kfree(ns);
+ return false;
+}
+
+static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo)
+{
+ /*
+ * This is a laptop display; it doesn't support hotplugging.
+ * Even if it did, the detection bit of the 2501 is unreliable, as
+ * it only works for some display types, and reading from the chip
+ * requires the PLL to be active in the first place.
+ */
+ return connector_status_connected;
+}
+
+static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ DRM_DEBUG_KMS
+ ("%s: is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
+ __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
+ mode->vtotal);
+
+ /*
+ * Currently, these are all the modes I have data for.
+ * More might exist. It is unclear how to find the native resolution
+ * of the panel from in here; if we could, we could always accept it
+ * by disabling the scaler.
+ */
+ if ((mode->hdisplay == 800 && mode->vdisplay == 600) ||
+ (mode->hdisplay == 640 && mode->vdisplay == 480) ||
+ (mode->hdisplay == 1024 && mode->vdisplay == 768)) {
+ return MODE_OK;
+ } else {
+ return MODE_ONE_SIZE; /* Is this a reasonable error? */
+ }
+}
+
+static void ns2501_mode_set(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ bool ok;
+ bool restore = false;
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+
+ DRM_DEBUG_KMS
+ ("%s: set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
+ __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
+ mode->vtotal);
+
+ /*
+ * Where do I find the native resolution for which scaling is not required???
+ *
+ * First trigger the DVO on as otherwise the chip does not appear on the i2c
+ * bus.
+ */
+ do {
+ ok = true;
+
+ if (mode->hdisplay == 800 && mode->vdisplay == 600) {
+ /* mode 277 */
+ ns->reg_8_shadow &= ~NS2501_8_BPAS;
+ DRM_DEBUG_KMS("%s: switching to 800x600\n",
+ __FUNCTION__);
+
+ /*
+ * No, I do not know where this data comes from.
+ * It is just what the video bios left in the DVO, so
+ * I'm just copying it over here.
+ * This also means that I cannot support any modes other than
+ * the ones supported by the bios.
+ */
+ ok &= ns2501_writeb(dvo, 0x11, 0xc8); // 0xc7 also works.
+ ok &= ns2501_writeb(dvo, 0x1b, 0x19);
+ ok &= ns2501_writeb(dvo, 0x1c, 0x62); // VBIOS left 0x64 here, but 0x62 works nicer
+ ok &= ns2501_writeb(dvo, 0x1d, 0x02);
+
+ ok &= ns2501_writeb(dvo, 0x34, 0x03);
+ ok &= ns2501_writeb(dvo, 0x35, 0xff);
+
+ ok &= ns2501_writeb(dvo, 0x80, 0x27);
+ ok &= ns2501_writeb(dvo, 0x81, 0x03);
+ ok &= ns2501_writeb(dvo, 0x82, 0x41);
+ ok &= ns2501_writeb(dvo, 0x83, 0x05);
+
+ ok &= ns2501_writeb(dvo, 0x8d, 0x02);
+ ok &= ns2501_writeb(dvo, 0x8e, 0x04);
+ ok &= ns2501_writeb(dvo, 0x8f, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x90, 0xfe); /* vertical. VBIOS left 0xff here, but 0xfe works better */
+ ok &= ns2501_writeb(dvo, 0x91, 0x07);
+ ok &= ns2501_writeb(dvo, 0x94, 0x00);
+ ok &= ns2501_writeb(dvo, 0x95, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x96, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x99, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9a, 0x88);
+
+ ok &= ns2501_writeb(dvo, 0x9c, 0x23); /* Looks like first and last line of the image. */
+ ok &= ns2501_writeb(dvo, 0x9d, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9e, 0x25);
+ ok &= ns2501_writeb(dvo, 0x9f, 0x03);
+
+ ok &= ns2501_writeb(dvo, 0xa4, 0x80);
+
+ ok &= ns2501_writeb(dvo, 0xb6, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0xb9, 0xc8); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
+
+ ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xc1, 0xd7);
+
+ ok &= ns2501_writeb(dvo, 0xc2, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc3, 0xf8);
+
+ ok &= ns2501_writeb(dvo, 0xc4, 0x03);
+ ok &= ns2501_writeb(dvo, 0xc5, 0x1a);
+
+ ok &= ns2501_writeb(dvo, 0xc6, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc7, 0x73);
+ ok &= ns2501_writeb(dvo, 0xc8, 0x02);
+
+ } else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
+ /* mode 274 */
+ DRM_DEBUG_KMS("%s: switching to 640x480\n",
+ __FUNCTION__);
+ /*
+ * No, I do not know where this data comes from.
+ * It is just what the video bios left in the DVO, so
+ * I'm just copying it over here.
+ * This also means that I cannot support any modes other than
+ * the ones supported by the bios.
+ */
+ ns->reg_8_shadow &= ~NS2501_8_BPAS;
+
+ ok &= ns2501_writeb(dvo, 0x11, 0xa0);
+ ok &= ns2501_writeb(dvo, 0x1b, 0x11);
+ ok &= ns2501_writeb(dvo, 0x1c, 0x54);
+ ok &= ns2501_writeb(dvo, 0x1d, 0x03);
+
+ ok &= ns2501_writeb(dvo, 0x34, 0x03);
+ ok &= ns2501_writeb(dvo, 0x35, 0xff);
+
+ ok &= ns2501_writeb(dvo, 0x80, 0xff);
+ ok &= ns2501_writeb(dvo, 0x81, 0x07);
+ ok &= ns2501_writeb(dvo, 0x82, 0x3d);
+ ok &= ns2501_writeb(dvo, 0x83, 0x05);
+
+ ok &= ns2501_writeb(dvo, 0x8d, 0x02);
+ ok &= ns2501_writeb(dvo, 0x8e, 0x10);
+ ok &= ns2501_writeb(dvo, 0x8f, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x90, 0xff); /* vertical */
+ ok &= ns2501_writeb(dvo, 0x91, 0x07);
+ ok &= ns2501_writeb(dvo, 0x94, 0x00);
+ ok &= ns2501_writeb(dvo, 0x95, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x96, 0x05);
+
+ ok &= ns2501_writeb(dvo, 0x99, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9a, 0x88);
+
+ ok &= ns2501_writeb(dvo, 0x9c, 0x24);
+ ok &= ns2501_writeb(dvo, 0x9d, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9e, 0x25);
+ ok &= ns2501_writeb(dvo, 0x9f, 0x03);
+
+ ok &= ns2501_writeb(dvo, 0xa4, 0x84);
+
+ ok &= ns2501_writeb(dvo, 0xb6, 0x09);
+
+ ok &= ns2501_writeb(dvo, 0xb9, 0xa0); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
+
+ ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xc1, 0x90);
+
+ ok &= ns2501_writeb(dvo, 0xc2, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc3, 0x0f);
+
+ ok &= ns2501_writeb(dvo, 0xc4, 0x03);
+ ok &= ns2501_writeb(dvo, 0xc5, 0x16);
+
+ ok &= ns2501_writeb(dvo, 0xc6, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc7, 0x02);
+ ok &= ns2501_writeb(dvo, 0xc8, 0x02);
+
+ } else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
+ /* mode 280 */
+ DRM_DEBUG_KMS("%s: switching to 1024x768\n",
+ __FUNCTION__);
+ /*
+ * This might or might not work, actually. I'm silently
+ * assuming here that the native panel resolution is
+ * 1024x768. If not, then this leaves the scaler disabled,
+ * generating a picture that is likely not the expected one.
+ *
+ * The problem is that I do not know where to take the panel
+ * dimensions from.
+ *
+ * Enable the bypass; scaling is not required.
+ *
+ * The scaler registers are irrelevant here.
+ *
+ */
+ ns->reg_8_shadow |= NS2501_8_BPAS;
+ ok &= ns2501_writeb(dvo, 0x37, 0x44);
+ } else {
+ /*
+ * Data not known. Bummer!
+ * Hopefully, the code should never get here,
+ * as mode_valid accepted no other modes.
+ */
+ ns->reg_8_shadow |= NS2501_8_BPAS;
+ }
+ ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
+
+ if (!ok) {
+ if (restore)
+ restore_dvo(dvo);
+ enable_dvo(dvo);
+ restore = true;
+ }
+ } while (!ok);
+ /*
+ * Restore the old i915 registers before
+ * forcing the ns2501 on.
+ */
+ if (restore)
+ restore_dvo(dvo);
+}
+
+/* query the current power state of the NS2501 */
+static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
+{
+ unsigned char ch;
+
+ if (!ns2501_readb(dvo, NS2501_REG8, &ch))
+ return false;
+
+ if (ch & NS2501_8_PD)
+ return true;
+ else
+ return false;
+}
+
+/* set the NS2501 power state */
+static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ bool ok;
+ bool restore = false;
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+ unsigned char ch;
+
+ DRM_DEBUG_KMS("%s: Trying set the dpms of the DVO to %i\n",
+ __FUNCTION__, enable);
+
+ ch = ns->reg_8_shadow;
+
+ if (enable)
+ ch |= NS2501_8_PD;
+ else
+ ch &= ~NS2501_8_PD;
+
+ if (ns->reg_8_set == 0 || ns->reg_8_shadow != ch) {
+ ns->reg_8_set = 1;
+ ns->reg_8_shadow = ch;
+
+ do {
+ ok = true;
+ ok &= ns2501_writeb(dvo, NS2501_REG8, ch);
+ ok &=
+ ns2501_writeb(dvo, 0x34,
+ enable ? 0x03 : 0x00);
+ ok &=
+ ns2501_writeb(dvo, 0x35,
+ enable ? 0xff : 0x00);
+ if (!ok) {
+ if (restore)
+ restore_dvo(dvo);
+ enable_dvo(dvo);
+ restore = true;
+ }
+ } while (!ok);
+
+ if (restore)
+ restore_dvo(dvo);
+ }
+}
+
+static void ns2501_dump_regs(struct intel_dvo_device *dvo)
+{
+ uint8_t val;
+
+ ns2501_readb(dvo, NS2501_FREQ_LO, &val);
+ DRM_LOG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
+ ns2501_readb(dvo, NS2501_FREQ_HI, &val);
+ DRM_LOG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
+ ns2501_readb(dvo, NS2501_REG8, &val);
+ DRM_LOG_KMS("NS2501_REG8: 0x%02x\n", val);
+ ns2501_readb(dvo, NS2501_REG9, &val);
+ DRM_LOG_KMS("NS2501_REG9: 0x%02x\n", val);
+ ns2501_readb(dvo, NS2501_REGC, &val);
+ DRM_LOG_KMS("NS2501_REGC: 0x%02x\n", val);
+}
+
+static void ns2501_destroy(struct intel_dvo_device *dvo)
+{
+ struct ns2501_priv *ns = dvo->dev_priv;
+
+ if (ns) {
+ kfree(ns);
+ dvo->dev_priv = NULL;
+ }
+}
+
+struct intel_dvo_dev_ops ns2501_ops = {
+ .init = ns2501_init,
+ .detect = ns2501_detect,
+ .mode_valid = ns2501_mode_valid,
+ .mode_set = ns2501_mode_set,
+ .dpms = ns2501_dpms,
+ .get_hw_state = ns2501_get_hw_state,
+ .dump_regs = ns2501_dump_regs,
+ .destroy = ns2501_destroy,
+};
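Both ns2501_mode_set() and ns2501_dpms() above share the same recovery idiom: attempt the i2c programming, and if the chip has fallen off the bus, force the DVO pipeline on with enable_dvo() and retry, restoring the clobbered i915 registers at the end. Distilled into one place (do_chip_writes() is a placeholder, not a function in this file), the shape is:

static void ns2501_retry_programming(struct intel_dvo_device *dvo)
{
	bool ok, restore = false;

	do {
		ok = do_chip_writes(dvo);	/* false if any i2c write failed */
		if (!ok) {
			/* Undo any previous force before forcing again. */
			if (restore)
				restore_dvo(dvo);
			enable_dvo(dvo);
			restore = true;
		}
	} while (!ok);

	/* Put the clobbered i915 registers back. */
	if (restore)
		restore_dvo(dvo);
}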
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index a0b13a6f619d..4debd32e3e4c 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -208,7 +208,7 @@ static void sil164_mode_set(struct intel_dvo_device *dvo,
}
/* set the SIL164 power state */
-static void sil164_dpms(struct intel_dvo_device *dvo, int mode)
+static void sil164_dpms(struct intel_dvo_device *dvo, bool enable)
{
int ret;
unsigned char ch;
@@ -217,7 +217,7 @@ static void sil164_dpms(struct intel_dvo_device *dvo, int mode)
if (ret == false)
return;
- if (mode == DRM_MODE_DPMS_ON)
+ if (enable)
ch |= SIL164_8_PD;
else
ch &= ~SIL164_8_PD;
@@ -226,6 +226,21 @@ static void sil164_dpms(struct intel_dvo_device *dvo, int mode)
return;
}
+static bool sil164_get_hw_state(struct intel_dvo_device *dvo)
+{
+ int ret;
+ unsigned char ch;
+
+ ret = sil164_readb(dvo, SIL164_REG8, &ch);
+ if (ret == false)
+ return false;
+
+ if (ch & SIL164_8_PD)
+ return true;
+ else
+ return false;
+}
+
static void sil164_dump_regs(struct intel_dvo_device *dvo)
{
uint8_t val;
@@ -258,6 +273,7 @@ struct intel_dvo_dev_ops sil164_ops = {
.mode_valid = sil164_mode_valid,
.mode_set = sil164_mode_set,
.dpms = sil164_dpms,
+ .get_hw_state = sil164_get_hw_state,
.dump_regs = sil164_dump_regs,
.destroy = sil164_destroy,
};
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index aa2cd3ec54aa..e17f1b07e915 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -234,14 +234,14 @@ static void tfp410_mode_set(struct intel_dvo_device *dvo,
}
/* set the tfp410 power state */
-static void tfp410_dpms(struct intel_dvo_device *dvo, int mode)
+static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
{
uint8_t ctl1;
if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
return;
- if (mode == DRM_MODE_DPMS_ON)
+ if (enable)
ctl1 |= TFP410_CTL_1_PD;
else
ctl1 &= ~TFP410_CTL_1_PD;
@@ -249,6 +249,19 @@ static void tfp410_dpms(struct intel_dvo_device *dvo, int mode)
tfp410_writeb(dvo, TFP410_CTL_1, ctl1);
}
+static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
+{
+ uint8_t ctl1;
+
+ if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
+ return false;
+
+ if (ctl1 & TFP410_CTL_1_PD)
+ return true;
+ else
+ return false;
+}
+
static void tfp410_dump_regs(struct intel_dvo_device *dvo)
{
uint8_t val, val2;
@@ -299,6 +312,7 @@ struct intel_dvo_dev_ops tfp410_ops = {
.mode_valid = tfp410_mode_valid,
.mode_set = tfp410_mode_set,
.dpms = tfp410_dpms,
+ .get_hw_state = tfp410_get_hw_state,
.dump_regs = tfp410_dump_regs,
.destroy = tfp410_destroy,
};
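Each per-chip get_hw_state() implementation in this series follows the same shape: read the chip's power register over i2c and translate one bit into a bool, reporting "off" when the chip cannot be read at all. Note the polarity is chip-specific: for the sil164 and tfp410 a set PD bit means powered up, while for the ch7017 a set power-down-enable bit means powered down. As a sketch with hypothetical register names (EXAMPLE_PWR_REG and EXAMPLE_PWR_ON are illustrative, not real i915 symbols):

static bool example_get_hw_state(struct intel_dvo_device *dvo)
{
	uint8_t val;

	if (!example_readb(dvo, EXAMPLE_PWR_REG, &val))
		return false;	/* unreadable chip: report it as off */

	return (val & EXAMPLE_PWR_ON) != 0;
}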
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 63f01e29c1fa..dde8b505bf7f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -43,7 +43,6 @@
enum {
ACTIVE_LIST,
- FLUSHING_LIST,
INACTIVE_LIST,
PINNED_LIST,
};
@@ -61,28 +60,11 @@ static int i915_capabilities(struct seq_file *m, void *data)
seq_printf(m, "gen: %d\n", info->gen);
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
-#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
- B(is_mobile);
- B(is_i85x);
- B(is_i915g);
- B(is_i945gm);
- B(is_g33);
- B(need_gfx_hws);
- B(is_g4x);
- B(is_pineview);
- B(is_broadwater);
- B(is_crestline);
- B(has_fbc);
- B(has_pipe_cxsr);
- B(has_hotplug);
- B(cursor_needs_physical);
- B(has_overlay);
- B(overlay_needs_physical);
- B(supports_tv);
- B(has_bsd_ring);
- B(has_blt_ring);
- B(has_llc);
-#undef B
+#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
+#define DEV_INFO_SEP ;
+ DEV_INFO_FLAGS;
+#undef DEV_INFO_FLAG
+#undef DEV_INFO_SEP
return 0;
}
@@ -120,20 +102,23 @@ static const char *cache_level_str(int type)
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
- seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s",
+ seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
&obj->base,
get_pin_flag(obj),
get_tiling_flag(obj),
obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain,
- obj->last_rendering_seqno,
+ obj->last_read_seqno,
+ obj->last_write_seqno,
obj->last_fenced_seqno,
cache_level_str(obj->cache_level),
obj->dirty ? " dirty" : "",
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
seq_printf(m, " (name: %d)", obj->base.name);
+ if (obj->pin_count)
+ seq_printf(m, " (pinned x %d)", obj->pin_count);
if (obj->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj->fence_reg);
if (obj->gtt_space != NULL)
@@ -176,10 +161,6 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
seq_printf(m, "Inactive:\n");
head = &dev_priv->mm.inactive_list;
break;
- case FLUSHING_LIST:
- seq_printf(m, "Flushing:\n");
- head = &dev_priv->mm.flushing_list;
- break;
default:
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
@@ -217,8 +198,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 count, mappable_count;
- size_t size, mappable_size;
+ u32 count, mappable_count, purgeable_count;
+ size_t size, mappable_size, purgeable_size;
struct drm_i915_gem_object *obj;
int ret;
@@ -231,13 +212,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
dev_priv->mm.object_memory);
size = count = mappable_size = mappable_count = 0;
- count_objects(&dev_priv->mm.gtt_list, gtt_list);
+ count_objects(&dev_priv->mm.bound_list, gtt_list);
seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
count_objects(&dev_priv->mm.active_list, mm_list);
- count_objects(&dev_priv->mm.flushing_list, mm_list);
seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
@@ -246,8 +226,16 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
+ size = count = purgeable_size = purgeable_count = 0;
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) {
+ size += obj->base.size, ++count;
+ if (obj->madv == I915_MADV_DONTNEED)
+ purgeable_size += obj->base.size, ++purgeable_count;
+ }
+ seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
+
size = count = mappable_size = mappable_count = 0;
- list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
if (obj->fault_mappable) {
size += obj->gtt_space->size;
++count;
@@ -256,7 +244,13 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
mappable_size += obj->gtt_space->size;
++mappable_count;
}
+ if (obj->madv == I915_MADV_DONTNEED) {
+ purgeable_size += obj->base.size;
+ ++purgeable_count;
+ }
}
+ seq_printf(m, "%u purgeable objects, %zu bytes\n",
+ purgeable_count, purgeable_size);
seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
mappable_count, mappable_size);
seq_printf(m, "%u fault mappable objects, %zu bytes\n",
@@ -285,7 +279,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
return ret;
total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
if (list == PINNED_LIST && obj->pin_count == 0)
continue;
@@ -358,40 +352,22 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
struct drm_i915_gem_request *gem_request;
- int ret, count;
+ int ret, count, i;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
count = 0;
- if (!list_empty(&dev_priv->ring[RCS].request_list)) {
- seq_printf(m, "Render requests:\n");
- list_for_each_entry(gem_request,
- &dev_priv->ring[RCS].request_list,
- list) {
- seq_printf(m, " %d @ %d\n",
- gem_request->seqno,
- (int) (jiffies - gem_request->emitted_jiffies));
- }
- count++;
- }
- if (!list_empty(&dev_priv->ring[VCS].request_list)) {
- seq_printf(m, "BSD requests:\n");
- list_for_each_entry(gem_request,
- &dev_priv->ring[VCS].request_list,
- list) {
- seq_printf(m, " %d @ %d\n",
- gem_request->seqno,
- (int) (jiffies - gem_request->emitted_jiffies));
- }
- count++;
- }
- if (!list_empty(&dev_priv->ring[BCS].request_list)) {
- seq_printf(m, "BLT requests:\n");
+ for_each_ring(ring, dev_priv, i) {
+ if (list_empty(&ring->request_list))
+ continue;
+
+ seq_printf(m, "%s requests:\n", ring->name);
list_for_each_entry(gem_request,
- &dev_priv->ring[BCS].request_list,
+ &ring->request_list,
list) {
seq_printf(m, " %d @ %d\n",
gem_request->seqno,
@@ -412,7 +388,7 @@ static void i915_ring_seqno_info(struct seq_file *m,
{
if (ring->get_seqno) {
seq_printf(m, "Current sequence (%s): %d\n",
- ring->name, ring->get_seqno(ring));
+ ring->name, ring->get_seqno(ring, false));
}
}
@@ -421,14 +397,15 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
int ret, i;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- for (i = 0; i < I915_NUM_RINGS; i++)
- i915_ring_seqno_info(m, &dev_priv->ring[i]);
+ for_each_ring(ring, dev_priv, i)
+ i915_ring_seqno_info(m, ring);
mutex_unlock(&dev->struct_mutex);
@@ -441,6 +418,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
int ret, i, pipe;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -518,13 +496,13 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
}
seq_printf(m, "Interrupts received: %d\n",
atomic_read(&dev_priv->irq_received));
- for (i = 0; i < I915_NUM_RINGS; i++) {
+ for_each_ring(ring, dev_priv, i) {
if (IS_GEN6(dev) || IS_GEN7(dev)) {
- seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
- dev_priv->ring[i].name,
- I915_READ_IMR(&dev_priv->ring[i]));
+ seq_printf(m,
+ "Graphics Interrupt mask (%s): %08x\n",
+ ring->name, I915_READ_IMR(ring));
}
- i915_ring_seqno_info(m, &dev_priv->ring[i]);
+ i915_ring_seqno_info(m, ring);
}
mutex_unlock(&dev->struct_mutex);
@@ -547,7 +525,8 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
- seq_printf(m, "Fenced object[%2d] = ", i);
+ seq_printf(m, "Fence %d, pin count = %d, object = ",
+ i, dev_priv->fence_regs[i].pin_count);
if (obj == NULL)
seq_printf(m, "unused");
else
@@ -629,12 +608,12 @@ static void print_error_buffers(struct seq_file *m,
seq_printf(m, "%s [%d]:\n", name, count);
while (count--) {
- seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s%s",
+ seq_printf(m, " %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s",
err->gtt_offset,
err->size,
err->read_domains,
err->write_domain,
- err->seqno,
+ err->rseqno, err->wseqno,
pin_flag(err->pinned),
tiling_flag(err->tiling),
dirty_flag(err->dirty),
@@ -666,10 +645,9 @@ static void i915_ring_error_state(struct seq_file *m,
seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
- if (ring == RCS && INTEL_INFO(dev)->gen >= 4) {
- seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
+ if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
seq_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
- }
+
if (INTEL_INFO(dev)->gen >= 4)
seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
@@ -718,11 +696,17 @@ static int i915_error_state(struct seq_file *m, void *unused)
for (i = 0; i < dev_priv->num_fence_regs; i++)
seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
+ for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
+ seq_printf(m, " INSTDONE_%d: 0x%08x\n", i, error->extra_instdone[i]);
+
if (INTEL_INFO(dev)->gen >= 6) {
seq_printf(m, "ERROR: 0x%08x\n", error->error);
seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
+ if (INTEL_INFO(dev)->gen == 7)
+ seq_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
+
for_each_ring(ring, dev_priv, i)
i915_ring_error_state(m, dev, error, i);
@@ -798,10 +782,14 @@ i915_error_state_write(struct file *filp,
struct seq_file *m = filp->private_data;
struct i915_error_state_file_priv *error_priv = m->private;
struct drm_device *dev = error_priv->dev;
+ int ret;
DRM_DEBUG_DRIVER("Resetting error state\n");
- mutex_lock(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
i915_destroy_error_state(dev);
mutex_unlock(&dev->struct_mutex);
@@ -925,7 +913,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
seq_printf(m, "Render p-state limit: %d\n",
rp_state_limits & 0xff);
seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
- GEN6_CAGF_SHIFT) * 50);
+ GEN6_CAGF_SHIFT) * GT_FREQUENCY_MULTIPLIER);
seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
GEN6_CURICONT_MASK);
seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
@@ -941,15 +929,15 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
max_freq = (rp_state_cap & 0xff0000) >> 16;
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
- max_freq * 50);
+ max_freq * GT_FREQUENCY_MULTIPLIER);
max_freq = (rp_state_cap & 0xff00) >> 8;
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
- max_freq * 50);
+ max_freq * GT_FREQUENCY_MULTIPLIER);
max_freq = rp_state_cap & 0xff;
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
- max_freq * 50);
+ max_freq * GT_FREQUENCY_MULTIPLIER);
} else {
seq_printf(m, "no P-state info available\n");
}
@@ -1291,7 +1279,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
- for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
+ for (gpu_freq = dev_priv->rps.min_delay;
+ gpu_freq <= dev_priv->rps.max_delay;
gpu_freq++) {
I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
@@ -1302,7 +1291,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
continue;
}
ia_freq = I915_READ(GEN6_PCODE_DATA);
- seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
+ seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100);
}
mutex_unlock(&dev->struct_mutex);
@@ -1471,8 +1460,12 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
- mutex_lock(&dev->struct_mutex);
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
swizzle_string(dev_priv->mm.bit_6_swizzle_x));
seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
@@ -1519,9 +1512,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
if (INTEL_INFO(dev)->gen == 6)
seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
- for (i = 0; i < I915_NUM_RINGS; i++) {
- ring = &dev_priv->ring[i];
-
+ for_each_ring(ring, dev_priv, i) {
seq_printf(m, "%s\n", ring->name);
if (INTEL_INFO(dev)->gen == 7)
seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
@@ -1673,7 +1664,7 @@ i915_ring_stop_write(struct file *filp,
struct drm_device *dev = filp->private_data;
struct drm_i915_private *dev_priv = dev->dev_private;
char buf[20];
- int val = 0;
+ int val = 0, ret;
if (cnt > 0) {
if (cnt > sizeof(buf) - 1)
@@ -1688,7 +1679,10 @@ i915_ring_stop_write(struct file *filp,
DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);
- mutex_lock(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
dev_priv->stop_rings = val;
mutex_unlock(&dev->struct_mutex);
@@ -1712,10 +1706,18 @@ i915_max_freq_read(struct file *filp,
struct drm_device *dev = filp->private_data;
drm_i915_private_t *dev_priv = dev->dev_private;
char buf[80];
- int len;
+ int len, ret;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return -ENODEV;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
len = snprintf(buf, sizeof(buf),
- "max freq: %d\n", dev_priv->max_delay * 50);
+ "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER);
+ mutex_unlock(&dev->struct_mutex);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -1732,7 +1734,10 @@ i915_max_freq_write(struct file *filp,
struct drm_device *dev = filp->private_data;
struct drm_i915_private *dev_priv = dev->dev_private;
char buf[20];
- int val = 1;
+ int val = 1, ret;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return -ENODEV;
if (cnt > 0) {
if (cnt > sizeof(buf) - 1)
@@ -1747,12 +1752,17 @@ i915_max_freq_write(struct file *filp,
DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
/*
* Turbo will still be enabled, but won't go above the set value.
*/
- dev_priv->max_delay = val / 50;
+ dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER;
- gen6_set_rps(dev, val / 50);
+ gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
+ mutex_unlock(&dev->struct_mutex);
return cnt;
}
@@ -1772,10 +1782,18 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
struct drm_device *dev = filp->private_data;
drm_i915_private_t *dev_priv = dev->dev_private;
char buf[80];
- int len;
+ int len, ret;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return -ENODEV;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
len = snprintf(buf, sizeof(buf),
- "min freq: %d\n", dev_priv->min_delay * 50);
+ "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER);
+ mutex_unlock(&dev->struct_mutex);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -1790,7 +1808,10 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
struct drm_device *dev = filp->private_data;
struct drm_i915_private *dev_priv = dev->dev_private;
char buf[20];
- int val = 1;
+ int val = 1, ret;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return -ENODEV;
if (cnt > 0) {
if (cnt > sizeof(buf) - 1)
@@ -1805,12 +1826,17 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
/*
* Turbo will still be enabled, but won't go below the set value.
*/
- dev_priv->min_delay = val / 50;
+ dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER;
- gen6_set_rps(dev, val / 50);
+ gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
+ mutex_unlock(&dev->struct_mutex);
return cnt;
}
@@ -1833,9 +1859,15 @@ i915_cache_sharing_read(struct file *filp,
drm_i915_private_t *dev_priv = dev->dev_private;
char buf[80];
u32 snpcr;
- int len;
+ int len, ret;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return -ENODEV;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
- mutex_lock(&dev_priv->dev->struct_mutex);
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
mutex_unlock(&dev_priv->dev->struct_mutex);
@@ -1861,6 +1893,9 @@ i915_cache_sharing_write(struct file *filp,
u32 snpcr;
int val = 1;
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return -ENODEV;
+
if (cnt > 0) {
if (cnt > sizeof(buf) - 1)
return -EINVAL;
@@ -1924,16 +1959,11 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
if (INTEL_INFO(dev)->gen < 6)
return 0;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
gen6_gt_force_wake_get(dev_priv);
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -1946,16 +1976,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
if (INTEL_INFO(dev)->gen < 6)
return 0;
- /*
- * It's bad that we can potentially hang userspace if struct_mutex gets
- * forever stuck. However, if we cannot acquire this lock it means that
- * almost certainly the driver has hung, is not unload-able. Therefore
- * hanging here is probably a minor inconvenience not to be seen my
- * almost every user.
- */
- mutex_lock(&dev->struct_mutex);
gen6_gt_force_wake_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -2005,7 +2026,6 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_gtt", i915_gem_gtt_info, 0},
{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
- {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
{"i915_gem_request", i915_gem_request_info, 0},
@@ -2066,6 +2086,7 @@ int i915_debugfs_init(struct drm_minor *minor)
&i915_cache_sharing_fops);
if (ret)
return ret;
+
ret = i915_debugfs_create(minor->debugfs_root, minor,
"i915_ring_stop",
&i915_ring_stop_fops);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 804f1c98e279..c9bfd83dde64 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -234,10 +234,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
}
}
- dev_priv->cpp = init->cpp;
- dev_priv->back_offset = init->back_offset;
- dev_priv->front_offset = init->front_offset;
- dev_priv->current_page = 0;
+ dev_priv->dri1.cpp = init->cpp;
+ dev_priv->dri1.back_offset = init->back_offset;
+ dev_priv->dri1.front_offset = init->front_offset;
+ dev_priv->dri1.current_page = 0;
if (master_priv->sarea_priv)
master_priv->sarea_priv->pf_current_page = 0;
@@ -574,7 +574,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
__func__,
- dev_priv->current_page,
+ dev_priv->dri1.current_page,
master_priv->sarea_priv->pf_current_page);
i915_kernel_lost_context(dev);
@@ -588,12 +588,12 @@ static int i915_dispatch_flip(struct drm_device * dev)
OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
OUT_RING(0);
- if (dev_priv->current_page == 0) {
- OUT_RING(dev_priv->back_offset);
- dev_priv->current_page = 1;
+ if (dev_priv->dri1.current_page == 0) {
+ OUT_RING(dev_priv->dri1.back_offset);
+ dev_priv->dri1.current_page = 1;
} else {
- OUT_RING(dev_priv->front_offset);
- dev_priv->current_page = 0;
+ OUT_RING(dev_priv->dri1.front_offset);
+ dev_priv->dri1.current_page = 0;
}
OUT_RING(0);
@@ -612,7 +612,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
ADVANCE_LP_RING();
}
- master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+ master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
return 0;
}
@@ -1008,6 +1008,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_WAIT_TIMEOUT:
value = 1;
break;
+ case I915_PARAM_HAS_SEMAPHORES:
+ value = i915_semaphore_is_enabled(dev);
+ break;
+ case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
+ value = 1;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -1424,6 +1430,21 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
kfree(ap);
}
+static void i915_dump_device_info(struct drm_i915_private *dev_priv)
+{
+ const struct intel_device_info *info = dev_priv->info;
+
+#define DEV_INFO_FLAG(name) info->name ? #name "," : ""
+#define DEV_INFO_SEP ,
+ DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
+ "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+ info->gen,
+ dev_priv->dev->pdev->device,
+ DEV_INFO_FLAGS);
+#undef DEV_INFO_FLAG
+#undef DEV_INFO_SEP
+}
+
/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
@@ -1439,7 +1460,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv;
struct intel_device_info *info;
- int ret = 0, mmio_bar;
+ int ret = 0, mmio_bar, mmio_size;
uint32_t aperture_size;
info = (struct intel_device_info *) flags;
@@ -1448,7 +1469,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
-
/* i915 has 4 more counters */
dev->counters += 4;
dev->types[6] = _DRM_STAT_IRQ;
@@ -1464,6 +1484,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->dev = dev;
dev_priv->info = info;
+ i915_dump_device_info(dev_priv);
+
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
goto free_priv;
@@ -1503,7 +1525,19 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
mmio_bar = IS_GEN2(dev) ? 1 : 0;
- dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
+ /* Before gen4, the registers and the GTT are behind different BARs.
+ * However, from gen4 onwards, the registers and the GTT are shared
+ * in the same BAR, so we want to restrict this ioremap from
+ * clobbering the GTT which we want ioremap_wc instead. Fortunately,
+ * the register BAR remains the same size for all the earlier
+ * generations up to Ironlake.
+ */
+ if (info->gen < 5)
+ mmio_size = 512*1024;
+ else
+ mmio_size = 2*1024*1024;
+
+ dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
if (!dev_priv->regs) {
DRM_ERROR("failed to map registers\n");
ret = -EIO;
@@ -1535,11 +1569,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
*
* All tasks on the workqueue are expected to acquire the dev mutex
* so there is no point in running more than one instance of the
- * workqueue at any time: max_active = 1 and NON_REENTRANT.
+ * workqueue at any time. Use an ordered one.
*/
- dev_priv->wq = alloc_workqueue("i915",
- WQ_UNBOUND | WQ_NON_REENTRANT,
- 1);
+ dev_priv->wq = alloc_ordered_workqueue("i915", 0);
if (dev_priv->wq == NULL) {
DRM_ERROR("Failed to create our workqueue.\n");
ret = -ENOMEM;
@@ -1585,7 +1617,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
- spin_lock_init(&dev_priv->rps_lock);
+ spin_lock_init(&dev_priv->rps.lock);
spin_lock_init(&dev_priv->dpio_lock);
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
@@ -1835,6 +1867,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
@@ -1857,6 +1891,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
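The DEV_INFO_FLAG/DEV_INFO_SEP pair used by i915_dump_device_info() above, and by i915_capabilities() in the debugfs hunk, is the classic X-macro idiom: the flag list is written once (DEV_INFO_FLAGS, added to i915_drv.h below) and every call site chooses its own expansion. A self-contained illustration with stand-in names (FLAGS/FLAG/SEP rather than the DEV_INFO_* macros):

#include <stdio.h>

#define FLAGS \
	FLAG(has_fbc) SEP \
	FLAG(has_llc)

struct info { int has_fbc, has_llc; };

static void dump_as_lines(const struct info *i)
{
	/* Expansion 1: one "name: value" line per flag. */
#define FLAG(x) printf(#x ": %d\n", i->x)
#define SEP ;
	FLAGS;
#undef FLAG
#undef SEP
}

static void dump_as_csv(const struct info *i)
{
	/* Expansion 2: one comma-separated string of the set flags,
	 * as in i915_dump_device_info() above. */
#define FLAG(x) (i->x ? #x "," : "")
#define SEP ,
	printf("%s%s\n", FLAGS);
#undef FLAG
#undef SEP
}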
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f6825324e72d..aac4e5e1a5b9 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -469,6 +469,9 @@ static int i915_drm_freeze(struct drm_device *dev)
"GEM idle failed, resume might fail\n");
return error;
}
+
+ intel_modeset_disable(dev);
+
drm_irq_uninstall(dev);
}
@@ -542,13 +545,9 @@ static int i915_drm_thaw(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
intel_modeset_init_hw(dev);
+ intel_modeset_setup_hw_state(dev);
drm_mode_config_reset(dev);
drm_irq_install(dev);
-
- /* Resume the modeset for every activated CRTC */
- mutex_lock(&dev->mode_config.mutex);
- drm_helper_resume_force_mode(dev);
- mutex_unlock(&dev->mode_config.mutex);
}
intel_opregion_init(dev);
@@ -1059,7 +1058,7 @@ static bool IS_DISPLAYREG(u32 reg)
* This should make it easier to transition modules over to the
* new register block scheme, since we can do it incrementally.
*/
- if (reg >= 0x180000)
+ if (reg >= VLV_DISPLAY_BASE)
return false;
if (reg >= RENDER_RING_BASE &&
@@ -1173,9 +1172,59 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
+ if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
+ DRM_ERROR("Unclaimed write to %x\n", reg); \
+ writel(ERR_INT_MMIO_UNCLAIMED, dev_priv->regs + GEN7_ERR_INT); \
+ } \
}
__i915_write(8, b)
__i915_write(16, w)
__i915_write(32, l)
__i915_write(64, q)
#undef __i915_write
+
+static const struct register_whitelist {
+ uint64_t offset;
+ uint32_t size;
+ uint32_t gen_bitmask; /* supported gens: 0x10 for gen4, 0x30 for gens 4 and 5, etc. */
+} whitelist[] = {
+ { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
+};
+
+int i915_reg_read_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_reg_read *reg = data;
+ struct register_whitelist const *entry = whitelist;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
+ if (entry->offset == reg->offset &&
+ (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(whitelist))
+ return -EINVAL;
+
+ switch (entry->size) {
+ case 8:
+ reg->val = I915_READ64(reg->offset);
+ break;
+ case 4:
+ reg->val = I915_READ(reg->offset);
+ break;
+ case 2:
+ reg->val = I915_READ16(reg->offset);
+ break;
+ case 1:
+ reg->val = I915_READ8(reg->offset);
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
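The whitelist check reads naturally once the bitmask encoding is spelled out: 1 << gen is tested against gen_bitmask, so the 0xF0 entry for RING_TIMESTAMP(RENDER_RING_BASE) covers gens 4 through 7. From userspace the new ioctl would be driven roughly like this; the DRM_IOCTL_I915_REG_READ number and struct drm_i915_reg_read come from the i915_drm.h side of this series, and this is a sketch rather than a tested tool:

#include <xf86drm.h>
#include <i915_drm.h>

int read_render_timestamp(int fd, unsigned long long *ts)
{
	struct drm_i915_reg_read reg = {
		.offset = 0x2358,	/* RING_TIMESTAMP(RENDER_RING_BASE) */
	};

	/* Fails with -EINVAL if the offset isn't whitelisted for this gen. */
	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &reg))
		return -1;

	*ts = reg.val;	/* the whitelist entry marks this as an 8-byte read */
	return 0;
}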
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 627fe35781b4..4f2831aa5fed 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -109,6 +109,7 @@ struct intel_pch_pll {
#define WATCH_COHERENCY 0
#define WATCH_LISTS 0
+#define WATCH_GTT 0
#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
@@ -195,9 +196,10 @@ struct drm_i915_error_state {
u32 cpu_ring_head[I915_NUM_RINGS];
u32 cpu_ring_tail[I915_NUM_RINGS];
u32 error; /* gen6+ */
+ u32 err_int; /* gen7 */
u32 instpm[I915_NUM_RINGS];
u32 instps[I915_NUM_RINGS];
- u32 instdone1;
+ u32 extra_instdone[I915_NUM_INSTDONE_REG];
u32 seqno[I915_NUM_RINGS];
u64 bbaddr;
u32 fault_reg[I915_NUM_RINGS];
@@ -221,7 +223,7 @@ struct drm_i915_error_state {
struct drm_i915_error_buffer {
u32 size;
u32 name;
- u32 seqno;
+ u32 rseqno, wseqno;
u32 gtt_offset;
u32 read_domains;
u32 write_domain;
@@ -239,7 +241,6 @@ struct drm_i915_error_state {
};
struct drm_i915_display_funcs {
- void (*dpms)(struct drm_crtc *crtc, int mode);
bool (*fbc_enabled)(struct drm_device *dev);
void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
void (*disable_fbc)(struct drm_device *dev);
@@ -248,7 +249,6 @@ struct drm_i915_display_funcs {
void (*update_wm)(struct drm_device *dev);
void (*update_sprite_wm)(struct drm_device *dev, int pipe,
uint32_t sprite_width, int pixel_size);
- void (*sanitize_pm)(struct drm_device *dev);
void (*update_linetime_wm)(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
int (*crtc_mode_set)(struct drm_crtc *crtc,
@@ -256,6 +256,8 @@ struct drm_i915_display_funcs {
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb);
+ void (*crtc_enable)(struct drm_crtc *crtc);
+ void (*crtc_disable)(struct drm_crtc *crtc);
void (*off)(struct drm_crtc *crtc);
void (*write_eld)(struct drm_connector *connector,
struct drm_crtc *crtc);
@@ -279,6 +281,32 @@ struct drm_i915_gt_funcs {
void (*force_wake_put)(struct drm_i915_private *dev_priv);
};
+#define DEV_INFO_FLAGS \
+ DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \
+ DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \
+ DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \
+ DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \
+ DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_llc)
+
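
DEV_INFO_FLAGS is an X-macro: each consumer defines DEV_INFO_FLAG()/DEV_INFO_SEP before expanding it, so the flag list is maintained in one place. Roughly how the debugfs capabilities dump in this series instantiates it (a sketch, with seq_file's m and info assumed in scope):

	#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
	#define DEV_INFO_SEP ;
	DEV_INFO_FLAGS;
	#undef DEV_INFO_FLAG
	#undef DEV_INFO_SEP
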
struct intel_device_info {
u8 gen;
u8 is_mobile:1;
@@ -402,12 +430,6 @@ typedef struct drm_i915_private {
struct resource mch_res;
- unsigned int cpp;
- int back_offset;
- int front_offset;
- int current_page;
- int page_flipping;
-
atomic_t irq_received;
/* protects the irq masks */
@@ -425,7 +447,6 @@ typedef struct drm_i915_private {
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
- unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int num_pipe;
int num_pch_pll;
@@ -434,8 +455,7 @@ typedef struct drm_i915_private {
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd[I915_NUM_RINGS];
- uint32_t last_instdone;
- uint32_t last_instdone1;
+ uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
unsigned int stop_rings;
@@ -666,7 +686,13 @@ typedef struct drm_i915_private {
struct drm_mm gtt_space;
/** List of all objects in gtt_space. Used to restore gtt
* mappings on resume */
- struct list_head gtt_list;
+ struct list_head bound_list;
+ /**
+ * List of objects which are not bound to the GTT (thus
+ * are idle and not used by the GPU) but still have
+	 * (presumably uncached) pages attached.
+ */
+ struct list_head unbound_list;
/** Usable portion of the GTT for GEM */
unsigned long gtt_start;
@@ -696,17 +722,6 @@ typedef struct drm_i915_private {
struct list_head active_list;
/**
- * List of objects which are not in the ringbuffer but which
- * still have a write_domain which needs to be flushed before
- * unbinding.
- *
- * last_rendering_seqno is 0 while an object is in this list.
- *
- * A reference is held on the buffer while on this list.
- */
- struct list_head flushing_list;
-
- /**
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
@@ -775,6 +790,12 @@ typedef struct drm_i915_private {
struct {
unsigned allow_batchbuffer : 1;
u32 __iomem *gfx_hws_cpu_addr;
+
+ unsigned int cpp;
+ int back_offset;
+ int front_offset;
+ int current_page;
+ int page_flipping;
} dri1;
/* Kernel Modesetting */
@@ -796,9 +817,6 @@ typedef struct drm_i915_private {
bool lvds_downclock_avail;
/* indicates the reduced downclock for LVDS*/
int lvds_downclock;
- struct work_struct idle_work;
- struct timer_list idle_timer;
- bool busy;
u16 orig_clock;
int child_dev_num;
struct child_device_config *child_dev;
@@ -807,26 +825,41 @@ typedef struct drm_i915_private {
bool mchbar_need_disable;
- struct work_struct rps_work;
- spinlock_t rps_lock;
- u32 pm_iir;
-
- u8 cur_delay;
- u8 min_delay;
- u8 max_delay;
- u8 fmax;
- u8 fstart;
-
- u64 last_count1;
- unsigned long last_time1;
- unsigned long chipset_power;
- u64 last_count2;
- struct timespec last_time2;
- unsigned long gfx_power;
- int c_m;
- int r_t;
- u8 corr;
- spinlock_t *mchdev_lock;
+ /* gen6+ rps state */
+ struct {
+ struct work_struct work;
+ u32 pm_iir;
+		/* lock - irqsave spinlock that protects the work_struct and
+ * pm_iir. */
+ spinlock_t lock;
+
+		/* The below variables and all the rps hw state are protected by
+		 * dev->struct_mutex. */
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+ } rps;
+
+ /* ilk-only ips/rps state. Everything in here is protected by the global
+ * mchdev_lock in intel_pm.c */
+ struct {
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+ u8 fmax;
+ u8 fstart;
+
+ u64 last_count1;
+ unsigned long last_time1;
+ unsigned long chipset_power;
+ u64 last_count2;
+ struct timespec last_time2;
+ unsigned long gfx_power;
+ u8 corr;
+
+ int c_m;
+ int r_t;
+ } ips;
enum no_fbc_reason no_fbc_reason;
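
The new rps.lock/pm_iir pairing is the usual accumulate-in-irq, drain-in-work scheme; a condensed sketch of the two sides as the gen6 PM interrupt code uses them (interrupt masking omitted):

	/* irq handler: stash the PM interrupt bits, kick the worker */
	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
	queue_work(dev_priv->wq, &dev_priv->rps.work);

	/* rps.work: claim the accumulated bits, then act on the rps hw
	 * state under dev->struct_mutex as the comment above requires */
	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->rps.lock);
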
@@ -861,30 +894,48 @@ enum hdmi_force_audio {
};
enum i915_cache_level {
- I915_CACHE_NONE,
+ I915_CACHE_NONE = 0,
I915_CACHE_LLC,
- I915_CACHE_LLC_MLC, /* gen6+ */
+ I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
+};
+
+struct drm_i915_gem_object_ops {
+ /* Interface between the GEM object and its backing storage.
+ * get_pages() is called once prior to the use of the associated set
+ * of pages before to binding them into the GTT, and put_pages() is
+ * called after we no longer need them. As we expect there to be
+ * associated cost with migrating pages between the backing storage
+ * and making them available for the GPU (e.g. clflush), we may hold
+ * onto the pages after they are no longer referenced by the GPU
+ * in case they may be used again shortly (for example migrating the
+ * pages to a different memory domain within the GTT). put_pages()
+ * will therefore most likely be called when the object itself is
+ * being released or under memory pressure (where we attempt to
+ * reap pages for the shrinker).
+ */
+ int (*get_pages)(struct drm_i915_gem_object *);
+ void (*put_pages)(struct drm_i915_gem_object *);
};
struct drm_i915_gem_object {
struct drm_gem_object base;
+ const struct drm_i915_gem_object_ops *ops;
+
/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
struct list_head gtt_list;
- /** This object's place on the active/flushing/inactive lists */
+ /** This object's place on the active/inactive lists */
struct list_head ring_list;
struct list_head mm_list;
- /** This object's place on GPU write list */
- struct list_head gpu_write_list;
/** This object's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
/**
- * This is set if the object is on the active or flushing lists
- * (has pending rendering), and is not set if it's on inactive (ready
- * to be unbound).
+ * This is set if the object is on the active lists (has pending
+	 * rendering and so a non-zero seqno), and is not set if it is on the
+	 * inactive (ready to be unbound) list.
*/
unsigned int active:1;
@@ -895,12 +946,6 @@ struct drm_i915_gem_object {
unsigned int dirty:1;
/**
- * This is set if the object has been written to since the last
- * GPU flush.
- */
- unsigned int pending_gpu_write:1;
-
- /**
* Fence register bits (if any) for this object. Will be set
* as needed when mapped into the GTT.
* Protected by dev->struct_mutex.
@@ -961,17 +1006,12 @@ struct drm_i915_gem_object {
unsigned int has_aliasing_ppgtt_mapping:1;
unsigned int has_global_gtt_mapping:1;
+ unsigned int has_dma_mapping:1;
- struct page **pages;
-
- /**
- * DMAR support
- */
- struct scatterlist *sg_list;
- int num_sg;
+ struct sg_table *pages;
+ int pages_pin_count;
/* prime dma-buf support */
- struct sg_table *sg_table;
void *dma_buf_vmapping;
int vmapping_count;
@@ -992,7 +1032,8 @@ struct drm_i915_gem_object {
struct intel_ring_buffer *ring;
/** Breadcrumb of last rendering to the buffer. */
- uint32_t last_rendering_seqno;
+ uint32_t last_read_seqno;
+ uint32_t last_write_seqno;
/** Breadcrumb of last fenced GPU access to the buffer. */
uint32_t last_fenced_seqno;
@@ -1135,6 +1176,10 @@ struct drm_i915_file_private {
#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
+#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+
+#define GT_FREQUENCY_MULTIPLIER 50
+
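
GT_FREQUENCY_MULTIPLIER records that gen6+ rps delay values are in 50 MHz units; consumers convert on the way out, e.g. the sysfs frequency read in this series boils down to (sketch):

	/* gt_cur_freq_mhz: rps delay units -> MHz */
	return dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
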
#include "i915_trace.h"
/**
@@ -1256,6 +1301,10 @@ int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
@@ -1274,24 +1323,42 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
-int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
- uint32_t invalidate_domains,
- uint32_t flush_domains);
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
- bool map_and_fenceable);
+ bool map_and_fenceable,
+ bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
-int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
- gfp_t gfpmask);
+int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
+{
+ struct scatterlist *sg = obj->pages->sgl;
+ while (n >= SG_MAX_SINGLE_ALLOC) {
+ sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
+ n -= SG_MAX_SINGLE_ALLOC - 1;
+ }
+ return sg_page(sg+n);
+}
+static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+ BUG_ON(obj->pages == NULL);
+ obj->pages_pin_count++;
+}
+static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+{
+ BUG_ON(obj->pages_pin_count == 0);
+ obj->pages_pin_count--;
+}
+
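
Taken together these helpers define the call pattern used by the reworked pread/pwrite paths below: materialize the backing store, pin it across any struct_mutex drop, index pages through the (possibly chained) scatterlist, and unpin when done. A sketch, error handling trimmed:

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);	/* pages can't be reaped while pinned */

	page = i915_gem_object_get_page(obj, n);	/* walks sg chains as needed */
	/* ... operate on the page, possibly dropping struct_mutex ... */

	i915_gem_object_unpin_pages(obj);
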
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
@@ -1358,9 +1425,9 @@ void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
-int __must_check i915_add_request(struct intel_ring_buffer *ring,
- struct drm_file *file,
- struct drm_i915_gem_request *request);
+int i915_add_request(struct intel_ring_buffer *ring,
+ struct drm_file *file,
+ struct drm_i915_gem_request *request);
int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
@@ -1429,8 +1496,11 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
- unsigned alignment, bool mappable);
-int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
+ unsigned alignment,
+ unsigned cache_level,
+ bool mappable,
+ bool nonblock);
+int i915_gem_evict_everything(struct drm_device *dev);
/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
@@ -1519,6 +1589,7 @@ extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
+extern void intel_modeset_setup_hw_state(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
@@ -1529,6 +1600,8 @@ extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);
extern bool i915_semaphore_is_enabled(struct drm_device *dev);
+int i915_reg_read_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
/* overlay */
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e2c93f7be8ed..e957f3740f68 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -36,12 +36,12 @@
#include <linux/pci.h>
#include <linux/dma-buf.h>
-static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
unsigned alignment,
- bool map_and_fenceable);
+ bool map_and_fenceable,
+ bool nonblocking);
static int i915_gem_phys_pwrite(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
@@ -55,6 +55,8 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
static int i915_gem_inactive_shrink(struct shrinker *shrinker,
struct shrink_control *sc);
+static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
+static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
@@ -140,7 +142,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
- return !obj->active;
+ return obj->gtt_space && !obj->active;
}
int
@@ -179,7 +181,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
pinned = 0;
mutex_lock(&dev->struct_mutex);
- list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
if (obj->pin_count)
pinned += obj->gtt_space->size;
mutex_unlock(&dev->struct_mutex);
@@ -340,7 +342,7 @@ shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
page_length);
kunmap_atomic(vaddr);
- return ret;
+ return ret ? -EFAULT : 0;
}
static void
@@ -391,7 +393,7 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
page_length);
kunmap(page);
- return ret;
+	return ret ? -EFAULT : 0;
}
static int
@@ -400,7 +402,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
struct drm_i915_gem_pread *args,
struct drm_file *file)
{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
char __user *user_data;
ssize_t remain;
loff_t offset;
@@ -409,7 +410,8 @@ i915_gem_shmem_pread(struct drm_device *dev,
int hit_slowpath = 0;
int prefaulted = 0;
int needs_clflush = 0;
- int release_page;
+ struct scatterlist *sg;
+ int i;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
@@ -423,16 +425,30 @@ i915_gem_shmem_pread(struct drm_device *dev,
* anyway again before the next pread happens. */
if (obj->cache_level == I915_CACHE_NONE)
needs_clflush = 1;
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
- if (ret)
- return ret;
+ if (obj->gtt_space) {
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (ret)
+ return ret;
+ }
}
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_pin_pages(obj);
+
offset = args->offset;
- while (remain > 0) {
+ for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
struct page *page;
+ if (i < offset >> PAGE_SHIFT)
+ continue;
+
+ if (remain <= 0)
+ break;
+
/* Operation in this page
*
* shmem_page_offset = offset within page in shmem file
@@ -443,18 +459,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
if ((shmem_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - shmem_page_offset;
- if (obj->pages) {
- page = obj->pages[offset >> PAGE_SHIFT];
- release_page = 0;
- } else {
- page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto out;
- }
- release_page = 1;
- }
-
+ page = sg_page(sg);
page_do_bit17_swizzling = obj_do_bit17_swizzling &&
(page_to_phys(page) & (1 << 17)) != 0;
@@ -465,7 +470,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
goto next_page;
hit_slowpath = 1;
- page_cache_get(page);
mutex_unlock(&dev->struct_mutex);
if (!prefaulted) {
@@ -483,16 +487,12 @@ i915_gem_shmem_pread(struct drm_device *dev,
needs_clflush);
mutex_lock(&dev->struct_mutex);
- page_cache_release(page);
+
next_page:
mark_page_accessed(page);
- if (release_page)
- page_cache_release(page);
- if (ret) {
- ret = -EFAULT;
+ if (ret)
goto out;
- }
remain -= page_length;
user_data += page_length;
@@ -500,6 +500,8 @@ next_page:
}
out:
+ i915_gem_object_unpin_pages(obj);
+
if (hit_slowpath) {
/* Fixup: Kill any reinstated backing storage pages */
if (obj->madv == __I915_MADV_PURGED)
@@ -605,7 +607,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
char __user *user_data;
int page_offset, page_length, ret;
- ret = i915_gem_object_pin(obj, 0, true);
+ ret = i915_gem_object_pin(obj, 0, true, true);
if (ret)
goto out;
@@ -685,7 +687,7 @@ shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
page_length);
kunmap_atomic(vaddr);
- return ret;
+ return ret ? -EFAULT : 0;
}
/* Only difference to the fast-path function is that this can handle bit17
@@ -719,7 +721,7 @@ shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
page_do_bit17_swizzling);
kunmap(page);
- return ret;
+ return ret ? -EFAULT : 0;
}
static int
@@ -728,7 +730,6 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
struct drm_i915_gem_pwrite *args,
struct drm_file *file)
{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
ssize_t remain;
loff_t offset;
char __user *user_data;
@@ -737,7 +738,8 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
int hit_slowpath = 0;
int needs_clflush_after = 0;
int needs_clflush_before = 0;
- int release_page;
+ int i;
+ struct scatterlist *sg;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
@@ -751,9 +753,11 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
* right away and we therefore have to clflush anyway. */
if (obj->cache_level == I915_CACHE_NONE)
needs_clflush_after = 1;
- ret = i915_gem_object_set_to_gtt_domain(obj, true);
- if (ret)
- return ret;
+ if (obj->gtt_space) {
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ return ret;
+ }
}
/* Same trick applies for invalidate partially written cachelines before
* writing. */
@@ -761,13 +765,25 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
&& obj->cache_level == I915_CACHE_NONE)
needs_clflush_before = 1;
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_pin_pages(obj);
+
offset = args->offset;
obj->dirty = 1;
- while (remain > 0) {
+ for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
struct page *page;
int partial_cacheline_write;
+ if (i < offset >> PAGE_SHIFT)
+ continue;
+
+ if (remain <= 0)
+ break;
+
/* Operation in this page
*
* shmem_page_offset = offset within page in shmem file
@@ -786,18 +802,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
((shmem_page_offset | page_length)
& (boot_cpu_data.x86_clflush_size - 1));
- if (obj->pages) {
- page = obj->pages[offset >> PAGE_SHIFT];
- release_page = 0;
- } else {
- page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto out;
- }
- release_page = 1;
- }
-
+ page = sg_page(sg);
page_do_bit17_swizzling = obj_do_bit17_swizzling &&
(page_to_phys(page) & (1 << 17)) != 0;
@@ -809,26 +814,20 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
goto next_page;
hit_slowpath = 1;
- page_cache_get(page);
mutex_unlock(&dev->struct_mutex);
-
ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
user_data, page_do_bit17_swizzling,
partial_cacheline_write,
needs_clflush_after);
mutex_lock(&dev->struct_mutex);
- page_cache_release(page);
+
next_page:
set_page_dirty(page);
mark_page_accessed(page);
- if (release_page)
- page_cache_release(page);
- if (ret) {
- ret = -EFAULT;
+ if (ret)
goto out;
- }
remain -= page_length;
user_data += page_length;
@@ -836,6 +835,8 @@ next_page:
}
out:
+ i915_gem_object_unpin_pages(obj);
+
if (hit_slowpath) {
/* Fixup: Kill any reinstated backing storage pages */
if (obj->madv == __I915_MADV_PURGED)
@@ -919,10 +920,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
goto out;
}
- if (obj->gtt_space &&
- obj->cache_level == I915_CACHE_NONE &&
+ if (obj->cache_level == I915_CACHE_NONE &&
obj->tiling_mode == I915_TILING_NONE &&
- obj->map_and_fenceable &&
obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
/* Note that the gtt paths might fail with non-page-backed user
@@ -930,7 +929,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* textures). Fallback to the shmem path in that case. */
}
- if (ret == -EFAULT)
+ if (ret == -EFAULT || ret == -ENOSPC)
ret = i915_gem_shmem_pwrite(dev, obj, args, file);
out:
@@ -940,6 +939,240 @@ unlock:
return ret;
}
+int
+i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+ bool interruptible)
+{
+ if (atomic_read(&dev_priv->mm.wedged)) {
+ struct completion *x = &dev_priv->error_completion;
+ bool recovery_complete;
+ unsigned long flags;
+
+ /* Give the error handler a chance to run. */
+ spin_lock_irqsave(&x->wait.lock, flags);
+ recovery_complete = x->done > 0;
+ spin_unlock_irqrestore(&x->wait.lock, flags);
+
+ /* Non-interruptible callers can't handle -EAGAIN, hence return
+ * -EIO unconditionally for these. */
+ if (!interruptible)
+ return -EIO;
+
+ /* Recovery complete, but still wedged means reset failure. */
+ if (recovery_complete)
+ return -EIO;
+
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+/*
+ * Compare seqno against outstanding lazy request. Emit a request if they are
+ * equal.
+ */
+static int
+i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
+{
+ int ret;
+
+ BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
+ ret = 0;
+ if (seqno == ring->outstanding_lazy_request)
+ ret = i915_add_request(ring, NULL, NULL);
+
+ return ret;
+}
+
+/**
+ * __wait_seqno - wait until execution of seqno has finished
+ * @ring: the ring expected to report seqno
+ * @seqno: duh!
+ * @interruptible: do an interruptible wait (normally yes)
+ * @timeout: in - how long to wait (NULL forever); out - how much time remaining
+ *
+ * Returns 0 if the seqno was found within the allotted time. Else returns the
+ * errno with remaining time filled in timeout argument.
+ */
+static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
+ bool interruptible, struct timespec *timeout)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ struct timespec before, now, wait_time={1,0};
+ unsigned long timeout_jiffies;
+ long end;
+ bool wait_forever = true;
+ int ret;
+
+ if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
+ return 0;
+
+ trace_i915_gem_request_wait_begin(ring, seqno);
+
+ if (timeout != NULL) {
+ wait_time = *timeout;
+ wait_forever = false;
+ }
+
+ timeout_jiffies = timespec_to_jiffies(&wait_time);
+
+ if (WARN_ON(!ring->irq_get(ring)))
+ return -ENODEV;
+
+	/* Record current time in case interrupted by signal, or wedged */
+ getrawmonotonic(&before);
+
+#define EXIT_COND \
+ (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
+ atomic_read(&dev_priv->mm.wedged))
+ do {
+ if (interruptible)
+ end = wait_event_interruptible_timeout(ring->irq_queue,
+ EXIT_COND,
+ timeout_jiffies);
+ else
+ end = wait_event_timeout(ring->irq_queue, EXIT_COND,
+ timeout_jiffies);
+
+ ret = i915_gem_check_wedge(dev_priv, interruptible);
+ if (ret)
+ end = ret;
+ } while (end == 0 && wait_forever);
+
+ getrawmonotonic(&now);
+
+ ring->irq_put(ring);
+ trace_i915_gem_request_wait_end(ring, seqno);
+#undef EXIT_COND
+
+ if (timeout) {
+ struct timespec sleep_time = timespec_sub(now, before);
+ *timeout = timespec_sub(*timeout, sleep_time);
+ }
+
+ switch (end) {
+ case -EIO:
+ case -EAGAIN: /* Wedged */
+ case -ERESTARTSYS: /* Signal */
+ return (int)end;
+ case 0: /* Timeout */
+ if (timeout)
+ set_normalized_timespec(timeout, 0, 0);
+ return -ETIME;
+ default: /* Completed */
+ WARN_ON(end < 0); /* We're not aware of other errors */
+ return 0;
+ }
+}
+
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+int
+i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool interruptible = dev_priv->mm.interruptible;
+ int ret;
+
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ BUG_ON(seqno == 0);
+
+ ret = i915_gem_check_wedge(dev_priv, interruptible);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_check_olr(ring, seqno);
+ if (ret)
+ return ret;
+
+ return __wait_seqno(ring, seqno, interruptible, NULL);
+}
+
+/**
+ * Ensures that all rendering to the object has completed and the object is
+ * safe to unbind from the GTT or access from the CPU.
+ */
+static __must_check int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+ bool readonly)
+{
+ struct intel_ring_buffer *ring = obj->ring;
+ u32 seqno;
+ int ret;
+
+ seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
+ if (seqno == 0)
+ return 0;
+
+ ret = i915_wait_seqno(ring, seqno);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests_ring(ring);
+
+ /* Manually manage the write flush as we may have not yet
+ * retired the buffer.
+ */
+ if (obj->last_write_seqno &&
+ i915_seqno_passed(seqno, obj->last_write_seqno)) {
+ obj->last_write_seqno = 0;
+ obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+ }
+
+ return 0;
+}
+
+/* A nonblocking variant of the above wait. This is a highly dangerous routine
+ * as the object state may change during this call.
+ */
+static __must_check int
+i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
+ bool readonly)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = obj->ring;
+ u32 seqno;
+ int ret;
+
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ BUG_ON(!dev_priv->mm.interruptible);
+
+ seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
+ if (seqno == 0)
+ return 0;
+
+ ret = i915_gem_check_wedge(dev_priv, true);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_check_olr(ring, seqno);
+ if (ret)
+ return ret;
+
+ mutex_unlock(&dev->struct_mutex);
+ ret = __wait_seqno(ring, seqno, true, NULL);
+ mutex_lock(&dev->struct_mutex);
+
+ i915_gem_retire_requests_ring(ring);
+
+ /* Manually manage the write flush as we may have not yet
+ * retired the buffer.
+ */
+ if (obj->last_write_seqno &&
+ i915_seqno_passed(seqno, obj->last_write_seqno)) {
+ obj->last_write_seqno = 0;
+ obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+ }
+
+ return ret;
+}
+
/**
* Called when user space prepares to use an object with the CPU, either
* through the mmap ioctl's mapping or a GTT mapping.
@@ -977,6 +1210,14 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
goto unlock;
}
+ /* Try to flush the object off the GPU without holding the lock.
+ * We will repeat the flush holding the lock in the normal manner
+ * to catch cases where we are gazumped.
+ */
+ ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
+ if (ret)
+ goto unref;
+
if (read_domains & I915_GEM_DOMAIN_GTT) {
ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
@@ -990,6 +1231,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
}
+unref:
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
@@ -1109,7 +1351,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
goto unlock;
}
if (!obj->gtt_space) {
- ret = i915_gem_object_bind_to_gtt(obj, 0, true);
+ ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
if (ret)
goto unlock;
@@ -1270,6 +1512,42 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
return i915_gem_get_gtt_size(dev, size, tiling_mode);
}
+static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ int ret;
+
+ if (obj->base.map_list.map)
+ return 0;
+
+ ret = drm_gem_create_mmap_offset(&obj->base);
+ if (ret != -ENOSPC)
+ return ret;
+
+ /* Badly fragmented mmap space? The only way we can recover
+ * space is by destroying unwanted objects. We can't randomly release
+ * mmap_offsets as userspace expects them to be persistent for the
+	 * lifetime of the objects. The closest we can do is to release the
+	 * offsets on purgeable objects by truncating the object and marking it purged,
+ * which prevents userspace from ever using that object again.
+ */
+ i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
+ ret = drm_gem_create_mmap_offset(&obj->base);
+ if (ret != -ENOSPC)
+ return ret;
+
+ i915_gem_shrink_all(dev_priv);
+ return drm_gem_create_mmap_offset(&obj->base);
+}
+
+static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
+{
+ if (!obj->base.map_list.map)
+ return;
+
+ drm_gem_free_mmap_offset(&obj->base);
+}
+
int
i915_gem_mmap_gtt(struct drm_file *file,
struct drm_device *dev,
@@ -1301,11 +1579,9 @@ i915_gem_mmap_gtt(struct drm_file *file,
goto out;
}
- if (!obj->base.map_list.map) {
- ret = drm_gem_create_mmap_offset(&obj->base);
- if (ret)
- goto out;
- }
+ ret = i915_gem_object_create_mmap_offset(obj);
+ if (ret)
+ goto out;
*offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
@@ -1340,83 +1616,245 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
-int
-i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
- gfp_t gfpmask)
+/* Immediately discard the backing storage */
+static void
+i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
- int page_count, i;
- struct address_space *mapping;
struct inode *inode;
- struct page *page;
- if (obj->pages || obj->sg_table)
- return 0;
+ i915_gem_object_free_mmap_offset(obj);
- /* Get the list of pages out of our struct file. They'll be pinned
- * at this point until we release them.
- */
- page_count = obj->base.size / PAGE_SIZE;
- BUG_ON(obj->pages != NULL);
- obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
- if (obj->pages == NULL)
- return -ENOMEM;
+ if (obj->base.filp == NULL)
+ return;
+ /* Our goal here is to return as much of the memory as
+	 * possible back to the system as we are called from OOM.
+ * To do this we must instruct the shmfs to drop all of its
+ * backing pages, *now*.
+ */
inode = obj->base.filp->f_path.dentry->d_inode;
- mapping = inode->i_mapping;
- gfpmask |= mapping_gfp_mask(mapping);
-
- for (i = 0; i < page_count; i++) {
- page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
- if (IS_ERR(page))
- goto err_pages;
-
- obj->pages[i] = page;
- }
-
- if (i915_gem_object_needs_bit17_swizzle(obj))
- i915_gem_object_do_bit_17_swizzle(obj);
-
- return 0;
+ shmem_truncate_range(inode, 0, (loff_t)-1);
-err_pages:
- while (i--)
- page_cache_release(obj->pages[i]);
+ obj->madv = __I915_MADV_PURGED;
+}
- drm_free_large(obj->pages);
- obj->pages = NULL;
- return PTR_ERR(page);
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
+{
+ return obj->madv == I915_MADV_DONTNEED;
}
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
int page_count = obj->base.size / PAGE_SIZE;
- int i;
-
- if (!obj->pages)
- return;
+ struct scatterlist *sg;
+ int ret, i;
BUG_ON(obj->madv == __I915_MADV_PURGED);
+ ret = i915_gem_object_set_to_cpu_domain(obj, true);
+ if (ret) {
+ /* In the event of a disaster, abandon all caches and
+ * hope for the best.
+ */
+ WARN_ON(ret != -EIO);
+ i915_gem_clflush_object(obj);
+ obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ }
+
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_save_bit_17_swizzle(obj);
if (obj->madv == I915_MADV_DONTNEED)
obj->dirty = 0;
- for (i = 0; i < page_count; i++) {
+ for_each_sg(obj->pages->sgl, sg, page_count, i) {
+ struct page *page = sg_page(sg);
+
if (obj->dirty)
- set_page_dirty(obj->pages[i]);
+ set_page_dirty(page);
if (obj->madv == I915_MADV_WILLNEED)
- mark_page_accessed(obj->pages[i]);
+ mark_page_accessed(page);
- page_cache_release(obj->pages[i]);
+ page_cache_release(page);
}
obj->dirty = 0;
- drm_free_large(obj->pages);
+ sg_free_table(obj->pages);
+ kfree(obj->pages);
+}
+
+static int
+i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+{
+ const struct drm_i915_gem_object_ops *ops = obj->ops;
+
+ if (obj->pages == NULL)
+ return 0;
+
+ BUG_ON(obj->gtt_space);
+
+ if (obj->pages_pin_count)
+ return -EBUSY;
+
+ ops->put_pages(obj);
obj->pages = NULL;
+
+ list_del(&obj->gtt_list);
+ if (i915_gem_object_is_purgeable(obj))
+ i915_gem_object_truncate(obj);
+
+ return 0;
+}
+
+static long
+i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+{
+ struct drm_i915_gem_object *obj, *next;
+ long count = 0;
+
+ list_for_each_entry_safe(obj, next,
+ &dev_priv->mm.unbound_list,
+ gtt_list) {
+ if (i915_gem_object_is_purgeable(obj) &&
+ i915_gem_object_put_pages(obj) == 0) {
+ count += obj->base.size >> PAGE_SHIFT;
+ if (count >= target)
+ return count;
+ }
+ }
+
+ list_for_each_entry_safe(obj, next,
+ &dev_priv->mm.inactive_list,
+ mm_list) {
+ if (i915_gem_object_is_purgeable(obj) &&
+ i915_gem_object_unbind(obj) == 0 &&
+ i915_gem_object_put_pages(obj) == 0) {
+ count += obj->base.size >> PAGE_SHIFT;
+ if (count >= target)
+ return count;
+ }
+ }
+
+ return count;
+}
+
+static void
+i915_gem_shrink_all(struct drm_i915_private *dev_priv)
+{
+ struct drm_i915_gem_object *obj, *next;
+
+ i915_gem_evict_everything(dev_priv->dev);
+
+ list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
+ i915_gem_object_put_pages(obj);
+}
+
+static int
+i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ int page_count, i;
+ struct address_space *mapping;
+ struct sg_table *st;
+ struct scatterlist *sg;
+ struct page *page;
+ gfp_t gfp;
+
+ /* Assert that the object is not currently in any GPU domain. As it
+ * wasn't in the GTT, there shouldn't be any way it could have been in
+ * a GPU cache
+ */
+ BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+ BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL)
+ return -ENOMEM;
+
+ page_count = obj->base.size / PAGE_SIZE;
+ if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
+ sg_free_table(st);
+ kfree(st);
+ return -ENOMEM;
+ }
+
+ /* Get the list of pages out of our struct file. They'll be pinned
+ * at this point until we release them.
+ *
+ * Fail silently without starting the shrinker
+ */
+ mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+ gfp = mapping_gfp_mask(mapping);
+ gfp |= __GFP_NORETRY | __GFP_NOWARN;
+ gfp &= ~(__GFP_IO | __GFP_WAIT);
+ for_each_sg(st->sgl, sg, page_count, i) {
+ page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+ if (IS_ERR(page)) {
+ i915_gem_purge(dev_priv, page_count);
+ page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+ }
+ if (IS_ERR(page)) {
+ /* We've tried hard to allocate the memory by reaping
+ * our own buffer, now let the real VM do its job and
+ * go down in flames if truly OOM.
+ */
+ gfp &= ~(__GFP_NORETRY | __GFP_NOWARN);
+ gfp |= __GFP_IO | __GFP_WAIT;
+
+ i915_gem_shrink_all(dev_priv);
+ page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+ if (IS_ERR(page))
+ goto err_pages;
+
+ gfp |= __GFP_NORETRY | __GFP_NOWARN;
+ gfp &= ~(__GFP_IO | __GFP_WAIT);
+ }
+
+ sg_set_page(sg, page, PAGE_SIZE, 0);
+ }
+
+ if (i915_gem_object_needs_bit17_swizzle(obj))
+ i915_gem_object_do_bit_17_swizzle(obj);
+
+ obj->pages = st;
+ return 0;
+
+err_pages:
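+	/* 'i' holds the number of populated entries at the point of
+	 * failure; page_count is reused here purely as for_each_sg()'s
+	 * loop cursor. */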
+ for_each_sg(st->sgl, sg, i, page_count)
+ page_cache_release(sg_page(sg));
+ sg_free_table(st);
+ kfree(st);
+ return PTR_ERR(page);
+}
+
+/* Ensure that the associated pages are gathered from the backing storage
+ * and pinned into our object. i915_gem_object_get_pages() may be called
+ * multiple times before they are released by a single call to
+ * i915_gem_object_put_pages() - once the pages are no longer referenced
+ * either as a result of memory pressure (reaping pages under the shrinker)
+ * or as the object is itself released.
+ */
+int
+i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ const struct drm_i915_gem_object_ops *ops = obj->ops;
+ int ret;
+
+ if (obj->pages)
+ return 0;
+
+ BUG_ON(obj->pages_pin_count);
+
+ ret = ops->get_pages(obj);
+ if (ret)
+ return ret;
+
+ list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
+ return 0;
}
void
@@ -1440,7 +1878,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
list_move_tail(&obj->ring_list, &ring->active_list);
- obj->last_rendering_seqno = seqno;
+ obj->last_read_seqno = seqno;
if (obj->fenced_gpu_access) {
obj->last_fenced_seqno = seqno;
@@ -1457,97 +1895,35 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
}
static void
-i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
-{
- list_del_init(&obj->ring_list);
- obj->last_rendering_seqno = 0;
- obj->last_fenced_seqno = 0;
-}
-
-static void
-i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
+i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
BUG_ON(!obj->active);
- list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
-
- i915_gem_object_move_off_active(obj);
-}
-static void
-i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
-{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ if (obj->pin_count) /* are we a framebuffer? */
+ intel_mark_fb_idle(obj);
list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
- BUG_ON(!list_empty(&obj->gpu_write_list));
- BUG_ON(!obj->active);
+ list_del_init(&obj->ring_list);
obj->ring = NULL;
- i915_gem_object_move_off_active(obj);
+ obj->last_read_seqno = 0;
+ obj->last_write_seqno = 0;
+ obj->base.write_domain = 0;
+
+ obj->last_fenced_seqno = 0;
obj->fenced_gpu_access = false;
obj->active = 0;
- obj->pending_gpu_write = false;
drm_gem_object_unreference(&obj->base);
WARN_ON(i915_verify_lists(dev));
}
-/* Immediately discard the backing storage */
-static void
-i915_gem_object_truncate(struct drm_i915_gem_object *obj)
-{
- struct inode *inode;
-
- /* Our goal here is to return as much of the memory as
- * is possible back to the system as we are called from OOM.
- * To do this we must instruct the shmfs to drop all of its
- * backing pages, *now*.
- */
- inode = obj->base.filp->f_path.dentry->d_inode;
- shmem_truncate_range(inode, 0, (loff_t)-1);
-
- if (obj->base.map_list.map)
- drm_gem_free_mmap_offset(&obj->base);
-
- obj->madv = __I915_MADV_PURGED;
-}
-
-static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
-{
- return obj->madv == I915_MADV_DONTNEED;
-}
-
-static void
-i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
- uint32_t flush_domains)
-{
- struct drm_i915_gem_object *obj, *next;
-
- list_for_each_entry_safe(obj, next,
- &ring->gpu_write_list,
- gpu_write_list) {
- if (obj->base.write_domain & flush_domains) {
- uint32_t old_write_domain = obj->base.write_domain;
-
- obj->base.write_domain = 0;
- list_del_init(&obj->gpu_write_list);
- i915_gem_object_move_to_active(obj, ring,
- i915_gem_next_request_seqno(ring));
-
- trace_i915_gem_object_change_domain(obj,
- obj->base.read_domains,
- old_write_domain);
- }
- }
-}
-
static u32
i915_gem_get_seqno(struct drm_device *dev)
{
@@ -1588,15 +1964,16 @@ i915_add_request(struct intel_ring_buffer *ring,
* is that the flush _must_ happen before the next request, no matter
* what.
*/
- if (ring->gpu_caches_dirty) {
- ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);
- if (ret)
- return ret;
+ ret = intel_ring_flush_all_caches(ring);
+ if (ret)
+ return ret;
- ring->gpu_caches_dirty = false;
+ if (request == NULL) {
+ request = kmalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
}
- BUG_ON(request == NULL);
seqno = i915_gem_next_request_seqno(ring);
/* Record the position of the start of the request so that
@@ -1607,8 +1984,10 @@ i915_add_request(struct intel_ring_buffer *ring,
request_ring_position = intel_ring_get_tail(ring);
ret = ring->add_request(ring, &seqno);
- if (ret)
- return ret;
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
trace_i915_gem_request_add(ring, seqno);
@@ -1618,6 +1997,7 @@ i915_add_request(struct intel_ring_buffer *ring,
request->emitted_jiffies = jiffies;
was_empty = list_empty(&ring->request_list);
list_add_tail(&request->list, &ring->request_list);
+ request->file_priv = NULL;
if (file) {
struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -1637,13 +2017,13 @@ i915_add_request(struct intel_ring_buffer *ring,
jiffies +
msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}
- if (was_empty)
+ if (was_empty) {
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work, HZ);
+ intel_mark_busy(dev_priv->dev);
+ }
}
- WARN_ON(!list_empty(&ring->gpu_write_list));
-
return 0;
}
@@ -1685,8 +2065,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object,
ring_list);
- obj->base.write_domain = 0;
- list_del_init(&obj->gpu_write_list);
i915_gem_object_move_to_inactive(obj);
}
}
@@ -1722,20 +2100,6 @@ void i915_gem_reset(struct drm_device *dev)
for_each_ring(ring, dev_priv, i)
i915_gem_reset_ring_lists(dev_priv, ring);
- /* Remove anything from the flushing lists. The GPU cache is likely
- * to be lost on reset along with the data, so simply move the
- * lost bo to the inactive list.
- */
- while (!list_empty(&dev_priv->mm.flushing_list)) {
- obj = list_first_entry(&dev_priv->mm.flushing_list,
- struct drm_i915_gem_object,
- mm_list);
-
- obj->base.write_domain = 0;
- list_del_init(&obj->gpu_write_list);
- i915_gem_object_move_to_inactive(obj);
- }
-
/* Move everything out of the GPU domains to ensure we do any
* necessary invalidation upon reuse.
*/
@@ -1764,7 +2128,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
WARN_ON(i915_verify_lists(ring->dev));
- seqno = ring->get_seqno(ring);
+ seqno = ring->get_seqno(ring, true);
for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
if (seqno >= ring->sync_seqno[i])
@@ -1803,13 +2167,10 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
struct drm_i915_gem_object,
ring_list);
- if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
+ if (!i915_seqno_passed(seqno, obj->last_read_seqno))
break;
- if (obj->base.write_domain != 0)
- i915_gem_object_move_to_flushing(obj);
- else
- i915_gem_object_move_to_inactive(obj);
+ i915_gem_object_move_to_inactive(obj);
}
if (unlikely(ring->trace_irq_seqno &&
@@ -1858,216 +2219,20 @@ i915_gem_retire_work_handler(struct work_struct *work)
*/
idle = true;
for_each_ring(ring, dev_priv, i) {
- if (ring->gpu_caches_dirty) {
- struct drm_i915_gem_request *request;
-
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL ||
- i915_add_request(ring, NULL, request))
- kfree(request);
- }
+ if (ring->gpu_caches_dirty)
+ i915_add_request(ring, NULL, NULL);
idle &= list_empty(&ring->request_list);
}
if (!dev_priv->mm.suspended && !idle)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+ if (idle)
+ intel_mark_idle(dev);
mutex_unlock(&dev->struct_mutex);
}
-int
-i915_gem_check_wedge(struct drm_i915_private *dev_priv,
- bool interruptible)
-{
- if (atomic_read(&dev_priv->mm.wedged)) {
- struct completion *x = &dev_priv->error_completion;
- bool recovery_complete;
- unsigned long flags;
-
- /* Give the error handler a chance to run. */
- spin_lock_irqsave(&x->wait.lock, flags);
- recovery_complete = x->done > 0;
- spin_unlock_irqrestore(&x->wait.lock, flags);
-
- /* Non-interruptible callers can't handle -EAGAIN, hence return
- * -EIO unconditionally for these. */
- if (!interruptible)
- return -EIO;
-
- /* Recovery complete, but still wedged means reset failure. */
- if (recovery_complete)
- return -EIO;
-
- return -EAGAIN;
- }
-
- return 0;
-}
-
-/*
- * Compare seqno against outstanding lazy request. Emit a request if they are
- * equal.
- */
-static int
-i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
-{
- int ret = 0;
-
- BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-
- if (seqno == ring->outstanding_lazy_request) {
- struct drm_i915_gem_request *request;
-
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return -ENOMEM;
-
- ret = i915_add_request(ring, NULL, request);
- if (ret) {
- kfree(request);
- return ret;
- }
-
- BUG_ON(seqno != request->seqno);
- }
-
- return ret;
-}
-
-/**
- * __wait_seqno - wait until execution of seqno has finished
- * @ring: the ring expected to report seqno
- * @seqno: duh!
- * @interruptible: do an interruptible wait (normally yes)
- * @timeout: in - how long to wait (NULL forever); out - how much time remaining
- *
- * Returns 0 if the seqno was found within the alloted time. Else returns the
- * errno with remaining time filled in timeout argument.
- */
-static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
- bool interruptible, struct timespec *timeout)
-{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
- struct timespec before, now, wait_time={1,0};
- unsigned long timeout_jiffies;
- long end;
- bool wait_forever = true;
- int ret;
-
- if (i915_seqno_passed(ring->get_seqno(ring), seqno))
- return 0;
-
- trace_i915_gem_request_wait_begin(ring, seqno);
-
- if (timeout != NULL) {
- wait_time = *timeout;
- wait_forever = false;
- }
-
- timeout_jiffies = timespec_to_jiffies(&wait_time);
-
- if (WARN_ON(!ring->irq_get(ring)))
- return -ENODEV;
-
- /* Record current time in case interrupted by signal, or wedged * */
- getrawmonotonic(&before);
-
-#define EXIT_COND \
- (i915_seqno_passed(ring->get_seqno(ring), seqno) || \
- atomic_read(&dev_priv->mm.wedged))
- do {
- if (interruptible)
- end = wait_event_interruptible_timeout(ring->irq_queue,
- EXIT_COND,
- timeout_jiffies);
- else
- end = wait_event_timeout(ring->irq_queue, EXIT_COND,
- timeout_jiffies);
-
- ret = i915_gem_check_wedge(dev_priv, interruptible);
- if (ret)
- end = ret;
- } while (end == 0 && wait_forever);
-
- getrawmonotonic(&now);
-
- ring->irq_put(ring);
- trace_i915_gem_request_wait_end(ring, seqno);
-#undef EXIT_COND
-
- if (timeout) {
- struct timespec sleep_time = timespec_sub(now, before);
- *timeout = timespec_sub(*timeout, sleep_time);
- }
-
- switch (end) {
- case -EIO:
- case -EAGAIN: /* Wedged */
- case -ERESTARTSYS: /* Signal */
- return (int)end;
- case 0: /* Timeout */
- if (timeout)
- set_normalized_timespec(timeout, 0, 0);
- return -ETIME;
- default: /* Completed */
- WARN_ON(end < 0); /* We're not aware of other errors */
- return 0;
- }
-}
-
-/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-int
-i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
-{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
- int ret = 0;
-
- BUG_ON(seqno == 0);
-
- ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
- if (ret)
- return ret;
-
- ret = i915_gem_check_olr(ring, seqno);
- if (ret)
- return ret;
-
- ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL);
-
- return ret;
-}
-
-/**
- * Ensures that all rendering to the object has completed and the object is
- * safe to unbind from the GTT or access from the CPU.
- */
-int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
-{
- int ret;
-
- /* This function only exists to support waiting for existing rendering,
- * not for emitting required flushes.
- */
- BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
-
- /* If there is rendering queued on the buffer being evicted, wait for
- * it.
- */
- if (obj->active) {
- ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno);
- if (ret)
- return ret;
- i915_gem_retire_requests_ring(obj->ring);
- }
-
- return 0;
-}
-
/**
* Ensures that an object will eventually get non-busy by flushing any required
* write domains, emitting any outstanding lazy request and retiring and
@@ -2079,14 +2244,10 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
int ret;
if (obj->active) {
- ret = i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
if (ret)
return ret;
- ret = i915_gem_check_olr(obj->ring,
- obj->last_rendering_seqno);
- if (ret)
- return ret;
i915_gem_retire_requests_ring(obj->ring);
}
@@ -2146,7 +2307,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto out;
if (obj->active) {
- seqno = obj->last_rendering_seqno;
+ seqno = obj->last_read_seqno;
ring = obj->ring;
}
@@ -2201,11 +2362,11 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
return 0;
if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
- return i915_gem_object_wait_rendering(obj);
+ return i915_gem_object_wait_rendering(obj, false);
idx = intel_ring_sync_index(from, to);
- seqno = obj->last_rendering_seqno;
+ seqno = obj->last_read_seqno;
if (seqno <= from->sync_seqno[idx])
return 0;
@@ -2259,6 +2420,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
if (obj->pin_count)
return -EBUSY;
+ BUG_ON(obj->pages == NULL);
+
ret = i915_gem_object_finish_gpu(obj);
if (ret)
return ret;
@@ -2269,22 +2432,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
i915_gem_object_finish_gtt(obj);
- /* Move the object to the CPU domain to ensure that
- * any possible CPU writes while it's not in the GTT
- * are flushed when we go to remap it.
- */
- if (ret == 0)
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- if (ret == -ERESTARTSYS)
- return ret;
- if (ret) {
- /* In the event of a disaster, abandon all caches and
- * hope for the best.
- */
- i915_gem_clflush_object(obj);
- obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- }
-
/* release the fence reg _after_ flushing */
ret = i915_gem_object_put_fence(obj);
if (ret)
@@ -2300,10 +2447,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
}
i915_gem_gtt_finish_object(obj);
- i915_gem_object_put_pages_gtt(obj);
-
- list_del_init(&obj->gtt_list);
- list_del_init(&obj->mm_list);
+ list_del(&obj->mm_list);
+ list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
/* Avoid an unnecessary call to unbind on rebind. */
obj->map_and_fenceable = true;
@@ -2311,48 +2456,14 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
obj->gtt_space = NULL;
obj->gtt_offset = 0;
- if (i915_gem_object_is_purgeable(obj))
- i915_gem_object_truncate(obj);
-
- return ret;
-}
-
-int
-i915_gem_flush_ring(struct intel_ring_buffer *ring,
- uint32_t invalidate_domains,
- uint32_t flush_domains)
-{
- int ret;
-
- if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
- return 0;
-
- trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
-
- ret = ring->flush(ring, invalidate_domains, flush_domains);
- if (ret)
- return ret;
-
- if (flush_domains & I915_GEM_GPU_DOMAINS)
- i915_gem_process_flushing_list(ring, flush_domains);
-
return 0;
}
static int i915_ring_idle(struct intel_ring_buffer *ring)
{
- int ret;
-
- if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
+ if (list_empty(&ring->active_list))
return 0;
- if (!list_empty(&ring->gpu_write_list)) {
- ret = i915_gem_flush_ring(ring,
- I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
- if (ret)
- return ret;
- }
-
return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
}
@@ -2371,10 +2482,6 @@ int i915_gpu_idle(struct drm_device *dev)
ret = i915_ring_idle(ring);
if (ret)
return ret;
-
- /* Is the device fubar? */
- if (WARN_ON(!list_empty(&ring->gpu_write_list)))
- return -EBUSY;
}
return 0;
@@ -2547,21 +2654,8 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
static int
i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
{
- int ret;
-
- if (obj->fenced_gpu_access) {
- if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(obj->ring,
- 0, obj->base.write_domain);
- if (ret)
- return ret;
- }
-
- obj->fenced_gpu_access = false;
- }
-
if (obj->last_fenced_seqno) {
- ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
+ int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
if (ret)
return ret;
@@ -2574,6 +2668,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
mb();
+ obj->fenced_gpu_access = false;
return 0;
}
@@ -2693,18 +2788,88 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
return 0;
}
+static bool i915_gem_valid_gtt_space(struct drm_device *dev,
+ struct drm_mm_node *gtt_space,
+ unsigned long cache_level)
+{
+ struct drm_mm_node *other;
+
+ /* On non-LLC machines we have to be careful when putting differing
+ * types of snoopable memory together to avoid the prefetcher
+	 * crossing memory domains and dying.
+ */
+ if (HAS_LLC(dev))
+ return true;
+
+ if (gtt_space == NULL)
+ return true;
+
+ if (list_empty(&gtt_space->node_list))
+ return true;
+
+ other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
+ if (other->allocated && !other->hole_follows && other->color != cache_level)
+ return false;
+
+ other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
+ if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
+ return false;
+
+ return true;
+}
+
+static void i915_gem_verify_gtt(struct drm_device *dev)
+{
+#if WATCH_GTT
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ int err = 0;
+
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ if (obj->gtt_space == NULL) {
+ printk(KERN_ERR "object found on GTT list with no space reserved\n");
+ err++;
+ continue;
+ }
+
+ if (obj->cache_level != obj->gtt_space->color) {
+ printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
+ obj->gtt_space->start,
+ obj->gtt_space->start + obj->gtt_space->size,
+ obj->cache_level,
+ obj->gtt_space->color);
+ err++;
+ continue;
+ }
+
+ if (!i915_gem_valid_gtt_space(dev,
+ obj->gtt_space,
+ obj->cache_level)) {
+ printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
+ obj->gtt_space->start,
+ obj->gtt_space->start + obj->gtt_space->size,
+ obj->cache_level);
+ err++;
+ continue;
+ }
+ }
+
+ WARN_ON(err);
+#endif
+}
+
/**
* Finds free space in the GTT aperture and binds the object there.
*/
static int
i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
unsigned alignment,
- bool map_and_fenceable)
+ bool map_and_fenceable,
+ bool nonblocking)
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_mm_node *free_space;
- gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
u32 size, fence_size, fence_alignment, unfenced_alignment;
bool mappable, fenceable;
int ret;
@@ -2744,89 +2909,67 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
return -E2BIG;
}
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ret;
+
search_free:
if (map_and_fenceable)
free_space =
- drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
- size, alignment,
- 0, dev_priv->mm.gtt_mappable_end,
- 0);
+ drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
+ size, alignment, obj->cache_level,
+ 0, dev_priv->mm.gtt_mappable_end,
+ false);
else
- free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
- size, alignment, 0);
+ free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
+ size, alignment, obj->cache_level,
+ false);
if (free_space != NULL) {
if (map_and_fenceable)
obj->gtt_space =
drm_mm_get_block_range_generic(free_space,
- size, alignment, 0,
+ size, alignment, obj->cache_level,
0, dev_priv->mm.gtt_mappable_end,
- 0);
+ false);
else
obj->gtt_space =
- drm_mm_get_block(free_space, size, alignment);
+ drm_mm_get_block_generic(free_space,
+ size, alignment, obj->cache_level,
+ false);
}
if (obj->gtt_space == NULL) {
- /* If the gtt is empty and we're still having trouble
- * fitting our object in, we're out of memory.
- */
ret = i915_gem_evict_something(dev, size, alignment,
- map_and_fenceable);
+ obj->cache_level,
+ map_and_fenceable,
+ nonblocking);
if (ret)
return ret;
goto search_free;
}
-
- ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
- if (ret) {
+ if (WARN_ON(!i915_gem_valid_gtt_space(dev,
+ obj->gtt_space,
+ obj->cache_level))) {
drm_mm_put_block(obj->gtt_space);
obj->gtt_space = NULL;
-
- if (ret == -ENOMEM) {
- /* first try to reclaim some memory by clearing the GTT */
- ret = i915_gem_evict_everything(dev, false);
- if (ret) {
- /* now try to shrink everyone else */
- if (gfpmask) {
- gfpmask = 0;
- goto search_free;
- }
-
- return -ENOMEM;
- }
-
- goto search_free;
- }
-
- return ret;
+ return -EINVAL;
}
+
ret = i915_gem_gtt_prepare_object(obj);
if (ret) {
- i915_gem_object_put_pages_gtt(obj);
drm_mm_put_block(obj->gtt_space);
obj->gtt_space = NULL;
-
- if (i915_gem_evict_everything(dev, false))
- return ret;
-
- goto search_free;
+ return ret;
}
if (!dev_priv->mm.aliasing_ppgtt)
i915_gem_gtt_bind_object(obj, obj->cache_level);
- list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
+ list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
- /* Assert that the object is not currently in any GPU domain. As it
- * wasn't in the GTT, there shouldn't be any way it could have been in
- * a GPU cache
- */
- BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
- BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
-
obj->gtt_offset = obj->gtt_space->start;
fenceable =
@@ -2839,6 +2982,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
obj->map_and_fenceable = mappable && fenceable;
trace_i915_gem_object_bind(obj, map_and_fenceable);
+ i915_gem_verify_gtt(dev);
return 0;
}
@@ -2865,18 +3009,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
trace_i915_gem_object_clflush(obj);
- drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
-}
-
-/** Flushes any GPU write domain for the object if it's dirty. */
-static int
-i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
-{
- if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
- return 0;
-
- /* Queue the GPU write cache flushing we need. */
- return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
+ drm_clflush_sg(obj->pages);
}
/** Flushes the GTT write domain for the object if it's dirty. */
@@ -2945,16 +3078,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
return 0;
- ret = i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_wait_rendering(obj, !write);
if (ret)
return ret;
- if (obj->pending_gpu_write || write) {
- ret = i915_gem_object_wait_rendering(obj);
- if (ret)
- return ret;
- }
-
i915_gem_object_flush_cpu_write_domain(obj);
old_write_domain = obj->base.write_domain;
@@ -2997,6 +3124,12 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return -EBUSY;
}
+ if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
+ ret = i915_gem_object_unbind(obj);
+ if (ret)
+ return ret;
+ }
+
if (obj->gtt_space) {
ret = i915_gem_object_finish_gpu(obj);
if (ret)
@@ -3008,7 +3141,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
* registers with snooped memory, so relinquish any fences
* currently pointing to our region in the aperture.
*/
- if (INTEL_INFO(obj->base.dev)->gen < 6) {
+ if (INTEL_INFO(dev)->gen < 6) {
ret = i915_gem_object_put_fence(obj);
if (ret)
return ret;
@@ -3019,6 +3152,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
if (obj->has_aliasing_ppgtt_mapping)
i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
obj, cache_level);
+
+ obj->gtt_space->color = cache_level;
}
if (cache_level == I915_CACHE_NONE) {
@@ -3045,9 +3180,72 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
}
obj->cache_level = cache_level;
+ i915_gem_verify_gtt(dev);
return 0;
}
+int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_caching *args = data;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ args->caching = obj->cache_level != I915_CACHE_NONE;
+
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_caching *args = data;
+ struct drm_i915_gem_object *obj;
+ enum i915_cache_level level;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ switch (args->caching) {
+ case I915_CACHING_NONE:
+ level = I915_CACHE_NONE;
+ break;
+ case I915_CACHING_CACHED:
+ level = I915_CACHE_LLC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ ret = i915_gem_object_set_cache_level(obj, level);
+
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
/*
* Prepare buffer for display plane (scanout, cursors, etc).
* Can be called from an uninterruptible phase (modesetting) and allows
@@ -3061,10 +3259,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 old_read_domains, old_write_domain;
int ret;
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret)
- return ret;
-
if (pipelined != obj->ring) {
ret = i915_gem_object_sync(obj, pipelined);
if (ret)
@@ -3088,7 +3282,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* (e.g. libkms for the bootup splash), we have to ensure that we
* always use map_and_fenceable for all scanout buffers.
*/
- ret = i915_gem_object_pin(obj, alignment, true);
+ ret = i915_gem_object_pin(obj, alignment, true, false);
if (ret)
return ret;
@@ -3100,7 +3294,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ obj->base.write_domain = 0;
obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
trace_i915_gem_object_change_domain(obj,
@@ -3118,13 +3312,7 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
return 0;
- if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
- if (ret)
- return ret;
- }
-
- ret = i915_gem_object_wait_rendering(obj);
+ ret = i915_gem_object_wait_rendering(obj, false);
if (ret)
return ret;
@@ -3148,16 +3336,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
return 0;
- ret = i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_wait_rendering(obj, !write);
if (ret)
return ret;
- if (write || obj->pending_gpu_write) {
- ret = i915_gem_object_wait_rendering(obj);
- if (ret)
- return ret;
- }
-
i915_gem_object_flush_gtt_write_domain(obj);
old_write_domain = obj->base.write_domain;
@@ -3237,7 +3419,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
- bool map_and_fenceable)
+ bool map_and_fenceable,
+ bool nonblocking)
{
int ret;
@@ -3262,7 +3445,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
if (obj->gtt_space == NULL) {
ret = i915_gem_object_bind_to_gtt(obj, alignment,
- map_and_fenceable);
+ map_and_fenceable,
+ nonblocking);
if (ret)
return ret;
}
@@ -3320,7 +3504,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
obj->user_pin_count++;
obj->pin_filp = file;
if (obj->user_pin_count == 1) {
- ret = i915_gem_object_pin(obj, args->alignment, true);
+ ret = i915_gem_object_pin(obj, args->alignment, true, false);
if (ret)
goto out;
}
@@ -3400,6 +3584,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_flush_active(obj);
args->busy = obj->active;
+ if (obj->ring) {
+ BUILD_BUG_ON(I915_NUM_RINGS > 16);
+ args->busy |= intel_ring_flag(obj->ring) << 16;
+ }
drm_gem_object_unreference(&obj->base);
unlock:
@@ -3448,9 +3636,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
if (obj->madv != __I915_MADV_PURGED)
obj->madv = args->madv;
- /* if the object is no longer bound, discard its backing storage */
- if (i915_gem_object_is_purgeable(obj) &&
- obj->gtt_space == NULL)
+ /* if the object is no longer attached, discard its backing storage */
+ if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
i915_gem_object_truncate(obj);
args->retained = obj->madv != __I915_MADV_PURGED;
@@ -3462,10 +3649,32 @@ unlock:
return ret;
}
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_object_ops *ops)
+{
+ INIT_LIST_HEAD(&obj->mm_list);
+ INIT_LIST_HEAD(&obj->gtt_list);
+ INIT_LIST_HEAD(&obj->ring_list);
+ INIT_LIST_HEAD(&obj->exec_list);
+
+ obj->ops = ops;
+
+ obj->fence_reg = I915_FENCE_REG_NONE;
+ obj->madv = I915_MADV_WILLNEED;
+ /* Avoid an unnecessary call to unbind on the first bind. */
+ obj->map_and_fenceable = true;
+
+ i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
+ .get_pages = i915_gem_object_get_pages_gtt,
+ .put_pages = i915_gem_object_put_pages_gtt,
+};
+
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct address_space *mapping;
u32 mask;
@@ -3489,7 +3698,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
mapping_set_gfp_mask(mapping, mask);
- i915_gem_info_add_obj(dev_priv, size);
+ i915_gem_object_init(obj, &i915_gem_object_ops);
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
@@ -3511,17 +3720,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
} else
obj->cache_level = I915_CACHE_NONE;
- obj->base.driver_private = NULL;
- obj->fence_reg = I915_FENCE_REG_NONE;
- INIT_LIST_HEAD(&obj->mm_list);
- INIT_LIST_HEAD(&obj->gtt_list);
- INIT_LIST_HEAD(&obj->ring_list);
- INIT_LIST_HEAD(&obj->exec_list);
- INIT_LIST_HEAD(&obj->gpu_write_list);
- obj->madv = I915_MADV_WILLNEED;
- /* Avoid an unnecessary call to unbind on the first bind. */
- obj->map_and_fenceable = true;
-
return obj;
}
@@ -3540,9 +3738,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
trace_i915_gem_object_destroy(obj);
- if (gem_obj->import_attach)
- drm_prime_gem_destroy(gem_obj, obj->sg_table);
-
if (obj->phys_obj)
i915_gem_detach_phys_object(dev, obj);
@@ -3558,8 +3753,14 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
dev_priv->mm.interruptible = was_interruptible;
}
- if (obj->base.map_list.map)
- drm_gem_free_mmap_offset(&obj->base);
+ obj->pages_pin_count = 0;
+ i915_gem_object_put_pages(obj);
+ i915_gem_object_free_mmap_offset(obj);
+
+ BUG_ON(obj->pages);
+
+ if (obj->base.import_attach)
+ drm_prime_gem_destroy(&obj->base, NULL);
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(dev_priv, obj->base.size);
@@ -3590,7 +3791,7 @@ i915_gem_idle(struct drm_device *dev)
/* Under UMS, be paranoid and evict. */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_gem_evict_everything(dev, false);
+ i915_gem_evict_everything(dev);
i915_gem_reset_fences(dev);
@@ -3891,7 +4092,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
}
BUG_ON(!list_empty(&dev_priv->mm.active_list));
- BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
mutex_unlock(&dev->struct_mutex);
@@ -3939,7 +4139,6 @@ init_ring_lists(struct intel_ring_buffer *ring)
{
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
- INIT_LIST_HEAD(&ring->gpu_write_list);
}
void
@@ -3949,10 +4148,10 @@ i915_gem_load(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
INIT_LIST_HEAD(&dev_priv->mm.active_list);
- INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+ INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
+ INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
- INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
for (i = 0; i < I915_NUM_RINGS; i++)
init_ring_lists(&dev_priv->ring[i]);
for (i = 0; i < I915_MAX_NUM_FENCES; i++)
@@ -4197,18 +4396,6 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
}
static int
-i915_gpu_is_active(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int lists_empty;
-
- lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list);
-
- return !lists_empty;
-}
-
-static int
i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
struct drm_i915_private *dev_priv =
@@ -4216,60 +4403,27 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
struct drm_i915_private,
mm.inactive_shrinker);
struct drm_device *dev = dev_priv->dev;
- struct drm_i915_gem_object *obj, *next;
+ struct drm_i915_gem_object *obj;
int nr_to_scan = sc->nr_to_scan;
int cnt;
if (!mutex_trylock(&dev->struct_mutex))
return 0;
- /* "fast-path" to count number of available objects */
- if (nr_to_scan == 0) {
- cnt = 0;
- list_for_each_entry(obj,
- &dev_priv->mm.inactive_list,
- mm_list)
- cnt++;
- mutex_unlock(&dev->struct_mutex);
- return cnt / 100 * sysctl_vfs_cache_pressure;
- }
-
-rescan:
- /* first scan for clean buffers */
- i915_gem_retire_requests(dev);
-
- list_for_each_entry_safe(obj, next,
- &dev_priv->mm.inactive_list,
- mm_list) {
- if (i915_gem_object_is_purgeable(obj)) {
- if (i915_gem_object_unbind(obj) == 0 &&
- --nr_to_scan == 0)
- break;
- }
+ if (nr_to_scan) {
+ nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
+ if (nr_to_scan > 0)
+ i915_gem_shrink_all(dev_priv);
}
- /* second pass, evict/count anything still on the inactive list */
cnt = 0;
- list_for_each_entry_safe(obj, next,
- &dev_priv->mm.inactive_list,
- mm_list) {
- if (nr_to_scan &&
- i915_gem_object_unbind(obj) == 0)
- nr_to_scan--;
- else
- cnt++;
- }
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
+ if (obj->pages_pin_count == 0)
+ cnt += obj->base.size >> PAGE_SHIFT;
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+ if (obj->pin_count == 0 && obj->pages_pin_count == 0)
+ cnt += obj->base.size >> PAGE_SHIFT;
- if (nr_to_scan && i915_gpu_is_active(dev)) {
- /*
- * We are desperate for pages, so as a last resort, wait
- * for the GPU to finish and discard whatever we can.
- * This has a dramatic impact to reduce the number of
- * OOM-killer events whilst running the GPU aggressively.
- */
- if (i915_gpu_idle(dev) == 0)
- goto rescan;
- }
mutex_unlock(&dev->struct_mutex);
- return cnt / 100 * sysctl_vfs_cache_pressure;
+ return cnt;
}
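
The reworked shrinker above reports a count of directly reclaimable pages from the two object lists instead of the old vfs_cache_pressure scaling. A minimal user-space sketch of that accounting, using hypothetical obj/list types rather than the driver's structures:

#include <stdio.h>

#define PAGE_SHIFT 12

/* hypothetical stand-in for drm_i915_gem_object */
struct obj {
	unsigned long size;	/* object size in bytes */
	int pin_count;		/* GTT pin count */
	int pages_pin_count;	/* backing-storage pin count */
};

/* Count reclaimable pages the way the new shrinker does:
 * unbound objects need only an unpinned backing store, while
 * bound objects must additionally be unpinned from the GTT. */
static unsigned long count_reclaimable(const struct obj *unbound, int nu,
				       const struct obj *bound, int nb)
{
	unsigned long cnt = 0;
	int i;

	for (i = 0; i < nu; i++)
		if (unbound[i].pages_pin_count == 0)
			cnt += unbound[i].size >> PAGE_SHIFT;

	for (i = 0; i < nb; i++)
		if (bound[i].pin_count == 0 && bound[i].pages_pin_count == 0)
			cnt += bound[i].size >> PAGE_SHIFT;

	return cnt;
}

int main(void)
{
	struct obj unbound[] = { { 1 << 20, 0, 0 }, { 1 << 16, 0, 1 } };
	struct obj bound[]   = { { 1 << 20, 1, 0 }, { 2 << 20, 0, 0 } };

	/* 256 pages from unbound[0] + 512 from bound[1] = 768 */
	printf("%lu reclaimable pages\n",
	       count_reclaimable(unbound, 2, bound, 2));
	return 0;
}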
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a21c3dccf436..1eb48faf741b 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -97,8 +97,7 @@
static struct i915_hw_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
-static int do_switch(struct drm_i915_gem_object *from_obj,
- struct i915_hw_context *to, u32 seqno);
+static int do_switch(struct i915_hw_context *to);
static int get_context_size(struct drm_device *dev)
{
@@ -113,7 +112,10 @@ static int get_context_size(struct drm_device *dev)
break;
case 7:
reg = I915_READ(GEN7_CXT_SIZE);
- ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
+ if (IS_HASWELL(dev))
+ ret = HSW_CXT_TOTAL_SIZE(reg) * 64;
+ else
+ ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
break;
default:
BUG();
@@ -219,20 +221,21 @@ static int create_default_context(struct drm_i915_private *dev_priv)
* default context.
*/
dev_priv->ring[RCS].default_context = ctx;
- ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false);
- if (ret) {
- do_destroy(ctx);
- return ret;
- }
+ ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
+ if (ret)
+ goto err_destroy;
- ret = do_switch(NULL, ctx, 0);
- if (ret) {
- i915_gem_object_unpin(ctx->obj);
- do_destroy(ctx);
- } else {
- DRM_DEBUG_DRIVER("Default HW context loaded\n");
- }
+ ret = do_switch(ctx);
+ if (ret)
+ goto err_unpin;
+ DRM_DEBUG_DRIVER("Default HW context loaded\n");
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(ctx->obj);
+err_destroy:
+ do_destroy(ctx);
return ret;
}
@@ -359,18 +362,19 @@ mi_set_context(struct intel_ring_buffer *ring,
return ret;
}
-static int do_switch(struct drm_i915_gem_object *from_obj,
- struct i915_hw_context *to,
- u32 seqno)
+static int do_switch(struct i915_hw_context *to)
{
- struct intel_ring_buffer *ring = NULL;
+ struct intel_ring_buffer *ring = to->ring;
+ struct drm_i915_gem_object *from_obj = ring->last_context_obj;
u32 hw_flags = 0;
int ret;
- BUG_ON(to == NULL);
BUG_ON(from_obj != NULL && from_obj->pin_count == 0);
- ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false);
+ if (from_obj == to->obj)
+ return 0;
+
+ ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
if (ret)
return ret;
@@ -393,7 +397,6 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
hw_flags |= MI_FORCE_RESTORE;
- ring = to->ring;
ret = mi_set_context(ring, to, hw_flags);
if (ret) {
i915_gem_object_unpin(to->obj);
@@ -407,6 +410,7 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from_obj != NULL) {
+ u32 seqno = i915_gem_next_request_seqno(ring);
from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
i915_gem_object_move_to_active(from_obj, ring, seqno);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
@@ -417,7 +421,7 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
* swapped, but there is no way to do that yet.
*/
from_obj->dirty = 1;
- BUG_ON(from_obj->ring != to->ring);
+ BUG_ON(from_obj->ring != ring);
i915_gem_object_unpin(from_obj);
drm_gem_object_unreference(&from_obj->base);
@@ -448,9 +452,7 @@ int i915_switch_context(struct intel_ring_buffer *ring,
int to_id)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
- struct drm_i915_file_private *file_priv = NULL;
struct i915_hw_context *to;
- struct drm_i915_gem_object *from_obj = ring->last_context_obj;
if (dev_priv->hw_contexts_disabled)
return 0;
@@ -458,21 +460,18 @@ int i915_switch_context(struct intel_ring_buffer *ring,
if (ring != &dev_priv->ring[RCS])
return 0;
- if (file)
- file_priv = file->driver_priv;
-
if (to_id == DEFAULT_CONTEXT_ID) {
to = ring->default_context;
} else {
- to = i915_gem_context_get(file_priv, to_id);
+ if (file == NULL)
+ return -EINVAL;
+
+ to = i915_gem_context_get(file->driver_priv, to_id);
if (to == NULL)
return -ENOENT;
}
- if (from_obj == to->obj)
- return 0;
-
- return do_switch(from_obj, to, i915_gem_next_request_seqno(to->ring));
+ return do_switch(to);
}
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
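
With the refactor above, do_switch() derives the outgoing context from the ring itself and short-circuits when the target context is already current. A hypothetical, minimal model of that fast path (not the driver code):

#include <stdio.h>

struct ctx { int id; };
struct ring { struct ctx *last_ctx; };

static int do_switch(struct ring *ring, struct ctx *to)
{
	if (ring->last_ctx == to)
		return 0;		/* already current: nothing to emit */

	/* ... pin the new context, emit MI_SET_CONTEXT, move the
	 * old context object to the active list ... */
	ring->last_ctx = to;
	return 0;
}

int main(void)
{
	struct ctx a = { 1 }, b = { 2 };
	struct ring rcs = { NULL };

	do_switch(&rcs, &a);	/* switches */
	do_switch(&rcs, &a);	/* no-op */
	do_switch(&rcs, &b);	/* switches */
	printf("current context: %d\n", rcs.last_ctx->id);
	return 0;
}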
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index af199596e792..773ef77b6c22 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -28,35 +28,62 @@
#include <linux/dma-buf.h>
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
- enum dma_data_direction dir)
+ enum dma_data_direction dir)
{
struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
- struct drm_device *dev = obj->base.dev;
- int npages = obj->base.size / PAGE_SIZE;
- struct sg_table *sg = NULL;
- int ret;
- int nents;
+ struct sg_table *st;
+ struct scatterlist *src, *dst;
+ int ret, i;
- ret = i915_mutex_lock_interruptible(dev);
+ ret = i915_mutex_lock_interruptible(obj->base.dev);
if (ret)
return ERR_PTR(ret);
- if (!obj->pages) {
- ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
- if (ret)
- goto out;
+ ret = i915_gem_object_get_pages(obj);
+ if (ret) {
+ st = ERR_PTR(ret);
+ goto out;
+ }
+
+ /* Copy sg so that we make an independent mapping */
+ st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (st == NULL) {
+ st = ERR_PTR(-ENOMEM);
+ goto out;
}
- /* link the pages into an SG then map the sg */
- sg = drm_prime_pages_to_sg(obj->pages, npages);
- nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
+ if (ret) {
+ kfree(st);
+ st = ERR_PTR(ret);
+ goto out;
+ }
+
+ src = obj->pages->sgl;
+ dst = st->sgl;
+ for (i = 0; i < obj->pages->nents; i++) {
+ sg_set_page(dst, sg_page(src), PAGE_SIZE, 0);
+ dst = sg_next(dst);
+ src = sg_next(src);
+ }
+
+ if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
+ sg_free_table(st);
+ kfree(st);
+ st = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ i915_gem_object_pin_pages(obj);
+
out:
- mutex_unlock(&dev->struct_mutex);
- return sg;
+ mutex_unlock(&obj->base.dev->struct_mutex);
+ return st;
}
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
- struct sg_table *sg, enum dma_data_direction dir)
+ struct sg_table *sg,
+ enum dma_data_direction dir)
{
dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
sg_free_table(sg);
@@ -78,7 +105,9 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct drm_i915_gem_object *obj = dma_buf->priv;
struct drm_device *dev = obj->base.dev;
- int ret;
+ struct scatterlist *sg;
+ struct page **pages;
+ int ret, i;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
@@ -89,24 +118,34 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
goto out_unlock;
}
- if (!obj->pages) {
- ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ERR_PTR(ret);
- }
- }
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ goto error;
- obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
- if (!obj->dma_buf_vmapping) {
- DRM_ERROR("failed to vmap object\n");
- goto out_unlock;
- }
+ ret = -ENOMEM;
+
+ pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *));
+ if (pages == NULL)
+ goto error;
+
+ for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i)
+ pages[i] = sg_page(sg);
+
+ obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL);
+ drm_free_large(pages);
+
+ if (!obj->dma_buf_vmapping)
+ goto error;
obj->vmapping_count = 1;
+ i915_gem_object_pin_pages(obj);
out_unlock:
mutex_unlock(&dev->struct_mutex);
return obj->dma_buf_vmapping;
+
+error:
+ mutex_unlock(&dev->struct_mutex);
+ return ERR_PTR(ret);
}
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
@@ -119,10 +158,11 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
if (ret)
return;
- --obj->vmapping_count;
- if (obj->vmapping_count == 0) {
+ if (--obj->vmapping_count == 0) {
vunmap(obj->dma_buf_vmapping);
obj->dma_buf_vmapping = NULL;
+
+ i915_gem_object_unpin_pages(obj);
}
mutex_unlock(&dev->struct_mutex);
}
@@ -151,6 +191,22 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
return -EINVAL;
}
+static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
+{
+ struct drm_i915_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->base.dev;
+ int ret;
+ bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_object_set_to_cpu_domain(obj, write);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
static const struct dma_buf_ops i915_dmabuf_ops = {
.map_dma_buf = i915_gem_map_dma_buf,
.unmap_dma_buf = i915_gem_unmap_dma_buf,
@@ -162,25 +218,47 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.mmap = i915_gem_dmabuf_mmap,
.vmap = i915_gem_dmabuf_vmap,
.vunmap = i915_gem_dmabuf_vunmap,
+ .begin_cpu_access = i915_gem_begin_cpu_access,
};
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *gem_obj, int flags)
+ struct drm_gem_object *gem_obj, int flags)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
- return dma_buf_export(obj, &i915_dmabuf_ops,
- obj->base.size, 0600);
+ return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600);
+}
+
+static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+{
+ struct sg_table *sg;
+
+ sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sg))
+ return PTR_ERR(sg);
+
+ obj->pages = sg;
+ obj->has_dma_mapping = true;
+ return 0;
}
+static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
+{
+ dma_buf_unmap_attachment(obj->base.import_attach,
+ obj->pages, DMA_BIDIRECTIONAL);
+ obj->has_dma_mapping = false;
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
+ .get_pages = i915_gem_object_get_pages_dmabuf,
+ .put_pages = i915_gem_object_put_pages_dmabuf,
+};
+
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
+ struct dma_buf *dma_buf)
{
struct dma_buf_attachment *attach;
- struct sg_table *sg;
struct drm_i915_gem_object *obj;
- int npages;
- int size;
int ret;
/* is this one of own objects? */
@@ -198,34 +276,24 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
if (IS_ERR(attach))
return ERR_CAST(attach);
- sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
- goto fail_detach;
- }
-
- size = dma_buf->size;
- npages = size / PAGE_SIZE;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (obj == NULL) {
ret = -ENOMEM;
- goto fail_unmap;
+ goto fail_detach;
}
- ret = drm_gem_private_object_init(dev, &obj->base, size);
+ ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
if (ret) {
kfree(obj);
- goto fail_unmap;
+ goto fail_detach;
}
- obj->sg_table = sg;
+ i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
obj->base.import_attach = attach;
return &obj->base;
-fail_unmap:
- dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
dma_buf_detach(dma_buf, attach);
return ERR_PTR(ret);
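
The new map_dma_buf path copies the exporter's sg_table entry by entry so that each attachment gets an independent mapping it can tear down on its own. A user-space sketch of that copy, with a hypothetical sg_entry standing in for a scatterlist:

#include <stdio.h>
#include <stdlib.h>

struct sg_entry {
	void *page;
	unsigned int length;
};

/* Clone the exporter's sg entries so each dma-buf attachment can
 * be mapped and unmapped independently, mirroring the
 * sg_alloc_table + sg_set_page copy loop above. */
static struct sg_entry *clone_sg(const struct sg_entry *src, int nents)
{
	struct sg_entry *dst = malloc(nents * sizeof(*dst));
	int i;

	if (!dst)
		return NULL;
	for (i = 0; i < nents; i++)
		dst[i] = src[i];	/* same pages, independent table */
	return dst;
}

int main(void)
{
	char page0[4096], page1[4096];
	struct sg_entry src[] = {
		{ page0, sizeof(page0) },
		{ page1, sizeof(page1) },
	};
	struct sg_entry *dst = clone_sg(src, 2);

	if (dst)
		printf("cloned %u bytes in entry 0\n", dst[0].length);
	free(dst);
	return 0;
}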
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index fd408995a783..776a3225184c 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -43,7 +43,8 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
int
i915_gem_evict_something(struct drm_device *dev, int min_size,
- unsigned alignment, bool mappable)
+ unsigned alignment, unsigned cache_level,
+ bool mappable, bool nonblocking)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
@@ -78,11 +79,11 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
INIT_LIST_HEAD(&unwind_list);
if (mappable)
drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
- min_size, alignment, 0,
+ min_size, alignment, cache_level,
0, dev_priv->mm.gtt_mappable_end);
else
drm_mm_init_scan(&dev_priv->mm.gtt_space,
- min_size, alignment, 0);
+ min_size, alignment, cache_level);
/* First see if there is a large enough contiguous idle region... */
list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
@@ -90,29 +91,16 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
goto found;
}
- /* Now merge in the soon-to-be-expired objects... */
- list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
- /* Does the object require an outstanding flush? */
- if (obj->base.write_domain)
- continue;
-
- if (mark_free(obj, &unwind_list))
- goto found;
- }
+ if (nonblocking)
+ goto none;
- /* Finally add anything with a pending flush (in order of retirement) */
- list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
- if (mark_free(obj, &unwind_list))
- goto found;
- }
+ /* Now merge in the soon-to-be-expired objects... */
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
- if (!obj->base.write_domain)
- continue;
-
if (mark_free(obj, &unwind_list))
goto found;
}
+none:
/* Nothing found, clean up and bail out! */
while (!list_empty(&unwind_list)) {
obj = list_first_entry(&unwind_list,
@@ -163,7 +151,7 @@ found:
}
int
-i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
+i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj, *next;
@@ -171,12 +159,11 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
int ret;
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->mm.active_list));
if (lists_empty)
return -ENOSPC;
- trace_i915_gem_evict_everything(dev, purgeable_only);
+ trace_i915_gem_evict_everything(dev);
/* The gpu_idle will flush everything in the write domain to the
* active list. Then we must move everything off the active list
@@ -188,16 +175,11 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
i915_gem_retire_requests(dev);
- BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
-
/* Having flushed everything, unbind() should never raise an error */
list_for_each_entry_safe(obj, next,
- &dev_priv->mm.inactive_list, mm_list) {
- if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
- if (obj->pin_count == 0)
- WARN_ON(i915_gem_object_unbind(obj));
- }
- }
+ &dev_priv->mm.inactive_list, mm_list)
+ if (obj->pin_count == 0)
+ WARN_ON(i915_gem_object_unbind(obj));
return 0;
}
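
With the flushing list gone, eviction scans the inactive list first and falls back to active objects only when the caller may block. A simplified sketch of that ordering over a flat array of hypothetical objects:

#include <stdbool.h>
#include <stdio.h>

struct obj { unsigned long size; bool active; };

/* Mirror the new scan order: inactive objects are considered
 * first, and active ones only when the caller may block on the
 * GPU (i.e. !nonblocking). */
static int pick_victims(const struct obj *objs, int n,
			unsigned long need, bool nonblocking)
{
	unsigned long found = 0;
	int i;

	for (i = 0; i < n && found < need; i++)
		if (!objs[i].active)
			found += objs[i].size;

	if (!nonblocking)
		for (i = 0; i < n && found < need; i++)
			if (objs[i].active)
				found += objs[i].size;

	return found >= need ? 0 : -1;	/* -ENOSPC analogue */
}

int main(void)
{
	struct obj objs[] = { { 4096, true }, { 8192, false } };

	printf("nonblocking: %d, blocking: %d\n",
	       pick_victims(objs, 2, 12288, true),
	       pick_victims(objs, 2, 12288, false));
	return 0;
}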
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 8dd9a6f47db8..3eea143749f6 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,180 +33,6 @@
#include "intel_drv.h"
#include <linux/dma_remapping.h>
-struct change_domains {
- uint32_t invalidate_domains;
- uint32_t flush_domains;
- uint32_t flush_rings;
- uint32_t flips;
-};
-
-/*
- * Set the next domain for the specified object. This
- * may not actually perform the necessary flushing/invaliding though,
- * as that may want to be batched with other set_domain operations
- *
- * This is (we hope) the only really tricky part of gem. The goal
- * is fairly simple -- track which caches hold bits of the object
- * and make sure they remain coherent. A few concrete examples may
- * help to explain how it works. For shorthand, we use the notation
- * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the
- * a pair of read and write domain masks.
- *
- * Case 1: the batch buffer
- *
- * 1. Allocated
- * 2. Written by CPU
- * 3. Mapped to GTT
- * 4. Read by GPU
- * 5. Unmapped from GTT
- * 6. Freed
- *
- * Let's take these a step at a time
- *
- * 1. Allocated
- * Pages allocated from the kernel may still have
- * cache contents, so we set them to (CPU, CPU) always.
- * 2. Written by CPU (using pwrite)
- * The pwrite function calls set_domain (CPU, CPU) and
- * this function does nothing (as nothing changes)
- * 3. Mapped by GTT
- * This function asserts that the object is not
- * currently in any GPU-based read or write domains
- * 4. Read by GPU
- * i915_gem_execbuffer calls set_domain (COMMAND, 0).
- * As write_domain is zero, this function adds in the
- * current read domains (CPU+COMMAND, 0).
- * flush_domains is set to CPU.
- * invalidate_domains is set to COMMAND
- * clflush is run to get data out of the CPU caches
- * then i915_dev_set_domain calls i915_gem_flush to
- * emit an MI_FLUSH and drm_agp_chipset_flush
- * 5. Unmapped from GTT
- * i915_gem_object_unbind calls set_domain (CPU, CPU)
- * flush_domains and invalidate_domains end up both zero
- * so no flushing/invalidating happens
- * 6. Freed
- * yay, done
- *
- * Case 2: The shared render buffer
- *
- * 1. Allocated
- * 2. Mapped to GTT
- * 3. Read/written by GPU
- * 4. set_domain to (CPU,CPU)
- * 5. Read/written by CPU
- * 6. Read/written by GPU
- *
- * 1. Allocated
- * Same as last example, (CPU, CPU)
- * 2. Mapped to GTT
- * Nothing changes (assertions find that it is not in the GPU)
- * 3. Read/written by GPU
- * execbuffer calls set_domain (RENDER, RENDER)
- * flush_domains gets CPU
- * invalidate_domains gets GPU
- * clflush (obj)
- * MI_FLUSH and drm_agp_chipset_flush
- * 4. set_domain (CPU, CPU)
- * flush_domains gets GPU
- * invalidate_domains gets CPU
- * wait_rendering (obj) to make sure all drawing is complete.
- * This will include an MI_FLUSH to get the data from GPU
- * to memory
- * clflush (obj) to invalidate the CPU cache
- * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
- * 5. Read/written by CPU
- * cache lines are loaded and dirtied
- * 6. Read written by GPU
- * Same as last GPU access
- *
- * Case 3: The constant buffer
- *
- * 1. Allocated
- * 2. Written by CPU
- * 3. Read by GPU
- * 4. Updated (written) by CPU again
- * 5. Read by GPU
- *
- * 1. Allocated
- * (CPU, CPU)
- * 2. Written by CPU
- * (CPU, CPU)
- * 3. Read by GPU
- * (CPU+RENDER, 0)
- * flush_domains = CPU
- * invalidate_domains = RENDER
- * clflush (obj)
- * MI_FLUSH
- * drm_agp_chipset_flush
- * 4. Updated (written) by CPU again
- * (CPU, CPU)
- * flush_domains = 0 (no previous write domain)
- * invalidate_domains = 0 (no new read domains)
- * 5. Read by GPU
- * (CPU+RENDER, 0)
- * flush_domains = CPU
- * invalidate_domains = RENDER
- * clflush (obj)
- * MI_FLUSH
- * drm_agp_chipset_flush
- */
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring,
- struct change_domains *cd)
-{
- uint32_t invalidate_domains = 0, flush_domains = 0;
-
- /*
- * If the object isn't moving to a new write domain,
- * let the object stay in multiple read domains
- */
- if (obj->base.pending_write_domain == 0)
- obj->base.pending_read_domains |= obj->base.read_domains;
-
- /*
- * Flush the current write domain if
- * the new read domains don't match. Invalidate
- * any read domains which differ from the old
- * write domain
- */
- if (obj->base.write_domain &&
- (((obj->base.write_domain != obj->base.pending_read_domains ||
- obj->ring != ring)) ||
- (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
- flush_domains |= obj->base.write_domain;
- invalidate_domains |=
- obj->base.pending_read_domains & ~obj->base.write_domain;
- }
- /*
- * Invalidate any read caches which may have
- * stale data. That is, any new read domains.
- */
- invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
- if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
- i915_gem_clflush_object(obj);
-
- if (obj->base.pending_write_domain)
- cd->flips |= atomic_read(&obj->pending_flip);
-
- /* The actual obj->write_domain will be updated with
- * pending_write_domain after we emit the accumulated flush for all
- * of our domain changes in execbuffers (which clears objects'
- * write_domains). So if we have a current write domain that we
- * aren't changing, set pending_write_domain to that.
- */
- if (flush_domains == 0 && obj->base.pending_write_domain == 0)
- obj->base.pending_write_domain = obj->base.write_domain;
-
- cd->invalidate_domains |= invalidate_domains;
- cd->flush_domains |= flush_domains;
- if (flush_domains & I915_GEM_GPU_DOMAINS)
- cd->flush_rings |= intel_ring_flag(obj->ring);
- if (invalidate_domains & I915_GEM_GPU_DOMAINS)
- cd->flush_rings |= intel_ring_flag(ring);
-}
-
struct eb_objects {
int and;
struct hlist_head buckets[0];
@@ -217,6 +43,7 @@ eb_create(int size)
{
struct eb_objects *eb;
int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+ BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
while (count > size)
count >>= 1;
eb = kzalloc(count*sizeof(struct hlist_head) +
@@ -268,6 +95,7 @@ eb_destroy(struct eb_objects *eb)
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
+ !obj->map_and_fenceable ||
obj->cache_level != I915_CACHE_NONE);
}
@@ -382,7 +210,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
+ vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+ reloc->offset >> PAGE_SHIFT));
*(uint32_t *)(vaddr + page_offset) = reloc->delta;
kunmap_atomic(vaddr);
} else {
@@ -503,7 +332,8 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
return ret;
}
-#define __EXEC_OBJECT_HAS_FENCE (1<<31)
+#define __EXEC_OBJECT_HAS_PIN (1<<31)
+#define __EXEC_OBJECT_HAS_FENCE (1<<30)
static int
need_reloc_mappable(struct drm_i915_gem_object *obj)
@@ -513,9 +343,10 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)
}
static int
-pin_and_fence_object(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring)
+i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring)
{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
bool need_fence, need_mappable;
@@ -527,15 +358,17 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
- ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
+ ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
if (ret)
return ret;
+ entry->flags |= __EXEC_OBJECT_HAS_PIN;
+
if (has_fenced_gpu_access) {
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
ret = i915_gem_object_get_fence(obj);
if (ret)
- goto err_unpin;
+ return ret;
if (i915_gem_object_pin_fence(obj))
entry->flags |= __EXEC_OBJECT_HAS_FENCE;
@@ -544,12 +377,35 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
}
}
+ /* Ensure ppgtt mapping exists if needed */
+ if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
+ i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
+ obj, obj->cache_level);
+
+ obj->has_aliasing_ppgtt_mapping = 1;
+ }
+
entry->offset = obj->gtt_offset;
return 0;
+}
-err_unpin:
- i915_gem_object_unpin(obj);
- return ret;
+static void
+i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_gem_exec_object2 *entry;
+
+ if (!obj->gtt_space)
+ return;
+
+ entry = obj->exec_entry;
+
+ if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+ i915_gem_object_unpin_fence(obj);
+
+ if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+ i915_gem_object_unpin(obj);
+
+ entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}
static int
@@ -557,11 +413,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct drm_file *file,
struct list_head *objects)
{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_object *obj;
- int ret, retry;
- bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
struct list_head ordered_objects;
+ bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+ int retry;
INIT_LIST_HEAD(&ordered_objects);
while (!list_empty(objects)) {
@@ -586,6 +441,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
obj->base.pending_read_domains = 0;
obj->base.pending_write_domain = 0;
+ obj->pending_fenced_gpu_access = false;
}
list_splice(&ordered_objects, objects);
@@ -598,12 +454,12 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
* 2. Bind new objects.
* 3. Decrement pin count.
*
- * This avoid unnecessary unbinding of later objects in order to makr
+	 * This avoids unnecessary unbinding of later objects in order to make
* room for the earlier objects *unless* we need to defragment.
*/
retry = 0;
do {
- ret = 0;
+ int ret = 0;
/* Unbind any ill-fitting objects or pin. */
list_for_each_entry(obj, objects, exec_list) {
@@ -623,7 +479,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
(need_mappable && !obj->map_and_fenceable))
ret = i915_gem_object_unbind(obj);
else
- ret = pin_and_fence_object(obj, ring);
+ ret = i915_gem_execbuffer_reserve_object(obj, ring);
if (ret)
goto err;
}
@@ -633,77 +489,22 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
if (obj->gtt_space)
continue;
- ret = pin_and_fence_object(obj, ring);
- if (ret) {
- int ret_ignore;
-
- /* This can potentially raise a harmless
- * -EINVAL if we failed to bind in the above
- * call. It cannot raise -EINTR since we know
- * that the bo is freshly bound and so will
- * not need to be flushed or waited upon.
- */
- ret_ignore = i915_gem_object_unbind(obj);
- (void)ret_ignore;
- WARN_ON(obj->gtt_space);
- break;
- }
+ ret = i915_gem_execbuffer_reserve_object(obj, ring);
+ if (ret)
+ goto err;
}
- /* Decrement pin count for bound objects */
- list_for_each_entry(obj, objects, exec_list) {
- struct drm_i915_gem_exec_object2 *entry;
-
- if (!obj->gtt_space)
- continue;
-
- entry = obj->exec_entry;
- if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
- i915_gem_object_unpin_fence(obj);
- entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
- }
-
- i915_gem_object_unpin(obj);
-
- /* ... and ensure ppgtt mapping exist if needed. */
- if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
- i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
- obj, obj->cache_level);
+err: /* Decrement pin count for bound objects */
+ list_for_each_entry(obj, objects, exec_list)
+ i915_gem_execbuffer_unreserve_object(obj);
- obj->has_aliasing_ppgtt_mapping = 1;
- }
- }
-
- if (ret != -ENOSPC || retry > 1)
+ if (ret != -ENOSPC || retry++)
return ret;
- /* First attempt, just clear anything that is purgeable.
- * Second attempt, clear the entire GTT.
- */
- ret = i915_gem_evict_everything(ring->dev, retry == 0);
+ ret = i915_gem_evict_everything(ring->dev);
if (ret)
return ret;
-
- retry++;
} while (1);
-
-err:
- list_for_each_entry_continue_reverse(obj, objects, exec_list) {
- struct drm_i915_gem_exec_object2 *entry;
-
- if (!obj->gtt_space)
- continue;
-
- entry = obj->exec_entry;
- if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
- i915_gem_object_unpin_fence(obj);
- entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
- }
-
- i915_gem_object_unpin(obj);
- }
-
- return ret;
}
static int
@@ -809,18 +610,6 @@ err:
return ret;
}
-static void
-i915_gem_execbuffer_flush(struct drm_device *dev,
- uint32_t invalidate_domains,
- uint32_t flush_domains)
-{
- if (flush_domains & I915_GEM_DOMAIN_CPU)
- intel_gtt_chipset_flush();
-
- if (flush_domains & I915_GEM_DOMAIN_GTT)
- wmb();
-}
-
static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
{
@@ -853,48 +642,45 @@ i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
return 0;
}
-
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
- struct change_domains cd;
+ uint32_t flush_domains = 0;
+ uint32_t flips = 0;
int ret;
- memset(&cd, 0, sizeof(cd));
- list_for_each_entry(obj, objects, exec_list)
- i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
-
- if (cd.invalidate_domains | cd.flush_domains) {
- i915_gem_execbuffer_flush(ring->dev,
- cd.invalidate_domains,
- cd.flush_domains);
- }
-
- if (cd.flips) {
- ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
+ list_for_each_entry(obj, objects, exec_list) {
+ ret = i915_gem_object_sync(obj, ring);
if (ret)
return ret;
+
+ if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
+ i915_gem_clflush_object(obj);
+
+ if (obj->base.pending_write_domain)
+ flips |= atomic_read(&obj->pending_flip);
+
+ flush_domains |= obj->base.write_domain;
}
- list_for_each_entry(obj, objects, exec_list) {
- ret = i915_gem_object_sync(obj, ring);
+ if (flips) {
+ ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
if (ret)
return ret;
}
+ if (flush_domains & I915_GEM_DOMAIN_CPU)
+ intel_gtt_chipset_flush();
+
+ if (flush_domains & I915_GEM_DOMAIN_GTT)
+ wmb();
+
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
- ret = i915_gem_flush_ring(ring,
- I915_GEM_GPU_DOMAINS,
- ring->gpu_caches_dirty ? I915_GEM_GPU_DOMAINS : 0);
- if (ret)
- return ret;
-
- ring->gpu_caches_dirty = false;
- return 0;
+ return intel_ring_invalidate_all_caches(ring);
}
static bool
@@ -942,9 +728,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
struct drm_i915_gem_object *obj;
list_for_each_entry(obj, objects, exec_list) {
- u32 old_read = obj->base.read_domains;
- u32 old_write = obj->base.write_domain;
-
+ u32 old_read = obj->base.read_domains;
+ u32 old_write = obj->base.write_domain;
obj->base.read_domains = obj->base.pending_read_domains;
obj->base.write_domain = obj->base.pending_write_domain;
@@ -953,17 +738,13 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
i915_gem_object_move_to_active(obj, ring, seqno);
if (obj->base.write_domain) {
obj->dirty = 1;
- obj->pending_gpu_write = true;
- list_move_tail(&obj->gpu_write_list,
- &ring->gpu_write_list);
+ obj->last_write_seqno = seqno;
if (obj->pin_count) /* check for potential scanout */
- intel_mark_busy(ring->dev, obj);
+ intel_mark_fb_busy(obj);
}
trace_i915_gem_object_change_domain(obj, old_read, old_write);
}
-
- intel_mark_busy(ring->dev, NULL);
}
static void
@@ -971,16 +752,11 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct drm_file *file,
struct intel_ring_buffer *ring)
{
- struct drm_i915_gem_request *request;
-
/* Unconditionally force add_request to emit a full flush. */
ring->gpu_caches_dirty = true;
/* Add a breadcrumb for the completion of the batch buffer */
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL || i915_add_request(ring, file, request)) {
- kfree(request);
- }
+ (void)i915_add_request(ring, file, NULL);
}
static int
@@ -1326,8 +1102,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
return -ENOMEM;
}
ret = copy_from_user(exec_list,
- (struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
+ (void __user *)(uintptr_t)args->buffers_ptr,
sizeof(*exec_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
@@ -1366,8 +1141,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
for (i = 0; i < args->buffer_count; i++)
exec_list[i].offset = exec2_list[i].offset;
/* ... and back out to userspace */
- ret = copy_to_user((struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
+ ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
exec_list,
sizeof(*exec_list) * args->buffer_count);
if (ret) {
@@ -1421,8 +1195,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
- ret = copy_to_user((struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
+ ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
exec2_list,
sizeof(*exec2_list) * args->buffer_count);
if (ret) {
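
The rewritten move_to_gpu path above replaces the per-object domain bookkeeping with a single pass that accumulates the outstanding write domains and pending flips, then flushes once per submission. A sketch of that accumulation with hypothetical per-object state:

#include <stdint.h>
#include <stdio.h>

#define DOMAIN_CPU (1u << 0)
#define DOMAIN_GTT (1u << 1)

struct obj { uint32_t write_domain; uint32_t pending_flip; };

/* One pass over the batch accumulates everything that must be
 * flushed, so the chipset flush / wmb() run at most once per
 * submission instead of per object. */
static void gather_flushes(const struct obj *objs, int n,
			   uint32_t *flush_domains, uint32_t *flips)
{
	int i;

	*flush_domains = 0;
	*flips = 0;
	for (i = 0; i < n; i++) {
		/* a CPU-dirty object would be clflushed here */
		*flips |= objs[i].pending_flip;
		*flush_domains |= objs[i].write_domain;
	}
}

int main(void)
{
	struct obj objs[] = { { DOMAIN_CPU, 0 }, { DOMAIN_GTT, 1 } };
	uint32_t domains, flips;

	gather_flushes(objs, 2, &domains, &flips);
	printf("domains=%#x flips=%#x\n", domains, flips);
	return 0;
}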
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 69261acb94b3..df470b5e8d36 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -166,8 +166,7 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
}
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
- struct scatterlist *sg_list,
- unsigned sg_len,
+ const struct sg_table *pages,
unsigned first_entry,
uint32_t pte_flags)
{
@@ -179,12 +178,12 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
struct scatterlist *sg;
/* init sg walking */
- sg = sg_list;
+ sg = pages->sgl;
i = 0;
segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
m = 0;
- while (i < sg_len) {
+ while (i < pages->nents) {
pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
@@ -193,13 +192,11 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
pt_vaddr[j] = pte | pte_flags;
/* grab the next page */
- m++;
- if (m == segment_len) {
- sg = sg_next(sg);
- i++;
- if (i == sg_len)
+ if (++m == segment_len) {
+ if (++i == pages->nents)
break;
+ sg = sg_next(sg);
segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
m = 0;
}
@@ -212,44 +209,10 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
}
}
-static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt,
- unsigned first_entry, unsigned num_entries,
- struct page **pages, uint32_t pte_flags)
-{
- uint32_t *pt_vaddr, pte;
- unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
- unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
- unsigned last_pte, i;
- dma_addr_t page_addr;
-
- while (num_entries) {
- last_pte = first_pte + num_entries;
- last_pte = min_t(unsigned, last_pte, I915_PPGTT_PT_ENTRIES);
-
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
-
- for (i = first_pte; i < last_pte; i++) {
- page_addr = page_to_phys(*pages);
- pte = GEN6_PTE_ADDR_ENCODE(page_addr);
- pt_vaddr[i] = pte | pte_flags;
-
- pages++;
- }
-
- kunmap_atomic(pt_vaddr);
-
- num_entries -= last_pte - first_pte;
- first_pte = 0;
- act_pd++;
- }
-}
-
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t pte_flags = GEN6_PTE_VALID;
switch (cache_level) {
@@ -260,7 +223,7 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
pte_flags |= GEN6_PTE_CACHE_LLC;
break;
case I915_CACHE_NONE:
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(obj->base.dev))
pte_flags |= HSW_PTE_UNCACHED;
else
pte_flags |= GEN6_PTE_UNCACHED;
@@ -269,26 +232,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
BUG();
}
- if (obj->sg_table) {
- i915_ppgtt_insert_sg_entries(ppgtt,
- obj->sg_table->sgl,
- obj->sg_table->nents,
- obj->gtt_space->start >> PAGE_SHIFT,
- pte_flags);
- } else if (dev_priv->mm.gtt->needs_dmar) {
- BUG_ON(!obj->sg_list);
-
- i915_ppgtt_insert_sg_entries(ppgtt,
- obj->sg_list,
- obj->num_sg,
- obj->gtt_space->start >> PAGE_SHIFT,
- pte_flags);
- } else
- i915_ppgtt_insert_pages(ppgtt,
- obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT,
- obj->pages,
- pte_flags);
+ i915_ppgtt_insert_sg_entries(ppgtt,
+ obj->pages,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ pte_flags);
}
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
@@ -350,7 +297,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
- list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
i915_gem_clflush_object(obj);
i915_gem_gtt_bind_object(obj, obj->cache_level);
}
@@ -360,44 +307,26 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /* don't map imported dma buf objects */
- if (dev_priv->mm.gtt->needs_dmar && !obj->sg_table)
- return intel_gtt_map_memory(obj->pages,
- obj->base.size >> PAGE_SHIFT,
- &obj->sg_list,
- &obj->num_sg);
- else
+ if (obj->has_dma_mapping)
return 0;
+
+ if (!dma_map_sg(&obj->base.dev->pdev->dev,
+ obj->pages->sgl, obj->pages->nents,
+ PCI_DMA_BIDIRECTIONAL))
+ return -ENOSPC;
+
+ return 0;
}
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
- if (obj->sg_table) {
- intel_gtt_insert_sg_entries(obj->sg_table->sgl,
- obj->sg_table->nents,
- obj->gtt_space->start >> PAGE_SHIFT,
- agp_type);
- } else if (dev_priv->mm.gtt->needs_dmar) {
- BUG_ON(!obj->sg_list);
-
- intel_gtt_insert_sg_entries(obj->sg_list,
- obj->num_sg,
- obj->gtt_space->start >> PAGE_SHIFT,
- agp_type);
- } else
- intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT,
- obj->pages,
- agp_type);
-
+ intel_gtt_insert_sg_entries(obj->pages,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ agp_type);
obj->has_global_gtt_mapping = 1;
}
@@ -417,14 +346,31 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
interruptible = do_idling(dev_priv);
- if (obj->sg_list) {
- intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
- obj->sg_list = NULL;
- }
+ if (!obj->has_dma_mapping)
+ dma_unmap_sg(&dev->pdev->dev,
+ obj->pages->sgl, obj->pages->nents,
+ PCI_DMA_BIDIRECTIONAL);
undo_idling(dev_priv, interruptible);
}
+static void i915_gtt_color_adjust(struct drm_mm_node *node,
+ unsigned long color,
+ unsigned long *start,
+ unsigned long *end)
+{
+ if (node->color != color)
+ *start += 4096;
+
+ if (!list_empty(&node->node_list)) {
+ node = list_entry(node->node_list.next,
+ struct drm_mm_node,
+ node_list);
+ if (node->allocated && node->color != color)
+ *end -= 4096;
+ }
+}
+
void i915_gem_init_global_gtt(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
@@ -434,6 +380,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
	/* Subtract the guard page ... */
drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
+ if (!HAS_LLC(dev))
+ dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
dev_priv->mm.gtt_start = start;
dev_priv->mm.gtt_mappable_end = mappable_end;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index c2b7b67e410d..3208650a235c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -469,18 +469,20 @@ i915_gem_swizzle_page(struct page *page)
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
+ struct scatterlist *sg;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
if (obj->bit_17 == NULL)
return;
- for (i = 0; i < page_count; i++) {
- char new_bit_17 = page_to_phys(obj->pages[i]) >> 17;
+ for_each_sg(obj->pages->sgl, sg, page_count, i) {
+ struct page *page = sg_page(sg);
+ char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) !=
(test_bit(i, obj->bit_17) != 0)) {
- i915_gem_swizzle_page(obj->pages[i]);
- set_page_dirty(obj->pages[i]);
+ i915_gem_swizzle_page(page);
+ set_page_dirty(page);
}
}
}
@@ -488,6 +490,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
+ struct scatterlist *sg;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
@@ -501,8 +504,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
}
}
- for (i = 0; i < page_count; i++) {
- if (page_to_phys(obj->pages[i]) & (1 << 17))
+ for_each_sg(obj->pages->sgl, sg, page_count, i) {
+ struct page *page = sg_page(sg);
+ if (page_to_phys(page) & (1 << 17))
__set_bit(i, obj->bit_17);
else
__clear_bit(i, obj->bit_17);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 505357886bbb..4e9888388c0c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -295,11 +295,21 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_helper_hpd_irq_event(dev);
}
-static void i915_handle_rps_change(struct drm_device *dev)
+/* defined in intel_pm.c */
+extern spinlock_t mchdev_lock;
+
+static void ironlake_handle_rps_change(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 busy_up, busy_down, max_avg, min_avg;
- u8 new_delay = dev_priv->cur_delay;
+ u8 new_delay;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mchdev_lock, flags);
+
+ I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
+
+ new_delay = dev_priv->ips.cur_delay;
I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
busy_up = I915_READ(RCPREVBSYTUPAVG);
@@ -309,19 +319,21 @@ static void i915_handle_rps_change(struct drm_device *dev)
/* Handle RCS change request from hw */
if (busy_up > max_avg) {
- if (dev_priv->cur_delay != dev_priv->max_delay)
- new_delay = dev_priv->cur_delay - 1;
- if (new_delay < dev_priv->max_delay)
- new_delay = dev_priv->max_delay;
+ if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
+ new_delay = dev_priv->ips.cur_delay - 1;
+ if (new_delay < dev_priv->ips.max_delay)
+ new_delay = dev_priv->ips.max_delay;
} else if (busy_down < min_avg) {
- if (dev_priv->cur_delay != dev_priv->min_delay)
- new_delay = dev_priv->cur_delay + 1;
- if (new_delay > dev_priv->min_delay)
- new_delay = dev_priv->min_delay;
+ if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
+ new_delay = dev_priv->ips.cur_delay + 1;
+ if (new_delay > dev_priv->ips.min_delay)
+ new_delay = dev_priv->ips.min_delay;
}
if (ironlake_set_drps(dev, new_delay))
- dev_priv->cur_delay = new_delay;
+ dev_priv->ips.cur_delay = new_delay;
+
+ spin_unlock_irqrestore(&mchdev_lock, flags);
return;
}
@@ -334,7 +346,7 @@ static void notify_ring(struct drm_device *dev,
if (ring->obj == NULL)
return;
- trace_i915_gem_request_complete(ring, ring->get_seqno(ring));
+ trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
wake_up_all(&ring->irq_queue);
if (i915_enable_hangcheck) {
@@ -348,16 +360,16 @@ static void notify_ring(struct drm_device *dev,
static void gen6_pm_rps_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
- rps_work);
+ rps.work);
u32 pm_iir, pm_imr;
u8 new_delay;
- spin_lock_irq(&dev_priv->rps_lock);
- pm_iir = dev_priv->pm_iir;
- dev_priv->pm_iir = 0;
+ spin_lock_irq(&dev_priv->rps.lock);
+ pm_iir = dev_priv->rps.pm_iir;
+ dev_priv->rps.pm_iir = 0;
pm_imr = I915_READ(GEN6_PMIMR);
I915_WRITE(GEN6_PMIMR, 0);
- spin_unlock_irq(&dev_priv->rps_lock);
+ spin_unlock_irq(&dev_priv->rps.lock);
if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
return;
@@ -365,11 +377,17 @@ static void gen6_pm_rps_work(struct work_struct *work)
mutex_lock(&dev_priv->dev->struct_mutex);
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
- new_delay = dev_priv->cur_delay + 1;
+ new_delay = dev_priv->rps.cur_delay + 1;
else
- new_delay = dev_priv->cur_delay - 1;
+ new_delay = dev_priv->rps.cur_delay - 1;
- gen6_set_rps(dev_priv->dev, new_delay);
+ /* sysfs frequency interfaces may have snuck in while servicing the
+ * interrupt
+ */
+ if (!(new_delay > dev_priv->rps.max_delay ||
+ new_delay < dev_priv->rps.min_delay)) {
+ gen6_set_rps(dev_priv->dev, new_delay);
+ }
mutex_unlock(&dev_priv->dev->struct_mutex);
}
@@ -443,7 +461,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long flags;
- if (!IS_IVYBRIDGE(dev))
+ if (!HAS_L3_GPU_CACHE(dev))
return;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -487,19 +505,19 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
* IIR bits should never already be set because IMR should
* prevent an interrupt from being shown in IIR. The warning
* displays a case where we've unsafely cleared
- * dev_priv->pm_iir. Although missing an interrupt of the same
+ * dev_priv->rps.pm_iir. Although missing an interrupt of the same
* type is not a problem, it displays a problem in the logic.
*
- * The mask bit in IMR is cleared by rps_work.
+ * The mask bit in IMR is cleared by dev_priv->rps.work.
*/
- spin_lock_irqsave(&dev_priv->rps_lock, flags);
- dev_priv->pm_iir |= pm_iir;
- I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+ spin_lock_irqsave(&dev_priv->rps.lock, flags);
+ dev_priv->rps.pm_iir |= pm_iir;
+ I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
POSTING_READ(GEN6_PMIMR);
- spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
+ spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
- queue_work(dev_priv->wq, &dev_priv->rps_work);
+ queue_work(dev_priv->wq, &dev_priv->rps.work);
}
static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
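
[Editor's note] The comment block above (in gen6_queue_rps_work) describes a mask-then-queue handshake between the interrupt handler and the RPS worker. Schematically, assuming write_pmimr() and handle_events() stand in for the GEN6_PMIMR write and the body of gen6_pm_rps_work():

#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct rps_sketch {
	spinlock_t lock;
	u32 pending;			/* software copy of the IIR bits */
	struct work_struct work;
};

void write_pmimr(u32 mask);	/* hypothetical: writes GEN6_PMIMR */
void handle_events(u32 iir);	/* hypothetical: adjusts the frequency */

/* IRQ side: record the event in software, mask it in hardware. */
static void rps_irq(struct rps_sketch *rps, u32 iir_bits)
{
	unsigned long flags;

	spin_lock_irqsave(&rps->lock, flags);
	WARN(rps->pending & iir_bits, "unsafely cleared IIR bits\n");
	rps->pending |= iir_bits;
	write_pmimr(rps->pending);	/* stays masked until the work runs */
	spin_unlock_irqrestore(&rps->lock, flags);

	schedule_work(&rps->work);
}

/* Work side: snapshot-and-clear, then unmask. */
static void rps_worker(struct rps_sketch *rps)
{
	u32 iir;

	spin_lock_irq(&rps->lock);
	iir = rps->pending;
	rps->pending = 0;
	write_pmimr(0);			/* new events may fire again */
	spin_unlock_irq(&rps->lock);

	handle_events(iir);
}

The invariant the WARN checks follows directly: as long as an event type is masked, the same IIR bit cannot be queued twice.
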
@@ -792,10 +810,8 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
ibx_irq_handler(dev, pch_iir);
}
- if (de_iir & DE_PCU_EVENT) {
- I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
- i915_handle_rps_change(dev);
- }
+ if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
+ ironlake_handle_rps_change(dev);
if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
gen6_queue_rps_work(dev_priv, pm_iir);
@@ -842,26 +858,55 @@ static void i915_error_work_func(struct work_struct *work)
}
}
+/* NB: callers rely on the memset to zero the slots a platform doesn't fill */
+static void i915_get_extra_instdone(struct drm_device *dev,
+ uint32_t *instdone)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 2:
+ case 3:
+ instdone[0] = I915_READ(INSTDONE);
+ break;
+ case 4:
+ case 5:
+ case 6:
+ instdone[0] = I915_READ(INSTDONE_I965);
+ instdone[1] = I915_READ(INSTDONE1);
+ break;
+ default:
+ WARN_ONCE(1, "Unsupported platform\n");
+ /* fall through: report the gen7 registers as a best effort */
+ case 7:
+ instdone[0] = I915_READ(GEN7_INSTDONE_1);
+ instdone[1] = I915_READ(GEN7_SC_INSTDONE);
+ instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
+ instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
+ break;
+ }
+}
+
#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *src)
{
struct drm_i915_error_object *dst;
- int page, page_count;
+ int i, count;
u32 reloc_offset;
if (src == NULL || src->pages == NULL)
return NULL;
- page_count = src->base.size / PAGE_SIZE;
+ count = src->base.size / PAGE_SIZE;
- dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC);
+ dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
if (dst == NULL)
return NULL;
reloc_offset = src->gtt_offset;
- for (page = 0; page < page_count; page++) {
+ for (i = 0; i < count; i++) {
unsigned long flags;
void *d;
@@ -884,30 +929,33 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
memcpy_fromio(d, s, PAGE_SIZE);
io_mapping_unmap_atomic(s);
} else {
+ struct page *page;
void *s;
- drm_clflush_pages(&src->pages[page], 1);
+ page = i915_gem_object_get_page(src, i);
+
+ drm_clflush_pages(&page, 1);
- s = kmap_atomic(src->pages[page]);
+ s = kmap_atomic(page);
memcpy(d, s, PAGE_SIZE);
kunmap_atomic(s);
- drm_clflush_pages(&src->pages[page], 1);
+ drm_clflush_pages(&page, 1);
}
local_irq_restore(flags);
- dst->pages[page] = d;
+ dst->pages[i] = d;
reloc_offset += PAGE_SIZE;
}
- dst->page_count = page_count;
+ dst->page_count = count;
dst->gtt_offset = src->gtt_offset;
return dst;
unwind:
- while (page--)
- kfree(dst->pages[page]);
+ while (i--)
+ kfree(dst->pages[i]);
kfree(dst);
return NULL;
}
@@ -948,7 +996,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
{
err->size = obj->base.size;
err->name = obj->base.name;
- err->seqno = obj->last_rendering_seqno;
+ err->rseqno = obj->last_read_seqno;
+ err->wseqno = obj->last_write_seqno;
err->gtt_offset = obj->gtt_offset;
err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain;
@@ -1038,12 +1087,12 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
if (!ring->get_seqno)
return NULL;
- seqno = ring->get_seqno(ring);
+ seqno = ring->get_seqno(ring, false);
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (obj->ring != ring)
continue;
- if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
+ if (i915_seqno_passed(seqno, obj->last_read_seqno))
continue;
if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
@@ -1079,10 +1128,8 @@ static void i915_record_ring_state(struct drm_device *dev,
error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
- if (ring->id == RCS) {
- error->instdone1 = I915_READ(INSTDONE1);
+ if (ring->id == RCS)
error->bbaddr = I915_READ64(BB_ADDR);
- }
} else {
error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
error->ipeir[ring->id] = I915_READ(IPEIR);
@@ -1092,7 +1139,7 @@ static void i915_record_ring_state(struct drm_device *dev,
error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
- error->seqno[ring->id] = ring->get_seqno(ring);
+ error->seqno[ring->id] = ring->get_seqno(ring, false);
error->acthd[ring->id] = intel_ring_get_active_head(ring);
error->head[ring->id] = I915_READ_HEAD(ring);
error->tail[ring->id] = I915_READ_TAIL(ring);
@@ -1198,6 +1245,11 @@ static void i915_capture_error_state(struct drm_device *dev)
error->done_reg = I915_READ(DONE_REG);
}
+ if (INTEL_INFO(dev)->gen == 7)
+ error->err_int = I915_READ(GEN7_ERR_INT);
+
+ i915_get_extra_instdone(dev, error->extra_instdone);
+
i915_gem_record_fences(dev, error);
i915_gem_record_rings(dev, error);
@@ -1209,7 +1261,7 @@ static void i915_capture_error_state(struct drm_device *dev)
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
i++;
error->active_bo_count = i;
- list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
if (obj->pin_count)
i++;
error->pinned_bo_count = i - error->active_bo_count;
@@ -1234,7 +1286,7 @@ static void i915_capture_error_state(struct drm_device *dev)
error->pinned_bo_count =
capture_pinned_bo(error->pinned_bo,
error->pinned_bo_count,
- &dev_priv->mm.gtt_list);
+ &dev_priv->mm.bound_list);
do_gettimeofday(&error->time);
@@ -1273,24 +1325,26 @@ void i915_destroy_error_state(struct drm_device *dev)
static void i915_report_and_clear_eir(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t instdone[I915_NUM_INSTDONE_REG];
u32 eir = I915_READ(EIR);
- int pipe;
+ int pipe, i;
if (!eir)
return;
pr_err("render error detected, EIR: 0x%08x\n", eir);
+ i915_get_extra_instdone(dev, instdone);
+
if (IS_G4X(dev)) {
if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
u32 ipeir = I915_READ(IPEIR_I965);
pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
- pr_err(" INSTDONE: 0x%08x\n",
- I915_READ(INSTDONE_I965));
+ for (i = 0; i < ARRAY_SIZE(instdone); i++)
+ pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
- pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
POSTING_READ(IPEIR_I965);
@@ -1324,12 +1378,13 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
if (eir & I915_ERROR_INSTRUCTION) {
pr_err("instruction error\n");
pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
+ for (i = 0; i < ARRAY_SIZE(instdone); i++)
+ pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
if (INTEL_INFO(dev)->gen < 4) {
u32 ipeir = I915_READ(IPEIR);
pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
- pr_err(" INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
I915_WRITE(IPEIR, ipeir);
POSTING_READ(IPEIR);
@@ -1338,10 +1393,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
- pr_err(" INSTDONE: 0x%08x\n",
- I915_READ(INSTDONE_I965));
pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
- pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
POSTING_READ(IPEIR_I965);
@@ -1589,7 +1641,8 @@ ring_last_seqno(struct intel_ring_buffer *ring)
static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{
if (list_empty(&ring->request_list) ||
- i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
+ i915_seqno_passed(ring->get_seqno(ring, false),
+ ring_last_seqno(ring))) {
/* Issue a wake-up to catch stuck h/w. */
if (waitqueue_active(&ring->irq_queue)) {
DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
@@ -1655,7 +1708,7 @@ void i915_hangcheck_elapsed(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t acthd[I915_NUM_RINGS], instdone, instdone1;
+ uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
struct intel_ring_buffer *ring;
bool err = false, idle;
int i;
@@ -1683,25 +1736,16 @@ void i915_hangcheck_elapsed(unsigned long data)
return;
}
- if (INTEL_INFO(dev)->gen < 4) {
- instdone = I915_READ(INSTDONE);
- instdone1 = 0;
- } else {
- instdone = I915_READ(INSTDONE_I965);
- instdone1 = I915_READ(INSTDONE1);
- }
-
+ i915_get_extra_instdone(dev, instdone);
if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
- dev_priv->last_instdone == instdone &&
- dev_priv->last_instdone1 == instdone1) {
+ memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) {
if (i915_hangcheck_hung(dev))
return;
} else {
dev_priv->hangcheck_count = 0;
memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
- dev_priv->last_instdone = instdone;
- dev_priv->last_instdone1 = instdone1;
+ memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone));
}
repeat:
@@ -2646,7 +2690,7 @@ void intel_irq_init(struct drm_device *dev)
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
- INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
+ INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
dev->driver->get_vblank_counter = i915_get_vblank_counter;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 28725ce5b82c..7637824c6a7d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -450,6 +450,7 @@
#define RING_ACTHD(base) ((base)+0x74)
#define RING_NOPID(base) ((base)+0x94)
#define RING_IMR(base) ((base)+0xa8)
+#define RING_TIMESTAMP(base) ((base)+0x358)
#define TAIL_ADDR 0x001FFFF8
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
@@ -478,6 +479,11 @@
#define IPEIR_I965 0x02064
#define IPEHR_I965 0x02068
#define INSTDONE_I965 0x0206c
+#define GEN7_INSTDONE_1 0x0206c
+#define GEN7_SC_INSTDONE 0x07100
+#define GEN7_SAMPLER_INSTDONE 0x0e160
+#define GEN7_ROW_INSTDONE 0x0e164
+#define I915_NUM_INSTDONE_REG 4
#define RING_IPEIR(base) ((base)+0x64)
#define RING_IPEHR(base) ((base)+0x68)
#define RING_INSTDONE(base) ((base)+0x6c)
@@ -500,6 +506,8 @@
#define DMA_FADD_I8XX 0x020d0
#define ERROR_GEN6 0x040a0
+#define GEN7_ERR_INT 0x44040
+#define ERR_INT_MMIO_UNCLAIMED (1<<13)
/* GM45+ chicken bits -- debug workaround bits that may be required
* for various sorts of correct behavior. The top 16 bits of each are
@@ -529,6 +537,8 @@
#define GFX_PSMI_GRANULARITY (1<<10)
#define GFX_PPGTT_ENABLE (1<<9)
+#define VLV_DISPLAY_BASE 0x180000
+
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
#define IIR 0x020a4
@@ -1496,6 +1506,14 @@
GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
GEN7_CXT_GT1_SIZE(ctx_reg) + \
GEN7_CXT_VFSTATE_SIZE(ctx_reg))
+#define HSW_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 26) & 0x3f)
+#define HSW_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 23) & 0x7)
+#define HSW_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 15) & 0xff)
+#define HSW_CXT_TOTAL_SIZE(ctx_reg) (HSW_CXT_POWER_SIZE(ctx_reg) + \
+ HSW_CXT_RING_SIZE(ctx_reg) + \
+ HSW_CXT_RENDER_SIZE(ctx_reg) + \
+ GEN7_CXT_VFSTATE_SIZE(ctx_reg))
+
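
[Editor's note] As a worked example of the HSW_CXT_* field extraction above (the register value is made up; a real CXT_SIZE readout would be used instead):

#include <stdio.h>

#define HSW_CXT_POWER_SIZE(r)	(((r) >> 26) & 0x3f)	/* bits 31:26 */
#define HSW_CXT_RING_SIZE(r)	(((r) >> 23) & 0x7)	/* bits 25:23 */
#define HSW_CXT_RENDER_SIZE(r)	(((r) >> 15) & 0xff)	/* bits 22:15 */

int main(void)
{
	unsigned int r = 0x12345678;	/* arbitrary illustrative value */

	/* prints: power=4 ring=4 render=104 */
	printf("power=%u ring=%u render=%u\n",
	       HSW_CXT_POWER_SIZE(r), HSW_CXT_RING_SIZE(r),
	       HSW_CXT_RENDER_SIZE(r));
	return 0;
}

HSW_CXT_TOTAL_SIZE then simply sums these fields with the gen7 VF-state size.
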
/*
* Overlay regs
@@ -1549,12 +1567,35 @@
/* VGA port control */
#define ADPA 0x61100
+#define PCH_ADPA 0xe1100
+#define VLV_ADPA (VLV_DISPLAY_BASE + ADPA)
+
#define ADPA_DAC_ENABLE (1<<31)
#define ADPA_DAC_DISABLE 0
#define ADPA_PIPE_SELECT_MASK (1<<30)
#define ADPA_PIPE_A_SELECT 0
#define ADPA_PIPE_B_SELECT (1<<30)
#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
+/* CPT uses bits 29:30 for pch transcoder select */
+#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
+#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
+#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
+#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
+#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
+#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
+#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
+#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
+#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
+#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
+#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
+#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
+#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
+#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
+#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
#define ADPA_USE_VGA_HVPOLARITY (1<<15)
#define ADPA_SETS_HVPOLARITY 0
#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
@@ -1753,6 +1794,10 @@
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA 0x61178
+/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
+ * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
+ * of the infoframe structure specified by CEA-861. */
+#define VIDEO_DIP_DATA_SIZE 32
#define VIDEO_DIP_CTL 0x61170
/* Pre HSW: */
#define VIDEO_DIP_ENABLE (1 << 31)
@@ -3889,31 +3934,6 @@
#define FDI_PLL_CTL_1 0xfe000
#define FDI_PLL_CTL_2 0xfe004
-/* CRT */
-#define PCH_ADPA 0xe1100
-#define ADPA_TRANS_SELECT_MASK (1<<30)
-#define ADPA_TRANS_A_SELECT 0
-#define ADPA_TRANS_B_SELECT (1<<30)
-#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
-#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
-#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
-#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
-#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
-#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
-#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
-#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
-#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
-#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
-#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
-#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
-
/* or SDVOB */
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
@@ -4021,6 +4041,8 @@
#define PORT_TRANS_C_SEL_CPT (2<<29)
#define PORT_TRANS_SEL_MASK (3<<29)
#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29)
+#define PORT_TO_PIPE(val) (((val) & (1<<30)) >> 30)
+#define PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29)
#define TRANS_DP_CTL_A 0xe0300
#define TRANS_DP_CTL_B 0xe1300
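
[Editor's note] PORT_TO_PIPE and PORT_TO_PIPE_CPT above decode the pipe (or PCH transcoder) select field out of a port control register such as ADPA; only the field width differs. A kernel-style sketch of the intended use (port_reg_to_pipe() is hypothetical; intel_crt_get_hw_state() later in this patch does the same thing inline):

/* Pre-CPT: one bit selects pipe A/B. CPT: two bits select A/B/C. */
static int port_reg_to_pipe(u32 val, bool has_cpt)
{
	return has_cpt ? PORT_TO_PIPE_CPT(val) : PORT_TO_PIPE(val);
}
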
@@ -4239,7 +4261,15 @@
#define G4X_HDMIW_HDMIEDID 0x6210C
#define IBX_HDMIW_HDMIEDID_A 0xE2050
+#define IBX_HDMIW_HDMIEDID_B 0xE2150
+#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
+ IBX_HDMIW_HDMIEDID_A, \
+ IBX_HDMIW_HDMIEDID_B)
#define IBX_AUD_CNTL_ST_A 0xE20B4
+#define IBX_AUD_CNTL_ST_B 0xE21B4
+#define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \
+ IBX_AUD_CNTL_ST_A, \
+ IBX_AUD_CNTL_ST_B)
#define IBX_ELD_BUFFER_SIZE (0x1f << 10)
#define IBX_ELD_ADDRESS (0x1f << 5)
#define IBX_ELD_ACK (1 << 4)
@@ -4248,7 +4278,15 @@
#define IBX_CP_READYB (1 << 1)
#define CPT_HDMIW_HDMIEDID_A 0xE5050
+#define CPT_HDMIW_HDMIEDID_B 0xE5150
+#define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
+ CPT_HDMIW_HDMIEDID_A, \
+ CPT_HDMIW_HDMIEDID_B)
#define CPT_AUD_CNTL_ST_A 0xE50B4
+#define CPT_AUD_CNTL_ST_B 0xE51B4
+#define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \
+ CPT_AUD_CNTL_ST_A, \
+ CPT_AUD_CNTL_ST_B)
#define CPT_AUD_CNTRL_ST2 0xE50C0
/* These are the 4 32-bit write offset registers for each stream
@@ -4258,7 +4296,15 @@
#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4)
#define IBX_AUD_CONFIG_A 0xe2000
+#define IBX_AUD_CONFIG_B 0xe2100
+#define IBX_AUD_CFG(pipe) _PIPE(pipe, \
+ IBX_AUD_CONFIG_A, \
+ IBX_AUD_CONFIG_B)
#define CPT_AUD_CONFIG_A 0xe5000
+#define CPT_AUD_CONFIG_B 0xe5100
+#define CPT_AUD_CFG(pipe) _PIPE(pipe, \
+ CPT_AUD_CONFIG_A, \
+ CPT_AUD_CONFIG_B)
#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
#define AUD_CONFIG_UPPER_N_SHIFT 20
@@ -4269,195 +4315,233 @@
#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16)
#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
+/* HSW Audio */
+#define HSW_AUD_CONFIG_A 0x65000 /* Audio Configuration Transcoder A */
+#define HSW_AUD_CONFIG_B 0x65100 /* Audio Configuration Transcoder B */
+#define HSW_AUD_CFG(pipe) _PIPE(pipe, \
+ HSW_AUD_CONFIG_A, \
+ HSW_AUD_CONFIG_B)
+
+#define HSW_AUD_MISC_CTRL_A 0x65010 /* Audio Misc Control Convert 1 */
+#define HSW_AUD_MISC_CTRL_B 0x65110 /* Audio Misc Control Convert 2 */
+#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \
+ HSW_AUD_MISC_CTRL_A, \
+ HSW_AUD_MISC_CTRL_B)
+
+#define HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4 /* Audio DIP and ELD Control State Transcoder A */
+#define HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4 /* Audio DIP and ELD Control State Transcoder B */
+#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \
+ HSW_AUD_DIP_ELD_CTRL_ST_A, \
+ HSW_AUD_DIP_ELD_CTRL_ST_B)
+
+/* Audio Digital Converter */
+#define HSW_AUD_DIG_CNVT_1 0x65080 /* Audio Converter 1 */
+#define HSW_AUD_DIG_CNVT_2 0x65180 /* Audio Converter 2 */
+#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \
+ HSW_AUD_DIG_CNVT_1, \
+ HSW_AUD_DIG_CNVT_2)
+#define DIP_PORT_SEL_MASK 0x3
+
+#define HSW_AUD_EDID_DATA_A 0x65050
+#define HSW_AUD_EDID_DATA_B 0x65150
+#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \
+ HSW_AUD_EDID_DATA_A, \
+ HSW_AUD_EDID_DATA_B)
+
+#define HSW_AUD_PIPE_CONV_CFG 0x6507c /* Audio pipe and converter configs */
+#define HSW_AUD_PIN_ELD_CP_VLD 0x650c0 /* Audio ELD and CP Ready Status */
+#define AUDIO_INACTIVE_C (1<<11)
+#define AUDIO_INACTIVE_B (1<<7)
+#define AUDIO_INACTIVE_A (1<<3)
+#define AUDIO_OUTPUT_ENABLE_A (1<<2)
+#define AUDIO_OUTPUT_ENABLE_B (1<<6)
+#define AUDIO_OUTPUT_ENABLE_C (1<<10)
+#define AUDIO_ELD_VALID_A (1<<0)
+#define AUDIO_ELD_VALID_B (1<<4)
+#define AUDIO_ELD_VALID_C (1<<8)
+#define AUDIO_CP_READY_A (1<<1)
+#define AUDIO_CP_READY_B (1<<5)
+#define AUDIO_CP_READY_C (1<<9)
+
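
[Editor's note] The HSW_AUD_PIN_ELD_CP_VLD status bits above repeat every four bit positions per transcoder; an equivalent parameterized form (hypothetical, shown only to make the layout obvious) would be:

#define AUDIO_ELD_VALID(pipe)		(1 << (4 * (pipe) + 0))
#define AUDIO_CP_READY(pipe)		(1 << (4 * (pipe) + 1))
#define AUDIO_OUTPUT_ENABLE(pipe)	(1 << (4 * (pipe) + 2))
#define AUDIO_INACTIVE(pipe)		(1 << (4 * (pipe) + 3))
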
/* HSW Power Wells */
-#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */
-#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */
-#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */
-#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */
-#define HSW_PWR_WELL_ENABLE (1<<31)
-#define HSW_PWR_WELL_STATE (1<<30)
-#define HSW_PWR_WELL_CTL5 0x45410
+#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */
+#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */
+#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */
+#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */
+#define HSW_PWR_WELL_ENABLE (1<<31)
+#define HSW_PWR_WELL_STATE (1<<30)
+#define HSW_PWR_WELL_CTL5 0x45410
#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
-#define HSW_PWR_WELL_FORCE_ON (1<<19)
-#define HSW_PWR_WELL_CTL6 0x45414
+#define HSW_PWR_WELL_FORCE_ON (1<<19)
+#define HSW_PWR_WELL_CTL6 0x45414
/* Per-pipe DDI Function Control */
-#define PIPE_DDI_FUNC_CTL_A 0x60400
-#define PIPE_DDI_FUNC_CTL_B 0x61400
-#define PIPE_DDI_FUNC_CTL_C 0x62400
+#define PIPE_DDI_FUNC_CTL_A 0x60400
+#define PIPE_DDI_FUNC_CTL_B 0x61400
+#define PIPE_DDI_FUNC_CTL_C 0x62400
#define PIPE_DDI_FUNC_CTL_EDP 0x6F400
-#define DDI_FUNC_CTL(pipe) _PIPE(pipe, \
- PIPE_DDI_FUNC_CTL_A, \
- PIPE_DDI_FUNC_CTL_B)
+#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \
+ PIPE_DDI_FUNC_CTL_B)
#define PIPE_DDI_FUNC_ENABLE (1<<31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define PIPE_DDI_PORT_MASK (7<<28)
-#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
-#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
-#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
+#define PIPE_DDI_PORT_MASK (7<<28)
+#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
+#define PIPE_DDI_MODE_SELECT_MASK (7<<24)
+#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
+#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24)
#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24)
-#define PIPE_DDI_MODE_SELECT_FDI (4<<24)
-#define PIPE_DDI_BPC_8 (0<<20)
-#define PIPE_DDI_BPC_10 (1<<20)
-#define PIPE_DDI_BPC_6 (2<<20)
-#define PIPE_DDI_BPC_12 (3<<20)
-#define PIPE_DDI_BFI_ENABLE (1<<4)
-#define PIPE_DDI_PORT_WIDTH_X1 (0<<1)
-#define PIPE_DDI_PORT_WIDTH_X2 (1<<1)
-#define PIPE_DDI_PORT_WIDTH_X4 (3<<1)
+#define PIPE_DDI_MODE_SELECT_FDI (4<<24)
+#define PIPE_DDI_BPC_MASK (7<<20)
+#define PIPE_DDI_BPC_8 (0<<20)
+#define PIPE_DDI_BPC_10 (1<<20)
+#define PIPE_DDI_BPC_6 (2<<20)
+#define PIPE_DDI_BPC_12 (3<<20)
+#define PIPE_DDI_PVSYNC (1<<17)
+#define PIPE_DDI_PHSYNC (1<<16)
+#define PIPE_DDI_BFI_ENABLE (1<<4)
+#define PIPE_DDI_PORT_WIDTH_X1 (0<<1)
+#define PIPE_DDI_PORT_WIDTH_X2 (1<<1)
+#define PIPE_DDI_PORT_WIDTH_X4 (3<<1)
/* DisplayPort Transport Control */
#define DP_TP_CTL_A 0x64040
#define DP_TP_CTL_B 0x64140
-#define DP_TP_CTL(port) _PORT(port, \
- DP_TP_CTL_A, \
- DP_TP_CTL_B)
-#define DP_TP_CTL_ENABLE (1<<31)
-#define DP_TP_CTL_MODE_SST (0<<27)
-#define DP_TP_CTL_MODE_MST (1<<27)
+#define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B)
+#define DP_TP_CTL_ENABLE (1<<31)
+#define DP_TP_CTL_MODE_SST (0<<27)
+#define DP_TP_CTL_MODE_MST (1<<27)
#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18)
-#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
+#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
-#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
+#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
/* DisplayPort Transport Status */
#define DP_TP_STATUS_A 0x64044
#define DP_TP_STATUS_B 0x64144
-#define DP_TP_STATUS(port) _PORT(port, \
- DP_TP_STATUS_A, \
- DP_TP_STATUS_B)
+#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
/* DDI Buffer Control */
#define DDI_BUF_CTL_A 0x64000
#define DDI_BUF_CTL_B 0x64100
-#define DDI_BUF_CTL(port) _PORT(port, \
- DDI_BUF_CTL_A, \
- DDI_BUF_CTL_B)
-#define DDI_BUF_CTL_ENABLE (1<<31)
+#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
+#define DDI_BUF_CTL_ENABLE (1<<31)
#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
-#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
+#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
-#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */
+#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */
#define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */
-#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */
+#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */
#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
-#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
-#define DDI_BUF_EMP_MASK (0xf<<24)
-#define DDI_BUF_IS_IDLE (1<<7)
-#define DDI_PORT_WIDTH_X1 (0<<1)
-#define DDI_PORT_WIDTH_X2 (1<<1)
-#define DDI_PORT_WIDTH_X4 (3<<1)
+#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
+#define DDI_BUF_EMP_MASK (0xf<<24)
+#define DDI_BUF_IS_IDLE (1<<7)
+#define DDI_PORT_WIDTH_X1 (0<<1)
+#define DDI_PORT_WIDTH_X2 (1<<1)
+#define DDI_PORT_WIDTH_X4 (3<<1)
#define DDI_INIT_DISPLAY_DETECTED (1<<0)
/* DDI Buffer Translations */
#define DDI_BUF_TRANS_A 0x64E00
#define DDI_BUF_TRANS_B 0x64E60
-#define DDI_BUF_TRANS(port) _PORT(port, \
- DDI_BUF_TRANS_A, \
- DDI_BUF_TRANS_B)
+#define DDI_BUF_TRANS(port) _PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B)
/* Sideband Interface (SBI) is programmed indirectly, via
* SBI_ADDR, which contains the register offset; and SBI_DATA,
* which contains the payload */
-#define SBI_ADDR 0xC6000
-#define SBI_DATA 0xC6004
+#define SBI_ADDR 0xC6000
+#define SBI_DATA 0xC6004
#define SBI_CTL_STAT 0xC6008
#define SBI_CTL_OP_CRRD (0x6<<8)
#define SBI_CTL_OP_CRWR (0x7<<8)
#define SBI_RESPONSE_FAIL (0x1<<1)
-#define SBI_RESPONSE_SUCCESS (0x0<<1)
-#define SBI_BUSY (0x1<<0)
-#define SBI_READY (0x0<<0)
+#define SBI_RESPONSE_SUCCESS (0x0<<1)
+#define SBI_BUSY (0x1<<0)
+#define SBI_READY (0x0<<0)
/* SBI offsets */
-#define SBI_SSCDIVINTPHASE6 0x0600
+#define SBI_SSCDIVINTPHASE6 0x0600
#define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1)
#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1)
#define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8)
#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8)
-#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15)
+#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15)
#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
-#define SBI_SSCCTL 0x020c
+#define SBI_SSCCTL 0x020c
#define SBI_SSCCTL6 0x060C
-#define SBI_SSCCTL_DISABLE (1<<0)
+#define SBI_SSCCTL_DISABLE (1<<0)
#define SBI_SSCAUXDIV6 0x0610
#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
-#define SBI_DBUFF0 0x2a00
+#define SBI_DBUFF0 0x2a00
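
[Editor's note] The SBI comment above describes an indirect register window: the target offset goes into SBI_ADDR, the opcode and busy bit into SBI_CTL_STAT, and the payload moves through SBI_DATA. A sketch of a cross-request read under those rules (sbi_wait_ready() is a stand-in for the driver's wait_for() polling, and placing the offset in the high word of SBI_ADDR is an assumption of this sketch):

static void sbi_wait_ready(struct drm_i915_private *dev_priv)
{
	/* wait_for() is the driver's polling macro; ~100 ms timeout */
	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100))
		DRM_ERROR("timeout waiting for SBI to become ready\n");
}

static u32 sbi_read(struct drm_i915_private *dev_priv, u16 offset)
{
	sbi_wait_ready(dev_priv);		/* interface must be idle */

	I915_WRITE(SBI_ADDR, (u32)offset << 16);
	I915_WRITE(SBI_CTL_STAT, SBI_CTL_OP_CRRD | SBI_BUSY);

	sbi_wait_ready(dev_priv);		/* wait for completion */
	return I915_READ(SBI_DATA);
}
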
/* LPT PIXCLK_GATE */
-#define PIXCLK_GATE 0xC6020
-#define PIXCLK_GATE_UNGATE 1<<0
-#define PIXCLK_GATE_GATE 0<<0
+#define PIXCLK_GATE 0xC6020
+#define PIXCLK_GATE_UNGATE (1<<0)
+#define PIXCLK_GATE_GATE (0<<0)
/* SPLL */
-#define SPLL_CTL 0x46020
+#define SPLL_CTL 0x46020
#define SPLL_PLL_ENABLE (1<<31)
#define SPLL_PLL_SCC (1<<28)
#define SPLL_PLL_NON_SCC (2<<28)
-#define SPLL_PLL_FREQ_810MHz (0<<26)
-#define SPLL_PLL_FREQ_1350MHz (1<<26)
+#define SPLL_PLL_FREQ_810MHz (0<<26)
+#define SPLL_PLL_FREQ_1350MHz (1<<26)
/* WRPLL */
-#define WRPLL_CTL1 0x46040
-#define WRPLL_CTL2 0x46060
-#define WRPLL_PLL_ENABLE (1<<31)
-#define WRPLL_PLL_SELECT_SSC (0x01<<28)
-#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28)
+#define WRPLL_CTL1 0x46040
+#define WRPLL_CTL2 0x46060
+#define WRPLL_PLL_ENABLE (1<<31)
+#define WRPLL_PLL_SELECT_SSC (0x01<<28)
+#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28)
#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
/* WRPLL divider programming */
-#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
-#define WRPLL_DIVIDER_POST(x) ((x)<<8)
-#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16)
+#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
+#define WRPLL_DIVIDER_POST(x) ((x)<<8)
+#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16)
/* Port clock selection */
#define PORT_CLK_SEL_A 0x46100
#define PORT_CLK_SEL_B 0x46104
-#define PORT_CLK_SEL(port) _PORT(port, \
- PORT_CLK_SEL_A, \
- PORT_CLK_SEL_B)
+#define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B)
#define PORT_CLK_SEL_LCPLL_2700 (0<<29)
#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
#define PORT_CLK_SEL_LCPLL_810 (2<<29)
-#define PORT_CLK_SEL_SPLL (3<<29)
+#define PORT_CLK_SEL_SPLL (3<<29)
#define PORT_CLK_SEL_WRPLL1 (4<<29)
#define PORT_CLK_SEL_WRPLL2 (5<<29)
/* Pipe clock selection */
#define PIPE_CLK_SEL_A 0x46140
#define PIPE_CLK_SEL_B 0x46144
-#define PIPE_CLK_SEL(pipe) _PIPE(pipe, \
- PIPE_CLK_SEL_A, \
- PIPE_CLK_SEL_B)
+#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B)
/* For each pipe, we need to select the corresponding port clock */
-#define PIPE_CLK_SEL_DISABLED (0x0<<29)
-#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29)
+#define PIPE_CLK_SEL_DISABLED (0x0<<29)
+#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29)
/* LCPLL Control */
-#define LCPLL_CTL 0x130040
+#define LCPLL_CTL 0x130040
#define LCPLL_PLL_DISABLE (1<<31)
#define LCPLL_PLL_LOCK (1<<30)
-#define LCPLL_CD_CLOCK_DISABLE (1<<25)
+#define LCPLL_CD_CLOCK_DISABLE (1<<25)
#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
/* Pipe WM_LINETIME - watermark line time */
#define PIPE_WM_LINETIME_A 0x45270
#define PIPE_WM_LINETIME_B 0x45274
-#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, \
- PIPE_WM_LINETIME_A, \
- PIPE_WM_LINETIME_B)
-#define PIPE_WM_LINETIME_MASK (0x1ff)
-#define PIPE_WM_LINETIME_TIME(x) ((x))
+#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, PIPE_WM_LINETIME_A, \
+ PIPE_WM_LINETIME_B)
+#define PIPE_WM_LINETIME_MASK (0x1ff)
+#define PIPE_WM_LINETIME_TIME(x) ((x))
#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16)
-#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16)
+#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16)
/* SFUSE_STRAP */
-#define SFUSE_STRAP 0xc2014
+#define SFUSE_STRAP 0xc2014
#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
#define SFUSE_STRAP_DDID_DETECTED (1<<0)
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 7631807a2788..903eebd2117a 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -46,32 +46,32 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
}
static ssize_t
-show_rc6_mask(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev));
}
static ssize_t
-show_rc6_ms(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
return snprintf(buf, PAGE_SIZE, "%u", rc6_residency);
}
static ssize_t
-show_rc6p_ms(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
return snprintf(buf, PAGE_SIZE, "%u", rc6p_residency);
}
static ssize_t
-show_rc6pp_ms(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
return snprintf(buf, PAGE_SIZE, "%u", rc6pp_residency);
}
@@ -93,6 +93,7 @@ static struct attribute_group rc6_attr_group = {
.name = power_group_name,
.attrs = rc6_attrs
};
+#endif
static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
@@ -202,37 +203,214 @@ static struct bin_attribute dpf_attrs = {
.mmap = NULL
};
+static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
+ mutex_unlock(&dev->struct_mutex);
+
+ return snprintf(buf, PAGE_SIZE, "%d", ret);
+}
+
+static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
+ mutex_unlock(&dev->struct_mutex);
+
+ return snprintf(buf, PAGE_SIZE, "%d", ret);
+}
+
+static ssize_t gt_max_freq_mhz_store(struct device *kdev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, rp_state_cap, hw_max, hw_min;
+ ssize_t ret;
+
+ ret = kstrtou32(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ val /= GT_FREQUENCY_MULTIPLIER;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ hw_max = (rp_state_cap & 0xff);
+ hw_min = ((rp_state_cap & 0xff0000) >> 16);
+
+ if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ if (dev_priv->rps.cur_delay > val)
+ gen6_set_rps(dev_priv->dev, val);
+
+ dev_priv->rps.max_delay = val;
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return count;
+}
+
+static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
+ mutex_unlock(&dev->struct_mutex);
+
+ return snprintf(buf, PAGE_SIZE, "%d", ret);
+}
+
+static ssize_t gt_min_freq_mhz_store(struct device *kdev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, rp_state_cap, hw_max, hw_min;
+ ssize_t ret;
+
+ ret = kstrtou32(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ val /= GT_FREQUENCY_MULTIPLIER;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ hw_max = (rp_state_cap & 0xff);
+ hw_min = ((rp_state_cap & 0xff0000) >> 16);
+
+ if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ if (dev_priv->rps.cur_delay < val)
+ gen6_set_rps(dev_priv->dev, val);
+
+ dev_priv->rps.min_delay = val;
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
+static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
+static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
+
+static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
+static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+
+/* For now we have a static number of RP states */
+static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, rp_state_cap;
+ ssize_t ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (attr == &dev_attr_gt_RP0_freq_mhz) {
+ val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
+ } else if (attr == &dev_attr_gt_RP1_freq_mhz) {
+ val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
+ } else if (attr == &dev_attr_gt_RPn_freq_mhz) {
+ val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
+ } else {
+ BUG();
+ }
+ return snprintf(buf, PAGE_SIZE, "%d", val);
+}
+
+static const struct attribute *gen6_attrs[] = {
+ &dev_attr_gt_cur_freq_mhz.attr,
+ &dev_attr_gt_max_freq_mhz.attr,
+ &dev_attr_gt_min_freq_mhz.attr,
+ &dev_attr_gt_RP0_freq_mhz.attr,
+ &dev_attr_gt_RP1_freq_mhz.attr,
+ &dev_attr_gt_RPn_freq_mhz.attr,
+ NULL,
+};
+
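
[Editor's note] For reference, GEN6_RP_STATE_CAP packs the three RP operating points into its low three bytes, exactly as gt_rp_mhz_show() above decodes them; a compact restatement (rp_caps_mhz() is an illustrative helper; GT_FREQUENCY_MULTIPLIER converts the hardware ratio units, 50 MHz steps on these parts, to MHz):

static void rp_caps_mhz(u32 rp_state_cap, u32 *rp0, u32 *rp1, u32 *rpn)
{
	*rp0 = ((rp_state_cap >>  0) & 0xff) * GT_FREQUENCY_MULTIPLIER;
	*rp1 = ((rp_state_cap >>  8) & 0xff) * GT_FREQUENCY_MULTIPLIER;
	*rpn = ((rp_state_cap >> 16) & 0xff) * GT_FREQUENCY_MULTIPLIER;
}

Userspace then works in plain MHz: writing, say, 450 to gt_min_freq_mhz raises the lower bound of the RPS range, subject to the hardware caps checked in the store functions above.
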
void i915_setup_sysfs(struct drm_device *dev)
{
int ret;
+#ifdef CONFIG_PM
if (INTEL_INFO(dev)->gen >= 6) {
ret = sysfs_merge_group(&dev->primary->kdev.kobj,
&rc6_attr_group);
if (ret)
DRM_ERROR("RC6 residency sysfs setup failed\n");
}
-
- if (IS_IVYBRIDGE(dev)) {
+#endif
+ if (HAS_L3_GPU_CACHE(dev)) {
ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
if (ret)
DRM_ERROR("l3 parity sysfs setup failed\n");
}
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
+ if (ret)
+ DRM_ERROR("gen6 sysfs setup failed\n");
+ }
}
void i915_teardown_sysfs(struct drm_device *dev)
{
+ sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
+#ifdef CONFIG_PM
sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+#endif
}
-#else
-void i915_setup_sysfs(struct drm_device *dev)
-{
- return;
-}
-
-void i915_teardown_sysfs(struct drm_device *dev)
-{
- return;
-}
-#endif /* CONFIG_PM */
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index fe90b3a84a6d..8134421b89a6 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -214,22 +214,18 @@ TRACE_EVENT(i915_gem_evict,
);
TRACE_EVENT(i915_gem_evict_everything,
- TP_PROTO(struct drm_device *dev, bool purgeable),
- TP_ARGS(dev, purgeable),
+ TP_PROTO(struct drm_device *dev),
+ TP_ARGS(dev),
TP_STRUCT__entry(
__field(u32, dev)
- __field(bool, purgeable)
),
TP_fast_assign(
__entry->dev = dev->primary->index;
- __entry->purgeable = purgeable;
),
- TP_printk("dev=%d%s",
- __entry->dev,
- __entry->purgeable ? ", purgeable only" : "")
+ TP_printk("dev=%d", __entry->dev)
);
TRACE_EVENT(i915_gem_ring_dispatch,
@@ -434,6 +430,21 @@ TRACE_EVENT(i915_reg_rw,
(u32)(__entry->val >> 32))
);
+TRACE_EVENT(intel_gpu_freq_change,
+ TP_PROTO(u32 freq),
+ TP_ARGS(freq),
+
+ TP_STRUCT__entry(
+ __field(u32, freq)
+ ),
+
+ TP_fast_assign(
+ __entry->freq = freq;
+ ),
+
+ TP_printk("new_freq=%u", __entry->freq)
+);
+
#endif /* _I915_TRACE_H_ */
/* This part must be outside protection */
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index c8f1c0db446d..893f30164b7e 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -46,6 +46,7 @@
struct intel_crt {
struct intel_encoder base;
bool force_hotplug_required;
+ u32 adpa_reg;
};
static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
@@ -54,42 +55,68 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
struct intel_crt, base);
}
-static void pch_crt_dpms(struct drm_encoder *encoder, int mode)
+static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
+ return container_of(encoder, struct intel_crt, base);
+}
+
+static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ u32 tmp;
+
+ tmp = I915_READ(crt->adpa_reg);
+
+ if (!(tmp & ADPA_DAC_ENABLE))
+ return false;
+
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ else
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+static void intel_disable_crt(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
u32 temp;
- temp = I915_READ(PCH_ADPA);
+ temp = I915_READ(crt->adpa_reg);
+ temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
temp &= ~ADPA_DAC_ENABLE;
+ I915_WRITE(crt->adpa_reg, temp);
+}
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- temp |= ADPA_DAC_ENABLE;
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- /* Just leave port enable cleared */
- break;
- }
+static void intel_enable_crt(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ u32 temp;
- I915_WRITE(PCH_ADPA, temp);
+ temp = I915_READ(crt->adpa_reg);
+ temp |= ADPA_DAC_ENABLE;
+ I915_WRITE(crt->adpa_reg, temp);
}
-static void gmch_crt_dpms(struct drm_encoder *encoder, int mode)
+/* Note: The caller is required to filter out dpms modes not supported by the
+ * platform. */
+static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
u32 temp;
- temp = I915_READ(ADPA);
+ temp = I915_READ(crt->adpa_reg);
temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
temp &= ~ADPA_DAC_ENABLE;
- if (IS_VALLEYVIEW(dev) && mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
-
switch (mode) {
case DRM_MODE_DPMS_ON:
temp |= ADPA_DAC_ENABLE;
@@ -105,7 +132,51 @@ static void gmch_crt_dpms(struct drm_encoder *encoder, int mode)
break;
}
- I915_WRITE(ADPA, temp);
+ I915_WRITE(crt->adpa_reg, temp);
+}
+
+static void intel_crt_dpms(struct drm_connector *connector, int mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ struct drm_crtc *crtc;
+ int old_dpms;
+
+ /* PCH platforms and VLV only support on/off. */
+ if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == connector->dpms)
+ return;
+
+ old_dpms = connector->dpms;
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ crtc = encoder->base.crtc;
+ if (!crtc) {
+ encoder->connectors_active = false;
+ return;
+ }
+
+ /* We need the pipe to run for anything but OFF. */
+ if (mode == DRM_MODE_DPMS_OFF)
+ encoder->connectors_active = false;
+ else
+ encoder->connectors_active = true;
+
+ if (mode < old_dpms) {
+ /* From off to on, enable the pipe first. */
+ intel_crtc_update_dpms(crtc);
+
+ intel_crt_set_dpms(encoder, mode);
+ } else {
+ intel_crt_set_dpms(encoder, mode);
+
+ intel_crtc_update_dpms(crtc);
+ }
+
+ intel_modeset_check_state(connector->dev);
}
static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -144,19 +215,15 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crt *crt =
+ intel_encoder_to_crt(to_intel_encoder(encoder));
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
int dpll_md_reg;
u32 adpa, dpll_md;
- u32 adpa_reg;
dpll_md_reg = DPLL_MD(intel_crtc->pipe);
- if (HAS_PCH_SPLIT(dev))
- adpa_reg = PCH_ADPA;
- else
- adpa_reg = ADPA;
-
/*
* Disable separate mode multiplier used when cloning SDVO to CRT
* XXX this needs to be adjusted when we really are cloning
@@ -184,7 +251,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
- I915_WRITE(adpa_reg, adpa);
+ I915_WRITE(crt->adpa_reg, adpa);
}
static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
@@ -544,14 +611,12 @@ intel_crt_detect(struct drm_connector *connector, bool force)
return connector->status;
/* for pre-945g platforms use load detect */
- if (intel_get_load_detect_pipe(&crt->base, connector, NULL,
- &tmp)) {
+ if (intel_get_load_detect_pipe(connector, NULL, &tmp)) {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
else
status = intel_crt_load_detect(crt);
- intel_release_load_detect_pipe(&crt->base, connector,
- &tmp);
+ intel_release_load_detect_pipe(connector, &tmp);
} else
status = connector_status_unknown;
@@ -602,25 +667,15 @@ static void intel_crt_reset(struct drm_connector *connector)
* Routines for controlling stuff on the analog port
*/
-static const struct drm_encoder_helper_funcs pch_encoder_funcs = {
+static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
.mode_fixup = intel_crt_mode_fixup,
- .prepare = intel_encoder_prepare,
- .commit = intel_encoder_commit,
.mode_set = intel_crt_mode_set,
- .dpms = pch_crt_dpms,
-};
-
-static const struct drm_encoder_helper_funcs gmch_encoder_funcs = {
- .mode_fixup = intel_crt_mode_fixup,
- .prepare = intel_encoder_prepare,
- .commit = intel_encoder_commit,
- .mode_set = intel_crt_mode_set,
- .dpms = gmch_crt_dpms,
+ .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_crt_connector_funcs = {
.reset = intel_crt_reset,
- .dpms = drm_helper_connector_dpms,
+ .dpms = intel_crt_dpms,
.detect = intel_crt_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_crt_destroy,
@@ -661,7 +716,6 @@ void intel_crt_init(struct drm_device *dev)
struct intel_crt *crt;
struct intel_connector *intel_connector;
struct drm_i915_private *dev_priv = dev->dev_private;
- const struct drm_encoder_helper_funcs *encoder_helper_funcs;
/* Skip machines without VGA that falsely report hotplug events */
if (dmi_check_system(intel_no_crt))
@@ -687,13 +741,11 @@ void intel_crt_init(struct drm_device *dev)
intel_connector_attach_encoder(intel_connector, &crt->base);
crt->base.type = INTEL_OUTPUT_ANALOG;
- crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
- 1 << INTEL_ANALOG_CLONE_BIT |
- 1 << INTEL_SDVO_LVDS_CLONE_BIT);
+ crt->base.cloneable = true;
if (IS_HASWELL(dev))
crt->base.crtc_mask = (1 << 0);
else
- crt->base.crtc_mask = (1 << 0) | (1 << 1);
+ crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
if (IS_GEN2(dev))
connector->interlace_allowed = 0;
@@ -702,11 +754,18 @@ void intel_crt_init(struct drm_device *dev)
connector->doublescan_allowed = 0;
if (HAS_PCH_SPLIT(dev))
- encoder_helper_funcs = &pch_encoder_funcs;
+ crt->adpa_reg = PCH_ADPA;
+ else if (IS_VALLEYVIEW(dev))
+ crt->adpa_reg = VLV_ADPA;
else
- encoder_helper_funcs = &gmch_encoder_funcs;
+ crt->adpa_reg = ADPA;
+
+ crt->base.disable = intel_disable_crt;
+ crt->base.enable = intel_enable_crt;
+ crt->base.get_hw_state = intel_crt_get_hw_state;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
- drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs);
+ drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 933c74859172..bfe375466a0e 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -250,7 +250,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
case PORT_B:
case PORT_C:
case PORT_D:
- intel_hdmi_init(dev, DDI_BUF_CTL(port));
+ intel_hdmi_init(dev, DDI_BUF_CTL(port), port);
break;
default:
DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
@@ -267,7 +267,8 @@ struct wrpll_tmds_clock {
u16 r2; /* Reference divider */
};
-/* Table of matching values for WRPLL clocks programming for each frequency */
+/* Table of matching values for WRPLL clocks programming for each frequency.
+ * The code assumes this table is sorted. */
static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{19750, 38, 25, 18},
{20000, 48, 32, 18},
@@ -277,7 +278,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{23000, 36, 23, 15},
{23500, 40, 40, 23},
{23750, 26, 16, 14},
- {23750, 26, 16, 14},
{24000, 36, 24, 15},
{25000, 36, 25, 15},
{25175, 26, 40, 33},
@@ -437,7 +437,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{108000, 8, 24, 15},
{108108, 8, 173, 108},
{109000, 6, 23, 19},
- {109000, 6, 23, 19},
{110000, 6, 22, 18},
{110013, 6, 22, 18},
{110250, 8, 49, 30},
@@ -614,7 +613,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{218250, 4, 42, 26},
{218750, 4, 34, 21},
{219000, 4, 47, 29},
- {219000, 4, 47, 29},
{220000, 4, 44, 27},
{220640, 4, 49, 30},
{220750, 4, 36, 22},
@@ -658,7 +656,7 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
int port = intel_hdmi->ddi_port;
int pipe = intel_crtc->pipe;
- int p, n2, r2, valid=0;
+ int p, n2, r2;
u32 temp, i;
/* On Haswell, we need to enable the clocks and prepare DDI function to
@@ -666,26 +664,23 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
*/
DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe));
- for (i=0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) {
- if (crtc->mode.clock == wrpll_tmds_clock_table[i].clock) {
- p = wrpll_tmds_clock_table[i].p;
- n2 = wrpll_tmds_clock_table[i].n2;
- r2 = wrpll_tmds_clock_table[i].r2;
+ for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
+ if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock)
+ break;
- DRM_DEBUG_KMS("WR PLL clock: found settings for %dKHz refresh rate: p=%d, n2=%d, r2=%d\n",
- crtc->mode.clock,
- p, n2, r2);
+ if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
+ i--;
- valid = 1;
- break;
- }
- }
+ p = wrpll_tmds_clock_table[i].p;
+ n2 = wrpll_tmds_clock_table[i].n2;
+ r2 = wrpll_tmds_clock_table[i].r2;
- if (!valid) {
- DRM_ERROR("Unable to find WR PLL clock settings for %dKHz refresh rate\n",
- crtc->mode.clock);
- return;
- }
+ if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock)
+ DRM_INFO("WR PLL: using settings for %dKHz on %dKHz mode\n",
+ wrpll_tmds_clock_table[i].clock, crtc->mode.clock);
+
+ DRM_DEBUG_KMS("WR PLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
+ crtc->mode.clock, p, n2, r2);
/* Enable LCPLL if disabled */
temp = I915_READ(LCPLL_CTL);
@@ -718,46 +713,107 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
/* Proper support for digital audio needs new logic and a new set
* of registers, so we leave it for future patch bombing.
*/
- DRM_DEBUG_DRIVER("HDMI audio on pipe %c not yet supported on DDI\n",
+ DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
pipe_name(intel_crtc->pipe));
+
+ /* write eld */
+ DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
+ intel_write_eld(encoder, adjusted_mode);
}
/* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
- temp = I915_READ(DDI_FUNC_CTL(pipe));
- temp &= ~PIPE_DDI_PORT_MASK;
- temp &= ~PIPE_DDI_BPC_12;
- temp |= PIPE_DDI_SELECT_PORT(port) |
- PIPE_DDI_MODE_SELECT_HDMI |
- ((intel_crtc->bpp > 24) ?
- PIPE_DDI_BPC_12 :
- PIPE_DDI_BPC_8) |
- PIPE_DDI_FUNC_ENABLE;
+ temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port);
+
+ switch (intel_crtc->bpp) {
+ case 18:
+ temp |= PIPE_DDI_BPC_6;
+ break;
+ case 24:
+ temp |= PIPE_DDI_BPC_8;
+ break;
+ case 30:
+ temp |= PIPE_DDI_BPC_10;
+ break;
+ case 36:
+ temp |= PIPE_DDI_BPC_12;
+ break;
+ default:
+ WARN(1, "%d bpp unsupported by pipe DDI function\n",
+ intel_crtc->bpp);
+ }
+
+ if (intel_hdmi->has_hdmi_sink)
+ temp |= PIPE_DDI_MODE_SELECT_HDMI;
+ else
+ temp |= PIPE_DDI_MODE_SELECT_DVI;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ temp |= PIPE_DDI_PVSYNC;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ temp |= PIPE_DDI_PHSYNC;
I915_WRITE(DDI_FUNC_CTL(pipe), temp);
intel_hdmi->set_infoframes(encoder, adjusted_mode);
}
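
[Editor's note] The clock selection above replaces an exact-match scan with a sorted-table lookup: take the first entry whose clock is at least the requested one, clamping to the last entry if the request is off the end of the table. Restated as a standalone helper (wrpll_find_entry() is hypothetical; struct wrpll_tmds_clock is the table type defined earlier in this patch):

/* Returns the index of the first entry with clock >= target, or n-1. */
static int wrpll_find_entry(const struct wrpll_tmds_clock *table,
			    int n, int target_khz)
{
	int i;

	for (i = 0; i < n; i++)
		if (target_khz <= table[i].clock)
			return i;
	return n - 1;	/* clamp to the fastest entry */
}
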
-void intel_ddi_dpms(struct drm_encoder *encoder, int mode)
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ u32 tmp;
+ int i;
+
+ tmp = I915_READ(DDI_BUF_CTL(intel_hdmi->ddi_port));
+
+ if (!(tmp & DDI_BUF_CTL_ENABLE))
+ return false;
+
+ for_each_pipe(i) {
+ tmp = I915_READ(DDI_FUNC_CTL(i));
+
+ if ((tmp & PIPE_DDI_PORT_MASK)
+ == PIPE_DDI_SELECT_PORT(intel_hdmi->ddi_port)) {
+ *pipe = i;
+ return true;
+ }
+ }
+
+ DRM_DEBUG_KMS("No pipe for ddi port %i found\n", intel_hdmi->ddi_port);
+
+ return true;
+}
+
+void intel_enable_ddi(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
int port = intel_hdmi->ddi_port;
u32 temp;
temp = I915_READ(DDI_BUF_CTL(port));
-
- if (mode != DRM_MODE_DPMS_ON) {
- temp &= ~DDI_BUF_CTL_ENABLE;
- } else {
- temp |= DDI_BUF_CTL_ENABLE;
- }
+ temp |= DDI_BUF_CTL_ENABLE;
/* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width,
* and swing/emphasis values are ignored so nothing special needs
* to be done besides enabling the port.
*/
- I915_WRITE(DDI_BUF_CTL(port),
- temp);
+ I915_WRITE(DDI_BUF_CTL(port), temp);
+}
+
+void intel_disable_ddi(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ int port = intel_hdmi->ddi_port;
+ u32 temp;
+
+ temp = I915_READ(DDI_BUF_CTL(port));
+ temp &= ~DDI_BUF_CTL_ENABLE;
+
+ I915_WRITE(DDI_BUF_CTL(port), temp);
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 7ea9a3ceb269..e3c02655d36f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1006,7 +1006,7 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
/* Wait for the Pipe State to go off */
if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
100))
- DRM_DEBUG_KMS("pipe_off wait timed out\n");
+ WARN(1, "pipe_off wait timed out\n");
} else {
u32 last_line, line_mask;
int reg = PIPEDSL(pipe);
@@ -1024,7 +1024,7 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
} while (((I915_READ(reg) & line_mask) != last_line) &&
time_after(timeout, jiffies));
if (time_after(jiffies, timeout))
- DRM_DEBUG_KMS("pipe_off wait timed out\n");
+ WARN(1, "pipe_off wait timed out\n");
}
}
@@ -1431,6 +1431,8 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
* protect mechanism may be enabled.
*
* Note! This is for pre-ILK only.
+ *
+ * Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
*/
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
@@ -1860,59 +1862,6 @@ static void intel_disable_plane(struct drm_i915_private *dev_priv,
intel_wait_for_vblank(dev_priv->dev, pipe);
}
-static void disable_pch_dp(struct drm_i915_private *dev_priv,
- enum pipe pipe, int reg, u32 port_sel)
-{
- u32 val = I915_READ(reg);
- if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
- DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
- I915_WRITE(reg, val & ~DP_PORT_EN);
- }
-}
-
-static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
- enum pipe pipe, int reg)
-{
- u32 val = I915_READ(reg);
- if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
- DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
- reg, pipe);
- I915_WRITE(reg, val & ~PORT_ENABLE);
- }
-}
-
-/* Disable any ports connected to this transcoder */
-static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- u32 reg, val;
-
- val = I915_READ(PCH_PP_CONTROL);
- I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
-
- disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
- disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
- disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
-
- reg = PCH_ADPA;
- val = I915_READ(reg);
- if (adpa_pipe_enabled(dev_priv, pipe, val))
- I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
-
- reg = PCH_LVDS;
- val = I915_READ(reg);
- if (lvds_pipe_enabled(dev_priv, pipe, val)) {
- DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
- I915_WRITE(reg, val & ~LVDS_PORT_EN);
- POSTING_READ(reg);
- udelay(100);
- }
-
- disable_pch_hdmi(dev_priv, pipe, HDMIB);
- disable_pch_hdmi(dev_priv, pipe, HDMIC);
- disable_pch_hdmi(dev_priv, pipe, HDMID);
-}
-
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -2201,16 +2150,17 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+ struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_framebuffer *old_fb;
int ret;
/* no fb bound */
- if (!crtc->fb) {
+ if (!fb) {
DRM_ERROR("No FB bound\n");
return 0;
}
@@ -2224,7 +2174,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev,
- to_intel_framebuffer(crtc->fb)->obj,
+ to_intel_framebuffer(fb)->obj,
NULL);
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
@@ -2232,17 +2182,22 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
}
- if (old_fb)
- intel_finish_fb(old_fb);
+ if (crtc->fb)
+ intel_finish_fb(crtc->fb);
- ret = dev_priv->display.update_plane(crtc, crtc->fb, x, y);
+ ret = dev_priv->display.update_plane(crtc, fb, x, y);
if (ret) {
- intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
+ intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
mutex_unlock(&dev->struct_mutex);
DRM_ERROR("failed to update base address\n");
return ret;
}
+ old_fb = crtc->fb;
+ crtc->fb = fb;
+ crtc->x = x;
+ crtc->y = y;
+
if (old_fb) {
intel_wait_for_vblank(dev, intel_crtc->pipe);
intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
@@ -2709,11 +2664,10 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
DRM_DEBUG_KMS("FDI train done.\n");
}
-static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
+static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp;
@@ -2754,6 +2708,35 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
}
}
+static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
+
+ /* Switch from PCDclk to Rawclk */
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_PCDCLK);
+
+ /* Disable CPU FDI TX PLL */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
+
+ POSTING_READ(reg);
+ udelay(100);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
+
+ /* Wait for the clocks to turn off. */
+ POSTING_READ(reg);
+ udelay(100);
+}
+
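
The disable path is deliberately ordered: switch the RX side back from PCDclk to the raw clock, then turn off the TX PLL, then the RX PLL, with posting reads and 100us delays so each write lands before the next step. A user-space approximation of that write/flush/wait pattern (register indices and the uniform delay are illustrative):

	#include <stdint.h>
	#include <unistd.h>

	static volatile uint32_t mmio[4];	/* pretend MMIO window */

	static void write_flush_wait(int reg, uint32_t val)
	{
		uint32_t dummy;

		mmio[reg] = val;
		dummy = mmio[reg];	/* posting read: force the write out */
		(void)dummy;
		usleep(100);		/* let the PLL settle, like udelay(100) */
	}

	int main(void)
	{
		write_flush_wait(0, 0);	/* RX back to raw clock */
		write_flush_wait(1, 0);	/* TX PLL off */
		write_flush_wait(2, 0);	/* RX PLL off */
		return 0;
	}
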
static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2838,13 +2821,13 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct intel_encoder *encoder;
+ struct intel_encoder *intel_encoder;
/*
* If there's a non-PCH eDP on this crtc, it must be DP_A, and that
* must be driven by its own crtc; no sharing is possible.
*/
- for_each_encoder_on_crtc(dev, crtc, encoder) {
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
/* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
* CPU handles all others */
@@ -2852,19 +2835,19 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
/* It is still unclear how this will work on PPT, so throw up a warning */
WARN_ON(!HAS_PCH_LPT(dev));
- if (encoder->type == DRM_MODE_ENCODER_DAC) {
+ if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
return true;
} else {
DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
- encoder->type);
+ intel_encoder->type);
return false;
}
}
- switch (encoder->type) {
+ switch (intel_encoder->type) {
case INTEL_OUTPUT_EDP:
- if (!intel_encoder_is_pch_edp(&encoder->base))
+ if (!intel_encoder_is_pch_edp(&intel_encoder->base))
return false;
continue;
}
@@ -3181,11 +3164,14 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 temp;
bool is_pch_port;
+ WARN_ON(!crtc->enabled);
+
if (intel_crtc->active)
return;
@@ -3200,10 +3186,16 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
is_pch_port = intel_crtc_driving_pch(crtc);
- if (is_pch_port)
- ironlake_fdi_pll_enable(crtc);
- else
- ironlake_fdi_disable(crtc);
+ if (is_pch_port) {
+ ironlake_fdi_pll_enable(intel_crtc);
+ } else {
+ assert_fdi_tx_disabled(dev_priv, pipe);
+ assert_fdi_rx_disabled(dev_priv, pipe);
+ }
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_enable)
+ encoder->pre_enable(encoder);
/* Enable panel fitting for LVDS */
if (dev_priv->pch_pf_size &&
@@ -3234,6 +3226,12 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
mutex_unlock(&dev->struct_mutex);
intel_crtc_update_cursor(crtc, true);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
+
+ if (HAS_PCH_CPT(dev))
+ intel_cpt_verify_modeset(dev, intel_crtc->pipe);
}
static void ironlake_crtc_disable(struct drm_crtc *crtc)
@@ -3241,13 +3239,18 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 reg, temp;
+
if (!intel_crtc->active)
return;
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->disable(encoder);
+
intel_crtc_wait_for_pending_flips(crtc);
drm_vblank_off(dev, pipe);
intel_crtc_update_cursor(crtc, false);
@@ -3263,14 +3266,11 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
I915_WRITE(PF_CTL(pipe), 0);
I915_WRITE(PF_WIN_SZ(pipe), 0);
- ironlake_fdi_disable(crtc);
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->post_disable)
+ encoder->post_disable(encoder);
- /* This is a horrible layering violation; we should be doing this in
- * the connector/encoder ->prepare instead, but we don't always have
- * enough information there about the config to know whether it will
- * actually be necessary or just cause undesired flicker.
- */
- intel_disable_pch_ports(dev_priv, pipe);
+ ironlake_fdi_disable(crtc);
intel_disable_transcoder(dev_priv, pipe);
@@ -3304,26 +3304,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
/* disable PCH DPLL */
intel_disable_pch_pll(intel_crtc);
- /* Switch from PCDclk to Rawclk */
- reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_PCDCLK);
-
- /* Disable CPU FDI TX PLL */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
-
- POSTING_READ(reg);
- udelay(100);
-
- reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
-
- /* Wait for the clocks to turn off. */
- POSTING_READ(reg);
- udelay(100);
+ ironlake_fdi_pll_disable(intel_crtc);
intel_crtc->active = false;
intel_update_watermarks(dev);
@@ -3333,30 +3314,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
mutex_unlock(&dev->struct_mutex);
}
-static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
-
- /* XXX: When our outputs are all unaware of DPMS modes other than off
- * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
- */
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
- ironlake_crtc_enable(crtc);
- break;
-
- case DRM_MODE_DPMS_OFF:
- DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
- ironlake_crtc_disable(crtc);
- break;
- }
-}
-
static void ironlake_crtc_off(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -3386,9 +3343,12 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
+ WARN_ON(!crtc->enabled);
+
if (intel_crtc->active)
return;
@@ -3405,6 +3365,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
/* Give the overlay scaler a chance to enable if it's on this pipe */
intel_crtc_dpms_overlay(intel_crtc, true);
intel_crtc_update_cursor(crtc, true);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
}
static void i9xx_crtc_disable(struct drm_crtc *crtc)
@@ -3412,12 +3375,17 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
+
if (!intel_crtc->active)
return;
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->disable(encoder);
+
/* Give the overlay scaler a chance to disable if it's on this pipe */
intel_crtc_wait_for_pending_flips(crtc);
drm_vblank_off(dev, pipe);
@@ -3436,45 +3404,17 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_update_watermarks(dev);
}
-static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- /* XXX: When our outputs are all unaware of DPMS modes other than off
- * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
- */
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- i9xx_crtc_enable(crtc);
- break;
- case DRM_MODE_DPMS_OFF:
- i9xx_crtc_disable(crtc);
- break;
- }
-}
-
static void i9xx_crtc_off(struct drm_crtc *crtc)
{
}
-/**
- * Sets the power management mode of the pipe and plane.
- */
-static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void intel_crtc_update_sarea(struct drm_crtc *crtc,
+ bool enabled)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- bool enabled;
-
- if (intel_crtc->dpms_mode == mode)
- return;
-
- intel_crtc->dpms_mode = mode;
-
- dev_priv->display.dpms(crtc, mode);
if (!dev->primary->master)
return;
@@ -3483,8 +3423,6 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
if (!master_priv->sarea_priv)
return;
- enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
-
switch (pipe) {
case 0:
master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
@@ -3500,13 +3438,42 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
+/**
+ * Sets the power management mode of the pipe and plane.
+ */
+void intel_crtc_update_dpms(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ bool enable = false;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder)
+ enable |= intel_encoder->connectors_active;
+
+ if (enable)
+ dev_priv->display.crtc_enable(crtc);
+ else
+ dev_priv->display.crtc_disable(crtc);
+
+ intel_crtc_update_sarea(crtc, enable);
+}
+
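
intel_crtc_update_dpms() replaces the old per-crtc dpms_mode bookkeeping: the crtc stays enabled iff at least one encoder on it still has active connectors, computed by OR-ing the connectors_active flags. A toy model of that aggregation (names invented):

	#include <stdbool.h>
	#include <stdio.h>

	struct toy_encoder { bool connectors_active; };

	static bool crtc_should_be_enabled(const struct toy_encoder *e, int n)
	{
		bool enable = false;
		int i;

		for (i = 0; i < n; i++)
			enable |= e[i].connectors_active;	/* any active? */

		return enable;
	}

	int main(void)
	{
		struct toy_encoder encoders[] = { { false }, { true } };

		printf("%d\n", crtc_should_be_enabled(encoders, 2));	/* 1 */
		return 0;
	}
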
+static void intel_crtc_noop(struct drm_crtc *crtc)
+{
+}
+
static void intel_crtc_disable(struct drm_crtc *crtc)
{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
struct drm_device *dev = crtc->dev;
+ struct drm_connector *connector;
struct drm_i915_private *dev_priv = dev->dev_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+ /* crtc should still be enabled when we disable it. */
+ WARN_ON(!crtc->enabled);
+
+ dev_priv->display.crtc_disable(crtc);
+ intel_crtc_update_sarea(crtc, false);
dev_priv->display.off(crtc);
assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
@@ -3516,63 +3483,128 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
+ crtc->fb = NULL;
+ }
+
+ /* Update computed state. */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder || !connector->encoder->crtc)
+ continue;
+
+ if (connector->encoder->crtc != crtc)
+ continue;
+
+ connector->dpms = DRM_MODE_DPMS_OFF;
+ to_intel_encoder(connector->encoder)->connectors_active = false;
}
}
-/* Prepare for a mode set.
- *
- * Note we could be a lot smarter here. We need to figure out which outputs
- * will be enabled, which disabled (in short, how the config will changes)
- * and perform the minimum necessary steps to accomplish that, e.g. updating
- * watermarks, FBC configuration, making sure PLLs are programmed correctly,
- * panel fitting is in the proper state, etc.
- */
-static void i9xx_crtc_prepare(struct drm_crtc *crtc)
+void intel_modeset_disable(struct drm_device *dev)
{
- i9xx_crtc_disable(crtc);
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->enabled)
+ intel_crtc_disable(crtc);
+ }
}
-static void i9xx_crtc_commit(struct drm_crtc *crtc)
+void intel_encoder_noop(struct drm_encoder *encoder)
{
- i9xx_crtc_enable(crtc);
}
-static void ironlake_crtc_prepare(struct drm_crtc *crtc)
+void intel_encoder_destroy(struct drm_encoder *encoder)
{
- ironlake_crtc_disable(crtc);
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
-static void ironlake_crtc_commit(struct drm_crtc *crtc)
+/* Simple dpms helper for encoders with just one connector, no cloning and only
+ * one kind of off state. It clamps all !ON modes to fully OFF and changes the
+ * state of the entire output pipe. */
+void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
{
- ironlake_crtc_enable(crtc);
+ if (mode == DRM_MODE_DPMS_ON) {
+ encoder->connectors_active = true;
+
+ intel_crtc_update_dpms(encoder->base.crtc);
+ } else {
+ encoder->connectors_active = false;
+
+ intel_crtc_update_dpms(encoder->base.crtc);
+ }
}
-void intel_encoder_prepare(struct drm_encoder *encoder)
+/* Cross check the actual hw state with our own modeset state tracking (and its
+ * internal consistency). */
+static void intel_connector_check_state(struct intel_connector *connector)
{
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
- /* lvds has its own version of prepare see intel_lvds_prepare */
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+ if (connector->get_hw_state(connector)) {
+ struct intel_encoder *encoder = connector->encoder;
+ struct drm_crtc *crtc;
+ bool encoder_enabled;
+ enum pipe pipe;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.base.id,
+ drm_get_connector_name(&connector->base));
+
+ WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
+ "wrong connector dpms state\n");
+ WARN(connector->base.encoder != &encoder->base,
+ "active connector not linked to encoder\n");
+ WARN(!encoder->connectors_active,
+ "encoder->connectors_active not set\n");
+
+ encoder_enabled = encoder->get_hw_state(encoder, &pipe);
+ WARN(!encoder_enabled, "encoder not enabled\n");
+ if (WARN_ON(!encoder->base.crtc))
+ return;
+
+ crtc = encoder->base.crtc;
+
+ WARN(!crtc->enabled, "crtc not enabled\n");
+ WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
+ WARN(pipe != to_intel_crtc(crtc)->pipe,
+ "encoder active on the wrong pipe\n");
+ }
}
-void intel_encoder_commit(struct drm_encoder *encoder)
+/* Even simpler default implementation, if there's really no special case to
+ * consider. */
+void intel_connector_dpms(struct drm_connector *connector, int mode)
{
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
- struct drm_device *dev = encoder->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
- /* lvds has its own version of commit see intel_lvds_commit */
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+ /* All the simple cases only support two dpms states. */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
- if (HAS_PCH_CPT(dev))
- intel_cpt_verify_modeset(dev, intel_crtc->pipe);
+ if (mode == connector->dpms)
+ return;
+
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ if (encoder->base.crtc)
+ intel_encoder_dpms(encoder, mode);
+ else
+ WARN_ON(encoder->connectors_active != false);
+
+ intel_modeset_check_state(connector->dev);
}
-void intel_encoder_destroy(struct drm_encoder *encoder)
+/* Simple connector->get_hw_state implementation for encoders that support only
+ * one connector and no cloning and hence the encoder state determines the state
+ * of the connector. */
+bool intel_connector_get_hw_state(struct intel_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ enum pipe pipe = 0;
+ struct intel_encoder *encoder = connector->encoder;
- drm_encoder_cleanup(encoder);
- kfree(intel_encoder);
+ return encoder->get_hw_state(encoder, &pipe);
}
static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -3593,6 +3625,13 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
drm_mode_set_crtcinfo(adjusted_mode, 0);
+ /* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes
+ * with a hsync front porch of 0.
+ */
+ if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
+ adjusted_mode->hsync_start == adjusted_mode->hdisplay)
+ return false;
+
return true;
}
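
The new workaround check relies on the horizontal front porch being hsync_start - hdisplay, so a mode where the two are equal has a zero front porch and gets rejected on gen > 4 and G4X parts. A tiny illustration of the arithmetic (mode values hypothetical):

	#include <stdio.h>

	int main(void)
	{
		int hdisplay = 1920, hsync_start = 1920;	/* hypothetical mode */
		int front_porch = hsync_start - hdisplay;

		printf("front porch = %d -> %s\n", front_porch,
		       front_porch == 0 ? "reject on Cantiga+" : "ok");
		return 0;
	}
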
@@ -3728,6 +3767,7 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
* true if they don't match).
*/
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
unsigned int *pipe_bpp,
struct drm_display_mode *mode)
{
@@ -3797,7 +3837,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
* also stays within the max display bpc discovered above.
*/
- switch (crtc->fb->depth) {
+ switch (fb->depth) {
case 8:
bpc = 8; /* since we go through a colormap */
break;
@@ -4216,7 +4256,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
- struct drm_framebuffer *old_fb)
+ struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4406,7 +4446,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
- ret = intel_pipe_set_base(crtc, x, y, old_fb);
+ ret = intel_pipe_set_base(crtc, x, y, fb);
intel_update_watermarks(dev);
@@ -4560,24 +4600,130 @@ static int ironlake_get_refclk(struct drm_crtc *crtc)
return 120000;
}
+static void ironlake_set_pipeconf(struct drm_crtc *crtc,
+ struct drm_display_mode *adjusted_mode,
+ bool dither)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ uint32_t val;
+
+ val = I915_READ(PIPECONF(pipe));
+
+ val &= ~PIPE_BPC_MASK;
+ switch (intel_crtc->bpp) {
+ case 18:
+ val |= PIPE_6BPC;
+ break;
+ case 24:
+ val |= PIPE_8BPC;
+ break;
+ case 30:
+ val |= PIPE_10BPC;
+ break;
+ case 36:
+ val |= PIPE_12BPC;
+ break;
+ default:
+ val |= PIPE_8BPC;
+ break;
+ }
+
+ val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
+ if (dither)
+ val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+ val &= ~PIPECONF_INTERLACE_MASK;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ val |= PIPECONF_INTERLACED_ILK;
+ else
+ val |= PIPECONF_PROGRESSIVE;
+
+ I915_WRITE(PIPECONF(pipe), val);
+ POSTING_READ(PIPECONF(pipe));
+}
+
+static bool ironlake_compute_clocks(struct drm_crtc *crtc,
+ struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock,
+ bool *has_reduced_clock,
+ intel_clock_t *reduced_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ int refclk;
+ const intel_limit_t *limit;
+ bool ret, is_sdvo = false, is_tv = false, is_lvds = false;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_SDVO:
+ case INTEL_OUTPUT_HDMI:
+ is_sdvo = true;
+ if (intel_encoder->needs_tv_clock)
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_TVOUT:
+ is_tv = true;
+ break;
+ }
+ }
+
+ refclk = ironlake_get_refclk(crtc);
+
+ /*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
+ limit = intel_limit(crtc, refclk);
+ ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
+ clock);
+ if (!ret)
+ return false;
+
+ if (is_lvds && dev_priv->lvds_downclock_avail) {
+ /*
+ * Ensure we match the reduced clock's P to the target clock.
+ * If the clocks don't match, we can't switch the display clock
+ * by using the FP0/FP1. In such case we will disable the LVDS
+ * downclock feature.
+ */
+ *has_reduced_clock = limit->find_pll(limit, crtc,
+ dev_priv->lvds_downclock,
+ refclk,
+ clock,
+ reduced_clock);
+ }
+
+ if (is_sdvo && is_tv)
+ i9xx_adjust_sdvo_tv_clock(adjusted_mode, clock);
+
+ return true;
+}
+
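
ironlake_compute_clocks() pulls the divisor search out of the mode_set path: find_pll() hunts for divisors that hit the target clock from the reference clock, plus an optional reduced set for LVDS downclocking. A brute-force toy in the same spirit, with invented divisor limits and a simplified clock equation:

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		const int refclk = 120000;	/* kHz, as on non-SSC Ironlake */
		const int target = 148500;	/* kHz, hypothetical mode clock */
		int n, m, p, best_err = 1 << 30, bn = 0, bm = 0, bp = 0;

		/* simplified clock = refclk * m / (n * p); limits are invented */
		for (n = 1; n <= 8; n++)
			for (m = 10; m <= 160; m++)
				for (p = 1; p <= 10; p++) {
					int clock = refclk * m / (n * p);
					int err = abs(clock - target);

					if (err < best_err) {
						best_err = err;
						bn = n; bm = m; bp = p;
					}
				}

		printf("n=%d m=%d p=%d err=%dKHz\n", bn, bm, bp, best_err);
		return 0;
	}
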
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
- struct drm_framebuffer *old_fb)
+ struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
- int refclk, num_connectors = 0;
+ int num_connectors = 0;
intel_clock_t clock, reduced_clock;
- u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
+ u32 dpll, fp = 0, fp2 = 0;
bool ok, has_reduced_clock = false, is_sdvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
struct intel_encoder *encoder, *edp_encoder = NULL;
- const intel_limit_t *limit;
int ret;
struct fdi_m_n m_n = {0};
u32 temp;
@@ -4619,16 +4765,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
num_connectors++;
}
- refclk = ironlake_get_refclk(crtc);
-
- /*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- */
- limit = intel_limit(crtc, refclk);
- ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
- &clock);
+ ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
+ &has_reduced_clock, &reduced_clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
@@ -4637,24 +4775,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
/* Ensure that the cursor is valid for the new mode before changing... */
intel_crtc_update_cursor(crtc, true);
- if (is_lvds && dev_priv->lvds_downclock_avail) {
- /*
- * Ensure we match the reduced clock's P to the target clock.
- * If the clocks don't match, we can't switch the display clock
- * by using the FP0/FP1. In such case we will disable the LVDS
- * downclock feature.
- */
- has_reduced_clock = limit->find_pll(limit, crtc,
- dev_priv->lvds_downclock,
- refclk,
- &clock,
- &reduced_clock);
- }
-
- if (is_sdvo && is_tv)
- i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
-
-
/* FDI link */
pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
lane = 0;
@@ -4682,32 +4802,17 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
target_clock = adjusted_mode->clock;
/* determine panel color depth */
- temp = I915_READ(PIPECONF(pipe));
- temp &= ~PIPE_BPC_MASK;
- dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
- switch (pipe_bpp) {
- case 18:
- temp |= PIPE_6BPC;
- break;
- case 24:
- temp |= PIPE_8BPC;
- break;
- case 30:
- temp |= PIPE_10BPC;
- break;
- case 36:
- temp |= PIPE_12BPC;
- break;
- default:
+ dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp, mode);
+ if (is_lvds && dev_priv->lvds_dither)
+ dither = true;
+
+ if (pipe_bpp != 18 && pipe_bpp != 24 && pipe_bpp != 30 &&
+ pipe_bpp != 36) {
WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
- pipe_bpp);
- temp |= PIPE_8BPC;
+ pipe_bpp);
pipe_bpp = 24;
- break;
}
-
intel_crtc->bpp = pipe_bpp;
- I915_WRITE(PIPECONF(pipe), temp);
if (!lane) {
/*
@@ -4791,12 +4896,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
else
dpll |= PLL_REF_INPUT_DREFCLK;
- /* setup pipeconf */
- pipeconf = I915_READ(PIPECONF(pipe));
-
- /* Set up the display plane register */
- dspcntr = DISPPLANE_GAMMA_ENABLE;
-
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
@@ -4856,12 +4955,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(PCH_LVDS, temp);
}
- pipeconf &= ~PIPECONF_DITHER_EN;
- pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
- if ((is_lvds && dev_priv->lvds_dither) || dither) {
- pipeconf |= PIPECONF_DITHER_EN;
- pipeconf |= PIPECONF_DITHER_TYPE_SP;
- }
if (is_dp && !is_cpu_edp) {
intel_dp_set_m_n(crtc, mode, adjusted_mode);
} else {
@@ -4897,9 +4990,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
}
}
- pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- pipeconf |= PIPECONF_INTERLACED_ILK;
/* the chip adds 2 halflines automatically */
adjusted_mode->crtc_vtotal -= 1;
adjusted_mode->crtc_vblank_end -= 1;
@@ -4907,7 +4998,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
adjusted_mode->crtc_hsync_start
- adjusted_mode->crtc_htotal/2);
} else {
- pipeconf |= PIPECONF_PROGRESSIVE;
I915_WRITE(VSYNCSHIFT(pipe), 0);
}
@@ -4945,15 +5035,15 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
if (is_cpu_edp)
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
- I915_WRITE(PIPECONF(pipe), pipeconf);
- POSTING_READ(PIPECONF(pipe));
+ ironlake_set_pipeconf(crtc, adjusted_mode, dither);
intel_wait_for_vblank(dev, pipe);
- I915_WRITE(DSPCNTR(plane), dspcntr);
+ /* Set up the display plane register */
+ I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
POSTING_READ(DSPCNTR(plane));
- ret = intel_pipe_set_base(crtc, x, y, old_fb);
+ ret = intel_pipe_set_base(crtc, x, y, fb);
intel_update_watermarks(dev);
@@ -4966,7 +5056,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
- struct drm_framebuffer *old_fb)
+ struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4977,14 +5067,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
drm_vblank_pre_modeset(dev, pipe);
ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
- x, y, old_fb);
+ x, y, fb);
drm_vblank_post_modeset(dev, pipe);
- if (ret)
- intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
- else
- intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
-
return ret;
}
@@ -5057,6 +5142,91 @@ static void g4x_write_eld(struct drm_connector *connector,
I915_WRITE(G4X_AUD_CNTL_ST, i);
}
+static void haswell_write_eld(struct drm_connector *connector,
+ struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ struct drm_device *dev = crtc->dev;
+ uint32_t eldv;
+ uint32_t i;
+ int len;
+ int pipe = to_intel_crtc(crtc)->pipe;
+ int tmp;
+
+ int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
+ int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
+ int aud_config = HSW_AUD_CFG(pipe);
+ int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
+
+
+ DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");
+
+ /* Audio output enable */
+ DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
+ tmp = I915_READ(aud_cntrl_st2);
+ tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
+ I915_WRITE(aud_cntrl_st2, tmp);
+
+ /* Wait for 1 vertical blank */
+ intel_wait_for_vblank(dev, pipe);
+
+ /* Set ELD valid state */
+ tmp = I915_READ(aud_cntrl_st2);
+ DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp);
+ tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
+ I915_WRITE(aud_cntrl_st2, tmp);
+ tmp = I915_READ(aud_cntrl_st2);
+ DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp);
+
+ /* Enable HDMI mode */
+ tmp = I915_READ(aud_config);
+ DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp);
+ /* clear N_programming_enable and N_value_index */
+ tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
+ I915_WRITE(aud_config, tmp);
+
+ DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
+
+ eldv = AUDIO_ELD_VALID_A << (pipe * 4);
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
+ eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
+ I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
+ } else
+ I915_WRITE(aud_config, 0);
+
+ if (intel_eld_uptodate(connector,
+ aud_cntrl_st2, eldv,
+ aud_cntl_st, IBX_ELD_ADDRESS,
+ hdmiw_hdmiedid))
+ return;
+
+ i = I915_READ(aud_cntrl_st2);
+ i &= ~eldv;
+ I915_WRITE(aud_cntrl_st2, i);
+
+ if (!eld[0])
+ return;
+
+ i = I915_READ(aud_cntl_st);
+ i &= ~IBX_ELD_ADDRESS;
+ I915_WRITE(aud_cntl_st, i);
+ i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
+ DRM_DEBUG_DRIVER("port num:%d\n", i);
+
+ len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
+ DRM_DEBUG_DRIVER("ELD size %d\n", len);
+ for (i = 0; i < len; i++)
+ I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
+
+ i = I915_READ(aud_cntrl_st2);
+ i |= eldv;
+ I915_WRITE(aud_cntrl_st2, i);
+
+}
+
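
The ELD copy loop above is sized in dwords: byte 2 of the ELD carries its length in dwords, and the hardware buffer holds 84 bytes, i.e. 21 dwords, hence the min(). Sketch of the sizing (ELD contents hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t eld[4] = { 0, 0, 24, 0 };	/* pretend 24-dword ELD */
		int len = eld[2] < 21 ? eld[2] : 21;	/* 84-byte hw buffer cap */

		printf("writing %d dwords of ELD\n", len);
		return 0;
	}
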
static void ironlake_write_eld(struct drm_connector *connector,
struct drm_crtc *crtc)
{
@@ -5069,28 +5239,24 @@ static void ironlake_write_eld(struct drm_connector *connector,
int aud_config;
int aud_cntl_st;
int aud_cntrl_st2;
+ int pipe = to_intel_crtc(crtc)->pipe;
if (HAS_PCH_IBX(connector->dev)) {
- hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
- aud_config = IBX_AUD_CONFIG_A;
- aud_cntl_st = IBX_AUD_CNTL_ST_A;
+ hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
+ aud_config = IBX_AUD_CFG(pipe);
+ aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
} else {
- hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
- aud_config = CPT_AUD_CONFIG_A;
- aud_cntl_st = CPT_AUD_CNTL_ST_A;
+ hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
+ aud_config = CPT_AUD_CFG(pipe);
+ aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
}
- i = to_intel_crtc(crtc)->pipe;
- hdmiw_hdmiedid += i * 0x100;
- aud_cntl_st += i * 0x100;
- aud_config += i * 0x100;
-
- DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));
+ DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
i = I915_READ(aud_cntl_st);
- i = (i >> 29) & 0x3; /* DIP_Port_Select, 0x1 = PortB */
+ i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
if (!i) {
DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
/* operate blindly on all ports */
@@ -5337,8 +5503,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
uint32_t addr;
int ret;
- DRM_DEBUG_KMS("\n");
-
/* if we want to turn off the cursor ignore width and height */
if (!handle) {
DRM_DEBUG_KMS("cursor off\n");
@@ -5584,17 +5748,18 @@ mode_fits_in_fbdev(struct drm_device *dev,
return fb;
}
-bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
- struct drm_connector *connector,
+bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_display_mode *mode,
struct intel_load_detect_pipe *old)
{
struct intel_crtc *intel_crtc;
+ struct intel_encoder *intel_encoder =
+ intel_attached_encoder(connector);
struct drm_crtc *possible_crtc;
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = NULL;
struct drm_device *dev = encoder->dev;
- struct drm_framebuffer *old_fb;
+ struct drm_framebuffer *fb;
int i = -1;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
@@ -5615,21 +5780,12 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
if (encoder->crtc) {
crtc = encoder->crtc;
- intel_crtc = to_intel_crtc(crtc);
- old->dpms_mode = intel_crtc->dpms_mode;
+ old->dpms_mode = connector->dpms;
old->load_detect_temp = false;
/* Make sure the crtc and connector are running */
- if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
- struct drm_encoder_helper_funcs *encoder_funcs;
- struct drm_crtc_helper_funcs *crtc_funcs;
-
- crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-
- encoder_funcs = encoder->helper_private;
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
- }
+ if (connector->dpms != DRM_MODE_DPMS_ON)
+ connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
return true;
}
@@ -5653,19 +5809,17 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
return false;
}
- encoder->crtc = crtc;
- connector->encoder = encoder;
+ intel_encoder->new_crtc = to_intel_crtc(crtc);
+ to_intel_connector(connector)->new_encoder = intel_encoder;
intel_crtc = to_intel_crtc(crtc);
- old->dpms_mode = intel_crtc->dpms_mode;
+ old->dpms_mode = connector->dpms;
old->load_detect_temp = true;
old->release_fb = NULL;
if (!mode)
mode = &load_detect_mode;
- old_fb = crtc->fb;
-
/* We need a framebuffer large enough to accommodate all accesses
* that the plane may generate whilst we perform load detection.
* We can not rely on the fbcon either being present (we get called
@@ -5673,50 +5827,52 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
* not even exist) or that it is large enough to satisfy the
* requested mode.
*/
- crtc->fb = mode_fits_in_fbdev(dev, mode);
- if (crtc->fb == NULL) {
+ fb = mode_fits_in_fbdev(dev, mode);
+ if (fb == NULL) {
DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
- crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
- old->release_fb = crtc->fb;
+ fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
+ old->release_fb = fb;
} else
DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
- if (IS_ERR(crtc->fb)) {
+ if (IS_ERR(fb)) {
DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
- crtc->fb = old_fb;
- return false;
+ goto fail;
}
- if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
+ if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
- crtc->fb = old_fb;
- return false;
+ goto fail;
}
/* let the connector get through one full cycle before testing */
intel_wait_for_vblank(dev, intel_crtc->pipe);
return true;
+fail:
+ connector->encoder = NULL;
+ encoder->crtc = NULL;
+ return false;
}
-void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
- struct drm_connector *connector,
+void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old)
{
+ struct intel_encoder *intel_encoder =
+ intel_attached_encoder(connector);
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_device *dev = encoder->dev;
- struct drm_crtc *crtc = encoder->crtc;
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector),
encoder->base.id, drm_get_encoder_name(encoder));
if (old->load_detect_temp) {
- connector->encoder = NULL;
- drm_helper_disable_unused_functions(dev);
+ struct drm_crtc *crtc = encoder->crtc;
+
+ to_intel_connector(connector)->new_encoder = NULL;
+ intel_encoder->new_crtc = NULL;
+ intel_set_mode(crtc, NULL, 0, 0, NULL);
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
@@ -5725,10 +5881,8 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
}
/* Switch crtc and encoder back off if necessary */
- if (old->dpms_mode != DRM_MODE_DPMS_ON) {
- encoder_funcs->dpms(encoder, old->dpms_mode);
- crtc_funcs->dpms(crtc, old->dpms_mode);
- }
+ if (old->dpms_mode != DRM_MODE_DPMS_ON)
+ connector->funcs->dpms(connector, old->dpms_mode);
}
/* Returns the clock of the currently programmed mode of the given pipe. */
@@ -5850,46 +6004,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
return mode;
}
-#define GPU_IDLE_TIMEOUT 500 /* ms */
-
-/* When this timer fires, we've been idle for awhile */
-static void intel_gpu_idle_timer(unsigned long arg)
-{
- struct drm_device *dev = (struct drm_device *)arg;
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (!list_empty(&dev_priv->mm.active_list)) {
- /* Still processing requests, so just re-arm the timer. */
- mod_timer(&dev_priv->idle_timer, jiffies +
- msecs_to_jiffies(GPU_IDLE_TIMEOUT));
- return;
- }
-
- dev_priv->busy = false;
- queue_work(dev_priv->wq, &dev_priv->idle_work);
-}
-
-#define CRTC_IDLE_TIMEOUT 1000 /* ms */
-
-static void intel_crtc_idle_timer(unsigned long arg)
-{
- struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
- struct drm_crtc *crtc = &intel_crtc->base;
- drm_i915_private_t *dev_priv = crtc->dev->dev_private;
- struct intel_framebuffer *intel_fb;
-
- intel_fb = to_intel_framebuffer(crtc->fb);
- if (intel_fb && intel_fb->obj->active) {
- /* The framebuffer is still being accessed by the GPU. */
- mod_timer(&intel_crtc->idle_timer, jiffies +
- msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
- return;
- }
-
- intel_crtc->busy = false;
- queue_work(dev_priv->wq, &dev_priv->idle_work);
-}
-
static void intel_increase_pllclock(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -5919,10 +6033,6 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
if (dpll & DISPLAY_RATE_SELECT_FPA1)
DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
}
-
- /* Schedule downclock */
- mod_timer(&intel_crtc->idle_timer, jiffies +
- msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
}
static void intel_decrease_pllclock(struct drm_crtc *crtc)
@@ -5961,89 +6071,46 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
}
-/**
- * intel_idle_update - adjust clocks for idleness
- * @work: work struct
- *
- * Either the GPU or display (or both) went idle. Check the busy status
- * here and adjust the CRTC and GPU clocks as necessary.
- */
-static void intel_idle_update(struct work_struct *work)
+void intel_mark_busy(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
- idle_work);
- struct drm_device *dev = dev_priv->dev;
+ i915_update_gfx_val(dev->dev_private);
+}
+
+void intel_mark_idle(struct drm_device *dev)
+{
+}
+
+void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
struct drm_crtc *crtc;
- struct intel_crtc *intel_crtc;
if (!i915_powersave)
return;
- mutex_lock(&dev->struct_mutex);
-
- i915_update_gfx_val(dev_priv);
-
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- /* Skip inactive CRTCs */
if (!crtc->fb)
continue;
- intel_crtc = to_intel_crtc(crtc);
- if (!intel_crtc->busy)
- intel_decrease_pllclock(crtc);
+ if (to_intel_framebuffer(crtc->fb)->obj == obj)
+ intel_increase_pllclock(crtc);
}
-
-
- mutex_unlock(&dev->struct_mutex);
}
-/**
- * intel_mark_busy - mark the GPU and possibly the display busy
- * @dev: drm device
- * @obj: object we're operating on
- *
- * Callers can use this function to indicate that the GPU is busy processing
- * commands. If @obj matches one of the CRTC objects (i.e. it's a scanout
- * buffer), we'll also mark the display as busy, so we know to increase its
- * clock frequency.
- */
-void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
+void intel_mark_fb_idle(struct drm_i915_gem_object *obj)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = NULL;
- struct intel_framebuffer *intel_fb;
- struct intel_crtc *intel_crtc;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- if (!dev_priv->busy) {
- intel_sanitize_pm(dev);
- dev_priv->busy = true;
- } else
- mod_timer(&dev_priv->idle_timer, jiffies +
- msecs_to_jiffies(GPU_IDLE_TIMEOUT));
+ struct drm_device *dev = obj->base.dev;
+ struct drm_crtc *crtc;
- if (obj == NULL)
+ if (!i915_powersave)
return;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->fb)
continue;
- intel_crtc = to_intel_crtc(crtc);
- intel_fb = to_intel_framebuffer(crtc->fb);
- if (intel_fb->obj == obj) {
- if (!intel_crtc->busy) {
- /* Non-busy -> busy, upclock */
- intel_increase_pllclock(crtc);
- intel_crtc->busy = true;
- } else {
- /* Busy -> busy, put off timer */
- mod_timer(&intel_crtc->idle_timer, jiffies +
- msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
- }
- }
+ if (to_intel_framebuffer(crtc->fb)->obj == obj)
+ intel_decrease_pllclock(crtc);
}
}
@@ -6394,7 +6461,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
default:
WARN_ONCE(1, "unknown plane in flip command\n");
ret = -ENODEV;
- goto err;
+ goto err_unpin;
}
ret = intel_ring_begin(ring, 4);
@@ -6502,7 +6569,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
goto cleanup_pending;
intel_disable_fbc(dev);
- intel_mark_busy(dev, obj);
+ intel_mark_fb_busy(obj);
mutex_unlock(&dev->struct_mutex);
trace_i915_flip_request(intel_crtc->plane, obj);
@@ -6527,81 +6594,807 @@ free_work:
return ret;
}
-static void intel_sanitize_modesetting(struct drm_device *dev,
- int pipe, int plane)
+static struct drm_crtc_helper_funcs intel_helper_funcs = {
+ .mode_set_base_atomic = intel_pipe_set_base_atomic,
+ .load_lut = intel_crtc_load_lut,
+ .disable = intel_crtc_noop,
+};
+
+bool intel_encoder_check_is_cloned(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg, val;
- int i;
+ struct intel_encoder *other_encoder;
+ struct drm_crtc *crtc = &encoder->new_crtc->base;
- /* Clear any frame start delays used for debugging left by the BIOS */
- for_each_pipe(i) {
- reg = PIPECONF(i);
- I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+ if (WARN_ON(!crtc))
+ return false;
+
+ list_for_each_entry(other_encoder,
+ &crtc->dev->mode_config.encoder_list,
+ base.head) {
+
+ if (&other_encoder->new_crtc->base != crtc ||
+ encoder == other_encoder)
+ continue;
+ else
+ return true;
}
- if (HAS_PCH_SPLIT(dev))
- return;
+ return false;
+}
- /* Who knows what state these registers were left in by the BIOS or
- * grub?
- *
- * If we leave the registers in a conflicting state (e.g. with the
- * display plane reading from the other pipe than the one we intend
- * to use) then when we attempt to teardown the active mode, we will
- * not disable the pipes and planes in the correct order -- leaving
- * a plane reading from a disabled pipe and possibly leading to
- * undefined behaviour.
+static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
+ struct drm_crtc *crtc)
+{
+ struct drm_device *dev;
+ struct drm_crtc *tmp;
+ int crtc_mask = 1;
+
+ WARN(!crtc, "checking null crtc?\n");
+
+ dev = crtc->dev;
+
+ list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
+ if (tmp == crtc)
+ break;
+ crtc_mask <<= 1;
+ }
+
+ if (encoder->possible_crtcs & crtc_mask)
+ return true;
+ return false;
+}
+
+/**
+ * intel_modeset_update_staged_output_state
+ *
+ * Updates the staged output configuration state, e.g. after we've read out the
+ * current hw state.
+ */
+static void intel_modeset_update_staged_output_state(struct drm_device *dev)
+{
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ connector->new_encoder =
+ to_intel_encoder(connector->base.encoder);
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ encoder->new_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ }
+}
+
+/**
+ * intel_modeset_commit_output_state
+ *
+ * This function copies the staged display pipe configuration to the real one.
+ */
+static void intel_modeset_commit_output_state(struct drm_device *dev)
+{
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ connector->base.encoder = &connector->new_encoder->base;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ encoder->base.crtc = &encoder->new_crtc->base;
+ }
+}
+
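
Staged vs. committed state is the heart of the new modeset flow: new_encoder/new_crtc hold the proposed configuration, and only after the affected pipes are shut down does the commit copy them into the live pointers. A toy two-phase update in the same shape (types invented):

	#include <stdio.h>

	struct toy_connector {
		int encoder;		/* live link */
		int new_encoder;	/* staged link */
	};

	static void commit_output_state(struct toy_connector *c, int n)
	{
		int i;

		for (i = 0; i < n; i++)
			c[i].encoder = c[i].new_encoder;	/* staged goes live */
	}

	int main(void)
	{
		struct toy_connector conn[] = { { 0, 1 }, { 2, 2 } };

		commit_output_state(conn, 2);
		printf("%d %d\n", conn[0].encoder, conn[1].encoder);	/* 1 2 */
		return 0;
	}
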
+static struct drm_display_mode *
+intel_modeset_adjusted_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_display_mode *adjusted_mode;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct intel_encoder *encoder;
+
+ adjusted_mode = drm_mode_duplicate(dev, mode);
+ if (!adjusted_mode)
+ return ERR_PTR(-ENOMEM);
+
+ /* Pass our mode to the connectors and the CRTC to give them a chance to
+ * adjust it according to limitations or connector properties, and also
+ * a chance to reject the mode entirely.
*/
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
- reg = DSPCNTR(plane);
- val = I915_READ(reg);
+ if (&encoder->new_crtc->base != crtc)
+ continue;
+ encoder_funcs = encoder->base.helper_private;
+ if (!(encoder_funcs->mode_fixup(&encoder->base, mode,
+ adjusted_mode))) {
+ DRM_DEBUG_KMS("Encoder fixup failed\n");
+ goto fail;
+ }
+ }
- if ((val & DISPLAY_PLANE_ENABLE) == 0)
- return;
- if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
- return;
+ if (!(intel_crtc_mode_fixup(crtc, mode, adjusted_mode))) {
+ DRM_DEBUG_KMS("CRTC fixup failed\n");
+ goto fail;
+ }
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
- /* This display plane is active and attached to the other CPU pipe. */
- pipe = !pipe;
+ return adjusted_mode;
+fail:
+ drm_mode_destroy(dev, adjusted_mode);
+ return ERR_PTR(-EINVAL);
+}
- /* Disable the plane and wait for it to stop reading from the pipe. */
- intel_disable_plane(dev_priv, plane, pipe);
- intel_disable_pipe(dev_priv, pipe);
+/* Computes which crtcs are affected and sets the relevant bits in the mask. For
+ * simplicity we use the crtc's pipe number (because it's easier to obtain). */
+static void
+intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
+ unsigned *prepare_pipes, unsigned *disable_pipes)
+{
+ struct intel_crtc *intel_crtc;
+ struct drm_device *dev = crtc->dev;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ struct drm_crtc *tmp_crtc;
+
+ *disable_pipes = *modeset_pipes = *prepare_pipes = 0;
+
+ /* Check which crtcs have changed outputs connected to them; these need
+ * to be part of the prepare_pipes mask. We don't (yet) support global
+ * modeset across multiple crtcs, so modeset_pipes will only have one
+ * bit set at most. */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->base.encoder == &connector->new_encoder->base)
+ continue;
+
+ if (connector->base.encoder) {
+ tmp_crtc = connector->base.encoder->crtc;
+
+ *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
+ }
+
+ if (connector->new_encoder)
+ *prepare_pipes |=
+ 1 << connector->new_encoder->new_crtc->pipe;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->base.crtc == &encoder->new_crtc->base)
+ continue;
+
+ if (encoder->base.crtc) {
+ tmp_crtc = encoder->base.crtc;
+
+ *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
+ }
+
+ if (encoder->new_crtc)
+ *prepare_pipes |= 1 << encoder->new_crtc->pipe;
+ }
+
+ /* Check for any pipes that will be fully disabled ... */
+ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ bool used = false;
+
+ /* Don't try to disable disabled crtcs. */
+ if (!intel_crtc->base.enabled)
+ continue;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->new_crtc == intel_crtc)
+ used = true;
+ }
+
+ if (!used)
+ *disable_pipes |= 1 << intel_crtc->pipe;
+ }
+
+
+ /* set_mode is also used to update properties on live display pipes. */
+ intel_crtc = to_intel_crtc(crtc);
+ if (crtc->enabled)
+ *prepare_pipes |= 1 << intel_crtc->pipe;
+
+ /* We only support modeset on a single crtc, hence we need to do that
+ * only for the passed-in crtc iff we change anything other than just
+ * disabling crtcs.
+ *
+ * This is actually not true; to be fully compatible with the old crtc
+ * helper we automatically disable _any_ output (i.e. it doesn't need to
+ * be connected to the crtc we're modesetting on) if it's disconnected.
+ * Which is a rather nutty api (since changing the output configuration
+ * without userspace's explicit request can lead to confusion), but
+ * alas. Hence we currently need to modeset on all pipes we prepare. */
+ if (*prepare_pipes)
+ *modeset_pipes = *prepare_pipes;
+
+ /* ... and mask these out. */
+ *modeset_pipes &= ~(*disable_pipes);
+ *prepare_pipes &= ~(*disable_pipes);
}
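
All of the pipe bookkeeping above is bitmask arithmetic keyed on the pipe number: prepare_pipes collects every pipe whose outputs change, disable_pipes the pipes losing all encoders, and the final masking keeps the sets disjoint. A toy rerun of that last step:

	#include <stdio.h>

	int main(void)
	{
		unsigned prepare_pipes = 0x3;	/* pipes A and B are touched */
		unsigned disable_pipes = 0x2;	/* pipe B loses all encoders */
		unsigned modeset_pipes = prepare_pipes;

		/* fully disabled pipes need neither prepare nor modeset */
		modeset_pipes &= ~disable_pipes;
		prepare_pipes &= ~disable_pipes;

		printf("modeset=%x prepare=%x disable=%x\n",
		       modeset_pipes, prepare_pipes, disable_pipes);
		return 0;
	}
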
-static void intel_crtc_reset(struct drm_crtc *crtc)
+static bool intel_crtc_in_use(struct drm_crtc *crtc)
{
+ struct drm_encoder *encoder;
struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- /* Reset flags back to the 'unknown' status so that they
- * will be correctly set on the initial modeset.
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ if (encoder->crtc == crtc)
+ return true;
+
+ return false;
+}
+
+static void
+intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
+{
+ struct intel_encoder *intel_encoder;
+ struct intel_crtc *intel_crtc;
+ struct drm_connector *connector;
+
+ list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (!intel_encoder->base.crtc)
+ continue;
+
+ intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
+
+ if (prepare_pipes & (1 << intel_crtc->pipe))
+ intel_encoder->connectors_active = false;
+ }
+
+ intel_modeset_commit_output_state(dev);
+
+ /* Update computed state. */
+ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
+ }
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder || !connector->encoder->crtc)
+ continue;
+
+ intel_crtc = to_intel_crtc(connector->encoder->crtc);
+
+ if (prepare_pipes & (1 << intel_crtc->pipe)) {
+ struct drm_property *dpms_property =
+ dev->mode_config.dpms_property;
+
+ connector->dpms = DRM_MODE_DPMS_ON;
+ drm_connector_property_set_value(connector,
+ dpms_property,
+ DRM_MODE_DPMS_ON);
+
+ intel_encoder = to_intel_encoder(connector->encoder);
+ intel_encoder->connectors_active = true;
+ }
+ }
+
+}
+
+#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
+ list_for_each_entry((intel_crtc), \
+ &(dev)->mode_config.crtc_list, \
+ base.head) \
+ if (mask & (1 <<(intel_crtc)->pipe)) \
+
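
The macro walks the crtc list and skips every crtc whose pipe bit is clear in the mask. Open-coded over pipe numbers, the equivalent loop looks like this (sketch):

	#include <stdio.h>

	int main(void)
	{
		unsigned mask = 0x5;	/* pipes 0 and 2 selected */
		int pipe;

		for (pipe = 0; pipe < 3; pipe++)
			if (mask & (1 << pipe))
				printf("visiting pipe %d\n", pipe);
		return 0;
	}
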
+void
+intel_modeset_check_state(struct drm_device *dev)
+{
+ struct intel_crtc *crtc;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ /* This also checks the encoder/connector hw state with the
+ * ->get_hw_state callbacks. */
+ intel_connector_check_state(connector);
+
+ WARN(&connector->new_encoder->base != connector->base.encoder,
+ "connector's staged encoder doesn't match current encoder\n");
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ bool enabled = false;
+ bool active = false;
+ enum pipe pipe, tracked_pipe;
+
+ DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base));
+
+ WARN(&encoder->new_crtc->base != encoder->base.crtc,
+ "encoder's stage crtc doesn't match current crtc\n");
+ WARN(encoder->connectors_active && !encoder->base.crtc,
+ "encoder's active_connectors set, but no crtc\n");
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->base.encoder != &encoder->base)
+ continue;
+ enabled = true;
+ if (connector->base.dpms != DRM_MODE_DPMS_OFF)
+ active = true;
+ }
+ WARN(!!encoder->base.crtc != enabled,
+ "encoder's enabled state mismatch "
+ "(expected %i, found %i)\n",
+ !!encoder->base.crtc, enabled);
+ WARN(active && !encoder->base.crtc,
+ "active encoder with no crtc\n");
+
+ WARN(encoder->connectors_active != active,
+ "encoder's computed active state doesn't match tracked active state "
+ "(expected %i, found %i)\n", active, encoder->connectors_active);
+
+ active = encoder->get_hw_state(encoder, &pipe);
+ WARN(active != encoder->connectors_active,
+ "encoder's hw state doesn't match sw tracking "
+ "(expected %i, found %i)\n",
+ encoder->connectors_active, active);
+
+ if (!encoder->base.crtc)
+ continue;
+
+ tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
+ WARN(active && pipe != tracked_pipe,
+ "active encoder's pipe doesn't match"
+ "(expected %i, found %i)\n",
+ tracked_pipe, pipe);
+
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ bool enabled = false;
+ bool active = false;
+
+ DRM_DEBUG_KMS("[CRTC:%d]\n",
+ crtc->base.base.id);
+
+ WARN(crtc->active && !crtc->base.enabled,
+ "active crtc, but not enabled in sw tracking\n");
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->base.crtc != &crtc->base)
+ continue;
+ enabled = true;
+ if (encoder->connectors_active)
+ active = true;
+ }
+ WARN(active != crtc->active,
+ "crtc's computed active state doesn't match tracked active state "
+ "(expected %i, found %i)\n", active, crtc->active);
+ WARN(enabled != crtc->base.enabled,
+ "crtc's computed enabled state doesn't match tracked enabled state "
+ "(expected %i, found %i)\n", enabled, crtc->base.enabled);
+
+ assert_pipe(dev->dev_private, crtc->pipe, crtc->active);
+ }
+}
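Condensed, the checker enforces one consistency rule per level of the output pipeline. The following sketch restates just the encoder-level rule, using the same helpers as above:

    /* Sketch of the core encoder invariant: the hw readout must agree
     * with the sw connectors_active tracking, and an active encoder
     * must sit on the pipe the sw side thinks it drives. */
    static void check_one_encoder(struct intel_encoder *encoder)
    {
        enum pipe pipe;
        bool hw_active = encoder->get_hw_state(encoder, &pipe);

        WARN(hw_active != encoder->connectors_active,
             "encoder hw/sw mismatch (hw %i, sw %i)\n",
             hw_active, encoder->connectors_active);

        if (hw_active && encoder->base.crtc)
            WARN(pipe != to_intel_crtc(encoder->base.crtc)->pipe,
                 "encoder on wrong pipe\n");
    }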
+
+bool intel_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct drm_encoder *encoder;
+ struct intel_crtc *intel_crtc;
+ unsigned disable_pipes, prepare_pipes, modeset_pipes;
+ bool ret = true;
+
+ intel_modeset_affected_pipes(crtc, &modeset_pipes,
+ &prepare_pipes, &disable_pipes);
+
+ DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
+ modeset_pipes, prepare_pipes, disable_pipes);
+
+ for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
+ intel_crtc_disable(&intel_crtc->base);
+
+ saved_hwmode = crtc->hwmode;
+ saved_mode = crtc->mode;
+
+ /* Hack: Because we don't (yet) support global modeset on multiple
+ * crtcs, we don't keep track of the new mode for more than one crtc.
+ * Hence simply check whether any bit is set in modeset_pipes in all the
+ * pieces of code that are not yet converted to deal with multiple crtcs
+ * changing their mode at the same time. */
+ adjusted_mode = NULL;
+ if (modeset_pipes) {
+ adjusted_mode = intel_modeset_adjusted_mode(crtc, mode);
+ if (IS_ERR(adjusted_mode)) {
+ return false;
+ }
+ }
+
+ for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
+ if (intel_crtc->base.enabled)
+ dev_priv->display.crtc_disable(&intel_crtc->base);
+ }
+
+ /* crtc->mode is already used by the ->mode_set callbacks, hence we need
+ * to set it here already even though we pass it down the call chain.
*/
- intel_crtc->dpms_mode = -1;
+ if (modeset_pipes)
+ crtc->mode = *mode;
- /* We need to fix up any BIOS configuration that conflicts with
- * our expectations.
+ /* Only after disabling all output pipelines that will be changed can we
+ * update the output configuration. */
+ intel_modeset_update_state(dev, prepare_pipes);
+
+ /* Set up the DPLL and any encoders state that needs to adjust or depend
+ * on the DPLL.
*/
- intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
+ for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
+ ret = !intel_crtc_mode_set(&intel_crtc->base,
+ mode, adjusted_mode,
+ x, y, fb);
+ if (!ret)
+ goto done;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+ if (encoder->crtc != &intel_crtc->base)
+ continue;
+
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
+ encoder->base.id, drm_get_encoder_name(encoder),
+ mode->base.id, mode->name);
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->mode_set(encoder, mode, adjusted_mode);
+ }
+ }
+
+ /* Now enable the clocks, plane, pipe, and connectors that we set up. */
+ for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
+ dev_priv->display.crtc_enable(&intel_crtc->base);
+
+ if (modeset_pipes) {
+ /* Store real post-adjustment hardware mode. */
+ crtc->hwmode = *adjusted_mode;
+
+ /* Calculate and store various constants which
+ * are later needed by vblank and swap-completion
+ * timestamping. They are derived from true hwmode.
+ */
+ drm_calc_timestamping_constants(crtc);
+ }
+
+ /* FIXME: add subpixel order */
+done:
+ drm_mode_destroy(dev, adjusted_mode);
+ if (!ret && crtc->enabled) {
+ crtc->hwmode = saved_hwmode;
+ crtc->mode = saved_mode;
+ } else {
+ intel_modeset_check_state(dev);
+ }
+
+ return ret;
}
-static struct drm_crtc_helper_funcs intel_helper_funcs = {
- .dpms = intel_crtc_dpms,
- .mode_fixup = intel_crtc_mode_fixup,
- .mode_set = intel_crtc_mode_set,
- .mode_set_base = intel_pipe_set_base,
- .mode_set_base_atomic = intel_pipe_set_base_atomic,
- .load_lut = intel_crtc_load_lut,
- .disable = intel_crtc_disable,
-};
+#undef for_each_intel_crtc_masked
+
+static void intel_set_config_free(struct intel_set_config *config)
+{
+ if (!config)
+ return;
+
+ kfree(config->save_connector_encoders);
+ kfree(config->save_encoder_crtcs);
+ kfree(config);
+}
+
+static int intel_set_config_save_state(struct drm_device *dev,
+ struct intel_set_config *config)
+{
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int count;
+
+ config->save_encoder_crtcs =
+ kcalloc(dev->mode_config.num_encoder,
+ sizeof(struct drm_crtc *), GFP_KERNEL);
+ if (!config->save_encoder_crtcs)
+ return -ENOMEM;
+
+ config->save_connector_encoders =
+ kcalloc(dev->mode_config.num_connector,
+ sizeof(struct drm_encoder *), GFP_KERNEL);
+ if (!config->save_connector_encoders)
+ return -ENOMEM;
+
+ /* Copy data. Note that driver private data is not affected.
+ * Should anything bad happen only the expected state is
+ * restored, not the driver's personal bookkeeping.
+ */
+ count = 0;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ config->save_encoder_crtcs[count++] = encoder->crtc;
+ }
+
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ config->save_connector_encoders[count++] = connector->encoder;
+ }
+
+ return 0;
+}
+
+static void intel_set_config_restore_state(struct drm_device *dev,
+ struct intel_set_config *config)
+{
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ int count;
+
+ count = 0;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ encoder->new_crtc =
+ to_intel_crtc(config->save_encoder_crtcs[count++]);
+ }
+
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
+ connector->new_encoder =
+ to_intel_encoder(config->save_connector_encoders[count++]);
+ }
+}
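save_state, stage_output_state and restore_state are meant to be used as a transaction around a modeset attempt; intel_crtc_set_config() below wires them up roughly like this (sketch, error paths trimmed):

    /* Outline: snapshot the current links, stage the new ones, and roll
     * the staged pointers back if staging or the commit step fails. */
    if (intel_set_config_save_state(dev, config))
        return -ENOMEM;

    if (intel_modeset_stage_output_state(dev, set, config) ||
        !intel_set_mode(set->crtc, set->mode, set->x, set->y, set->fb))
        intel_set_config_restore_state(dev, config);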
+
+static void
+intel_set_config_compute_mode_changes(struct drm_mode_set *set,
+ struct intel_set_config *config)
+{
+
+ /* We should be able to check here if the fb has the same properties
+ * and then just flip_or_move it */
+ if (set->crtc->fb != set->fb) {
+ /* If we have no fb then treat it as a full mode set */
+ if (set->crtc->fb == NULL) {
+ DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
+ config->mode_changed = true;
+ } else if (set->fb == NULL) {
+ config->mode_changed = true;
+ } else if (set->fb->depth != set->crtc->fb->depth) {
+ config->mode_changed = true;
+ } else if (set->fb->bits_per_pixel !=
+ set->crtc->fb->bits_per_pixel) {
+ config->mode_changed = true;
+ } else
+ config->fb_changed = true;
+ }
+
+ if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
+ config->fb_changed = true;
+
+ if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
+ DRM_DEBUG_KMS("modes are different, full mode set\n");
+ drm_mode_debug_printmodeline(&set->crtc->mode);
+ drm_mode_debug_printmodeline(set->mode);
+ config->mode_changed = true;
+ }
+}
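One useful property of this classifier: a plain framebuffer swap with matching depth and bpp, at the same origin and mode, must come out as fb_changed only, so it takes the intel_pipe_set_base() fast path rather than a full modeset. A hedged sanity sketch, with new_fb standing in for any compatible framebuffer:

    struct intel_set_config cfg = {};
    struct drm_mode_set flip = {
        .crtc = crtc,
        .fb   = new_fb,   /* hypothetical fb, same depth/bpp as crtc->fb */
        .mode = &crtc->mode,
        .x    = crtc->x,
        .y    = crtc->y,
    };

    intel_set_config_compute_mode_changes(&flip, &cfg);
    WARN_ON(cfg.mode_changed || !cfg.fb_changed);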
+
+static int
+intel_modeset_stage_output_state(struct drm_device *dev,
+ struct drm_mode_set *set,
+ struct intel_set_config *config)
+{
+ struct drm_crtc *new_crtc;
+ struct intel_connector *connector;
+ struct intel_encoder *encoder;
+ int count, ro;
+
+ /* The upper layers ensure that we either disable a crtc or have a list
+ * of connectors. For paranoia, double-check this. */
+ WARN_ON(!set->fb && (set->num_connectors != 0));
+ WARN_ON(set->fb && (set->num_connectors == 0));
+
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ /* Traverse the passed-in connector list and stage the current
+ * encoder for each connector on it. */
+ for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro] == &connector->base) {
+ connector->new_encoder = connector->encoder;
+ break;
+ }
+ }
+
+ /* If we disable the crtc, disable all its connectors. Also, if
+ * the connector is on the changing crtc but not on the new
+ * connector list, disable it. */
+ if ((!set->fb || ro == set->num_connectors) &&
+ connector->base.encoder &&
+ connector->base.encoder->crtc == set->crtc) {
+ connector->new_encoder = NULL;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
+ connector->base.base.id,
+ drm_get_connector_name(&connector->base));
+ }
+
+ if (&connector->new_encoder->base != connector->base.encoder) {
+ DRM_DEBUG_KMS("encoder changed, full mode switch\n");
+ config->mode_changed = true;
+ }
+
+ /* Disable all disconnected encoders. */
+ if (connector->base.status == connector_status_disconnected)
+ connector->new_encoder = NULL;
+ }
+ /* connector->new_encoder is now updated for all connectors. */
+
+ /* Update crtc of enabled connectors. */
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (!connector->new_encoder)
+ continue;
+
+ new_crtc = connector->new_encoder->base.crtc;
+
+ for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro] == &connector->base)
+ new_crtc = set->crtc;
+ }
+
+ /* Make sure the new CRTC will work with the encoder */
+ if (!intel_encoder_crtc_ok(&connector->new_encoder->base,
+ new_crtc)) {
+ return -EINVAL;
+ }
+ connector->encoder->new_crtc = to_intel_crtc(new_crtc);
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
+ connector->base.base.id,
+ drm_get_connector_name(&connector->base),
+ new_crtc->base.id);
+ }
+
+ /* Check for any encoders that need to be disabled. */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->new_encoder == encoder) {
+ WARN_ON(!connector->new_encoder->new_crtc);
+
+ goto next_encoder;
+ }
+ }
+ encoder->new_crtc = NULL;
+next_encoder:
+ /* Only now check for crtc changes so we don't miss encoders
+ * that will be disabled. */
+ if (&encoder->new_crtc->base != encoder->base.crtc) {
+ DRM_DEBUG_KMS("crtc changed, full mode switch\n");
+ config->mode_changed = true;
+ }
+ }
+ /* Now we've also updated encoder->new_crtc for all encoders. */
+
+ return 0;
+}
+
+static int intel_crtc_set_config(struct drm_mode_set *set)
+{
+ struct drm_device *dev;
+ struct drm_mode_set save_set;
+ struct intel_set_config *config;
+ int ret;
+
+ BUG_ON(!set);
+ BUG_ON(!set->crtc);
+ BUG_ON(!set->crtc->helper_private);
+
+ if (!set->mode)
+ set->fb = NULL;
+
+ /* The fb helper likes to play gross jokes with ->mode_set_config.
+ * Unfortunately the crtc helper doesn't do much at all for this case,
+ * so we have to cope with this madness until the fb helper is fixed up. */
+ if (set->fb && set->num_connectors == 0)
+ return 0;
+
+ if (set->fb) {
+ DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
+ set->crtc->base.id, set->fb->base.id,
+ (int)set->num_connectors, set->x, set->y);
+ } else {
+ DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
+ }
+
+ dev = set->crtc->dev;
+
+ ret = -ENOMEM;
+ config = kzalloc(sizeof(*config), GFP_KERNEL);
+ if (!config)
+ goto out_config;
+
+ ret = intel_set_config_save_state(dev, config);
+ if (ret)
+ goto out_config;
+
+ save_set.crtc = set->crtc;
+ save_set.mode = &set->crtc->mode;
+ save_set.x = set->crtc->x;
+ save_set.y = set->crtc->y;
+ save_set.fb = set->crtc->fb;
+
+ /* Compute whether we need a full modeset, only an fb base update or no
+ * change at all. In the future we might also check whether only the
+ * mode changed, e.g. for LVDS where we only change the panel fitter in
+ * such cases. */
+ intel_set_config_compute_mode_changes(set, config);
+
+ ret = intel_modeset_stage_output_state(dev, set, config);
+ if (ret)
+ goto fail;
+
+ if (config->mode_changed) {
+ if (set->mode) {
+ DRM_DEBUG_KMS("attempting to set mode from"
+ " userspace\n");
+ drm_mode_debug_printmodeline(set->mode);
+ }
+
+ if (!intel_set_mode(set->crtc, set->mode,
+ set->x, set->y, set->fb)) {
+ DRM_ERROR("failed to set mode on [CRTC:%d]\n",
+ set->crtc->base.id);
+ ret = -EINVAL;
+ goto fail;
+ }
+ } else if (config->fb_changed) {
+ ret = intel_pipe_set_base(set->crtc,
+ set->x, set->y, set->fb);
+ }
+
+ intel_set_config_free(config);
+
+ return 0;
+
+fail:
+ intel_set_config_restore_state(dev, config);
+
+ /* Try to restore the config */
+ if (config->mode_changed &&
+ !intel_set_mode(save_set.crtc, save_set.mode,
+ save_set.x, save_set.y, save_set.fb))
+ DRM_ERROR("failed to restore config after modeset failure\n");
+
+out_config:
+ intel_set_config_free(config);
+ return ret;
+}
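For reference, callers (the setcrtc ioctl and the fbdev helper) hand the new hook a fully populated drm_mode_set; a minimal sketch, where crtc, fb, mode and connector are assumed to exist:

    struct drm_connector *connectors[] = { connector };
    struct drm_mode_set set = {
        .crtc = crtc,
        .fb = fb,
        .mode = mode,
        .connectors = connectors,
        .num_connectors = ARRAY_SIZE(connectors),
    };

    /* Normally reached through set.crtc->funcs->set_config(&set),
     * since intel_crtc_set_config() itself is static. */
    int ret = intel_crtc_set_config(&set);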
static const struct drm_crtc_funcs intel_crtc_funcs = {
- .reset = intel_crtc_reset,
.cursor_set = intel_crtc_cursor_set,
.cursor_move = intel_crtc_cursor_move,
.gamma_set = intel_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
+ .set_config = intel_crtc_set_config,
.destroy = intel_crtc_destroy,
.page_flip = intel_crtc_page_flip,
};
@@ -6655,24 +7448,9 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
- intel_crtc_reset(&intel_crtc->base);
- intel_crtc->active = true; /* force the pipe off on setup_init_config */
intel_crtc->bpp = 24; /* default for pre-Ironlake */
- if (HAS_PCH_SPLIT(dev)) {
- intel_helper_funcs.prepare = ironlake_crtc_prepare;
- intel_helper_funcs.commit = ironlake_crtc_commit;
- } else {
- intel_helper_funcs.prepare = i9xx_crtc_prepare;
- intel_helper_funcs.commit = i9xx_crtc_commit;
- }
-
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
-
- intel_crtc->busy = false;
-
- setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
- (unsigned long)intel_crtc);
}
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
@@ -6699,15 +7477,23 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
return 0;
}
-static int intel_encoder_clones(struct drm_device *dev, int type_mask)
+static int intel_encoder_clones(struct intel_encoder *encoder)
{
- struct intel_encoder *encoder;
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_encoder *source_encoder;
int index_mask = 0;
int entry = 0;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
- if (type_mask & encoder->clone_mask)
+ list_for_each_entry(source_encoder,
+ &dev->mode_config.encoder_list, base.head) {
+
+ if (encoder == source_encoder)
index_mask |= (1 << entry);
+
+ /* Intel hw has only one MUX where encoders could be cloned. */
+ if (encoder->cloneable && source_encoder->cloneable)
+ index_mask |= (1 << entry);
+
entry++;
}
@@ -6748,10 +7534,10 @@ static void intel_setup_outputs(struct drm_device *dev)
dpd_is_edp = intel_dpd_is_edp(dev);
if (has_edp_a(dev))
- intel_dp_init(dev, DP_A);
+ intel_dp_init(dev, DP_A, PORT_A);
if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
- intel_dp_init(dev, PCH_DP_D);
+ intel_dp_init(dev, PCH_DP_D, PORT_D);
}
intel_crt_init(dev);
@@ -6782,22 +7568,22 @@ static void intel_setup_outputs(struct drm_device *dev)
/* PCH SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev, PCH_SDVOB, true);
if (!found)
- intel_hdmi_init(dev, HDMIB);
+ intel_hdmi_init(dev, HDMIB, PORT_B);
if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
- intel_dp_init(dev, PCH_DP_B);
+ intel_dp_init(dev, PCH_DP_B, PORT_B);
}
if (I915_READ(HDMIC) & PORT_DETECTED)
- intel_hdmi_init(dev, HDMIC);
+ intel_hdmi_init(dev, HDMIC, PORT_C);
if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
- intel_hdmi_init(dev, HDMID);
+ intel_hdmi_init(dev, HDMID, PORT_D);
if (I915_READ(PCH_DP_C) & DP_DETECTED)
- intel_dp_init(dev, PCH_DP_C);
+ intel_dp_init(dev, PCH_DP_C, PORT_C);
if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
- intel_dp_init(dev, PCH_DP_D);
+ intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
int found;
@@ -6805,17 +7591,17 @@ static void intel_setup_outputs(struct drm_device *dev)
/* SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev, SDVOB, true);
if (!found)
- intel_hdmi_init(dev, SDVOB);
+ intel_hdmi_init(dev, SDVOB, PORT_B);
if (!found && (I915_READ(DP_B) & DP_DETECTED))
- intel_dp_init(dev, DP_B);
+ intel_dp_init(dev, DP_B, PORT_B);
}
if (I915_READ(SDVOC) & PORT_DETECTED)
- intel_hdmi_init(dev, SDVOC);
+ intel_hdmi_init(dev, SDVOC, PORT_C);
/* Shares lanes with HDMI on SDVOC */
if (I915_READ(DP_C) & DP_DETECTED)
- intel_dp_init(dev, DP_C);
+ intel_dp_init(dev, DP_C, PORT_C);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
@@ -6824,12 +7610,12 @@ static void intel_setup_outputs(struct drm_device *dev)
found = intel_sdvo_init(dev, SDVOB, true);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
- intel_hdmi_init(dev, SDVOB);
+ intel_hdmi_init(dev, SDVOB, PORT_B);
}
if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
DRM_DEBUG_KMS("probing DP_B\n");
- intel_dp_init(dev, DP_B);
+ intel_dp_init(dev, DP_B, PORT_B);
}
}
@@ -6844,18 +7630,18 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_INTEGRATED_HDMI(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
- intel_hdmi_init(dev, SDVOC);
+ intel_hdmi_init(dev, SDVOC, PORT_C);
}
if (SUPPORTS_INTEGRATED_DP(dev)) {
DRM_DEBUG_KMS("probing DP_C\n");
- intel_dp_init(dev, DP_C);
+ intel_dp_init(dev, DP_C, PORT_C);
}
}
if (SUPPORTS_INTEGRATED_DP(dev) &&
(I915_READ(DP_D) & DP_DETECTED)) {
DRM_DEBUG_KMS("probing DP_D\n");
- intel_dp_init(dev, DP_D);
+ intel_dp_init(dev, DP_D, PORT_D);
}
} else if (IS_GEN2(dev))
intel_dvo_init(dev);
@@ -6866,12 +7652,9 @@ static void intel_setup_outputs(struct drm_device *dev)
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
encoder->base.possible_crtcs = encoder->crtc_mask;
encoder->base.possible_clones =
- intel_encoder_clones(dev, encoder->clone_mask);
+ intel_encoder_clones(encoder);
}
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(dev);
-
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
ironlake_init_pch_refclk(dev);
}
@@ -6973,13 +7756,15 @@ static void intel_init_display(struct drm_device *dev)
/* We always want a DPMS function */
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->display.dpms = ironlake_crtc_dpms;
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
+ dev_priv->display.crtc_enable = ironlake_crtc_enable;
+ dev_priv->display.crtc_disable = ironlake_crtc_disable;
dev_priv->display.off = ironlake_crtc_off;
dev_priv->display.update_plane = ironlake_update_plane;
} else {
- dev_priv->display.dpms = i9xx_crtc_dpms;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+ dev_priv->display.crtc_enable = i9xx_crtc_enable;
+ dev_priv->display.crtc_disable = i9xx_crtc_disable;
dev_priv->display.off = i9xx_crtc_off;
dev_priv->display.update_plane = i9xx_update_plane;
}
@@ -7023,7 +7808,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.write_eld = ironlake_write_eld;
} else if (IS_HASWELL(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
- dev_priv->display.write_eld = ironlake_write_eld;
+ dev_priv->display.write_eld = haswell_write_eld;
} else
dev_priv->display.update_wm = NULL;
} else if (IS_G4X(dev)) {
@@ -7101,21 +7886,16 @@ static struct intel_quirk intel_quirks[] = {
/* HP Mini needs pipe A force quirk (LP: #322104) */
{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
- /* Thinkpad R31 needs pipe A force quirk */
- { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
- /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
- { 0x3577, 0x1014, 0x0513, quirk_pipea_force },
- /* ThinkPad X40 needs pipe A force quirk */
-
/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
/* 855 & before need to leave pipe A & dpll A up */
{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+ { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
/* Lenovo U160 cannot use SSC on LVDS */
{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
@@ -7231,10 +8011,251 @@ void intel_modeset_init(struct drm_device *dev)
/* Just disable it once at startup */
i915_disable_vga(dev);
intel_setup_outputs(dev);
+}
- INIT_WORK(&dev_priv->idle_work, intel_idle_update);
- setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
- (unsigned long)dev);
+static void
+intel_connector_break_all_links(struct intel_connector *connector)
+{
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+ connector->base.encoder = NULL;
+ connector->encoder->connectors_active = false;
+ connector->encoder->base.crtc = NULL;
+}
+
+static void intel_enable_pipe_a(struct drm_device *dev)
+{
+ struct intel_connector *connector;
+ struct drm_connector *crt = NULL;
+ struct intel_load_detect_pipe load_detect_temp;
+
+ /* We can't just switch on pipe A; we need to set things up with a
+ * proper mode and output configuration. As a gross hack, enable pipe A
+ * by enabling the load detect pipe once. */
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
+ crt = &connector->base;
+ break;
+ }
+ }
+
+ if (!crt)
+ return;
+
+ if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
+ intel_release_load_detect_pipe(crt, &load_detect_temp);
+}
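The get/release load-detect calls are a bracket pair; the quirk handler above exploits the side effect that one full enable/disable cycle leaves pipe A's required bits on. In their usual role they bracket an analog probe, roughly as below (do_probe() is a hypothetical stand-in for the detection step, and a NULL mode means "pick any"):

    struct intel_load_detect_pipe tmp;

    if (intel_get_load_detect_pipe(connector, NULL, &tmp)) {
        status = do_probe(connector);   /* hypothetical detection step */
        intel_release_load_detect_pipe(connector, &tmp);
    }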
+
+static void intel_sanitize_crtc(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg, val;
+
+ /* Clear any frame start delays used for debugging left by the BIOS */
+ reg = PIPECONF(crtc->pipe);
+ I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+
+ /* We need to sanitize the plane -> pipe mapping first because this will
+ * disable the crtc (and hence change the state) if it is wrong. */
+ if (!HAS_PCH_SPLIT(dev)) {
+ struct intel_connector *connector;
+ bool plane;
+
+ reg = DSPCNTR(crtc->plane);
+ val = I915_READ(reg);
+
+ if ((val & DISPLAY_PLANE_ENABLE) == 0 &&
+ (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
+ goto ok;
+
+ DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
+ crtc->base.base.id);
+
+ /* Pipe has the wrong plane attached and the plane is active.
+ * Temporarily change the plane mapping and disable everything
+ * ... */
+ plane = crtc->plane;
+ crtc->plane = !plane;
+ dev_priv->display.crtc_disable(&crtc->base);
+ crtc->plane = plane;
+
+ /* ... and break all links. */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->encoder->base.crtc != &crtc->base)
+ continue;
+
+ intel_connector_break_all_links(connector);
+ }
+
+ WARN_ON(crtc->active);
+ crtc->base.enabled = false;
+ }
+ok:
+
+ if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
+ crtc->pipe == PIPE_A && !crtc->active) {
+ /* BIOS forgot to enable pipe A, this mostly happens after
+ * resume. Force-enable the pipe to fix this; the update_dpms
+ * call below will restore the pipe to the right state, but leave
+ * the required bits on. */
+ intel_enable_pipe_a(dev);
+ }
+
+ /* Adjust the state of the output pipe according to whether we
+ * have active connectors/encoders. */
+ intel_crtc_update_dpms(&crtc->base);
+
+ if (crtc->active != crtc->base.enabled) {
+ struct intel_encoder *encoder;
+
+ /* This can happen either due to bugs in the get_hw_state
+ * functions or because the pipe is force-enabled due to the
+ * pipe A quirk. */
+ DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
+ crtc->base.base.id,
+ crtc->base.enabled ? "enabled" : "disabled",
+ crtc->active ? "enabled" : "disabled");
+
+ crtc->base.enabled = crtc->active;
+
+ /* Because we only establish the connector -> encoder ->
+ * crtc links if something is active, this means the
+ * crtc is now deactivated. Break the links. connector
+ * -> encoder links are only established when things are
+ * actually up, hence no need to break them. */
+ WARN_ON(crtc->active);
+
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
+ WARN_ON(encoder->connectors_active);
+ encoder->base.crtc = NULL;
+ }
+ }
+}
+
+static void intel_sanitize_encoder(struct intel_encoder *encoder)
+{
+ struct intel_connector *connector;
+ struct drm_device *dev = encoder->base.dev;
+
+ /* We need to check both for a crtc link (meaning that the
+ * encoder is active and trying to read from a pipe) and the
+ * pipe itself being active. */
+ bool has_active_crtc = encoder->base.crtc &&
+ to_intel_crtc(encoder->base.crtc)->active;
+
+ if (encoder->connectors_active && !has_active_crtc) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base));
+
+ /* Connector is active, but has no active pipe. This is
+ * fallout from our resume register restoring. Disable
+ * the encoder manually again. */
+ if (encoder->base.crtc) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base));
+ encoder->disable(encoder);
+ }
+
+ /* Inconsistent output/port/pipe state happens presumably due to
+ * a bug in one of the get_hw_state functions. Or someplace else
+ * in our code, like the register restore mess on resume. Clamp
+ * things to off as a safer default. */
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->encoder != encoder)
+ continue;
+
+ intel_connector_break_all_links(connector);
+ }
+ }
+ /* Enabled encoders without active connectors will be fixed in
+ * the crtc fixup. */
+}
+
+/* Scan out the current hw modeset state, sanitize it, and map it into the drm
+ * and i915 state tracking structures. */
+void intel_modeset_setup_hw_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe;
+ u32 tmp;
+ struct intel_crtc *crtc;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ for_each_pipe(pipe) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+ tmp = I915_READ(PIPECONF(pipe));
+ if (tmp & PIPECONF_ENABLE)
+ crtc->active = true;
+ else
+ crtc->active = false;
+
+ crtc->base.enabled = crtc->active;
+
+ DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
+ crtc->base.base.id,
+ crtc->active ? "enabled" : "disabled");
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ pipe = 0;
+
+ if (encoder->get_hw_state(encoder, &pipe)) {
+ encoder->base.crtc =
+ dev_priv->pipe_to_crtc_mapping[pipe];
+ } else {
+ encoder->base.crtc = NULL;
+ }
+
+ encoder->connectors_active = false;
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base),
+ encoder->base.crtc ? "enabled" : "disabled",
+ pipe);
+ }
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->get_hw_state(connector)) {
+ connector->base.dpms = DRM_MODE_DPMS_ON;
+ connector->encoder->connectors_active = true;
+ connector->base.encoder = &connector->encoder->base;
+ } else {
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+ connector->base.encoder = NULL;
+ }
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
+ connector->base.base.id,
+ drm_get_connector_name(&connector->base),
+ connector->base.encoder ? "enabled" : "disabled");
+ }
+
+ /* HW state is read out, now we need to sanitize this mess. */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ intel_sanitize_encoder(encoder);
+ }
+
+ for_each_pipe(pipe) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ intel_sanitize_crtc(crtc);
+ }
+
+ intel_modeset_update_staged_output_state(dev);
+
+ intel_modeset_check_state(dev);
}
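Every encoder the readout above touches must supply a get_hw_state() callback. A minimal sketch of one, for a hypothetical port whose control register carries an enable bit and a single pipe-select bit (FOO_CTL, FOO_ENABLE and FOO_PIPE_B_SELECT are placeholders, in the style of intel_dp_get_hw_state() below):

    static bool foo_get_hw_state(struct intel_encoder *encoder,
                                 enum pipe *pipe)
    {
        struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
        u32 tmp = I915_READ(FOO_CTL);   /* placeholder register */

        if (!(tmp & FOO_ENABLE))
            return false;               /* port off: *pipe stays untouched */

        *pipe = (tmp & FOO_PIPE_B_SELECT) ? PIPE_B : PIPE_A;
        return true;
    }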
void intel_modeset_gem_init(struct drm_device *dev)
@@ -7242,6 +8263,8 @@ void intel_modeset_gem_init(struct drm_device *dev)
intel_modeset_init_hw(dev);
intel_setup_overlay(dev);
+
+ intel_modeset_setup_hw_state(dev);
}
void intel_modeset_cleanup(struct drm_device *dev)
@@ -7280,19 +8303,11 @@ void intel_modeset_cleanup(struct drm_device *dev)
* enqueue unpin/hotplug work. */
drm_irq_uninstall(dev);
cancel_work_sync(&dev_priv->hotplug_work);
- cancel_work_sync(&dev_priv->rps_work);
+ cancel_work_sync(&dev_priv->rps.work);
/* flush any delayed tasks or pending work */
flush_scheduled_work();
- /* Shut off idle work before the crtcs get freed. */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- intel_crtc = to_intel_crtc(crtc);
- del_timer_sync(&intel_crtc->idle_timer);
- }
- del_timer_sync(&dev_priv->idle_timer);
- cancel_work_sync(&dev_priv->idle_work);
-
drm_mode_config_cleanup(dev);
}
@@ -7338,7 +8353,7 @@ struct intel_display_error_state {
u32 position;
u32 base;
u32 size;
- } cursor[2];
+ } cursor[I915_MAX_PIPES];
struct intel_pipe_error_state {
u32 conf;
@@ -7350,7 +8365,7 @@ struct intel_display_error_state {
u32 vtotal;
u32 vblank;
u32 vsync;
- } pipe[2];
+ } pipe[I915_MAX_PIPES];
struct intel_plane_error_state {
u32 control;
@@ -7360,7 +8375,7 @@ struct intel_display_error_state {
u32 addr;
u32 surface;
u32 tile_offset;
- } plane[2];
+ } plane[I915_MAX_PIPES];
};
struct intel_display_error_state *
@@ -7374,7 +8389,7 @@ intel_display_capture_error_state(struct drm_device *dev)
if (error == NULL)
return NULL;
- for (i = 0; i < 2; i++) {
+ for_each_pipe(i) {
error->cursor[i].control = I915_READ(CURCNTR(i));
error->cursor[i].position = I915_READ(CURPOS(i));
error->cursor[i].base = I915_READ(CURBASE(i));
@@ -7407,9 +8422,11 @@ intel_display_print_error_state(struct seq_file *m,
struct drm_device *dev,
struct intel_display_error_state *error)
{
+ drm_i915_private_t *dev_priv = dev->dev_private;
int i;
- for (i = 0; i < 2; i++) {
+ seq_printf(m, "Num Pipes: %d\n", dev_priv->num_pipe);
+ for_each_pipe(i) {
seq_printf(m, "Pipe [%d]:\n", i);
seq_printf(m, " CONF: %08x\n", error->pipe[i].conf);
seq_printf(m, " SRC: %08x\n", error->pipe[i].source);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f1bd4f4cd667..6c8746c030c7 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -35,42 +35,10 @@
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include <drm/drm_dp_helper.h>
-#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_LINK_STATUS_SIZE 6
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
-#define DP_LINK_CONFIGURATION_SIZE 9
-
-struct intel_dp {
- struct intel_encoder base;
- uint32_t output_reg;
- uint32_t DP;
- uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
- bool has_audio;
- enum hdmi_force_audio force_audio;
- uint32_t color_range;
- int dpms_mode;
- uint8_t link_bw;
- uint8_t lane_count;
- uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
- struct i2c_adapter adapter;
- struct i2c_algo_dp_aux_data algo;
- bool is_pch_edp;
- uint8_t train_set[4];
- int panel_power_up_delay;
- int panel_power_down_delay;
- int panel_power_cycle_delay;
- int backlight_on_delay;
- int backlight_off_delay;
- struct drm_display_mode *panel_fixed_mode; /* for eDP */
- struct delayed_work panel_vdd_work;
- bool want_panel_vdd;
- struct edid *edid; /* cached EDID for eDP */
- int edid_mode_count;
-};
-
/**
* is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct
@@ -839,9 +807,6 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
}
}
-static void ironlake_edp_pll_on(struct drm_encoder *encoder);
-static void ironlake_edp_pll_off(struct drm_encoder *encoder);
-
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -852,14 +817,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- /* Turn on the eDP PLL if needed */
- if (is_edp(intel_dp)) {
- if (!is_pch_edp(intel_dp))
- ironlake_edp_pll_on(encoder);
- else
- ironlake_edp_pll_off(encoder);
- }
-
/*
* There are four kinds of DP registers:
*
@@ -881,10 +838,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
* supposed to be read-only.
*/
intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
- intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
/* Handle DP bits in common between all three register formats */
-
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
switch (intel_dp->lane_count) {
@@ -931,7 +886,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
intel_dp->DP |= intel_crtc->pipe << 29;
/* don't miss out required setting for eDP */
- intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
@@ -953,7 +907,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
if (is_cpu_edp(intel_dp)) {
/* don't miss out required setting for eDP */
- intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
@@ -1224,27 +1177,49 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
msleep(intel_dp->backlight_off_delay);
}
-static void ironlake_edp_pll_on(struct drm_encoder *encoder)
+static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
+ assert_pipe_disabled(dev_priv,
+ to_intel_crtc(crtc)->pipe);
+
DRM_DEBUG_KMS("\n");
dpa_ctl = I915_READ(DP_A);
- dpa_ctl |= DP_PLL_ENABLE;
- I915_WRITE(DP_A, dpa_ctl);
+ WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
+ WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
+
+ /* We don't adjust intel_dp->DP while tearing down the link, to
+ * facilitate link retraining (e.g. after hotplug). Hence clear all
+ * enable bits here to ensure that we don't enable too much. */
+ intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
+ intel_dp->DP |= DP_PLL_ENABLE;
+ I915_WRITE(DP_A, intel_dp->DP);
POSTING_READ(DP_A);
udelay(200);
}
-static void ironlake_edp_pll_off(struct drm_encoder *encoder)
+static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
+ assert_pipe_disabled(dev_priv,
+ to_intel_crtc(crtc)->pipe);
+
dpa_ctl = I915_READ(DP_A);
+ WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
+ "dp pll off, should be on\n");
+ WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
+
+ /* We can't rely on the value tracked for the DP register in
+ * intel_dp->DP because link_down must not change that (otherwise link
+ * re-training will fail). */
dpa_ctl &= ~DP_PLL_ENABLE;
I915_WRITE(DP_A, dpa_ctl);
POSTING_READ(DP_A);
@@ -1281,10 +1256,57 @@ static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
}
}
-static void intel_dp_prepare(struct drm_encoder *encoder)
+static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp = I915_READ(intel_dp->output_reg);
+ if (!(tmp & DP_PORT_EN))
+ return false;
+
+ if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+ *pipe = PORT_TO_PIPE(tmp);
+ } else {
+ u32 trans_sel;
+ u32 trans_dp;
+ int i;
+
+ switch (intel_dp->output_reg) {
+ case PCH_DP_B:
+ trans_sel = TRANS_DP_PORT_SEL_B;
+ break;
+ case PCH_DP_C:
+ trans_sel = TRANS_DP_PORT_SEL_C;
+ break;
+ case PCH_DP_D:
+ trans_sel = TRANS_DP_PORT_SEL_D;
+ break;
+ default:
+ return true;
+ }
+
+ for_each_pipe(i) {
+ trans_dp = I915_READ(TRANS_DP_CTL(i));
+ if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
+ *pipe = i;
+ return true;
+ }
+ }
+ }
+
+ DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg);
+
+ return true;
+}
+
+static void intel_disable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
@@ -1292,14 +1314,31 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
ironlake_edp_backlight_off(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
ironlake_edp_panel_off(intel_dp);
- intel_dp_link_down(intel_dp);
+
+ /* CPU eDP may only be disabled _after_ the CPU pipe/plane is disabled. */
+ if (!is_cpu_edp(intel_dp))
+ intel_dp_link_down(intel_dp);
}
-static void intel_dp_commit(struct drm_encoder *encoder)
+static void intel_post_disable_dp(struct intel_encoder *encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_device *dev = encoder->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ if (is_cpu_edp(intel_dp)) {
+ intel_dp_link_down(intel_dp);
+ ironlake_edp_pll_off(intel_dp);
+ }
+}
+
+static void intel_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+
+ if (WARN_ON(dp_reg & DP_PORT_EN))
+ return;
ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
@@ -1308,47 +1347,14 @@ static void intel_dp_commit(struct drm_encoder *encoder)
ironlake_edp_panel_vdd_off(intel_dp, true);
intel_dp_complete_link_train(intel_dp);
ironlake_edp_backlight_on(intel_dp);
-
- intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
-
- if (HAS_PCH_CPT(dev))
- intel_cpt_verify_modeset(dev, intel_crtc->pipe);
}
-static void
-intel_dp_dpms(struct drm_encoder *encoder, int mode)
+static void intel_pre_enable_dp(struct intel_encoder *encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- if (mode != DRM_MODE_DPMS_ON) {
- /* Switching the panel off requires vdd. */
- ironlake_edp_panel_vdd_on(intel_dp);
- ironlake_edp_backlight_off(intel_dp);
- intel_dp_sink_dpms(intel_dp, mode);
- ironlake_edp_panel_off(intel_dp);
- intel_dp_link_down(intel_dp);
-
- if (is_cpu_edp(intel_dp))
- ironlake_edp_pll_off(encoder);
- } else {
- if (is_cpu_edp(intel_dp))
- ironlake_edp_pll_on(encoder);
-
- ironlake_edp_panel_vdd_on(intel_dp);
- intel_dp_sink_dpms(intel_dp, mode);
- if (!(dp_reg & DP_PORT_EN)) {
- intel_dp_start_link_train(intel_dp);
- ironlake_edp_panel_on(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp, true);
- intel_dp_complete_link_train(intel_dp);
- } else
- ironlake_edp_panel_vdd_off(intel_dp, false);
- ironlake_edp_backlight_on(intel_dp);
- }
- intel_dp->dpms_mode = mode;
+ if (is_cpu_edp(intel_dp))
+ ironlake_edp_pll_on(intel_dp);
}
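The four hooks slot into fixed points around the pipe transition; the crtc_enable/crtc_disable paths are assumed to call them in this order (sketch):

    /* enable path */
    encoder->pre_enable(encoder);   /* e.g. CPU eDP PLL on, pipe still off */
    /* ... pipe, planes and PLLs brought up ... */
    encoder->enable(encoder);       /* port on, link training, backlight */

    /* disable path */
    encoder->disable(encoder);      /* backlight/panel off, PCH link down */
    /* ... pipe and planes shut down ... */
    encoder->post_disable(encoder); /* CPU eDP link down, then PLL off */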
/*
@@ -1667,6 +1673,45 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
+ dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
+
+ switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ DRM_ERROR("DP training pattern 3 not supported\n");
+ dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+ break;
+ }
+
+ } else {
+ dp_reg_value &= ~DP_LINK_TRAIN_MASK;
+
+ switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ dp_reg_value |= DP_LINK_TRAIN_OFF;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ dp_reg_value |= DP_LINK_TRAIN_PAT_1;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ DRM_ERROR("DP training pattern 3 not supported\n");
+ dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+ break;
+ }
+ }
+
I915_WRITE(intel_dp->output_reg, dp_reg_value);
POSTING_READ(intel_dp->output_reg);
@@ -1674,12 +1719,15 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
DP_TRAINING_PATTERN_SET,
dp_train_pat);
- ret = intel_dp_aux_native_write(intel_dp,
- DP_TRAINING_LANE0_SET,
- intel_dp->train_set,
- intel_dp->lane_count);
- if (ret != intel_dp->lane_count)
- return false;
+ if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
+ DP_TRAINING_PATTERN_DISABLE) {
+ ret = intel_dp_aux_native_write(intel_dp,
+ DP_TRAINING_LANE0_SET,
+ intel_dp->train_set,
+ intel_dp->lane_count);
+ if (ret != intel_dp->lane_count)
+ return false;
+ }
return true;
}
@@ -1689,26 +1737,12 @@ static void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
int i;
uint8_t voltage;
bool clock_recovery = false;
int voltage_tries, loop_tries;
- u32 reg;
uint32_t DP = intel_dp->DP;
- /*
- * On CPT we have to enable the port in training pattern 1, which
- * will happen below in intel_dp_set_link_train. Otherwise, enable
- * the port and wait for it to become active.
- */
- if (!HAS_PCH_CPT(dev)) {
- I915_WRITE(intel_dp->output_reg, intel_dp->DP);
- POSTING_READ(intel_dp->output_reg);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
- }
-
/* Write the link configuration data */
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
intel_dp->link_configuration,
@@ -1716,10 +1750,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP |= DP_PORT_EN;
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
- DP &= ~DP_LINK_TRAIN_MASK_CPT;
- else
- DP &= ~DP_LINK_TRAIN_MASK;
memset(intel_dp->train_set, 0, 4);
voltage = 0xff;
voltage_tries = 0;
@@ -1743,12 +1773,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
- reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
- else
- reg = DP | DP_LINK_TRAIN_PAT_1;
-
- if (!intel_dp_set_link_train(intel_dp, reg,
+ if (!intel_dp_set_link_train(intel_dp, DP,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE))
break;
@@ -1803,10 +1828,8 @@ static void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
bool channel_eq = false;
int tries, cr_tries;
- u32 reg;
uint32_t DP = intel_dp->DP;
/* channel equalization */
@@ -1835,13 +1858,8 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
- reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
- else
- reg = DP | DP_LINK_TRAIN_PAT_2;
-
/* channel eq pattern */
- if (!intel_dp_set_link_train(intel_dp, reg,
+ if (!intel_dp_set_link_train(intel_dp, DP,
DP_TRAINING_PATTERN_2 |
DP_LINK_SCRAMBLING_DISABLE))
break;
@@ -1876,15 +1894,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
++tries;
}
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
- reg = DP | DP_LINK_TRAIN_OFF_CPT;
- else
- reg = DP | DP_LINK_TRAIN_OFF;
-
- I915_WRITE(intel_dp->output_reg, reg);
- POSTING_READ(intel_dp->output_reg);
- intel_dp_aux_native_write_1(intel_dp,
- DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
+ intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
}
static void
@@ -1894,18 +1904,11 @@ intel_dp_link_down(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t DP = intel_dp->DP;
- if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
+ if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
return;
DRM_DEBUG_KMS("\n");
- if (is_edp(intel_dp)) {
- DP &= ~DP_PLL_ENABLE;
- I915_WRITE(intel_dp->output_reg, DP);
- POSTING_READ(intel_dp->output_reg);
- udelay(100);
- }
-
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
@@ -1917,13 +1920,6 @@ intel_dp_link_down(struct intel_dp *intel_dp)
msleep(17);
- if (is_edp(intel_dp)) {
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
- DP |= DP_LINK_TRAIN_OFF_CPT;
- else
- DP |= DP_LINK_TRAIN_OFF;
- }
-
if (HAS_PCH_IBX(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
struct drm_crtc *crtc = intel_dp->base.base.crtc;
@@ -2032,10 +2028,10 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
u8 sink_irq_vector;
u8 link_status[DP_LINK_STATUS_SIZE];
- if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
+ if (!intel_dp->base.connectors_active)
return;
- if (!intel_dp->base.base.crtc)
+ if (WARN_ON(!intel_dp->base.base.crtc))
return;
/* Try to read receiver status if the link appears to be up */
@@ -2159,7 +2155,6 @@ intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *ada
ret = drm_add_edid_modes(connector, intel_dp->edid);
drm_edid_to_eld(connector,
intel_dp->edid);
- connector->display_info.raw_edid = NULL;
return intel_dp->edid_mode_count;
}
@@ -2205,7 +2200,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
edid = intel_dp_get_edid(connector, &intel_dp->adapter);
if (edid) {
intel_dp->has_audio = drm_detect_monitor_audio(edid);
- connector->display_info.raw_edid = NULL;
kfree(edid);
}
}
@@ -2270,8 +2264,6 @@ intel_dp_detect_audio(struct drm_connector *connector)
edid = intel_dp_get_edid(connector, &intel_dp->adapter);
if (edid) {
has_audio = drm_detect_monitor_audio(edid);
-
- connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -2325,9 +2317,8 @@ intel_dp_set_property(struct drm_connector *connector,
done:
if (intel_dp->base.base.crtc) {
struct drm_crtc *crtc = intel_dp->base.base.crtc;
- drm_crtc_helper_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y,
- crtc->fb);
+ intel_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
}
return 0;
@@ -2361,15 +2352,13 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
}
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
- .dpms = intel_dp_dpms,
.mode_fixup = intel_dp_mode_fixup,
- .prepare = intel_dp_prepare,
.mode_set = intel_dp_mode_set,
- .commit = intel_dp_commit,
+ .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_dp_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = intel_connector_dpms,
.detect = intel_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_set_property,
@@ -2440,7 +2429,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
}
void
-intel_dp_init(struct drm_device *dev, int output_reg)
+intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
@@ -2455,7 +2444,9 @@ intel_dp_init(struct drm_device *dev, int output_reg)
return;
intel_dp->output_reg = output_reg;
- intel_dp->dpms_mode = -1;
+ intel_dp->port = port;
+ /* Preserve the current hw state. */
+ intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
@@ -2482,18 +2473,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
connector->polled = DRM_CONNECTOR_POLL_HPD;
- if (output_reg == DP_B || output_reg == PCH_DP_B)
- intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
- else if (output_reg == DP_C || output_reg == PCH_DP_C)
- intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
- else if (output_reg == DP_D || output_reg == PCH_DP_D)
- intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
+ intel_encoder->cloneable = false;
- if (is_edp(intel_dp)) {
- intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
- INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
- ironlake_panel_vdd_work);
- }
+ INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
+ ironlake_panel_vdd_work);
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
@@ -2507,29 +2490,33 @@ intel_dp_init(struct drm_device *dev, int output_reg)
intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
+ intel_encoder->enable = intel_enable_dp;
+ intel_encoder->pre_enable = intel_pre_enable_dp;
+ intel_encoder->disable = intel_disable_dp;
+ intel_encoder->post_disable = intel_post_disable_dp;
+ intel_encoder->get_hw_state = intel_dp_get_hw_state;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
/* Set up the DDC bus. */
- switch (output_reg) {
- case DP_A:
- name = "DPDDC-A";
- break;
- case DP_B:
- case PCH_DP_B:
- dev_priv->hotplug_supported_mask |=
- DPB_HOTPLUG_INT_STATUS;
- name = "DPDDC-B";
- break;
- case DP_C:
- case PCH_DP_C:
- dev_priv->hotplug_supported_mask |=
- DPC_HOTPLUG_INT_STATUS;
- name = "DPDDC-C";
- break;
- case DP_D:
- case PCH_DP_D:
- dev_priv->hotplug_supported_mask |=
- DPD_HOTPLUG_INT_STATUS;
- name = "DPDDC-D";
- break;
+ switch (port) {
+ case PORT_A:
+ name = "DPDDC-A";
+ break;
+ case PORT_B:
+ dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS;
+ name = "DPDDC-B";
+ break;
+ case PORT_C:
+ dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS;
+ name = "DPDDC-C";
+ break;
+ case PORT_D:
+ dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS;
+ name = "DPDDC-D";
+ break;
+ default:
+ WARN(1, "Invalid port %c\n", port_name(port));
+ break;
}
/* Cache some DPCD data in the eDP case */
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 7db849052a98..05cc7c372fc5 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -31,6 +31,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_dp_helper.h>
#define _wait_for(COND, MS, W) ({ \
unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
@@ -40,7 +41,11 @@
ret__ = -ETIMEDOUT; \
break; \
} \
- if (W && drm_can_sleep()) msleep(W); \
+ if (W && drm_can_sleep()) { \
+ msleep(W); \
+ } else { \
+ cpu_relax(); \
+ } \
} \
ret__; \
})
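With the cpu_relax() fallback, _wait_for() now degrades to a busy-spin whenever sleeping is not permitted. A usage sketch with placeholder register and bit names (FOO_REG, FOO_READY):

    /* Poll a ready bit for up to 50 ms, sleeping 1 ms per iteration
     * where allowed, spinning otherwise; returns 0 or -ETIMEDOUT. */
    if (_wait_for((I915_READ(FOO_REG) & FOO_READY) != 0, 50, 1))
        DRM_ERROR("timed out waiting for FOO_READY\n");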
@@ -90,25 +95,6 @@
#define INTEL_OUTPUT_DISPLAYPORT 7
#define INTEL_OUTPUT_EDP 8
-/* Intel Pipe Clone Bit */
-#define INTEL_HDMIB_CLONE_BIT 1
-#define INTEL_HDMIC_CLONE_BIT 2
-#define INTEL_HDMID_CLONE_BIT 3
-#define INTEL_HDMIE_CLONE_BIT 4
-#define INTEL_HDMIF_CLONE_BIT 5
-#define INTEL_SDVO_NON_TV_CLONE_BIT 6
-#define INTEL_SDVO_TV_CLONE_BIT 7
-#define INTEL_SDVO_LVDS_CLONE_BIT 8
-#define INTEL_ANALOG_CLONE_BIT 9
-#define INTEL_TV_CLONE_BIT 10
-#define INTEL_DP_B_CLONE_BIT 11
-#define INTEL_DP_C_CLONE_BIT 12
-#define INTEL_DP_D_CLONE_BIT 13
-#define INTEL_LVDS_CLONE_BIT 14
-#define INTEL_DVO_TMDS_CLONE_BIT 15
-#define INTEL_DVO_LVDS_CLONE_BIT 16
-#define INTEL_EDP_CLONE_BIT 17
-
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
#define INTEL_DVO_CHIP_TMDS 2
@@ -151,16 +137,48 @@ struct intel_fbdev {
struct intel_encoder {
struct drm_encoder base;
+ /*
+ * The new crtc this encoder will be driven from. Only differs from
+ * base->crtc while a modeset is in progress.
+ */
+ struct intel_crtc *new_crtc;
+
int type;
bool needs_tv_clock;
+ /*
+ * Intel hw has only one MUX where encoders could be cloned, hence a
+ * simple flag is enough to compute the possible_clones mask.
+ */
+ bool cloneable;
+ bool connectors_active;
void (*hot_plug)(struct intel_encoder *);
+ void (*pre_enable)(struct intel_encoder *);
+ void (*enable)(struct intel_encoder *);
+ void (*disable)(struct intel_encoder *);
+ void (*post_disable)(struct intel_encoder *);
+ /* Read out the current hw state of this encoder, returning true if
+ * the encoder is active. If the encoder is enabled it also sets the
+ * pipe it is connected to in the pipe parameter. */
+ bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe);
int crtc_mask;
- int clone_mask;
};
struct intel_connector {
struct drm_connector base;
+ /*
+ * The fixed encoder this connector is connected to.
+ */
struct intel_encoder *encoder;
+
+ /*
+ * The new encoder this connector will be driven by. Only differs from
+ * encoder while a modeset is in progress.
+ */
+ struct intel_encoder *new_encoder;
+
+ /* Reads out the current hw state, returning true if the connector is
+ * enabled and active (i.e. dpms ON state). */
+ bool (*get_hw_state)(struct intel_connector *);
};
struct intel_crtc {
@@ -168,11 +186,13 @@ struct intel_crtc {
enum pipe pipe;
enum plane plane;
u8 lut_r[256], lut_g[256], lut_b[256];
- int dpms_mode;
- bool active; /* is the crtc on? independent of the dpms mode */
+ /*
+ * Whether the crtc and the connected output pipeline is active. Implies
+ * that crtc->enabled is set, i.e. the current mode configuration has
+ * some outputs connected to this crtc.
+ */
+ bool active;
bool primary_disabled; /* is the crtc obscured by a plane? */
- bool busy; /* is scanout buffer being updated frequently? */
- struct timer_list idle_timer;
bool lowfreq_avail;
struct intel_overlay *overlay;
struct intel_unpin_work *unpin_work;
@@ -311,6 +331,37 @@ struct intel_hdmi {
struct drm_display_mode *adjusted_mode);
};
+#define DP_RECEIVER_CAP_SIZE 0xf
+#define DP_LINK_CONFIGURATION_SIZE 9
+
+struct intel_dp {
+ struct intel_encoder base;
+ uint32_t output_reg;
+ uint32_t DP;
+ uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
+ bool has_audio;
+ enum hdmi_force_audio force_audio;
+ enum port port;
+ uint32_t color_range;
+ uint8_t link_bw;
+ uint8_t lane_count;
+ uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
+ struct i2c_adapter adapter;
+ struct i2c_algo_dp_aux_data algo;
+ bool is_pch_edp;
+ uint8_t train_set[4];
+ int panel_power_up_delay;
+ int panel_power_down_delay;
+ int panel_power_cycle_delay;
+ int backlight_on_delay;
+ int backlight_off_delay;
+ struct drm_display_mode *panel_fixed_mode; /* for eDP */
+ struct delayed_work panel_vdd_work;
+ bool want_panel_vdd;
+ struct edid *edid; /* cached EDID for eDP */
+ int edid_mode_count;
+};
+
static inline struct drm_crtc *
intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
{
@@ -350,17 +401,21 @@ extern void intel_attach_force_audio_property(struct drm_connector *connector);
extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
extern void intel_crt_init(struct drm_device *dev);
-extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
+extern void intel_hdmi_init(struct drm_device *dev,
+ int sdvox_reg, enum port port);
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
bool is_sdvob);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
-extern void intel_mark_busy(struct drm_device *dev,
- struct drm_i915_gem_object *obj);
+extern void intel_mark_busy(struct drm_device *dev);
+extern void intel_mark_idle(struct drm_device *dev);
+extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
+extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
extern bool intel_lvds_init(struct drm_device *dev);
-extern void intel_dp_init(struct drm_device *dev, int dp_reg);
+extern void intel_dp_init(struct drm_device *dev, int output_reg,
+ enum port port);
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
@@ -373,8 +428,6 @@ extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane);
-void intel_sanitize_pm(struct drm_device *dev);
-
/* intel_panel.c */
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
@@ -391,10 +444,27 @@ extern void intel_panel_disable_backlight(struct drm_device *dev);
extern void intel_panel_destroy_backlight(struct drm_device *dev);
extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
+struct intel_set_config {
+ struct drm_encoder **save_connector_encoders;
+ struct drm_crtc **save_encoder_crtcs;
+
+ bool fb_changed;
+ bool mode_changed;
+};
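The two save arrays let a failed modeset restore the previous connector->encoder and encoder->crtc routing. A rough sketch of the save step, using a hypothetical helper stash_config() (error unwinding omitted; the actual code in intel_display.c may be structured differently):

	static int stash_config(struct drm_device *dev,
				struct intel_set_config *config)
	{
		struct drm_connector *connector;
		struct drm_encoder *encoder;
		int i;

		/* One slot per connector/encoder, indexed by list position. */
		config->save_connector_encoders =
			kcalloc(dev->mode_config.num_connector,
				sizeof(struct drm_encoder *), GFP_KERNEL);
		config->save_encoder_crtcs =
			kcalloc(dev->mode_config.num_encoder,
				sizeof(struct drm_crtc *), GFP_KERNEL);
		if (!config->save_connector_encoders ||
		    !config->save_encoder_crtcs)
			return -ENOMEM;

		i = 0;
		list_for_each_entry(connector,
				    &dev->mode_config.connector_list, head)
			config->save_connector_encoders[i++] =
				connector->encoder;

		i = 0;
		list_for_each_entry(encoder,
				    &dev->mode_config.encoder_list, head)
			config->save_encoder_crtcs[i++] = encoder->crtc;

		return 0;
	}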
+
+extern bool intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *old_fb);
+extern void intel_modeset_disable(struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
-extern void intel_encoder_prepare(struct drm_encoder *encoder);
-extern void intel_encoder_commit(struct drm_encoder *encoder);
+extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
+extern void intel_encoder_noop(struct drm_encoder *encoder);
extern void intel_encoder_destroy(struct drm_encoder *encoder);
+extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
+extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder);
+extern void intel_connector_dpms(struct drm_connector *, int mode);
+extern bool intel_connector_get_hw_state(struct intel_connector *connector);
+extern void intel_modeset_check_state(struct drm_device *dev);
+
static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
{
@@ -417,12 +487,10 @@ struct intel_load_detect_pipe {
bool load_detect_temp;
int dpms_mode;
};
-extern bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
- struct drm_connector *connector,
+extern bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_display_mode *mode,
struct intel_load_detect_pipe *old);
-extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
- struct drm_connector *connector,
+extern void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old);
extern void intelfb_restore(void);
@@ -503,7 +571,10 @@ extern void intel_disable_gt_powersave(struct drm_device *dev);
extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
extern void ironlake_teardown_rc6(struct drm_device *dev);
-extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode);
+extern void intel_enable_ddi(struct intel_encoder *encoder);
+extern void intel_disable_ddi(struct intel_encoder *encoder);
+extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe);
extern void intel_ddi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index ac9f2dd5648a..15da99533e5b 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -36,6 +36,7 @@
#define SIL164_ADDR 0x38
#define CH7xxx_ADDR 0x76
#define TFP410_ADDR 0x38
+#define NS2501_ADDR 0x38
static const struct intel_dvo_device intel_dvo_devices[] = {
{
@@ -73,7 +74,14 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.slave_addr = 0x75,
.gpio = GMBUS_PORT_DPB,
.dev_ops = &ch7017_ops,
- }
+ },
+ {
+ .type = INTEL_DVO_CHIP_TMDS,
+ .name = "ns2501",
+ .dvo_reg = DVOC,
+ .slave_addr = NS2501_ADDR,
+ .dev_ops = &ns2501_ops,
+ }
};
struct intel_dvo {
@@ -96,22 +104,91 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
struct intel_dvo, base);
}
-static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
+static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
{
- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
+
+ return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
+}
+
+static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+ u32 tmp;
+
+ tmp = I915_READ(intel_dvo->dev.dvo_reg);
+
+ if (!(tmp & DVO_ENABLE))
+ return false;
+
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+static void intel_disable_dvo(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+ u32 dvo_reg = intel_dvo->dev.dvo_reg;
+ u32 temp = I915_READ(dvo_reg);
+
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
+ I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
+ I915_READ(dvo_reg);
+}
+
+static void intel_enable_dvo(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
u32 dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
+ I915_WRITE(dvo_reg, temp | DVO_ENABLE);
+ I915_READ(dvo_reg);
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
+}
+
+static void intel_dvo_dpms(struct drm_connector *connector, int mode)
+{
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ struct drm_crtc *crtc;
+
+ /* dvo supports only 2 dpms states. */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == connector->dpms)
+ return;
+
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ crtc = intel_dvo->base.base.crtc;
+ if (!crtc) {
+ intel_dvo->base.connectors_active = false;
+ return;
+ }
+
if (mode == DRM_MODE_DPMS_ON) {
- I915_WRITE(dvo_reg, temp | DVO_ENABLE);
- I915_READ(dvo_reg);
- intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
+ intel_dvo->base.connectors_active = true;
+
+ intel_crtc_update_dpms(crtc);
+
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
} else {
- intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
- I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
- I915_READ(dvo_reg);
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
+
+ intel_dvo->base.connectors_active = false;
+
+ intel_crtc_update_dpms(crtc);
}
+
+ intel_modeset_check_state(connector->dev);
}
static int intel_dvo_mode_valid(struct drm_connector *connector,
@@ -266,15 +343,13 @@ static void intel_dvo_destroy(struct drm_connector *connector)
}
static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
- .dpms = intel_dvo_dpms,
.mode_fixup = intel_dvo_mode_fixup,
- .prepare = intel_encoder_prepare,
.mode_set = intel_dvo_mode_set,
- .commit = intel_encoder_commit,
+ .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = intel_dvo_dpms,
.detect = intel_dvo_detect,
.destroy = intel_dvo_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -363,6 +438,11 @@ void intel_dvo_init(struct drm_device *dev)
drm_encoder_init(dev, &intel_encoder->base,
&intel_dvo_enc_funcs, encoder_type);
+ intel_encoder->disable = intel_disable_dvo;
+ intel_encoder->enable = intel_enable_dvo;
+ intel_encoder->get_hw_state = intel_dvo_get_hw_state;
+ intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
+
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
struct drm_connector *connector = &intel_connector->base;
@@ -395,17 +475,14 @@ void intel_dvo_init(struct drm_device *dev)
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
switch (dvo->type) {
case INTEL_DVO_CHIP_TMDS:
- intel_encoder->clone_mask =
- (1 << INTEL_DVO_TMDS_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT);
+ intel_encoder->cloneable = true;
drm_connector_init(dev, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_DVII);
encoder_type = DRM_MODE_ENCODER_TMDS;
break;
case INTEL_DVO_CHIP_LVDS:
- intel_encoder->clone_mask =
- (1 << INTEL_DVO_LVDS_CLONE_BIT);
+ intel_encoder->cloneable = false;
drm_connector_init(dev, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 025be7dd2a27..9ba0aaed7ee8 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -150,6 +150,9 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(VIDEO_DIP_DATA, *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(VIDEO_DIP_DATA, 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
@@ -185,6 +188,9 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
@@ -223,6 +229,9 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
@@ -258,6 +267,9 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
@@ -291,6 +303,9 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(data_reg + i, *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(data_reg + i, 0);
mmiowb();
val |= hsw_infoframe_enable(frame);
@@ -376,6 +391,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
port = VIDEO_DIP_PORT_C;
break;
default:
+ BUG();
return;
}
@@ -434,6 +450,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
port = VIDEO_DIP_PORT_D;
break;
default:
+ BUG();
return;
}
@@ -600,15 +617,36 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
intel_hdmi->set_infoframes(encoder, adjusted_mode);
}
-static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
+static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ u32 tmp;
+
+ tmp = I915_READ(intel_hdmi->sdvox_reg);
+
+ if (!(tmp & SDVO_ENABLE))
+ return false;
+
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ else
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+static void intel_enable_hdmi(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 temp;
u32 enable_bits = SDVO_ENABLE;
- if (intel_hdmi->has_audio || mode != DRM_MODE_DPMS_ON)
+ if (intel_hdmi->has_audio)
enable_bits |= SDVO_AUDIO_ENABLE;
temp = I915_READ(intel_hdmi->sdvox_reg);
@@ -616,31 +654,12 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
/* HW workaround for IBX, we need to move the port to transcoder A
* before disabling it. */
if (HAS_PCH_IBX(dev)) {
- struct drm_crtc *crtc = encoder->crtc;
+ struct drm_crtc *crtc = encoder->base.crtc;
int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
- if (mode != DRM_MODE_DPMS_ON) {
- if (temp & SDVO_PIPE_B_SELECT) {
- temp &= ~SDVO_PIPE_B_SELECT;
- I915_WRITE(intel_hdmi->sdvox_reg, temp);
- POSTING_READ(intel_hdmi->sdvox_reg);
-
- /* Again we need to write this twice. */
- I915_WRITE(intel_hdmi->sdvox_reg, temp);
- POSTING_READ(intel_hdmi->sdvox_reg);
-
- /* Transcoder selection bits only update
- * effectively on vblank. */
- if (crtc)
- intel_wait_for_vblank(dev, pipe);
- else
- msleep(50);
- }
- } else {
- /* Restore the transcoder select bit. */
- if (pipe == PIPE_B)
- enable_bits |= SDVO_PIPE_B_SELECT;
- }
+ /* Restore the transcoder select bit. */
+ if (pipe == PIPE_B)
+ enable_bits |= SDVO_PIPE_B_SELECT;
}
/* HW workaround, need to toggle enable bit off and on for 12bpc, but
@@ -651,12 +670,64 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
POSTING_READ(intel_hdmi->sdvox_reg);
}
- if (mode != DRM_MODE_DPMS_ON) {
- temp &= ~enable_bits;
- } else {
- temp |= enable_bits;
+ temp |= enable_bits;
+
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+
+ /* HW workaround: write this twice, since the first write may
+ * otherwise be masked.
+ */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+ }
+}
+
+static void intel_disable_hdmi(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ u32 temp;
+ u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
+
+ temp = I915_READ(intel_hdmi->sdvox_reg);
+
+ /* HW workaround for IBX, we need to move the port to transcoder A
+ * before disabling it. */
+ if (HAS_PCH_IBX(dev)) {
+ struct drm_crtc *crtc = encoder->base.crtc;
+ int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+ if (temp & SDVO_PIPE_B_SELECT) {
+ temp &= ~SDVO_PIPE_B_SELECT;
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+
+ /* Again we need to write this twice. */
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+
+ /* Transcoder selection bits only update
+ * effectively on vblank. */
+ if (crtc)
+ intel_wait_for_vblank(dev, pipe);
+ else
+ msleep(50);
+ }
+ }
+
+ /* HW workaround: toggling the enable bit off and on is only required
+ * for 12bpc, but we do it unconditionally since that proved more
+ * stable in testing.
+ */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE);
+ POSTING_READ(intel_hdmi->sdvox_reg);
}
+ temp &= ~enable_bits;
+
I915_WRITE(intel_hdmi->sdvox_reg, temp);
POSTING_READ(intel_hdmi->sdvox_reg);
@@ -736,7 +807,6 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
drm_detect_hdmi_monitor(edid);
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
}
- connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -777,8 +847,6 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL)
has_audio = drm_detect_monitor_audio(edid);
-
- connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -832,9 +900,8 @@ intel_hdmi_set_property(struct drm_connector *connector,
done:
if (intel_hdmi->base.base.crtc) {
struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
- drm_crtc_helper_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y,
- crtc->fb);
+ intel_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
}
return 0;
@@ -848,23 +915,19 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
}
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
- .dpms = intel_ddi_dpms,
.mode_fixup = intel_hdmi_mode_fixup,
- .prepare = intel_encoder_prepare,
.mode_set = intel_ddi_mode_set,
- .commit = intel_encoder_commit,
+ .disable = intel_encoder_noop,
};
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
- .dpms = intel_hdmi_dpms,
.mode_fixup = intel_hdmi_mode_fixup,
- .prepare = intel_encoder_prepare,
.mode_set = intel_hdmi_mode_set,
- .commit = intel_encoder_commit,
+ .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = intel_connector_dpms,
.detect = intel_hdmi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_hdmi_set_property,
@@ -888,7 +951,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_attach_broadcast_rgb_property(connector);
}
-void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
+void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
@@ -922,48 +985,25 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
connector->doublescan_allowed = 0;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- /* Set up the DDC bus. */
- if (sdvox_reg == SDVOB) {
- intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
- intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
- dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == SDVOC) {
- intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
- intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
- dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == HDMIB) {
- intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
- intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
- dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == HDMIC) {
- intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
- intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
- dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == HDMID) {
- intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
- intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
- dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == DDI_BUF_CTL(PORT_B)) {
- DRM_DEBUG_DRIVER("LPT: detected output on DDI B\n");
- intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
+ intel_encoder->cloneable = false;
+
+ intel_hdmi->ddi_port = port;
+ switch (port) {
+ case PORT_B:
intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
- intel_hdmi->ddi_port = PORT_B;
dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == DDI_BUF_CTL(PORT_C)) {
- DRM_DEBUG_DRIVER("LPT: detected output on DDI C\n");
- intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
+ break;
+ case PORT_C:
intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
- intel_hdmi->ddi_port = PORT_C;
dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == DDI_BUF_CTL(PORT_D)) {
- DRM_DEBUG_DRIVER("LPT: detected output on DDI D\n");
- intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
+ break;
+ case PORT_D:
intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
- intel_hdmi->ddi_port = PORT_D;
dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
- } else {
- /* If we got an unknown sdvox_reg, things are pretty much broken
- * in a way that we should let the kernel know about it */
+ break;
+ case PORT_A:
+ /* Internal port only for eDP. */
+ default:
BUG();
}
@@ -986,10 +1026,21 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
intel_hdmi->set_infoframes = cpt_set_infoframes;
}
- if (IS_HASWELL(dev))
- drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs_hsw);
- else
- drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+ if (IS_HASWELL(dev)) {
+ intel_encoder->enable = intel_enable_ddi;
+ intel_encoder->disable = intel_disable_ddi;
+ intel_encoder->get_hw_state = intel_ddi_get_hw_state;
+ drm_encoder_helper_add(&intel_encoder->base,
+ &intel_hdmi_helper_funcs_hsw);
+ } else {
+ intel_encoder->enable = intel_enable_hdmi;
+ intel_encoder->disable = intel_disable_hdmi;
+ intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
+ drm_encoder_helper_add(&intel_encoder->base,
+ &intel_hdmi_helper_funcs);
+ }
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
intel_hdmi_add_properties(intel_hdmi, connector);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 8552be9f5db1..e3166df55daa 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -64,13 +64,40 @@ static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
struct intel_lvds, base);
}
+static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 lvds_reg, tmp;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ lvds_reg = PCH_LVDS;
+ } else {
+ lvds_reg = LVDS;
+ }
+
+ tmp = I915_READ(lvds_reg);
+
+ if (!(tmp & LVDS_PORT_EN))
+ return false;
+
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ else
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
/**
* Sets the power state for the panel.
*/
-static void intel_lvds_enable(struct intel_lvds *intel_lvds)
+static void intel_enable_lvds(struct intel_encoder *encoder)
{
- struct drm_device *dev = intel_lvds->base.base.dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_lvds->base.base.crtc);
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, lvds_reg, stat_reg;
@@ -110,9 +137,10 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
intel_panel_enable_backlight(dev, intel_crtc->pipe);
}
-static void intel_lvds_disable(struct intel_lvds *intel_lvds)
+static void intel_disable_lvds(struct intel_encoder *encoder)
{
- struct drm_device *dev = intel_lvds->base.base.dev;
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, lvds_reg, stat_reg;
@@ -141,18 +169,6 @@ static void intel_lvds_disable(struct intel_lvds *intel_lvds)
POSTING_READ(lvds_reg);
}
-static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
-{
- struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-
- if (mode == DRM_MODE_DPMS_ON)
- intel_lvds_enable(intel_lvds);
- else
- intel_lvds_disable(intel_lvds);
-
- /* XXX: We never power down the LVDS pairs. */
-}
-
static int intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
@@ -233,9 +249,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
- struct intel_encoder *tmp_encoder;
+ struct intel_crtc *intel_crtc = intel_lvds->base.new_crtc;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
int pipe;
@@ -245,14 +260,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
return false;
}
- /* Should never happen!! */
- for_each_encoder_on_crtc(dev, encoder->crtc, tmp_encoder) {
- if (&tmp_encoder->base != encoder) {
- DRM_ERROR("Can't enable LVDS and another "
- "encoder on the same pipe\n");
- return false;
- }
- }
+ if (intel_encoder_check_is_cloned(&intel_lvds->base))
+ return false;
/*
* We have timings from the BIOS for the panel, put them in
@@ -404,23 +413,6 @@ out:
return true;
}
-static void intel_lvds_prepare(struct drm_encoder *encoder)
-{
- struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-
- intel_lvds_disable(intel_lvds);
-}
-
-static void intel_lvds_commit(struct drm_encoder *encoder)
-{
- struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-
- /* Always do a full power on as we do not know what state
- * we were left in.
- */
- intel_lvds_enable(intel_lvds);
-}
-
static void intel_lvds_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -534,7 +526,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
dev_priv->modeset_on_lid = 0;
mutex_lock(&dev->mode_config.mutex);
- drm_helper_resume_force_mode(dev);
+ intel_modeset_check_state(dev);
mutex_unlock(&dev->mode_config.mutex);
return NOTIFY_OK;
@@ -586,8 +578,8 @@ static int intel_lvds_set_property(struct drm_connector *connector,
* If the CRTC is enabled, the display will be changed
* according to the new panel fitting mode.
*/
- drm_crtc_helper_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y, crtc->fb);
+ intel_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
}
}
@@ -595,11 +587,9 @@ static int intel_lvds_set_property(struct drm_connector *connector,
}
static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
- .dpms = intel_lvds_dpms,
.mode_fixup = intel_lvds_mode_fixup,
- .prepare = intel_lvds_prepare,
.mode_set = intel_lvds_mode_set,
- .commit = intel_lvds_commit,
+ .disable = intel_encoder_noop,
};
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
@@ -609,7 +599,7 @@ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs
};
static const struct drm_connector_funcs intel_lvds_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = intel_connector_dpms,
.detect = intel_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_lvds_set_property,
@@ -971,10 +961,15 @@ bool intel_lvds_init(struct drm_device *dev)
drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS);
+ intel_encoder->enable = intel_enable_lvds;
+ intel_encoder->disable = intel_disable_lvds;
+ intel_encoder->get_hw_state = intel_lvds_get_hw_state;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_LVDS;
- intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
+ intel_encoder->cloneable = false;
if (HAS_PCH_SPLIT(dev))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
else if (IS_GEN4(dev))
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index cc71fd9aaed5..cabd84bf66eb 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -45,7 +45,6 @@ int intel_connector_update_modes(struct drm_connector *connector,
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
- connector->display_info.raw_edid = NULL;
kfree(edid);
return ret;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 5cc624eb6133..5530413213d8 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -427,6 +427,25 @@ blind_set:
goto end;
}
+static void intel_setup_cadls(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+ int i = 0;
+ u32 disp_id;
+
+ /* Initialize the CADL field by duplicating the DIDL values.
+ * Technically this is not always correct, as display outputs may exist
+ * but not be active. This initialization is necessary for some Clevo
+ * laptops that check this field before processing the brightness and
+ * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
+ * there are fewer than eight devices. */
+ do {
+ disp_id = ioread32(&opregion->acpi->didl[i]);
+ iowrite32(disp_id, &opregion->acpi->cadl[i]);
+ } while (++i < 8 && disp_id != 0);
+}
+
void intel_opregion_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -436,8 +455,10 @@ void intel_opregion_init(struct drm_device *dev)
return;
if (opregion->acpi) {
- if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_didl_outputs(dev);
+ intel_setup_cadls(dev);
+ }
/* Notify BIOS we are ready to handle ACPI video ext notifs.
* Right now, all the events are handled by the ACPI video module.
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 10510221d763..ebff850a9ab6 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -234,54 +234,6 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
return 0;
}
-/* Workaround for i830 bug where pipe a must be enable to change control regs */
-static int
-i830_activate_pipe_a(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_crtc *crtc;
- struct drm_crtc_helper_funcs *crtc_funcs;
- struct drm_display_mode vesa_640x480 = {
- DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
- 752, 800, 0, 480, 489, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
- }, *mode;
-
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[0]);
- if (crtc->dpms_mode == DRM_MODE_DPMS_ON)
- return 0;
-
- /* most i8xx have pipe a forced on, so don't trust dpms mode */
- if (I915_READ(_PIPEACONF) & PIPECONF_ENABLE)
- return 0;
-
- crtc_funcs = crtc->base.helper_private;
- if (crtc_funcs->dpms == NULL)
- return 0;
-
- DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
-
- mode = drm_mode_duplicate(dev, &vesa_640x480);
-
- if (!drm_crtc_helper_set_mode(&crtc->base, mode,
- crtc->base.x, crtc->base.y,
- crtc->base.fb))
- return 0;
-
- crtc_funcs->dpms(&crtc->base, DRM_MODE_DPMS_ON);
- return 1;
-}
-
-static void
-i830_deactivate_pipe_a(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-}
-
/* overlay needs to be disabled in OCMD reg */
static int intel_overlay_on(struct intel_overlay *overlay)
{
@@ -289,17 +241,12 @@ static int intel_overlay_on(struct intel_overlay *overlay)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
struct drm_i915_gem_request *request;
- int pipe_a_quirk = 0;
int ret;
BUG_ON(overlay->active);
overlay->active = 1;
- if (IS_I830(dev)) {
- pipe_a_quirk = i830_activate_pipe_a(dev);
- if (pipe_a_quirk < 0)
- return pipe_a_quirk;
- }
+ WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL) {
@@ -321,9 +268,6 @@ static int intel_overlay_on(struct intel_overlay *overlay)
ret = intel_overlay_do_wait_request(overlay, request, NULL);
out:
- if (pipe_a_quirk)
- i830_deactivate_pipe_a(dev);
-
return ret;
}
@@ -1438,7 +1382,7 @@ void intel_setup_overlay(struct drm_device *dev)
}
overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
} else {
- ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
+ ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false);
if (ret) {
DRM_ERROR("failed to pin overlay register bo\n");
goto out_free_bo;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ba8a27b1757a..d69f8f49beb5 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -31,6 +31,8 @@
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
+#define FORCEWAKE_ACK_TIMEOUT_MS 2
+
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
* framebuffer contents in-memory, aiming at reducing the required bandwidth
* during in-memory transfers and, therefore, the overall power consumption.
@@ -593,7 +595,7 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev)
break;
}
- dev_priv->r_t = dev_priv->mem_freq;
+ dev_priv->ips.r_t = dev_priv->mem_freq;
switch (csipll & 0x3ff) {
case 0x00c:
@@ -625,11 +627,11 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev)
}
if (dev_priv->fsb_freq == 3200) {
- dev_priv->c_m = 0;
+ dev_priv->ips.c_m = 0;
} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
- dev_priv->c_m = 1;
+ dev_priv->ips.c_m = 1;
} else {
- dev_priv->c_m = 2;
+ dev_priv->ips.c_m = 2;
}
}
@@ -2138,7 +2140,7 @@ intel_alloc_context_page(struct drm_device *dev)
return NULL;
}
- ret = i915_gem_object_pin(ctx, 4096, true);
+ ret = i915_gem_object_pin(ctx, 4096, true, false);
if (ret) {
DRM_ERROR("failed to pin power context: %d\n", ret);
goto err_unref;
@@ -2160,11 +2162,22 @@ err_unref:
return NULL;
}
+/**
+ * Lock protecting IPS-related data structures
+ */
+DEFINE_SPINLOCK(mchdev_lock);
+
+/* Global for the IPS driver to get at the current i915 device. Protected by
+ * mchdev_lock. */
+static struct drm_i915_private *i915_mch_dev;
+
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u16 rgvswctl;
+ assert_spin_locked(&mchdev_lock);
+
rgvswctl = I915_READ16(MEMSWCTL);
if (rgvswctl & MEMCTL_CMD_STS) {
DRM_DEBUG("gpu busy, RCS change rejected\n");
@@ -2188,6 +2201,8 @@ static void ironlake_enable_drps(struct drm_device *dev)
u32 rgvmodectl = I915_READ(MEMMODECTL);
u8 fmax, fmin, fstart, vstart;
+ spin_lock_irq(&mchdev_lock);
+
/* Enable temp reporting */
I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
@@ -2211,12 +2226,12 @@ static void ironlake_enable_drps(struct drm_device *dev)
vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
PXVFREQ_PX_SHIFT;
- dev_priv->fmax = fmax; /* IPS callback will increase this */
- dev_priv->fstart = fstart;
+ dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
+ dev_priv->ips.fstart = fstart;
- dev_priv->max_delay = fstart;
- dev_priv->min_delay = fmin;
- dev_priv->cur_delay = fstart;
+ dev_priv->ips.max_delay = fstart;
+ dev_priv->ips.min_delay = fmin;
+ dev_priv->ips.cur_delay = fstart;
DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
fmax, fmin, fstart);
@@ -2233,23 +2248,29 @@ static void ironlake_enable_drps(struct drm_device *dev)
rgvmodectl |= MEMMODE_SWMODE_EN;
I915_WRITE(MEMMODECTL, rgvmodectl);
- if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
+ if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
DRM_ERROR("stuck trying to change perf mode\n");
- msleep(1);
+ mdelay(1);
ironlake_set_drps(dev, fstart);
- dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
+ dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
I915_READ(0x112e0);
- dev_priv->last_time1 = jiffies_to_msecs(jiffies);
- dev_priv->last_count2 = I915_READ(0x112f4);
- getrawmonotonic(&dev_priv->last_time2);
+ dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
+ dev_priv->ips.last_count2 = I915_READ(0x112f4);
+ getrawmonotonic(&dev_priv->ips.last_time2);
+
+ spin_unlock_irq(&mchdev_lock);
}
static void ironlake_disable_drps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u16 rgvswctl = I915_READ16(MEMSWCTL);
+ u16 rgvswctl;
+
+ spin_lock_irq(&mchdev_lock);
+
+ rgvswctl = I915_READ16(MEMSWCTL);
/* Ack interrupts, disable EFC interrupt */
I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
@@ -2259,31 +2280,54 @@ static void ironlake_disable_drps(struct drm_device *dev)
I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
/* Go back to the starting frequency */
- ironlake_set_drps(dev, dev_priv->fstart);
- msleep(1);
+ ironlake_set_drps(dev, dev_priv->ips.fstart);
+ mdelay(1);
rgvswctl |= MEMCTL_CMD_STS;
I915_WRITE(MEMSWCTL, rgvswctl);
- msleep(1);
+ mdelay(1);
+ spin_unlock_irq(&mchdev_lock);
}
-void gen6_set_rps(struct drm_device *dev, u8 val)
+/* There's a funny hw issue where the hw returns all 0 when reading from
+ * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
+ * ourselves, instead of doing an rmw cycle (which might result in us
+ * clearing all limits and leaving the gpu stuck at whatever frequency
+ * it is currently running at).
+ */
+static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 limits;
limits = 0;
- if (val >= dev_priv->max_delay)
- val = dev_priv->max_delay;
- else
- limits |= dev_priv->max_delay << 24;
- if (val <= dev_priv->min_delay)
- val = dev_priv->min_delay;
- else
- limits |= dev_priv->min_delay << 16;
+ if (*val >= dev_priv->rps.max_delay)
+ *val = dev_priv->rps.max_delay;
+ limits |= dev_priv->rps.max_delay << 24;
+
+ /* Only set the down limit when we've reached the lowest level to avoid
+ * getting more interrupts, otherwise leave this clear. This prevents a
+ * race in the hw when coming out of rc6: There's a tiny window where
+ * the hw runs at the minimal clock before selecting the desired
+ * frequency; if the down threshold expires in that window, we will not
+ * receive a down interrupt. */
+ if (*val <= dev_priv->rps.min_delay) {
+ *val = dev_priv->rps.min_delay;
+ limits |= dev_priv->rps.min_delay << 16;
+ }
- if (val == dev_priv->cur_delay)
+ return limits;
+}
+
+void gen6_set_rps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 limits = gen6_rps_limits(dev_priv, &val);
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(val > dev_priv->rps.max_delay);
+ WARN_ON(val < dev_priv->rps.min_delay);
+
+ if (val == dev_priv->rps.cur_delay)
return;
I915_WRITE(GEN6_RPNSWREQ,
@@ -2296,7 +2340,11 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
*/
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
- dev_priv->cur_delay = val;
+ POSTING_READ(GEN6_RPNSWREQ);
+
+ dev_priv->rps.cur_delay = val;
+
+ trace_intel_gpu_freq_change(val * 50);
}
static void gen6_disable_rps(struct drm_device *dev)
@@ -2312,40 +2360,40 @@ static void gen6_disable_rps(struct drm_device *dev)
* register (PMIMR) to mask PM interrupts. The only risk is in leaving
* stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
- spin_lock_irq(&dev_priv->rps_lock);
- dev_priv->pm_iir = 0;
- spin_unlock_irq(&dev_priv->rps_lock);
+ spin_lock_irq(&dev_priv->rps.lock);
+ dev_priv->rps.pm_iir = 0;
+ spin_unlock_irq(&dev_priv->rps.lock);
I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
}
int intel_enable_rc6(const struct drm_device *dev)
{
- /*
- * Respect the kernel parameter if it is set
- */
+ /* Respect the kernel parameter if it is set */
if (i915_enable_rc6 >= 0)
return i915_enable_rc6;
- /*
- * Disable RC6 on Ironlake
- */
- if (INTEL_INFO(dev)->gen == 5)
- return 0;
+ if (INTEL_INFO(dev)->gen == 5) {
+#ifdef CONFIG_INTEL_IOMMU
+ /* Disable rc6 on ilk if VT-d is on. */
+ if (intel_iommu_gfx_mapped)
+ return false;
+#endif
+ DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n");
+ return INTEL_RC6_ENABLE;
+ }
- /* On Haswell, only RC6 is available. So let's enable it by default to
- * provide better testing and coverage since the beginning.
- */
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev)) {
+ DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
return INTEL_RC6_ENABLE;
+ }
- /*
- * Disable rc6 on Sandybridge
- */
+ /* snb/ivb have more than one rc6 state. */
if (INTEL_INFO(dev)->gen == 6) {
DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
return INTEL_RC6_ENABLE;
}
+
DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
}
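Callers treat the return value as a bitmask when assembling the RC6 control word; roughly as follows, in the spirit of what gen6_enable_rps does with it (exact flow may differ):

	/* Sketch: translate the rc6 mode mask into GEN6_RC_CONTROL bits. */
	u32 rc6_mask = 0;
	int rc6_mode = intel_enable_rc6(dev);

	if (rc6_mode & INTEL_RC6_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
	if (rc6_mode & INTEL_RC6p_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
	if (rc6_mode & INTEL_RC6pp_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;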
@@ -2383,9 +2431,9 @@ static void gen6_enable_rps(struct drm_device *dev)
gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
/* In units of 100MHz */
- dev_priv->max_delay = rp_state_cap & 0xff;
- dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16;
- dev_priv->cur_delay = 0;
+ dev_priv->rps.max_delay = rp_state_cap & 0xff;
+ dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
+ dev_priv->rps.cur_delay = 0;
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -2438,8 +2486,8 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- dev_priv->max_delay << 24 |
- dev_priv->min_delay << 16);
+ dev_priv->rps.max_delay << 24 |
+ dev_priv->rps.min_delay << 16);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
@@ -2477,7 +2525,7 @@ static void gen6_enable_rps(struct drm_device *dev)
500))
DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
if (pcu_mbox & (1<<31)) { /* OC supported */
- dev_priv->max_delay = pcu_mbox & 0xff;
+ dev_priv->rps.max_delay = pcu_mbox & 0xff;
DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
}
@@ -2485,10 +2533,10 @@ static void gen6_enable_rps(struct drm_device *dev)
/* requires MSI enabled */
I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
- spin_lock_irq(&dev_priv->rps_lock);
- WARN_ON(dev_priv->pm_iir != 0);
+ spin_lock_irq(&dev_priv->rps.lock);
+ WARN_ON(dev_priv->rps.pm_iir != 0);
I915_WRITE(GEN6_PMIMR, 0);
- spin_unlock_irq(&dev_priv->rps_lock);
+ spin_unlock_irq(&dev_priv->rps.lock);
/* enable all PM interrupts */
I915_WRITE(GEN6_PMINTRMSK, 0);
@@ -2520,9 +2568,9 @@ static void gen6_update_ring_freq(struct drm_device *dev)
* to use for memory access. We do this by specifying the IA frequency
* the PCU should use as a reference to determine the ring frequency.
*/
- for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
+ for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
gpu_freq--) {
- int diff = dev_priv->max_delay - gpu_freq;
+ int diff = dev_priv->rps.max_delay - gpu_freq;
/*
* For GPU frequencies less than 750MHz, just use the lowest
@@ -2686,14 +2734,16 @@ static const struct cparams {
{ 0, 800, 231, 23784 },
};
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
{
u64 total_count, diff, ret;
u32 count1, count2, count3, m = 0, c = 0;
unsigned long now = jiffies_to_msecs(jiffies), diff1;
int i;
- diff1 = now - dev_priv->last_time1;
+ assert_spin_locked(&mchdev_lock);
+
+ diff1 = now - dev_priv->ips.last_time1;
/* Prevent division-by-zero if we are asking too fast.
* Also, we don't get interesting results if we are polling
@@ -2701,7 +2751,7 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
* in such cases.
*/
if (diff1 <= 10)
- return dev_priv->chipset_power;
+ return dev_priv->ips.chipset_power;
count1 = I915_READ(DMIEC);
count2 = I915_READ(DDREC);
@@ -2710,16 +2760,16 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
total_count = count1 + count2 + count3;
/* FIXME: handle per-counter overflow */
- if (total_count < dev_priv->last_count1) {
- diff = ~0UL - dev_priv->last_count1;
+ if (total_count < dev_priv->ips.last_count1) {
+ diff = ~0UL - dev_priv->ips.last_count1;
diff += total_count;
} else {
- diff = total_count - dev_priv->last_count1;
+ diff = total_count - dev_priv->ips.last_count1;
}
for (i = 0; i < ARRAY_SIZE(cparams); i++) {
- if (cparams[i].i == dev_priv->c_m &&
- cparams[i].t == dev_priv->r_t) {
+ if (cparams[i].i == dev_priv->ips.c_m &&
+ cparams[i].t == dev_priv->ips.r_t) {
m = cparams[i].m;
c = cparams[i].c;
break;
@@ -2730,14 +2780,30 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
ret = ((m * diff) + c);
ret = div_u64(ret, 10);
- dev_priv->last_count1 = total_count;
- dev_priv->last_time1 = now;
+ dev_priv->ips.last_count1 = total_count;
+ dev_priv->ips.last_time1 = now;
- dev_priv->chipset_power = ret;
+ dev_priv->ips.chipset_power = ret;
return ret;
}
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long val;
+
+ if (dev_priv->info->gen != 5)
+ return 0;
+
+ spin_lock_irq(&mchdev_lock);
+
+ val = __i915_chipset_val(dev_priv);
+
+ spin_unlock_irq(&mchdev_lock);
+
+ return val;
+}
+
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
unsigned long m, x, b;
@@ -2894,18 +2960,17 @@ static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
return v_table[pxvid].vd;
}
-void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
struct timespec now, diff1;
u64 diff;
unsigned long diffms;
u32 count;
- if (dev_priv->info->gen != 5)
- return;
+ assert_spin_locked(&mchdev_lock);
getrawmonotonic(&now);
- diff1 = timespec_sub(now, dev_priv->last_time2);
+ diff1 = timespec_sub(now, dev_priv->ips.last_time2);
/* Don't divide by 0 */
diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
@@ -2914,28 +2979,42 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
count = I915_READ(GFXEC);
- if (count < dev_priv->last_count2) {
- diff = ~0UL - dev_priv->last_count2;
+ if (count < dev_priv->ips.last_count2) {
+ diff = ~0UL - dev_priv->ips.last_count2;
diff += count;
} else {
- diff = count - dev_priv->last_count2;
+ diff = count - dev_priv->ips.last_count2;
}
- dev_priv->last_count2 = count;
- dev_priv->last_time2 = now;
+ dev_priv->ips.last_count2 = count;
+ dev_priv->ips.last_time2 = now;
/* More magic constants... */
diff = diff * 1181;
diff = div_u64(diff, diffms * 10);
- dev_priv->gfx_power = diff;
+ dev_priv->ips.gfx_power = diff;
}
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->info->gen != 5)
+ return;
+
+ spin_lock_irq(&mchdev_lock);
+
+ __i915_update_gfx_val(dev_priv);
+
+ spin_unlock_irq(&mchdev_lock);
+}
+
+static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
{
unsigned long t, corr, state1, corr2, state2;
u32 pxvid, ext_v;
- pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
+ assert_spin_locked(&mchdev_lock);
+
+ pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
pxvid = (pxvid >> 24) & 0x7f;
ext_v = pvid_to_extvid(dev_priv, pxvid);
@@ -2955,27 +3034,31 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
corr = corr * ((150142 * state1) / 10000 - 78642);
corr /= 100000;
- corr2 = (corr * dev_priv->corr);
+ corr2 = (corr * dev_priv->ips.corr);
state2 = (corr2 * state1) / 10000;
state2 /= 100; /* convert to mW */
- i915_update_gfx_val(dev_priv);
+ __i915_update_gfx_val(dev_priv);
- return dev_priv->gfx_power + state2;
+ return dev_priv->ips.gfx_power + state2;
}
-/* Global for IPS driver to get at the current i915 device */
-static struct drm_i915_private *i915_mch_dev;
-/*
- * Lock protecting IPS related data structures
- * - i915_mch_dev
- * - dev_priv->max_delay
- * - dev_priv->min_delay
- * - dev_priv->fmax
- * - dev_priv->gpu_busy
- */
-static DEFINE_SPINLOCK(mchdev_lock);
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long val;
+
+ if (dev_priv->info->gen != 5)
+ return 0;
+
+ spin_lock_irq(&mchdev_lock);
+
+ val = __i915_gfx_val(dev_priv);
+
+ spin_unlock_irq(&mchdev_lock);
+
+ return val;
+}
/**
* i915_read_mch_val - return value for IPS use
@@ -2988,18 +3071,18 @@ unsigned long i915_read_mch_val(void)
struct drm_i915_private *dev_priv;
unsigned long chipset_val, graphics_val, ret = 0;
- spin_lock(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev)
goto out_unlock;
dev_priv = i915_mch_dev;
- chipset_val = i915_chipset_val(dev_priv);
- graphics_val = i915_gfx_val(dev_priv);
+ chipset_val = __i915_chipset_val(dev_priv);
+ graphics_val = __i915_gfx_val(dev_priv);
ret = chipset_val + graphics_val;
out_unlock:
- spin_unlock(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
return ret;
}
@@ -3015,18 +3098,18 @@ bool i915_gpu_raise(void)
struct drm_i915_private *dev_priv;
bool ret = true;
- spin_lock(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
}
dev_priv = i915_mch_dev;
- if (dev_priv->max_delay > dev_priv->fmax)
- dev_priv->max_delay--;
+ if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
+ dev_priv->ips.max_delay--;
out_unlock:
- spin_unlock(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
return ret;
}
@@ -3043,18 +3126,18 @@ bool i915_gpu_lower(void)
struct drm_i915_private *dev_priv;
bool ret = true;
- spin_lock(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
}
dev_priv = i915_mch_dev;
- if (dev_priv->max_delay < dev_priv->min_delay)
- dev_priv->max_delay++;
+ if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
+ dev_priv->ips.max_delay++;
out_unlock:
- spin_unlock(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
return ret;
}
@@ -3068,17 +3151,20 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
bool i915_gpu_busy(void)
{
struct drm_i915_private *dev_priv;
+ struct intel_ring_buffer *ring;
bool ret = false;
+ int i;
- spin_lock(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev)
goto out_unlock;
dev_priv = i915_mch_dev;
- ret = dev_priv->busy;
+ for_each_ring(ring, dev_priv, i)
+ ret |= !list_empty(&ring->request_list);
out_unlock:
- spin_unlock(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
return ret;
}
@@ -3095,20 +3181,20 @@ bool i915_gpu_turbo_disable(void)
struct drm_i915_private *dev_priv;
bool ret = true;
- spin_lock(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
}
dev_priv = i915_mch_dev;
- dev_priv->max_delay = dev_priv->fstart;
+ dev_priv->ips.max_delay = dev_priv->ips.fstart;
- if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
+ if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
ret = false;
out_unlock:
- spin_unlock(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
return ret;
}
@@ -3136,19 +3222,20 @@ ips_ping_for_i915_load(void)
void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
- spin_lock(&mchdev_lock);
+ /* We only register the i915 ips part with intel-ips once everything is
+ * set up, to avoid intel-ips sneaking in and reading bogus values. */
+ spin_lock_irq(&mchdev_lock);
i915_mch_dev = dev_priv;
- dev_priv->mchdev_lock = &mchdev_lock;
- spin_unlock(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
ips_ping_for_i915_load();
}
void intel_gpu_ips_teardown(void)
{
- spin_lock(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
i915_mch_dev = NULL;
- spin_unlock(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
}
static void intel_init_emon(struct drm_device *dev)
{
@@ -3218,7 +3305,7 @@ static void intel_init_emon(struct drm_device *dev)
lcfuse = I915_READ(LCFUSE02);
- dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
+ dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}
void intel_disable_gt_powersave(struct drm_device *dev)
@@ -3731,42 +3818,6 @@ void intel_init_clock_gating(struct drm_device *dev)
dev_priv->display.init_pch_clock_gating(dev);
}
-static void gen6_sanitize_pm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 limits, delay, old;
-
- gen6_gt_force_wake_get(dev_priv);
-
- old = limits = I915_READ(GEN6_RP_INTERRUPT_LIMITS);
- /* Make sure we continue to get interrupts
- * until we hit the minimum or maximum frequencies.
- */
- limits &= ~(0x3f << 16 | 0x3f << 24);
- delay = dev_priv->cur_delay;
- if (delay < dev_priv->max_delay)
- limits |= (dev_priv->max_delay & 0x3f) << 24;
- if (delay > dev_priv->min_delay)
- limits |= (dev_priv->min_delay & 0x3f) << 16;
-
- if (old != limits) {
- /* Note that the known failure case is to read back 0. */
- DRM_DEBUG_DRIVER("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS "
- "expected %08x, was %08x\n", limits, old);
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
- }
-
- gen6_gt_force_wake_put(dev_priv);
-}
-
-void intel_sanitize_pm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->display.sanitize_pm)
- dev_priv->display.sanitize_pm(dev);
-}
-
/* Starting with Haswell, we have different power wells for
* different parts of the GPU. This attempts to enable them all.
*/
@@ -3852,7 +3903,6 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.update_wm = NULL;
}
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
- dev_priv->display.sanitize_pm = gen6_sanitize_pm;
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
if (SNB_READ_WM0_LATENCY()) {
@@ -3864,7 +3914,6 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.update_wm = NULL;
}
dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
- dev_priv->display.sanitize_pm = gen6_sanitize_pm;
} else if (IS_HASWELL(dev)) {
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = sandybridge_update_wm;
@@ -3876,7 +3925,6 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.update_wm = NULL;
}
dev_priv->display.init_clock_gating = haswell_init_clock_gating;
- dev_priv->display.sanitize_pm = gen6_sanitize_pm;
} else
dev_priv->display.update_wm = NULL;
} else if (IS_VALLEYVIEW(dev)) {
@@ -3955,14 +4003,16 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
else
forcewake_ack = FORCEWAKE_ACK;
- if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500))
- DRM_ERROR("Force wake wait timed out\n");
+ if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
I915_WRITE_NOTRACE(FORCEWAKE, 1);
- POSTING_READ(FORCEWAKE);
+ POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
- if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
- DRM_ERROR("Force wake wait timed out\n");
+ if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
__gen6_gt_wait_for_thread_c0(dev_priv);
}
@@ -3976,14 +4026,16 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
else
forcewake_ack = FORCEWAKE_MT_ACK;
- if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500))
- DRM_ERROR("Force wake wait timed out\n");
+ if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
- POSTING_READ(FORCEWAKE_MT);
+ POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
- if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
- DRM_ERROR("Force wake wait timed out\n");
+ if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
__gen6_gt_wait_for_thread_c0(dev_priv);
}
@@ -4016,14 +4068,14 @@ void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
- POSTING_READ(FORCEWAKE);
+ /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}
static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
- POSTING_READ(FORCEWAKE_MT);
+ /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}
@@ -4062,24 +4114,24 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
- /* Already awake? */
- if ((I915_READ(0x130094) & 0xa1) == 0xa1)
- return;
+ if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
- POSTING_READ(FORCEWAKE_VLV);
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1));
- if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), 500))
- DRM_ERROR("Force wake wait timed out\n");
+ if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
__gen6_gt_wait_for_thread_c0(dev_priv);
}
static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
- /* FIXME: confirm VLV behavior with Punit folks */
- POSTING_READ(FORCEWAKE_VLV);
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1));
+ /* The below doubles as a POSTING_READ */
+ gen6_gt_check_fifodbg(dev_priv);
}
void intel_gt_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 1aef516cc6fa..ecbc5c5dbbbc 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -261,6 +261,83 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
return 0;
}
+static int
+gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
+gen7_render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains, u32 flush_domains)
+{
+ u32 flags = 0;
+ struct pipe_control *pc = ring->private;
+ u32 scratch_addr = pc->gtt_offset + 128;
+ int ret;
+
+ /*
+ * Ensure that any following seqno writes only happen when the render
+ * cache is indeed flushed.
+ *
+	 * Workaround: every 4th PIPE_CONTROL command (except the ones with
+	 * only read-cache-invalidate bits set) must have the CS_STALL bit
+	 * set. We don't try to be clever and just set it unconditionally.
+ */
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ /* Just flush everything. Experiments have shown that reducing the
+ * number of bits based on the write domains has little performance
+ * impact.
+ */
+ if (flush_domains) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ }
+ if (invalidate_domains) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ /*
+ * TLB invalidate requires a post-sync write.
+ */
+ flags |= PIPE_CONTROL_QW_WRITE;
+
+ /* Workaround: we must issue a pipe_control with CS-stall bit
+ * set before a pipe_control command that has the state cache
+ * invalidate bit set. */
+ gen7_render_ring_cs_stall_wa(ring);
+ }
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(ring, flags);
+ intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
static void ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
{
@@ -381,12 +458,12 @@ init_pipe_control(struct intel_ring_buffer *ring)
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
- ret = i915_gem_object_pin(obj, 4096, true);
+ ret = i915_gem_object_pin(obj, 4096, true, false);
if (ret)
goto err_unref;
pc->gtt_offset = obj->gtt_offset;
- pc->cpu_page = kmap(obj->pages[0]);
+ pc->cpu_page = kmap(sg_page(obj->pages->sgl));
if (pc->cpu_page == NULL)
goto err_unpin;
@@ -413,7 +490,8 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
return;
obj = pc->obj;
- kunmap(obj->pages[0]);
+
+ kunmap(sg_page(obj->pages->sgl));
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
@@ -461,7 +539,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
if (INTEL_INFO(dev)->gen >= 6)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
- if (IS_IVYBRIDGE(dev))
+ if (HAS_L3_GPU_CACHE(dev))
I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
return ret;
@@ -627,26 +705,24 @@ pc_render_add_request(struct intel_ring_buffer *ring,
}
static u32
-gen6_ring_get_seqno(struct intel_ring_buffer *ring)
+gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
- struct drm_device *dev = ring->dev;
-
/* Workaround to force correct ordering between irq and seqno writes on
* ivb (and maybe also on snb) by reading from a CS register (like
* ACTHD) before reading the status page. */
- if (IS_GEN6(dev) || IS_GEN7(dev))
+ if (!lazy_coherency)
intel_ring_get_active_head(ring);
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
static u32
-ring_get_seqno(struct intel_ring_buffer *ring)
+ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
static u32
-pc_render_get_seqno(struct intel_ring_buffer *ring)
+pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
struct pipe_control *pc = ring->private;
return pc->cpu_page[0];
@@ -851,7 +927,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
- if (IS_IVYBRIDGE(dev) && ring->id == RCS)
+ if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
GEN6_RENDER_L3_PARITY_ERROR));
else
@@ -874,7 +950,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
- if (IS_IVYBRIDGE(dev) && ring->id == RCS)
+ if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
else
I915_WRITE_IMR(ring, ~0);
@@ -950,7 +1026,7 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
if (obj == NULL)
return;
- kunmap(obj->pages[0]);
+ kunmap(sg_page(obj->pages->sgl));
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
ring->status_page.obj = NULL;
@@ -971,13 +1047,13 @@ static int init_status_page(struct intel_ring_buffer *ring)
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
- ret = i915_gem_object_pin(obj, 4096, true);
+ ret = i915_gem_object_pin(obj, 4096, true, false);
if (ret != 0) {
goto err_unref;
}
ring->status_page.gfx_addr = obj->gtt_offset;
- ring->status_page.page_addr = kmap(obj->pages[0]);
+ ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
if (ring->status_page.page_addr == NULL) {
ret = -ENOMEM;
goto err_unpin;
@@ -1009,7 +1085,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
- INIT_LIST_HEAD(&ring->gpu_write_list);
ring->size = 32 * PAGE_SIZE;
init_waitqueue_head(&ring->irq_queue);
@@ -1029,7 +1104,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
ring->obj = obj;
- ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
+ ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
if (ret)
goto err_unref;
@@ -1378,7 +1453,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 6) {
ring->add_request = gen6_add_request;
- ring->flush = gen6_render_ring_flush;
+ ring->flush = gen7_render_ring_flush;
+ if (INTEL_INFO(dev)->gen == 6)
+ ring->flush = gen6_render_ring_flush;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->irq_enable_mask = GT_USER_INTERRUPT;
@@ -1480,7 +1557,6 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
- INIT_LIST_HEAD(&ring->gpu_write_list);
ring->size = size;
ring->effective_size = ring->size;
@@ -1573,3 +1649,41 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
return intel_init_ring_buffer(dev, ring);
}
+
+int
+intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
+{
+ int ret;
+
+ if (!ring->gpu_caches_dirty)
+ return 0;
+
+ ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+ if (ret)
+ return ret;
+
+ trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
+
+ ring->gpu_caches_dirty = false;
+ return 0;
+}
+
+int
+intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
+{
+ uint32_t flush_domains;
+ int ret;
+
+ flush_domains = 0;
+ if (ring->gpu_caches_dirty)
+ flush_domains = I915_GEM_GPU_DOMAINS;
+
+ ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+ if (ret)
+ return ret;
+
+ trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+
+ ring->gpu_caches_dirty = false;
+ return 0;
+}
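The two helpers added above centralize the flush/invalidate bookkeeping around ring->gpu_caches_dirty. A minimal sketch of the intended pairing around a batch submission; the surrounding function is hypothetical, while dispatch_execbuffer is the ring vfunc declared in intel_ringbuffer.h:

/* Hypothetical submission path showing how the helpers pair up. */
static int submit_batch_sketch(struct intel_ring_buffer *ring,
			       u32 offset, u32 length)
{
	int ret;

	/* Drop stale data from the GPU caches before the batch runs;
	 * this also flushes first if a previous batch left them dirty. */
	ret = intel_ring_invalidate_all_caches(ring);
	if (ret)
		return ret;

	ret = ring->dispatch_execbuffer(ring, offset, length);
	if (ret)
		return ret;

	/* The batch may have dirtied the caches; note it so that
	 * intel_ring_flush_all_caches() does real work later. */
	ring->gpu_caches_dirty = true;
	return 0;
}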
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 1d3c81fdad92..2ea7a311a1f0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -72,7 +72,14 @@ struct intel_ring_buffer {
u32 flush_domains);
int (*add_request)(struct intel_ring_buffer *ring,
u32 *seqno);
- u32 (*get_seqno)(struct intel_ring_buffer *ring);
+ /* Some chipsets are not quite as coherent as advertised and need
+ * an expensive kick to force a true read of the up-to-date seqno.
+ * However, the up-to-date seqno is not always required and the last
+ * seen value is good enough. Note that the seqno will always be
+ * monotonic, even if not coherent.
+ */
+ u32 (*get_seqno)(struct intel_ring_buffer *ring,
+ bool lazy_coherency);
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
u32 offset, u32 length);
void (*cleanup)(struct intel_ring_buffer *ring);
@@ -101,15 +108,6 @@ struct intel_ring_buffer {
struct list_head request_list;
/**
- * List of objects currently pending a GPU write flush.
- *
- * All elements on this list will belong to either the
- * active_list or flushing_list, last_rendering_seqno can
- * be used to differentiate between the two elements.
- */
- struct list_head gpu_write_list;
-
- /**
* Do we have some not yet emitted requests outstanding?
*/
u32 outstanding_lazy_request;
@@ -204,6 +202,8 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
void intel_ring_advance(struct intel_ring_buffer *ring);
u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
+int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
+int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
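The lazy_coherency parameter documented above lets hot polling paths accept a possibly stale (but still monotonic) seqno, while a waiter that is about to sleep pays for the coherent read. A sketch of the two call styles, assuming the driver's i915_seqno_passed() helper; the wrapper itself is hypothetical:

/* Hypothetical wrapper contrasting the two coherency modes. */
static bool seqno_passed_sketch(struct intel_ring_buffer *ring,
				u32 seqno, bool about_to_sleep)
{
	/* A busy-poll loop tolerates a stale value; the final check
	 * before sleeping must force a true read. */
	u32 hw_seqno = ring->get_seqno(ring, !about_to_sleep);

	return i915_seqno_passed(hw_seqno, seqno);
}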
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index d251d9d7a06c..0007a4d9bf6e 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -96,7 +96,7 @@ struct intel_sdvo {
/*
* Hotplug activation bits for this device
*/
- uint8_t hotplug_active[2];
+ uint16_t hotplug_active;
/**
* This is used to select the color range of RGB outputs in HDMI mode.
@@ -627,6 +627,14 @@ static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo,
&outputs, sizeof(outputs));
}
+static bool intel_sdvo_get_active_outputs(struct intel_sdvo *intel_sdvo,
+ u16 *outputs)
+{
+ return intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_ACTIVE_OUTPUTS,
+ outputs, sizeof(*outputs));
+}
+
static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
int mode)
{
@@ -1141,51 +1149,132 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
intel_sdvo_write_sdvox(intel_sdvo, sdvox);
}
-static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
+static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
{
- struct drm_device *dev = encoder->dev;
+ struct intel_sdvo_connector *intel_sdvo_connector =
+ to_intel_sdvo_connector(&connector->base);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base);
+ u16 active_outputs;
+
+ intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
+
+ if (active_outputs & intel_sdvo_connector->output_flag)
+ return true;
+ else
+ return false;
+}
+
+static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ u32 tmp;
+
+ tmp = I915_READ(intel_sdvo->sdvo_reg);
+
+ if (!(tmp & SDVO_ENABLE))
+ return false;
+
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ else
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+static void intel_disable_sdvo(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ u32 temp;
+
+ intel_sdvo_set_active_outputs(intel_sdvo, 0);
+ if (0)
+ intel_sdvo_set_encoder_power_state(intel_sdvo,
+ DRM_MODE_DPMS_OFF);
+
+ temp = I915_READ(intel_sdvo->sdvo_reg);
+ if ((temp & SDVO_ENABLE) != 0) {
+ intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
+ }
+}
+
+static void intel_enable_sdvo(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
u32 temp;
+ bool input1, input2;
+ int i;
+ u8 status;
+
+ temp = I915_READ(intel_sdvo->sdvo_reg);
+ if ((temp & SDVO_ENABLE) == 0)
+ intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
+ for (i = 0; i < 2; i++)
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
+ /* Warn if the device reported failure to sync.
+ * A lot of SDVO devices fail to notify of sync, but it's
+	 * a given that if the status is a success, we succeeded.
+ */
+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
+ DRM_DEBUG_KMS("First %s output reported failure to "
+ "sync\n", SDVO_NAME(intel_sdvo));
+ }
+
+ if (0)
+ intel_sdvo_set_encoder_power_state(intel_sdvo,
+ DRM_MODE_DPMS_ON);
+ intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
+}
+
+static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
+{
+ struct drm_crtc *crtc;
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+
+	/* SDVO supports only 2 DPMS states. */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == connector->dpms)
+ return;
+
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ crtc = intel_sdvo->base.base.crtc;
+ if (!crtc) {
+ intel_sdvo->base.connectors_active = false;
+ return;
+ }
if (mode != DRM_MODE_DPMS_ON) {
intel_sdvo_set_active_outputs(intel_sdvo, 0);
if (0)
intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
- if (mode == DRM_MODE_DPMS_OFF) {
- temp = I915_READ(intel_sdvo->sdvo_reg);
- if ((temp & SDVO_ENABLE) != 0) {
- intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
- }
- }
+ intel_sdvo->base.connectors_active = false;
+
+ intel_crtc_update_dpms(crtc);
} else {
- bool input1, input2;
- int i;
- u8 status;
-
- temp = I915_READ(intel_sdvo->sdvo_reg);
- if ((temp & SDVO_ENABLE) == 0)
- intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
- for (i = 0; i < 2; i++)
- intel_wait_for_vblank(dev, intel_crtc->pipe);
-
- status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
- /* Warn if the device reported failure to sync.
- * A lot of SDVO devices fail to notify of sync, but it's
- * a given it the status is a success, we succeeded.
- */
- if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
- DRM_DEBUG_KMS("First %s output reported failure to "
- "sync\n", SDVO_NAME(intel_sdvo));
- }
+ intel_sdvo->base.connectors_active = true;
+
+ intel_crtc_update_dpms(crtc);
if (0)
intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
}
- return;
+
+ intel_modeset_check_state(connector->dev);
}
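SDVO keeps its own connector .dpms implementation because it must also toggle the device's active outputs; other outputs (see the intel_tv.c hunk below) switch to the generic intel_connector_dpms instead. A sketch of that generic skeleton, inferred from the SDVO version above minus the SDVO-specific calls; an assumption, not the helper's exact body:

/* Sketch of a generic connector dpms in the spirit of
 * intel_connector_dpms; assumed, not copied from the driver. */
static void connector_dpms_sketch(struct drm_connector *connector, int mode)
{
	struct intel_encoder *encoder = intel_attached_encoder(connector);

	/* Only the on/off states are supported. */
	if (mode != DRM_MODE_DPMS_ON)
		mode = DRM_MODE_DPMS_OFF;

	if (mode == connector->dpms)
		return;
	connector->dpms = mode;

	encoder->connectors_active = (mode == DRM_MODE_DPMS_ON);
	if (encoder->base.crtc)
		intel_crtc_update_dpms(encoder->base.crtc);

	intel_modeset_check_state(connector->dev);
}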
static int intel_sdvo_mode_valid(struct drm_connector *connector,
@@ -1250,25 +1339,29 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
return true;
}
-static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
+static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
{
struct drm_device *dev = intel_sdvo->base.base.dev;
- u8 response[2];
+ uint16_t hotplug;
/* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
* on the line. */
if (IS_I945G(dev) || IS_I945GM(dev))
- return false;
+ return 0;
- return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
- &response, 2) && response[0];
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
+ &hotplug, sizeof(hotplug)))
+ return 0;
+
+ return hotplug;
}
static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
{
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
- intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2);
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
+ &intel_sdvo->hotplug_active, 2);
}
static bool
@@ -1344,7 +1437,6 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
}
} else
status = connector_status_disconnected;
- connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -1418,7 +1510,6 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
else
ret = connector_status_disconnected;
- connector->display_info.raw_edid = NULL;
kfree(edid);
} else
ret = connector_status_connected;
@@ -1464,7 +1555,6 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
drm_add_edid_modes(connector, edid);
}
- connector->display_info.raw_edid = NULL;
kfree(edid);
}
}
@@ -1836,8 +1926,8 @@ set_value:
done:
if (intel_sdvo->base.base.crtc) {
struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
- drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
- crtc->y, crtc->fb);
+ intel_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
}
return 0;
@@ -1845,15 +1935,13 @@ done:
}
static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
- .dpms = intel_sdvo_dpms,
.mode_fixup = intel_sdvo_mode_fixup,
- .prepare = intel_encoder_prepare,
.mode_set = intel_sdvo_mode_set,
- .commit = intel_encoder_commit,
+ .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = intel_sdvo_dpms,
.detect = intel_sdvo_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_sdvo_set_property,
@@ -2025,6 +2113,7 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
connector->base.base.interlace_allowed = 1;
connector->base.base.doublescan_allowed = 0;
connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
intel_connector_attach_encoder(&connector->base, &encoder->base);
drm_sysfs_connector_add(&connector->base.base);
@@ -2063,17 +2152,18 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
- if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) {
+ if (intel_sdvo_get_hotplug_support(intel_sdvo) &
+ intel_sdvo_connector->output_flag) {
connector->polled = DRM_CONNECTOR_POLL_HPD;
- intel_sdvo->hotplug_active[0] |= 1 << device;
+ intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag;
/* Some SDVO devices have one-shot hotplug interrupts.
* Ensure that they get re-enabled when an interrupt happens.
*/
intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
intel_sdvo_enable_hotplug(intel_encoder);
- }
- else
+ } else {
connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ }
encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
@@ -2081,8 +2171,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
intel_sdvo->is_hdmi = true;
}
- intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT));
+ intel_sdvo->base.cloneable = true;
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (intel_sdvo->is_hdmi)
@@ -2113,7 +2202,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
intel_sdvo->is_tv = true;
intel_sdvo->base.needs_tv_clock = true;
- intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+ intel_sdvo->base.cloneable = false;
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
@@ -2156,8 +2245,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
}
- intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT));
+ intel_sdvo->base.cloneable = true;
intel_sdvo_connector_init(intel_sdvo_connector,
intel_sdvo);
@@ -2189,8 +2277,10 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
}
- intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
- (1 << INTEL_SDVO_LVDS_CLONE_BIT));
+ /* SDVO LVDS is cloneable because the SDVO encoder does the upscaling,
+ * as opposed to native LVDS, where we upscale with the panel-fitter
+ * (and hence only the native LVDS resolution could be cloned). */
+ intel_sdvo->base.cloneable = true;
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
@@ -2575,6 +2665,10 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
+ intel_encoder->disable = intel_disable_sdvo;
+ intel_encoder->enable = intel_enable_sdvo;
+ intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
+
/* In default case sdvo lvds is false */
if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
goto err;
@@ -2589,7 +2683,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
/* Only enable the hotplug irq if we need it, to work around noisy
* hotplug lines.
*/
- if (intel_sdvo->hotplug_active[0])
+ if (intel_sdvo->hotplug_active)
dev_priv->hotplug_supported_mask |= hotplug_mask;
intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
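The get_hw_state hooks wired up above, for both the encoder and each connector, give the modeset state checker something concrete to compare software state against. A minimal sketch of the consuming side, assuming the connectors_active field used earlier in this patch; the real intel_modeset_check_state() is more thorough:

/* Illustrative hw/sw cross-check; not the checker's exact body. */
static void check_encoder_state_sketch(struct intel_encoder *encoder)
{
	enum pipe pipe;
	bool enabled = encoder->get_hw_state(encoder, &pipe);

	WARN(enabled != encoder->connectors_active,
	     "encoder hw/sw state mismatch (hw: %d, sw: %d)\n",
	     enabled, encoder->connectors_active);
}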
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index ccfb2ff4c31d..62bb048c135e 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -835,22 +835,37 @@ static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
base);
}
+static bool
+intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp = I915_READ(TV_CTL);
+
+ if (!(tmp & TV_ENC_ENABLE))
+ return false;
+
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
static void
-intel_tv_dpms(struct drm_encoder *encoder, int mode)
+intel_enable_tv(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
- break;
- }
+ I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
+}
+
+static void
+intel_disable_tv(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
}
static const struct tv_mode *
@@ -894,17 +909,14 @@ intel_tv_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = encoder->dev;
struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
- struct intel_encoder *other_encoder;
if (!tv_mode)
return false;
- for_each_encoder_on_crtc(dev, encoder->crtc, other_encoder)
- if (&other_encoder->base != encoder)
- return false;
+ if (intel_encoder_check_is_cloned(&intel_tv->base))
+ return false;
adjusted_mode->clock = tv_mode->clock;
return true;
@@ -1302,12 +1314,9 @@ intel_tv_detect(struct drm_connector *connector, bool force)
if (force) {
struct intel_load_detect_pipe tmp;
- if (intel_get_load_detect_pipe(&intel_tv->base, connector,
- &mode, &tmp)) {
+ if (intel_get_load_detect_pipe(connector, &mode, &tmp)) {
type = intel_tv_detect_type(intel_tv, connector);
- intel_release_load_detect_pipe(&intel_tv->base,
- connector,
- &tmp);
+ intel_release_load_detect_pipe(connector, &tmp);
} else
return connector_status_unknown;
} else
@@ -1473,22 +1482,20 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
}
if (changed && crtc)
- drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
- crtc->y, crtc->fb);
+ intel_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
out:
return ret;
}
static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
- .dpms = intel_tv_dpms,
.mode_fixup = intel_tv_mode_fixup,
- .prepare = intel_encoder_prepare,
.mode_set = intel_tv_mode_set,
- .commit = intel_encoder_commit,
+ .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_tv_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = intel_connector_dpms,
.detect = intel_tv_detect,
.destroy = intel_tv_destroy,
.set_property = intel_tv_set_property,
@@ -1618,10 +1625,15 @@ intel_tv_init(struct drm_device *dev)
drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
DRM_MODE_ENCODER_TVDAC);
+ intel_encoder->enable = intel_enable_tv;
+ intel_encoder->disable = intel_disable_tv;
+ intel_encoder->get_hw_state = intel_tv_get_hw_state;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_TVOUT;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
- intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
+ intel_encoder->cloneable = false;
intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
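With the per-encoder clone_mask bitmasks replaced throughout this patch by a single cloneable flag (TV and SDVO TV opt out, while DVI, analog and LVDS SDVO opt in), the compatibility test between two encoders sharing a CRTC reduces to a conjunction. A sketch of the implied check; the helper name is hypothetical:

/* Two encoders may share a CRTC only if both opt in to cloning. */
static bool encoders_cloneable_sketch(struct intel_encoder *a,
				      struct intel_encoder *b)
{
	return a == b || (a->cloneable && b->cloneable);
}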