From c10e408a00bb74c39f4f9b817f2b948851513377 Mon Sep 17 00:00:00 2001 From: Mathias Fröhlich Date: Thu, 1 Mar 2012 06:44:35 +0100 Subject: i915: Add option to bypass vbt table. This change enables the use of displays where the vbt table just contains inappropriate values, but either the vesa defaults or the video=... modes do something sensible with the attached display. The problem happens with an embedded board that contains vbt bios tables that do not match the attached display. Using this change and the appropriate kernel boot command line they are able to use an otherwise completely unusable secondary display on that embedded board. Reviewed-by: Paul Menzel Signed-off-by: Mathias Froehlich Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 4 ++-- drivers/gpu/drm/i915/intel_bios.c | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 0694e170a338..0e797d3cb5f4 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -89,8 +89,8 @@ MODULE_PARM_DESC(lvds_use_ssc, int i915_vbt_sdvo_panel_type __read_mostly = -1; module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600); MODULE_PARM_DESC(vbt_sdvo_panel_type, - "Override selection of SDVO panel mode in the VBT " - "(default: auto)"); + "Override/Ignore selection of SDVO panel mode in the VBT " + "(-2=ignore, -1=auto [default], index in VBT BIOS table)"); static bool i915_try_reset __read_mostly = true; module_param_named(reset, i915_try_reset, bool, 0600); diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 8168d8f8a634..0ae76d6b6eab 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -255,6 +255,11 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv, int index; index = i915_vbt_sdvo_panel_type; + if (index == -2) { + DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n"); + return; + } + if (index == -1) { struct bdb_sdvo_lvds_options *sdvo_lvds_options; -- cgit v1.2.3-59-g8ed1b From b2dbf316f32b44d174445f868a39b37e4608541e Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 12 Jan 2012 09:03:14 -0800 Subject: drm/i915: remove ACPI related DRM_ERRORs They're not really errors (well actually I don't know; I don't understand _DSM and _MUX well enough to say, but I do know they spam people's logs and seem to be harmless). Signed-off-by: Jesse Barnes [danvet: The _DSM error got remove in another patch already] Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=44250 Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_acpi.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c index bae3edf956a4..f152b2a7fc54 100644 --- a/drivers/gpu/drm/i915/intel_acpi.c +++ b/drivers/gpu/drm/i915/intel_acpi.c @@ -182,8 +182,6 @@ static void intel_dsm_platform_mux_info(void) DRM_DEBUG_DRIVER(" hpd mux info: %s\n", intel_dsm_mux_type(info->buffer.pointer[3])); } - } else { - DRM_ERROR("MUX INFO call failed\n"); } out: -- cgit v1.2.3-59-g8ed1b From fa37d39e4c6622d80bd8061d600701bcea1d6870 Mon Sep 17 00:00:00 2001 From: Sean Paul Date: Fri, 2 Mar 2012 12:53:39 -0500 Subject: drm/i915: Retry reading the PCH FDI receiver ISR According to the PRM (Vol3P2), the PCH FDI receiver ISR read for bit lock should be retried at least once. 
This patch retries the read 5 times with a small delay in between reads. I've had reports of display corruption on resume with "FDI train 1 fail!", so I'm hoping that adding this retry will mitigate the issue. Signed-off-by: Sean Paul Reviewed-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 42 +++++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index de1ba19726f7..615b3973114b 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2537,7 +2537,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - u32 reg, temp, i; + u32 reg, temp, i, retry; /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit for train result */ @@ -2589,15 +2589,19 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) POSTING_READ(reg); udelay(500); - reg = FDI_RX_IIR(pipe); - temp = I915_READ(reg); - DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); - - if (temp & FDI_RX_BIT_LOCK) { - I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); - DRM_DEBUG_KMS("FDI train 1 done.\n"); - break; + for (retry = 0; retry < 5; retry++) { + reg = FDI_RX_IIR(pipe); + temp = I915_READ(reg); + DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); + if (temp & FDI_RX_BIT_LOCK) { + I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); + DRM_DEBUG_KMS("FDI train 1 done.\n"); + break; + } + udelay(50); } + if (retry < 5) + break; } if (i == 4) DRM_ERROR("FDI train 1 fail!\n"); @@ -2638,15 +2642,19 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) POSTING_READ(reg); udelay(500); - reg = FDI_RX_IIR(pipe); - temp = I915_READ(reg); - DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); - - if (temp & FDI_RX_SYMBOL_LOCK) { - I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); - DRM_DEBUG_KMS("FDI train 2 done.\n"); - break; + for (retry = 0; retry < 5; retry++) { + reg = FDI_RX_IIR(pipe); + temp = I915_READ(reg); + DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); + if (temp & FDI_RX_SYMBOL_LOCK) { + I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); + DRM_DEBUG_KMS("FDI train 2 done.\n"); + break; + } + udelay(50); } + if (retry < 5) + break; } if (i == 4) DRM_ERROR("FDI train 2 fail!\n"); -- cgit v1.2.3-59-g8ed1b From e2b665c48099d9c2229a187467761da4882eb066 Mon Sep 17 00:00:00 2001 From: Adam Jackson Date: Wed, 14 Mar 2012 11:22:10 -0400 Subject: drm/i915: Pull MTRR setup to its own function No functional change here, just clarifying code flow. Signed-off-by: Adam Jackson Reviewed-by: Kenneth Graunke Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 9341eb8ce93b..fa5c276b7992 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1914,6 +1914,22 @@ ips_ping_for_i915_load(void) } } +static void +i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base, + unsigned long size) +{ + /* Set up a WC MTRR for non-PAT systems. This is more common than + * one would think, because the kernel disables PAT on first + * generation Core chips because WC PAT gets overridden by a UC + * MTRR if present. Even if a UC MTRR isn't present. 
+ */ + dev_priv->mm.gtt_mtrr = mtrr_add(base, size, MTRR_TYPE_WRCOMB, 1); + if (dev_priv->mm.gtt_mtrr < 0) { + DRM_INFO("MTRR allocation failed. Graphics " + "performance may suffer.\n"); + } +} + /** * i915_driver_load - setup chip and create an initial config * @dev: DRM device @@ -1992,18 +2008,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto out_rmmap; } - /* Set up a WC MTRR for non-PAT systems. This is more common than - * one would think, because the kernel disables PAT on first - * generation Core chips because WC PAT gets overridden by a UC - * MTRR if present. Even if a UC MTRR isn't present. - */ - dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base, - agp_size, - MTRR_TYPE_WRCOMB, 1); - if (dev_priv->mm.gtt_mtrr < 0) { - DRM_INFO("MTRR allocation failed. Graphics " - "performance may suffer.\n"); - } + i915_mtrr_setup(dev_priv, dev->agp->base, agp_size); /* The i915 workqueue is primarily used for batched retirement of * requests (and thus managing bo) once the task has been completed -- cgit v1.2.3-59-g8ed1b From 9e984bc1dffd405138ff22356188b6a1677c64c8 Mon Sep 17 00:00:00 2001 From: Adam Jackson Date: Wed, 14 Mar 2012 11:22:11 -0400 Subject: drm/i915: Don't do MTRR setup if PAT is enabled Some newer BIOSes are shipping with all MTRRs already populated. These BIOSes are all on machines with sufficiently new CPUs that the referenced errata doesn't apply anyway, so just don't try to claim the MTRR. Signed-off-by: Adam Jackson Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=41648 Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index fa5c276b7992..ee7775c55450 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -43,6 +43,7 @@ #include #include #include +#include static void i915_write_hws_pga(struct drm_device *dev) { @@ -1918,6 +1919,11 @@ static void i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base, unsigned long size) { +#if defined(CONFIG_X86_PAT) + if (cpu_has_pat) + return; +#endif + /* Set up a WC MTRR for non-PAT systems. This is more common than * one would think, because the kernel disables PAT on first * generation Core chips because WC PAT gets overridden by a UC -- cgit v1.2.3-59-g8ed1b From f01db988ef6f6c70a6cc36ee71e4a98a68901229 Mon Sep 17 00:00:00 2001 From: Sean Paul Date: Fri, 16 Mar 2012 12:43:22 -0400 Subject: drm/i915: Add wait_for in init_ring_common I have seen a number of "blt ring initialization failed" messages where the ctl or start registers are not the correct value. Upon further inspection, if the code just waited a little bit, it would read the correct value. Adding the wait_for to these reads should eliminate the issue. 
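(Aside, not part of the patch: wait_for() in this driver is a poll-with-timeout helper — it re-evaluates a condition until it becomes true or a millisecond deadline passes, and returns non-zero on timeout, which is why the hunks below treat a non-zero return as "initialization failed". A rough standalone sketch of that pattern, with hypothetical names rather than the actual i915 macro:)

    #include <stdbool.h>
    #include <time.h>

    /* Illustration only: poll cond() roughly once per millisecond until it
     * holds or timeout_ms milliseconds have elapsed.  Returns 0 on success,
     * -1 on timeout (the kernel helper returns an -ETIMEDOUT-style error). */
    static int poll_with_timeout(bool (*cond)(void), int timeout_ms)
    {
            int waited;

            for (waited = 0; waited < timeout_ms; waited++) {
                    if (cond())
                            return 0;
                    nanosleep(&(struct timespec){ .tv_nsec = 1000 * 1000 }, NULL);
            }
            return cond() ? 0 : -1;
    }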
Signed-off-by: Sean Paul Reviewed-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ringbuffer.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index ca3972f2c6f5..b7b50a5b0478 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -290,9 +290,9 @@ static int init_ring_common(struct intel_ring_buffer *ring) | RING_REPORT_64K | RING_VALID); /* If the head is still not zero, the ring is dead */ - if ((I915_READ_CTL(ring) & RING_VALID) == 0 || - I915_READ_START(ring) != obj->gtt_offset || - (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) { + if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 && + I915_READ_START(ring) == obj->gtt_offset && + (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) { DRM_ERROR("%s initialization failed " "ctl %08x head %08x tail %08x start %08x\n", ring->name, -- cgit v1.2.3-59-g8ed1b From 7bd90909bbf9ce7c40e1da3d72b97b93839c188a Mon Sep 17 00:00:00 2001 From: Carsten Emde Date: Thu, 15 Mar 2012 15:56:25 +0100 Subject: drm/i915: panel: invert brightness via parameter Following the documentation of the Legacy Backlight Brightness (LBB) Register in the configuration space of some Intel PCI graphics adapters, setting the LBB register with the value 0x0 causes the backlight to be turned off, and 0xFF causes the backlight to be set to 100% intensity (http://download.intel.com/embedded/processors/Whitepaper/324567.pdf). The Acer Aspire 5734Z, however, turns the backlight off at 0xFF and sets it to maximum intensity at 0. In consequence, the screen of this systems becomes dark at an early boot stage which makes it unusable. The same inversion applies to the BLC_PWM_CTL I915 register. This problem was introduced in kernel version 2.6.38 when the PCI device of this system was first supported by the i915 KMS module. This patch adds a parameter to the i915 module to enable inversion of the brightness variable (i915.invert_brightness). Signed-off-by: Carsten Emde Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- Documentation/kernel-parameters.txt | 9 +++++++++ drivers/gpu/drm/i915/intel_panel.c | 17 +++++++++++++++++ 2 files changed, 26 insertions(+) (limited to 'drivers') diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 033d4e69b43b..9f6ba8fab088 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -967,6 +967,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted. i8k.restricted [HW] Allow controlling fans only if SYS_ADMIN capability is set. + i915.invert_brightness + [DRM] Invert the sense of the variable that is used to + set the brightness of the panel backlight. Normally a + value of 0 indicates backlight switched off, and the + maximum value sets the backlight to maximum brightness. + If this parameter is specified, a value of 0 sets the + backlight to maximum brightness, and the maximum value + switches the backlight off. 
+ icn= [HW,ISDN] Format: [,[,[,]]] diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 230a141dbea3..c4b3f349af69 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -28,6 +28,7 @@ * Chris Wilson */ +#include #include "intel_drv.h" #define PCI_LBPC 0xf4 /* legacy/combination backlight modes */ @@ -191,6 +192,20 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev) return max; } +static bool i915_panel_invert_brightness; +MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness, please " + "report PCI device ID, subsystem vendor and subsystem device ID " + "to dri-devel@lists.freedesktop.org, if your machine needs it. " + "It will then be included in an upcoming module version."); +module_param_named(invert_brightness, i915_panel_invert_brightness, bool, 0600); +static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val) +{ + if (i915_panel_invert_brightness) + return intel_panel_get_max_backlight(dev) - val; + + return val; +} + u32 intel_panel_get_backlight(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -211,6 +226,7 @@ u32 intel_panel_get_backlight(struct drm_device *dev) } } + val = intel_panel_compute_brightness(dev, val); DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); return val; } @@ -228,6 +244,7 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level u32 tmp; DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level); + level = intel_panel_compute_brightness(dev, level); if (HAS_PCH_SPLIT(dev)) return intel_pch_panel_set_backlight(dev, level); -- cgit v1.2.3-59-g8ed1b From 4dca20efb1a9c2efefc28ad2867e5d6c3f5e1955 Mon Sep 17 00:00:00 2001 From: Carsten Emde Date: Thu, 15 Mar 2012 15:56:26 +0100 Subject: drm/i915: panel: invert brightness via quirk A machine may need to invert the panel backlight brightness value. This patch adds the infrastructure for a quirk to do so. Signed-off-by: Carsten Emde Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- Documentation/kernel-parameters.txt | 17 +++++++++++------ drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/intel_display.c | 9 +++++++++ drivers/gpu/drm/i915/intel_panel.c | 15 +++++++++++---- 4 files changed, 32 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 9f6ba8fab088..da449991e8ae 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -967,14 +967,19 @@ bytes respectively. Such letter suffixes can also be entirely omitted. i8k.restricted [HW] Allow controlling fans only if SYS_ADMIN capability is set. - i915.invert_brightness + i915.invert_brightness= [DRM] Invert the sense of the variable that is used to set the brightness of the panel backlight. Normally a - value of 0 indicates backlight switched off, and the - maximum value sets the backlight to maximum brightness. - If this parameter is specified, a value of 0 sets the - backlight to maximum brightness, and the maximum value - switches the backlight off. + brightness value of 0 indicates backlight switched off, + and the maximum of the brightness value sets the backlight + to maximum brightness. If this parameter is set to 0 + (default) and the machine requires it, or this parameter + is set to 1, a brightness value of 0 sets the backlight + to maximum brightness, and the maximum of the brightness + value switches the backlight off. 
+ -1 -- never invert brightness + 0 -- machine default + 1 -- force brightness inversion icn= [HW,ISDN] Format: [,[,[,]]] diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c0f19f572004..e7a00b7cd372 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -295,6 +295,7 @@ enum intel_pch { #define QUIRK_PIPEA_FORCE (1<<0) #define QUIRK_LVDS_SSC_DISABLE (1<<1) +#define QUIRK_INVERT_BRIGHTNESS (1<<2) struct intel_fbdev; struct intel_fbc_work; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 615b3973114b..92208f8d3512 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9020,6 +9020,15 @@ static void quirk_ssc_force_disable(struct drm_device *dev) dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; } +/* + * A machine may need to invert the panel backlight brightness value + */ +static void quirk_invert_brightness(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS; +} + struct intel_quirk { int device; int subsystem_vendor; diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index c4b3f349af69..7998ca6f594f 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -192,15 +192,22 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev) return max; } -static bool i915_panel_invert_brightness; -MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness, please " +static int i915_panel_invert_brightness; +MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness " + "(-1 force normal, 0 machine defaults, 1 force inversion), please " "report PCI device ID, subsystem vendor and subsystem device ID " "to dri-devel@lists.freedesktop.org, if your machine needs it. " "It will then be included in an upcoming module version."); -module_param_named(invert_brightness, i915_panel_invert_brightness, bool, 0600); +module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600); static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val) { - if (i915_panel_invert_brightness) + struct drm_i915_private *dev_priv = dev->dev_private; + + if (i915_panel_invert_brightness < 0) + return val; + + if (i915_panel_invert_brightness > 0 || + dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) return intel_panel_get_max_backlight(dev) - val; return val; -- cgit v1.2.3-59-g8ed1b From 5a15ab5b93e4a3ebcd4fa6c76cf646a45e9cf806 Mon Sep 17 00:00:00 2001 From: Carsten Emde Date: Thu, 15 Mar 2012 15:56:27 +0100 Subject: drm/i915: panel: invert brightness acer aspire 5734z Mark the Acer Aspire 5734Z that this machines requires the module to invert the panel backlight brightness value after reading from and prior to writing to the PCI configuration space. Signed-off-by: Carsten Emde Acked-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 92208f8d3512..683002fb63d1 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9021,7 +9021,8 @@ static void quirk_ssc_force_disable(struct drm_device *dev) } /* - * A machine may need to invert the panel backlight brightness value + * A machine (e.g. 
Acer Aspire 5734Z) may need to invert the panel backlight + * brightness value */ static void quirk_invert_brightness(struct drm_device *dev) { @@ -9061,6 +9062,9 @@ struct intel_quirk intel_quirks[] = { /* Sony Vaio Y cannot use SSC on LVDS */ { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, + + /* Acer Aspire 5734Z must invert backlight brightness */ + { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, }; static void intel_init_quirks(struct drm_device *dev) -- cgit v1.2.3-59-g8ed1b From a70491cc6bb599c5ea8a60cbfae46ec41a7efb2a Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 18 Mar 2012 13:00:11 -0700 Subject: i915: Add and use pr_fmt and pr_ Use a more current logging style. Ensure that appropriate logging messages are prefixed with "i915: ". Convert printks to pr_. Align arguments. Signed-off-by: Joe Perches Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 6 ++- drivers/gpu/drm/i915/i915_irq.c | 70 ++++++++++++++--------------------- drivers/gpu/drm/i915/intel_opregion.c | 4 +- drivers/gpu/drm/i915/intel_panel.c | 4 +- 4 files changed, 37 insertions(+), 47 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index ee7775c55450..3c086d707a91 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -26,6 +26,8 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include "drmP.h" #include "drm.h" #include "drm_crtc_helper.h" @@ -1159,14 +1161,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_ struct drm_device *dev = pci_get_drvdata(pdev); pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; if (state == VGA_SWITCHEROO_ON) { - printk(KERN_INFO "i915: switched on\n"); + pr_info("switched on\n"); dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; /* i915 resume handler doesn't set to D0 */ pci_set_power_state(dev->pdev, PCI_D0); i915_resume(dev); dev->switch_power_state = DRM_SWITCH_POWER_ON; } else { - printk(KERN_ERR "i915: switched off\n"); + pr_err("switched off\n"); dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; i915_suspend(dev, pmm); dev->switch_power_state = DRM_SWITCH_POWER_OFF; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index afd4e03e337e..4c8914927217 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -26,6 +26,8 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include "drmP.h" @@ -1103,33 +1105,26 @@ static void i915_report_and_clear_eir(struct drm_device *dev) if (!eir) return; - printk(KERN_ERR "render error detected, EIR: 0x%08x\n", - eir); + pr_err("render error detected, EIR: 0x%08x\n", eir); if (IS_G4X(dev)) { if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { u32 ipeir = I915_READ(IPEIR_I965); - printk(KERN_ERR " IPEIR: 0x%08x\n", - I915_READ(IPEIR_I965)); - printk(KERN_ERR " IPEHR: 0x%08x\n", - I915_READ(IPEHR_I965)); - printk(KERN_ERR " INSTDONE: 0x%08x\n", + pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); + pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); + pr_err(" INSTDONE: 0x%08x\n", I915_READ(INSTDONE_I965)); - printk(KERN_ERR " INSTPS: 0x%08x\n", - I915_READ(INSTPS)); - printk(KERN_ERR " INSTDONE1: 0x%08x\n", - I915_READ(INSTDONE1)); - printk(KERN_ERR " ACTHD: 0x%08x\n", - I915_READ(ACTHD_I965)); + pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); + pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1)); + pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); I915_WRITE(IPEIR_I965, ipeir); 
POSTING_READ(IPEIR_I965); } if (eir & GM45_ERROR_PAGE_TABLE) { u32 pgtbl_err = I915_READ(PGTBL_ER); - printk(KERN_ERR "page table error\n"); - printk(KERN_ERR " PGTBL_ER: 0x%08x\n", - pgtbl_err); + pr_err("page table error\n"); + pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); I915_WRITE(PGTBL_ER, pgtbl_err); POSTING_READ(PGTBL_ER); } @@ -1138,53 +1133,42 @@ static void i915_report_and_clear_eir(struct drm_device *dev) if (!IS_GEN2(dev)) { if (eir & I915_ERROR_PAGE_TABLE) { u32 pgtbl_err = I915_READ(PGTBL_ER); - printk(KERN_ERR "page table error\n"); - printk(KERN_ERR " PGTBL_ER: 0x%08x\n", - pgtbl_err); + pr_err("page table error\n"); + pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); I915_WRITE(PGTBL_ER, pgtbl_err); POSTING_READ(PGTBL_ER); } } if (eir & I915_ERROR_MEMORY_REFRESH) { - printk(KERN_ERR "memory refresh error:\n"); + pr_err("memory refresh error:\n"); for_each_pipe(pipe) - printk(KERN_ERR "pipe %c stat: 0x%08x\n", + pr_err("pipe %c stat: 0x%08x\n", pipe_name(pipe), I915_READ(PIPESTAT(pipe))); /* pipestat has already been acked */ } if (eir & I915_ERROR_INSTRUCTION) { - printk(KERN_ERR "instruction error\n"); - printk(KERN_ERR " INSTPM: 0x%08x\n", - I915_READ(INSTPM)); + pr_err("instruction error\n"); + pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); if (INTEL_INFO(dev)->gen < 4) { u32 ipeir = I915_READ(IPEIR); - printk(KERN_ERR " IPEIR: 0x%08x\n", - I915_READ(IPEIR)); - printk(KERN_ERR " IPEHR: 0x%08x\n", - I915_READ(IPEHR)); - printk(KERN_ERR " INSTDONE: 0x%08x\n", - I915_READ(INSTDONE)); - printk(KERN_ERR " ACTHD: 0x%08x\n", - I915_READ(ACTHD)); + pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); + pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); + pr_err(" INSTDONE: 0x%08x\n", I915_READ(INSTDONE)); + pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); I915_WRITE(IPEIR, ipeir); POSTING_READ(IPEIR); } else { u32 ipeir = I915_READ(IPEIR_I965); - printk(KERN_ERR " IPEIR: 0x%08x\n", - I915_READ(IPEIR_I965)); - printk(KERN_ERR " IPEHR: 0x%08x\n", - I915_READ(IPEHR_I965)); - printk(KERN_ERR " INSTDONE: 0x%08x\n", + pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); + pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); + pr_err(" INSTDONE: 0x%08x\n", I915_READ(INSTDONE_I965)); - printk(KERN_ERR " INSTPS: 0x%08x\n", - I915_READ(INSTPS)); - printk(KERN_ERR " INSTDONE1: 0x%08x\n", - I915_READ(INSTDONE1)); - printk(KERN_ERR " ACTHD: 0x%08x\n", - I915_READ(ACTHD_I965)); + pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); + pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1)); + pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); I915_WRITE(IPEIR_I965, ipeir); POSTING_READ(IPEIR_I965); } diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 289140bc83cb..34929aeca66b 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -25,6 +25,8 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -355,7 +357,7 @@ static void intel_didl_outputs(struct drm_device *dev) } if (!acpi_video_bus) { - printk(KERN_WARNING "No ACPI video bus found\n"); + pr_warn("No ACPI video bus found\n"); return; } diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 7998ca6f594f..cad45ff8251b 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -28,6 +28,8 @@ * Chris Wilson */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include "intel_drv.h" @@ -172,7 +174,7 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev) /* XXX add code here to 
query mode clock or hardware clock * and program max PWM appropriately. */ - printk_once(KERN_WARNING "fixme: max PWM is zero.\n"); + pr_warn_once("fixme: max PWM is zero\n"); return 1; } -- cgit v1.2.3-59-g8ed1b From 741639079cb49d796be50827e13b592121184ff8 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 15 Feb 2012 23:50:21 +0100 Subject: drm/i915: split out dma mapping from global gtt bind/unbind functions Note that there's a functional change buried in this patch wrt the ilk dmar workaround: We now only idle the gpu while tearing down the dmar mappings, not while clearing the gtt. Keeping the current semantics would have made for some really ugly code and afaik the issue is only with the dmar unmapping that needs a fully idle gpu. Reviewed-and-tested-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 5 +++-- drivers/gpu/drm/i915/i915_gem.c | 6 +++-- drivers/gpu/drm/i915/i915_gem_gtt.c | 45 ++++++++++++++----------------------- 3 files changed, 24 insertions(+), 32 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e7a00b7cd372..3619f76367b2 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1291,10 +1291,11 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_object *obj); void i915_gem_restore_gtt_mappings(struct drm_device *dev); -int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); -void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj, +int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); +void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, enum i915_cache_level cache_level); void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); +void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj); /* i915_gem_evict.c */ int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 1f441f5c2405..031ca5bc1be8 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2102,6 +2102,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj); obj->has_aliasing_ppgtt_mapping = 0; } + i915_gem_gtt_finish_object(obj); i915_gem_object_put_pages_gtt(obj); @@ -2746,7 +2747,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, return ret; } - ret = i915_gem_gtt_bind_object(obj); + ret = i915_gem_gtt_prepare_object(obj); if (ret) { i915_gem_object_put_pages_gtt(obj); drm_mm_put_block(obj->gtt_space); @@ -2757,6 +2758,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, goto search_free; } + i915_gem_gtt_bind_object(obj, obj->cache_level); list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list); list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); @@ -2950,7 +2952,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, return ret; } - i915_gem_gtt_rebind_object(obj, cache_level); + i915_gem_gtt_bind_object(obj, cache_level); if (obj->has_aliasing_ppgtt_mapping) i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, obj, cache_level); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 2eacd78bb93b..bf33eaf045b2 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -355,42 +355,28 @@ void i915_gem_restore_gtt_mappings(struct drm_device 
*dev) list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { i915_gem_clflush_object(obj); - i915_gem_gtt_rebind_object(obj, obj->cache_level); + i915_gem_gtt_bind_object(obj, obj->cache_level); } intel_gtt_chipset_flush(); } -int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj) +int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - unsigned int agp_type = cache_level_to_agp_type(dev, obj->cache_level); - int ret; - if (dev_priv->mm.gtt->needs_dmar) { - ret = intel_gtt_map_memory(obj->pages, - obj->base.size >> PAGE_SHIFT, - &obj->sg_list, - &obj->num_sg); - if (ret != 0) - return ret; - - intel_gtt_insert_sg_entries(obj->sg_list, - obj->num_sg, - obj->gtt_space->start >> PAGE_SHIFT, - agp_type); - } else - intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT, - obj->base.size >> PAGE_SHIFT, - obj->pages, - agp_type); - - return 0; + if (dev_priv->mm.gtt->needs_dmar) + return intel_gtt_map_memory(obj->pages, + obj->base.size >> PAGE_SHIFT, + &obj->sg_list, + &obj->num_sg); + else + return 0; } -void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj, - enum i915_cache_level cache_level) +void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, + enum i915_cache_level cache_level) { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -411,6 +397,12 @@ void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj, } void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) +{ + intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, + obj->base.size >> PAGE_SHIFT); +} + +void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -418,9 +410,6 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) interruptible = do_idling(dev_priv); - intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, - obj->base.size >> PAGE_SHIFT); - if (obj->sg_list) { intel_gtt_unmap_memory(obj->sg_list, obj->num_sg); obj->sg_list = NULL; -- cgit v1.2.3-59-g8ed1b From 74898d7edc701bdae3cbd099d783dfb80b42350f Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 15 Feb 2012 23:50:22 +0100 Subject: drm/i915: bind objects to the global gtt only when needed And track the existence of such a binding similar to the aliasing ppgtt case. Speeds up binding/unbinding in the common case where we only need a ppgtt binding (which is accessed in a cpu coherent fashion by the gpu) and no gloabl gtt binding (which needs uc writes for the ptes). This patch just puts the required tracking in place. v2: Check that global gtt mappings exist in the error_state capture code (with Chris Wilson's llc reloc patches batchbuffers are no longer relocated as mappable in all situations, so this matters). Suggested by Chris Wilson. v3: Adapted to Chris' latest llc-reloc patches. v4: Fix a bug in the i915 error state capture code noticed by Chris Wilson. 
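(Aside, not part of the patch: the tracking added here is the usual lazy-mapping pattern — remember whether the expensive global-GTT binding with its uncached PTE writes exists, create it only at the call sites that actually need it, and tear down only what was created. A schematic sketch of that pattern with illustrative names, not the driver's types:)

    #include <stdbool.h>

    struct gem_object_sketch {
            bool has_global_mapping;        /* cf. has_global_gtt_mapping */
    };

    static void bind_global(struct gem_object_sketch *obj)
    {
            if (obj->has_global_mapping)
                    return;                 /* already mapped, skip the UC writes */
            /* ... write the object's global GTT PTEs here ... */
            obj->has_global_mapping = true;
    }

    static void unbind(struct gem_object_sketch *obj)
    {
            if (obj->has_global_mapping) {
                    /* ... clear the object's global GTT range ... */
                    obj->has_global_mapping = false;
            }
            /* aliasing-ppgtt teardown proceeds as before */
    }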
Reviewed-and-tested-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_gem.c | 12 ++++++++++-- drivers/gpu/drm/i915/i915_gem_gtt.c | 4 ++++ drivers/gpu/drm/i915/i915_irq.c | 3 ++- 4 files changed, 17 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 3619f76367b2..b6098b05b02e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -882,6 +882,7 @@ struct drm_i915_gem_object { unsigned int cache_level:2; unsigned int has_aliasing_ppgtt_mapping:1; + unsigned int has_global_gtt_mapping:1; struct page **pages; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 031ca5bc1be8..69009d1027fb 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1153,6 +1153,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) goto unlock; } + if (!obj->has_global_gtt_mapping) + i915_gem_gtt_bind_object(obj, obj->cache_level); + if (obj->tiling_mode == I915_TILING_NONE) ret = i915_gem_object_put_fence(obj); else @@ -2097,7 +2100,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) trace_i915_gem_object_unbind(obj); - i915_gem_gtt_unbind_object(obj); + if (obj->has_global_gtt_mapping) + i915_gem_gtt_unbind_object(obj); if (obj->has_aliasing_ppgtt_mapping) { i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj); obj->has_aliasing_ppgtt_mapping = 0; @@ -2952,7 +2956,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, return ret; } - i915_gem_gtt_bind_object(obj, cache_level); + if (obj->has_global_gtt_mapping) + i915_gem_gtt_bind_object(obj, cache_level); if (obj->has_aliasing_ppgtt_mapping) i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, obj, cache_level); @@ -3342,6 +3347,9 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, return ret; } + if (!obj->has_global_gtt_mapping && map_and_fenceable) + i915_gem_gtt_bind_object(obj, obj->cache_level); + if (obj->pin_count++ == 0) { if (!obj->active) list_move_tail(&obj->mm_list, diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index bf33eaf045b2..72be80616298 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -394,12 +394,16 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, obj->base.size >> PAGE_SHIFT, obj->pages, agp_type); + + obj->has_global_gtt_mapping = 1; } void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) { intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, obj->base.size >> PAGE_SHIFT); + + obj->has_global_gtt_mapping = 0; } void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 4c8914927217..998116e871f0 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -729,7 +729,8 @@ i915_error_object_create(struct drm_i915_private *dev_priv, goto unwind; local_irq_save(flags); - if (reloc_offset < dev_priv->mm.gtt_mappable_end) { + if (reloc_offset < dev_priv->mm.gtt_mappable_end && + src->has_global_gtt_mapping) { void __iomem *s; /* Simply ignore tiling or any overlapping fence. 
-- cgit v1.2.3-59-g8ed1b From 149c84077fe717af883bae459623ef1cebd86388 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 15 Feb 2012 23:50:23 +0100 Subject: drm/i915: implement SNB workaround for lazy global gtt PIPE_CONTROL on snb needs global gtt mappings in place to workaround a hw gotcha. No other commands need such a workaround. Luckily we can detect a PIPE_CONTROL commands easily because they have a write_domain = I915_GEM_DOMAIN_INSTRUCTION (and nothing else has that). v2: Binding the target of such a reloc into the global gtt actually works instead of binding the source, which is rather pointless ... v3: Kill a superflous has_global_gtt_mapping assignement noticed by Chris Wilson. Reviewed-and-tested-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 81687af00893..0e051eca3639 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -273,6 +273,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, { struct drm_device *dev = obj->base.dev; struct drm_gem_object *target_obj; + struct drm_i915_gem_object *target_i915_obj; uint32_t target_offset; int ret = -EINVAL; @@ -281,7 +282,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, if (unlikely(target_obj == NULL)) return -ENOENT; - target_offset = to_intel_bo(target_obj)->gtt_offset; + target_i915_obj = to_intel_bo(target_obj); + target_offset = target_i915_obj->gtt_offset; /* The target buffer should have appeared before us in the * exec_object list, so it should have a GTT space bound by now. @@ -383,6 +385,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, io_mapping_unmap_atomic(reloc_page); } + /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and + * pipe_control writes because the gpu doesn't properly redirect them + * through the ppgtt for non_secure batchbuffers. */ + if (unlikely(IS_GEN6(dev) && + reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && + !target_i915_obj->has_global_gtt_mapping)) { + i915_gem_gtt_bind_object(target_i915_obj, + target_i915_obj->cache_level); + } + /* and update the user's relocation entry */ reloc->presumed_offset = target_offset; -- cgit v1.2.3-59-g8ed1b From 0ebb98299357e1dbeeea470eec29241263c8f244 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 15 Feb 2012 23:50:24 +0100 Subject: drm/i915: enable lazy global-gtt binding Now that everything is in place, only bind to the global gtt when actually required. Patch split-up suggested by Chris Wilson. 
Reviewed-and-tested-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 69009d1027fb..863e14ac0406 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2762,7 +2762,9 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, goto search_free; } - i915_gem_gtt_bind_object(obj, obj->cache_level); + + if (!dev_priv->mm.aliasing_ppgtt) + i915_gem_gtt_bind_object(obj, obj->cache_level); list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list); list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); -- cgit v1.2.3-59-g8ed1b From 777ee96f50d8c3ac4ff3dde9ad69c22779ac88cb Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 15 Feb 2012 23:50:25 +0100 Subject: drm/i915: add HAS_ALIASING_PPGTT parameter for userspace On Sanybridge a few MI read/write commands only work when ppgtt is enabled. Userspace therefore needs to be able to check whether ppgtt is enabled. For added hilarity, you need to reset the "use global GTT" bit on snb when ppgtt is enabled, otherwise it won't work. Despite what bspec says about automatically using ppgtt ... Luckily PIPE_CONTROL (the only write cmd current userspace uses) is not affected by all this, as tested by tests/gem_pipe_control_store_loop. Reviewed-and-tested-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 3 +++ include/drm/i915_drm.h | 3 ++- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 3c086d707a91..fdff0097cf2b 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -790,6 +790,9 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_HAS_LLC: value = HAS_LLC(dev); break; + case I915_PARAM_HAS_ALIASING_PPGTT: + value = dev_priv->mm.aliasing_ppgtt ? 1 : 0; + break; default: DRM_DEBUG_DRIVER("Unknown parameter %d\n", param->param); diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index da929bb5b788..f3f82242bf1d 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -296,7 +296,8 @@ typedef struct drm_i915_irq_wait { #define I915_PARAM_HAS_EXEC_CONSTANTS 14 #define I915_PARAM_HAS_RELAXED_DELTA 15 #define I915_PARAM_HAS_GEN7_SOL_RESET 16 -#define I915_PARAM_HAS_LLC 17 +#define I915_PARAM_HAS_LLC 17 +#define I915_PARAM_HAS_ALIASING_PPGTT 18 typedef struct drm_i915_getparam { int param; -- cgit v1.2.3-59-g8ed1b From eb2c0c818a4109489de53fd7334c21f1f26f6635 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 15 Feb 2012 14:42:43 +0100 Subject: drm/i915: [dinq] shut up two instances -Wunitialized Introduced in commit 8461d226 and 8c59967c Signed-off-by: Ben Widawsky [danvet: s/fix/shut up/ in the commit msg.] 
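(Aside, not part of the patch: the warning being silenced is the classic case where a variable is only assigned on paths gcc cannot prove are always taken, so initializing it at the declaration — ret = 0, as the hunks below do — is the conventional way to quiet it. A contrived standalone example of the kind of shape that provokes the warning, not the driver code:)

    /* gcc -O2 -Wall may report "'ret' may be used uninitialized" here,
     * because it cannot prove the loop body runs at least once;
     * writing "int ret = 0;" makes the warning go away. */
    static int last_index(int n)
    {
            int i, ret;

            for (i = 0; i < n; i++)
                    ret = i;

            return ret;
    }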
Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 863e14ac0406..6b1859799e6c 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -387,7 +387,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, char __user *user_data; ssize_t remain; loff_t offset; - int shmem_page_offset, page_length, ret; + int shmem_page_offset, page_length, ret = 0; int obj_do_bit17_swizzling, page_do_bit17_swizzling; user_data = (char __user *) (uintptr_t) args->data_ptr; @@ -794,7 +794,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, ssize_t remain; loff_t offset; char __user *user_data; - int shmem_page_offset, page_length, ret; + int shmem_page_offset, page_length, ret = 0; int obj_do_bit17_swizzling, page_do_bit17_swizzling; user_data = (char __user *) (uintptr_t) args->data_ptr; -- cgit v1.2.3-59-g8ed1b From 1a8c55d37268d8b7ab7797e6d378caa697dbd029 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 15 Feb 2012 14:42:42 +0100 Subject: drm/i915: [dinq] shut up six instances of -Warray-bounds Introduced in commits c1cd90ed and d27b1e0e Signed-off-by: Ben Widawsky [danvet: s/fix/shut up in the commit msg and add a comment to the BUG_ON.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index fdb7ccefffbd..66c90d4477a3 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -704,6 +704,7 @@ static void i915_ring_error_state(struct seq_file *m, struct drm_i915_error_state *error, unsigned ring) { + BUG_ON(ring > VCS); /* shut up confused gcc */ seq_printf(m, "%s command stream:\n", ring_str(ring)); seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]); seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); -- cgit v1.2.3-59-g8ed1b From b03543857fd75876b96e10d4320b775e95041bb7 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Tue, 20 Mar 2012 13:07:05 +0100 Subject: drm/i915: Check VBIOS value for determining LVDS dual channel mode, too Currently i915 driver checks [PCH_]LVDS register bits to decide whether to set up the dual-link or the single-link mode. This relies implicitly on that BIOS initializes the register properly at boot. However, BIOS doesn't initialize it always. When the machine is booted with the closed lid, BIOS skips the LVDS reg initialization. This ends up in blank output on a machine with a dual-link LVDS when you open the lid after the boot. This patch adds a workaround for that problem by checking the initial LVDS register value in VBT. 
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=37742 Tested-By: Paulo Zanoni Reviewed-by: Rodrigo Vivi Reviewed-by: Adam Jackson Signed-off-by: Takashi Iwai Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 2 ++ drivers/gpu/drm/i915/intel_bios.c | 36 ++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/intel_display.c | 30 ++++++++++++++++++++++++------ 3 files changed, 62 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b6098b05b02e..4cbed7f23e72 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -406,6 +406,8 @@ typedef struct drm_i915_private { unsigned int lvds_use_ssc:1; unsigned int display_clock_mode:1; int lvds_ssc_freq; + unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ + unsigned int lvds_val; /* used for checking LVDS channel mode */ struct { int rate; int lanes; diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 0ae76d6b6eab..e4317da848d5 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -173,6 +173,28 @@ get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data, return (struct lvds_dvo_timing *)(entry + dvo_timing_offset); } +/* get lvds_fp_timing entry + * this function may return NULL if the corresponding entry is invalid + */ +static const struct lvds_fp_timing * +get_lvds_fp_timing(const struct bdb_header *bdb, + const struct bdb_lvds_lfp_data *data, + const struct bdb_lvds_lfp_data_ptrs *ptrs, + int index) +{ + size_t data_ofs = (const u8 *)data - (const u8 *)bdb; + u16 data_size = ((const u16 *)data)[-1]; /* stored in header */ + size_t ofs; + + if (index >= ARRAY_SIZE(ptrs->ptr)) + return NULL; + ofs = ptrs->ptr[index].fp_timing_offset; + if (ofs < data_ofs || + ofs + sizeof(struct lvds_fp_timing) > data_ofs + data_size) + return NULL; + return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs); +} + /* Try to find integrated panel data */ static void parse_lfp_panel_data(struct drm_i915_private *dev_priv, @@ -182,6 +204,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, const struct bdb_lvds_lfp_data *lvds_lfp_data; const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs; const struct lvds_dvo_timing *panel_dvo_timing; + const struct lvds_fp_timing *fp_timing; struct drm_display_mode *panel_fixed_mode; int i, downclock; @@ -243,6 +266,19 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, "Normal Clock %dKHz, downclock %dKHz\n", panel_fixed_mode->clock, 10*downclock); } + + fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data, + lvds_lfp_data_ptrs, + lvds_options->panel_type); + if (fp_timing) { + /* check the resolution, just to be sure */ + if (fp_timing->x_res == panel_fixed_mode->hdisplay && + fp_timing->y_res == panel_fixed_mode->vdisplay) { + dev_priv->bios_lvds_val = fp_timing->lvds_reg_val; + DRM_DEBUG_KMS("VBT initial LVDS value %x\n", + dev_priv->bios_lvds_val); + } + } } /* Try to find sdvo panel data */ diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 683002fb63d1..a76ac2eb9938 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -360,6 +360,27 @@ static const intel_limit_t intel_limits_ironlake_display_port = { .find_pll = intel_find_pll_ironlake_dp, }; +static bool is_dual_link_lvds(struct drm_i915_private *dev_priv, + unsigned int reg) +{ + unsigned int val; + + if (dev_priv->lvds_val) + val = 
dev_priv->lvds_val; + else { + /* BIOS should set the proper LVDS register value at boot, but + * in reality, it doesn't set the value when the lid is closed; + * we need to check "the value to be set" in VBT when LVDS + * register is uninitialized. + */ + val = I915_READ(reg); + if (!(val & ~LVDS_DETECTED)) + val = dev_priv->bios_lvds_val; + dev_priv->lvds_val = val; + } + return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP; +} + static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, int refclk) { @@ -368,8 +389,7 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, const intel_limit_t *limit; if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { - if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == - LVDS_CLKB_POWER_UP) { + if (is_dual_link_lvds(dev_priv, PCH_LVDS)) { /* LVDS dual channel */ if (refclk == 100000) limit = &intel_limits_ironlake_dual_lvds_100m; @@ -397,8 +417,7 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) const intel_limit_t *limit; if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { - if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == - LVDS_CLKB_POWER_UP) + if (is_dual_link_lvds(dev_priv, LVDS)) /* LVDS with dual channel */ limit = &intel_limits_g4x_dual_channel_lvds; else @@ -536,8 +555,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, * reliably set up different single/dual channel state, if we * even can. */ - if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == - LVDS_CLKB_POWER_UP) + if (is_dual_link_lvds(dev_priv, LVDS)) clock.p2 = limit->p2.p2_fast; else clock.p2 = limit->p2.p2_slow; -- cgit v1.2.3-59-g8ed1b From 121d527a323f3fde313a8f522060ba859ee405b3 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Tue, 20 Mar 2012 13:07:06 +0100 Subject: drm/i915: Add lvds_channel module option Add a new module optoin lvds_channel to specify the LVDS channel mode explicitly instead of probing the LVDS register value set by BIOS. This will be helpful when VBT is broken or incompatible with the current code. 
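(Usage note, not part of the patch: with the parameter added below, a machine whose VBT misreports the channel mode can be forced to dual-channel either at boot or when the module is loaded, using the values documented in the diff — 0 = probe BIOS, 1 = single-channel, 2 = dual-channel:)

    On the kernel command line:            i915.lvds_channel_mode=2
    In modprobe config (e.g. /etc/modprobe.d/i915.conf):  options i915 lvds_channel_mode=2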
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=42842 Reviewed-by: Eugeni Dodonov Reviewed-by: Adam Jackson Signed-off-by: Takashi Iwai Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 6 ++++++ drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/intel_display.c | 4 ++++ 3 files changed, 11 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 0e797d3cb5f4..8e2c52ec5a9e 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -80,6 +80,12 @@ MODULE_PARM_DESC(lvds_downclock, "Use panel (LVDS/eDP) downclocking for power savings " "(default: false)"); +int i915_lvds_channel_mode __read_mostly; +module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600); +MODULE_PARM_DESC(lvds_channel_mode, + "Specify LVDS channel mode " + "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)"); + int i915_panel_use_ssc __read_mostly = -1; module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); MODULE_PARM_DESC(lvds_use_ssc, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4cbed7f23e72..f2f9dd933ce4 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1064,6 +1064,7 @@ extern int i915_panel_ignore_lid __read_mostly; extern unsigned int i915_powersave __read_mostly; extern int i915_semaphores __read_mostly; extern unsigned int i915_lvds_downclock __read_mostly; +extern int i915_lvds_channel_mode __read_mostly; extern int i915_panel_use_ssc __read_mostly; extern int i915_vbt_sdvo_panel_type __read_mostly; extern int i915_enable_rc6 __read_mostly; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a76ac2eb9938..a0e31660381c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -365,6 +365,10 @@ static bool is_dual_link_lvds(struct drm_i915_private *dev_priv, { unsigned int val; + /* use the module option value if specified */ + if (i915_lvds_channel_mode > 0) + return i915_lvds_channel_mode == 2; + if (dev_priv->lvds_val) val = dev_priv->lvds_val; else { -- cgit v1.2.3-59-g8ed1b From a14917eeb2cc160d13f4fddefe5f7f9c80953ce1 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 24 Feb 2012 21:13:38 +0000 Subject: drm/i915: Release the mmap offset when purging a buffer If we discard a buffer due to memory pressure, also release its alloted mmap address space. As it may be sometime before userspace wakes up and notices that it has buffers to purge from its cache, we may waste valuable address space on unusable objects for a period of time. 
Signed-off-by: Chris Wilson Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=47738 Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6b1859799e6c..0a16366efaac 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1546,6 +1546,9 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj) inode = obj->base.filp->f_path.dentry->d_inode; shmem_truncate_range(inode, 0, (loff_t)-1); + if (obj->base.map_list.map) + drm_gem_free_mmap_offset(&obj->base); + obj->madv = __I915_MADV_PURGED; } -- cgit v1.2.3-59-g8ed1b From b7d84096d3c45f4e397e913da4ce24ec9a32022e Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 22 Mar 2012 14:38:43 -0700 Subject: drm/i915: move NEEDS_FORCE_WAKE to i915_drv.c It's only used by the main read/write functions, so we can keep it with them. Signed-off-by: Jesse Barnes Acked-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 6 ++++++ drivers/gpu/drm/i915/i915_drv.h | 6 ------ 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 8e2c52ec5a9e..9a7f265db1a4 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -985,6 +985,12 @@ MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL and additional rights"); +/* We give fast paths for the really cool registers */ +#define NEEDS_FORCE_WAKE(dev_priv, reg) \ + (((dev_priv)->info->gen >= 6) && \ + ((reg) < 0x40000) && \ + ((reg) != FORCEWAKE)) + #define __i915_read(x, y) \ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ u##x val = 0; \ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index f2f9dd933ce4..e16f4d6f235e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1435,12 +1435,6 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); -/* We give fast paths for the really cool registers */ -#define NEEDS_FORCE_WAKE(dev_priv, reg) \ - (((dev_priv)->info->gen >= 6) && \ - ((reg) < 0x40000) && \ - ((reg) != FORCEWAKE)) - #define __i915_read(x, y) \ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); -- cgit v1.2.3-59-g8ed1b From 7eea1ddf6168cbe4507f429da53752229e9ff5b7 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 22 Mar 2012 14:38:44 -0700 Subject: drm/i915: re-order GT IIR bit definitions They were all over the place, order them by position and add a few. 
v2: add gen indications to the new bits (Ben) Signed-off-by: Jesse Barnes Reviewed-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 52a06be1d98d..f3609f260e5f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3063,18 +3063,26 @@ #define DE_PIPEB_VBLANK_IVB (1<<5) #define DE_PIPEA_VBLANK_IVB (1<<0) +#define VLV_MASTER_IER 0x4400c /* Gunit master IER */ +#define MASTER_INTERRUPT_ENABLE (1<<31) + #define DEISR 0x44000 #define DEIMR 0x44004 #define DEIIR 0x44008 #define DEIER 0x4400c /* GT interrupt */ -#define GT_PIPE_NOTIFY (1 << 4) -#define GT_SYNC_STATUS (1 << 2) -#define GT_USER_INTERRUPT (1 << 0) -#define GT_BSD_USER_INTERRUPT (1 << 5) -#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12) -#define GT_BLT_USER_INTERRUPT (1 << 22) +#define GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT (1 << 26) +#define GT_GEN6_BLT_CS_ERROR_INTERRUPT (1 << 25) +#define GT_BLT_USER_INTERRUPT (1 << 22) +#define GT_GEN6_BSD_CS_ERROR_INTERRUPT (1 << 15) +#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12) +#define GT_BSD_USER_INTERRUPT (1 << 5) +#define GT_GEN7_L3_PARITY_ERROR_INTERRUPT (1 << 5) +#define GT_PIPE_NOTIFY (1 << 4) +#define GT_RENDER_CS_ERROR_INTERRUPT (1 << 3) +#define GT_SYNC_STATUS (1 << 2) +#define GT_USER_INTERRUPT (1 << 0) #define GTISR 0x44010 #define GTIMR 0x44014 -- cgit v1.2.3-59-g8ed1b From eef4eacb6ed3940b4a15daf3fbd060243e54056c Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 23 Mar 2012 23:43:35 +0100 Subject: drm/i915/sdov: switch IS_SDVOB to a flag With valleyview we'll have these at yet another address, so keeping track of this with an ever-growing list of registers will get ugly. This way intel_sdvo.c is fully independent of the base address of the output ports display register blocks. While at it, do 2 closely related cleanups: - use SDVO_NAME some more - change the sdvo_reg variables to uint32_t like other registers. 
Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 6 +++--- drivers/gpu/drm/i915/intel_drv.h | 3 ++- drivers/gpu/drm/i915/intel_sdvo.c | 33 ++++++++++++++++++--------------- 3 files changed, 23 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a0e31660381c..a7c2ddc96f0e 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -7810,7 +7810,7 @@ static void intel_setup_outputs(struct drm_device *dev) if (I915_READ(HDMIB) & PORT_DETECTED) { /* PCH SDVOB multiplex with HDMIB */ - found = intel_sdvo_init(dev, PCH_SDVOB); + found = intel_sdvo_init(dev, PCH_SDVOB, true); if (!found) intel_hdmi_init(dev, HDMIB); if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) @@ -7834,7 +7834,7 @@ static void intel_setup_outputs(struct drm_device *dev) if (I915_READ(SDVOB) & SDVO_DETECTED) { DRM_DEBUG_KMS("probing SDVOB\n"); - found = intel_sdvo_init(dev, SDVOB); + found = intel_sdvo_init(dev, SDVOB, true); if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); intel_hdmi_init(dev, SDVOB); @@ -7850,7 +7850,7 @@ static void intel_setup_outputs(struct drm_device *dev) if (I915_READ(SDVOB) & SDVO_DETECTED) { DRM_DEBUG_KMS("probing SDVOC\n"); - found = intel_sdvo_init(dev, SDVOC); + found = intel_sdvo_init(dev, SDVOC, false); } if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 9cec6c3937fa..219efe3b9ad5 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -293,7 +293,8 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector) extern void intel_crt_init(struct drm_device *dev); extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); -extern bool intel_sdvo_init(struct drm_device *dev, int output_device); +extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, + bool is_sdvob); extern void intel_dvo_init(struct drm_device *dev); extern void intel_tv_init(struct drm_device *dev); extern void intel_mark_busy(struct drm_device *dev, diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index e36b171c1e7d..70fb275cd205 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -74,7 +74,7 @@ struct intel_sdvo { struct i2c_adapter ddc; /* Register for the SDVO device: SDVOB or SDVOC */ - int sdvo_reg; + uint32_t sdvo_reg; /* Active outputs controlled by this SDVO output */ uint16_t controlled_output; @@ -114,6 +114,9 @@ struct intel_sdvo { */ bool is_tv; + /* On different gens SDVOB is at different places. */ + bool is_sdvob; + /* This is for current tv format name */ int tv_format_index; @@ -403,8 +406,7 @@ static const struct _sdvo_cmd_name { SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), }; -#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB) -#define SDVO_NAME(svdo) (IS_SDVOB((svdo)->sdvo_reg) ? "SDVOB" : "SDVOC") +#define SDVO_NAME(svdo) ((svdo)->is_sdvob ? 
"SDVOB" : "SDVOC") static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, const void *args, int args_len) @@ -1893,7 +1895,7 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv, { struct sdvo_device_mapping *mapping; - if (IS_SDVOB(reg)) + if (sdvo->is_sdvob) mapping = &(dev_priv->sdvo_mappings[0]); else mapping = &(dev_priv->sdvo_mappings[1]); @@ -1911,7 +1913,7 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv, struct sdvo_device_mapping *mapping; u8 pin; - if (IS_SDVOB(reg)) + if (sdvo->is_sdvob) mapping = &dev_priv->sdvo_mappings[0]; else mapping = &dev_priv->sdvo_mappings[1]; @@ -1936,12 +1938,12 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device) } static u8 -intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) +intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo) { struct drm_i915_private *dev_priv = dev->dev_private; struct sdvo_device_mapping *my_mapping, *other_mapping; - if (IS_SDVOB(sdvo_reg)) { + if (sdvo->is_sdvob) { my_mapping = &dev_priv->sdvo_mappings[0]; other_mapping = &dev_priv->sdvo_mappings[1]; } else { @@ -1966,7 +1968,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) /* No SDVO device info is found for another DVO port, * so use mapping assumption we had before BIOS parsing. */ - if (IS_SDVOB(sdvo_reg)) + if (sdvo->is_sdvob) return 0x70; else return 0x72; @@ -2482,7 +2484,7 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo, return i2c_add_adapter(&sdvo->ddc) == 0; } -bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) +bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_encoder *intel_encoder; @@ -2494,7 +2496,8 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) return false; intel_sdvo->sdvo_reg = sdvo_reg; - intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1; + intel_sdvo->is_sdvob = is_sdvob; + intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1; intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg); if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) { kfree(intel_sdvo); @@ -2511,13 +2514,13 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) u8 byte; if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) { - DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", - IS_SDVOB(sdvo_reg) ? 'B' : 'C'); + DRM_DEBUG_KMS("No SDVO device found on %s\n", + SDVO_NAME(intel_sdvo)); goto err; } } - if (IS_SDVOB(sdvo_reg)) + if (intel_sdvo->is_sdvob) dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; else dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; @@ -2538,8 +2541,8 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) if (intel_sdvo_output_setup(intel_sdvo, intel_sdvo->caps.output_flags) != true) { - DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", - IS_SDVOB(sdvo_reg) ? 'B' : 'C'); + DRM_DEBUG_KMS("SDVO output failed to setup on %s\n", + SDVO_NAME(intel_sdvo)); goto err; } -- cgit v1.2.3-59-g8ed1b From 110447fc2f30c22dcaa1936828cb33cec5b72272 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 23 Mar 2012 23:43:36 +0100 Subject: drm/i915: add an explict mmio base for gpio/gmbus io Again, Valleyview modes these around, so make the mmio base more explicit to consolidate the base address computations to one HAS_PCH_SPLIT check. v2: Fix up the PCH_SPLIT braino ... it actually works that way round. 
Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 5 +++++ drivers/gpu/drm/i915/intel_i2c.c | 15 ++++++++------- 2 files changed, 13 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e16f4d6f235e..9c75a7b655ce 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -333,6 +333,11 @@ typedef struct drm_i915_private { * controller on different i2c buses. */ struct mutex gmbus_mutex; + /** + * Base address of the gmbus and gpio block. + */ + uint32_t gpio_mmio_base; + struct pci_dev *bridge_dev; struct intel_ring_buffer ring[I915_NUM_RINGS]; uint32_t next_seqno; diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 601c86e664af..1486c76e51ac 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -49,10 +49,7 @@ void intel_i2c_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (HAS_PCH_SPLIT(dev)) - I915_WRITE(PCH_GMBUS0, 0); - else - I915_WRITE(GMBUS0, 0); + I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0); } static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable) @@ -162,8 +159,7 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin) algo = &bus->bit_algo; bus->gpio_reg = map_pin_to_reg[pin]; - if (HAS_PCH_SPLIT(dev_priv->dev)) - bus->gpio_reg += PCH_GPIOA - GPIOA; + bus->gpio_reg += dev_priv->gpio_mmio_base; bus->adapter.algo_data = algo; algo->setsda = set_data; @@ -219,7 +215,7 @@ gmbus_xfer(struct i2c_adapter *adapter, goto out; } - reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0; + reg_offset = dev_priv->gpio_mmio_base; I915_WRITE(GMBUS0 + reg_offset, bus->reg0); @@ -359,6 +355,11 @@ int intel_setup_gmbus(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int ret, i; + if (HAS_PCH_SPLIT(dev)) + dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA; + else + dev_priv->gpio_mmio_base = 0; + dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus), GFP_KERNEL); if (dev_priv->gmbus == NULL) -- cgit v1.2.3-59-g8ed1b From 0fb3f969c8683505fb7323c06bf8a999a5a45a15 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 2 Mar 2012 19:38:30 +0100 Subject: drm/i915: enable gmbus on gen2 With the recent set of gmbus fixes, this seems to work on my i855gm. Acked-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_i2c.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 1486c76e51ac..0713cc289119 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -389,10 +389,6 @@ int intel_setup_gmbus(struct drm_device *dev) bus->reg0 = i | GMBUS_RATE_100KHZ; bus->has_gpio = intel_gpio_setup(bus, i); - - /* XXX force bit banging until GMBUS is fully debugged */ - if (bus->has_gpio && IS_GEN2(dev)) - bus->force_bit = true; } intel_i2c_reset(dev_priv->dev); -- cgit v1.2.3-59-g8ed1b From 1d83f4426fa0555c98c989915be6df01a8125aca Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 24 Mar 2012 20:12:53 +0000 Subject: drm/i915: Batch copy_from_user for relocation processing Originally the code tried to allocate a large enough array to perform the copy using vmalloc, performance wasn't great and throughput was improved by processing each individual relocation entry separately. This too is not as efficient as one would desire. 
A compromise would be to allocate a single page, or to allocate a few entries on the stack, and process the copy in batches. The latter gives simpler code and more consistent performance due to a lack of heuristic. x11perf -copywinwin10: n450/pnv i3-330m i5-2520m (cpu) before: 249000 785000 1280000 (80%) page: 264000 896000 1280000 (65%) on-stack: 264000 902000 1280000 (67%) v2: Use 512-bytes of stack for batching rather than allocate a page. v3: Tidy the code slightly with more descriptive variable names Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 42 +++++++++++++++++++++--------- 1 file changed, 29 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 0e051eca3639..1fa01313d89f 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -405,30 +405,46 @@ static int i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, struct eb_objects *eb) { +#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry)) + struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)]; struct drm_i915_gem_relocation_entry __user *user_relocs; struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; - int i, ret; + int remain, ret; user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr; - for (i = 0; i < entry->relocation_count; i++) { - struct drm_i915_gem_relocation_entry reloc; - if (__copy_from_user_inatomic(&reloc, - user_relocs+i, - sizeof(reloc))) + remain = entry->relocation_count; + while (remain) { + struct drm_i915_gem_relocation_entry *r = stack_reloc; + int count = remain; + if (count > ARRAY_SIZE(stack_reloc)) + count = ARRAY_SIZE(stack_reloc); + remain -= count; + + if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]))) return -EFAULT; - ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc); - if (ret) - return ret; + do { + u64 offset = r->presumed_offset; - if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset, - &reloc.presumed_offset, - sizeof(reloc.presumed_offset))) - return -EFAULT; + ret = i915_gem_execbuffer_relocate_entry(obj, eb, r); + if (ret) + return ret; + + if (r->presumed_offset != offset && + __copy_to_user_inatomic(&user_relocs->presumed_offset, + &r->presumed_offset, + sizeof(r->presumed_offset))) { + return -EFAULT; + } + + user_relocs++; + r++; + } while (--count); } return 0; +#undef N_RELOC } static int -- cgit v1.2.3-59-g8ed1b From d42c9e2c24f7e7897405b85816bdf4ac924881c0 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 25 Mar 2012 22:56:14 +0200 Subject: drm/i915: reinstate GM45 TV detection fix This reverts commmit d4b74bf07873da2e94219a7b67a334fc1c3ce649 which reverted the origin fix fb8b5a39b6310379d7b54c0c7113703a8eaf4a57. We have at least 3 different bug reports that this fixes things and no indication what is exactly wrong with this. So try again. To make matters slightly more fun, the commit itself was cc: stable whereas the revert has not been. According to Peter Clifton he discussed this with Zhao Yakui and this seems to be in contradiction of the GM45 PRM, but rumours have it that this is how the BIOS does it ... let's see. 
Reviewed-by: Rodrigo Vivi Tested-by: Peter Clifton Cc: Zhao Yakui Cc: Dave Airlie Cc: Eric Anholt Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=16236 Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=25913 Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=14792 Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_tv.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 05f765ef5464..ca12c709f3eb 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1153,6 +1153,15 @@ intel_tv_detect_type(struct intel_tv *intel_tv, DAC_B_0_7_V | DAC_C_0_7_V); + + /* + * The TV sense state should be cleared to zero on cantiga platform. Otherwise + * the TV is misdetected. This is hardware requirement. + */ + if (IS_GM45(dev)) + tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL | + TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL); + I915_WRITE(TV_CTL, tv_ctl); I915_WRITE(TV_DAC, tv_dac); POSTING_READ(TV_DAC); -- cgit v1.2.3-59-g8ed1b From 644ec02b5d135b63369a5dbf74976cdeef310dcd Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 26 Mar 2012 09:45:40 +0200 Subject: drm/i915: s/i915_gem_do_init/i915_gem_init_global_gtt ... because this is what it actually doesn now that we have the global gtt vs. ppgtt split. Also move it to the other global gtt functions in i915_gem_gtt.c Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 5 +++-- drivers/gpu/drm/i915/i915_drv.h | 8 ++++---- drivers/gpu/drm/i915/i915_gem.c | 22 ++-------------------- drivers/gpu/drm/i915/i915_gem_gtt.c | 19 +++++++++++++++++++ 4 files changed, 28 insertions(+), 26 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index fdff0097cf2b..b6e0a4599db9 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1210,7 +1210,7 @@ static int i915_load_gem_init(struct drm_device *dev) /* For paranoia keep the guard page in between. */ gtt_size -= PAGE_SIZE; - i915_gem_do_init(dev, 0, mappable_size, gtt_size); + i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size); ret = i915_gem_init_aliasing_ppgtt(dev); if (ret) @@ -1226,7 +1226,8 @@ static int i915_load_gem_init(struct drm_device *dev) * should be enough to keep any prefetching inside of the * aperture. 
*/ - i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE); + i915_gem_init_global_gtt(dev, 0, mappable_size, + gtt_size - PAGE_SIZE); } ret = i915_gem_init_hw(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 9c75a7b655ce..d680f09e872a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1253,10 +1253,6 @@ int __must_check i915_gem_init_hw(struct drm_device *dev); void i915_gem_init_swizzling(struct drm_device *dev); void i915_gem_init_ppgtt(struct drm_device *dev); void i915_gem_cleanup_ringbuffer(struct drm_device *dev); -void i915_gem_do_init(struct drm_device *dev, - unsigned long start, - unsigned long mappable_end, - unsigned long end); int __must_check i915_gpu_idle(struct drm_device *dev, bool do_retire); int __must_check i915_gem_idle(struct drm_device *dev); int __must_check i915_add_request(struct intel_ring_buffer *ring, @@ -1305,6 +1301,10 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, enum i915_cache_level cache_level); void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj); +void i915_gem_init_global_gtt(struct drm_device *dev, + unsigned long start, + unsigned long mappable_end, + unsigned long end); /* i915_gem_evict.c */ int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 0a16366efaac..90e3fc18b8b5 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -125,25 +125,6 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) return obj->gtt_space && !obj->active && obj->pin_count == 0; } -void i915_gem_do_init(struct drm_device *dev, - unsigned long start, - unsigned long mappable_end, - unsigned long end) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - - drm_mm_init(&dev_priv->mm.gtt_space, start, end - start); - - dev_priv->mm.gtt_start = start; - dev_priv->mm.gtt_mappable_end = mappable_end; - dev_priv->mm.gtt_end = end; - dev_priv->mm.gtt_total = end - start; - dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start; - - /* Take over this portion of the GTT */ - intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE); -} - int i915_gem_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file) @@ -155,7 +136,8 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data, return -EINVAL; mutex_lock(&dev->struct_mutex); - i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end); + i915_gem_init_global_gtt(dev, args->gtt_start, + args->gtt_end, args->gtt_end); mutex_unlock(&dev->struct_mutex); return 0; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 72be80616298..98ed612d8d65 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -421,3 +421,22 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) undo_idling(dev_priv, interruptible); } + +void i915_gem_init_global_gtt(struct drm_device *dev, + unsigned long start, + unsigned long mappable_end, + unsigned long end) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + drm_mm_init(&dev_priv->mm.gtt_space, start, end - start); + + dev_priv->mm.gtt_start = start; + dev_priv->mm.gtt_mappable_end = mappable_end; + dev_priv->mm.gtt_end = end; + dev_priv->mm.gtt_total = end - start; + dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start; + + /* 
Take over this portion of the GTT */ + intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE); +} -- cgit v1.2.3-59-g8ed1b From 9021f284e9eaff1ed1d3e6bab7c90e3712201ac2 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 26 Mar 2012 09:45:41 +0200 Subject: drm/i915: the intel gtt is _not_ an agp bridge! So don't call it like that. Also rip out a confusing comment and instead explain what's really going on. Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index b6e0a4599db9..c2d9eaedff33 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1957,7 +1957,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) { struct drm_i915_private *dev_priv; int ret = 0, mmio_bar; - uint32_t agp_size; + uint32_t aperture_size; /* i915 has 4 more counters */ dev->counters += 4; @@ -2011,16 +2011,16 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto out_rmmap; } - agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; + aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; dev_priv->mm.gtt_mapping = - io_mapping_create_wc(dev->agp->base, agp_size); + io_mapping_create_wc(dev->agp->base, aperture_size); if (dev_priv->mm.gtt_mapping == NULL) { ret = -EIO; goto out_rmmap; } - i915_mtrr_setup(dev_priv, dev->agp->base, agp_size); + i915_mtrr_setup(dev_priv, dev->agp->base, aperture_size); /* The i915 workqueue is primarily used for batched retirement of * requests (and thus managing bo) once the task has been completed @@ -2272,7 +2272,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file) * mode setting case, we want to restore the kernel's initial mode (just * in case the last client left us in a bad state). * - * Additionally, in the non-mode setting case, we'll tear down the AGP + * Additionally, in the non-mode setting case, we'll tear down the GTT * and DMA structures, since the kernel won't be using them, and clea * up any GEM state. */ @@ -2350,16 +2350,10 @@ struct drm_ioctl_desc i915_ioctls[] = { int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); -/** - * Determine if the device really is AGP or not. - * - * All Intel graphics chipsets are treated as AGP, even if they are really - * PCI-e. - * - * \param dev The device to be tested. - * - * \returns - * A value of 1 is always retured to indictate every i9x5 is AGP. +/* + * This is really ugly: Because old userspace abused the linux agp interface to + * manage the gtt, we need to claim that all intel devices are agp. For + * otherwise the drm core refuses to initialize the agp support code. */ int i915_driver_device_is_agp(struct drm_device * dev) { -- cgit v1.2.3-59-g8ed1b From d1dd20a96524ac462ed4f28dae168b9637f079e5 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 26 Mar 2012 09:45:42 +0200 Subject: drm/i915: clear the entire gtt when using gem We've lost our guard page somewhere in the gtt rewrite, this patch here will restore it. Exercised by i-g-t/tests/gem_cs_prefetch. v2: Substract the guard page from the range we're supposed to manage with gem. Suggested by Chris Wilson to increase the odds of old ums + gem userspace not blowing up. To compensate for the loss of a page, don't substract the guard page in the modeset init code any longer. 
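The bookkeeping rule, shown as a toy sketch with stand-in helpers (not the real drm_mm/GTT calls): hand the allocator one page less than the range, but clear the full range including the guard page.

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* stand-ins for drm_mm_init() and intel_gtt_clear_range() */
static void mm_init(unsigned long start, unsigned long size)
{
	printf("allocator manages [%lu, %lu)\n", start, start + size);
}

static void gtt_clear_range(unsigned long first_page, unsigned long num_pages)
{
	printf("clearing %lu pages starting at page %lu\n", num_pages, first_page);
}

static void init_global_gtt(unsigned long start, unsigned long end)
{
	/* keep one guard page away from the allocator ... */
	mm_init(start, end - start - PAGE_SIZE);

	/* ... but scrub every PTE in the range, guard page included */
	gtt_clear_range(start / PAGE_SIZE, (end - start) / PAGE_SIZE);
}

int main(void)
{
	init_global_gtt(0, 256 * 1024 * 1024UL);
	return 0;
}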
Tested-by: Chris Wilson Reviewed-by: Chris Wilson Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=44748 Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 4 +--- drivers/gpu/drm/i915/i915_gem_gtt.c | 5 +++-- 2 files changed, 4 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index c2d9eaedff33..4f690374fffe 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1207,8 +1207,6 @@ static int i915_load_gem_init(struct drm_device *dev) /* PPGTT pdes are stolen from global gtt ptes, so shrink the * aperture accordingly when using aliasing ppgtt. */ gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE; - /* For paranoia keep the guard page in between. */ - gtt_size -= PAGE_SIZE; i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size); @@ -1227,7 +1225,7 @@ static int i915_load_gem_init(struct drm_device *dev) * aperture. */ i915_gem_init_global_gtt(dev, 0, mappable_size, - gtt_size - PAGE_SIZE); + gtt_size); } ret = i915_gem_init_hw(dev); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 98ed612d8d65..5a626f7af0c7 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -429,7 +429,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev, { drm_i915_private_t *dev_priv = dev->dev_private; - drm_mm_init(&dev_priv->mm.gtt_space, start, end - start); + /* Substract the guard page ... */ + drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE); dev_priv->mm.gtt_start = start; dev_priv->mm.gtt_mappable_end = mappable_end; @@ -437,6 +438,6 @@ void i915_gem_init_global_gtt(struct drm_device *dev, dev_priv->mm.gtt_total = end - start; dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start; - /* Take over this portion of the GTT */ + /* ... but ensure that we clear the entire range. */ intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE); } -- cgit v1.2.3-59-g8ed1b From dabdfe021ab1e985e6566009c774fb03f14b568e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 26 Mar 2012 10:10:27 +0200 Subject: drm/i915: Avoid using mappable space for relocation processing through the CPU We try to avoid writing the relocations through the uncached GTT, if the buffer is currently in the CPU write domain and so will be flushed out to main memory afterwards anyway. Also on SandyBridge we can safely write to the pages in cacheable memory, so long as the buffer is LLC mapped. In either of these cases, we therefore do not need to force the reallocation of the buffer into the mappable region of the GTT, reducing the aperture pressure. 
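A condensed sketch of the new predicate, using simplified stand-in fields rather than the real GEM object layout: relocations only force a mappable placement when they cannot be written through the CPU.

#include <stdbool.h>
#include <stdio.h>

enum cache_level { CACHE_NONE, CACHE_LLC };
enum write_domain { DOMAIN_GTT, DOMAIN_CPU };

struct gem_obj {
	enum write_domain write_domain;
	enum cache_level cache_level;
	int relocation_count;
};

/* CPU writes are fine if they get flushed anyway, or stay coherent (LLC) */
static bool use_cpu_reloc(const struct gem_obj *obj)
{
	return obj->write_domain == DOMAIN_CPU ||
	       obj->cache_level != CACHE_NONE;
}

/* only relocations that must go through the GTT need a mappable placement */
static bool need_reloc_mappable(const struct gem_obj *obj)
{
	return obj->relocation_count && !use_cpu_reloc(obj);
}

int main(void)
{
	struct gem_obj llc = { DOMAIN_GTT, CACHE_LLC, 4 };
	struct gem_obj uncached = { DOMAIN_GTT, CACHE_NONE, 4 };

	printf("LLC object needs mappable: %d, uncached object: %d\n",
	       need_reloc_mappable(&llc), need_reloc_mappable(&uncached));
	return 0;
}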
Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 2 ++ drivers/gpu/drm/i915/i915_gem.c | 4 +--- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 36 +++++++++++++++++++++--------- 3 files changed, 28 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d680f09e872a..93e06a3225cc 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1266,6 +1266,8 @@ int __must_check i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write); int __must_check +i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); +int __must_check i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, u32 alignment, struct intel_ring_buffer *pipelined); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 90e3fc18b8b5..09c033e5e02b 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -39,8 +39,6 @@ static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj); static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); -static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, - bool write); static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, uint64_t offset, uint64_t size); @@ -3073,7 +3071,7 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj) * This function returns when the move is complete, including waiting on * flushes to occur. */ -static int +int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) { uint32_t old_write_domain, old_read_domains; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 1fa01313d89f..eb85860001ec 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -266,6 +266,12 @@ eb_destroy(struct eb_objects *eb) kfree(eb); } +static inline int use_cpu_reloc(struct drm_i915_gem_object *obj) +{ + return (obj->base.write_domain == I915_GEM_DOMAIN_CPU || + obj->cache_level != I915_CACHE_NONE); +} + static int i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, struct eb_objects *eb, @@ -354,11 +360,19 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, return ret; } + /* We can't wait for rendering with pagefaults disabled */ + if (obj->active && in_atomic()) + return -EFAULT; + reloc->delta += target_offset; - if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) { + if (use_cpu_reloc(obj)) { uint32_t page_offset = reloc->offset & ~PAGE_MASK; char *vaddr; + ret = i915_gem_object_set_to_cpu_domain(obj, 1); + if (ret) + return ret; + vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]); *(uint32_t *)(vaddr + page_offset) = reloc->delta; kunmap_atomic(vaddr); @@ -367,10 +381,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, uint32_t __iomem *reloc_entry; void __iomem *reloc_page; - /* We can't wait for rendering with pagefaults disabled */ - if (obj->active && in_atomic()) - return -EFAULT; - ret = i915_gem_object_set_to_gtt_domain(obj, 1); if (ret) return ret; @@ -492,6 +502,13 @@ i915_gem_execbuffer_relocate(struct drm_device *dev, #define __EXEC_OBJECT_HAS_FENCE (1<<31) 
+static int +need_reloc_mappable(struct drm_i915_gem_object *obj) +{ + struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; + return entry->relocation_count && !use_cpu_reloc(obj); +} + static int pin_and_fence_object(struct drm_i915_gem_object *obj, struct intel_ring_buffer *ring) @@ -505,8 +522,7 @@ pin_and_fence_object(struct drm_i915_gem_object *obj, has_fenced_gpu_access && entry->flags & EXEC_OBJECT_NEEDS_FENCE && obj->tiling_mode != I915_TILING_NONE; - need_mappable = - entry->relocation_count ? true : need_fence; + need_mappable = need_fence || need_reloc_mappable(obj); ret = i915_gem_object_pin(obj, entry->alignment, need_mappable); if (ret) @@ -563,8 +579,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, has_fenced_gpu_access && entry->flags & EXEC_OBJECT_NEEDS_FENCE && obj->tiling_mode != I915_TILING_NONE; - need_mappable = - entry->relocation_count ? true : need_fence; + need_mappable = need_fence || need_reloc_mappable(obj); if (need_mappable) list_move(&obj->exec_list, &ordered_objects); @@ -604,8 +619,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, has_fenced_gpu_access && entry->flags & EXEC_OBJECT_NEEDS_FENCE && obj->tiling_mode != I915_TILING_NONE; - need_mappable = - entry->relocation_count ? true : need_fence; + need_mappable = need_fence || need_reloc_mappable(obj); if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) || (need_mappable && !obj->map_and_fenceable)) -- cgit v1.2.3-59-g8ed1b From e244a443bf72bccb44c75cadf04720101d1f313a Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 25 Mar 2012 19:47:28 +0200 Subject: drm/i915: merge shmem_pwrite slow&fast-path With the previous rewrite, they've become essential identical. v2: Simplify the page_do_bit17_swizzling logic as suggested by Chris Wilson. Tested-by: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 126 +++++++++++----------------------------- 1 file changed, 33 insertions(+), 93 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 09c033e5e02b..a2547528a9d4 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -691,84 +691,11 @@ out_unpin_pages: return ret; } -/** - * This is the fast shmem pwrite path, which attempts to directly - * copy_from_user into the kmapped pages backing the object. 
- */ -static int -i915_gem_shmem_pwrite_fast(struct drm_device *dev, - struct drm_i915_gem_object *obj, - struct drm_i915_gem_pwrite *args, - struct drm_file *file) -{ - struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; - ssize_t remain; - loff_t offset; - char __user *user_data; - int page_offset, page_length; - - user_data = (char __user *) (uintptr_t) args->data_ptr; - remain = args->size; - - offset = args->offset; - obj->dirty = 1; - - while (remain > 0) { - struct page *page; - char *vaddr; - int ret; - - /* Operation in this page - * - * page_offset = offset within page - * page_length = bytes to copy for this page - */ - page_offset = offset_in_page(offset); - page_length = remain; - if ((page_offset + remain) > PAGE_SIZE) - page_length = PAGE_SIZE - page_offset; - - page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); - if (IS_ERR(page)) - return PTR_ERR(page); - - vaddr = kmap_atomic(page); - ret = __copy_from_user_inatomic(vaddr + page_offset, - user_data, - page_length); - kunmap_atomic(vaddr); - - set_page_dirty(page); - mark_page_accessed(page); - page_cache_release(page); - - /* If we get a fault while copying data, then (presumably) our - * source page isn't available. Return the error and we'll - * retry in the slow path. - */ - if (ret) - return -EFAULT; - - remain -= page_length; - user_data += page_length; - offset += page_length; - } - - return 0; -} - -/** - * This is the fallback shmem pwrite path, which uses get_user_pages to pin - * the memory and maps it using kmap_atomic for copying. - * - * This avoids taking mmap_sem for faulting on the user's address while the - * struct_mutex is held. - */ static int -i915_gem_shmem_pwrite_slow(struct drm_device *dev, - struct drm_i915_gem_object *obj, - struct drm_i915_gem_pwrite *args, - struct drm_file *file) +i915_gem_shmem_pwrite(struct drm_device *dev, + struct drm_i915_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file) { struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; ssize_t remain; @@ -776,6 +703,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, char __user *user_data; int shmem_page_offset, page_length, ret = 0; int obj_do_bit17_swizzling, page_do_bit17_swizzling; + int hit_slowpath = 0; user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; @@ -785,8 +713,6 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, offset = args->offset; obj->dirty = 1; - mutex_unlock(&dev->struct_mutex); - while (remain > 0) { struct page *page; char *vaddr; @@ -811,6 +737,21 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, page_do_bit17_swizzling = obj_do_bit17_swizzling && (page_to_phys(page) & (1 << 17)) != 0; + if (!page_do_bit17_swizzling) { + vaddr = kmap_atomic(page); + ret = __copy_from_user_inatomic(vaddr + shmem_page_offset, + user_data, + page_length); + kunmap_atomic(vaddr); + + if (ret == 0) + goto next_page; + } + + hit_slowpath = 1; + + mutex_unlock(&dev->struct_mutex); + vaddr = kmap(page); if (page_do_bit17_swizzling) ret = __copy_from_user_swizzled(vaddr, shmem_page_offset, @@ -822,6 +763,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, page_length); kunmap(page); + mutex_lock(&dev->struct_mutex); +next_page: set_page_dirty(page); mark_page_accessed(page); page_cache_release(page); @@ -837,15 +780,16 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, } out: - mutex_lock(&dev->struct_mutex); - /* Fixup: Kill any reinstated backing storage pages */ - if (obj->madv == 
__I915_MADV_PURGED) - i915_gem_object_truncate(obj); - /* and flush dirty cachelines in case the object isn't in the cpu write - * domain anymore. */ - if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) { - i915_gem_clflush_object(obj); - intel_gtt_chipset_flush(); + if (hit_slowpath) { + /* Fixup: Kill any reinstated backing storage pages */ + if (obj->madv == __I915_MADV_PURGED) + i915_gem_object_truncate(obj); + /* and flush dirty cachelines in case the object isn't in the cpu write + * domain anymore. */ + if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) { + i915_gem_clflush_object(obj); + intel_gtt_chipset_flush(); + } } return ret; @@ -939,11 +883,7 @@ out_unpin: if (ret) goto out; - ret = -EFAULT; - if (!i915_gem_object_needs_bit17_swizzle(obj)) - ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file); - if (ret == -EFAULT) - ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file); + ret = i915_gem_shmem_pwrite(dev, obj, args, file); out: drm_gem_object_unreference(&obj->base); -- cgit v1.2.3-59-g8ed1b From dbf7bff074d5fdc87c61b1b41d8e809109cf0bf8 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 25 Mar 2012 19:47:29 +0200 Subject: drm/i915: merge shmem_pread slow&fast-path With the previous rewrite, they've become essential identical. v2: Simplify the page_do_bit17_swizzling logic as suggested by Chris Wilson. Tested-by: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 108 ++++++++++------------------------------ 1 file changed, 27 insertions(+), 81 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index a2547528a9d4..1855e72859a8 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -239,66 +239,6 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) obj->tiling_mode != I915_TILING_NONE; } -/** - * This is the fast shmem pread path, which attempts to copy_from_user directly - * from the backing pages of the object to the user's address space. On a - * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow(). 
- */ -static int -i915_gem_shmem_pread_fast(struct drm_device *dev, - struct drm_i915_gem_object *obj, - struct drm_i915_gem_pread *args, - struct drm_file *file) -{ - struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; - ssize_t remain; - loff_t offset; - char __user *user_data; - int page_offset, page_length; - - user_data = (char __user *) (uintptr_t) args->data_ptr; - remain = args->size; - - offset = args->offset; - - while (remain > 0) { - struct page *page; - char *vaddr; - int ret; - - /* Operation in this page - * - * page_offset = offset within page - * page_length = bytes to copy for this page - */ - page_offset = offset_in_page(offset); - page_length = remain; - if ((page_offset + remain) > PAGE_SIZE) - page_length = PAGE_SIZE - page_offset; - - page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); - if (IS_ERR(page)) - return PTR_ERR(page); - - vaddr = kmap_atomic(page); - ret = __copy_to_user_inatomic(user_data, - vaddr + page_offset, - page_length); - kunmap_atomic(vaddr); - - mark_page_accessed(page); - page_cache_release(page); - if (ret) - return -EFAULT; - - remain -= page_length; - user_data += page_length; - offset += page_length; - } - - return 0; -} - static inline int __copy_to_user_swizzled(char __user *cpu_vaddr, const char *gpu_vaddr, int gpu_offset, @@ -351,17 +291,11 @@ __copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset, return 0; } -/** - * This is the fallback shmem pread path, which allocates temporary storage - * in kernel space to copy_to_user into outside of the struct_mutex, so we - * can copy out of the object's backing pages while holding the struct mutex - * and not take page faults. - */ static int -i915_gem_shmem_pread_slow(struct drm_device *dev, - struct drm_i915_gem_object *obj, - struct drm_i915_gem_pread *args, - struct drm_file *file) +i915_gem_shmem_pread(struct drm_device *dev, + struct drm_i915_gem_object *obj, + struct drm_i915_gem_pread *args, + struct drm_file *file) { struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; char __user *user_data; @@ -369,6 +303,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, loff_t offset; int shmem_page_offset, page_length, ret = 0; int obj_do_bit17_swizzling, page_do_bit17_swizzling; + int hit_slowpath = 0; user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; @@ -377,8 +312,6 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, offset = args->offset; - mutex_unlock(&dev->struct_mutex); - while (remain > 0) { struct page *page; char *vaddr; @@ -402,6 +335,20 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, page_do_bit17_swizzling = obj_do_bit17_swizzling && (page_to_phys(page) & (1 << 17)) != 0; + if (!page_do_bit17_swizzling) { + vaddr = kmap_atomic(page); + ret = __copy_to_user_inatomic(user_data, + vaddr + shmem_page_offset, + page_length); + kunmap_atomic(vaddr); + if (ret == 0) + goto next_page; + } + + hit_slowpath = 1; + + mutex_unlock(&dev->struct_mutex); + vaddr = kmap(page); if (page_do_bit17_swizzling) ret = __copy_to_user_swizzled(user_data, @@ -413,6 +360,8 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, page_length); kunmap(page); + mutex_lock(&dev->struct_mutex); +next_page: mark_page_accessed(page); page_cache_release(page); @@ -427,10 +376,11 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, } out: - mutex_lock(&dev->struct_mutex); - /* Fixup: Kill any reinstated backing storage pages */ - if (obj->madv == __I915_MADV_PURGED) - i915_gem_object_truncate(obj); + if 
(hit_slowpath) { + /* Fixup: Kill any reinstated backing storage pages */ + if (obj->madv == __I915_MADV_PURGED) + i915_gem_object_truncate(obj); + } return ret; } @@ -486,11 +436,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, if (ret) goto out; - ret = -EFAULT; - if (!i915_gem_object_needs_bit17_swizzle(obj)) - ret = i915_gem_shmem_pread_fast(dev, obj, args, file); - if (ret == -EFAULT) - ret = i915_gem_shmem_pread_slow(dev, obj, args, file); + ret = i915_gem_shmem_pread(dev, obj, args, file); out: drm_gem_object_unreference(&obj->base); -- cgit v1.2.3-59-g8ed1b From 6d5cd9cb1e32e4f4e4468704430b26bcb0bfb129 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 25 Mar 2012 19:47:30 +0200 Subject: drm: add helper to clflush a virtual address range Useful when the page is already mapped to copy date in/out. For -stable because the next patch (fixing phys obj pwrite) needs this little helper function. Tested-by: Chris Wilson Reviewed-by: Chris Wilson Cc: dri-devel@lists.freedesktop.org Signed-off-by: Daniel Vetter --- drivers/gpu/drm/drm_cache.c | 23 +++++++++++++++++++++++ include/drm/drmP.h | 1 + 2 files changed, 24 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c index 592865381c6e..c7c8f6b5786f 100644 --- a/drivers/gpu/drm/drm_cache.c +++ b/drivers/gpu/drm/drm_cache.c @@ -98,3 +98,26 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages) #endif } EXPORT_SYMBOL(drm_clflush_pages); + +void +drm_clflush_virt_range(char *addr, unsigned long length) +{ +#if defined(CONFIG_X86) + if (cpu_has_clflush) { + char *end = addr + length; + mb(); + for (; addr < end; addr += boot_cpu_data.x86_clflush_size) + clflush(addr); + clflush(end - 1); + mb(); + return; + } + + if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0) + printk(KERN_ERR "Timed out waiting for cache flush.\n"); +#else + printk(KERN_ERR "Architecture has no drm_cache.c support\n"); + WARN_ON_ONCE(1); +#endif +} +EXPORT_SYMBOL(drm_clflush_virt_range); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 92f0981b5fb8..d33597bcc77c 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1332,6 +1332,7 @@ extern int drm_remove_magic(struct drm_master *master, drm_magic_t magic); /* Cache management (drm_cache.c) */ void drm_clflush_pages(struct page *pages[], unsigned long num_pages); +void drm_clflush_virt_range(char *addr, unsigned long length); /* Locking IOCTL support (drm_lock.h) */ extern int drm_lock(struct drm_device *dev, void *data, -- cgit v1.2.3-59-g8ed1b From 8489731c9bd22c27ab17a2190cd7444604abf95f Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 25 Mar 2012 19:47:31 +0200 Subject: drm/i915: move clflushing into shmem_pread This is obviously gonna slow down pread. But for a half-way realistic micro-benchmark, it doesn't matter: Non-broken userspace reads back data from the gpu once before the gpu again dirties it. So all this ranged clflush tracking is just a waste of time. No pread performance change (neglecting the dumb benchmark of constantly reading the same data) measured. As an added bonus, this avoids clflush on read on coherent objects. Which means that partial preads on snb are now roughly 4x as fast. This will be usefull for e.g. the libva encoder - when I finally get around to fix that up. v2: Properly sync with the gpu on LLC machines. 
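A simplified userspace sketch of the shape of the read path (the flush is a stub standing in for drm_clflush_virt_range; the domain/coherency flags are plain booleans here): decide once before the copy whether flushing is needed, then flush only the bytes actually read.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* stub standing in for drm_clflush_virt_range(); here it only reports */
static void clflush_range(const char *addr, size_t len)
{
	printf("clflush %zu bytes at %p\n", len, (const void *)addr);
}

static void shmem_pread(const char *backing, size_t obj_size,
			char *dst, size_t offset, size_t len,
			bool coherent, bool in_cpu_read_domain)
{
	/* decided once, before the copy loop */
	bool needs_clflush = !in_cpu_read_domain && !coherent;

	if (offset + len > obj_size)
		len = obj_size - offset;

	/* flush only the range actually read, not the whole object */
	if (needs_clflush)
		clflush_range(backing + offset, len);
	memcpy(dst, backing + offset, len);
}

int main(void)
{
	char obj[4096] = "hello", out[16];

	shmem_pread(obj, sizeof(obj), out, 0, 6, false, false);
	printf("read back: %s\n", out);
	return 0;
}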
Tested-by: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 1855e72859a8..9cdeeef5d6d7 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -304,12 +304,25 @@ i915_gem_shmem_pread(struct drm_device *dev, int shmem_page_offset, page_length, ret = 0; int obj_do_bit17_swizzling, page_do_bit17_swizzling; int hit_slowpath = 0; + int needs_clflush = 0; user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); + if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) { + /* If we're not in the cpu read domain, set ourself into the gtt + * read domain and manually flush cachelines (if required). This + * optimizes for the case when the gpu will dirty the data + * anyway again before the next pread happens. */ + if (obj->cache_level == I915_CACHE_NONE) + needs_clflush = 1; + ret = i915_gem_object_set_to_gtt_domain(obj, false); + if (ret) + return ret; + } + offset = args->offset; while (remain > 0) { @@ -337,6 +350,9 @@ i915_gem_shmem_pread(struct drm_device *dev, if (!page_do_bit17_swizzling) { vaddr = kmap_atomic(page); + if (needs_clflush) + drm_clflush_virt_range(vaddr + shmem_page_offset, + page_length); ret = __copy_to_user_inatomic(user_data, vaddr + shmem_page_offset, page_length); @@ -350,6 +366,10 @@ i915_gem_shmem_pread(struct drm_device *dev, mutex_unlock(&dev->struct_mutex); vaddr = kmap(page); + if (needs_clflush) + drm_clflush_virt_range(vaddr + shmem_page_offset, + page_length); + if (page_do_bit17_swizzling) ret = __copy_to_user_swizzled(user_data, vaddr, shmem_page_offset, @@ -430,12 +450,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, trace_i915_gem_object_pread(obj, args->offset, args->size); - ret = i915_gem_object_set_cpu_read_domain_range(obj, - args->offset, - args->size); - if (ret) - goto out; - ret = i915_gem_shmem_pread(dev, obj, args, file); out: -- cgit v1.2.3-59-g8ed1b From a0356fc373517bc19713c9ff3aeedccbcc2ef7a4 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 25 Mar 2012 19:47:32 +0200 Subject: drm/i915: kill ranged cpu read domain support No longer needed. Tested-by: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 7 --- drivers/gpu/drm/i915/i915_gem.c | 117 ---------------------------------------- 2 files changed, 124 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 93e06a3225cc..e11fcb09aea2 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -927,13 +927,6 @@ struct drm_i915_gem_object { /** Record of address bit 17 of each page at last unbind. */ unsigned long *bit_17; - - /** - * If present, while GEM_DOMAIN_CPU is in the read domain this array - * flags which individual pages are valid. 
- */ - uint8_t *page_cpu_valid; - /** User space pin count and filp owning the pin */ uint32_t user_pin_count; struct drm_file *pin_filp; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 9cdeeef5d6d7..34ef339158c1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -39,10 +39,6 @@ static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj); static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); -static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, - uint64_t offset, - uint64_t size); -static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj); static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, unsigned alignment, bool map_and_fenceable); @@ -2990,11 +2986,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) i915_gem_object_flush_gtt_write_domain(obj); - /* If we have a partially-valid cache of the object in the CPU, - * finish invalidating it and free the per-page flags. - */ - i915_gem_object_set_to_full_cpu_read_domain(obj); - old_write_domain = obj->base.write_domain; old_read_domains = obj->base.read_domains; @@ -3025,113 +3016,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) return 0; } -/** - * Moves the object from a partially CPU read to a full one. - * - * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(), - * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU). - */ -static void -i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj) -{ - if (!obj->page_cpu_valid) - return; - - /* If we're partially in the CPU read domain, finish moving it in. - */ - if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) { - int i; - - for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) { - if (obj->page_cpu_valid[i]) - continue; - drm_clflush_pages(obj->pages + i, 1); - } - } - - /* Free the page_cpu_valid mappings which are now stale, whether - * or not we've got I915_GEM_DOMAIN_CPU. - */ - kfree(obj->page_cpu_valid); - obj->page_cpu_valid = NULL; -} - -/** - * Set the CPU read domain on a range of the object. - * - * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's - * not entirely valid. The page_cpu_valid member of the object flags which - * pages have been flushed, and will be respected by - * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping - * of the whole object. - * - * This function returns when the move is complete, including waiting on - * flushes to occur. - */ -static int -i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, - uint64_t offset, uint64_t size) -{ - uint32_t old_read_domains; - int i, ret; - - if (offset == 0 && size == obj->base.size) - return i915_gem_object_set_to_cpu_domain(obj, 0); - - ret = i915_gem_object_flush_gpu_write_domain(obj); - if (ret) - return ret; - - ret = i915_gem_object_wait_rendering(obj); - if (ret) - return ret; - - i915_gem_object_flush_gtt_write_domain(obj); - - /* If we're already fully in the CPU read domain, we're done. 
*/ - if (obj->page_cpu_valid == NULL && - (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0) - return 0; - - /* Otherwise, create/clear the per-page CPU read domain flag if we're - * newly adding I915_GEM_DOMAIN_CPU - */ - if (obj->page_cpu_valid == NULL) { - obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE, - GFP_KERNEL); - if (obj->page_cpu_valid == NULL) - return -ENOMEM; - } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) - memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE); - - /* Flush the cache on any pages that are still invalid from the CPU's - * perspective. - */ - for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; - i++) { - if (obj->page_cpu_valid[i]) - continue; - - drm_clflush_pages(obj->pages + i, 1); - - obj->page_cpu_valid[i] = 1; - } - - /* It should now be out of any other write domains, and we can update - * the domain values for our changes. - */ - BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0); - - old_read_domains = obj->base.read_domains; - obj->base.read_domains |= I915_GEM_DOMAIN_CPU; - - trace_i915_gem_object_change_domain(obj, - old_read_domains, - obj->base.write_domain); - - return 0; -} - /* Throttle our rendering by waiting until the ring has completed our requests * emitted over 20 msec ago. * @@ -3556,7 +3440,6 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj) drm_gem_object_release(&obj->base); i915_gem_info_remove_obj(dev_priv, obj->base.size); - kfree(obj->page_cpu_valid); kfree(obj->bit_17); kfree(obj); } -- cgit v1.2.3-59-g8ed1b From 3ae5378330f581466b05bcea2dd3bc8731a598cb Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 25 Mar 2012 19:47:33 +0200 Subject: drm/i915: don't use gtt_pwrite on LLC cached objects MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ~120 µs instead fo ~210 µs to write 1mb on my snb. I like this. Tested-by: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 34ef339158c1..75e3b841fffb 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -808,6 +808,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, } if (obj->gtt_space && + obj->cache_level == I915_CACHE_NONE && obj->base.write_domain != I915_GEM_DOMAIN_CPU) { ret = i915_gem_object_pin(obj, 0, true); if (ret) -- cgit v1.2.3-59-g8ed1b From 692a576b9ddf8006f1559e14a5022c0a100440f1 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 25 Mar 2012 19:47:34 +0200 Subject: drm/i915: don't call shmem_read_mapping unnecessarily MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This speeds up pwrite and pread from ~120 µs ro ~100 µs for reading/writing 1mb on my snb (if the backing storage pages are already pinned, of course). v2: Chris Wilson pointed out a glaring page reference bug - I've unconditionally dropped the reference. With that fixed (and the associated reduction of dirt in dmesg) it's now even a notch faster. v3: Unconditionaly grab a page reference when dropping dev->struct_mutex to simplify the code-flow. 
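A stripped-down sketch of the page-lookup decision (hypothetical structures; the extra page reference taken around the lock drop is omitted): reuse the pinned page array when it exists, otherwise fall back to a lookup whose result the caller must release.

#include <stdio.h>
#include <stdlib.h>

struct page { int id; };

struct gem_obj {
	struct page **pages;	/* non-NULL only while the object is pinned */
	int npages;
};

/* stand-in for shmem_read_mapping_page(): pretend to look a page up */
static struct page *slow_lookup(int index)
{
	struct page *p = malloc(sizeof(*p));

	if (p)
		p->id = index;
	return p;
}

static struct page *get_backing_page(struct gem_obj *obj, int index,
				     int *release_page)
{
	if (obj->pages) {
		*release_page = 0;	/* pinned: no lookup, nothing to drop */
		return obj->pages[index];
	}
	*release_page = 1;		/* caller owns the extra reference */
	return slow_lookup(index);
}

int main(void)
{
	struct page p0 = { 0 };
	struct page *cached[1] = { &p0 };
	struct gem_obj pinned = { cached, 1 }, unpinned = { NULL, 0 };
	struct page *p;
	int release;

	p = get_backing_page(&pinned, 0, &release);
	printf("pinned: page %d, release=%d\n", p->id, release);
	p = get_backing_page(&unpinned, 0, &release);
	if (!p)
		return 1;
	printf("unpinned: page %d, release=%d\n", p->id, release);
	if (release)
		free(p);
	return 0;
}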
Tested-by: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 42 +++++++++++++++++++++++++++++------------ 1 file changed, 30 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 75e3b841fffb..b253257c028e 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -301,6 +301,7 @@ i915_gem_shmem_pread(struct drm_device *dev, int obj_do_bit17_swizzling, page_do_bit17_swizzling; int hit_slowpath = 0; int needs_clflush = 0; + int release_page; user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; @@ -335,10 +336,16 @@ i915_gem_shmem_pread(struct drm_device *dev, if ((shmem_page_offset + page_length) > PAGE_SIZE) page_length = PAGE_SIZE - shmem_page_offset; - page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); - if (IS_ERR(page)) { - ret = PTR_ERR(page); - goto out; + if (obj->pages) { + page = obj->pages[offset >> PAGE_SHIFT]; + release_page = 0; + } else { + page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); + if (IS_ERR(page)) { + ret = PTR_ERR(page); + goto out; + } + release_page = 1; } page_do_bit17_swizzling = obj_do_bit17_swizzling && @@ -358,7 +365,7 @@ i915_gem_shmem_pread(struct drm_device *dev, } hit_slowpath = 1; - + page_cache_get(page); mutex_unlock(&dev->struct_mutex); vaddr = kmap(page); @@ -377,9 +384,11 @@ i915_gem_shmem_pread(struct drm_device *dev, kunmap(page); mutex_lock(&dev->struct_mutex); + page_cache_release(page); next_page: mark_page_accessed(page); - page_cache_release(page); + if (release_page) + page_cache_release(page); if (ret) { ret = -EFAULT; @@ -660,6 +669,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, int shmem_page_offset, page_length, ret = 0; int obj_do_bit17_swizzling, page_do_bit17_swizzling; int hit_slowpath = 0; + int release_page; user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; @@ -684,10 +694,16 @@ i915_gem_shmem_pwrite(struct drm_device *dev, if ((shmem_page_offset + page_length) > PAGE_SIZE) page_length = PAGE_SIZE - shmem_page_offset; - page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); - if (IS_ERR(page)) { - ret = PTR_ERR(page); - goto out; + if (obj->pages) { + page = obj->pages[offset >> PAGE_SHIFT]; + release_page = 0; + } else { + page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); + if (IS_ERR(page)) { + ret = PTR_ERR(page); + goto out; + } + release_page = 1; } page_do_bit17_swizzling = obj_do_bit17_swizzling && @@ -705,7 +721,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, } hit_slowpath = 1; - + page_cache_get(page); mutex_unlock(&dev->struct_mutex); vaddr = kmap(page); @@ -720,10 +736,12 @@ i915_gem_shmem_pwrite(struct drm_device *dev, kunmap(page); mutex_lock(&dev->struct_mutex); + page_cache_release(page); next_page: set_page_dirty(page); mark_page_accessed(page); - page_cache_release(page); + if (release_page) + page_cache_release(page); if (ret) { ret = -EFAULT; -- cgit v1.2.3-59-g8ed1b From 935aaa692ec5e4b7261ed7f17f962d7e978c542b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 25 Mar 2012 19:47:35 +0200 Subject: drm/i915: drop gtt slowpath With the proper prefault, it's extremely unlikely that we fall back to the gtt slowpath. So just kill it and use the shmem_pwrite path as fallback. To further clean up the code, move the preparatory gem calls into the respective pwrite functions. This way the gtt_fast->shmem fallback is much more obvious. 
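Schematically, with stub back-ends and error codes only (not the real functions), the flow reduces to: try the GTT fast path where it is allowed, and treat any -EFAULT as the cue to fall back to the shmem path.

#include <stdio.h>

#define EFAULT 14

/* stubs standing in for the two pwrite back-ends */
static int gtt_pwrite_fast(int simulate_fault)
{
	return simulate_fault ? -EFAULT : 0;
}

static int shmem_pwrite(void)
{
	return 0;
}

static int pwrite_ioctl(int tiled, int simulate_fault)
{
	int ret = -EFAULT;

	/* only untiled buffers even try the GTT path */
	if (!tiled)
		ret = gtt_pwrite_fast(simulate_fault);

	/* a single, obvious fallback instead of a second GTT slowpath */
	if (ret == -EFAULT)
		ret = shmem_pwrite();
	return ret;
}

int main(void)
{
	printf("fast path: %d, faulting fast path: %d\n",
	       pwrite_ioctl(0, 0), pwrite_ioctl(0, 1));
	return 0;
}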
Tested-by: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 183 +++++++--------------------------------- 1 file changed, 30 insertions(+), 153 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b253257c028e..23f1a6bcee73 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -484,30 +484,6 @@ fast_user_write(struct io_mapping *mapping, return unwritten; } -/* Here's the write path which can sleep for - * page faults - */ - -static inline void -slow_kernel_write(struct io_mapping *mapping, - loff_t gtt_base, int gtt_offset, - struct page *user_page, int user_offset, - int length) -{ - char __iomem *dst_vaddr; - char *src_vaddr; - - dst_vaddr = io_mapping_map_wc(mapping, gtt_base); - src_vaddr = kmap(user_page); - - memcpy_toio(dst_vaddr + gtt_offset, - src_vaddr + user_offset, - length); - - kunmap(user_page); - io_mapping_unmap(dst_vaddr); -} - /** * This is the fast pwrite path, where we copy the data directly from the * user into the GTT, uncached. @@ -522,7 +498,19 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, ssize_t remain; loff_t offset, page_base; char __user *user_data; - int page_offset, page_length; + int page_offset, page_length, ret; + + ret = i915_gem_object_pin(obj, 0, true); + if (ret) + goto out; + + ret = i915_gem_object_set_to_gtt_domain(obj, true); + if (ret) + goto out_unpin; + + ret = i915_gem_object_put_fence(obj); + if (ret) + goto out_unpin; user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; @@ -547,112 +535,19 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, * retry in the slow path. */ if (fast_user_write(dev_priv->mm.gtt_mapping, page_base, - page_offset, user_data, page_length)) - return -EFAULT; + page_offset, user_data, page_length)) { + ret = -EFAULT; + goto out_unpin; + } remain -= page_length; user_data += page_length; offset += page_length; } - return 0; -} - -/** - * This is the fallback GTT pwrite path, which uses get_user_pages to pin - * the memory and maps it using kmap_atomic for copying. - * - * This code resulted in x11perf -rgb10text consuming about 10% more CPU - * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit). - */ -static int -i915_gem_gtt_pwrite_slow(struct drm_device *dev, - struct drm_i915_gem_object *obj, - struct drm_i915_gem_pwrite *args, - struct drm_file *file) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - ssize_t remain; - loff_t gtt_page_base, offset; - loff_t first_data_page, last_data_page, num_pages; - loff_t pinned_pages, i; - struct page **user_pages; - struct mm_struct *mm = current->mm; - int gtt_page_offset, data_page_offset, data_page_index, page_length; - int ret; - uint64_t data_ptr = args->data_ptr; - - remain = args->size; - - /* Pin the user pages containing the data. We can't fault while - * holding the struct mutex, and all of the pwrite implementations - * want to hold it while dereferencing the user data. 
- */ - first_data_page = data_ptr / PAGE_SIZE; - last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; - num_pages = last_data_page - first_data_page + 1; - - user_pages = drm_malloc_ab(num_pages, sizeof(struct page *)); - if (user_pages == NULL) - return -ENOMEM; - - mutex_unlock(&dev->struct_mutex); - down_read(&mm->mmap_sem); - pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, - num_pages, 0, 0, user_pages, NULL); - up_read(&mm->mmap_sem); - mutex_lock(&dev->struct_mutex); - if (pinned_pages < num_pages) { - ret = -EFAULT; - goto out_unpin_pages; - } - - ret = i915_gem_object_set_to_gtt_domain(obj, true); - if (ret) - goto out_unpin_pages; - - ret = i915_gem_object_put_fence(obj); - if (ret) - goto out_unpin_pages; - - offset = obj->gtt_offset + args->offset; - - while (remain > 0) { - /* Operation in this page - * - * gtt_page_base = page offset within aperture - * gtt_page_offset = offset within page in aperture - * data_page_index = page number in get_user_pages return - * data_page_offset = offset with data_page_index page. - * page_length = bytes to copy for this page - */ - gtt_page_base = offset & PAGE_MASK; - gtt_page_offset = offset_in_page(offset); - data_page_index = data_ptr / PAGE_SIZE - first_data_page; - data_page_offset = offset_in_page(data_ptr); - - page_length = remain; - if ((gtt_page_offset + page_length) > PAGE_SIZE) - page_length = PAGE_SIZE - gtt_page_offset; - if ((data_page_offset + page_length) > PAGE_SIZE) - page_length = PAGE_SIZE - data_page_offset; - - slow_kernel_write(dev_priv->mm.gtt_mapping, - gtt_page_base, gtt_page_offset, - user_pages[data_page_index], - data_page_offset, - page_length); - - remain -= page_length; - offset += page_length; - data_ptr += page_length; - } - -out_unpin_pages: - for (i = 0; i < pinned_pages; i++) - page_cache_release(user_pages[i]); - drm_free_large(user_pages); - +out_unpin: + i915_gem_object_unpin(obj); +out: return ret; } @@ -671,6 +566,10 @@ i915_gem_shmem_pwrite(struct drm_device *dev, int hit_slowpath = 0; int release_page; + ret = i915_gem_object_set_to_cpu_domain(obj, 1); + if (ret) + return ret; + user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; @@ -814,6 +713,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, trace_i915_gem_object_pwrite(obj, args->offset, args->size); + ret = -EFAULT; /* We can only do the GTT pwrite on untiled buffers, as otherwise * it would end up going through the fenced access, and we'll get * different detiling behavior between reading and writing. @@ -828,37 +728,14 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, if (obj->gtt_space && obj->cache_level == I915_CACHE_NONE && obj->base.write_domain != I915_GEM_DOMAIN_CPU) { - ret = i915_gem_object_pin(obj, 0, true); - if (ret) - goto out; - - ret = i915_gem_object_set_to_gtt_domain(obj, true); - if (ret) - goto out_unpin; - - ret = i915_gem_object_put_fence(obj); - if (ret) - goto out_unpin; - ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); - if (ret == -EFAULT) - ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file); - -out_unpin: - i915_gem_object_unpin(obj); - - if (ret != -EFAULT) - goto out; - /* Fall through to the shmfs paths because the gtt paths might - * fail with non-page-backed user pointers (e.g. gtt mappings - * when moving data between textures). */ + /* Note that the gtt paths might fail with non-page-backed user + * pointers (e.g. gtt mappings when moving data between + * textures). Fallback to the shmem path in that case. 
*/ } - ret = i915_gem_object_set_to_cpu_domain(obj, 1); - if (ret) - goto out; - - ret = i915_gem_shmem_pwrite(dev, obj, args, file); + if (ret == -EFAULT) + ret = i915_gem_shmem_pwrite(dev, obj, args, file); out: drm_gem_object_unreference(&obj->base); -- cgit v1.2.3-59-g8ed1b From 96d79b52701758404cf8701986891afc99ce810b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 25 Mar 2012 19:47:36 +0200 Subject: drm/i915: don't clobber userspace memory before commiting to the pread The pagemap.h prefault helpers do the prefaulting by simply writing some data into every page. Hence we should not prefault when we're not yet commited to to actually writing data to userspace. The problem is now that - we can't prefault while holding dev->struct_mutex for we could deadlock with our own pagefault handler - we need to grab dev->struct_mutex before copying to sync up with any outsanding gpu writes. Therefore only prefault when we're dropping the lock the first time in the pread slowpath - at that point we're committed to the write, don't wait on the gpu anymore and hence won't return early (with e.g. -EINTR). Tested-by: Chris Wilson Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 23f1a6bcee73..292a74f2fa87 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -300,6 +300,7 @@ i915_gem_shmem_pread(struct drm_device *dev, int shmem_page_offset, page_length, ret = 0; int obj_do_bit17_swizzling, page_do_bit17_swizzling; int hit_slowpath = 0; + int prefaulted = 0; int needs_clflush = 0; int release_page; @@ -368,6 +369,16 @@ i915_gem_shmem_pread(struct drm_device *dev, page_cache_get(page); mutex_unlock(&dev->struct_mutex); + if (!prefaulted) { + ret = fault_in_pages_writeable(user_data, remain); + /* Userspace is tricking us, but we've already clobbered + * its pages with the prefault and promised to write the + * data up to the first fault. Hence ignore any errors + * and just continue. */ + (void)ret; + prefaulted = 1; + } + vaddr = kmap(page); if (needs_clflush) drm_clflush_virt_range(vaddr + shmem_page_offset, @@ -431,11 +442,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, args->size)) return -EFAULT; - ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr, - args->size); - if (ret) - return -EFAULT; - ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; -- cgit v1.2.3-59-g8ed1b From 586428852a4fe64d77dc3e34c446fba33a2ca971 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 25 Mar 2012 19:47:37 +0200 Subject: drm/i915: implement inline clflush for pwrite In micro-benchmarking of the usual pwrite use-pattern of alternating pwrites with gtt domain reads from the gpu, this yields around 30% improvement of pwrite throughput across all buffers size. The trick is that we can avoid clflush cachelines that we will overwrite completely anyway. Furthermore for partial pwrites it gives a proportional speedup on top of the 30% percent because we only clflush back the part of the buffer we're actually writing. v2: Simplify the clflush-before-write logic, as suggested by Chris Wilson. v3: Finishing touches suggested by Chris Wilson: - add comment to needs_clflush_before and only set this if the bo is uncached. 
- s/needs_clflush/needs_clflush_after/ in the write paths to clearly differentiate it from needs_clflush_before. Tested-by: Chris Wilson Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 46 +++++++++++++++++++++++++++++++++++++---- 1 file changed, 42 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 292a74f2fa87..83dfb4407c8f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -570,23 +570,39 @@ i915_gem_shmem_pwrite(struct drm_device *dev, int shmem_page_offset, page_length, ret = 0; int obj_do_bit17_swizzling, page_do_bit17_swizzling; int hit_slowpath = 0; + int needs_clflush_after = 0; + int needs_clflush_before = 0; int release_page; - ret = i915_gem_object_set_to_cpu_domain(obj, 1); - if (ret) - return ret; - user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); + if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) { + /* If we're not in the cpu write domain, set ourself into the gtt + * write domain and manually flush cachelines (if required). This + * optimizes for the case when the gpu will use the data + * right away and we therefore have to clflush anyway. */ + if (obj->cache_level == I915_CACHE_NONE) + needs_clflush_after = 1; + ret = i915_gem_object_set_to_gtt_domain(obj, true); + if (ret) + return ret; + } + /* Same trick applies for invalidate partially written cachelines before + * writing. */ + if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU) + && obj->cache_level == I915_CACHE_NONE) + needs_clflush_before = 1; + offset = args->offset; obj->dirty = 1; while (remain > 0) { struct page *page; char *vaddr; + int partial_cacheline_write; /* Operation in this page * @@ -599,6 +615,13 @@ i915_gem_shmem_pwrite(struct drm_device *dev, if ((shmem_page_offset + page_length) > PAGE_SIZE) page_length = PAGE_SIZE - shmem_page_offset; + /* If we don't overwrite a cacheline completely we need to be + * careful to have up-to-date data by first clflushing. Don't + * overcomplicate things and flush the entire patch. 
*/ + partial_cacheline_write = needs_clflush_before && + ((shmem_page_offset | page_length) + & (boot_cpu_data.x86_clflush_size - 1)); + if (obj->pages) { page = obj->pages[offset >> PAGE_SHIFT]; release_page = 0; @@ -616,9 +639,15 @@ i915_gem_shmem_pwrite(struct drm_device *dev, if (!page_do_bit17_swizzling) { vaddr = kmap_atomic(page); + if (partial_cacheline_write) + drm_clflush_virt_range(vaddr + shmem_page_offset, + page_length); ret = __copy_from_user_inatomic(vaddr + shmem_page_offset, user_data, page_length); + if (needs_clflush_after) + drm_clflush_virt_range(vaddr + shmem_page_offset, + page_length); kunmap_atomic(vaddr); if (ret == 0) @@ -630,6 +659,9 @@ i915_gem_shmem_pwrite(struct drm_device *dev, mutex_unlock(&dev->struct_mutex); vaddr = kmap(page); + if (partial_cacheline_write) + drm_clflush_virt_range(vaddr + shmem_page_offset, + page_length); if (page_do_bit17_swizzling) ret = __copy_from_user_swizzled(vaddr, shmem_page_offset, user_data, @@ -638,6 +670,9 @@ i915_gem_shmem_pwrite(struct drm_device *dev, ret = __copy_from_user(vaddr + shmem_page_offset, user_data, page_length); + if (needs_clflush_after) + drm_clflush_virt_range(vaddr + shmem_page_offset, + page_length); kunmap(page); mutex_lock(&dev->struct_mutex); @@ -671,6 +706,9 @@ out: } } + if (needs_clflush_after) + intel_gtt_chipset_flush(); + return ret; } -- cgit v1.2.3-59-g8ed1b From ffc62976d215870e3082d9778660167c5fa3946d Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 25 Mar 2012 19:47:38 +0200 Subject: drm/i915: fall back to shmem pwrite when the buffer is not accessible It's too expensive to move it around just for that pwrite, especially when we're trashing on the mappable gtt part like crazy. Tested-by: Chris Wilson Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 83dfb4407c8f..7f33e6af6ecb 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -771,6 +771,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, if (obj->gtt_space && obj->cache_level == I915_CACHE_NONE && + obj->map_and_fenceable && obj->base.write_domain != I915_GEM_DOMAIN_CPU) { ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); /* Note that the gtt paths might fail with non-page-backed user -- cgit v1.2.3-59-g8ed1b From 117babcdd51ed8a9bb8d4921b292918acb6e0d70 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 25 Mar 2012 19:47:39 +0200 Subject: drm/i915: use uncached writes in pwrite It's around 20% faster. 
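The gain comes from the destination bypassing the cache: __copy_from_user_inatomic_nocache() boils down to non-temporal stores on x86, so the CPU neither pulls the destination cachelines in nor leaves behind data it will never read again, which is a good fit when the GPU is the next consumer. A rough userspace analogue of a non-temporal copy, for illustration only (x86 with SSE2 assumed; alignment handling omitted):

	#include <emmintrin.h>
	#include <stddef.h>

	/* Copy len bytes with non-temporal stores. For brevity this assumes
	 * dst is 16-byte aligned and len is a multiple of 16; real code has
	 * to handle the unaligned head and tail separately. */
	static void copy_nocache(void *dst, const void *src, size_t len)
	{
		__m128i *d = dst;
		const __m128i *s = src;
		size_t i;

		for (i = 0; i < len / 16; i++)
			_mm_stream_si128(&d[i], _mm_loadu_si128(&s[i]));
		_mm_sfence();	/* fence the weakly-ordered streaming stores */
	}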
Tested-by: Chris Wilson Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 7f33e6af6ecb..c84060ce6bf8 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -642,9 +642,9 @@ i915_gem_shmem_pwrite(struct drm_device *dev, if (partial_cacheline_write) drm_clflush_virt_range(vaddr + shmem_page_offset, page_length); - ret = __copy_from_user_inatomic(vaddr + shmem_page_offset, - user_data, - page_length); + ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset, + user_data, + page_length); if (needs_clflush_after) drm_clflush_virt_range(vaddr + shmem_page_offset, page_length); -- cgit v1.2.3-59-g8ed1b From d174bd6472d79fb5603dc8bd35e5184d83194ea8 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 25 Mar 2012 19:47:40 +0200 Subject: drm/i915: extract copy helpers from shmem_pread|pwrite While moving around things, this two functions slowly grew out of any sane bounds. So extract a few lines that do the copying and clflushing. Also add a few comments to explain what's going on. v2: Again do s/needs_clflush/needs_clflush_after/ in the write paths as suggested by Chris Wilson. Tested-by: Chris Wilson Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 196 ++++++++++++++++++++++++++++------------ 1 file changed, 136 insertions(+), 60 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c84060ce6bf8..e9cac478cced 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -287,6 +287,60 @@ __copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset, return 0; } +/* Per-page copy function for the shmem pread fastpath. + * Flushes invalid cachelines before reading the target if + * needs_clflush is set. */ +static int +shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length, + char __user *user_data, + bool page_do_bit17_swizzling, bool needs_clflush) +{ + char *vaddr; + int ret; + + if (page_do_bit17_swizzling) + return -EINVAL; + + vaddr = kmap_atomic(page); + if (needs_clflush) + drm_clflush_virt_range(vaddr + shmem_page_offset, + page_length); + ret = __copy_to_user_inatomic(user_data, + vaddr + shmem_page_offset, + page_length); + kunmap_atomic(vaddr); + + return ret; +} + +/* Only difference to the fast-path function is that this can handle bit17 + * and uses non-atomic copy and kmap functions. 
*/ +static int +shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length, + char __user *user_data, + bool page_do_bit17_swizzling, bool needs_clflush) +{ + char *vaddr; + int ret; + + vaddr = kmap(page); + if (needs_clflush) + drm_clflush_virt_range(vaddr + shmem_page_offset, + page_length); + + if (page_do_bit17_swizzling) + ret = __copy_to_user_swizzled(user_data, + vaddr, shmem_page_offset, + page_length); + else + ret = __copy_to_user(user_data, + vaddr + shmem_page_offset, + page_length); + kunmap(page); + + return ret; +} + static int i915_gem_shmem_pread(struct drm_device *dev, struct drm_i915_gem_object *obj, @@ -325,7 +379,6 @@ i915_gem_shmem_pread(struct drm_device *dev, while (remain > 0) { struct page *page; - char *vaddr; /* Operation in this page * @@ -352,18 +405,11 @@ i915_gem_shmem_pread(struct drm_device *dev, page_do_bit17_swizzling = obj_do_bit17_swizzling && (page_to_phys(page) & (1 << 17)) != 0; - if (!page_do_bit17_swizzling) { - vaddr = kmap_atomic(page); - if (needs_clflush) - drm_clflush_virt_range(vaddr + shmem_page_offset, - page_length); - ret = __copy_to_user_inatomic(user_data, - vaddr + shmem_page_offset, - page_length); - kunmap_atomic(vaddr); - if (ret == 0) - goto next_page; - } + ret = shmem_pread_fast(page, shmem_page_offset, page_length, + user_data, page_do_bit17_swizzling, + needs_clflush); + if (ret == 0) + goto next_page; hit_slowpath = 1; page_cache_get(page); @@ -379,20 +425,9 @@ i915_gem_shmem_pread(struct drm_device *dev, prefaulted = 1; } - vaddr = kmap(page); - if (needs_clflush) - drm_clflush_virt_range(vaddr + shmem_page_offset, - page_length); - - if (page_do_bit17_swizzling) - ret = __copy_to_user_swizzled(user_data, - vaddr, shmem_page_offset, - page_length); - else - ret = __copy_to_user(user_data, - vaddr + shmem_page_offset, - page_length); - kunmap(page); + ret = shmem_pread_slow(page, shmem_page_offset, page_length, + user_data, page_do_bit17_swizzling, + needs_clflush); mutex_lock(&dev->struct_mutex); page_cache_release(page); @@ -557,6 +592,70 @@ out: return ret; } +/* Per-page copy function for the shmem pwrite fastpath. + * Flushes invalid cachelines before writing to the target if + * needs_clflush_before is set and flushes out any written cachelines after + * writing if needs_clflush is set. */ +static int +shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length, + char __user *user_data, + bool page_do_bit17_swizzling, + bool needs_clflush_before, + bool needs_clflush_after) +{ + char *vaddr; + int ret; + + if (page_do_bit17_swizzling) + return -EINVAL; + + vaddr = kmap_atomic(page); + if (needs_clflush_before) + drm_clflush_virt_range(vaddr + shmem_page_offset, + page_length); + ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset, + user_data, + page_length); + if (needs_clflush_after) + drm_clflush_virt_range(vaddr + shmem_page_offset, + page_length); + kunmap_atomic(vaddr); + + return ret; +} + +/* Only difference to the fast-path function is that this can handle bit17 + * and uses non-atomic copy and kmap functions. 
*/ +static int +shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length, + char __user *user_data, + bool page_do_bit17_swizzling, + bool needs_clflush_before, + bool needs_clflush_after) +{ + char *vaddr; + int ret; + + vaddr = kmap(page); + if (needs_clflush_before) + drm_clflush_virt_range(vaddr + shmem_page_offset, + page_length); + if (page_do_bit17_swizzling) + ret = __copy_from_user_swizzled(vaddr, shmem_page_offset, + user_data, + page_length); + else + ret = __copy_from_user(vaddr + shmem_page_offset, + user_data, + page_length); + if (needs_clflush_after) + drm_clflush_virt_range(vaddr + shmem_page_offset, + page_length); + kunmap(page); + + return ret; +} + static int i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_i915_gem_object *obj, @@ -601,7 +700,6 @@ i915_gem_shmem_pwrite(struct drm_device *dev, while (remain > 0) { struct page *page; - char *vaddr; int partial_cacheline_write; /* Operation in this page @@ -637,43 +735,21 @@ i915_gem_shmem_pwrite(struct drm_device *dev, page_do_bit17_swizzling = obj_do_bit17_swizzling && (page_to_phys(page) & (1 << 17)) != 0; - if (!page_do_bit17_swizzling) { - vaddr = kmap_atomic(page); - if (partial_cacheline_write) - drm_clflush_virt_range(vaddr + shmem_page_offset, - page_length); - ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset, - user_data, - page_length); - if (needs_clflush_after) - drm_clflush_virt_range(vaddr + shmem_page_offset, - page_length); - kunmap_atomic(vaddr); - - if (ret == 0) - goto next_page; - } + ret = shmem_pwrite_fast(page, shmem_page_offset, page_length, + user_data, page_do_bit17_swizzling, + partial_cacheline_write, + needs_clflush_after); + if (ret == 0) + goto next_page; hit_slowpath = 1; page_cache_get(page); mutex_unlock(&dev->struct_mutex); - vaddr = kmap(page); - if (partial_cacheline_write) - drm_clflush_virt_range(vaddr + shmem_page_offset, - page_length); - if (page_do_bit17_swizzling) - ret = __copy_from_user_swizzled(vaddr, shmem_page_offset, - user_data, - page_length); - else - ret = __copy_from_user(vaddr + shmem_page_offset, - user_data, - page_length); - if (needs_clflush_after) - drm_clflush_virt_range(vaddr + shmem_page_offset, - page_length); - kunmap(page); + ret = shmem_pwrite_slow(page, shmem_page_offset, page_length, + user_data, page_do_bit17_swizzling, + partial_cacheline_write, + needs_clflush_after); mutex_lock(&dev->struct_mutex); page_cache_release(page); -- cgit v1.2.3-59-g8ed1b From f56f821feb7b36223f309e0ec05986bb137ce418 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 25 Mar 2012 19:47:41 +0200 Subject: mm: extend prefault helpers to fault in more than PAGE_SIZE drm/i915 wants to read/write more than one page in its fastpath and hence needs to prefault more than PAGE_SIZE bytes. Add new functions in filemap.h to make that possible. Also kill a copy&pasted spurious space in both functions while at it. v2: As suggested by Andrew Morton, add a multipage parameter to both functions to avoid the additional branch for the pagemap.c hotpath. My gcc 4.6 here seems to dtrt and indeed reap these branches where not needed. v3: Becaus I couldn't find a way around adding a uaddr += PAGE_SIZE to the filemap.c hotpaths (that the compiler couldn't remove again), let's go with separate new functions for the multipage use-case. v4: Adjust comment to CodingStlye and fix spelling. 
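The new helpers are just a page-stride walk that touches one byte per page, plus a check for the last byte landing in one more page. The same walk in plain userspace C, with direct loads standing in for __get_user() (illustrative only; PG_SIZE is a stand-in for the kernel's PAGE_SIZE):

	#include <stddef.h>

	#define PG_SIZE 4096UL
	#define PG_MASK (~(PG_SIZE - 1))

	/* Touch one byte in every page of [uaddr, uaddr + size), mirroring
	 * fault_in_multipages_readable(): stride a page at a time, then
	 * touch the final byte if it lives in a page the loop never hit. */
	static void touch_readable(const char *uaddr, size_t size)
	{
		const volatile char *p = uaddr;
		const volatile char *end = uaddr + size - 1;
		char c;

		if (size == 0)
			return;

		while (p <= end) {
			c = *p;			/* would be __get_user() */
			p += PG_SIZE;
		}

		/* If p overshot into the same page that holds the final
		 * byte, the loop never touched that page; hit it now. */
		if (((unsigned long)p & PG_MASK) ==
		    ((unsigned long)end & PG_MASK))
			c = *end;
		(void)c;
	}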
Acked-by: Andrew Morton Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 6 +-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 +- include/linux/pagemap.h | 64 +++++++++++++++++++++++++++++- 3 files changed, 66 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e9cac478cced..6dc832902f53 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -416,7 +416,7 @@ i915_gem_shmem_pread(struct drm_device *dev, mutex_unlock(&dev->struct_mutex); if (!prefaulted) { - ret = fault_in_pages_writeable(user_data, remain); + ret = fault_in_multipages_writeable(user_data, remain); /* Userspace is tricking us, but we've already clobbered * its pages with the prefault and promised to write the * data up to the first fault. Hence ignore any errors @@ -809,8 +809,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, args->size)) return -EFAULT; - ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr, - args->size); + ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr, + args->size); if (ret) return -EFAULT; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index eb85860001ec..8e0b686d3afb 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -997,7 +997,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, if (!access_ok(VERIFY_WRITE, ptr, length)) return -EFAULT; - if (fault_in_pages_readable(ptr, length)) + if (fault_in_multipages_readable(ptr, length)) return -EFAULT; } diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index cfaaa6949b8b..c93a9a9bcd35 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -426,7 +426,7 @@ static inline int fault_in_pages_writeable(char __user *uaddr, int size) */ if (((unsigned long)uaddr & PAGE_MASK) != ((unsigned long)end & PAGE_MASK)) - ret = __put_user(0, end); + ret = __put_user(0, end); } return ret; } @@ -445,13 +445,73 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size) if (((unsigned long)uaddr & PAGE_MASK) != ((unsigned long)end & PAGE_MASK)) { - ret = __get_user(c, end); + ret = __get_user(c, end); (void)c; } } return ret; } +/* + * Multipage variants of the above prefault helpers, useful if more than + * PAGE_SIZE of data needs to be prefaulted. These are separate from the above + * functions (which only handle up to PAGE_SIZE) to avoid clobbering the + * filemap.c hotpaths. + */ +static inline int fault_in_multipages_writeable(char __user *uaddr, int size) +{ + int ret; + const char __user *end = uaddr + size - 1; + + if (unlikely(size == 0)) + return 0; + + /* + * Writing zeroes into userspace here is OK, because we know that if + * the zero gets there, we'll be overwriting it. + */ + while (uaddr <= end) { + ret = __put_user(0, uaddr); + if (ret != 0) + return ret; + uaddr += PAGE_SIZE; + } + + /* Check whether the range spilled into the next page. 
*/ + if (((unsigned long)uaddr & PAGE_MASK) == + ((unsigned long)end & PAGE_MASK)) + ret = __put_user(0, end); + + return ret; +} + +static inline int fault_in_multipages_readable(const char __user *uaddr, + int size) +{ + volatile char c; + int ret; + const char __user *end = uaddr + size - 1; + + if (unlikely(size == 0)) + return 0; + + while (uaddr <= end) { + ret = __get_user(c, uaddr); + if (ret != 0) + return ret; + uaddr += PAGE_SIZE; + } + + /* Check whether the range spilled into the next page. */ + if (((unsigned long)uaddr & PAGE_MASK) == + ((unsigned long)end & PAGE_MASK)) { + ret = __get_user(c, end); + (void)c; + } + + return ret; +} + int add_to_page_cache_locked(struct page *page, struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); int add_to_page_cache_lru(struct page *page, struct address_space *mapping, -- cgit v1.2.3-59-g8ed1b From 23c18c71da801fb7ce11acc3041e4f10a1bb5cb0 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 25 Mar 2012 19:47:42 +0200 Subject: drm/i915: fixup in-line clflushing on bit17 swizzled bos The issue is that with inline clflushing the clflushing isn't properly swizzled. Fix this by - always clflushing entire 128 byte chunks and - unconditionally flush before writes when swizzling a given page. We could be clever and check whether we pwrite a partial 128 byte chunk instead of a partial cacheline, but I've figured that's not worth it. Now the usual approach is to fold this into the original patch series, but I've opted against this because - this fixes a corner case only very old userspace relies on and - I'd like to not invalidate all the testing the pwrite rewrite has gotten. This fixes the regression notice by tests/gem_tiled_partial_prite_pread from i-g-t. Unfortunately it doesn't fix the issues with partial pwrites to tiled buffers on bit17 swizzling machines. But that is also broken without the pwrite patches, so likely a different issue (or a problem with the testcase). v2: Simplify the patch by dropping the overly clever partial write logic for swizzled pages. Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 39 ++++++++++++++++++++++++++++++++------- 1 file changed, 32 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6dc832902f53..c964dfbdb577 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -313,6 +313,28 @@ shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length, return ret; } +static void +shmem_clflush_swizzled_range(char *addr, unsigned long length, + bool swizzled) +{ + if (swizzled) { + unsigned long start = (unsigned long) addr; + unsigned long end = (unsigned long) addr + length; + + /* For swizzling simply ensure that we always flush both + * channels. Lame, but simple and it works. Swizzled + * pwrite/pread is far from a hotpath - current userspace + * doesn't use it at all. */ + start = round_down(start, 128); + end = round_up(end, 128); + + drm_clflush_virt_range((void *)start, end - start); + } else { + drm_clflush_virt_range(addr, length); + } + +} + /* Only difference to the fast-path function is that this can handle bit17 * and uses non-atomic copy and kmap functions. 
*/ static int @@ -325,8 +347,9 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length, vaddr = kmap(page); if (needs_clflush) - drm_clflush_virt_range(vaddr + shmem_page_offset, - page_length); + shmem_clflush_swizzled_range(vaddr + shmem_page_offset, + page_length, + page_do_bit17_swizzling); if (page_do_bit17_swizzling) ret = __copy_to_user_swizzled(user_data, @@ -637,9 +660,10 @@ shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length, int ret; vaddr = kmap(page); - if (needs_clflush_before) - drm_clflush_virt_range(vaddr + shmem_page_offset, - page_length); + if (needs_clflush_before || page_do_bit17_swizzling) + shmem_clflush_swizzled_range(vaddr + shmem_page_offset, + page_length, + page_do_bit17_swizzling); if (page_do_bit17_swizzling) ret = __copy_from_user_swizzled(vaddr, shmem_page_offset, user_data, @@ -649,8 +673,9 @@ shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length, user_data, page_length); if (needs_clflush_after) - drm_clflush_virt_range(vaddr + shmem_page_offset, - page_length); + shmem_clflush_swizzled_range(vaddr + shmem_page_offset, + page_length, + page_do_bit17_swizzling); kunmap(page); return ret; -- cgit v1.2.3-59-g8ed1b From e7e58eb5c0d1d7d1a42fcb2b5a247d28ec08b47e Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 25 Mar 2012 19:47:43 +0200 Subject: drm/i915: mark pwrite/pread slowpaths with unlikely Beside helping the compiler untangle this maze they double-up as documentation for which parts of the code aren't performance-critical but just around to keep old (but already dead-slow) userspace from breaking. Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c964dfbdb577..b8c6248b25c6 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -298,7 +298,7 @@ shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length, char *vaddr; int ret; - if (page_do_bit17_swizzling) + if (unlikely(page_do_bit17_swizzling)) return -EINVAL; vaddr = kmap_atomic(page); @@ -317,7 +317,7 @@ static void shmem_clflush_swizzled_range(char *addr, unsigned long length, bool swizzled) { - if (swizzled) { + if (unlikely(swizzled)) { unsigned long start = (unsigned long) addr; unsigned long end = (unsigned long) addr + length; @@ -629,7 +629,7 @@ shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length, char *vaddr; int ret; - if (page_do_bit17_swizzling) + if (unlikely(page_do_bit17_swizzling)) return -EINVAL; vaddr = kmap_atomic(page); @@ -660,7 +660,7 @@ shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length, int ret; vaddr = kmap(page); - if (needs_clflush_before || page_do_bit17_swizzling) + if (unlikely(needs_clflush_before || page_do_bit17_swizzling)) shmem_clflush_swizzled_range(vaddr + shmem_page_offset, page_length, page_do_bit17_swizzling); -- cgit v1.2.3-59-g8ed1b From 924a93edc96b7f7e08a0af7a2f9afe4827bab103 Mon Sep 17 00:00:00 2001 From: Daniel Kurtz Date: Wed, 28 Mar 2012 02:36:10 +0800 Subject: drm/i915/intel_i2c: refactor gmbus_xfer Split out gmbus_xfer_read/write() helper functions. 
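Both new helpers funnel the payload through the single 32-bit GMBUS3 data register, four bytes per access, least-significant byte first. The pack/unpack idiom on its own, as a standalone round-trip (plain C; a local variable stands in for the register):

	#include <stdint.h>
	#include <stdio.h>

	/* Pack up to four bytes into one word, LSB first, the way
	 * gmbus_xfer_write() fills GMBUS3. */
	static uint32_t pack4(const uint8_t **buf, uint16_t *len)
	{
		uint32_t val = 0, loop = 0;

		do {
			val |= (uint32_t)*(*buf)++ << (8 * loop);
		} while (--(*len) && ++loop < 4);

		return val;
	}

	/* Unpack a word back into up to four bytes, the way
	 * gmbus_xfer_read() drains GMBUS3. */
	static void unpack4(uint32_t val, uint8_t **buf, uint16_t *len)
	{
		uint32_t loop = 0;

		do {
			*(*buf)++ = val & 0xff;
			val >>= 8;
		} while (--(*len) && ++loop < 4);
	}

	int main(void)
	{
		uint8_t msg[6] = { 1, 2, 3, 4, 5, 6 }, out[6] = { 0 };
		const uint8_t *src = msg;
		uint8_t *dst = out;
		uint16_t wlen = sizeof(msg), rlen = sizeof(out);

		while (wlen)
			unpack4(pack4(&src, &wlen), &dst, &rlen);

		printf("%d ... %d\n", out[0], out[5]);	/* 1 ... 6 */
		return 0;
	}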
Signed-off-by: Daniel Kurtz Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_i2c.c | 151 ++++++++++++++++++++++++--------------- 1 file changed, 92 insertions(+), 59 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 0713cc289119..30675ce18056 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -197,6 +197,82 @@ intel_i2c_quirk_xfer(struct intel_gmbus *bus, return ret; } +static int +gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, + bool last) +{ + int reg_offset = dev_priv->gpio_mmio_base; + u16 len = msg->len; + u8 *buf = msg->buf; + + I915_WRITE(GMBUS1 + reg_offset, + GMBUS_CYCLE_WAIT | + (last ? GMBUS_CYCLE_STOP : 0) | + (len << GMBUS_BYTE_COUNT_SHIFT) | + (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) | + GMBUS_SLAVE_READ | GMBUS_SW_RDY); + POSTING_READ(GMBUS2 + reg_offset); + do { + u32 val, loop = 0; + + if (wait_for(I915_READ(GMBUS2 + reg_offset) & + (GMBUS_SATOER | GMBUS_HW_RDY), + 50)) + return -ETIMEDOUT; + if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) + return -ENXIO; + + val = I915_READ(GMBUS3 + reg_offset); + do { + *buf++ = val & 0xff; + val >>= 8; + } while (--len && ++loop < 4); + } while (len); + + return 0; +} + +static int +gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg, + bool last) +{ + int reg_offset = dev_priv->gpio_mmio_base; + u16 len = msg->len; + u8 *buf = msg->buf; + u32 val, loop; + + val = loop = 0; + do { + val |= *buf++ << (8 * loop); + } while (--len && ++loop < 4); + + I915_WRITE(GMBUS3 + reg_offset, val); + I915_WRITE(GMBUS1 + reg_offset, + GMBUS_CYCLE_WAIT | + (last ? GMBUS_CYCLE_STOP : 0) | + (msg->len << GMBUS_BYTE_COUNT_SHIFT) | + (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) | + GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); + POSTING_READ(GMBUS2 + reg_offset); + while (len) { + if (wait_for(I915_READ(GMBUS2 + reg_offset) & + (GMBUS_SATOER | GMBUS_HW_RDY), + 50)) + return -ETIMEDOUT; + if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) + return -ENXIO; + + val = loop = 0; + do { + val |= *buf++ << (8 * loop); + } while (--len && ++loop < 4); + + I915_WRITE(GMBUS3 + reg_offset, val); + POSTING_READ(GMBUS2 + reg_offset); + } + return 0; +} + static int gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, @@ -220,65 +296,22 @@ gmbus_xfer(struct i2c_adapter *adapter, I915_WRITE(GMBUS0 + reg_offset, bus->reg0); for (i = 0; i < num; i++) { - u16 len = msgs[i].len; - u8 *buf = msgs[i].buf; - - if (msgs[i].flags & I2C_M_RD) { - I915_WRITE(GMBUS1 + reg_offset, - GMBUS_CYCLE_WAIT | - (i + 1 == num ? GMBUS_CYCLE_STOP : 0) | - (len << GMBUS_BYTE_COUNT_SHIFT) | - (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) | - GMBUS_SLAVE_READ | GMBUS_SW_RDY); - POSTING_READ(GMBUS2+reg_offset); - do { - u32 val, loop = 0; - - if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50)) - goto timeout; - if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) - goto clear_err; - - val = I915_READ(GMBUS3 + reg_offset); - do { - *buf++ = val & 0xff; - val >>= 8; - } while (--len && ++loop < 4); - } while (len); - } else { - u32 val, loop; - - val = loop = 0; - do { - val |= *buf++ << (8 * loop); - } while (--len && ++loop < 4); - - I915_WRITE(GMBUS3 + reg_offset, val); - I915_WRITE(GMBUS1 + reg_offset, - GMBUS_CYCLE_WAIT | - (i + 1 == num ? 
GMBUS_CYCLE_STOP : 0) | - (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) | - (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) | - GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); - POSTING_READ(GMBUS2+reg_offset); - - while (len) { - if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50)) - goto timeout; - if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) - goto clear_err; - - val = loop = 0; - do { - val |= *buf++ << (8 * loop); - } while (--len && ++loop < 4); - - I915_WRITE(GMBUS3 + reg_offset, val); - POSTING_READ(GMBUS2+reg_offset); - } - } - - if (i + 1 < num && wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50)) + bool last = i + 1 == num; + + if (msgs[i].flags & I2C_M_RD) + ret = gmbus_xfer_read(dev_priv, &msgs[i], last); + else + ret = gmbus_xfer_write(dev_priv, &msgs[i], last); + + if (ret == -ETIMEDOUT) + goto timeout; + if (ret == -ENXIO) + goto clear_err; + + if (!last && + wait_for(I915_READ(GMBUS2 + reg_offset) & + (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), + 50)) goto timeout; if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) goto clear_err; -- cgit v1.2.3-59-g8ed1b From 874e3cc90ba2fe1420945c1ac93781aa8dd33b53 Mon Sep 17 00:00:00 2001 From: Daniel Kurtz Date: Wed, 28 Mar 2012 02:36:11 +0800 Subject: drm/i915/intel_i2c: cleanup error messages and comments Signed-off-by: Daniel Kurtz Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_i2c.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 30675ce18056..e6c090bff9f2 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -333,17 +333,20 @@ done: * till then let it sleep. */ if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, 10)) - DRM_INFO("GMBUS timed out waiting for idle\n"); + DRM_INFO("GMBUS [%s] timed out waiting for idle\n", + bus->adapter.name); I915_WRITE(GMBUS0 + reg_offset, 0); ret = i; goto out; timeout: - DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n", - bus->reg0 & 0xff, bus->adapter.name); + DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n", + bus->adapter.name, bus->reg0 & 0xff); I915_WRITE(GMBUS0 + reg_offset, 0); - /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */ + /* Hardware may not support GMBUS over these pins? + * Try GPIO bitbanging instead. + */ if (!bus->has_gpio) { ret = -EIO; } else { -- cgit v1.2.3-59-g8ed1b From e4fd17af6156b46fae3f9115830e1a35a62cd8e7 Mon Sep 17 00:00:00 2001 From: Daniel Kurtz Date: Wed, 28 Mar 2012 02:36:12 +0800 Subject: drm/i915/intel_i2c: assign HDMI port D to pin pair 6 According to i915 documentation [1], "Port D" (DP/HDMI Port D) is actually gmbus pin pair 6 (gmbus0.2:0 == 110b GPIOF), not 7 (111b). Pin pair 7 is a reserved pair. [1] Documentation for [DevSNB+] and [DevIBX], as found on http://intellinuxgraphics.org: [DevSNB+]: http://intellinuxgraphics.org/documentation/SNB/IHD_OS_Vol3_Part3.pdf Section 2.2.2 lists the 6 gmbus ports (gpio pin pairs): [ 5: HDMI/DPD, 4: HDMIB, 3: HDMI/DPC, 2: LVDS, 1: SSC, 0: VGA ] 2.2.2.1 lists the GPIO registers to control these 6 ports. 2.2.3.1 lists the mapping between 5 of these gmbus ports and the 3 Pin_Pair_Select bits (of the GMBUS0 register). This table is missing HDMIB (port 101). [DevIBX]: http://intellinuxgraphics.org/IHD_OS_Vol3_Part3r2.pdf Section 2.2.2 lists the same 6 gmbus ports plus two 'reserved' gpio ports. 
2.2.2.1 lists 8 GPIO registers... however, it says the size of the block is 6x32, which implies that those 2 reserved GPIO registers (GPIO_6 & GPIO_7) don't actually exist (or are irrelevant). 2.2.3.1 lists the mapping between the 6 named gmbus ports and the 3 Pin_Pair_Select bits (of the GMBUS0 register). This table has HDMIB. Note: the "reserved" and "disabled" pairs do not actually map to a physical pair of pins, nor GPIO regs and shouldn't be initialized or used. Fixing this is left for a later patch. This bug had not been noticed earlier for two reasons: 1) Until recently, "gmbus" mode was disabled - all transfers actually used "bit-bang" mode on GPIO port 5 (the "HDMI/DPD CTLDATA/CLK" pair), at register 0x5024 (defined as GPIOF i915_reg.h). Since this is the correct pair of pins for HDMI1, transfers succeed. 2) Even if gmbus mode is re-enabled, the first attempted transaction will fail because it tries to use the wrong ("Reserved") pin pair. However, the driver immediately falls back again to the bit-bang method, which correctly uses GPIOF, so again, transfers succeed. However, if gmbus mode is re-enabled and the GPIO fall-back mode is disabled, then reading an attached monitor's EDID fail. Signed-off-by: Daniel Kurtz Reviewed-by: Daniel Vetter Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 6 +++--- drivers/gpu/drm/i915/intel_i2c.c | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f3609f260e5f..accd8ee48f9d 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -742,9 +742,9 @@ #define GMBUS_PORT_PANEL 3 #define GMBUS_PORT_DPC 4 /* HDMIC */ #define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */ - /* 6 reserved */ -#define GMBUS_PORT_DPD 7 /* HDMID */ -#define GMBUS_NUM_PORTS 8 +#define GMBUS_PORT_DPD 6 /* HDMID */ +#define GMBUS_PORT_RESERVED 7 /* 7 reserved */ +#define GMBUS_NUM_PORTS 8 #define GMBUS1 0x5104 /* command/status */ #define GMBUS_SW_CLR_INT (1<<31) #define GMBUS_SW_RDY (1<<30) diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index e6c090bff9f2..9347281633f5 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -148,8 +148,8 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin) GPIOC, GPIOD, GPIOE, - 0, GPIOF, + 0, }; struct i2c_algo_bit_data *algo; @@ -385,8 +385,8 @@ int intel_setup_gmbus(struct drm_device *dev) "panel", "dpc", "dpb", - "reserved", "dpd", + "reserved", }; struct drm_i915_private *dev_priv = dev->dev_private; int ret, i; -- cgit v1.2.3-59-g8ed1b From 489fbc107f5fb041d598f30f367ea3ca714ff133 Mon Sep 17 00:00:00 2001 From: Daniel Kurtz Date: Wed, 28 Mar 2012 02:36:13 +0800 Subject: drm/i915/intel_i2c: use i2c pre/post_xfer functions to setup gpio xfers Instead of rolling our own custom quirk_xfer function, use the bit_algo pre_xfer and post_xfer functions to setup and teardown bit-banged i2c transactions. 
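The bit-banging core (i2c-algo-bit) invokes these hooks itself around every master_xfer, so the driver no longer has to wrap the transfer; it only wires the callbacks up when the adapter is set up. Shape of that setup, condensed from the diff below (not verbatim):

	algo->setsda	= set_data;
	algo->setscl	= set_clock;
	algo->getsda	= get_data;
	algo->getscl	= get_clock;
	/* Run by i2c-algo-bit before and after each transfer, replacing
	 * the old intel_i2c_quirk_xfer() wrapper around master_xfer. */
	algo->pre_xfer	= intel_gpio_pre_xfer;
	algo->post_xfer	= intel_gpio_post_xfer;
	algo->udelay	= I2C_RISEFALL_TIME;
	algo->timeout	= usecs_to_jiffies(2200);
	algo->data	= bus;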
Signed-off-by: Daniel Kurtz Reviewed-by: Daniel Vetter Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_i2c.c | 60 +++++++++++++++++++++++----------------- 1 file changed, 34 insertions(+), 26 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 9347281633f5..1bb63624a793 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -137,6 +137,35 @@ static void set_data(void *data, int state_high) POSTING_READ(bus->gpio_reg); } +static int +intel_gpio_pre_xfer(struct i2c_adapter *adapter) +{ + struct intel_gmbus *bus = container_of(adapter, + struct intel_gmbus, + adapter); + struct drm_i915_private *dev_priv = bus->dev_priv; + + intel_i2c_reset(dev_priv->dev); + intel_i2c_quirk_set(dev_priv, true); + set_data(bus, 1); + set_clock(bus, 1); + udelay(I2C_RISEFALL_TIME); + return 0; +} + +static void +intel_gpio_post_xfer(struct i2c_adapter *adapter) +{ + struct intel_gmbus *bus = container_of(adapter, + struct intel_gmbus, + adapter); + struct drm_i915_private *dev_priv = bus->dev_priv; + + set_data(bus, 1); + set_clock(bus, 1); + intel_i2c_quirk_set(dev_priv, false); +} + static bool intel_gpio_setup(struct intel_gmbus *bus, u32 pin) { @@ -166,6 +195,8 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin) algo->setscl = set_clock; algo->getsda = get_data; algo->getscl = get_clock; + algo->pre_xfer = intel_gpio_pre_xfer; + algo->post_xfer = intel_gpio_post_xfer; algo->udelay = I2C_RISEFALL_TIME; algo->timeout = usecs_to_jiffies(2200); algo->data = bus; @@ -173,30 +204,6 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin) return true; } -static int -intel_i2c_quirk_xfer(struct intel_gmbus *bus, - struct i2c_msg *msgs, - int num) -{ - struct drm_i915_private *dev_priv = bus->dev_priv; - int ret; - - intel_i2c_reset(dev_priv->dev); - - intel_i2c_quirk_set(dev_priv, true); - set_data(bus, 1); - set_clock(bus, 1); - udelay(I2C_RISEFALL_TIME); - - ret = i2c_bit_algo.master_xfer(&bus->adapter, msgs, num); - - set_data(bus, 1); - set_clock(bus, 1); - intel_i2c_quirk_set(dev_priv, false); - - return ret; -} - static int gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, bool last) @@ -287,7 +294,7 @@ gmbus_xfer(struct i2c_adapter *adapter, mutex_lock(&dev_priv->gmbus_mutex); if (bus->force_bit) { - ret = intel_i2c_quirk_xfer(bus, msgs, num); + ret = i2c_bit_algo.master_xfer(adapter, msgs, num); goto out; } @@ -351,8 +358,9 @@ timeout: ret = -EIO; } else { bus->force_bit = true; - ret = intel_i2c_quirk_xfer(bus, msgs, num); + ret = i2c_bit_algo.master_xfer(adapter, msgs, num); } + out: mutex_unlock(&dev_priv->gmbus_mutex); return ret; -- cgit v1.2.3-59-g8ed1b From 3bd7d90938f1fe77de5991dc4b727843c4980b2a Mon Sep 17 00:00:00 2001 From: Daniel Kurtz Date: Wed, 28 Mar 2012 02:36:14 +0800 Subject: drm/i915/intel_i2c: refactor using intel_gmbus_get_adapter Instead of letting other modules directly access the ->gmbus array, introduce intel_gmbus_get_adapter() for looking up an i2c_adapter for a given gmbus port identifier. This will enable later refactoring of the gmbus port list. Note: Before requesting an adapter for a given gmbus port number, the driver must first check its validity using i2c_intel_gmbus_is_port_valid(). If this check fails, a call to intel_gmbus_get_adapter() will WARN_ON and return NULL. This is relevant for parts of the driver that read a port from VBIOS, which might be improperly initialized and contain an invalid port. 
In these cases, the driver must fall back to using a safer default port. Signed-off-by: Daniel Kurtz Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 7 +++++++ drivers/gpu/drm/i915/intel_bios.c | 4 ++-- drivers/gpu/drm/i915/intel_crt.c | 14 ++++++++------ drivers/gpu/drm/i915/intel_dvo.c | 6 +++--- drivers/gpu/drm/i915/intel_hdmi.c | 9 ++++++--- drivers/gpu/drm/i915/intel_i2c.c | 8 ++++++++ drivers/gpu/drm/i915/intel_lvds.c | 7 ++++--- drivers/gpu/drm/i915/intel_modes.c | 3 ++- drivers/gpu/drm/i915/intel_sdvo.c | 9 +++++---- 9 files changed, 45 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e11fcb09aea2..44e6430af3d4 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1342,6 +1342,13 @@ extern int i915_restore_state(struct drm_device *dev); /* intel_i2c.c */ extern int intel_setup_gmbus(struct drm_device *dev); extern void intel_teardown_gmbus(struct drm_device *dev); +extern inline bool intel_gmbus_is_port_valid(unsigned port) +{ + return (port >= GMBUS_PORT_DISABLED && port <= GMBUS_PORT_RESERVED); +} + +extern struct i2c_adapter *intel_gmbus_get_adapter( + struct drm_i915_private *dev_priv, unsigned port); extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index e4317da848d5..871aa272bfef 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -372,11 +372,11 @@ parse_general_definitions(struct drm_i915_private *dev_priv, if (block_size >= sizeof(*general)) { int bus_pin = general->crt_ddc_gmbus_pin; DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin); - if (bus_pin >= 1 && bus_pin <= 6) + if (intel_gmbus_is_port_valid(bus_pin)) dev_priv->crt_ddc_pin = bus_pin; } else { DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n", - block_size); + block_size); } } } diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 4d3d736a4f56..d54d2327a1b3 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -278,9 +278,10 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) { struct edid *edid; bool is_digital = false; + struct i2c_adapter *i2c; - edid = drm_get_edid(connector, - &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); + i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin); + edid = drm_get_edid(connector, i2c); /* * This may be a DVI-I connector with a shared DDC * link between analog and digital outputs, so we @@ -483,15 +484,16 @@ static int intel_crt_get_modes(struct drm_connector *connector) struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; int ret; + struct i2c_adapter *i2c; - ret = intel_ddc_get_modes(connector, - &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); + i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin); + ret = intel_ddc_get_modes(connector, i2c); if (ret || !IS_G4X(dev)) return ret; /* Try to probe digital port for output in DVI-I -> VGA mode. 
*/ - return intel_ddc_get_modes(connector, - &dev_priv->gmbus[GMBUS_PORT_DPB].adapter); + i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB); + return intel_ddc_get_modes(connector, i2c); } static int intel_crt_set_property(struct drm_connector *connector, diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 020a7d7f744d..60ba50b956f2 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -243,7 +243,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector) * that's not the case. */ intel_ddc_get_modes(connector, - &dev_priv->gmbus[GMBUS_PORT_DPC].adapter); + intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPC)); if (!list_empty(&connector->probed_modes)) return 1; @@ -375,7 +375,7 @@ void intel_dvo_init(struct drm_device *dev) * special cases, but otherwise default to what's defined * in the spec. */ - if (dvo->gpio != 0) + if (intel_gmbus_is_port_valid(dvo->gpio)) gpio = dvo->gpio; else if (dvo->type == INTEL_DVO_CHIP_LVDS) gpio = GMBUS_PORT_SSC; @@ -386,7 +386,7 @@ void intel_dvo_init(struct drm_device *dev) * It appears that everything is on GPIOE except for panels * on i830 laptops, which are on GPIOB (DVOA). */ - i2c = &dev_priv->gmbus[gpio].adapter; + i2c = intel_gmbus_get_adapter(dev_priv, gpio); intel_dvo->dev = *dvo; if (!dvo->dev_ops->init(&intel_dvo->dev, i2c)) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index cae3e5f17a49..1d00f61adce6 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -334,7 +334,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) intel_hdmi->has_hdmi_sink = false; intel_hdmi->has_audio = false; edid = drm_get_edid(connector, - &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); + intel_gmbus_get_adapter(dev_priv, + intel_hdmi->ddc_bus)); if (edid) { if (edid->input & DRM_EDID_INPUT_DIGITAL) { @@ -367,7 +368,8 @@ static int intel_hdmi_get_modes(struct drm_connector *connector) */ return intel_ddc_get_modes(connector, - &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); + intel_gmbus_get_adapter(dev_priv, + intel_hdmi->ddc_bus)); } static bool @@ -379,7 +381,8 @@ intel_hdmi_detect_audio(struct drm_connector *connector) bool has_audio = false; edid = drm_get_edid(connector, - &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); + intel_gmbus_get_adapter(dev_priv, + intel_hdmi->ddc_bus)); if (edid) { if (edid->input & DRM_EDID_INPUT_DIGITAL) has_audio = drm_detect_monitor_audio(edid); diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 1bb63624a793..2f65d01340ff 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -449,6 +449,14 @@ err: return ret; } +struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, + unsigned port) +{ + WARN_ON(!intel_gmbus_is_port_valid(port)); + return (intel_gmbus_is_port_valid(port)) ? 
+ &dev_priv->gmbus[port].adapter : NULL; +} + void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed) { struct intel_gmbus *bus = to_intel_gmbus(adapter); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index c5c0973af8a1..4f92a11fc29a 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -837,8 +837,8 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev, child->device_type != DEVICE_TYPE_LFP) continue; - if (child->i2c_pin) - *i2c_pin = child->i2c_pin; + if (intel_gmbus_is_port_valid(child->i2c_pin)) + *i2c_pin = child->i2c_pin; /* However, we cannot trust the BIOS writers to populate * the VBT correctly. Since LVDS requires additional @@ -979,7 +979,8 @@ bool intel_lvds_init(struct drm_device *dev) * preferred mode is the right one. */ intel_lvds->edid = drm_get_edid(connector, - &dev_priv->gmbus[pin].adapter); + intel_gmbus_get_adapter(dev_priv, + pin)); if (intel_lvds->edid) { if (drm_add_edid_modes(connector, intel_lvds->edid)) { diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index 2978a3f61b58..cc682a02fa2f 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c @@ -55,7 +55,8 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus) } }; - return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 2) == 2; + return i2c_transfer(intel_gmbus_get_adapter(dev_priv, ddc_bus), + msgs, 2) == 2; } /** diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 70fb275cd205..830c8b5d90b8 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1254,7 +1254,8 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector) struct drm_i915_private *dev_priv = connector->dev->dev_private; return drm_get_edid(connector, - &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); + intel_gmbus_get_adapter(dev_priv, + dev_priv->crt_ddc_pin)); } enum drm_connector_status @@ -1922,12 +1923,12 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv, if (mapping->initialized) pin = mapping->i2c_pin; - if (pin < GMBUS_NUM_PORTS) { - sdvo->i2c = &dev_priv->gmbus[pin].adapter; + if (intel_gmbus_is_port_valid(pin)) { + sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin); intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ); intel_gmbus_force_bit(sdvo->i2c, true); } else { - sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter; + sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB); } } -- cgit v1.2.3-59-g8ed1b From 2ed06c93a1fce057808894d73167aae03c76deaf Mon Sep 17 00:00:00 2001 From: Daniel Kurtz Date: Wed, 28 Mar 2012 02:36:15 +0800 Subject: drm/i915/intel_i2c: gmbus disabled and reserved ports are invalid There is no GMBUS "disabled" port 0, nor "reserved" port 7. For the other 6 ports there is a fixed 1:1 mapping between pin pairs and gmbus ports, which means every real gmbus port has a gpio pin. Given these realizations, clean up gmbus initialization. Tested on Sandybridge (gen 6, PCH == CougarPoint) hardware. 
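After this cleanup the valid pin pairs are exactly 1..6 (ssc..dpd); 0 ("disabled") and 7 ("reserved") no longer get an adapter, and the +1/-1 translation between pin-pair number and array index happens only at the lookup boundaries. A standalone sketch of that mapping (illustrative; the port values follow i915_reg.h):

	#include <stdio.h>

	enum {
		GMBUS_PORT_SSC = 1,		/* first valid pin pair */
		GMBUS_PORT_VGADDC,
		GMBUS_PORT_PANEL,
		GMBUS_PORT_DPC,
		GMBUS_PORT_DPB,
		GMBUS_PORT_DPD,			/* last valid pin pair */
		GMBUS_NUM_PORTS = GMBUS_PORT_DPD - GMBUS_PORT_SSC + 1,
	};

	static const char *names[GMBUS_NUM_PORTS] = {
		"ssc", "vga", "panel", "dpc", "dpb", "dpd",
	};

	static int port_valid(unsigned int port)
	{
		return port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD;
	}

	int main(void)
	{
		unsigned int port;

		for (port = 0; port <= 7; port++) {
			if (!port_valid(port)) {
				printf("port %u: no adapter\n", port);
				continue;
			}
			/* -1 maps the pin-pair number to the gmbus[] index */
			printf("port %u -> gmbus[%u] (%s)\n",
			       port, port - 1, names[port - 1]);
		}
		return 0;
	}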
Signed-off-by: Daniel Kurtz Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 3 +- drivers/gpu/drm/i915/i915_reg.h | 2 +- drivers/gpu/drm/i915/intel_i2c.c | 70 ++++++++++++++++------------------------ 3 files changed, 29 insertions(+), 46 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 44e6430af3d4..c5ad7b96b065 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -303,7 +303,6 @@ struct intel_fbc_work; struct intel_gmbus { struct i2c_adapter adapter; bool force_bit; - bool has_gpio; u32 reg0; u32 gpio_reg; struct i2c_algo_bit_data bit_algo; @@ -1344,7 +1343,7 @@ extern int intel_setup_gmbus(struct drm_device *dev); extern void intel_teardown_gmbus(struct drm_device *dev); extern inline bool intel_gmbus_is_port_valid(unsigned port) { - return (port >= GMBUS_PORT_DISABLED && port <= GMBUS_PORT_RESERVED); + return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD); } extern struct i2c_adapter *intel_gmbus_get_adapter( diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index accd8ee48f9d..a8d218ca7d1d 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -744,7 +744,7 @@ #define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */ #define GMBUS_PORT_DPD 6 /* HDMID */ #define GMBUS_PORT_RESERVED 7 /* 7 reserved */ -#define GMBUS_NUM_PORTS 8 +#define GMBUS_NUM_PORTS (GMBUS_PORT_DPD - GMBUS_PORT_SSC + 1) #define GMBUS1 0x5104 /* command/status */ #define GMBUS_SW_CLR_INT (1<<31) #define GMBUS_SW_RDY (1<<30) diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 2f65d01340ff..dcde6f64777a 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -35,6 +35,20 @@ #include "i915_drm.h" #include "i915_drv.h" +struct gmbus_port { + const char *name; + int reg; +}; + +static const struct gmbus_port gmbus_ports[] = { + { "ssc", GPIOB }, + { "vga", GPIOA }, + { "panel", GPIOC }, + { "dpc", GPIOD }, + { "dpb", GPIOE }, + { "dpd", GPIOF }, +}; + /* Intel GPIO access functions */ #define I2C_RISEFALL_TIME 10 @@ -166,29 +180,16 @@ intel_gpio_post_xfer(struct i2c_adapter *adapter) intel_i2c_quirk_set(dev_priv, false); } -static bool +static void intel_gpio_setup(struct intel_gmbus *bus, u32 pin) { struct drm_i915_private *dev_priv = bus->dev_priv; - static const int map_pin_to_reg[] = { - 0, - GPIOB, - GPIOA, - GPIOC, - GPIOD, - GPIOE, - GPIOF, - 0, - }; struct i2c_algo_bit_data *algo; - if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin]) - return false; - algo = &bus->bit_algo; - bus->gpio_reg = map_pin_to_reg[pin]; - bus->gpio_reg += dev_priv->gpio_mmio_base; + /* -1 to map pin pair to gmbus index */ + bus->gpio_reg = dev_priv->gpio_mmio_base + gmbus_ports[pin - 1].reg; bus->adapter.algo_data = algo; algo->setsda = set_data; @@ -200,8 +201,6 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin) algo->udelay = I2C_RISEFALL_TIME; algo->timeout = usecs_to_jiffies(2200); algo->data = bus; - - return true; } static int @@ -351,15 +350,9 @@ timeout: bus->adapter.name, bus->reg0 & 0xff); I915_WRITE(GMBUS0 + reg_offset, 0); - /* Hardware may not support GMBUS over these pins? - * Try GPIO bitbanging instead. - */ - if (!bus->has_gpio) { - ret = -EIO; - } else { - bus->force_bit = true; - ret = i2c_bit_algo.master_xfer(adapter, msgs, num); - } + /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. 
*/ + bus->force_bit = true; + ret = i2c_bit_algo.master_xfer(adapter, msgs, num); out: mutex_unlock(&dev_priv->gmbus_mutex); @@ -386,16 +379,6 @@ static const struct i2c_algorithm gmbus_algorithm = { */ int intel_setup_gmbus(struct drm_device *dev) { - static const char *names[GMBUS_NUM_PORTS] = { - "disabled", - "ssc", - "vga", - "panel", - "dpc", - "dpb", - "dpd", - "reserved", - }; struct drm_i915_private *dev_priv = dev->dev_private; int ret, i; @@ -413,13 +396,14 @@ int intel_setup_gmbus(struct drm_device *dev) for (i = 0; i < GMBUS_NUM_PORTS; i++) { struct intel_gmbus *bus = &dev_priv->gmbus[i]; + u32 port = i + 1; /* +1 to map gmbus index to pin pair */ bus->adapter.owner = THIS_MODULE; bus->adapter.class = I2C_CLASS_DDC; snprintf(bus->adapter.name, sizeof(bus->adapter.name), "i915 gmbus %s", - names[i]); + gmbus_ports[i].name); bus->adapter.dev.parent = &dev->pdev->dev; bus->dev_priv = dev_priv; @@ -430,9 +414,9 @@ int intel_setup_gmbus(struct drm_device *dev) goto err; /* By default use a conservative clock rate */ - bus->reg0 = i | GMBUS_RATE_100KHZ; + bus->reg0 = port | GMBUS_RATE_100KHZ; - bus->has_gpio = intel_gpio_setup(bus, i); + intel_gpio_setup(bus, port); } intel_i2c_reset(dev_priv->dev); @@ -453,8 +437,9 @@ struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned port) { WARN_ON(!intel_gmbus_is_port_valid(port)); + /* -1 to map pin pair to gmbus index */ return (intel_gmbus_is_port_valid(port)) ? - &dev_priv->gmbus[port].adapter : NULL; + &dev_priv->gmbus[port - 1].adapter : NULL; } void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed) @@ -468,8 +453,7 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - if (bus->has_gpio) - bus->force_bit = force_bit; + bus->force_bit = force_bit; } void intel_teardown_gmbus(struct drm_device *dev) -- cgit v1.2.3-59-g8ed1b From f2c9677be3158c31ba19f527e2be0f7a519e19d1 Mon Sep 17 00:00:00 2001 From: Daniel Kurtz Date: Wed, 28 Mar 2012 02:36:16 +0800 Subject: drm/i915/intel_i2c: allocate gmbus array as part of drm_i915_private This memory is always allocated, and it is always a fixed size, so just allocate it along with the rest of the driver state. Signed-off-by: Daniel Kurtz Reviewed-by: Daniel Vetter Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/intel_i2c.c | 10 ---------- 2 files changed, 1 insertion(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c5ad7b96b065..6983b4bcbdea 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -326,7 +326,7 @@ typedef struct drm_i915_private { /** gt_lock is also taken in irq contexts. */ struct spinlock gt_lock; - struct intel_gmbus *gmbus; + struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; /** gmbus_mutex protects against concurrent usage of the single hw gmbus * controller on different i2c buses. 
*/ diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index dcde6f64777a..c12db7265893 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -387,11 +387,6 @@ int intel_setup_gmbus(struct drm_device *dev) else dev_priv->gpio_mmio_base = 0; - dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus), - GFP_KERNEL); - if (dev_priv->gmbus == NULL) - return -ENOMEM; - mutex_init(&dev_priv->gmbus_mutex); for (i = 0; i < GMBUS_NUM_PORTS; i++) { @@ -428,8 +423,6 @@ err: struct intel_gmbus *bus = &dev_priv->gmbus[i]; i2c_del_adapter(&bus->adapter); } - kfree(dev_priv->gmbus); - dev_priv->gmbus = NULL; return ret; } @@ -468,7 +461,4 @@ void intel_teardown_gmbus(struct drm_device *dev) struct intel_gmbus *bus = &dev_priv->gmbus[i]; i2c_del_adapter(&bus->adapter); } - - kfree(dev_priv->gmbus); - dev_priv->gmbus = NULL; } -- cgit v1.2.3-59-g8ed1b From 23f54beafee1c31c7f0127650ec2903d80b3dfeb Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 23 Mar 2012 17:38:49 +0000 Subject: drm/i915: Initialise GTT MTRR to -1 Fixes a regression from 9e984bc1 (drm/i915: Don't do MTRR setup if PAT is enabled) where we left the MTRR as 0 and so tried to free a MTRR we did not own during unload. Reported-and-tested-by: Ben Widawsky Reviewed-by: Adam Jackson Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 4f690374fffe..a9caf62b5dd2 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1923,6 +1923,8 @@ static void i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base, unsigned long size) { + dev_priv->mm.gtt_mtrr = -1; + #if defined(CONFIG_X86_PAT) if (cpu_has_pat) return; -- cgit v1.2.3-59-g8ed1b From 93e537a10f2c8c0f2e74409b6cb473fc221758fa Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 28 Mar 2012 23:11:26 +0200 Subject: drm/i915: split LVDS update code out of i9xx_crtc_mode_set Just to make things clearer and reduce the size of this monstrosity. v2: make sure 8xx PLL update function calls update_lvds too (Daniel) Signed-off-by: Jesse Barnes danvet: fixed patch ordering to avoid breaking bisect. Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 106 +++++++++++++++++++---------------- 1 file changed, 58 insertions(+), 48 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a7c2ddc96f0e..d6a394fdeb26 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5134,6 +5134,62 @@ static void i9xx_update_pll_dividers(struct drm_crtc *crtc, } } +static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + u32 temp, lvds_sync = 0; + + temp = I915_READ(LVDS); + temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; + if (pipe == 1) { + temp |= LVDS_PIPEB_SELECT; + } else { + temp &= ~LVDS_PIPEB_SELECT; + } + /* set the corresponsding LVDS_BORDER bit */ + temp |= dev_priv->lvds_border_bits; + /* Set the B0-B3 data pairs corresponding to whether we're going to + * set the DPLLs for dual-channel mode or not. 
+ */ + if (clock->p2 == 7) + temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; + else + temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); + + /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) + * appropriately here, but we need to look more thoroughly into how + * panels behave in the two modes. + */ + /* set the dithering flag on LVDS as needed */ + if (INTEL_INFO(dev)->gen >= 4) { + if (dev_priv->lvds_dither) + temp |= LVDS_ENABLE_DITHER; + else + temp &= ~LVDS_ENABLE_DITHER; + } + if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) + lvds_sync |= LVDS_HSYNC_POLARITY; + if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) + lvds_sync |= LVDS_VSYNC_POLARITY; + if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY)) + != lvds_sync) { + char flags[2] = "-+"; + DRM_INFO("Changing LVDS panel from " + "(%chsync, %cvsync) to (%chsync, %cvsync)\n", + flags[!(temp & LVDS_HSYNC_POLARITY)], + flags[!(temp & LVDS_VSYNC_POLARITY)], + flags[!(lvds_sync & LVDS_HSYNC_POLARITY)], + flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]); + temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); + temp |= lvds_sync; + } + I915_WRITE(LVDS, temp); +} + static int i9xx_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, @@ -5155,7 +5211,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, const intel_limit_t *limit; int ret; u32 temp; - u32 lvds_sync = 0; list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { if (encoder->base.crtc != crtc) @@ -5341,53 +5396,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, * This is an exception to the general rule that mode_set doesn't turn * things on. */ - if (is_lvds) { - temp = I915_READ(LVDS); - temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; - if (pipe == 1) { - temp |= LVDS_PIPEB_SELECT; - } else { - temp &= ~LVDS_PIPEB_SELECT; - } - /* set the corresponsding LVDS_BORDER bit */ - temp |= dev_priv->lvds_border_bits; - /* Set the B0-B3 data pairs corresponding to whether we're going to - * set the DPLLs for dual-channel mode or not. - */ - if (clock.p2 == 7) - temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; - else - temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); - - /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) - * appropriately here, but we need to look more thoroughly into how - * panels behave in the two modes. 
- */ - /* set the dithering flag on LVDS as needed */ - if (INTEL_INFO(dev)->gen >= 4) { - if (dev_priv->lvds_dither) - temp |= LVDS_ENABLE_DITHER; - else - temp &= ~LVDS_ENABLE_DITHER; - } - if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) - lvds_sync |= LVDS_HSYNC_POLARITY; - if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) - lvds_sync |= LVDS_VSYNC_POLARITY; - if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY)) - != lvds_sync) { - char flags[2] = "-+"; - DRM_INFO("Changing LVDS panel from " - "(%chsync, %cvsync) to (%chsync, %cvsync)\n", - flags[!(temp & LVDS_HSYNC_POLARITY)], - flags[!(temp & LVDS_VSYNC_POLARITY)], - flags[!(lvds_sync & LVDS_HSYNC_POLARITY)], - flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]); - temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); - temp |= lvds_sync; - } - I915_WRITE(LVDS, temp); - } + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) + intel_update_lvds(crtc, &clock, adjusted_mode); if (is_dp) { intel_dp_set_m_n(crtc, mode, adjusted_mode); -- cgit v1.2.3-59-g8ed1b From eb1cbe4848b01f9f073064377875bc7d71eb401b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 28 Mar 2012 23:12:16 +0200 Subject: drm/i915: split PLL update code out of i9xx_crtc_mode_set Makes it more readable and maintainable. ValleyView will add its own PLL update function in a later patch. v2: split LVDS bits out of this patch (Daniel) v3: fix dropped DP dithering hunk (Daniel) Acked-by: Ben Widawsky Signed-off-by: Jesse Barnes danvet: - fixup spurious whitespace change - reorder patches to fix bisect breakage Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 296 +++++++++++++++++++++-------------- 1 file changed, 179 insertions(+), 117 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d6a394fdeb26..fedded7669c5 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5190,6 +5190,177 @@ static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock, I915_WRITE(LVDS, temp); } +static void i9xx_update_pll(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + intel_clock_t *clock, intel_clock_t *reduced_clock, + int num_connectors) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + u32 dpll; + bool is_sdvo; + + is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) || + intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); + + dpll = DPLL_VGA_MODE_DIS; + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) + dpll |= DPLLB_MODE_LVDS; + else + dpll |= DPLLB_MODE_DAC_SERIAL; + if (is_sdvo) { + int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); + if (pixel_multiplier > 1) { + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; + } + dpll |= DPLL_DVO_HIGH_SPEED; + } + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) + dpll |= DPLL_DVO_HIGH_SPEED; + + /* compute bitmask from p1 value */ + if (IS_PINEVIEW(dev)) + dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; + else { + dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; + if (IS_G4X(dev) && reduced_clock) + dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; + } + switch (clock->p2) { + case 5: + dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; + break; + case 7: + dpll |= 
DPLLB_LVDS_P2_CLOCK_DIV_7; + break; + case 10: + dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; + break; + case 14: + dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; + break; + } + if (INTEL_INFO(dev)->gen >= 4) + dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); + + if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT)) + dpll |= PLL_REF_INPUT_TVCLKINBC; + else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT)) + /* XXX: just matching BIOS for now */ + /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ + dpll |= 3; + else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && + intel_panel_use_ssc(dev_priv) && num_connectors < 2) + dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; + else + dpll |= PLL_REF_INPUT_DREFCLK; + + dpll |= DPLL_VCO_ENABLE; + I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); + POSTING_READ(DPLL(pipe)); + udelay(150); + + /* The LVDS pin pair needs to be on before the DPLLs are enabled. + * This is an exception to the general rule that mode_set doesn't turn + * things on. + */ + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) + intel_update_lvds(crtc, clock, adjusted_mode); + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) + intel_dp_set_m_n(crtc, mode, adjusted_mode); + + I915_WRITE(DPLL(pipe), dpll); + + /* Wait for the clocks to stabilize. */ + POSTING_READ(DPLL(pipe)); + udelay(150); + + if (INTEL_INFO(dev)->gen >= 4) { + u32 temp = 0; + if (is_sdvo) { + temp = intel_mode_get_pixel_multiplier(adjusted_mode); + if (temp > 1) + temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; + else + temp = 0; + } + I915_WRITE(DPLL_MD(pipe), temp); + } else { + /* The pixel multiplier can only be updated once the + * DPLL is enabled and the clocks are stable. + * + * So write it again. + */ + I915_WRITE(DPLL(pipe), dpll); + } +} + +static void i8xx_update_pll(struct drm_crtc *crtc, + struct drm_display_mode *adjusted_mode, + intel_clock_t *clock, + int num_connectors) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + u32 dpll; + + dpll = DPLL_VGA_MODE_DIS; + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; + } else { + if (clock->p1 == 2) + dpll |= PLL_P1_DIVIDE_BY_TWO; + else + dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; + if (clock->p2 == 4) + dpll |= PLL_P2_DIVIDE_BY_4; + } + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT)) + /* XXX: just matching BIOS for now */ + /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ + dpll |= 3; + else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && + intel_panel_use_ssc(dev_priv) && num_connectors < 2) + dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; + else + dpll |= PLL_REF_INPUT_DREFCLK; + + dpll |= DPLL_VCO_ENABLE; + I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); + POSTING_READ(DPLL(pipe)); + udelay(150); + + I915_WRITE(DPLL(pipe), dpll); + + /* Wait for the clocks to stabilize. */ + POSTING_READ(DPLL(pipe)); + udelay(150); + + /* The LVDS pin pair needs to be on before the DPLLs are enabled. + * This is an exception to the general rule that mode_set doesn't turn + * things on. + */ + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) + intel_update_lvds(crtc, clock, adjusted_mode); + + /* The pixel multiplier can only be updated once the + * DPLL is enabled and the clocks are stable. + * + * So write it again. 
+ */ + I915_WRITE(DPLL(pipe), dpll); +} + static int i9xx_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, @@ -5203,14 +5374,13 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, int plane = intel_crtc->plane; int refclk, num_connectors = 0; intel_clock_t clock, reduced_clock; - u32 dpll, dspcntr, pipeconf, vsyncshift; - bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; - bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; + u32 dspcntr, pipeconf, vsyncshift; + bool ok, has_reduced_clock = false, is_sdvo = false; + bool is_lvds = false, is_tv = false, is_dp = false; struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *encoder; const intel_limit_t *limit; int ret; - u32 temp; list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { if (encoder->base.crtc != crtc) @@ -5226,15 +5396,9 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, if (encoder->needs_tv_clock) is_tv = true; break; - case INTEL_OUTPUT_DVO: - is_dvo = true; - break; case INTEL_OUTPUT_TVOUT: is_tv = true; break; - case INTEL_OUTPUT_ANALOG: - is_crt = true; - break; case INTEL_OUTPUT_DISPLAYPORT: is_dp = true; break; @@ -5281,71 +5445,12 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ? &reduced_clock : NULL); - dpll = DPLL_VGA_MODE_DIS; - - if (!IS_GEN2(dev)) { - if (is_lvds) - dpll |= DPLLB_MODE_LVDS; - else - dpll |= DPLLB_MODE_DAC_SERIAL; - if (is_sdvo) { - int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); - if (pixel_multiplier > 1) { - if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) - dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; - } - dpll |= DPLL_DVO_HIGH_SPEED; - } - if (is_dp) - dpll |= DPLL_DVO_HIGH_SPEED; - - /* compute bitmask from p1 value */ - if (IS_PINEVIEW(dev)) - dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; - else { - dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; - if (IS_G4X(dev) && has_reduced_clock) - dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; - } - switch (clock.p2) { - case 5: - dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; - break; - case 7: - dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; - break; - case 10: - dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; - break; - case 14: - dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; - break; - } - if (INTEL_INFO(dev)->gen >= 4) - dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); - } else { - if (is_lvds) { - dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; - } else { - if (clock.p1 == 2) - dpll |= PLL_P1_DIVIDE_BY_TWO; - else - dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; - if (clock.p2 == 4) - dpll |= PLL_P2_DIVIDE_BY_4; - } - } - - if (is_sdvo && is_tv) - dpll |= PLL_REF_INPUT_TVCLKINBC; - else if (is_tv) - /* XXX: just matching BIOS for now */ - /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ - dpll |= 3; - else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) - dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; + if (IS_GEN2(dev)) + i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors); else - dpll |= PLL_REF_INPUT_DREFCLK; + i9xx_update_pll(crtc, mode, adjusted_mode, &clock, + has_reduced_clock ? &reduced_clock : NULL, + num_connectors); /* setup pipeconf */ pipeconf = I915_READ(PIPECONF(pipe)); @@ -5382,52 +5487,9 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, } } - dpll |= DPLL_VCO_ENABLE; - DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 
'A' : 'B'); drm_mode_debug_printmodeline(mode); - I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); - - POSTING_READ(DPLL(pipe)); - udelay(150); - - /* The LVDS pin pair needs to be on before the DPLLs are enabled. - * This is an exception to the general rule that mode_set doesn't turn - * things on. - */ - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) - intel_update_lvds(crtc, &clock, adjusted_mode); - - if (is_dp) { - intel_dp_set_m_n(crtc, mode, adjusted_mode); - } - - I915_WRITE(DPLL(pipe), dpll); - - /* Wait for the clocks to stabilize. */ - POSTING_READ(DPLL(pipe)); - udelay(150); - - if (INTEL_INFO(dev)->gen >= 4) { - temp = 0; - if (is_sdvo) { - temp = intel_mode_get_pixel_multiplier(adjusted_mode); - if (temp > 1) - temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; - else - temp = 0; - } - I915_WRITE(DPLL_MD(pipe), temp); - } else { - /* The pixel multiplier can only be updated once the - * DPLL is enabled and the clocks are stable. - * - * So write it again. - */ - I915_WRITE(DPLL(pipe), dpll); - } - if (HAS_PIPE_CXSR(dev)) { if (intel_crtc->lowfreq_avail) { DRM_DEBUG_KMS("enabling CxSR downclocking\n"); -- cgit v1.2.3-59-g8ed1b From 70a3eb7a3e598f92604267d8ed695057f257ddb0 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 28 Mar 2012 13:39:21 -0700 Subject: drm/i915: add ValleyView driver structs and IS_VALLEYVIEW macro For use by the rest of the ValleyView code. v2: fix desktop variant to not set is_mobile (Ben) Acked-by: Ben Widawsky Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 18 ++++++++++++++++++ drivers/gpu/drm/i915/i915_drv.h | 2 ++ 2 files changed, 20 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 9a7f265db1a4..77f11b9dd14e 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -254,6 +254,24 @@ static const struct intel_device_info intel_ivybridge_m_info = { .has_llc = 1, }; +static const struct intel_device_info intel_valleyview_m_info = { + .gen = 7, .is_mobile = 1, + .need_gfx_hws = 1, .has_hotplug = 1, + .has_fbc = 0, + .has_bsd_ring = 1, + .has_blt_ring = 1, + .is_valleyview = 1, +}; + +static const struct intel_device_info intel_valleyview_d_info = { + .gen = 7, + .need_gfx_hws = 1, .has_hotplug = 1, + .has_fbc = 0, + .has_bsd_ring = 1, + .has_blt_ring = 1, + .is_valleyview = 1, +}; + static const struct pci_device_id pciidlist[] = { /* aka */ INTEL_VGA_DEVICE(0x3577, &intel_i830_info), /* I830_M */ INTEL_VGA_DEVICE(0x2562, &intel_845g_info), /* 845_G */ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6983b4bcbdea..30612f52b93b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -255,6 +255,7 @@ struct intel_device_info { u8 is_broadwater:1; u8 is_crestline:1; u8 is_ivybridge:1; + u8 is_valleyview:1; u8 has_fbc:1; u8 has_pipe_cxsr:1; u8 has_hotplug:1; @@ -1002,6 +1003,7 @@ struct drm_i915_file_private { #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) +#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) /* -- cgit v1.2.3-59-g8ed1b From ceb042468763db2d88c0c3f329204a3a71fd0479 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 28 Mar 2012 13:39:22 -0700 Subject: drm/i915: ValleyView watermark support Add support for ValleyView watermark handling. 
v2: remove unused reg & bit definitions (Ben) Acked-by: Ben Widawsky Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 6 ++++ drivers/gpu/drm/i915/intel_display.c | 65 ++++++++++++++++++++++++++++++++++++ 2 files changed, 71 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index a8d218ca7d1d..7ce595fdbd8a 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1043,6 +1043,9 @@ #define RAMCLK_GATE_D 0x6210 /* CRL only */ #define DEUC 0x6214 /* CRL only */ +#define FW_BLC_SELF_VLV 0x6500 +#define FW_CSPWRDWNEN (1<<15) + /* * Palette regs */ @@ -2495,6 +2498,7 @@ #define I915_FIFO_LINE_SIZE 64 #define I830_FIFO_LINE_SIZE 32 +#define VALLEYVIEW_FIFO_SIZE 255 #define G4X_FIFO_SIZE 127 #define I965_FIFO_SIZE 512 #define I945_FIFO_SIZE 127 @@ -2502,6 +2506,7 @@ #define I855GM_FIFO_SIZE 127 /* In cachelines */ #define I830_FIFO_SIZE 95 +#define VALLEYVIEW_MAX_WM 0xff #define G4X_MAX_WM 0x3f #define I915_MAX_WM 0x3f @@ -2516,6 +2521,7 @@ #define PINEVIEW_CURSOR_DFT_WM 0 #define PINEVIEW_CURSOR_GUARD_WM 5 +#define VALLEYVIEW_CURSOR_MAX_WM 64 #define I965_CURSOR_FIFO 64 #define I965_CURSOR_MAX_WM 32 #define I965_CURSOR_DFT_WM 8 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index fedded7669c5..453d444fdfbd 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3636,6 +3636,20 @@ static const struct intel_watermark_params g4x_cursor_wm_info = { 2, G4X_FIFO_LINE_SIZE, }; +static const struct intel_watermark_params valleyview_wm_info = { + VALLEYVIEW_FIFO_SIZE, + VALLEYVIEW_MAX_WM, + VALLEYVIEW_MAX_WM, + 2, + G4X_FIFO_LINE_SIZE, +}; +static const struct intel_watermark_params valleyview_cursor_wm_info = { + I965_CURSOR_FIFO, + VALLEYVIEW_CURSOR_MAX_WM, + I965_CURSOR_DFT_WM, + 2, + G4X_FIFO_LINE_SIZE, +}; static const struct intel_watermark_params i965_cursor_wm_info = { I965_CURSOR_FIFO, I965_CURSOR_MAX_WM, @@ -4160,6 +4174,55 @@ static bool g4x_compute_srwm(struct drm_device *dev, #define single_plane_enabled(mask) is_power_of_2(mask) +static void valleyview_update_wm(struct drm_device *dev) +{ + static const int sr_latency_ns = 12000; + struct drm_i915_private *dev_priv = dev->dev_private; + int planea_wm, planeb_wm, cursora_wm, cursorb_wm; + int plane_sr, cursor_sr; + unsigned int enabled = 0; + + if (g4x_compute_wm0(dev, 0, + &valleyview_wm_info, latency_ns, + &valleyview_cursor_wm_info, latency_ns, + &planea_wm, &cursora_wm)) + enabled |= 1; + + if (g4x_compute_wm0(dev, 1, + &valleyview_wm_info, latency_ns, + &valleyview_cursor_wm_info, latency_ns, + &planeb_wm, &cursorb_wm)) + enabled |= 2; + + plane_sr = cursor_sr = 0; + if (single_plane_enabled(enabled) && + g4x_compute_srwm(dev, ffs(enabled) - 1, + sr_latency_ns, + &valleyview_wm_info, + &valleyview_cursor_wm_info, + &plane_sr, &cursor_sr)) + I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN); + else + I915_WRITE(FW_BLC_SELF_VLV, + I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN); + + DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", + planea_wm, cursora_wm, + planeb_wm, cursorb_wm, + plane_sr, cursor_sr); + + I915_WRITE(DSPFW1, + (plane_sr << DSPFW_SR_SHIFT) | + (cursorb_wm << DSPFW_CURSORB_SHIFT) | + (planeb_wm << DSPFW_PLANEB_SHIFT) | + planea_wm); + I915_WRITE(DSPFW2, + (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | + (cursora_wm << DSPFW_CURSORA_SHIFT)); + 
I915_WRITE(DSPFW3, + (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT))); +} + static void g4x_update_wm(struct drm_device *dev) { static const int sr_latency_ns = 12000; @@ -9019,6 +9082,8 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.write_eld = ironlake_write_eld; } else dev_priv->display.update_wm = NULL; + } else if (IS_VALLEYVIEW(dev)) { + dev_priv->display.update_wm = valleyview_update_wm; } else if (IS_PINEVIEW(dev)) { if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, -- cgit v1.2.3-59-g8ed1b From 25eb05fc5ac7a432e1a3a723f9af206142cd07fa Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 28 Mar 2012 13:39:23 -0700 Subject: drm/i915: PLL defines for VLV Add register definitions for the new VLV PLL bits. v2: remove unused bits & regs (Ben) Acked-by: Ben Widawsky Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 4 ++++ drivers/gpu/drm/i915/intel_display.c | 10 +++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 7ce595fdbd8a..7abdc15b1ad7 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -796,7 +796,9 @@ #define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B) #define DPLL_VCO_ENABLE (1 << 31) #define DPLL_DVO_HIGH_SPEED (1 << 30) +#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30) #define DPLL_SYNCLOCK_ENABLE (1 << 29) +#define DPLL_REFA_CLK_ENABLE_VLV (1 << 29) #define DPLL_VGA_MODE_DIS (1 << 28) #define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */ #define DPLLB_MODE_LVDS (2 << 26) /* i915 */ @@ -808,6 +810,7 @@ #define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ #define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ #define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */ +#define DPLL_INTEGRATED_CLOCK_VLV (1<<13) #define SRX_INDEX 0x3c4 #define SRX_DATA 0x3c5 @@ -903,6 +906,7 @@ #define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 #define _DPLL_B_MD 0x06020 /* 965+ only */ #define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD) + #define _FPA0 0x06040 #define _FPA1 0x06044 #define _FPB0 0x06048 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 453d444fdfbd..04e1e9ab203c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3487,6 +3487,11 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, return true; } +static int valleyview_get_display_clock_speed(struct drm_device *dev) +{ + return 400000; /* FIXME */ +} + static int i945_get_display_clock_speed(struct drm_device *dev) { return 400000; @@ -8987,7 +8992,10 @@ static void intel_init_display(struct drm_device *dev) } /* Returns the core display clock speed */ - if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev))) + if (IS_VALLEYVIEW(dev)) + dev_priv->display.get_display_clock_speed = + valleyview_get_display_clock_speed; + else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev))) dev_priv->display.get_display_clock_speed = i945_get_display_clock_speed; else if (IS_I915G(dev)) -- cgit v1.2.3-59-g8ed1b From 57f350b6722f9569f407872f6ead56e2d221d98a Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 28 Mar 2012 13:39:25 -0700 Subject: drm/i915: add DPIO support ValleyView puts some display related registers like the PLL controls and dividers behind the DPIO bus. Add simple indirect register access routines to get to those registers. 
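
As an illustration of what sits behind the new accessors, here is a standalone
decode of a DPIO divider word such as one returned by
intel_dpio_read(dev_priv, DPIO_DIV(pipe)). The field positions and widths come
from the DPIO_*_SHIFT defines added below; the raw sample value is invented.

/* Illustrative decode of a DPIO M/N/P divider word. Field layout follows the
 * defines added in this patch; 0x10621305 is just a made-up sample value.
 */
#include <stdio.h>
#include <stdint.h>

#define DPIO_POST_DIV_SHIFT	28	/* 3 bits */
#define DPIO_K_SHIFT		24	/* 4 bits */
#define DPIO_P1_SHIFT		21	/* 3 bits */
#define DPIO_P2_SHIFT		16	/* 5 bits */
#define DPIO_N_SHIFT		12	/* 4 bits */
#define DPIO_M1DIV_SHIFT	8	/* 3 bits */
#define DPIO_M2DIV_MASK		0xff

int main(void)
{
	uint32_t val = 0x10621305;	/* pretend intel_dpio_read(dev_priv, DPIO_DIV(pipe)) */

	printf("post_div=%u k=%u p1=%u p2=%u n=%u m1=%u m2=%u\n",
	       (val >> DPIO_POST_DIV_SHIFT) & 0x7,
	       (val >> DPIO_K_SHIFT) & 0xf,
	       (val >> DPIO_P1_SHIFT) & 0x7,
	       (val >> DPIO_P2_SHIFT) & 0x1f,
	       (val >> DPIO_N_SHIFT) & 0xf,
	       (val >> DPIO_M1DIV_SHIFT) & 0x7,
	       val & DPIO_M2DIV_MASK);
	return 0;
}
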
v2: move new wait_for macro to intel_drv.h (Ben) fix DPIO_PKT double write (Ben) add debugfs file Reviewed-by: Ben Widawsky Reviewed-by: Eugeni Dodonov Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 48 ++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_drv.h | 4 +++ drivers/gpu/drm/i915/i915_reg.h | 55 ++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/intel_display.c | 61 ++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/intel_drv.h | 14 +++++++++ 5 files changed, 182 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 66c90d4477a3..e74674b3097d 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1503,6 +1503,53 @@ static int i915_ppgtt_info(struct seq_file *m, void *data) return 0; } +static int i915_dpio_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + + if (!IS_VALLEYVIEW(dev)) { + seq_printf(m, "unsupported\n"); + return 0; + } + + ret = mutex_lock_interruptible(&dev->mode_config.mutex); + if (ret) + return ret; + + seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL)); + + seq_printf(m, "DPIO_DIV_A: 0x%08x\n", + intel_dpio_read(dev_priv, _DPIO_DIV_A)); + seq_printf(m, "DPIO_DIV_B: 0x%08x\n", + intel_dpio_read(dev_priv, _DPIO_DIV_B)); + + seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n", + intel_dpio_read(dev_priv, _DPIO_REFSFR_A)); + seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n", + intel_dpio_read(dev_priv, _DPIO_REFSFR_B)); + + seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n", + intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A)); + seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n", + intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B)); + + seq_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n", + intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A)); + seq_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n", + intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B)); + + seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n", + intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE)); + + mutex_unlock(&dev->mode_config.mutex); + + return 0; +} + static int i915_debugfs_common_open(struct inode *inode, struct file *filp) @@ -1845,6 +1892,7 @@ static struct drm_info_list i915_debugfs_list[] = { {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0}, {"i915_swizzle_info", i915_swizzle_info, 0}, {"i915_ppgtt_info", i915_ppgtt_info, 0}, + {"i915_dpio", i915_dpio_info, 0}, }; #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 30612f52b93b..32f3731b1a18 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -360,6 +360,10 @@ typedef struct drm_i915_private { /* protects the irq masks */ spinlock_t irq_lock; + + /* DPIO indirect register protection */ + spinlock_t dpio_lock; + /** Cached value of IMR to avoid reads in updating the bitfield */ u32 pipestat[2]; u32 irq_mask; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 7abdc15b1ad7..65f5849f2ad6 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -301,6 +301,61 @@ #define DEBUG_RESET_RENDER (1<<8) #define DEBUG_RESET_DISPLAY (1<<9) +/* + * DPIO - a special bus for various display related registers to hide behind: + * 0x800c: m1, m2, n, p1, p2, k dividers + * 0x8014: REF and SFR 
select + * 0x8014: N divider, VCO select + * 0x801c/3c: core clock bits + * 0x8048/68: low pass filter coefficients + * 0x8100: fast clock controls + */ +#define DPIO_PKT 0x2100 +#define DPIO_RID (0<<24) +#define DPIO_OP_WRITE (1<<16) +#define DPIO_OP_READ (0<<16) +#define DPIO_PORTID (0x12<<8) +#define DPIO_BYTE (0xf<<4) +#define DPIO_BUSY (1<<0) /* status only */ +#define DPIO_DATA 0x2104 +#define DPIO_REG 0x2108 +#define DPIO_CTL 0x2110 +#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */ +#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */ +#define DPIO_SFR_BYPASS (1<<1) +#define DPIO_RESET (1<<0) + +#define _DPIO_DIV_A 0x800c +#define DPIO_POST_DIV_SHIFT (28) /* 3 bits */ +#define DPIO_K_SHIFT (24) /* 4 bits */ +#define DPIO_P1_SHIFT (21) /* 3 bits */ +#define DPIO_P2_SHIFT (16) /* 5 bits */ +#define DPIO_N_SHIFT (12) /* 4 bits */ +#define DPIO_ENABLE_CALIBRATION (1<<11) +#define DPIO_M1DIV_SHIFT (8) /* 3 bits */ +#define DPIO_M2DIV_MASK 0xff +#define _DPIO_DIV_B 0x802c +#define DPIO_DIV(pipe) _PIPE(pipe, _DPIO_DIV_A, _DPIO_DIV_B) + +#define _DPIO_REFSFR_A 0x8014 +#define DPIO_REFSEL_OVERRIDE 27 +#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */ +#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */ +#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */ +#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */ +#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */ +#define _DPIO_REFSFR_B 0x8034 +#define DPIO_REFSFR(pipe) _PIPE(pipe, _DPIO_REFSFR_A, _DPIO_REFSFR_B) + +#define _DPIO_CORE_CLK_A 0x801c +#define _DPIO_CORE_CLK_B 0x803c +#define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B) + +#define _DPIO_LFP_COEFF_A 0x8048 +#define _DPIO_LFP_COEFF_B 0x8068 +#define DPIO_LFP_COEFF(pipe) _PIPE(pipe, _DPIO_LFP_COEFF_A, _DPIO_LFP_COEFF_B) + +#define DPIO_FASTCLK_DISABLE 0x8100 /* * Fence registers diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 04e1e9ab203c..37ad4e239fc3 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -360,6 +360,64 @@ static const intel_limit_t intel_limits_ironlake_display_port = { .find_pll = intel_find_pll_ironlake_dp, }; +u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg) +{ + unsigned long flags; + u32 val = 0; + + spin_lock_irqsave(&dev_priv->dpio_lock, flags); + if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { + DRM_ERROR("DPIO idle wait timed out\n"); + goto out_unlock; + } + + I915_WRITE(DPIO_REG, reg); + I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID | + DPIO_BYTE); + if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { + DRM_ERROR("DPIO read wait timed out\n"); + goto out_unlock; + } + val = I915_READ(DPIO_DATA); + +out_unlock: + spin_unlock_irqrestore(&dev_priv->dpio_lock, flags); + return val; +} + +static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg, + u32 val) +{ + unsigned long flags; + + spin_lock_irqsave(&dev_priv->dpio_lock, flags); + if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { + DRM_ERROR("DPIO idle wait timed out\n"); + goto out_unlock; + } + + I915_WRITE(DPIO_DATA, val); + I915_WRITE(DPIO_REG, reg); + I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID | + DPIO_BYTE); + if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) + DRM_ERROR("DPIO write wait timed out\n"); + +out_unlock: + spin_unlock_irqrestore(&dev_priv->dpio_lock, flags); +} + +static void vlv_init_dpio(struct drm_device *dev) +{ + 
struct drm_i915_private *dev_priv = dev->dev_private; + + /* Reset the DPIO config */ + I915_WRITE(DPIO_CTL, 0); + POSTING_READ(DPIO_CTL); + I915_WRITE(DPIO_CTL, 1); + POSTING_READ(DPIO_CTL); +} + static bool is_dual_link_lvds(struct drm_i915_private *dev_priv, unsigned int reg) { @@ -9375,6 +9433,9 @@ void intel_modeset_cleanup(struct drm_device *dev) if (IS_IRONLAKE_M(dev)) ironlake_disable_rc6(dev); + if (IS_VALLEYVIEW(dev)) + vlv_init_dpio(dev); + mutex_unlock(&dev->struct_mutex); /* Disable the irq before mode object teardown, for the irq might diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 219efe3b9ad5..beee177dd41a 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -45,6 +45,18 @@ ret__; \ }) +#define wait_for_atomic_us(COND, US) ({ \ + int i, ret__ = -ETIMEDOUT; \ + for (i = 0; i < (US); i++) { \ + if ((COND)) { \ + ret__ = 0; \ + break; \ + } \ + udelay(1); \ + } \ + ret__; \ +}) + #define wait_for(COND, MS) _wait_for(COND, MS, 1) #define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0) @@ -420,4 +432,6 @@ extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data, extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg); + #endif /* __INTEL_DRV_H__ */ -- cgit v1.2.3-59-g8ed1b From fb046853ad66e64c96a2598f3fdd4cf5fbabc0d1 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 28 Mar 2012 13:39:26 -0700 Subject: drm/i915: add ValleyView clock gating init Set required clock gating and chicken bits on VLV. v2: set PIXEL_SUBSPAN_COLLECT_OPT_DISABLE too (Ben) move function below ivb version to pretend to be consistent (Ben) Reviewed-by: Ben Widawsky Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 18 +++++++++++++ drivers/gpu/drm/i915/intel_display.c | 50 ++++++++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 65f5849f2ad6..58914b4f5357 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -633,6 +633,9 @@ #define ECO_GATING_CX_ONLY (1<<3) #define ECO_FLIP_DONE (1<<0) +#define CACHE_MODE_1 0x7004 /* IVB+ */ +#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6) + /* GEN6 interrupt control */ #define GEN6_RENDER_HWSTAM 0x2098 #define GEN6_RENDER_IMR 0x20a8 @@ -3184,6 +3187,20 @@ #define DISP_TILE_SURFACE_SWIZZLING (1<<13) #define DISP_FBC_WM_DIS (1<<15) +/* GEN7 chicken */ +#define GEN7_COMMON_SLICE_CHICKEN1 0x7010 +# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26)) + +#define GEN7_L3CNTLREG1 0xB01C +#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C + +#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030 +#define GEN7_WA_L3_CHICKEN_MODE 0x20000000 + +/* WaCatErrorRejectionIssue */ +#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 +#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) + /* PCH */ /* south display engine interrupt */ @@ -3787,6 +3804,7 @@ #define GT_FIFO_NUM_RESERVED_ENTRIES 20 #define GEN6_UCGCTL2 0x9404 +# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13) # define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12) # define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 37ad4e239fc3..2af082db744c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -8796,6 
+8796,54 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) } } +static void valleyview_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe; + uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; + + I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); + + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); + + /* According to the spec, bit 13 (RCZUNIT) must be set on IVB. + * This implements the WaDisableRCZUnitClockGating workaround. + */ + I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); + + I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); + + I915_WRITE(IVB_CHICKEN3, + CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | + CHICKEN3_DGMG_DONE_FIX_DISABLE); + + /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ + I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, + GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); + + /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ + I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL); + I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE); + + /* This is required by WaCatErrorRejectionIssue */ + I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, + I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | + GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); + + for_each_pipe(pipe) { + I915_WRITE(DSPCNTR(pipe), + I915_READ(DSPCNTR(pipe)) | + DISPPLANE_TRICKLE_FEED_DISABLE); + intel_flush_display_plane(dev_priv, pipe); + } + + I915_WRITE(CACHE_MODE_1, I915_READ(CACHE_MODE_1) | + (PIXEL_SUBSPAN_COLLECT_OPT_DISABLE << 16) | + PIXEL_SUBSPAN_COLLECT_OPT_DISABLE); +} + static void g4x_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -9150,6 +9198,8 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.update_wm = NULL; } else if (IS_VALLEYVIEW(dev)) { dev_priv->display.update_wm = valleyview_update_wm; + dev_priv->display.init_clock_gating = + valleyview_init_clock_gating; } else if (IS_PINEVIEW(dev)) { if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, -- cgit v1.2.3-59-g8ed1b From 12a3c0551137425a9678d1b9f0495b625550f092 Mon Sep 17 00:00:00 2001 From: Gajanan Bhat Date: Wed, 28 Mar 2012 13:39:30 -0700 Subject: drm/i915: program drain latency regs on ValleyView This patch adds support for programming drain latency registers of Pondicherry memory arbiter of Valleyview. 
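
For a feel of the numbers, here is the arithmetic from
vlv_compute_drain_latency() below, run standalone on a made-up 65 MHz dot
clock: the (clock/1000) * bytes-per-pixel product selects the precision
multiplier (16x at or below 256 entries, 32x above), and the drain latency
value follows from the same product.

/* Worked example of the drain latency arithmetic in
 * vlv_compute_drain_latency() below. The 65 MHz dot clock and the pixel
 * sizes are hypothetical; clock is in kHz as in crtc->mode.clock.
 */
#include <stdio.h>

#define DRAIN_LATENCY_PRECISION_32	32
#define DRAIN_LATENCY_PRECISION_16	16

static void drain_latency(int clock, int pixel_size)
{
	int entries = (clock / 1000) * pixel_size;
	int prec_mult = (entries > 256) ? DRAIN_LATENCY_PRECISION_32 :
					  DRAIN_LATENCY_PRECISION_16;
	int dl = (64 * prec_mult * 4) / ((clock / 1000) * pixel_size);

	printf("clock %d kHz, %d bytes/px -> precision %d, dl %d\n",
	       clock, pixel_size, prec_mult, dl);
}

int main(void)
{
	drain_latency(65000, 2);	/* plane at 16 bpp: 130 entries -> precision 16, dl 31 */
	drain_latency(65000, 4);	/* cursor is always 4 bytes: 260 entries -> precision 32, dl 31 */
	return 0;
}
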
v2: clarify function names (Daniel) fix summary typo (Daniel) v3: add parens (Ben) make drain function return bool (Ben) Acked-by: Ben Widawsky Signed-off-by: Gajanan Bhat Reviewed-by: Shobhit Kumar Reviewed-by: Vijay Purushothaman Reviewed-by: Jesse Barnes Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 16 ++++++++ drivers/gpu/drm/i915/intel_display.c | 77 ++++++++++++++++++++++++++++++++++++ 2 files changed, 93 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 58914b4f5357..2f6576d7ba20 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2555,6 +2555,22 @@ #define DSPFW_HPLL_CURSOR_MASK (0x3f<<16) #define DSPFW_HPLL_SR_MASK (0x1ff) +/* drain latency register values*/ +#define DRAIN_LATENCY_PRECISION_32 32 +#define DRAIN_LATENCY_PRECISION_16 16 +#define VLV_DDL1 0x70050 +#define DDL_CURSORA_PRECISION_32 (1<<31) +#define DDL_CURSORA_PRECISION_16 (0<<31) +#define DDL_CURSORA_SHIFT 24 +#define DDL_PLANEA_PRECISION_32 (1<<7) +#define DDL_PLANEA_PRECISION_16 (0<<7) +#define VLV_DDL2 0x70054 +#define DDL_CURSORB_PRECISION_32 (1<<31) +#define DDL_CURSORB_PRECISION_16 (0<<31) +#define DDL_CURSORB_SHIFT 24 +#define DDL_PLANEB_PRECISION_32 (1<<7) +#define DDL_PLANEB_PRECISION_16 (0<<7) + /* FIFO watermark sizes etc */ #define G4X_FIFO_LINE_SIZE 64 #define I915_FIFO_LINE_SIZE 64 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 2af082db744c..6cd5744c1d9a 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4235,6 +4235,81 @@ static bool g4x_compute_srwm(struct drm_device *dev, display, cursor); } +static bool vlv_compute_drain_latency(struct drm_device *dev, + int plane, + int *plane_prec_mult, + int *plane_dl, + int *cursor_prec_mult, + int *cursor_dl) +{ + struct drm_crtc *crtc; + int clock, pixel_size; + int entries; + + crtc = intel_get_crtc_for_plane(dev, plane); + if (crtc->fb == NULL || !crtc->enabled) + return false; + + clock = crtc->mode.clock; /* VESA DOT Clock */ + pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */ + + entries = (clock / 1000) * pixel_size; + *plane_prec_mult = (entries > 256) ? + DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16; + *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) * + pixel_size); + + entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */ + *cursor_prec_mult = (entries > 256) ? + DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16; + *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4); + + return true; +} + +/* + * Update drain latency registers of memory arbiter + * + * Valleyview SoC has a new memory arbiter and needs drain latency registers + * to be programmed. Each plane has a drain latency multiplier and a drain + * latency value. + */ + +static void vlv_update_drain_latency(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int planea_prec, planea_dl, planeb_prec, planeb_dl; + int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl; + int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is + either 16 or 32 */ + + /* For plane A, Cursor A */ + if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl, + &cursor_prec_mult, &cursora_dl)) { + cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ? 
+ DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16; + planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ? + DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16; + + I915_WRITE(VLV_DDL1, cursora_prec | + (cursora_dl << DDL_CURSORA_SHIFT) | + planea_prec | planea_dl); + } + + /* For plane B, Cursor B */ + if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl, + &cursor_prec_mult, &cursorb_dl)) { + cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ? + DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16; + planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ? + DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16; + + I915_WRITE(VLV_DDL2, cursorb_prec | + (cursorb_dl << DDL_CURSORB_SHIFT) | + planeb_prec | planeb_dl); + } +} + #define single_plane_enabled(mask) is_power_of_2(mask) static void valleyview_update_wm(struct drm_device *dev) @@ -4245,6 +4320,8 @@ static void valleyview_update_wm(struct drm_device *dev) int plane_sr, cursor_sr; unsigned int enabled = 0; + vlv_update_drain_latency(dev); + if (g4x_compute_wm0(dev, 0, &valleyview_wm_info, latency_ns, &valleyview_cursor_wm_info, latency_ns, -- cgit v1.2.3-59-g8ed1b From 90b107c8f7ea75ef55db4e0515dda86b245f8978 Mon Sep 17 00:00:00 2001 From: Shobhit Kumar Date: Wed, 28 Mar 2012 13:39:32 -0700 Subject: drm/i915: Enable HDMI on ValleyView HDMI register offsets are different in Valleyview. Add support for the same. v2: drop superfluous comments in HDMI init (Daniel) Signed-off-by: Beeresh G Signed-off-by: Shobhit Kumar Reviewed-by: Vijay Purushothaman Reviewed-by: Jesse Barnes Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 16 ++++++++++++++++ drivers/gpu/drm/i915/intel_hdmi.c | 37 ++++++++++++++++++++++++++++++++++++- 2 files changed, 52 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 2f6576d7ba20..841d0d115a00 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3419,6 +3419,21 @@ #define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) #define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) +#define VLV_VIDEO_DIP_CTL_A 0x60220 +#define VLV_VIDEO_DIP_DATA_A 0x60208 +#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210 + +#define VLV_VIDEO_DIP_CTL_B 0x61170 +#define VLV_VIDEO_DIP_DATA_B 0x61174 +#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178 + +#define VLV_TVIDEO_DIP_CTL(pipe) \ + _PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B) +#define VLV_TVIDEO_DIP_DATA(pipe) \ + _PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B) +#define VLV_TVIDEO_DIP_GCP(pipe) \ + _PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B) + #define _TRANS_HTOTAL_B 0xe1000 #define _TRANS_HBLANK_B 0xe1004 #define _TRANS_HSYNC_B 0xe1008 @@ -3639,6 +3654,7 @@ #define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) /* or SDVOB */ +#define VLV_HDMIB 0x61140 #define HDMIB 0xe1140 #define PORT_ENABLE (1 << 31) #define TRANSCODER(pipe) ((pipe) << 30) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 1d00f61adce6..7de2d3b85b32 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -177,6 +177,37 @@ static void ironlake_write_infoframe(struct drm_encoder *encoder, I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags); } + +static void vlv_write_infoframe(struct drm_encoder *encoder, + struct dip_infoframe *frame) +{ + uint32_t *data = (uint32_t 
*)frame; + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc = encoder->crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); + unsigned i, len = DIP_HEADER_SIZE + frame->len; + u32 flags, val = I915_READ(reg); + + intel_wait_for_vblank(dev, intel_crtc->pipe); + + flags = intel_infoframe_index(frame); + + val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ + + I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags); + + for (i = 0; i < len; i += 4) { + I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data); + data++; + } + + flags |= intel_infoframe_flags(frame); + + I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags); +} + static void intel_set_infoframe(struct drm_encoder *encoder, struct dip_infoframe *frame) { @@ -552,7 +583,11 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) if (!HAS_PCH_SPLIT(dev)) { intel_hdmi->write_infoframe = i9xx_write_infoframe; I915_WRITE(VIDEO_DIP_CTL, 0); - } else { + } else if (IS_VALLEYVIEW(dev)) { + intel_hdmi->write_infoframe = vlv_write_infoframe; + for_each_pipe(i) + I915_WRITE(VLV_TVIDEO_DIP_CTL(i), 0); + } else { intel_hdmi->write_infoframe = ironlake_write_infoframe; for_each_pipe(i) I915_WRITE(TVIDEO_DIP_CTL(i), 0); -- cgit v1.2.3-59-g8ed1b From 4b60d29ee00cb2114075e8b5c2c23928bbd76c28 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 28 Mar 2012 13:39:33 -0700 Subject: agp/intel: map more registers for use by the GTT code We need to flush the Gunit TLB when we update GTT PTEs on VLV, but the register for doing so is above the range we normally map. Map the whole register space to make sure we can get it. v2: only map the larger space on gen7+ (Daniel) Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/char/agp/intel-gtt.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 5cf47ac2d401..269cb0287b10 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1206,12 +1206,16 @@ static inline int needs_idle_maps(void) static int i9xx_setup(void) { u32 reg_addr; + int size = KB(512); pci_read_config_dword(intel_private.pcidev, I915_MMADDR, ®_addr); reg_addr &= 0xfff80000; - intel_private.registers = ioremap(reg_addr, 128 * 4096); + if (INTEL_GTT_GEN >= 7) + size = MB(2); + + intel_private.registers = ioremap(reg_addr, size); if (!intel_private.registers) return -ENOMEM; -- cgit v1.2.3-59-g8ed1b From 64757876215fcc515403639fa0bd19e8da7ab06b Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 28 Mar 2012 13:39:34 -0700 Subject: agp/intel: add ValleyView AGP driver ... and bind it right to the PCI id. Note that there are still a few things to fix here: - we need to move the tlb flush to a better place in drm/i915. - we need to check snoop support on vlv and implement it. Signed-off-by: Jesse Barnes [danvet: squash follow-on patch and add todo items to commit msg.] 
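
As a quick illustration of the PTE layout used by valleyview_write_entry() in
the hunk below (physical address bits 39:32 folded into PTE bits 11:4), with a
made-up, page-aligned 40-bit address:

/* Standalone demonstration of the VLV/gen7 GTT PTE address packing;
 * the address is invented, only the shift/mask mirror the patch.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t addr = 0x1234567000ULL;	/* hypothetical 40-bit physical address */

	/* bits 39:32 of the address land in bits 11:4 of the 32-bit PTE */
	addr |= (addr >> 28) & 0xff0;

	printf("pte address field = 0x%08x (bits 11:4 = 0x%02x)\n",
	       (unsigned int)addr, (unsigned int)((addr >> 4) & 0xff));
	return 0;
}

The writel to GFX_FLSH_CNTL_VLV (0x101008) after each entry is the Gunit TLB
flush mentioned in the todo above, and is what the larger 2MB register mapping
from the previous patch makes reachable.
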
Signed-off-by: Daniel Vetter --- drivers/char/agp/intel-agp.c | 1 + drivers/char/agp/intel-agp.h | 3 +++ drivers/char/agp/intel-gtt.c | 25 +++++++++++++++++++++++++ 3 files changed, 29 insertions(+) (limited to 'drivers') diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index 962e75dc4781..74c2d9274c53 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -907,6 +907,7 @@ static struct pci_device_id agp_intel_pci_table[] = { ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB), ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB), ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB), + ID(PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB), { } }; diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h index 5da67f165afa..41d9ee15d465 100644 --- a/drivers/char/agp/intel-agp.h +++ b/drivers/char/agp/intel-agp.h @@ -96,6 +96,7 @@ #define G4x_GMCH_SIZE_VT_2M (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN) #define GFX_FLSH_CNTL 0x2170 /* 915+ */ +#define GFX_FLSH_CNTL_VLV 0x101008 #define I810_DRAM_CTL 0x3000 #define I810_DRAM_ROW_0 0x00000001 @@ -234,6 +235,8 @@ #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG 0x0166 #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB 0x0158 /* Server */ #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A +#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */ +#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30 int intel_gmch_probe(struct pci_dev *pdev, struct agp_bridge_data *bridge); diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 269cb0287b10..08336ba18cac 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1179,6 +1179,20 @@ static void gen6_write_entry(dma_addr_t addr, unsigned int entry, writel(addr | pte_flags, intel_private.gtt + entry); } +static void valleyview_write_entry(dma_addr_t addr, unsigned int entry, + unsigned int flags) +{ + u32 pte_flags; + + pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID; + + /* gen6 has bit11-4 for physical addr bit39-32 */ + addr |= (addr >> 28) & 0xff0; + writel(addr | pte_flags, intel_private.gtt + entry); + + writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV); +} + static void gen6_cleanup(void) { } @@ -1359,6 +1373,15 @@ static const struct intel_gtt_driver sandybridge_gtt_driver = { .check_flags = gen6_check_flags, .chipset_flush = i9xx_chipset_flush, }; +static const struct intel_gtt_driver valleyview_gtt_driver = { + .gen = 7, + .setup = i9xx_setup, + .cleanup = gen6_cleanup, + .write_entry = valleyview_write_entry, + .dma_mask_size = 40, + .check_flags = gen6_check_flags, + .chipset_flush = i9xx_chipset_flush, +}; /* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of * driver and gmch_driver must be non-null, and find_gmch will determine @@ -1463,6 +1486,8 @@ static const struct intel_gtt_driver_description { "Ivybridge", &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG, "Ivybridge", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG, + "ValleyView", &valleyview_gtt_driver }, { 0, NULL, NULL } }; -- cgit v1.2.3-59-g8ed1b From 575155a9af9ba5e384caa6979cd918387d712221 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 28 Mar 2012 13:39:37 -0700 Subject: drm/i915: add ValleyView specific force wake get/put functions ValleyView handles force wake differently than previous chipsets, so add a couple of new functions for it. But leave it disabled by default until we test it (need a chip with the Punit enabled first). 
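
A standalone model of the handshake these helpers implement: request the wells
awake by writing FORCEWAKE_VLV, poll bit 0 of FORCEWAKE_ACK_VLV a bounded
number of times before touching the protected registers, and write the release
value when done. The "registers" below are plain variables standing in for
MMIO and the fake ack latency is invented; only the sequencing and the magic
values mirror the functions added below.

/* Toy model of vlv_force_wake_get()/put(): plain variables stand in for the
 * FORCEWAKE_VLV / FORCEWAKE_ACK_VLV registers, and the simulated Punit acks
 * after a few polls. Only the request/poll/release sequence mirrors the patch.
 */
#include <stdio.h>

static unsigned int forcewake;		/* stands in for FORCEWAKE_VLV (0x1300b0) */
static unsigned int forcewake_ack;	/* stands in for FORCEWAKE_ACK_VLV (0x1300b4) */

static unsigned int read_ack(void)
{
	static int latency = 3;		/* pretend the ack shows up after a few polls */

	if (forcewake == 0xffffffff && latency-- <= 0)
		forcewake_ack |= 1;
	return forcewake_ack;
}

static void force_wake_get(void)
{
	int count = 0;

	forcewake = 0xffffffff;		/* request: all request bits set */
	while (count++ < 50 && (read_ack() & 1) == 0)
		;			/* the driver udelay(10)s here */
	printf("awake after %d poll(s)\n", count);
}

static void force_wake_put(void)
{
	forcewake = 0xffff0000;		/* release: clear the request bits */
}

int main(void)
{
	force_wake_get();
	/* ...access to registers needing force wake would happen here... */
	force_wake_put();
	return 0;
}
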
Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 28 +++++++++++++++++++++++++++- drivers/gpu/drm/i915/i915_drv.h | 3 +++ drivers/gpu/drm/i915/i915_reg.h | 2 ++ drivers/gpu/drm/i915/intel_display.c | 2 ++ 4 files changed, 34 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 77f11b9dd14e..0d92e5eb1295 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -465,6 +465,31 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) return ret; } +void vlv_force_wake_get(struct drm_i915_private *dev_priv) +{ + int count; + + count = 0; + + /* Already awake? */ + if ((I915_READ(0x130094) & 0xa1) == 0xa1) + return; + + I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff); + POSTING_READ(FORCEWAKE_VLV); + + count = 0; + while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0) + udelay(10); +} + +void vlv_force_wake_put(struct drm_i915_private *dev_priv) +{ + I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000); + /* FIXME: confirm VLV behavior with Punit folks */ + POSTING_READ(FORCEWAKE_VLV); +} + static int i915_drm_freeze(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -1007,7 +1032,8 @@ MODULE_LICENSE("GPL and additional rights"); #define NEEDS_FORCE_WAKE(dev_priv, reg) \ (((dev_priv)->info->gen >= 6) && \ ((reg) < 0x40000) && \ - ((reg) != FORCEWAKE)) + ((reg) != FORCEWAKE)) && \ + (!IS_VALLEYVIEW((dev_priv)->dev)) #define __i915_read(x, y) \ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 32f3731b1a18..48ca0d1e306c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1406,6 +1406,9 @@ extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv); extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv); +extern void vlv_force_wake_get(struct drm_i915_private *dev_priv); +extern void vlv_force_wake_put(struct drm_i915_private *dev_priv); + /* overlay */ #ifdef CONFIG_DEBUG_FS extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 841d0d115a00..3aadace73ddb 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3820,6 +3820,8 @@ #define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22) #define FORCEWAKE 0xA18C +#define FORCEWAKE_VLV 0x1300b0 +#define FORCEWAKE_ACK_VLV 0x1300b4 #define FORCEWAKE_ACK 0x130090 #define FORCEWAKE_MT 0xa188 /* multi-threaded */ #define FORCEWAKE_MT_ACK 0x130040 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 6cd5744c1d9a..d86362a44590 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9277,6 +9277,8 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.update_wm = valleyview_update_wm; dev_priv->display.init_clock_gating = valleyview_init_clock_gating; + dev_priv->display.force_wake_get = vlv_force_wake_get; + dev_priv->display.force_wake_put = vlv_force_wake_put; } else if (IS_PINEVIEW(dev)) { if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, -- cgit v1.2.3-59-g8ed1b From c46ce4d7e69d129c02640c118f45a86a4412774b Mon Sep 17 00:00:00 2001 From: 
Jesse Barnes Date: Wed, 28 Mar 2012 13:39:24 -0700 Subject: drm/i915: interrupt bit definitions for VLV Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3aadace73ddb..0bcad74de7be 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2485,23 +2485,30 @@ #define PIPECONF_DITHER_TYPE_TEMP (3<<2) #define _PIPEASTAT 0x70024 #define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) +#define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30) #define PIPE_CRC_ERROR_ENABLE (1UL<<29) #define PIPE_CRC_DONE_ENABLE (1UL<<28) #define PIPE_GMBUS_EVENT_ENABLE (1UL<<27) +#define PLANE_FLIP_DONE_INT_EN_VLV (1UL<<26) #define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26) #define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25) #define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) #define PIPE_DPST_EVENT_ENABLE (1UL<<23) +#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<26) #define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22) #define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) #define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) #define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */ #define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ #define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17) +#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16) #define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16) +#define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15) +#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<15) #define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) #define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12) #define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11) +#define PLANE_FLIPDONE_INT_STATUS_VLV (1UL<<10) #define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10) #define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9) #define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) @@ -2526,6 +2533,40 @@ #define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) #define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT) +#define DPFLIPSTAT_VLV 0x70028 +#define PIPEB_LINE_COMPARE_STATUS (1<<29) +#define PIPEB_HLINE_INT_EN (1<<28) +#define PIPEB_VBLANK_INT_EN (1<<27) +#define SPRITED_FLIPDONE_INT_EN (1<<26) +#define SPRITEC_FLIPDONE_INT_EN (1<<25) +#define PLANEB_FLIPDONE_INT_EN (1<<24) +#define PIPEA_LINE_COMPARE_STATUS (1<<21) +#define PIPEA_HLINE_INT_EN (1<<20) +#define PIPEA_VBLANK_INT_EN (1<<19) +#define SPRITEB_FLIPDONE_INT_EN (1<<18) +#define SPRITEA_FLIPDONE_INT_EN (1<<17) +#define PLANEA_FLIPDONE_INT_EN (1<<16) + +#define DPINVGTT 0x7002c /* VLV only */ +#define CURSORB_INVALID_GTT_INT_EN (1<<23) +#define CURSORA_INVALID_GTT_INT_EN (1<<22) +#define SPRITED_INVALID_GTT_INT_EN (1<<21) +#define SPRITEC_INVALID_GTT_INT_EN (1<<20) +#define PLANEB_INVALID_GTT_INT_EN (1<<19) +#define SPRITEB_INVALID_GTT_INT_EN (1<<18) +#define SPRITEA_INVALID_GTT_INT_EN (1<<17) +#define PLANEA_INVALID_GTT_INT_EN (1<<16) +#define DPINVGTT_EN_MASK 0xff0000 +#define CURSORB_INVALID_GTT_STATUS (1<<7) +#define CURSORA_INVALID_GTT_STATUS (1<<6) +#define SPRITED_INVALID_GTT_STATUS (1<<5) +#define SPRITEC_INVALID_GTT_STATUS (1<<4) +#define PLANEB_INVALID_GTT_STATUS (1<<3) +#define SPRITEB_INVALID_GTT_STATUS (1<<2) +#define SPRITEA_INVALID_GTT_STATUS (1<<1) +#define PLANEA_INVALID_GTT_STATUS (1<<0) +#define DPINVGTT_STATUS_MASK 0xff + #define DSPARB 0x70030 #define DSPARB_CSTART_MASK (0x7f << 7) #define DSPARB_CSTART_SHIFT 7 -- cgit v1.2.3-59-g8ed1b From 
7e231dbe0c2a4359472f929732d4200479d8f939 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 28 Mar 2012 13:39:38 -0700 Subject: drm/i915: ValleyView IRQ support ValleyView has a new interrupt architecture; best to put it in a new set of functions. Also make sure the ring mask functions handle ValleyView. FIXME: fix flipping; need to enable interrupts and call prepare/finish Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 40 +++- drivers/gpu/drm/i915/i915_irq.c | 338 +++++++++++++++++++++++++++++++- drivers/gpu/drm/i915/i915_reg.h | 7 +- drivers/gpu/drm/i915/intel_ringbuffer.c | 4 +- 4 files changed, 383 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index e74674b3097d..d226f2f2f7bc 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -468,7 +468,45 @@ static int i915_interrupt_info(struct seq_file *m, void *data) if (ret) return ret; - if (!HAS_PCH_SPLIT(dev)) { + if (IS_VALLEYVIEW(dev)) { + seq_printf(m, "Display IER:\t%08x\n", + I915_READ(VLV_IER)); + seq_printf(m, "Display IIR:\t%08x\n", + I915_READ(VLV_IIR)); + seq_printf(m, "Display IIR_RW:\t%08x\n", + I915_READ(VLV_IIR_RW)); + seq_printf(m, "Display IMR:\t%08x\n", + I915_READ(VLV_IMR)); + for_each_pipe(pipe) + seq_printf(m, "Pipe %c stat:\t%08x\n", + pipe_name(pipe), + I915_READ(PIPESTAT(pipe))); + + seq_printf(m, "Master IER:\t%08x\n", + I915_READ(VLV_MASTER_IER)); + + seq_printf(m, "Render IER:\t%08x\n", + I915_READ(GTIER)); + seq_printf(m, "Render IIR:\t%08x\n", + I915_READ(GTIIR)); + seq_printf(m, "Render IMR:\t%08x\n", + I915_READ(GTIMR)); + + seq_printf(m, "PM IER:\t\t%08x\n", + I915_READ(GEN6_PMIER)); + seq_printf(m, "PM IIR:\t\t%08x\n", + I915_READ(GEN6_PMIIR)); + seq_printf(m, "PM IMR:\t\t%08x\n", + I915_READ(GEN6_PMIMR)); + + seq_printf(m, "Port hotplug:\t%08x\n", + I915_READ(PORT_HOTPLUG_EN)); + seq_printf(m, "DPFLIPSTAT:\t%08x\n", + I915_READ(VLV_DPFLIPSTAT)); + seq_printf(m, "DPINVGTT:\t%08x\n", + I915_READ(DPINVGTT)); + + } else if (!HAS_PCH_SPLIT(dev)) { seq_printf(m, "Interrupt enable: %08x\n", I915_READ(IER)); seq_printf(m, "Interrupt identity: %08x\n", diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 998116e871f0..01fb65097977 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -120,6 +120,10 @@ void intel_enable_asle(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; unsigned long irqflags; + /* FIXME: opregion/asle for VLV */ + if (IS_VALLEYVIEW(dev)) + return; + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (HAS_PCH_SPLIT(dev)) @@ -426,6 +430,119 @@ static void gen6_pm_rps_work(struct work_struct *work) mutex_unlock(&dev_priv->dev->struct_mutex); } +static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) +{ + struct drm_device *dev = (struct drm_device *) arg; + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u32 iir, gt_iir, pm_iir; + irqreturn_t ret = IRQ_NONE; + unsigned long irqflags; + int pipe; + u32 pipe_stats[I915_MAX_PIPES]; + u32 vblank_status; + int vblank = 0; + bool blc_event; + + atomic_inc(&dev_priv->irq_received); + + vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS | + PIPE_VBLANK_INTERRUPT_STATUS; + + while (true) { + iir = I915_READ(VLV_IIR); + gt_iir = I915_READ(GTIIR); + pm_iir = I915_READ(GEN6_PMIIR); + + if (gt_iir == 0 && pm_iir == 0 && iir == 0) + goto out; + + ret = 
IRQ_HANDLED; + + if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) + notify_ring(dev, &dev_priv->ring[RCS]); + if (gt_iir & GT_GEN6_BSD_USER_INTERRUPT) + notify_ring(dev, &dev_priv->ring[VCS]); + if (gt_iir & GT_BLT_USER_INTERRUPT) + notify_ring(dev, &dev_priv->ring[BCS]); + + if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT | + GT_GEN6_BSD_CS_ERROR_INTERRUPT | + GT_RENDER_CS_ERROR_INTERRUPT)) { + DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); + i915_handle_error(dev, false); + } + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + for_each_pipe(pipe) { + int reg = PIPESTAT(pipe); + pipe_stats[pipe] = I915_READ(reg); + + /* + * Clear the PIPE*STAT regs before the IIR + */ + if (pipe_stats[pipe] & 0x8000ffff) { + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + DRM_DEBUG_DRIVER("pipe %c underrun\n", + pipe_name(pipe)); + I915_WRITE(reg, pipe_stats[pipe]); + } + } + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + + /* Consume port. Then clear IIR or we'll miss events */ + if (iir & I915_DISPLAY_PORT_INTERRUPT) { + u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); + + DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", + hotplug_status); + if (hotplug_status & dev_priv->hotplug_supported_mask) + queue_work(dev_priv->wq, + &dev_priv->hotplug_work); + + I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); + I915_READ(PORT_HOTPLUG_STAT); + } + + + if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) { + drm_handle_vblank(dev, 0); + vblank++; + if (!dev_priv->flip_pending_is_done) { + intel_finish_page_flip(dev, 0); + } + } + + if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) { + drm_handle_vblank(dev, 1); + vblank++; + if (!dev_priv->flip_pending_is_done) { + intel_finish_page_flip(dev, 0); + } + } + + if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) + blc_event = true; + + if (pm_iir & GEN6_PM_DEFERRED_EVENTS) { + unsigned long flags; + spin_lock_irqsave(&dev_priv->rps_lock, flags); + WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n"); + dev_priv->pm_iir |= pm_iir; + I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir); + POSTING_READ(GEN6_PMIMR); + spin_unlock_irqrestore(&dev_priv->rps_lock, flags); + queue_work(dev_priv->wq, &dev_priv->rps_work); + } + + I915_WRITE(GTIIR, gt_iir); + I915_WRITE(GEN6_PMIIR, pm_iir); + I915_WRITE(VLV_IIR, iir); + } + +out: + return ret; +} + static void pch_irq_handler(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -1567,6 +1684,32 @@ static int ivybridge_enable_vblank(struct drm_device *dev, int pipe) return 0; } +static int valleyview_enable_vblank(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + unsigned long irqflags; + u32 dpfl, imr; + + if (!i915_pipe_enabled(dev, pipe)) + return -EINVAL; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + dpfl = I915_READ(VLV_DPFLIPSTAT); + imr = I915_READ(VLV_IMR); + if (pipe == 0) { + dpfl |= PIPEA_VBLANK_INT_EN; + imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; + } else { + dpfl |= PIPEA_VBLANK_INT_EN; + imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; + } + I915_WRITE(VLV_DPFLIPSTAT, dpfl); + I915_WRITE(VLV_IMR, imr); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + + return 0; +} + /* Called from drm generic code, passed 'crtc' which * we use as a pipe index */ @@ -1608,6 +1751,28 @@ static void ivybridge_disable_vblank(struct drm_device *dev, int pipe) spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } +static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 
+{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + unsigned long irqflags; + u32 dpfl, imr; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + dpfl = I915_READ(VLV_DPFLIPSTAT); + imr = I915_READ(VLV_IMR); + if (pipe == 0) { + dpfl &= ~PIPEA_VBLANK_INT_EN; + imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; + } else { + dpfl &= ~PIPEB_VBLANK_INT_EN; + imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; + } + I915_WRITE(VLV_IMR, imr); + I915_WRITE(VLV_DPFLIPSTAT, dpfl); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); +} + + /* Set the vblank monitor pipe */ int i915_vblank_pipe_set(struct drm_device *dev, void *data, @@ -1817,6 +1982,53 @@ static void ironlake_irq_preinstall(struct drm_device *dev) POSTING_READ(SDEIER); } +static void valleyview_irq_preinstall(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; + + atomic_set(&dev_priv->irq_received, 0); + + INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); + INIT_WORK(&dev_priv->error_work, i915_error_work_func); + + /* VLV magic */ + I915_WRITE(VLV_IMR, 0); + I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); + I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); + I915_WRITE(RING_IMR(BLT_RING_BASE), 0); + + if (IS_GEN6(dev) || IS_GEN7(dev)) { + /* Workaround stalls observed on Sandy Bridge GPUs by + * making the blitter command streamer generate a + * write to the Hardware Status Page for + * MI_USER_INTERRUPT. This appears to serialize the + * previous seqno write out before the interrupt + * happens. + */ + I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT); + I915_WRITE(GEN6_BSD_HWSTAM, ~GEN6_BSD_USER_INTERRUPT); + } + + /* and GT */ + I915_WRITE(GTIIR, I915_READ(GTIIR)); + I915_WRITE(GTIIR, I915_READ(GTIIR)); + I915_WRITE(GTIMR, 0xffffffff); + I915_WRITE(GTIER, 0x0); + POSTING_READ(GTIER); + + I915_WRITE(DPINVGTT, 0xff); + + I915_WRITE(PORT_HOTPLUG_EN, 0); + I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); + for_each_pipe(pipe) + I915_WRITE(PIPESTAT(pipe), 0xffff); + I915_WRITE(VLV_IIR, 0xffffffff); + I915_WRITE(VLV_IMR, 0xffffffff); + I915_WRITE(VLV_IER, 0x0); + POSTING_READ(VLV_IER); +} + /* * Enable digital hotplug on the PCH, and configure the DP short pulse * duration to 2ms (which is the minimum in the Display Port spec) @@ -1963,6 +2175,96 @@ static int ivybridge_irq_postinstall(struct drm_device *dev) return 0; } +static int valleyview_irq_postinstall(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u32 render_irqs; + u32 enable_mask; + u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); + u16 msid; + + enable_mask = I915_DISPLAY_PORT_INTERRUPT; + enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | + I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; + + dev_priv->irq_mask = ~enable_mask; + + + DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue); + DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue); + DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue); + + dev_priv->pipestat[0] = 0; + dev_priv->pipestat[1] = 0; + + dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; + + /* Hack for broken MSIs on VLV */ + pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000); + pci_read_config_word(dev->pdev, 0x98, &msid); + msid &= 0xff; /* mask out delivery bits */ + msid |= (1<<14); + pci_write_config_word(dev_priv->dev->pdev, 0x98, msid); + + I915_WRITE(VLV_IMR, dev_priv->irq_mask); + I915_WRITE(VLV_IER, enable_mask); + I915_WRITE(VLV_IIR, 0xffffffff); + 
I915_WRITE(PIPESTAT(0), 0xffff); + I915_WRITE(PIPESTAT(1), 0xffff); + POSTING_READ(VLV_IER); + + I915_WRITE(VLV_IIR, 0xffffffff); + I915_WRITE(VLV_IIR, 0xffffffff); + + render_irqs = GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT | + GT_GEN6_BLT_CS_ERROR_INTERRUPT | + GT_BLT_USER_INTERRUPT | + GT_GEN6_BSD_USER_INTERRUPT | + GT_GEN6_BSD_CS_ERROR_INTERRUPT | + GT_GEN7_L3_PARITY_ERROR_INTERRUPT | + GT_PIPE_NOTIFY | + GT_RENDER_CS_ERROR_INTERRUPT | + GT_SYNC_STATUS | + GT_USER_INTERRUPT; + + dev_priv->gt_irq_mask = ~render_irqs; + + I915_WRITE(GTIIR, I915_READ(GTIIR)); + I915_WRITE(GTIIR, I915_READ(GTIIR)); + I915_WRITE(GTIMR, 0); + I915_WRITE(GTIER, render_irqs); + POSTING_READ(GTIER); + + /* ack & enable invalid PTE error interrupts */ +#if 0 /* FIXME: add support to irq handler for checking these bits */ + I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); + I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); +#endif + + I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); +#if 0 /* FIXME: check register definitions; some have moved */ + /* Note HDMI and DP share bits */ + if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) + hotplug_en |= HDMIB_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) + hotplug_en |= HDMIC_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) + hotplug_en |= HDMID_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) + hotplug_en |= SDVOC_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) + hotplug_en |= SDVOB_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { + hotplug_en |= CRT_HOTPLUG_INT_EN; + hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; + } +#endif + + I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); + + return 0; +} + static void i915_driver_irq_preinstall(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -2066,6 +2368,30 @@ static int i915_driver_irq_postinstall(struct drm_device *dev) return 0; } +static void valleyview_irq_uninstall(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; + + if (!dev_priv) + return; + + dev_priv->vblank_pipe = 0; + + for_each_pipe(pipe) + I915_WRITE(PIPESTAT(pipe), 0xffff); + + I915_WRITE(HWSTAM, 0xffffffff); + I915_WRITE(PORT_HOTPLUG_EN, 0); + I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); + for_each_pipe(pipe) + I915_WRITE(PIPESTAT(pipe), 0xffff); + I915_WRITE(VLV_IIR, 0xffffffff); + I915_WRITE(VLV_IMR, 0xffffffff); + I915_WRITE(VLV_IER, 0x0); + POSTING_READ(VLV_IER); +} + static void ironlake_irq_uninstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -2121,7 +2447,8 @@ void intel_irq_init(struct drm_device *dev) { dev->driver->get_vblank_counter = i915_get_vblank_counter; dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ - if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) { + if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev) || + IS_VALLEYVIEW(dev)) { dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ dev->driver->get_vblank_counter = gm45_get_vblank_counter; } @@ -2132,7 +2459,14 @@ void intel_irq_init(struct drm_device *dev) dev->driver->get_vblank_timestamp = NULL; dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; - if (IS_IVYBRIDGE(dev)) { + if (IS_VALLEYVIEW(dev)) { + dev->driver->irq_handler = valleyview_irq_handler; + 
dev->driver->irq_preinstall = valleyview_irq_preinstall; + dev->driver->irq_postinstall = valleyview_irq_postinstall; + dev->driver->irq_uninstall = valleyview_irq_uninstall; + dev->driver->enable_vblank = valleyview_enable_vblank; + dev->driver->disable_vblank = valleyview_disable_vblank; + } else if (IS_IVYBRIDGE(dev)) { /* Share pre & uninstall handlers with ILK/SNB */ dev->driver->irq_handler = ivybridge_irq_handler; dev->driver->irq_preinstall = ironlake_irq_preinstall; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 0bcad74de7be..908550c72a0d 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -510,6 +510,11 @@ #define IIR 0x020a4 #define IMR 0x020a8 #define ISR 0x020ac +#define VLV_IIR_RW 0x182084 +#define VLV_IER 0x1820a0 +#define VLV_IIR 0x1820a4 +#define VLV_IMR 0x1820a8 +#define VLV_ISR 0x1820ac #define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) #define I915_DISPLAY_PORT_INTERRUPT (1<<17) #define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) @@ -2533,7 +2538,7 @@ #define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) #define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT) -#define DPFLIPSTAT_VLV 0x70028 +#define VLV_DPFLIPSTAT 0x70028 #define PIPEB_LINE_COMPARE_STATUS (1<<29) #define PIPEB_HLINE_INT_EN (1<<28) #define PIPEB_VBLANK_INT_EN (1<<27) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index b7b50a5b0478..75081f146390 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -687,7 +687,7 @@ render_ring_get_irq(struct intel_ring_buffer *ring) spin_lock(&ring->irq_lock); if (ring->irq_refcount++ == 0) { - if (HAS_PCH_SPLIT(dev)) + if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev)) ironlake_enable_irq(dev_priv, GT_PIPE_NOTIFY | GT_USER_INTERRUPT); else @@ -706,7 +706,7 @@ render_ring_put_irq(struct intel_ring_buffer *ring) spin_lock(&ring->irq_lock); if (--ring->irq_refcount == 0) { - if (HAS_PCH_SPLIT(dev)) + if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev)) ironlake_disable_irq(dev_priv, GT_USER_INTERRUPT | GT_PIPE_NOTIFY); -- cgit v1.2.3-59-g8ed1b From 23e3f9b37e7368ee8530ba99907508363feebc14 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 28 Mar 2012 13:39:39 -0700 Subject: drm/i915: check for disabled interrupts on ValleyView Haven't seen this yet, but it doesn't hurt. Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b8c6248b25c6..54ca125a405d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1875,6 +1875,8 @@ i915_wait_request(struct intel_ring_buffer *ring, if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) { if (HAS_PCH_SPLIT(ring->dev)) ier = I915_READ(DEIER) | I915_READ(GTIER); + else if (IS_VALLEYVIEW(ring->dev)) + ier = I915_READ(GTIER) | I915_READ(VLV_IER); else ier = I915_READ(IER); if (!ier) { -- cgit v1.2.3-59-g8ed1b From 7e508a275b9425d612b845cac534e6b35a3f95e3 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Thu, 29 Mar 2012 12:32:17 -0300 Subject: drm/i915: transform HAS_PCH_SPLIT in a feature check The macro is becoming too complex and with VLV upon us it can lead to confusion. So transforming this into a feature check instead. Signed-off-by: Eugeni Dodonov [danvet: fixed conflict with is_valleyview addition.] 
Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 6 ++++++ drivers/gpu/drm/i915/i915_drv.h | 3 ++- 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 0d92e5eb1295..2fd6694fa21e 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -211,6 +211,7 @@ static const struct intel_device_info intel_ironlake_d_info = { .gen = 5, .need_gfx_hws = 1, .has_hotplug = 1, .has_bsd_ring = 1, + .has_pch_split = 1, }; static const struct intel_device_info intel_ironlake_m_info = { @@ -218,6 +219,7 @@ static const struct intel_device_info intel_ironlake_m_info = { .need_gfx_hws = 1, .has_hotplug = 1, .has_fbc = 1, .has_bsd_ring = 1, + .has_pch_split = 1, }; static const struct intel_device_info intel_sandybridge_d_info = { @@ -226,6 +228,7 @@ static const struct intel_device_info intel_sandybridge_d_info = { .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, + .has_pch_split = 1, }; static const struct intel_device_info intel_sandybridge_m_info = { @@ -235,6 +238,7 @@ static const struct intel_device_info intel_sandybridge_m_info = { .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, + .has_pch_split = 1, }; static const struct intel_device_info intel_ivybridge_d_info = { @@ -243,6 +247,7 @@ static const struct intel_device_info intel_ivybridge_d_info = { .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, + .has_pch_split = 1, }; static const struct intel_device_info intel_ivybridge_m_info = { @@ -252,6 +257,7 @@ static const struct intel_device_info intel_ivybridge_m_info = { .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, + .has_pch_split = 1, }; static const struct intel_device_info intel_valleyview_m_info = { diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 48ca0d1e306c..d3a3202dd476 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -256,6 +256,7 @@ struct intel_device_info { u8 is_crestline:1; u8 is_ivybridge:1; u8 is_valleyview:1; + u8 has_pch_split:1; u8 has_fbc:1; u8 has_pipe_cxsr:1; u8 has_hotplug:1; @@ -1051,7 +1052,7 @@ struct drm_i915_file_private { #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) -#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) +#define HAS_PCH_SPLIT(dev) (INTEL_INFO(dev)->has_pch_split) #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) -- cgit v1.2.3-59-g8ed1b From 4cae9ae052fe630e63f28be6b0b115fbf52e63fb Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Thu, 29 Mar 2012 12:32:18 -0300 Subject: drm/i915: add Haswell devices and their PCI IDs This adds product definitions for desktop, mobile and server boards. v2: split into a separate patch, add .has_pch_split feature. 
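For illustration only, a hypothetical PCI ID table entry binding one of the new device IDs to these structs might look like:

	INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info),	/* GT1 desktop, ID as defined in intel-agp.h above */

(the actual pciidlist hunk is not reproduced here, and the ID-to-info pairing is only an example).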
Signed-off-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/char/agp/intel-agp.h | 11 +++++++++++ drivers/char/agp/intel-gtt.c | 14 ++++++++++++++ drivers/gpu/drm/i915/i915_drv.c | 18 ++++++++++++++++++ drivers/gpu/drm/i915/i915_drv.h | 2 ++ 4 files changed, 45 insertions(+) (limited to 'drivers') diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h index 41d9ee15d465..0b0710143699 100644 --- a/drivers/char/agp/intel-agp.h +++ b/drivers/char/agp/intel-agp.h @@ -237,6 +237,17 @@ #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A #define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */ #define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30 +#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */ +#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402 +#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 0x0412 +#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */ +#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406 +#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416 +#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */ +#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a +#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV 0x0c16 /* SDV */ +#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04 int intel_gmch_probe(struct pci_dev *pdev, struct agp_bridge_data *bridge); diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 08336ba18cac..7e223a27e112 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1488,6 +1488,20 @@ static const struct intel_gtt_driver_description { "Ivybridge", &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG, "ValleyView", &valleyview_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV, + "Haswell", &sandybridge_gtt_driver }, { 0, NULL, NULL } }; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 2fd6694fa21e..6d7548d1f94b 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -278,6 +278,24 @@ static const struct intel_device_info intel_valleyview_d_info = { .is_valleyview = 1, }; +static const struct intel_device_info intel_haswell_d_info = { + .is_haswell = 1, .gen = 7, + .need_gfx_hws = 1, .has_hotplug = 1, + .has_bsd_ring = 1, + .has_blt_ring = 1, + .has_llc = 1, + .has_pch_split = 1, +}; + +static const struct intel_device_info intel_haswell_m_info = { + .is_haswell = 1, .gen = 7, .is_mobile = 1, + .need_gfx_hws = 1, .has_hotplug = 1, + .has_bsd_ring = 1, + .has_blt_ring = 1, + .has_llc = 1, + .has_pch_split = 1, +}; + static const struct pci_device_id pciidlist[] = { /* aka */ INTEL_VGA_DEVICE(0x3577, &intel_i830_info), /* I830_M */ INTEL_VGA_DEVICE(0x2562, &intel_845g_info), /* 845_G */ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d3a3202dd476..ffd5d26b6783 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -257,6 +257,7 @@ struct intel_device_info { u8 is_ivybridge:1; u8 is_valleyview:1; u8 
has_pch_split:1; + u8 is_haswell:1; u8 has_fbc:1; u8 has_pipe_cxsr:1; u8 has_hotplug:1; @@ -1009,6 +1010,7 @@ struct drm_i915_file_private { #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) +#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) /* -- cgit v1.2.3-59-g8ed1b From eb877ebfd38b096a60a375785952cc460628d6b2 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Thu, 29 Mar 2012 12:32:20 -0300 Subject: drm/i915: add support for LynxPoint PCH Reviewed-by: Rodrigo Vivi Signed-off-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 4 ++++ drivers/gpu/drm/i915/i915_drv.h | 2 ++ 2 files changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 6d7548d1f94b..0efc02e4e7ce 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -351,6 +351,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist); #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 +#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 void intel_detect_pch(struct drm_device *dev) { @@ -379,6 +380,9 @@ void intel_detect_pch(struct drm_device *dev) /* PantherPoint is CPT compatible */ dev_priv->pch_type = PCH_CPT; DRM_DEBUG_KMS("Found PatherPoint PCH\n"); + } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { + dev_priv->pch_type = PCH_LPT; + DRM_DEBUG_KMS("Found LynxPoint PCH\n"); } } pci_dev_put(pch); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ffd5d26b6783..d7a5146796e7 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -294,6 +294,7 @@ enum no_fbc_reason { enum intel_pch { PCH_IBX, /* Ibexpeak PCH */ PCH_CPT, /* Cougarpoint PCH */ + PCH_LPT, /* Lynxpoint PCH */ }; #define QUIRK_PIPEA_FORCE (1<<0) @@ -1058,6 +1059,7 @@ struct drm_i915_file_private { #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) +#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) -- cgit v1.2.3-59-g8ed1b From 9eb3a75276892b01026489096c670a30bcc66252 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Thu, 29 Mar 2012 12:32:21 -0300 Subject: drm/i915: add support for power wells This defines the registers used by different power wells. 
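As a rough usage sketch (an assumption about the intended sequence, not code from this patch), the driver-owned control register would be used to request the well and the state bit then polled until it reports up:

	I915_WRITE(HSW_PWR_WELL_CTL2, HSW_PWR_WELL_ENABLE);	/* driver request */
	if (wait_for(I915_READ(HSW_PWR_WELL_CTL2) & HSW_PWR_WELL_STATE, 20))
		DRM_ERROR("power well did not come up\n");	/* 20 ms timeout is a guess */
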
Reviewed-by: Rodrigo Vivi Signed-off-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 908550c72a0d..61ee4142d643 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4021,4 +4021,17 @@ #define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16) #define AUD_CONFIG_DISABLE_NCTS (1 << 3) +/* HSW Power Wells */ +#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */ +#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */ +#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */ +#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */ +#define HSW_PWR_WELL_ENABLE (1<<31) +#define HSW_PWR_WELL_STATE (1<<30) +#define HSW_PWR_WELL_CTL5 0x45410 +#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31) +#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20) +#define HSW_PWR_WELL_FORCE_ON (1<<19) +#define HSW_PWR_WELL_CTL6 0x45414 + #endif /* _I915_REG_H_ */ -- cgit v1.2.3-59-g8ed1b From 2b139522008b824ba17d5160085ce70f940839a0 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Thu, 29 Mar 2012 12:32:22 -0300 Subject: drm/i915: add enumeration for DDI ports There are 5 DDI ports on Haswell. Port A is always enabled, and is the one connected to eDP, and Port E is the one that can be connected to the PCH using FDI protocol. Ports B, C, D and E can be used for digital outputs. Signed-off-by: Daniel Vetter Signed-off-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 10 ++++++++++ drivers/gpu/drm/i915/i915_reg.h | 2 ++ 2 files changed, 12 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d7a5146796e7..b617cd50c398 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -63,6 +63,16 @@ enum plane { }; #define plane_name(p) ((p) + 'A') +enum port { + PORT_A = 0, + PORT_B, + PORT_C, + PORT_D, + PORT_E, + I915_MAX_PORTS +}; +#define port_name(p) ((p) + 'A') + #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) #define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 61ee4142d643..bca37b3082c5 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -27,6 +27,8 @@ #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) +#define _PORT(port, a, b) ((a) + (port)*((b)-(a))) + /* * The Bridge device's PCI config space has information about the * fb aperture size and the amount of pre-reserved memory. -- cgit v1.2.3-59-g8ed1b From e7e104c3785a5a88ce9a58f0bfd0722e53217f47 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Thu, 29 Mar 2012 12:32:23 -0300 Subject: drm/i915: add DDI registers There is one set of such registers for each pipe (A/B/C/EDP). 
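For illustration (the particular values are arbitrary, not taken from this patch), a mode set would program the per-pipe function control with the macros below roughly as follows:

	u32 ddi_func_ctl = PIPE_DDI_FUNC_ENABLE |
			   PIPE_DDI_SELECT_PORT(PORT_B) |	/* ignored by pipe EDP */
			   PIPE_DDI_MODE_SELECT_DP_SST |
			   PIPE_DDI_BPC_8 |
			   PIPE_DDI_PORT_WIDTH_X2;

	I915_WRITE(DDI_FUNC_CTL(pipe), ddi_func_ctl);
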
v2: update to use DDI PORTS enum v1 Reviewed-by: Rodrigo Vivi Signed-off-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index bca37b3082c5..61eca8b9679f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4036,4 +4036,30 @@ #define HSW_PWR_WELL_FORCE_ON (1<<19) #define HSW_PWR_WELL_CTL6 0x45414 +/* Per-pipe DDI Function Control */ +#define PIPE_DDI_FUNC_CTL_A 0x60400 +#define PIPE_DDI_FUNC_CTL_B 0x61400 +#define PIPE_DDI_FUNC_CTL_C 0x62400 +#define PIPE_DDI_FUNC_CTL_EDP 0x6F400 +#define DDI_FUNC_CTL(pipe) _PIPE(pipe, \ + PIPE_DDI_FUNC_CTL_A, \ + PIPE_DDI_FUNC_CTL_B) +#define PIPE_DDI_FUNC_ENABLE (1<<31) +/* Those bits are ignored by pipe EDP since it can only connect to DDI A */ +#define PIPE_DDI_PORT_MASK (0xf<<28) +#define PIPE_DDI_SELECT_PORT(x) ((x)<<28) +#define PIPE_DDI_MODE_SELECT_HDMI (0<<24) +#define PIPE_DDI_MODE_SELECT_DVI (1<<24) +#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24) +#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24) +#define PIPE_DDI_MODE_SELECT_FDI (4<<24) +#define PIPE_DDI_BPC_8 (0<<20) +#define PIPE_DDI_BPC_10 (1<<20) +#define PIPE_DDI_BPC_6 (2<<20) +#define PIPE_DDI_BPC_12 (3<<20) +#define PIPE_DDI_BFI_ENABLE (1<<4) +#define PIPE_DDI_PORT_WIDTH_X1 (0<<1) +#define PIPE_DDI_PORT_WIDTH_X2 (1<<1) +#define PIPE_DDI_PORT_WIDTH_X4 (3<<1) + #endif /* _I915_REG_H_ */ -- cgit v1.2.3-59-g8ed1b From 0e87f6679807a60efd3280c99544b6997916e987 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Thu, 29 Mar 2012 12:32:24 -0300 Subject: drm/i915: add DP_TP_CTL registers This is one set of those registers for each pipe. v2: use port enum to access individual registers Signed-off-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 61eca8b9679f..b49775326fb4 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4062,4 +4062,20 @@ #define PIPE_DDI_PORT_WIDTH_X2 (1<<1) #define PIPE_DDI_PORT_WIDTH_X4 (3<<1) +/* DisplayPort Transport Control */ +#define DP_TP_CTL_A 0x64040 +#define DP_TP_CTL_B 0x64140 +#define DP_TP_CTL(port) _PORT(port, \ + DP_TP_CTL_A, \ + DP_TP_CTL_B) +#define DP_TP_CTL_ENABLE (1<<31) +#define DP_TP_CTL_MODE_SST (0<<27) +#define DP_TP_CTL_MODE_MST (1<<27) +#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18) +#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15) +#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8) +#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8) +#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8) +#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8) + #endif /* _I915_REG_H_ */ -- cgit v1.2.3-59-g8ed1b From e411b2c116626e685fb96cad84a902a12b1689f5 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Thu, 29 Mar 2012 12:32:25 -0300 Subject: drm/i915: add DP_TP_STATUS registers There is one set of those registers for each port. 
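Usage sketch (an assumption, not part of this patch): FDI auto-training would be kicked off through DP_TP_CTL and its completion polled in the status register:

	I915_WRITE(DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE |
				      DP_TP_CTL_FDI_AUTOTRAIN |
				      DP_TP_CTL_LINK_TRAIN_PAT1);

	if (wait_for(I915_READ(DP_TP_STATUS(PORT_E)) & DP_TP_STATUS_AUTOTRAIN_DONE, 5))
		DRM_ERROR("FDI auto-train timed out\n");	/* timeout value is a guess */
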
Signed-off-by: Eugeni Dodonov Reviewed-by: Daniel Vetter Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index b49775326fb4..5b1710100963 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4078,4 +4078,12 @@ #define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8) #define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8) +/* DisplayPort Transport Status */ +#define DP_TP_STATUS_A 0x64044 +#define DP_TP_STATUS_B 0x64144 +#define DP_TP_STATUS(port) _PORT(port, \ + DP_TP_STATUS_A, \ + DP_TP_STATUS_B) +#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12) + #endif /* _I915_REG_H_ */ -- cgit v1.2.3-59-g8ed1b From 03f896a1aeff5bd7f47b6d24562af9fc671f30ed Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Thu, 29 Mar 2012 12:32:26 -0300 Subject: drm/i915: add definitions for DDI_BUF_CTL registers There is one instance of those registers for each DDI port. v2: access registers via the DDI_BUF_CTL() macro Reviewed-by: Rodrigo Vivi Signed-off-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5b1710100963..bcc76ce1afa6 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4086,4 +4086,27 @@ DP_TP_STATUS_B) #define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12) +/* DDI Buffer Control */ +#define DDI_BUF_CTL_A 0x64000 +#define DDI_BUF_CTL_B 0x64100 +#define DDI_BUF_CTL(port) _PORT(port, \ + DDI_BUF_CTL_A, \ + DDI_BUF_CTL_B) +#define DDI_BUF_CTL_ENABLE (1<<31) +#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */ +#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */ +#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */ +#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */ +#define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */ +#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */ +#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */ +#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */ +#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */ +#define DDI_BUF_EMP_MASK (0xf<<24) +#define DDI_BUF_IS_IDLE (1<<7) +#define DDI_PORT_WIDTH_X1 (0<<1) +#define DDI_PORT_WIDTH_X2 (1<<1) +#define DDI_PORT_WIDTH_X4 (3<<1) +#define DDI_INIT_DISPLAY_DETECTED (1<<0) + #endif /* _I915_REG_H_ */ -- cgit v1.2.3-59-g8ed1b From bb879a44ffd5f708be14b1578239e51b2a4555e8 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Thu, 29 Mar 2012 12:32:27 -0300 Subject: drm/i915: add definition of DDI buffer translations regs Those registers are used to train DDI buffer translations for each link type. 
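Sketch of how a consumer would fill these (the table values below are placeholders, not real tuning data): each port gets a block of consecutive dwords starting at DDI_BUF_TRANS(port):

	static const u32 ddi_translations[] = {
		0x00ffffff, 0x0006000e,		/* placeholder voltage/emphasis pairs */
		0x00d75fff, 0x0005000a,
	};
	u32 reg = DDI_BUF_TRANS(port);
	int i;

	for (i = 0; i < ARRAY_SIZE(ddi_translations); i++) {
		I915_WRITE(reg, ddi_translations[i]);
		reg += 4;			/* entries are packed back to back */
	}
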
v2: access each port registers through the DDI_BUF_TRANS macro Signed-off-by: Eugeni Dodonov Reviewed-by: Daniel Vetter Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index bcc76ce1afa6..a87f41c92243 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4109,4 +4109,11 @@ #define DDI_PORT_WIDTH_X4 (3<<1) #define DDI_INIT_DISPLAY_DETECTED (1<<0) +/* DDI Buffer Translations */ +#define DDI_BUF_TRANS_A 0x64E00 +#define DDI_BUF_TRANS_B 0x64E60 +#define DDI_BUF_TRANS(port) _PORT(port, \ + DDI_BUF_TRANS_A, \ + DDI_BUF_TRANS_B) + #endif /* _I915_REG_H_ */ -- cgit v1.2.3-59-g8ed1b From 7501a4d846c9ca3d448f0eee102ebc409d9c1b19 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Thu, 29 Mar 2012 12:32:29 -0300 Subject: drm/i915: add SBI registers Those are responsible for the Sideband Interface programming. v2: rename SBI bits to better reflect their meaning Reviewed-by: Rodrigo Vivi Signed-off-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index a87f41c92243..542128c80546 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4116,4 +4116,16 @@ DDI_BUF_TRANS_A, \ DDI_BUF_TRANS_B) +/* Sideband Interface (SBI) is programmed indirectly, via + * SBI_ADDR, which contains the register offset; and SBI_DATA, + * which contains the payload */ +#define SBI_ADDR 0xC6000 +#define SBI_DATA 0xC6004 +#define SBI_CTL_STAT 0xC6008 +#define SBI_CTL_OP_CRRD (0x6<<8) +#define SBI_CTL_OP_CRWR (0x7<<8) +#define SBI_RESPONSE_FAIL (0x1<<1) +#define SBI_RESPONSE_SUCCESS (0x0<<1) +#define SBI_BUSY (0x1<<0) +#define SBI_READY (0x0<<0) #endif /* _I915_REG_H_ */ -- cgit v1.2.3-59-g8ed1b From 52f025efa989318a6bc634103d70ca7a51a8b52d Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Thu, 29 Mar 2012 12:32:31 -0300 Subject: drm/i915: add PIXCLK_GATE register Pixel clock gating control for Lynx point. Reviewed-by: Rodrigo Vivi Signed-off-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 542128c80546..a9a47f6ef7d1 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4128,4 +4128,10 @@ #define SBI_RESPONSE_SUCCESS (0x0<<1) #define SBI_BUSY (0x1<<0) #define SBI_READY (0x0<<0) + +/* LPT PIXCLK_GATE */ +#define PIXCLK_GATE 0xC6020 +#define PIXCLK_GATE_UNGATE 1<<0 +#define PIXCLK_GATE_GATE 0<<0 + #endif /* _I915_REG_H_ */ -- cgit v1.2.3-59-g8ed1b From e93ea06aa0436f60a18962a195b95d8f36e9b7d6 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Thu, 29 Mar 2012 12:32:32 -0300 Subject: drm/i915: add S PLL control This PLL control can drive DDI ports at desired frequencies for DisplayPort and FDI connections. 
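For example (an assumption about how it will eventually be used, not part of this patch), enabling the SPLL for an FDI link would look roughly like:

	I915_WRITE(SPLL_CTL, SPLL_PLL_ENABLE | SPLL_PLL_NON_SCC |
			     SPLL_PLL_FREQ_1350MHz);
	POSTING_READ(SPLL_CTL);
	udelay(20);	/* allow the PLL to lock; the exact delay is a guess */
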
Reviewed-by: Rodrigo Vivi Signed-off-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index a9a47f6ef7d1..58046ffcf031 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4134,4 +4134,12 @@ #define PIXCLK_GATE_UNGATE 1<<0 #define PIXCLK_GATE_GATE 0<<0 +/* SPLL */ +#define SPLL_CTL 0x46020 +#define SPLL_PLL_ENABLE (1<<31) +#define SPLL_PLL_SCC (1<<28) +#define SPLL_PLL_NON_SCC (2<<28) +#define SPLL_PLL_FREQ_810MHz (0<<26) +#define SPLL_PLL_FREQ_1350MHz (1<<26) + #endif /* _I915_REG_H_ */ -- cgit v1.2.3-59-g8ed1b From fec9181ca4bd70071807c6839cd73e9346a9e023 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Thu, 29 Mar 2012 12:32:33 -0300 Subject: drm/i915: add port clock selection support for HSW Multiple clocks can drive different outputs. v2: use the port enums to access individual ports v1 Reviewed-by: Rodrigo Vivi Signed-off-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 58046ffcf031..0cf2bf8da996 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4142,4 +4142,27 @@ #define SPLL_PLL_FREQ_810MHz (0<<26) #define SPLL_PLL_FREQ_1350MHz (1<<26) +/* Port clock selection */ +#define PORT_CLK_SEL_A 0x46100 +#define PORT_CLK_SEL_B 0x46104 +#define PORT_CLK_SEL(port) _PORT(port, \ + PORT_CLK_SEL_A, \ + PORT_CLK_SEL_B) +#define PORT_CLK_SEL_LCPLL_2700 (0<<29) +#define PORT_CLK_SEL_LCPLL_1350 (1<<29) +#define PORT_CLK_SEL_LCPLL_810 (2<<29) +#define PORT_CLK_SEL_SPLL (3<<29) +#define PORT_CLK_SEL_WRPLL1 (4<<29) +#define PORT_CLK_SEL_WRPLL2 (5<<29) + +/* Pipe clock selection */ +#define PIPE_CLK_SEL_A 0x46140 +#define PIPE_CLK_SEL_B 0x46144 +#define PIPE_CLK_SEL(pipe) _PIPE(pipe, \ + PIPE_CLK_SEL_A, \ + PIPE_CLK_SEL_B) +/* For each pipe, we need to select the corresponding port clock */ +#define PIPE_CLK_SEL_DISABLED (0x0<<29) +#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29) + #endif /* _I915_REG_H_ */ -- cgit v1.2.3-59-g8ed1b From ccf1c867ce049bf27a1f7172f1a91820b3ceb6e5 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Thu, 29 Mar 2012 12:32:34 -0300 Subject: drm/i915: add SSC offsets for SBI access Different registers are identified by their target id and offset. To simplify their programming, they are called as . For example, SSCCTL register accessed through SBI at target id 6 and offset 0c is called SBI_SSCCTL6. 
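A rough sketch of an indirect read built on the registers above (the busy handshake, the offset placement in SBI_ADDR and the timeouts are assumptions, not taken from this series):

	static u32 sbi_read_sketch(struct drm_i915_private *dev_priv, u16 offset)
	{
		if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
			DRM_ERROR("SBI busy, aborting read\n");
			return 0;
		}

		I915_WRITE(SBI_ADDR, (u32)offset << 16);		/* register offset */
		I915_WRITE(SBI_CTL_STAT, SBI_BUSY | SBI_CTL_OP_CRRD);	/* issue the read */

		if (wait_for((I915_READ(SBI_CTL_STAT) &
			      (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, 100)) {
			DRM_ERROR("SBI read did not complete\n");
			return 0;
		}

		return I915_READ(SBI_DATA);				/* payload */
	}
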
Reviewed-by: Rodrigo Vivi Signed-off-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 0cf2bf8da996..5fe8e2dbef2d 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4129,6 +4129,21 @@ #define SBI_BUSY (0x1<<0) #define SBI_READY (0x0<<0) +/* SBI offsets */ +#define SBI_SSCDIVINTPHASE6 0x0600 +#define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1) +#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1) +#define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8) +#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8) +#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15) +#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0) +#define SBI_SSCCTL 0x020c +#define SBI_SSCCTL6 0x060C +#define SBI_SSCCTL_DISABLE (1<<0) +#define SBI_SSCAUXDIV6 0x0610 +#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4) +#define SBI_DBUFF0 0x2a00 + /* LPT PIXCLK_GATE */ #define PIXCLK_GATE 0xC6020 #define PIXCLK_GATE_UNGATE 1<<0 -- cgit v1.2.3-59-g8ed1b From 90e8d31c53890064962f1154405e1034be7ec9a1 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Thu, 29 Mar 2012 12:32:35 -0300 Subject: drm/i915: add LCPLL control registers Those are used to control the display core clock. v2: change the enable bit setting, spotted by Rodrigo Vivi. Reviewed-by: Rodrigo Vivi Signed-off-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5fe8e2dbef2d..8c44fe0b4fa4 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4180,4 +4180,11 @@ #define PIPE_CLK_SEL_DISABLED (0x0<<29) #define PIPE_CLK_SEL_PORT(x) ((x+1)<<29) +/* LCPLL Control */ +#define LCPLL_CTL 0x130040 +#define LCPLL_PLL_DISABLE (1<<31) +#define LCPLL_PLL_LOCK (1<<30) +#define LCPLL_CD_CLOCK_DISABLE (1<<25) +#define LCPLL_CD2X_CLOCK_DISABLE (1<<23) + #endif /* _I915_REG_H_ */ -- cgit v1.2.3-59-g8ed1b From 4dffc4043a392b4a0f13f033330d31833bbf9f35 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Thu, 29 Mar 2012 12:32:36 -0300 Subject: drm/i915: add WRPLL clocks The WR PLL can drive the DDI ports at fixed frequencies for HDMI, DVI, DP and FDI. Signed-off-by: Eugeni Dodonov Reviewed-by: Daniel Vetter Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 8c44fe0b4fa4..fa4d1d303623 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4157,6 +4157,14 @@ #define SPLL_PLL_FREQ_810MHz (0<<26) #define SPLL_PLL_FREQ_1350MHz (1<<26) +/* WRPLL */ +#define WRPLL_CTL1 0x46040 +#define WRPLL_CTL2 0x46060 +#define WRPLL_PLL_ENABLE (1<<31) +#define WRPLL_PLL_SELECT_SSC (0x01<<28) +#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28) +#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28) + /* Port clock selection */ #define PORT_CLK_SEL_A 0x46100 #define PORT_CLK_SEL_B 0x46104 -- cgit v1.2.3-59-g8ed1b From 69e94b7e0900293d28ae42fcbd8c5613d4e94f69 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Thu, 29 Mar 2012 12:32:37 -0300 Subject: drm/i915: add WM_LINETIME registers Watermark line time registers for display low power watermark. 
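Sketch of the expected programming (the formula is an assumption based on the field being in 1/8 microsecond units; mode and pipe are presumed to be in scope):

	/* line time in 1/8 us: 8 * htotal / pixel clock in MHz */
	u32 linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->clock);

	I915_WRITE(PIPE_WM_LINETIME(pipe), PIPE_WM_LINETIME_TIME(linetime));
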
v2: improve bit names as suggested by Chris Wilson Signed-off-by: Eugeni Dodonov Reviewed-by: Daniel Vetter Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index fa4d1d303623..9b34da991710 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4195,4 +4195,14 @@ #define LCPLL_CD_CLOCK_DISABLE (1<<25) #define LCPLL_CD2X_CLOCK_DISABLE (1<<23) +/* Pipe WM_LINETIME - watermark line time */ +#define PIPE_WM_LINETIME_A 0x45270 +#define PIPE_WM_LINETIME_B 0x45274 +#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, \ + PIPE_WM_LINETIME_A, \ + PIPE_WM_LINETIME_A) +#define PIPE_WM_LINETIME_MASK (0x1ff) +#define PIPE_WM_LINETIME_TIME(x) ((x)) +#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16) +#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16) #endif /* _I915_REG_H_ */ -- cgit v1.2.3-59-g8ed1b From 96d6e3506739c1ca2ebe578eb157dfd504bfad3a Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Thu, 29 Mar 2012 12:32:38 -0300 Subject: drm/i915: add SFUSE_STRAP registers for digital port detection DDIA is detected via the DDI_BUF_CTL registers bit 0, but for DDIB, DDIC and DDID we need to consult SFUSE_STRAP values. Signed-off-by: Eugeni Dodonov Reviewed-by: Daniel Vetter Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 9b34da991710..0f6f567ae507 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4205,4 +4205,11 @@ #define PIPE_WM_LINETIME_TIME(x) ((x)) #define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16) #define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16) + +/* SFUSE_STRAP */ +#define SFUSE_STRAP 0xc2014 +#define SFUSE_STRAP_DDIB_DETECTED (1<<2) +#define SFUSE_STRAP_DDIC_DETECTED (1<<1) +#define SFUSE_STRAP_DDID_DETECTED (1<<0) + #endif /* _I915_REG_H_ */ -- cgit v1.2.3-59-g8ed1b From e2a1e2f0242c363ed80458282d67039c373fbb1f Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Thu, 29 Mar 2012 19:11:26 -0700 Subject: drm/i915: ring irq cleanups - gen6 put/get only need one argument rflags and gflags are always the same (see above explanation) - remove a couple redundantly defined IRQs - reordered some lines to make things go in descending order Every ring has its own interrupts, enables, masks, and status bits that are fed into the main interrupt enable/mask/status registers. At one point in time it seemed like a good idea to make our functions support the notion that each interrupt may have a different bit position in the corresponding register (blitter parser error may be bit n in IMR, but bit m in blitter IMR). It turned out though that the HW designers did us a solid on Gen6+ and this unfortunate situation has been avoided. This allows our interrupt code to be cleaned up a bit. I jammed this into one commit because there should be no functional change with this commit, and staging it into multiple commits was unnecessarily artificial IMO. CC: Chris Wilson CC: Jesse Barnes Signed-off-by: Ben Widawsky Reviewed-by: Chris Wilson [danvet: - fixed up merged conflict with vlv changes. - added GEN6 to GT blitter bit, we only use it on gen6+. - added a comment to both ring irq bits and GT irq bits that on gen6+ these alias. - added comment that GT_BSD_USER_INTERRUPT is ilk-only. 
- I've got confused a bit that we still use GT_USER_INTERRUPT on ivb for the render ring - but this goes back to ilk where we have only gt interrupt bits and so we be equally confusing if changed.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 20 +++++++++--------- drivers/gpu/drm/i915/i915_reg.h | 12 +++++++---- drivers/gpu/drm/i915/intel_ringbuffer.c | 36 +++++++++++---------------------- 3 files changed, 30 insertions(+), 38 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 01fb65097977..06286a270b07 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -462,7 +462,7 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) notify_ring(dev, &dev_priv->ring[RCS]); if (gt_iir & GT_GEN6_BSD_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[VCS]); - if (gt_iir & GT_BLT_USER_INTERRUPT) + if (gt_iir & GT_GEN6_BLT_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[BCS]); if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT | @@ -620,9 +620,9 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) notify_ring(dev, &dev_priv->ring[RCS]); - if (gt_iir & GT_GEN6_BSD_USER_INTERRUPT) + if (gt_iir & GEN6_BSD_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[VCS]); - if (gt_iir & GT_BLT_USER_INTERRUPT) + if (gt_iir & GEN6_BLITTER_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[BCS]); if (de_iir & DE_GSE_IVB) @@ -688,7 +688,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) atomic_inc(&dev_priv->irq_received); if (IS_GEN6(dev)) - bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT; + bsd_usr_interrupt = GEN6_BSD_USER_INTERRUPT; /* disable master interrupt before clearing iir */ de_ier = I915_READ(DEIER); @@ -722,7 +722,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) notify_ring(dev, &dev_priv->ring[RCS]); if (gt_iir & bsd_usr_interrupt) notify_ring(dev, &dev_priv->ring[VCS]); - if (gt_iir & GT_BLT_USER_INTERRUPT) + if (gt_iir & GEN6_BLITTER_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[BCS]); if (de_iir & DE_GSE) @@ -2081,8 +2081,8 @@ static int ironlake_irq_postinstall(struct drm_device *dev) if (IS_GEN6(dev)) render_irqs = GT_USER_INTERRUPT | - GT_GEN6_BSD_USER_INTERRUPT | - GT_BLT_USER_INTERRUPT; + GEN6_BSD_USER_INTERRUPT | + GEN6_BLITTER_USER_INTERRUPT; else render_irqs = GT_USER_INTERRUPT | @@ -2154,8 +2154,8 @@ static int ivybridge_irq_postinstall(struct drm_device *dev) I915_WRITE(GTIIR, I915_READ(GTIIR)); I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - render_irqs = GT_USER_INTERRUPT | GT_GEN6_BSD_USER_INTERRUPT | - GT_BLT_USER_INTERRUPT; + render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | + GEN6_BLITTER_USER_INTERRUPT; I915_WRITE(GTIER, render_irqs); POSTING_READ(GTIER); @@ -2218,7 +2218,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev) render_irqs = GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT | GT_GEN6_BLT_CS_ERROR_INTERRUPT | - GT_BLT_USER_INTERRUPT | + GT_GEN6_BLT_USER_INTERRUPT | GT_GEN6_BSD_USER_INTERRUPT | GT_GEN6_BSD_CS_ERROR_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT | diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 0f6f567ae507..56d4db8026e3 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -643,7 +643,9 @@ #define CACHE_MODE_1 0x7004 /* IVB+ */ #define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6) -/* GEN6 interrupt control */ +/* GEN6 interrupt control + * Note that the per-ring interrupt bits do alias with the 
global interrupt bits + * in GTIMR. */ #define GEN6_RENDER_HWSTAM 0x2098 #define GEN6_RENDER_IMR 0x20a8 #define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8) @@ -3203,13 +3205,15 @@ #define DEIIR 0x44008 #define DEIER 0x4400c -/* GT interrupt */ +/* GT interrupt. + * Note that for gen6+ the ring-specific interrupt bits do alias with the + * corresponding bits in the per-ring interrupt control registers. */ #define GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT (1 << 26) #define GT_GEN6_BLT_CS_ERROR_INTERRUPT (1 << 25) -#define GT_BLT_USER_INTERRUPT (1 << 22) +#define GT_GEN6_BLT_USER_INTERRUPT (1 << 22) #define GT_GEN6_BSD_CS_ERROR_INTERRUPT (1 << 15) #define GT_GEN6_BSD_USER_INTERRUPT (1 << 12) -#define GT_BSD_USER_INTERRUPT (1 << 5) +#define GT_BSD_USER_INTERRUPT (1 << 5) /* ilk only */ #define GT_GEN7_L3_PARITY_ERROR_INTERRUPT (1 << 5) #define GT_PIPE_NOTIFY (1 << 4) #define GT_RENDER_CS_ERROR_INTERRUPT (1 << 3) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 75081f146390..c7eea7fad16f 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -788,7 +788,7 @@ ring_add_request(struct intel_ring_buffer *ring, } static bool -gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag) +gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 mask) { struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; @@ -803,9 +803,9 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag) spin_lock(&ring->irq_lock); if (ring->irq_refcount++ == 0) { - ring->irq_mask &= ~rflag; + ring->irq_mask &= ~mask; I915_WRITE_IMR(ring, ring->irq_mask); - ironlake_enable_irq(dev_priv, gflag); + ironlake_enable_irq(dev_priv, mask); } spin_unlock(&ring->irq_lock); @@ -813,16 +813,16 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag) } static void -gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag) +gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 mask) { struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; spin_lock(&ring->irq_lock); if (--ring->irq_refcount == 0) { - ring->irq_mask |= rflag; + ring->irq_mask |= mask; I915_WRITE_IMR(ring, ring->irq_mask); - ironlake_disable_irq(dev_priv, gflag); + ironlake_disable_irq(dev_priv, mask); } spin_unlock(&ring->irq_lock); @@ -1376,33 +1376,25 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, static bool gen6_render_ring_get_irq(struct intel_ring_buffer *ring) { - return gen6_ring_get_irq(ring, - GT_USER_INTERRUPT, - GEN6_RENDER_USER_INTERRUPT); + return gen6_ring_get_irq(ring, GT_USER_INTERRUPT); } static void gen6_render_ring_put_irq(struct intel_ring_buffer *ring) { - return gen6_ring_put_irq(ring, - GT_USER_INTERRUPT, - GEN6_RENDER_USER_INTERRUPT); + return gen6_ring_put_irq(ring, GT_USER_INTERRUPT); } static bool gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring) { - return gen6_ring_get_irq(ring, - GT_GEN6_BSD_USER_INTERRUPT, - GEN6_BSD_USER_INTERRUPT); + return gen6_ring_get_irq(ring, GEN6_BSD_USER_INTERRUPT); } static void gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring) { - return gen6_ring_put_irq(ring, - GT_GEN6_BSD_USER_INTERRUPT, - GEN6_BSD_USER_INTERRUPT); + return gen6_ring_put_irq(ring, GEN6_BSD_USER_INTERRUPT); } /* ring buffer for Video Codec for Gen6+ */ @@ -1431,17 +1423,13 @@ static const struct intel_ring_buffer gen6_bsd_ring = { static bool blt_ring_get_irq(struct intel_ring_buffer *ring) { - return 
gen6_ring_get_irq(ring, - GT_BLT_USER_INTERRUPT, - GEN6_BLITTER_USER_INTERRUPT); + return gen6_ring_get_irq(ring, GEN6_BLITTER_USER_INTERRUPT); } static void blt_ring_put_irq(struct intel_ring_buffer *ring) { - gen6_ring_put_irq(ring, - GT_BLT_USER_INTERRUPT, - GEN6_BLITTER_USER_INTERRUPT); + gen6_ring_put_irq(ring, GEN6_BLITTER_USER_INTERRUPT); } static int blt_ring_flush(struct intel_ring_buffer *ring, -- cgit v1.2.3-59-g8ed1b From 25c063004a6d0048c4dece74db4da117b9ae623e Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Thu, 29 Mar 2012 19:11:27 -0700 Subject: drm/i915: open code gen6+ ring irqs We can now open-code the get/put irq functions as they were just abstracting single register definitions. It would be nice to merge this in with the IRQ handling code... but that is too much work for me at present. In addition I could probably collapse this in to a lot of the Ironlake stuff, but I don't think it's worth the potential regressions. This patch itself should not effect functionality. CC: Jesse Barnes Signed-off-by: Ben Widawsky Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ringbuffer.c | 62 +++++++++------------------------ drivers/gpu/drm/i915/intel_ringbuffer.h | 1 + 2 files changed, 17 insertions(+), 46 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index c7eea7fad16f..98ac5c0ca37a 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -788,10 +788,11 @@ ring_add_request(struct intel_ring_buffer *ring, } static bool -gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 mask) +gen6_ring_get_irq(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; + u32 mask = ring->irq_enable; if (!dev->irq_enabled) return false; @@ -813,10 +814,11 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 mask) } static void -gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 mask) +gen6_ring_put_irq(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; + u32 mask = ring->irq_enable; spin_lock(&ring->irq_lock); if (--ring->irq_refcount == 0) { @@ -1373,30 +1375,6 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, return 0; } -static bool -gen6_render_ring_get_irq(struct intel_ring_buffer *ring) -{ - return gen6_ring_get_irq(ring, GT_USER_INTERRUPT); -} - -static void -gen6_render_ring_put_irq(struct intel_ring_buffer *ring) -{ - return gen6_ring_put_irq(ring, GT_USER_INTERRUPT); -} - -static bool -gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring) -{ - return gen6_ring_get_irq(ring, GEN6_BSD_USER_INTERRUPT); -} - -static void -gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring) -{ - return gen6_ring_put_irq(ring, GEN6_BSD_USER_INTERRUPT); -} - /* ring buffer for Video Codec for Gen6+ */ static const struct intel_ring_buffer gen6_bsd_ring = { .name = "gen6 bsd ring", @@ -1408,8 +1386,9 @@ static const struct intel_ring_buffer gen6_bsd_ring = { .flush = gen6_ring_flush, .add_request = gen6_add_request, .get_seqno = gen6_ring_get_seqno, - .irq_get = gen6_bsd_ring_get_irq, - .irq_put = gen6_bsd_ring_put_irq, + .irq_enable = GEN6_BSD_USER_INTERRUPT, + .irq_get = gen6_ring_get_irq, + .irq_put = gen6_ring_put_irq, .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, .sync_to = gen6_bsd_ring_sync_to, .semaphore_register = {MI_SEMAPHORE_SYNC_VR, @@ -1420,18 +1399,6 @@ static const struct 
intel_ring_buffer gen6_bsd_ring = { /* Blitter support (SandyBridge+) */ -static bool -blt_ring_get_irq(struct intel_ring_buffer *ring) -{ - return gen6_ring_get_irq(ring, GEN6_BLITTER_USER_INTERRUPT); -} - -static void -blt_ring_put_irq(struct intel_ring_buffer *ring) -{ - gen6_ring_put_irq(ring, GEN6_BLITTER_USER_INTERRUPT); -} - static int blt_ring_flush(struct intel_ring_buffer *ring, u32 invalidate, u32 flush) { @@ -1463,8 +1430,9 @@ static const struct intel_ring_buffer gen6_blt_ring = { .flush = blt_ring_flush, .add_request = gen6_add_request, .get_seqno = gen6_ring_get_seqno, - .irq_get = blt_ring_get_irq, - .irq_put = blt_ring_put_irq, + .irq_get = gen6_ring_get_irq, + .irq_put = gen6_ring_put_irq, + .irq_enable = GEN6_BLITTER_USER_INTERRUPT, .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, .sync_to = gen6_blt_ring_sync_to, .semaphore_register = {MI_SEMAPHORE_SYNC_BR, @@ -1482,8 +1450,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev) if (INTEL_INFO(dev)->gen >= 6) { ring->add_request = gen6_add_request; ring->flush = gen6_render_ring_flush; - ring->irq_get = gen6_render_ring_get_irq; - ring->irq_put = gen6_render_ring_put_irq; + ring->irq_get = gen6_ring_get_irq; + ring->irq_put = gen6_ring_put_irq; + ring->irq_enable = GT_USER_INTERRUPT; ring->get_seqno = gen6_ring_get_seqno; } else if (IS_GEN5(dev)) { ring->add_request = pc_render_add_request; @@ -1506,8 +1475,9 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) *ring = render_ring; if (INTEL_INFO(dev)->gen >= 6) { ring->add_request = gen6_add_request; - ring->irq_get = gen6_render_ring_get_irq; - ring->irq_put = gen6_render_ring_put_irq; + ring->irq_get = gen6_ring_get_irq; + ring->irq_put = gen6_ring_put_irq; + ring->irq_enable = GT_USER_INTERRUPT; } else if (IS_GEN5(dev)) { ring->add_request = pc_render_add_request; ring->get_seqno = pc_render_get_seqno; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index bc0365b8fa4d..3488a5a127db 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -59,6 +59,7 @@ struct intel_ring_buffer { spinlock_t irq_lock; u32 irq_refcount; u32 irq_mask; + u32 irq_enable; /* IRQs enabled for this ring */ u32 irq_seqno; /* last seq seem at irq time */ u32 trace_irq_seqno; u32 waiting_seqno; -- cgit v1.2.3-59-g8ed1b From fad2596acb5e4e028266e820fd8b47e88030a4ca Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 30 Mar 2012 20:24:33 +0200 Subject: drm/i915: rip out old HWSTAM missed irq WA for vlv This got copy-pasted from an older version. The newer kinds of workarounds don't need this anymore. Shame on me for not noticing when picking up the vlv irq patch. Reviewed-by: Jesse Barnes Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 12 ------------ 1 file changed, 12 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 06286a270b07..8496fa55122a 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1998,18 +1998,6 @@ static void valleyview_irq_preinstall(struct drm_device *dev) I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); I915_WRITE(RING_IMR(BLT_RING_BASE), 0); - if (IS_GEN6(dev) || IS_GEN7(dev)) { - /* Workaround stalls observed on Sandy Bridge GPUs by - * making the blitter command streamer generate a - * write to the Hardware Status Page for - * MI_USER_INTERRUPT. 
This appears to serialize the - * previous seqno write out before the interrupt - * happens. - */ - I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT); - I915_WRITE(GEN6_BSD_HWSTAM, ~GEN6_BSD_USER_INTERRUPT); - } - /* and GT */ I915_WRITE(GTIIR, I915_READ(GTIIR)); I915_WRITE(GTIIR, I915_READ(GTIIR)); -- cgit v1.2.3-59-g8ed1b From 901781b997570e55af950348d7cd5cef64fb6d7c Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 30 Mar 2012 20:24:34 +0200 Subject: drm/i915: use render gen to switch ring irq functions Top-level interrupt bits are usually found in the display block. It therefore makes sense to use HAS_PCH_SPLIT in i915_irq.c But the irq stuff in intel_ring.c only concerns itself with render core/gt-level interrupt sources. It therefore makes more sense to switch based on gpu gen. Kills a vlv special case. Reviewed-by: Jesse Barnes Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ringbuffer.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 98ac5c0ca37a..465a7da3b30d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -687,7 +687,7 @@ render_ring_get_irq(struct intel_ring_buffer *ring) spin_lock(&ring->irq_lock); if (ring->irq_refcount++ == 0) { - if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev)) + if (INTEL_INFO(dev)->gen >= 5) ironlake_enable_irq(dev_priv, GT_PIPE_NOTIFY | GT_USER_INTERRUPT); else @@ -706,7 +706,7 @@ render_ring_put_irq(struct intel_ring_buffer *ring) spin_lock(&ring->irq_lock); if (--ring->irq_refcount == 0) { - if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev)) + if (INTEL_INFO(dev)->gen >= 5) ironlake_disable_irq(dev_priv, GT_USER_INTERRUPT | GT_PIPE_NOTIFY); -- cgit v1.2.3-59-g8ed1b From e7b4c6b122443254bc7cab0f17fd67871dbe0a19 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 30 Mar 2012 20:24:35 +0200 Subject: drm/i915: extract gt interrupt handler vlv, ivb and snb all share the gen6+ gt irq handling. 3 copies of the same stuff is a bit much, so extract it into a little helper. Now ilk has a different gt irq handling than snb, but shares the same irq handler (due to the similar display block). So also extract the ilk gt irq handling to clearly separate these two things. Nice side effect of this is that we can complete Ben Widawsky's gen6+ irq bit #define cleanup and call the render irq also with the GEN6 alias. Beforehand that code was shared with ilk, and neither option really made much sense. As a bonus this enables the error interrupt handling lifted from the vlv code on snb and ivb, too. 
Reviewed-by: Jesse Barnes Antagonized-by: Ben Widawsky Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 66 +++++++++++++++++++++++------------------ 1 file changed, 37 insertions(+), 29 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 8496fa55122a..6d76ee54278b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -430,6 +430,27 @@ static void gen6_pm_rps_work(struct work_struct *work) mutex_unlock(&dev_priv->dev->struct_mutex); } +static void snb_gt_irq_handler(struct drm_device *dev, + struct drm_i915_private *dev_priv, + u32 gt_iir) +{ + + if (gt_iir & (GEN6_RENDER_USER_INTERRUPT | + GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT)) + notify_ring(dev, &dev_priv->ring[RCS]); + if (gt_iir & GEN6_BSD_USER_INTERRUPT) + notify_ring(dev, &dev_priv->ring[VCS]); + if (gt_iir & GEN6_BLITTER_USER_INTERRUPT) + notify_ring(dev, &dev_priv->ring[BCS]); + + if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT | + GT_GEN6_BSD_CS_ERROR_INTERRUPT | + GT_RENDER_CS_ERROR_INTERRUPT)) { + DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); + i915_handle_error(dev, false); + } +} + static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; @@ -458,19 +479,7 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) ret = IRQ_HANDLED; - if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) - notify_ring(dev, &dev_priv->ring[RCS]); - if (gt_iir & GT_GEN6_BSD_USER_INTERRUPT) - notify_ring(dev, &dev_priv->ring[VCS]); - if (gt_iir & GT_GEN6_BLT_USER_INTERRUPT) - notify_ring(dev, &dev_priv->ring[BCS]); - - if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT | - GT_GEN6_BSD_CS_ERROR_INTERRUPT | - GT_RENDER_CS_ERROR_INTERRUPT)) { - DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); - i915_handle_error(dev, false); - } + snb_gt_irq_handler(dev, dev_priv, gt_iir); spin_lock_irqsave(&dev_priv->irq_lock, irqflags); for_each_pipe(pipe) { @@ -618,12 +627,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) READ_BREADCRUMB(dev_priv); } - if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) - notify_ring(dev, &dev_priv->ring[RCS]); - if (gt_iir & GEN6_BSD_USER_INTERRUPT) - notify_ring(dev, &dev_priv->ring[VCS]); - if (gt_iir & GEN6_BLITTER_USER_INTERRUPT) - notify_ring(dev, &dev_priv->ring[BCS]); + snb_gt_irq_handler(dev, dev_priv, gt_iir); if (de_iir & DE_GSE_IVB) intel_opregion_gse_intr(dev); @@ -675,6 +679,16 @@ done: return ret; } +static void ilk_gt_irq_handler(struct drm_device *dev, + struct drm_i915_private *dev_priv, + u32 gt_iir) +{ + if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) + notify_ring(dev, &dev_priv->ring[RCS]); + if (gt_iir & GT_BSD_USER_INTERRUPT) + notify_ring(dev, &dev_priv->ring[VCS]); +} + static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; @@ -683,13 +697,9 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; u32 hotplug_mask; struct drm_i915_master_private *master_priv; - u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT; atomic_inc(&dev_priv->irq_received); - if (IS_GEN6(dev)) - bsd_usr_interrupt = GEN6_BSD_USER_INTERRUPT; - /* disable master interrupt before clearing iir */ de_ier = I915_READ(DEIER); I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); @@ -718,12 +728,10 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) READ_BREADCRUMB(dev_priv); } - if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) - notify_ring(dev, 
&dev_priv->ring[RCS]); - if (gt_iir & bsd_usr_interrupt) - notify_ring(dev, &dev_priv->ring[VCS]); - if (gt_iir & GEN6_BLITTER_USER_INTERRUPT) - notify_ring(dev, &dev_priv->ring[BCS]); + if (IS_GEN5(dev)) + ilk_gt_irq_handler(dev, dev_priv, gt_iir); + else + snb_gt_irq_handler(dev, dev_priv, gt_iir); if (de_iir & DE_GSE) intel_opregion_gse_intr(dev); -- cgit v1.2.3-59-g8ed1b From 26394d9251879231b85e6c8cf899fa43e75c68f1 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 26 Mar 2012 21:33:18 +0200 Subject: drm/i915: refuse to load on gen6+ without kms Spurred by an irc discussion, let's start to clear up which parts of our kms + ums/gem + ums/dri1 + vbios/dri1 kernel driver pieces userspace in the wild actually uses. The idea is that we introduce checks at entry-points (module load time, ioctls, ...) first and then reap any obviously dead code in a second step. As a first step refuse to load without kms on chips where userspace never supported ums. Now upstream hasn't supported ums on ilk, ever. But RHEL had the great idea to backport the kms support to their ums driver. Cc: Dave Airlie Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index a9caf62b5dd2..4636391ded38 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1956,9 +1956,17 @@ i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base, int i915_driver_load(struct drm_device *dev, unsigned long flags) { struct drm_i915_private *dev_priv; + struct intel_device_info *info; int ret = 0, mmio_bar; uint32_t aperture_size; + info = (struct intel_device_info *) flags; + + /* Refuse to load on gen6+ without kms enabled. */ + if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + + /* i915 has 4 more counters */ dev->counters += 4; dev->types[6] = _DRM_STAT_IRQ; @@ -1972,7 +1980,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev->dev_private = (void *)dev_priv; dev_priv->dev = dev; - dev_priv->info = (struct intel_device_info *) flags; + dev_priv->info = info; if (i915_get_bridge_dev(dev)) { ret = -EIO; -- cgit v1.2.3-59-g8ed1b From f534bc0b2212f3dc317bdaea5dfff13526f2ac17 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 26 Mar 2012 22:37:04 +0200 Subject: drm/i915: disallow gem init ioctl on ilk Ums is already disabled, but on ilk we can additionally disable gem initialization when using user mode setting. Upstream never support ilk without kernel modesetting and not even the RHEL ilk ums backport needs gem - that driver is based on xf86-video-intel version 2.2, which is pre-gem. Reviewed-by: Adam Jackson Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 54ca125a405d..759daa491ab5 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -129,6 +129,10 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data, (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1)) return -EINVAL; + /* GEM with user mode setting was never supported on ilk and later. 
*/ + if (INTEL_INFO(dev)->gen >= 5) + return -ENODEV; + mutex_lock(&dev->struct_mutex); i915_gem_init_global_gtt(dev, args->gtt_start, args->gtt_end, args->gtt_end); -- cgit v1.2.3-59-g8ed1b From a0b1c7a5197293d6206b245b45edc3f508aadab6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 30 Sep 2011 22:56:41 +0100 Subject: drm/i915/sdvo: Include YRPB as an additional TV output type Reported-and-tested-by: Bo Wang < bo.b.wang@intel.com> Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=36997 Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_sdvo.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 830c8b5d90b8..6898145b44ce 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -41,7 +41,7 @@ #define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1) #define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1) #define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1) -#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0) +#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_YPRPB0) #define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\ SDVO_TV_MASK) @@ -1344,8 +1344,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) return connector_status_unknown; /* add 30ms delay when the output type might be TV */ - if (intel_sdvo->caps.output_flags & - (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0)) + if (intel_sdvo->caps.output_flags & SDVO_TV_MASK) mdelay(30); if (!intel_sdvo_read_response(intel_sdvo, &response, 2)) @@ -2194,6 +2193,10 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags) if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0)) return false; + if (flags & SDVO_OUTPUT_YPRPB0) + if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_YPRPB0)) + return false; + if (flags & SDVO_OUTPUT_RGB0) if (!intel_sdvo_analog_init(intel_sdvo, 0)) return false; -- cgit v1.2.3-59-g8ed1b From 9d2f41fa0fd9a3e086d3c072b0113b6b9ebae06b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 2 Apr 2012 21:41:45 +0200 Subject: drm/i915: dump the DMA fetch addr register on pre-gen6 It exists way back to gen2, bug got moved around on gen4 a bit. 
Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 2 +- drivers/gpu/drm/i915/i915_irq.c | 3 ++- drivers/gpu/drm/i915/i915_reg.h | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index d226f2f2f7bc..2a1f0d7de0f2 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -757,8 +757,8 @@ static void i915_ring_error_state(struct seq_file *m, if (INTEL_INFO(dev)->gen >= 4) seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); + seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]); if (INTEL_INFO(dev)->gen >= 6) { - seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]); seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]); seq_printf(m, " SYNC_0: 0x%08x\n", error->semaphore_mboxes[ring][0]); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 6d76ee54278b..febddc2952fb 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1029,7 +1029,6 @@ static void i915_record_ring_state(struct drm_device *dev, struct drm_i915_private *dev_priv = dev->dev_private; if (INTEL_INFO(dev)->gen >= 6) { - error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); error->semaphore_mboxes[ring->id][0] = I915_READ(RING_SYNC_0(ring->mmio_base)); @@ -1038,6 +1037,7 @@ static void i915_record_ring_state(struct drm_device *dev, } if (INTEL_INFO(dev)->gen >= 4) { + error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); @@ -1047,6 +1047,7 @@ static void i915_record_ring_state(struct drm_device *dev, error->bbaddr = I915_READ64(BB_ADDR); } } else { + error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); error->ipeir[ring->id] = I915_READ(IPEIR); error->ipehr[ring->id] = I915_READ(IPEHR); error->instdone[ring->id] = I915_READ(INSTDONE); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 56d4db8026e3..cb554444e862 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -474,6 +474,7 @@ #define INSTDONE 0x02090 #define NOPID 0x02094 #define HWSTAM 0x02098 +#define DMA_FADD_I8XX 0x020d0 #define ERROR_GEN6 0x040a0 -- cgit v1.2.3-59-g8ed1b From bc0daf488fc4a255733ad19fe0de995f4a4d745f Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 1 Apr 2012 13:16:49 +0200 Subject: drm/i915: make quirks more verbose And add informational dmesg output where it does not yet exist. In case a quirk matches too much, this information is crucial for debugging such a bug report. 
Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_crt.c | 2 +- drivers/gpu/drm/i915/intel_display.c | 4 +++- drivers/gpu/drm/i915/intel_lvds.c | 4 ++-- 3 files changed, 6 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index d54d2327a1b3..70b0f1abf149 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -545,7 +545,7 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = { static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id) { - DRM_DEBUG_KMS("Skipping CRT initialization for %s\n", id->ident); + DRM_INFO("Skipping CRT initialization for %s\n", id->ident); return 1; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d86362a44590..2bd08ef056d9 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9362,7 +9362,7 @@ static void quirk_pipea_force(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; dev_priv->quirks |= QUIRK_PIPEA_FORCE; - DRM_DEBUG_DRIVER("applying pipe a force quirk\n"); + DRM_INFO("applying pipe a force quirk\n"); } /* @@ -9372,6 +9372,7 @@ static void quirk_ssc_force_disable(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; + DRM_INFO("applying lvds SSC disable quirk\n"); } /* @@ -9382,6 +9383,7 @@ static void quirk_invert_brightness(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS; + DRM_INFO("applying inverted panel brightness quirk\n"); } struct intel_quirk { diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 4f92a11fc29a..a96d9a1399f0 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -474,7 +474,7 @@ static int intel_lvds_get_modes(struct drm_connector *connector) static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id) { - DRM_DEBUG_KMS("Skipping forced modeset for %s\n", id->ident); + DRM_INFO("Skipping forced modeset for %s\n", id->ident); return 1; } @@ -622,7 +622,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = { static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id) { - DRM_DEBUG_KMS("Skipping LVDS initialization for %s\n", id->ident); + DRM_INFO("Skipping LVDS initialization for %s\n", id->ident); return 1; } -- cgit v1.2.3-59-g8ed1b From 618563e3945b9d0864154bab3c607865b557cecc Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 1 Apr 2012 13:38:50 +0200 Subject: drm/i915: Add a dual link lvds quirk for MacBook Pro 8,2 When booting with EFI, Apple botched this one up. v2: Switch the quirk dmesg output to DRM_INFO. v3: Actually git add the new things ... 
Tested-by: Austin Lund Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=42842 Acked-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 2bd08ef056d9..6ec81b43ffc0 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -24,6 +24,7 @@ * Eric Anholt */ +#include #include #include #include @@ -418,6 +419,24 @@ static void vlv_init_dpio(struct drm_device *dev) POSTING_READ(DPIO_CTL); } +static int intel_dual_link_lvds_callback(const struct dmi_system_id *id) +{ + DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident); + return 1; +} + +static const struct dmi_system_id intel_dual_link_lvds[] = { + { + .callback = intel_dual_link_lvds_callback, + .ident = "Apple MacBook Pro (Core i5/i7 Series)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"), + }, + }, + { } /* terminating entry */ +}; + static bool is_dual_link_lvds(struct drm_i915_private *dev_priv, unsigned int reg) { @@ -427,6 +446,9 @@ static bool is_dual_link_lvds(struct drm_i915_private *dev_priv, if (i915_lvds_channel_mode > 0) return i915_lvds_channel_mode == 2; + if (dmi_check_system(intel_dual_link_lvds)) + return true; + if (dev_priv->lvds_val) val = dev_priv->lvds_val; else { -- cgit v1.2.3-59-g8ed1b From ec34a01de31128e5c08e5f05c47f4a787f45a33c Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Tue, 3 Apr 2012 23:03:00 -0700 Subject: drm/i915: VCS is not the last ring I made a mistake, please forgive me. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=48254 Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 2a1f0d7de0f2..7e009950ac39 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -742,7 +742,7 @@ static void i915_ring_error_state(struct seq_file *m, struct drm_i915_error_state *error, unsigned ring) { - BUG_ON(ring > VCS); /* shut up confused gcc */ + BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */ seq_printf(m, "%s command stream:\n", ring_str(ring)); seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]); seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); -- cgit v1.2.3-59-g8ed1b From 2099810f903caa1920f3ef6014fb7f36e4786490 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 3 Apr 2012 11:53:05 +0100 Subject: drm/radeon: enable pci bus mastering after card is initialised (v2) This closes a race seen with kexec where we enable PCI bus mastering but the card has been reinitialised fully yet. This was previously fixed by a patch from Jerome, but this should close the race completely. v2: add SI support as suggested by Alex. 
Reported-and-tested-by: Markus Trippelsdorf Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/r100.c | 4 ++++ drivers/gpu/drm/radeon/r600.c | 3 +++ drivers/gpu/drm/radeon/radeon_device.c | 1 - drivers/gpu/drm/radeon/radeon_kms.c | 2 -- drivers/gpu/drm/radeon/si.c | 2 ++ 5 files changed, 9 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 81801c176aa5..e11df778e194 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -1180,6 +1180,10 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) WREG32(RADEON_CP_RB_WPTR_DELAY, 0); WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D); WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM); + + /* at this point everything should be setup correctly to enable master */ + pci_set_master(rdev->pdev); + radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring); if (r) { diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 391bd2636a80..4added1c7ee9 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -3047,6 +3047,9 @@ int r600_irq_init(struct radeon_device *rdev) else r600_disable_interrupt_state(rdev); + /* at this point everything should be setup correctly to enable master */ + pci_set_master(rdev->pdev); + /* enable irqs */ r600_enable_interrupts(rdev); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index ea7df16e2f84..0fb4f8993cae 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -955,7 +955,6 @@ int radeon_resume_kms(struct drm_device *dev) console_unlock(); return -1; } - pci_set_master(dev->pdev); /* resume AGP if in use */ radeon_agp_resume(rdev); radeon_resume(rdev); diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 3c2628b14d56..f1016a5820d1 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -57,8 +57,6 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) } dev->dev_private = (void *)rdev; - pci_set_master(dev->pdev); - /* update BUS flag */ if (drm_pci_device_is_agp(dev)) { flags |= RADEON_IS_AGP; diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index ac7a199ffece..14919e1539fa 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -3217,6 +3217,8 @@ static int si_irq_init(struct radeon_device *rdev) /* force the active interrupt state to all disabled */ si_disable_interrupt_state(rdev); + pci_set_master(rdev->pdev); + /* enable irqs */ si_enable_interrupts(rdev); -- cgit v1.2.3-59-g8ed1b From 6a7068b4ef17dfb9de3191321f1adc91fa1659ca Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 3 Apr 2012 16:23:41 +0100 Subject: drm/radeon/kms: attempt to avoid copying data twice on coherent cards. (v3) On coherent systems (not-AGP) the IB should be in cached memory so should be just as fast, so we can avoid copying to temporary pages and just use it directly. provides minor speedups on rv530: gears ~1820->1860, ipers: 29.9->30.6, but always good to use less CPU if we can. v3: cleanup unneeded bits. 
Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_cs.c | 37 ++++++++++++++++++++++++------------- 1 file changed, 24 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 5cac83278338..e7b0b5d51bc3 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -278,11 +278,16 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) p->chunks[p->chunk_ib_idx].length_dw); return -EINVAL; } - p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL); - p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL); - if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL || - p->chunks[p->chunk_ib_idx].kpage[1] == NULL) - return -ENOMEM; + if ((p->rdev->flags & RADEON_IS_AGP)) { + p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL); + p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL || + p->chunks[p->chunk_ib_idx].kpage[1] == NULL) { + kfree(p->chunks[i].kpage[0]); + kfree(p->chunks[i].kpage[1]); + return -ENOMEM; + } + } p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1; p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1; p->chunks[p->chunk_ib_idx].last_copied_page = -1; @@ -323,8 +328,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) kfree(parser->relocs_ptr); for (i = 0; i < parser->nchunks; i++) { kfree(parser->chunks[i].kdata); - kfree(parser->chunks[i].kpage[0]); - kfree(parser->chunks[i].kpage[1]); + if ((parser->rdev->flags & RADEON_IS_AGP)) { + kfree(parser->chunks[i].kpage[0]); + kfree(parser->chunks[i].kpage[1]); + } } kfree(parser->chunks); kfree(parser->chunks_array); @@ -573,6 +580,7 @@ int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx) struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx]; int i; int size = PAGE_SIZE; + bool copy1 = (p->rdev->flags & RADEON_IS_AGP) ? false : true; for (i = ibc->last_copied_page + 1; i < pg_idx; i++) { if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)), @@ -583,14 +591,16 @@ int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx) } } - new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1; - if (pg_idx == ibc->last_page_index) { size = (ibc->length_dw * 4) % PAGE_SIZE; - if (size == 0) - size = PAGE_SIZE; + if (size == 0) + size = PAGE_SIZE; } + new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1; + if (copy1) + ibc->kpage[new_page] = p->ib->ptr + (pg_idx * (PAGE_SIZE / 4)); + if (DRM_COPY_FROM_USER(ibc->kpage[new_page], ibc->user_ptr + (pg_idx * PAGE_SIZE), size)) { @@ -598,8 +608,9 @@ int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx) return 0; } - /* copy to IB here */ - memcpy((void *)(p->ib->ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size); + /* copy to IB for non single case */ + if (!copy1) + memcpy((void *)(p->ib->ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size); ibc->last_copied_page = pg_idx; ibc->kpage_idx[new_page] = pg_idx; -- cgit v1.2.3-59-g8ed1b From 971ca4717830c03a50e1ad9993c85601a0496de7 Mon Sep 17 00:00:00 2001 From: Santosh Nayak Date: Thu, 5 Apr 2012 11:31:08 +0530 Subject: agp: Remove 'break' after 'return' statement. 'break' is unnecessary after 'return' statement. Remove all such 'break' as clean up. 
Signed-off-by: Santosh Nayak Signed-off-by: Dave Airlie --- drivers/char/agp/generic.c | 2 -- drivers/char/agp/sgi-agp.c | 1 - 2 files changed, 3 deletions(-) (limited to 'drivers') diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c index 17e05d1076b3..38022ea7eeb4 100644 --- a/drivers/char/agp/generic.c +++ b/drivers/char/agp/generic.c @@ -1010,7 +1010,6 @@ int agp_generic_free_gatt_table(struct agp_bridge_data *bridge) case LVL2_APER_SIZE: /* The generic routines can't deal with 2 level gatt's */ return -EINVAL; - break; default: page_order = 0; break; @@ -1077,7 +1076,6 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type) case LVL2_APER_SIZE: /* The generic routines can't deal with 2 level gatt's */ return -EINVAL; - break; default: num_entries = 0; break; diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c index ffa888cd1c88..192000377737 100644 --- a/drivers/char/agp/sgi-agp.c +++ b/drivers/char/agp/sgi-agp.c @@ -158,7 +158,6 @@ static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start, break; case LVL2_APER_SIZE: return -EINVAL; - break; default: num_entries = 0; break; -- cgit v1.2.3-59-g8ed1b From ae85226ebe474c9ecfc257191edca184b70ffbc2 Mon Sep 17 00:00:00 2001 From: Santosh Nayak Date: Thu, 5 Apr 2012 11:31:44 +0530 Subject: agp: Use u32 __iomem annotation to silence sparse warning. Replace "void *" by "u32 __iomem *" to silence sparse warning. Signed-off-by: Santosh Nayak Signed-off-by: Dave Airlie --- drivers/char/agp/generic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c index 38022ea7eeb4..a0df182f6f7d 100644 --- a/drivers/char/agp/generic.c +++ b/drivers/char/agp/generic.c @@ -958,7 +958,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge) if (set_memory_uc((unsigned long)table, 1 << page_order)) printk(KERN_WARNING "Could not set GATT table memory to UC!\n"); - bridge->gatt_table = (void *)table; + bridge->gatt_table = (u32 __iomem *)table; #else bridge->gatt_table = ioremap_nocache(virt_to_phys(table), (PAGE_SIZE * (1 << page_order))); -- cgit v1.2.3-59-g8ed1b From cce66a283e36e7479774de47ae9f33f7db2b8fcf Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Tue, 27 Mar 2012 18:59:38 -0700 Subject: drm/i915: add rc6 residency times to debugfs RC6 residency should be in intervals of 1.28us, and the counter wraps. Here is an example using awk to get the various RC6 and RC6+ residency times in seconds, since boot. cat /sys/kernel/debug/dri/0/i915_drpc_info | grep residency | awk -F':' -F' ' '{print $5 * 1.28 / 1000000}' This is primarily for QA, but has other applications as well. An upcoming patch to add interfaces should be more interesting to application developers. 
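For userspace that samples these counters, a minimal sketch of turning two raw readings into elapsed RC6 time follows. This is a hypothetical helper, not part of the patch; it assumes the values are parsed out of i915_drpc_info as 32-bit unsigned integers and relies only on the 1.28us granularity and wrapping behaviour described above.

    #include <stdint.h>

    /* Hypothetical userspace helper: given two samples of one of the
     * GEN6_GT_GFX_RC6* counters (units of 1.28us, 32-bit, wrapping),
     * return the RC6 residency accumulated between the samples, in
     * seconds. Unsigned subtraction handles a single counter wrap. */
    static double rc6_residency_seconds(uint32_t before, uint32_t after)
    {
        uint32_t ticks = after - before;

        return (double)ticks * 1.28 / 1000000.0;
    }
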
v2: move comment to the correct place v3: display with %u instead of %d, for Ouping CC: Ouping Zhang Reviewed-by: Eugeni Dodonov Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 11 +++++++++++ drivers/gpu/drm/i915/i915_reg.h | 5 +++++ 2 files changed, 16 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 967fb928c577..97c963082d4d 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1171,6 +1171,17 @@ static int gen6_drpc_info(struct seq_file *m) seq_printf(m, "Core Power Down: %s\n", yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); + + /* Not exactly sure what this is */ + seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n", + I915_READ(GEN6_GT_GFX_RC6_LOCKED)); + seq_printf(m, "RC6 residency since boot: %u\n", + I915_READ(GEN6_GT_GFX_RC6)); + seq_printf(m, "RC6+ residency since boot: %u\n", + I915_READ(GEN6_GT_GFX_RC6p)); + seq_printf(m, "RC6++ residency since boot: %u\n", + I915_READ(GEN6_GT_GFX_RC6pp)); + return 0; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 6924f44a88df..972321fda4d8 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3970,6 +3970,11 @@ GEN6_PM_RP_DOWN_THRESHOLD | \ GEN6_PM_RP_DOWN_TIMEOUT) +#define GEN6_GT_GFX_RC6_LOCKED 0x138104 +#define GEN6_GT_GFX_RC6 0x138108 +#define GEN6_GT_GFX_RC6p 0x13810C +#define GEN6_GT_GFX_RC6pp 0x138110 + #define GEN6_PCODE_MAILBOX 0x138124 #define GEN6_PCODE_READY (1<<31) #define GEN6_READ_OC_PARAMS 0xc -- cgit v1.2.3-59-g8ed1b From 9a5a53b3923d6e8cc4d3b352301059ac6b5c8f68 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 22 Mar 2012 15:10:00 +0000 Subject: drm/i915: Reorganise rules for get_fence/put_fence By simplifying the rules to calling get_fence when writing to the through the GTT in a tiled manner, and calling put_fence before writing to the object through the GTT in a linear manner, the code becomes clearer and there is less chance of making a mistake. Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter [danvet: fixed up conflict with ppgtt code and spelling in a new comment.] 
Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 6 ++++-- drivers/gpu/drm/i915/i915_gem.c | 14 +++++++------- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 15 +++++---------- drivers/gpu/drm/i915/intel_display.c | 10 ++++------ 4 files changed, 20 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 92e496afc6f4..9bc64d208334 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1256,13 +1256,15 @@ int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj, struct intel_ring_buffer *pipelined); int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); -static inline void +static inline bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj) { if (obj->fence_reg != I915_FENCE_REG_NONE) { struct drm_i915_private *dev_priv = obj->base.dev->dev_private; dev_priv->fence_regs[obj->fence_reg].pin_count++; - } + return true; + } else + return false; } static inline void diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b851bd34ca18..ad717511edca 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1078,10 +1078,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (!obj->has_global_gtt_mapping) i915_gem_gtt_bind_object(obj, obj->cache_level); - if (obj->tiling_mode == I915_TILING_NONE) - ret = i915_gem_object_put_fence(obj); - else - ret = i915_gem_object_get_fence(obj, NULL); + ret = i915_gem_object_get_fence(obj, NULL); if (ret) goto unlock; @@ -2395,19 +2392,19 @@ i915_find_fence_reg(struct drm_device *dev, } /** - * i915_gem_object_get_fence - set up a fence reg for an object + * i915_gem_object_get_fence - set up fencing for an object * @obj: object to map through a fence reg * @pipelined: ring on which to queue the change, or NULL for CPU access - * @interruptible: must we wait uninterruptibly for the register to retire? * * When mapping objects through the GTT, userspace wants to be able to write * to them without having to worry about swizzling if the object is tiled. - * * This function walks the fence regs looking for a free one for @obj, * stealing one if it can't find any. * * It then sets up the reg based on the object's properties: address, pitch * and tiling format. + * + * For an untiled surface, this removes any existing fence. */ int i915_gem_object_get_fence(struct drm_i915_gem_object *obj, @@ -2418,6 +2415,9 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj, struct drm_i915_fence_reg *reg; int ret; + if (obj->tiling_mode == I915_TILING_NONE) + return i915_gem_object_put_fence(obj); + /* XXX disable pipelining. There are bugs. Shocking. 
*/ pipelined = NULL; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 254e2f6ac4f0..56d10017d045 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -530,18 +530,13 @@ pin_and_fence_object(struct drm_i915_gem_object *obj, if (has_fenced_gpu_access) { if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { - if (obj->tiling_mode) { - ret = i915_gem_object_get_fence(obj, ring); - if (ret) - goto err_unpin; + ret = i915_gem_object_get_fence(obj, ring); + if (ret) + goto err_unpin; + if (i915_gem_object_pin_fence(obj)) entry->flags |= __EXEC_OBJECT_HAS_FENCE; - i915_gem_object_pin_fence(obj); - } else { - ret = i915_gem_object_put_fence(obj); - if (ret) - goto err_unpin; - } + obj->pending_fenced_gpu_access = true; } } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 37514a52b05c..9dfa1fa39b39 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2152,13 +2152,11 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, * framebuffer compression. For simplicity, we always install * a fence as the cost is not that onerous. */ - if (obj->tiling_mode != I915_TILING_NONE) { - ret = i915_gem_object_get_fence(obj, pipelined); - if (ret) - goto err_unpin; + ret = i915_gem_object_get_fence(obj, pipelined); + if (ret) + goto err_unpin; - i915_gem_object_pin_fence(obj); - } + i915_gem_object_pin_fence(obj); dev_priv->mm.interruptible = true; return 0; -- cgit v1.2.3-59-g8ed1b From 2911a35b2e4eb87ec48d03aeb11f019e51ae3c0d Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Thu, 5 Apr 2012 14:47:36 -0700 Subject: drm/i915: use semaphores for the display plane In theory this will have performance and power improvements. Performance because we don't need to stall when the scanout BO is busy, and power because we don't have to stall when the BO is busy (and the ring can even go to sleep if the HW supports it). v2: squash 2 patches into 1 (me) un-inline the enable_semaphores function (Daniel) remove comment about SNB hangs from i915_gem_object_sync (Chris) rename intel_enable_semaphores to i915_semaphore_is_enabled (me) removed page flip comment; "no why" (Chris) To address other comments from Daniel (irc): update the comment to say 'vt-d is crap, don't enable semaphores' - I think you misinterpreted Chris' comment, it already exists. checking out whether we can pageflip on the render ring on ivb (didn't work on early silicon) - We don't want to enable workarounds for early silicon unless we have to. - I can't find any references in the docs about this. optionally use it if the fb is already busy on the render ring - This should be how the code already worked, unless I am misunderstanding your meaning. 
Signed-off-by: Ben Widawsky Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 15 ++++++++ drivers/gpu/drm/i915/i915_drv.h | 4 ++ drivers/gpu/drm/i915/i915_gem.c | 51 +++++++++++++++++++++---- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 60 +----------------------------- 4 files changed, 64 insertions(+), 66 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index c33b0a41a73d..96f8efc7a0d0 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -394,6 +394,21 @@ void intel_detect_pch(struct drm_device *dev) } } +bool i915_semaphore_is_enabled(struct drm_device *dev) +{ + if (INTEL_INFO(dev)->gen < 6) + return 0; + + if (i915_semaphores >= 0) + return i915_semaphores; + + /* Enable semaphores on SNB when IO remapping is off */ + if (INTEL_INFO(dev)->gen == 6) + return !intel_iommu_enabled; + + return 1; +} + void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) { int count; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 9bc64d208334..7dcdccb5580e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -38,6 +38,7 @@ #include #include #include +#include /* General customization: */ @@ -1230,6 +1231,8 @@ void i915_gem_lastclose(struct drm_device *dev); int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj); +int i915_gem_object_sync(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *to); void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, struct intel_ring_buffer *ring, u32 seqno); @@ -1439,6 +1442,7 @@ extern void gen6_set_rps(struct drm_device *dev, u8 val); extern void intel_detect_pch(struct drm_device *dev); extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); +extern bool i915_semaphore_is_enabled(struct drm_device *dev); extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv); extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ad717511edca..d75a6577b97a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1953,6 +1953,48 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj) return 0; } +int +i915_gem_object_sync(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *to) +{ + struct intel_ring_buffer *from = obj->ring; + u32 seqno; + int ret, idx; + + if (from == NULL || to == from) + return 0; + + if (!i915_semaphore_is_enabled(obj->base.dev)) + return i915_gem_object_wait_rendering(obj); + + idx = intel_ring_sync_index(from, to); + + seqno = obj->last_rendering_seqno; + if (seqno <= from->sync_seqno[idx]) + return 0; + + if (seqno == from->outstanding_lazy_request) { + struct drm_i915_gem_request *request; + + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL) + return -ENOMEM; + + ret = i915_add_request(from, NULL, request); + if (ret) { + kfree(request); + return ret; + } + + seqno = request->seqno; + } + + from->sync_seqno[idx] = seqno; + + return to->sync_to(to, from, seqno - 1); + +} + static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj) { u32 old_write_domain, old_read_domains; @@ -2926,11 +2968,6 @@ int i915_gem_object_set_cache_level(struct 
drm_i915_gem_object *obj, * Prepare buffer for display plane (scanout, cursors, etc). * Can be called from an uninterruptible phase (modesetting) and allows * any flushes to be pipelined (for pageflips). - * - * For the display plane, we want to be in the GTT but out of any write - * domains. So in many ways this looks like set_to_gtt_domain() apart from the - * ability to pipeline the waits, pinning and any additional subtleties - * that may differentiate the display plane from ordinary buffers. */ int i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, @@ -2945,8 +2982,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, return ret; if (pipelined != obj->ring) { - ret = i915_gem_object_wait_rendering(obj); - if (ret == -ERESTARTSYS) + ret = i915_gem_object_sync(obj, pipelined); + if (ret) return ret; } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 56d10017d045..2a24d0cd9b46 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -835,64 +835,6 @@ i915_gem_execbuffer_flush(struct drm_device *dev, return 0; } -static bool -intel_enable_semaphores(struct drm_device *dev) -{ - if (INTEL_INFO(dev)->gen < 6) - return 0; - - if (i915_semaphores >= 0) - return i915_semaphores; - - /* Disable semaphores on SNB */ - if (INTEL_INFO(dev)->gen == 6) - return 0; - - return 1; -} - -static int -i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *to) -{ - struct intel_ring_buffer *from = obj->ring; - u32 seqno; - int ret, idx; - - if (from == NULL || to == from) - return 0; - - /* XXX gpu semaphores are implicated in various hard hangs on SNB */ - if (!intel_enable_semaphores(obj->base.dev)) - return i915_gem_object_wait_rendering(obj); - - idx = intel_ring_sync_index(from, to); - - seqno = obj->last_rendering_seqno; - if (seqno <= from->sync_seqno[idx]) - return 0; - - if (seqno == from->outstanding_lazy_request) { - struct drm_i915_gem_request *request; - - request = kzalloc(sizeof(*request), GFP_KERNEL); - if (request == NULL) - return -ENOMEM; - - ret = i915_add_request(from, NULL, request); - if (ret) { - kfree(request); - return ret; - } - - seqno = request->seqno; - } - - from->sync_seqno[idx] = seqno; - - return to->sync_to(to, from, seqno - 1); -} - static int i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips) { @@ -954,7 +896,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, } list_for_each_entry(obj, objects, exec_list) { - ret = i915_gem_execbuffer_sync_rings(obj, ring); + ret = i915_gem_object_sync(obj, ring); if (ret) return ret; } -- cgit v1.2.3-59-g8ed1b From 3fdcf43192559f53305644d0c1e0f7dda398f091 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 6 Apr 2012 11:46:27 -0700 Subject: drm/i915: use register name when disabling VGA Just noticed this while verifying the VGA disable code. 
Signed-off-by: Jesse Barnes Reviewed-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9dfa1fa39b39..42dbe1475602 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9522,7 +9522,7 @@ static void i915_disable_vga(struct drm_device *dev) vga_reg = VGACNTRL; vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); - outb(1, VGA_SR_INDEX); + outb(SR01, VGA_SR_INDEX); sr1 = inb(VGA_SR_DATA); outb(sr1 | 1<<5, VGA_SR_DATA); vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); -- cgit v1.2.3-59-g8ed1b From 26883c31b0799e76edf8f0ea8be48b64e09b2a7d Mon Sep 17 00:00:00 2001 From: Daniel Kurtz Date: Fri, 30 Mar 2012 19:46:36 +0800 Subject: drm/i915/intel_i2c: handle zero-length writes A common method of probing an i2c bus is trying to do a zero-length write. Handle this case by checking the length first before decrementing it. This is actually important, since attempting a zero-length write is one of the ways that i2cdetect and i2c_new_probed_device detect whether there is device present on the bus with a given address. Signed-off-by: Daniel Kurtz Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_i2c.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index c12db7265893..99a04f8bcb1f 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -248,9 +248,10 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg, u32 val, loop; val = loop = 0; - do { - val |= *buf++ << (8 * loop); - } while (--len && ++loop < 4); + while (len && loop < 4) { + val |= *buf++ << (8 * loop++); + len -= 1; + } I915_WRITE(GMBUS3 + reg_offset, val); I915_WRITE(GMBUS1 + reg_offset, -- cgit v1.2.3-59-g8ed1b From 7a39a9d4767e8d22d60f2c4bf5eece4f4398c274 Mon Sep 17 00:00:00 2001 From: Daniel Kurtz Date: Fri, 30 Mar 2012 19:46:37 +0800 Subject: drm/i915/intel_i2c: use double-buffered writes The GMBUS controller GMBUS3 register is double-buffered. Take advantage of this by writing two 4-byte words before the first wait for HW_RDY. This helps keep the GMBUS controller from becoming idle during long writes. In fact, during experiments using the GMBUS interrupts, the HW_RDY interrupt would only trigger for transactions >4 bytes after 2 writes to GMBUS3. 
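Taken together with the zero-length fix just above, the resulting write loop is roughly as follows. This is a condensed, illustrative restatement only (the hunk below is authoritative): each pass queues the next word in the double-buffered GMBUS3 register before waiting, so the controller always has data ready while software polls for HW_RDY.

    while (len) {
        u32 val = 0, loop = 0;

        while (len && loop < 4) {               /* pack up to 4 bytes */
            val |= *buf++ << (8 * loop++);
            len -= 1;
        }

        I915_WRITE(GMBUS3 + reg_offset, val);   /* prime the double buffer */
        POSTING_READ(GMBUS2 + reg_offset);

        if (wait_for(I915_READ(GMBUS2 + reg_offset) &
                     (GMBUS_SATOER | GMBUS_HW_RDY), 50))
            return -ETIMEDOUT;
        if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
            return -ENXIO;
    }
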
Signed-off-by: Daniel Kurtz Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_i2c.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 99a04f8bcb1f..f02e52aca746 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -262,13 +262,6 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg, GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); POSTING_READ(GMBUS2 + reg_offset); while (len) { - if (wait_for(I915_READ(GMBUS2 + reg_offset) & - (GMBUS_SATOER | GMBUS_HW_RDY), - 50)) - return -ETIMEDOUT; - if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) - return -ENXIO; - val = loop = 0; do { val |= *buf++ << (8 * loop); @@ -276,6 +269,13 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg, I915_WRITE(GMBUS3 + reg_offset, val); POSTING_READ(GMBUS2 + reg_offset); + + if (wait_for(I915_READ(GMBUS2 + reg_offset) & + (GMBUS_SATOER | GMBUS_HW_RDY), + 50)) + return -ETIMEDOUT; + if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) + return -ENXIO; } return 0; } -- cgit v1.2.3-59-g8ed1b From e646d5773572bf52017983d758bdf05777dc5600 Mon Sep 17 00:00:00 2001 From: Daniel Kurtz Date: Fri, 30 Mar 2012 19:46:38 +0800 Subject: drm/i915/intel_i2c: always wait for IDLE before clearing NAK The GMBUS controller can report a NAK condition while a transaction is still active. If the driver is fast enough, and the bus is slow enough, the driver may clear the NAK condition while the controller is still busy, resulting in a confused GMBUS controller. This will leave the controller in a bad state such that the next transaction may fail. Also, return -ENXIO if a device NAKs a transaction. Note: this patch also refactors gmbus_xfer to remove the "done" label. Signed-off-by: Daniel Kurtz Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_i2c.c | 41 ++++++++++++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index f02e52aca746..9707868881ec 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -324,26 +324,47 @@ gmbus_xfer(struct i2c_adapter *adapter, goto clear_err; } - goto done; + /* Mark the GMBUS interface as disabled after waiting for idle. + * We will re-enable it at the start of the next xfer, + * till then let it sleep. + */ + if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, 10)) + DRM_INFO("GMBUS [%s] timed out waiting for idle\n", + adapter->name); + I915_WRITE(GMBUS0 + reg_offset, 0); + ret = i; + goto out; clear_err: + /* + * Wait for bus to IDLE before clearing NAK. + * If we clear the NAK while bus is still active, then it will stay + * active and the next transaction may fail. + */ + if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, + 10)) + DRM_INFO("GMBUS [%s] timed out after NAK\n", adapter->name); + /* Toggle the Software Clear Interrupt bit. This has the effect * of resetting the GMBUS controller and so clearing the * BUS_ERROR raised by the slave's NAK. */ I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT); I915_WRITE(GMBUS1 + reg_offset, 0); + I915_WRITE(GMBUS0 + reg_offset, 0); -done: - /* Mark the GMBUS interface as disabled after waiting for idle. - * We will re-enable it at the start of the next xfer, - * till then let it sleep. 
+ DRM_DEBUG_DRIVER("GMBUS [%s] NAK for addr: %04x %c(%d)\n", + adapter->name, msgs[i].addr, + (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len); + + /* + * If no ACK is received during the address phase of a transaction, + * the adapter must report -ENXIO. + * It is not clear what to return if no ACK is received at other times. + * So, we always return -ENXIO in all NAK cases, to ensure we send + * it at least during the one case that is specified. */ - if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, 10)) - DRM_INFO("GMBUS [%s] timed out waiting for idle\n", - bus->adapter.name); - I915_WRITE(GMBUS0 + reg_offset, 0); - ret = i; + ret = -ENXIO; goto out; timeout: -- cgit v1.2.3-59-g8ed1b From 72d66afd1461effb143784a0f6cde2a9f9908b70 Mon Sep 17 00:00:00 2001 From: Daniel Kurtz Date: Fri, 30 Mar 2012 19:46:39 +0800 Subject: drm/i915/intel_i2c: use WAIT cycle, not STOP The i915 is only able to generate a STOP cycle (i.e. finalize an i2c transaction) during a DATA or WAIT phase. In other words, the controller rejects a STOP requested as part of the first transaction in a sequence. Thus, for the first transaction we must always use a WAIT cycle, detect when the device has finished (and is in a WAIT phase), and then either start the next transaction, or, if there are no more transactions, generate a STOP cycle. Note: Theoretically, the last transaction of a multi-transaction sequence could initiate a STOP cycle. However, this slight optimization is left for another patch. We return -ETIMEDOUT if the hardware doesn't deactivate after the STOP cycle. Signed-off-by: Daniel Kurtz [danvet: added comment to the code that gmbus can't generate STOP on the very first cycle.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_i2c.c | 33 ++++++++++++++++++--------------- 1 file changed, 18 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 9707868881ec..291e51ec309b 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -204,8 +204,7 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin) } static int -gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, - bool last) +gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg) { int reg_offset = dev_priv->gpio_mmio_base; u16 len = msg->len; @@ -213,7 +212,6 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_WAIT | - (last ? GMBUS_CYCLE_STOP : 0) | (len << GMBUS_BYTE_COUNT_SHIFT) | (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ | GMBUS_SW_RDY); @@ -239,8 +237,7 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, } static int -gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg, - bool last) +gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) { int reg_offset = dev_priv->gpio_mmio_base; u16 len = msg->len; @@ -256,7 +253,6 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg, I915_WRITE(GMBUS3 + reg_offset, val); I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_WAIT | - (last ? 
GMBUS_CYCLE_STOP : 0) | (msg->len << GMBUS_BYTE_COUNT_SHIFT) | (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); @@ -289,7 +285,8 @@ gmbus_xfer(struct i2c_adapter *adapter, struct intel_gmbus, adapter); struct drm_i915_private *dev_priv = bus->dev_priv; - int i, reg_offset, ret; + int i, reg_offset; + int ret = 0; mutex_lock(&dev_priv->gmbus_mutex); @@ -303,20 +300,17 @@ gmbus_xfer(struct i2c_adapter *adapter, I915_WRITE(GMBUS0 + reg_offset, bus->reg0); for (i = 0; i < num; i++) { - bool last = i + 1 == num; - if (msgs[i].flags & I2C_M_RD) - ret = gmbus_xfer_read(dev_priv, &msgs[i], last); + ret = gmbus_xfer_read(dev_priv, &msgs[i]); else - ret = gmbus_xfer_write(dev_priv, &msgs[i], last); + ret = gmbus_xfer_write(dev_priv, &msgs[i]); if (ret == -ETIMEDOUT) goto timeout; if (ret == -ENXIO) goto clear_err; - if (!last && - wait_for(I915_READ(GMBUS2 + reg_offset) & + if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50)) goto timeout; @@ -324,15 +318,24 @@ gmbus_xfer(struct i2c_adapter *adapter, goto clear_err; } + /* Generate a STOP condition on the bus. Note that gmbus can't generata + * a STOP on the very first cycle. To simplify the code we + * unconditionally generate the STOP condition with an additional gmbus + * cycle. */ + I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_STOP | GMBUS_SW_RDY); + /* Mark the GMBUS interface as disabled after waiting for idle. * We will re-enable it at the start of the next xfer, * till then let it sleep. */ - if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, 10)) + if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, + 10)) { DRM_INFO("GMBUS [%s] timed out waiting for idle\n", adapter->name); + ret = -ETIMEDOUT; + } I915_WRITE(GMBUS0 + reg_offset, 0); - ret = i; + ret = ret ?: i; goto out; clear_err: -- cgit v1.2.3-59-g8ed1b From 56f9eac05489912ac0165ffc0ebff0f8588f77d2 Mon Sep 17 00:00:00 2001 From: Daniel Kurtz Date: Fri, 30 Mar 2012 19:46:40 +0800 Subject: drm/i915/intel_i2c: use INDEX cycles for i2c read transactions It is very common for an i2c device to require a small 1 or 2 byte write followed by a read. For example, when reading from an i2c EEPROM it is common to write an address, offset or index followed by reading some values. The i915 gmbus controller provides a special "INDEX" cycle for performing such a small write followed by a read. The INDEX can be either one or two bytes long. The advantage of using such a cycle is that the CPU has slightly less work to do once the read with INDEX cycle is started.
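For reference, a minimal sketch (not taken from this patch) of the kind of back-to-back write/read transaction that the INDEX optimization targets, using the standard Linux i2c_msg/i2c_transfer API; the 0x50 slave address and the single-byte offset are made-up example values:

#include <linux/i2c.h>
#include <linux/kernel.h>

/* Hypothetical helper: read 'len' bytes starting at 'offset' from an
 * EEPROM-like device at address 0x50. A 1- or 2-byte write immediately
 * followed by a read is exactly the shape gmbus_is_index_read() detects
 * and folds into a single INDEX read cycle.
 */
static int example_indexed_read(struct i2c_adapter *adap, u8 offset,
				u8 *data, u16 len)
{
	struct i2c_msg msgs[] = {
		{
			.addr  = 0x50,		/* made-up slave address */
			.flags = 0,		/* write the offset */
			.len   = 1,
			.buf   = &offset,
		},
		{
			.addr  = 0x50,
			.flags = I2C_M_RD,	/* read follows the write */
			.len   = len,
			.buf   = data,
		},
	};

	/* i2c_transfer() returns the number of messages transferred (2)
	 * on success or a negative error code.
	 */
	return i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
}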
Signed-off-by: Daniel Kurtz Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_i2c.c | 54 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 50 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 291e51ec309b..5e0912a4a737 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -204,13 +204,15 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin) } static int -gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg) +gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, + u32 gmbus1_index) { int reg_offset = dev_priv->gpio_mmio_base; u16 len = msg->len; u8 *buf = msg->buf; I915_WRITE(GMBUS1 + reg_offset, + gmbus1_index | GMBUS_CYCLE_WAIT | (len << GMBUS_BYTE_COUNT_SHIFT) | (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) | @@ -276,6 +278,46 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) return 0; } +/* + * The gmbus controller can combine a 1 or 2 byte write with a read that + * immediately follows it by using an "INDEX" cycle. + */ +static bool +gmbus_is_index_read(struct i2c_msg *msgs, int i, int num) +{ + return (i + 1 < num && + !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 && + (msgs[i + 1].flags & I2C_M_RD)); +} + +static int +gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs) +{ + int reg_offset = dev_priv->gpio_mmio_base; + u32 gmbus1_index = 0; + u32 gmbus5 = 0; + int ret; + + if (msgs[0].len == 2) + gmbus5 = GMBUS_2BYTE_INDEX_EN | + msgs[0].buf[1] | (msgs[0].buf[0] << 8); + if (msgs[0].len == 1) + gmbus1_index = GMBUS_CYCLE_INDEX | + (msgs[0].buf[0] << GMBUS_SLAVE_INDEX_SHIFT); + + /* GMBUS5 holds 16-bit index */ + if (gmbus5) + I915_WRITE(GMBUS5 + reg_offset, gmbus5); + + ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index); + + /* Clear GMBUS5 after each index transfer */ + if (gmbus5) + I915_WRITE(GMBUS5 + reg_offset, 0); + + return ret; +} + static int gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, @@ -300,10 +342,14 @@ gmbus_xfer(struct i2c_adapter *adapter, I915_WRITE(GMBUS0 + reg_offset, bus->reg0); for (i = 0; i < num; i++) { - if (msgs[i].flags & I2C_M_RD) - ret = gmbus_xfer_read(dev_priv, &msgs[i]); - else + if (gmbus_is_index_read(msgs, i, num)) { + ret = gmbus_xfer_index_read(dev_priv, &msgs[i]); + i += 1; /* set i to the index of the read xfer */ + } else if (msgs[i].flags & I2C_M_RD) { + ret = gmbus_xfer_read(dev_priv, &msgs[i], 0); + } else { ret = gmbus_xfer_write(dev_priv, &msgs[i]); + } if (ret == -ETIMEDOUT) goto timeout; -- cgit v1.2.3-59-g8ed1b From 90e6b26d6b28ade684a4b16b856a74f27bc644bc Mon Sep 17 00:00:00 2001 From: Daniel Kurtz Date: Fri, 30 Mar 2012 19:46:41 +0800 Subject: drm/i915/intel_i2c: reuse GMBUS2 value read in polling loop Save the GMBUS2 value read while polling for state changes, and then reuse this value when determining for which reason the loops were exited. This is a small optimization which saves a couple of bus accesses for memory mapped IO registers. To avoid "assigning in if clause" checkpatch errors", use a ret variable to store the wait_for macro return value. 
Signed-off-by: Daniel Kurtz Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_i2c.c | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 5e0912a4a737..c56cc350c9cf 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -219,13 +219,16 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, GMBUS_SLAVE_READ | GMBUS_SW_RDY); POSTING_READ(GMBUS2 + reg_offset); do { + int ret; u32 val, loop = 0; + u32 gmbus2; - if (wait_for(I915_READ(GMBUS2 + reg_offset) & - (GMBUS_SATOER | GMBUS_HW_RDY), - 50)) + ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) & + (GMBUS_SATOER | GMBUS_HW_RDY), + 50); + if (ret) return -ETIMEDOUT; - if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) + if (gmbus2 & GMBUS_SATOER) return -ENXIO; val = I915_READ(GMBUS3 + reg_offset); @@ -260,6 +263,9 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); POSTING_READ(GMBUS2 + reg_offset); while (len) { + int ret; + u32 gmbus2; + val = loop = 0; do { val |= *buf++ << (8 * loop); @@ -268,11 +274,12 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) I915_WRITE(GMBUS3 + reg_offset, val); POSTING_READ(GMBUS2 + reg_offset); - if (wait_for(I915_READ(GMBUS2 + reg_offset) & - (GMBUS_SATOER | GMBUS_HW_RDY), - 50)) + ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) & + (GMBUS_SATOER | GMBUS_HW_RDY), + 50); + if (ret) return -ETIMEDOUT; - if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) + if (gmbus2 & GMBUS_SATOER) return -ENXIO; } return 0; @@ -342,6 +349,8 @@ gmbus_xfer(struct i2c_adapter *adapter, I915_WRITE(GMBUS0 + reg_offset, bus->reg0); for (i = 0; i < num; i++) { + u32 gmbus2; + if (gmbus_is_index_read(msgs, i, num)) { ret = gmbus_xfer_index_read(dev_priv, &msgs[i]); i += 1; /* set i to the index of the read xfer */ @@ -356,11 +365,12 @@ gmbus_xfer(struct i2c_adapter *adapter, if (ret == -ENXIO) goto clear_err; - if (wait_for(I915_READ(GMBUS2 + reg_offset) & - (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), - 50)) + ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) & + (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), + 50); + if (ret) goto timeout; - if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) + if (gmbus2 & GMBUS_SATOER) goto clear_err; } -- cgit v1.2.3-59-g8ed1b From e2ba4fb313d91c9e888f973e65a736f8f55b12b6 Mon Sep 17 00:00:00 2001 From: Daniel Kurtz Date: Fri, 30 Mar 2012 19:46:42 +0800 Subject: drm/i915/intel_i2c: remove POSTING_READ() from gmbus transfers The POSTING_READ() calls were originally added to make sure the writes were flushed before any timing delays and across loops. Now that the code has settled a bit, let's remove them. 
Signed-off-by: Daniel Kurtz Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_i2c.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index c56cc350c9cf..cab879fedf3c 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -217,7 +217,6 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, (len << GMBUS_BYTE_COUNT_SHIFT) | (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ | GMBUS_SW_RDY); - POSTING_READ(GMBUS2 + reg_offset); do { int ret; u32 val, loop = 0; @@ -261,7 +260,6 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) (msg->len << GMBUS_BYTE_COUNT_SHIFT) | (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); - POSTING_READ(GMBUS2 + reg_offset); while (len) { int ret; u32 gmbus2; @@ -272,7 +270,6 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) } while (--len && ++loop < 4); I915_WRITE(GMBUS3 + reg_offset, val); - POSTING_READ(GMBUS2 + reg_offset); ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) & (GMBUS_SATOER | GMBUS_HW_RDY), -- cgit v1.2.3-59-g8ed1b From d1686ae3ab09a918efa7187cf7e2b2c748c905e1 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 10 Apr 2012 11:41:49 +0100 Subject: drm/i915: Ironlake shares the same video sprite controls as Sandybridge Well, almost. Just a couple of differences, Ironlake lacks a few of the RGB formats, only exposing x8r8g8b8, and lacks a couple of unused features. Given the similarities, we can then reuse the same routines as already written for Sandybridge to enable overlay support for Ironlake as well. Signed-off-by: Chris Wilson Reviewed-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_sprite.c | 60 +++++++++++++++++++++++++++---------- 1 file changed, 45 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index a464771a7240..da525b69f7bf 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -209,7 +209,7 @@ ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key) } static void -snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, +ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t x, uint32_t y, @@ -263,8 +263,8 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, if (obj->tiling_mode != I915_TILING_NONE) dvscntr |= DVS_TILED; - /* must disable */ - dvscntr |= DVS_TRICKLE_FEED_DISABLE; + if (IS_GEN6(dev)) + dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */ dvscntr |= DVS_ENABLE; /* Sizes are 0 based */ @@ -296,7 +296,7 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, } static void -snb_disable_plane(struct drm_plane *plane) +ilk_disable_plane(struct drm_plane *plane) { struct drm_device *dev = plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -334,7 +334,7 @@ intel_disable_primary(struct drm_crtc *crtc) } static int -snb_update_colorkey(struct drm_plane *plane, +ilk_update_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key) { struct drm_device *dev = plane->dev; @@ -363,7 +363,7 @@ snb_update_colorkey(struct drm_plane *plane, } static void -snb_get_colorkey(struct 
drm_plane *plane, struct drm_intel_sprite_colorkey *key) +ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key) { struct drm_device *dev = plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -617,6 +617,14 @@ static const struct drm_plane_funcs intel_plane_funcs = { .destroy = intel_destroy_plane, }; +static uint32_t ilk_plane_formats[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, +}; + static uint32_t snb_plane_formats[] = { DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888, @@ -631,34 +639,56 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe) { struct intel_plane *intel_plane; unsigned long possible_crtcs; + const uint32_t *plane_formats; + int num_plane_formats; int ret; - if (!(IS_GEN6(dev) || IS_GEN7(dev))) + if (INTEL_INFO(dev)->gen < 5) return -ENODEV; intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL); if (!intel_plane) return -ENOMEM; - if (IS_GEN6(dev)) { + switch (INTEL_INFO(dev)->gen) { + case 5: + case 6: intel_plane->max_downscale = 16; - intel_plane->update_plane = snb_update_plane; - intel_plane->disable_plane = snb_disable_plane; - intel_plane->update_colorkey = snb_update_colorkey; - intel_plane->get_colorkey = snb_get_colorkey; - } else if (IS_GEN7(dev)) { + intel_plane->update_plane = ilk_update_plane; + intel_plane->disable_plane = ilk_disable_plane; + intel_plane->update_colorkey = ilk_update_colorkey; + intel_plane->get_colorkey = ilk_get_colorkey; + + if (IS_GEN6(dev)) { + plane_formats = snb_plane_formats; + num_plane_formats = ARRAY_SIZE(snb_plane_formats); + } else { + plane_formats = ilk_plane_formats; + num_plane_formats = ARRAY_SIZE(ilk_plane_formats); + } + break; + + case 7: intel_plane->max_downscale = 2; intel_plane->update_plane = ivb_update_plane; intel_plane->disable_plane = ivb_disable_plane; intel_plane->update_colorkey = ivb_update_colorkey; intel_plane->get_colorkey = ivb_get_colorkey; + + plane_formats = snb_plane_formats; + num_plane_formats = ARRAY_SIZE(snb_plane_formats); + break; + + default: + return -ENODEV; } intel_plane->pipe = pipe; possible_crtcs = (1 << pipe); ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs, - &intel_plane_funcs, snb_plane_formats, - ARRAY_SIZE(snb_plane_formats), false); + &intel_plane_funcs, + plane_formats, num_plane_formats, + false); if (ret) kfree(intel_plane); -- cgit v1.2.3-59-g8ed1b From 0136db586c028f71e7cc21cc183064ff0d5919c8 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Tue, 10 Apr 2012 21:17:01 -0700 Subject: drm/i915: rc6 in sysfs Merge rc6 information into the power group for our device. Until now the i915 driver has not had any sysfs entries (aside from the connector stuff enabled by drm core). Since it seems like we're likely to have more in the future I created a new file for sysfs stubs, as well as the rc6 sysfs functions which don't really belong elsewhere (perhaps i915_suspend, but most of the stuff is in intel_display,c). displays rc6 modes enabled (as a hex mask): cat /sys/class/drm/card0/power/rc6_enable displays #ms GPU has been in rc6 since boot: cat /sys/class/drm/card0/power/rc6_residency_ms displays #ms GPU has been in deep rc6 since boot: cat /sys/class/drm/card0/power/rc6p_residency_ms displays #ms GPU has been in deepest rc6 since boot: cat /sys/class/drm/card0/power/rc6pp_residency_ms Important note: I've seen on SNB that even when RC6 is *not* enabled the rc6 register seems to have a random value in it. I can only guess at the reason reason for this. 
Those writing tools that utilize this value need to be careful and probably want to scrutinize the value very carefully. v2: use common rc6 residency units to milliseconds for the other RC6 types v3: don't create sysfs files for GEN <= 5 add a rc6_enable to show a mask of enabled rc6 types use unmerge instead of remove for sysfs group squash intel_enable_rc6() extraction into this patch v4: rename sysfs files (Chris) CC: Chris Wilson CC: Daniel Vetter f CC: Arjan van de Ven Signed-off-by: Ben Widawsky Reviewed-by: Chris Wilson [danvet: squash in the 64bit division fix by Chris Wilson.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/i915_dma.c | 4 ++ drivers/gpu/drm/i915/i915_drv.h | 5 ++ drivers/gpu/drm/i915/i915_sysfs.c | 111 +++++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/intel_display.c | 2 +- 5 files changed, 122 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/i915/i915_sysfs.c (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index ce7fc77678b4..f8013302581f 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -12,6 +12,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \ i915_gem_execbuffer.o \ i915_gem_gtt.o \ i915_gem_tiling.o \ + i915_sysfs.o \ i915_trace_points.o \ intel_display.o \ intel_crt.o \ diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 652f43f00ef2..333b7468c8ec 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -2139,6 +2139,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) } } + i915_setup_sysfs(dev); + /* Must be done after probing outputs */ intel_opregion_init(dev); acpi_video_register(); @@ -2190,6 +2192,8 @@ int i915_driver_unload(struct drm_device *dev) i915_mch_dev = NULL; spin_unlock(&mchdev_lock); + i915_teardown_sysfs(dev); + if (dev_priv->mm.inactive_shrinker.shrink) unregister_shrinker(&dev_priv->mm.inactive_shrinker); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7dcdccb5580e..a189f756036f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1385,6 +1385,10 @@ extern int i915_restore_state(struct drm_device *dev); extern int i915_save_state(struct drm_device *dev); extern int i915_restore_state(struct drm_device *dev); +/* i915_sysfs.c */ +void i915_setup_sysfs(struct drm_device *dev_priv); +void i915_teardown_sysfs(struct drm_device *dev_priv); + /* intel_i2c.c */ extern int intel_setup_gmbus(struct drm_device *dev); extern void intel_teardown_gmbus(struct drm_device *dev); @@ -1441,6 +1445,7 @@ extern void ironlake_enable_rc6(struct drm_device *dev); extern void gen6_set_rps(struct drm_device *dev, u8 val); extern void intel_detect_pch(struct drm_device *dev); extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); +extern int intel_enable_rc6(const struct drm_device *dev); extern bool i915_semaphore_is_enabled(struct drm_device *dev); extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c new file mode 100644 index 000000000000..f1b5108931fe --- /dev/null +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -0,0 +1,111 @@ +/* + * Copyright © 2012 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without 
restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Ben Widawsky + * + */ + +#include +#include +#include +#include +#include "i915_drv.h" + +static u32 calc_residency(struct drm_device *dev, const u32 reg) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u64 raw_time; /* 32b value may overflow during fixed point math */ + + if (!intel_enable_rc6(dev)) + return 0; + + raw_time = I915_READ(reg) * 128ULL + 500; + return do_div(raw_time, 100000); +} + +static ssize_t +show_rc6_mask(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); + return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev)); +} + +static ssize_t +show_rc6_ms(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); + u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6); + return snprintf(buf, PAGE_SIZE, "%u", rc6_residency); +} + +static ssize_t +show_rc6p_ms(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); + u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p); + return snprintf(buf, PAGE_SIZE, "%u", rc6p_residency); +} + +static ssize_t +show_rc6pp_ms(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); + u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp); + return snprintf(buf, PAGE_SIZE, "%u", rc6pp_residency); +} + +static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL); +static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL); +static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL); +static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL); + +static struct attribute *rc6_attrs[] = { + &dev_attr_rc6_enable.attr, + &dev_attr_rc6_residency_ms.attr, + &dev_attr_rc6p_residency_ms.attr, + &dev_attr_rc6pp_residency_ms.attr, + NULL +}; + +static struct attribute_group rc6_attr_group = { + .name = power_group_name, + .attrs = rc6_attrs +}; + +void i915_setup_sysfs(struct drm_device *dev) +{ + int ret; + + /* ILK doesn't have any residency information */ + if (INTEL_INFO(dev)->gen < 6) + return; + + ret = sysfs_merge_group(&dev->primary->kdev.kobj, &rc6_attr_group); + if (ret) + DRM_ERROR("sysfs setup failed\n"); +} + +void i915_teardown_sysfs(struct drm_device *dev) +{ + sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group); +} diff --git a/drivers/gpu/drm/i915/intel_display.c 
b/drivers/gpu/drm/i915/intel_display.c index 42dbe1475602..5b8b3d15d0ee 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -8547,7 +8547,7 @@ void intel_init_emon(struct drm_device *dev) dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); } -static int intel_enable_rc6(struct drm_device *dev) +int intel_enable_rc6(const struct drm_device *dev) { /* * Respect the kernel parameter if it is set -- cgit v1.2.3-59-g8ed1b From e3aef17286850a77f11a6dac28d972f65cde2235 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Tue, 10 Apr 2012 11:58:03 -0700 Subject: drm/i915: make DP configuration vars less confusing in ironlake_crtc_mode_se Both PCH and CPU eDP are DP, so set the is_dp flag to true. Add is_cpu_edp and is_pch_edp bools to make checking for each less verbose (rather than has_edp_encoder && !intel_encoder_is_pch_edp() sprinkled everywhere). And rename the "has_edp_encoder" variable to just "edp_encoder". With the above variables cleaned up, the rest of the code becomes a bit more readable and clear. Signed-off-by: Jesse Barnes Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 34 ++++++++++++++++------------------ 1 file changed, 16 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 5b8b3d15d0ee..743ec6b98476 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5963,9 +5963,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf; bool ok, has_reduced_clock = false, is_sdvo = false; bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; - struct intel_encoder *has_edp_encoder = NULL; struct drm_mode_config *mode_config = &dev->mode_config; - struct intel_encoder *encoder; + struct intel_encoder *encoder, *edp_encoder = NULL; const intel_limit_t *limit; int ret; struct fdi_m_n m_n = {0}; @@ -5974,6 +5973,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, int target_clock, pixel_multiplier, lane, link_bw, factor; unsigned int pipe_bpp; bool dither; + bool is_cpu_edp = false, is_pch_edp = false; list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { if (encoder->base.crtc != crtc) @@ -5999,7 +5999,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, is_dp = true; break; case INTEL_OUTPUT_EDP: - has_edp_encoder = encoder; + is_dp = true; + if (intel_encoder_is_pch_edp(&encoder->base)) + is_pch_edp = true; + else + is_cpu_edp = true; + edp_encoder = encoder; break; } @@ -6062,15 +6067,13 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, lane = 0; /* CPU eDP doesn't require FDI link, so just set DP M/N according to current link config */ - if (has_edp_encoder && - !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { + if (is_cpu_edp) { target_clock = mode->clock; - intel_edp_link_config(has_edp_encoder, - &lane, &link_bw); + intel_edp_link_config(edp_encoder, &lane, &link_bw); } else { /* [e]DP over FDI requires target mode clock instead of link clock */ - if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) + if (is_dp) target_clock = mode->clock; else target_clock = adjusted_mode->clock; @@ -6161,7 +6164,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, } dpll |= DPLL_DVO_HIGH_SPEED; } - if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) + if (is_dp && !is_cpu_edp) dpll |= DPLL_DVO_HIGH_SPEED; /* compute bitmask from p1 value */ @@ 
-6206,8 +6209,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, /* PCH eDP needs FDI, but CPU eDP does not */ if (!intel_crtc->no_pll) { - if (!has_edp_encoder || - intel_encoder_is_pch_edp(&has_edp_encoder->base)) { + if (!is_cpu_edp) { I915_WRITE(PCH_FP0(pipe), fp); I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); @@ -6285,7 +6287,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, pipeconf |= PIPECONF_DITHER_EN; pipeconf |= PIPECONF_DITHER_TYPE_SP; } - if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { + if (is_dp && !is_cpu_edp) { intel_dp_set_m_n(crtc, mode, adjusted_mode); } else { /* For non-DP output, clear any trans DP clock recovery setting.*/ @@ -6295,9 +6297,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, I915_WRITE(TRANSDPLINK_N1(pipe), 0); } - if (!intel_crtc->no_pll && - (!has_edp_encoder || - intel_encoder_is_pch_edp(&has_edp_encoder->base))) { + if (!intel_crtc->no_pll && (!edp_encoder || is_pch_edp)) { I915_WRITE(PCH_DPLL(pipe), dpll); /* Wait for the clocks to stabilize. */ @@ -6375,10 +6375,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); - if (has_edp_encoder && - !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { + if (is_cpu_edp) ironlake_set_pll_edp(crtc, adjusted_mode->clock); - } I915_WRITE(PIPECONF(pipe), pipeconf); POSTING_READ(PIPECONF(pipe)); -- cgit v1.2.3-59-g8ed1b From 211c568bc6a1ebd51e35724f6d733e76717ce368 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 10 Apr 2012 17:29:17 +0200 Subject: drm/i915: simplify ppgtt setup We don't need the pt_addr for the !dmar case, so drop the else and move the if (dmar) condition out of the loop. v2: Fixup whitespace damage noticed by Chris Wilson. v3: Collapse the two identical if blocks. Chris Wilson makes me look like a moron right now ... Noticed-by: Konstantin Belousov Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_gtt.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 4fb875de32e6..25c8bf9d1d4e 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -96,11 +96,10 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) GFP_KERNEL); if (!ppgtt->pt_dma_addr) goto err_pt_alloc; - } - for (i = 0; i < ppgtt->num_pd_entries; i++) { - dma_addr_t pt_addr; - if (dev_priv->mm.gtt->needs_dmar) { + for (i = 0; i < ppgtt->num_pd_entries; i++) { + dma_addr_t pt_addr; + pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096, PCI_DMA_BIDIRECTIONAL); @@ -112,8 +111,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) } ppgtt->pt_dma_addr[i] = pt_addr; - } else - pt_addr = page_to_phys(ppgtt->pt_pages[i]); + } } ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma; -- cgit v1.2.3-59-g8ed1b From f84131905b9b3b02b1c5061c0720e503c8d22778 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 10 Apr 2012 11:52:50 +0100 Subject: drm/i915: Allow concurrent read access between CPU and GPU domain Similar to allowing a buffer to be simultaneously read by the GPU and through the GTT, we wish to allow readback of the pages through the CPU domain whilst they are also being read by the GPU. Domain coherency is managed by allowing multiple readers, but only a single writer. 
This is used by mesa for its program cache which it may search for every new program every frame and then renews should it need to add a new program. During renewal, mesa copies the program bo currently executing through a CPU mapping onto the new bo. This patch allows the search and that copy to proceed without causing a stall on the current batch. Testcase: i-g-t/tests/gem_cpu_concurrent_blit Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d75a6577b97a..1bfb0d28e438 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3068,9 +3068,11 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) if (ret) return ret; - ret = i915_gem_object_wait_rendering(obj); - if (ret) - return ret; + if (write || obj->pending_gpu_write) { + ret = i915_gem_object_wait_rendering(obj); + if (ret) + return ret; + } i915_gem_object_flush_gtt_write_domain(obj); -- cgit v1.2.3-59-g8ed1b From f817586cebf1b946d1f327f9a596048efd6b64e9 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 10 Apr 2012 15:50:11 +0200 Subject: drm/i915: re-init modeset hw state after gpu reset After a gpu reset we need to re-init some of the hw state we only initialize when modeset is enabled, like rc6, hw contexts or render/GT core clock gating and workaround register settings. Note that this patch has a small change in the resume code: - rc6 on gen6+ is only restored for the modeset case (for more consistency with other callsites). This is no problem because recent kernels refuse to load drm/i915 without kms on gen6+ - rc6/emon on ilk is only restored for the modeset case. This is no problem because rc6 is disabled by default on ilk, and ums on ilk has never really been a supported option outside of horrible rhel backports. v2: Chris Wilson noticed that the clock gating settings are not the only piece of hw state we fail to restore after a gpu reset. v3: Move the call to modeset_init_hw in _reset out of the struct_mutex protected area - other callers don't hold it either.
Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 5 +++++ drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_suspend.c | 12 +----------- drivers/gpu/drm/i915/intel_display.c | 29 ++++++++++++++++++----------- 4 files changed, 25 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 96f8efc7a0d0..ccfdc813171d 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -851,9 +851,14 @@ int i915_reset(struct drm_device *dev, u8 flags) i915_gem_init_ppgtt(dev); mutex_unlock(&dev->struct_mutex); + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + intel_modeset_init_hw(dev); + drm_irq_uninstall(dev); drm_mode_config_reset(dev); drm_irq_install(dev); + mutex_lock(&dev->struct_mutex); } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a189f756036f..422f424deb4c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1433,6 +1433,7 @@ static inline void intel_unregister_dsm_handler(void) { return; } #endif /* CONFIG_ACPI */ /* modesetting */ +extern void intel_modeset_init_hw(struct drm_device *dev); extern void intel_modeset_init(struct drm_device *dev); extern void intel_modeset_gem_init(struct drm_device *dev); extern void intel_modeset_cleanup(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 2b5eb229ff2c..0c3e3bf67c28 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -879,17 +879,7 @@ int i915_restore_state(struct drm_device *dev) mutex_unlock(&dev->struct_mutex); if (drm_core_check_feature(dev, DRIVER_MODESET)) - intel_init_clock_gating(dev); - - if (IS_IRONLAKE_M(dev)) { - ironlake_enable_drps(dev); - intel_init_emon(dev); - } - - if (INTEL_INFO(dev)->gen >= 6) { - gen6_enable_rps(dev_priv); - gen6_update_ring_freq(dev_priv); - } + intel_modeset_init_hw(dev); mutex_lock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 743ec6b98476..aee389c442a4 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9530,6 +9530,23 @@ static void i915_disable_vga(struct drm_device *dev) POSTING_READ(vga_reg); } +void intel_modeset_init_hw(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + intel_init_clock_gating(dev); + + if (IS_IRONLAKE_M(dev)) { + ironlake_enable_drps(dev); + intel_init_emon(dev); + } + + if (IS_GEN6(dev) || IS_GEN7(dev)) { + gen6_enable_rps(dev_priv); + gen6_update_ring_freq(dev_priv); + } +} + void intel_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -9575,17 +9592,7 @@ void intel_modeset_init(struct drm_device *dev) i915_disable_vga(dev); intel_setup_outputs(dev); - intel_init_clock_gating(dev); - - if (IS_IRONLAKE_M(dev)) { - ironlake_enable_drps(dev); - intel_init_emon(dev); - } - - if (IS_GEN6(dev) || IS_GEN7(dev)) { - gen6_enable_rps(dev_priv); - gen6_update_ring_freq(dev_priv); - } + intel_modeset_init_hw(dev); INIT_WORK(&dev_priv->idle_work, intel_idle_update); setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, -- cgit v1.2.3-59-g8ed1b From bfa3384a9a84aaaa59443bbd776c142e7dba4b0f Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Tue, 10 Apr 2012 11:58:04 -0700 Subject: drm/i915: check PPS regs for sanity when using eDP If these regs don't 
have valid values, the panel won't come up, and may even cause a system hang. So do a basic sanity check when an eDP panel is detected. Signed-off-by: Jesse Barnes Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=44305 Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_dp.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 110552ff302c..6346b29ee934 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -2462,6 +2462,13 @@ intel_dp_init(struct drm_device *dev, int output_reg) pp_off = I915_READ(PCH_PP_OFF_DELAYS); pp_div = I915_READ(PCH_PP_DIVISOR); + if (!pp_on || !pp_off || !pp_div) { + DRM_INFO("bad panel power sequencing delays, disabling panel\n"); + intel_dp_encoder_destroy(&intel_dp->base.base); + intel_dp_destroy(&intel_connector->base); + return; + } + /* Pull timing values out of registers */ cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> PANEL_POWER_UP_DELAY_SHIFT; -- cgit v1.2.3-59-g8ed1b From b6834bd63ec407444098be233122a25bf4f17c75 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 11 Apr 2012 09:23:33 -0700 Subject: drm/i915: disable turbo on ValleyView for now We'll probably need new init functions and will need to test it. v2: fix impossible GEN6 && GEN7 condition, move to Daniel's new init function Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index aee389c442a4..58f4b02151f2 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9541,7 +9541,7 @@ void intel_modeset_init_hw(struct drm_device *dev) intel_init_emon(dev); } - if (IS_GEN6(dev) || IS_GEN7(dev)) { + if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) { gen6_enable_rps(dev_priv); gen6_update_ring_freq(dev_priv); } @@ -9632,7 +9632,7 @@ void intel_modeset_cleanup(struct drm_device *dev) if (IS_IRONLAKE_M(dev)) ironlake_disable_drps(dev); - if (IS_GEN6(dev) || IS_GEN7(dev)) + if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) gen6_disable_rps(dev); if (IS_IRONLAKE_M(dev)) -- cgit v1.2.3-59-g8ed1b From f82cfb6bcda164ef3a66b8c3fc549b1f9bdd09ad Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 11 Apr 2012 09:23:35 -0700 Subject: drm/i915: allow PCH PWM override on IVB On IVB, there are two sets of panel backlight regs: one in the CPU and one in the PCH. The CPU ones aren't generally used, so on IVB make sure we allow the PCH regs to actually control the backlight. 
v2: remove unused pwm variable (Daniel) move to init_hw function so we override on resume too Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 58f4b02151f2..33aaad30c0bc 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9530,6 +9530,19 @@ static void i915_disable_vga(struct drm_device *dev) POSTING_READ(vga_reg); } +static void ivb_pch_pwm_override(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* + * IVB has CPU eDP backlight regs too, set things up to let the + * PCH regs control the backlight + */ + I915_WRITE(BLC_PWM_CPU_CTL2, PWM_ENABLE); + I915_WRITE(BLC_PWM_CPU_CTL, 0); + I915_WRITE(BLC_PWM_PCH_CTL1, PWM_ENABLE | (1<<30)); +} + void intel_modeset_init_hw(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -9545,6 +9558,9 @@ void intel_modeset_init_hw(struct drm_device *dev) gen6_enable_rps(dev_priv); gen6_update_ring_freq(dev_priv); } + + if (IS_IVYBRIDGE(dev)) + ivb_pch_pwm_override(dev); } void intel_modeset_init(struct drm_device *dev) -- cgit v1.2.3-59-g8ed1b From 5816d648d5f0d496d7bf11ab9174a365c791ccc6 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 11 Apr 2012 11:18:19 -0700 Subject: drm/i915: i915_gem_object_sync must handle NULL When I extracted the synchronization code for implementing semaphorified pageflips (74f5f6e0), I neglected the non pipelined case which also calls this code. The modesetting code wants to make sure the object has finished rendering to the frame before configuring the scanout (ie. non-pipelined case). As a result of a follow on discussion on IRC, I've decided to add a comment about the function itself which received much inspiration from Chris as well. So really, this patch was ghost-written by Chris :). Reported-by: Chris Wilson Signed-off-by: Ben Widawsky Reviewed-by: Chris Wilson Tested-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 1bfb0d28e438..9fcdc9a917eb 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1953,6 +1953,18 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj) return 0; } +/** + * i915_gem_object_sync - sync an object to a ring. + * + * @obj: object which may be in use on another ring. + * @to: ring we wish to use the object on. May be NULL. + * + * This code is meant to abstract object synchronization with the GPU. + * Calling with NULL implies synchronizing the object with the CPU + * rather than a particular GPU ring. + * + * Returns 0 if successful, else propagates up the lower layer error. 
+ */ int i915_gem_object_sync(struct drm_i915_gem_object *obj, struct intel_ring_buffer *to) @@ -1964,7 +1976,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj, if (from == NULL || to == from) return 0; - if (!i915_semaphore_is_enabled(obj->base.dev)) + if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev)) return i915_gem_object_wait_rendering(obj); idx = intel_ring_sync_index(from, to); -- cgit v1.2.3-59-g8ed1b From e3a5a2250aa9e67d169f33e6c91dc7604cab513b Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 11 Apr 2012 11:18:20 -0700 Subject: drm/i915: fix for when semaphore updates fail This fixes a long standing issue where emitting the semaphore updates may have failed, but we've already updated our internal data structure. Reported-by: Daniel Vetter Signed-off-by: Ben Widawsky Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 9fcdc9a917eb..0115b12df573 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2001,10 +2001,12 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj, seqno = request->seqno; } - from->sync_seqno[idx] = seqno; - return to->sync_to(to, from, seqno - 1); + ret = to->sync_to(to, from, seqno - 1); + if (!ret) + from->sync_seqno[idx] = seqno; + return ret; } static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj) -- cgit v1.2.3-59-g8ed1b From 1500f7ea06858819abcf8eec8f952e2f9281c610 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 11 Apr 2012 11:18:21 -0700 Subject: drm/i915: hide (seqno-1) in ringbuffer code Waiting for seqno-1 in our object synchronization code is an implementation detail given how we've decided to do the waits within the rest of our code. Requested-by: Daniel Vetter Signed-off-by: Ben Widawsky Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 2 +- drivers/gpu/drm/i915/intel_ringbuffer.c | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 0115b12df573..71934dd0ee43 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2002,7 +2002,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj, } - ret = to->sync_to(to, from, seqno - 1); + ret = to->sync_to(to, from, seqno); if (!ret) from->sync_seqno[idx] = seqno; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index dfdb613752c5..467b3319e4bd 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -482,6 +482,12 @@ intel_ring_sync(struct intel_ring_buffer *waiter, MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER; + /* Throughout all of the GEM code, seqno passed implies our current + * seqno is >= the last seqno executed. However for hardware the + * comparison is strictly greater than. + */ + seqno -= 1; + ret = intel_ring_begin(waiter, 4); if (ret) return ret; -- cgit v1.2.3-59-g8ed1b From 6a848ccb800f5330ca368cd804795b7ce644e36c Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 11 Apr 2012 22:12:46 +0200 Subject: drm/i915: rip out ring->irq_mask We only ever enable/disable one interrupt (namely user_interrupts and pipe_notify), so we don't need to track the interrupt masking state. 
Also rename irq_enable to irq_enable_mask, now that it won't collide - beforehand both a irq_mask and irq_enable_mask would have looked a bit strange. Reviewed-by: Eric Anholt Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ringbuffer.c | 21 ++++++++------------- drivers/gpu/drm/i915/intel_ringbuffer.h | 3 +-- 2 files changed, 9 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 467b3319e4bd..915aa07d5a61 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -798,7 +798,6 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; - u32 mask = ring->irq_enable; if (!dev->irq_enabled) return false; @@ -810,9 +809,8 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring) spin_lock(&ring->irq_lock); if (ring->irq_refcount++ == 0) { - ring->irq_mask &= ~mask; - I915_WRITE_IMR(ring, ring->irq_mask); - ironlake_enable_irq(dev_priv, mask); + I915_WRITE_IMR(ring, ~ring->irq_enable_mask); + ironlake_enable_irq(dev_priv, ring->irq_enable_mask); } spin_unlock(&ring->irq_lock); @@ -824,13 +822,11 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; - u32 mask = ring->irq_enable; spin_lock(&ring->irq_lock); if (--ring->irq_refcount == 0) { - ring->irq_mask |= mask; - I915_WRITE_IMR(ring, ring->irq_mask); - ironlake_disable_irq(dev_priv, mask); + I915_WRITE_IMR(ring, ~0); + ironlake_disable_irq(dev_priv, ring->irq_enable_mask); } spin_unlock(&ring->irq_lock); @@ -1002,7 +998,6 @@ int intel_init_ring_buffer(struct drm_device *dev, init_waitqueue_head(&ring->irq_queue); spin_lock_init(&ring->irq_lock); - ring->irq_mask = ~0; if (I915_NEED_GFX_HWS(dev)) { ret = init_status_page(ring); @@ -1380,7 +1375,7 @@ static const struct intel_ring_buffer gen6_bsd_ring = { .flush = gen6_ring_flush, .add_request = gen6_add_request, .get_seqno = gen6_ring_get_seqno, - .irq_enable = GEN6_BSD_USER_INTERRUPT, + .irq_enable_mask = GEN6_BSD_USER_INTERRUPT, .irq_get = gen6_ring_get_irq, .irq_put = gen6_ring_put_irq, .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, @@ -1426,7 +1421,7 @@ static const struct intel_ring_buffer gen6_blt_ring = { .get_seqno = gen6_ring_get_seqno, .irq_get = gen6_ring_get_irq, .irq_put = gen6_ring_put_irq, - .irq_enable = GEN6_BLITTER_USER_INTERRUPT, + .irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT, .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, .sync_to = gen6_blt_ring_sync_to, .semaphore_register = {MI_SEMAPHORE_SYNC_BR, @@ -1446,7 +1441,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) ring->flush = gen6_render_ring_flush; ring->irq_get = gen6_ring_get_irq; ring->irq_put = gen6_ring_put_irq; - ring->irq_enable = GT_USER_INTERRUPT; + ring->irq_enable_mask = GT_USER_INTERRUPT; ring->get_seqno = gen6_ring_get_seqno; } else if (IS_GEN5(dev)) { ring->add_request = pc_render_add_request; @@ -1471,7 +1466,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) ring->add_request = gen6_add_request; ring->irq_get = gen6_ring_get_irq; ring->irq_put = gen6_ring_put_irq; - ring->irq_enable = GT_USER_INTERRUPT; + ring->irq_enable_mask = GT_USER_INTERRUPT; } else if (IS_GEN5(dev)) { ring->add_request = pc_render_add_request; ring->get_seqno = pc_render_get_seqno; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h 
b/drivers/gpu/drm/i915/intel_ringbuffer.h index 3488a5a127db..06a66adf69c2 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -58,8 +58,7 @@ struct intel_ring_buffer { spinlock_t irq_lock; u32 irq_refcount; - u32 irq_mask; - u32 irq_enable; /* IRQs enabled for this ring */ + u32 irq_enable_mask; /* bitmask to enable ring interrupt */ u32 irq_seqno; /* last seq seem at irq time */ u32 trace_irq_seqno; u32 waiting_seqno; -- cgit v1.2.3-59-g8ed1b From dfc9ef2fb0ba01ad8256f4dfa9a8e8fcc6288fc4 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 11 Apr 2012 22:12:47 +0200 Subject: drm/i915: set ring->size in common ring setup code Eventually we want to scale the ring size depending upon available gtt space. For now just consolidate this instead of replicating it over all ringbuffer templates. Reviewed-by: Eric Anholt Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ringbuffer.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 915aa07d5a61..87e29076123d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -995,6 +995,7 @@ int intel_init_ring_buffer(struct drm_device *dev, INIT_LIST_HEAD(&ring->active_list); INIT_LIST_HEAD(&ring->request_list); INIT_LIST_HEAD(&ring->gpu_write_list); + ring->size = 32 * PAGE_SIZE; init_waitqueue_head(&ring->irq_queue); spin_lock_init(&ring->irq_lock); @@ -1268,7 +1269,6 @@ static const struct intel_ring_buffer render_ring = { .name = "render ring", .id = RCS, .mmio_base = RENDER_RING_BASE, - .size = 32 * PAGE_SIZE, .init = init_render_ring, .write_tail = ring_write_tail, .flush = render_ring_flush, @@ -1291,7 +1291,6 @@ static const struct intel_ring_buffer bsd_ring = { .name = "bsd ring", .id = VCS, .mmio_base = BSD_RING_BASE, - .size = 32 * PAGE_SIZE, .init = init_ring_common, .write_tail = ring_write_tail, .flush = bsd_ring_flush, @@ -1369,7 +1368,6 @@ static const struct intel_ring_buffer gen6_bsd_ring = { .name = "gen6 bsd ring", .id = VCS, .mmio_base = GEN6_BSD_RING_BASE, - .size = 32 * PAGE_SIZE, .init = init_ring_common, .write_tail = gen6_bsd_ring_write_tail, .flush = gen6_ring_flush, @@ -1413,7 +1411,6 @@ static const struct intel_ring_buffer gen6_blt_ring = { .name = "blt ring", .id = BCS, .mmio_base = BLT_RING_BASE, - .size = 32 * PAGE_SIZE, .init = init_ring_common, .write_tail = ring_write_tail, .flush = blt_ring_flush, -- cgit v1.2.3-59-g8ed1b From 59465b5f78db752622350e3e01dbe18a6f19e876 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 11 Apr 2012 22:12:48 +0200 Subject: drm/i915: dynamically set up the render ring functions and params Our hw is simply not well-designed enough that it neatly fits into boxes. Everywhere else we set up vtables and similar things dynamically using switch statements - it's simply much more flexible. This is prep work to rework the pre-gen6 ring irq stuff - it'll add a few more differences. With the current const struct templates, that would be a mess. This leads to some unfortunate duplication with the old dri1 code, but we can reap that again because gen6 isn't actually supported there. But that's for a separate patch. 
Reviewed-by: Eric Anholt Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ringbuffer.c | 71 +++++++++++++++++++++++---------- 1 file changed, 49 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 87e29076123d..fd8e27ac2071 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1265,26 +1265,6 @@ void intel_ring_advance(struct intel_ring_buffer *ring) ring->write_tail(ring, ring->tail); } -static const struct intel_ring_buffer render_ring = { - .name = "render ring", - .id = RCS, - .mmio_base = RENDER_RING_BASE, - .init = init_render_ring, - .write_tail = ring_write_tail, - .flush = render_ring_flush, - .add_request = render_ring_add_request, - .get_seqno = ring_get_seqno, - .irq_get = render_ring_get_irq, - .irq_put = render_ring_put_irq, - .dispatch_execbuffer = render_ring_dispatch_execbuffer, - .cleanup = render_ring_cleanup, - .sync_to = render_ring_sync_to, - .semaphore_register = {MI_SEMAPHORE_SYNC_INVALID, - MI_SEMAPHORE_SYNC_RV, - MI_SEMAPHORE_SYNC_RB}, - .signal_mbox = {GEN6_VRSYNC, GEN6_BRSYNC}, -}; - /* ring buffer for bit-stream decoder */ static const struct intel_ring_buffer bsd_ring = { @@ -1432,7 +1412,10 @@ int intel_init_render_ring_buffer(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; - *ring = render_ring; + ring->name = "render ring"; + ring->id = RCS; + ring->mmio_base = RENDER_RING_BASE; + if (INTEL_INFO(dev)->gen >= 6) { ring->add_request = gen6_add_request; ring->flush = gen6_render_ring_flush; @@ -1440,10 +1423,30 @@ int intel_init_render_ring_buffer(struct drm_device *dev) ring->irq_put = gen6_ring_put_irq; ring->irq_enable_mask = GT_USER_INTERRUPT; ring->get_seqno = gen6_ring_get_seqno; + ring->sync_to = render_ring_sync_to; + ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID; + ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV; + ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB; + ring->signal_mbox[0] = GEN6_VRSYNC; + ring->signal_mbox[1] = GEN6_BRSYNC; } else if (IS_GEN5(dev)) { ring->add_request = pc_render_add_request; + ring->flush = render_ring_flush; ring->get_seqno = pc_render_get_seqno; + ring->irq_get = render_ring_get_irq; + ring->irq_put = render_ring_put_irq; + } else { + ring->add_request = render_ring_add_request; + ring->flush = render_ring_flush; + ring->get_seqno = ring_get_seqno; + ring->irq_get = render_ring_get_irq; + ring->irq_put = render_ring_put_irq; } + ring->write_tail = ring_write_tail; + ring->dispatch_execbuffer = render_ring_dispatch_execbuffer; + ring->init = init_render_ring; + ring->cleanup = render_ring_cleanup; + if (!I915_NEED_GFX_HWS(dev)) { ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; @@ -1458,16 +1461,40 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) drm_i915_private_t *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; - *ring = render_ring; + ring->name = "render ring"; + ring->id = RCS; + ring->mmio_base = RENDER_RING_BASE; + if (INTEL_INFO(dev)->gen >= 6) { ring->add_request = gen6_add_request; + ring->flush = gen6_render_ring_flush; ring->irq_get = gen6_ring_get_irq; ring->irq_put = gen6_ring_put_irq; ring->irq_enable_mask = GT_USER_INTERRUPT; + ring->get_seqno = gen6_ring_get_seqno; + ring->sync_to = render_ring_sync_to; + ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID; + 
ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV; + ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB; + ring->signal_mbox[0] = GEN6_VRSYNC; + ring->signal_mbox[1] = GEN6_BRSYNC; } else if (IS_GEN5(dev)) { ring->add_request = pc_render_add_request; + ring->flush = render_ring_flush; ring->get_seqno = pc_render_get_seqno; + ring->irq_get = render_ring_get_irq; + ring->irq_put = render_ring_put_irq; + } else { + ring->add_request = render_ring_add_request; + ring->flush = render_ring_flush; + ring->get_seqno = ring_get_seqno; + ring->irq_get = render_ring_get_irq; + ring->irq_put = render_ring_put_irq; } + ring->write_tail = ring_write_tail; + ring->dispatch_execbuffer = render_ring_dispatch_execbuffer; + ring->init = init_render_ring; + ring->cleanup = render_ring_cleanup; if (!I915_NEED_GFX_HWS(dev)) ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; -- cgit v1.2.3-59-g8ed1b From 58fa3835874bc3466e84e3bba6057f43acd6eb18 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 11 Apr 2012 22:12:49 +0200 Subject: drm/i915: dynamically set up bsd ring functions and params The same treatment for the bsd ring. Again, this will be split up further by the irq rework. Reviewed-by: Eric Anholt Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ringbuffer.c | 72 ++++++++++++++------------------- 1 file changed, 31 insertions(+), 41 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index fd8e27ac2071..683d1f0fcb33 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1265,22 +1265,6 @@ void intel_ring_advance(struct intel_ring_buffer *ring) ring->write_tail(ring, ring->tail); } -/* ring buffer for bit-stream decoder */ - -static const struct intel_ring_buffer bsd_ring = { - .name = "bsd ring", - .id = VCS, - .mmio_base = BSD_RING_BASE, - .init = init_ring_common, - .write_tail = ring_write_tail, - .flush = bsd_ring_flush, - .add_request = ring_add_request, - .get_seqno = ring_get_seqno, - .irq_get = bsd_ring_get_irq, - .irq_put = bsd_ring_put_irq, - .dispatch_execbuffer = ring_dispatch_execbuffer, -}; - static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, u32 value) @@ -1343,27 +1327,6 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, return 0; } -/* ring buffer for Video Codec for Gen6+ */ -static const struct intel_ring_buffer gen6_bsd_ring = { - .name = "gen6 bsd ring", - .id = VCS, - .mmio_base = GEN6_BSD_RING_BASE, - .init = init_ring_common, - .write_tail = gen6_bsd_ring_write_tail, - .flush = gen6_ring_flush, - .add_request = gen6_add_request, - .get_seqno = gen6_ring_get_seqno, - .irq_enable_mask = GEN6_BSD_USER_INTERRUPT, - .irq_get = gen6_ring_get_irq, - .irq_put = gen6_ring_put_irq, - .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, - .sync_to = gen6_bsd_ring_sync_to, - .semaphore_register = {MI_SEMAPHORE_SYNC_VR, - MI_SEMAPHORE_SYNC_INVALID, - MI_SEMAPHORE_SYNC_VB}, - .signal_mbox = {GEN6_RVSYNC, GEN6_BVSYNC}, -}; - /* Blitter support (SandyBridge+) */ static int blt_ring_flush(struct intel_ring_buffer *ring, @@ -1531,10 +1494,37 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = &dev_priv->ring[VCS]; - if (IS_GEN6(dev) || IS_GEN7(dev)) - *ring = gen6_bsd_ring; - else - *ring = bsd_ring; + ring->name = "bsd ring"; + ring->id = VCS; + + if (IS_GEN6(dev) || IS_GEN7(dev)) { + ring->mmio_base = GEN6_BSD_RING_BASE; + 
ring->write_tail = gen6_bsd_ring_write_tail; + ring->flush = gen6_ring_flush; + ring->add_request = gen6_add_request; + ring->get_seqno = gen6_ring_get_seqno; + ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT; + ring->irq_get = gen6_ring_get_irq; + ring->irq_put = gen6_ring_put_irq; + ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + ring->sync_to = gen6_bsd_ring_sync_to; + ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR; + ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID; + ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB; + ring->signal_mbox[0] = GEN6_RVSYNC; + ring->signal_mbox[1] = GEN6_BVSYNC; + } else { + ring->mmio_base = BSD_RING_BASE; + ring->write_tail = ring_write_tail; + ring->flush = bsd_ring_flush; + ring->add_request = ring_add_request; + ring->get_seqno = ring_get_seqno; + ring->irq_get = bsd_ring_get_irq; + ring->irq_put = bsd_ring_put_irq; + ring->dispatch_execbuffer = ring_dispatch_execbuffer; + } + ring->init = init_ring_common; + return intel_init_ring_buffer(dev, ring); } -- cgit v1.2.3-59-g8ed1b From 3535d9dd5a19c2f7b88caf67d650bdaa0750b06c Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 11 Apr 2012 22:12:50 +0200 Subject: drm/i915: dynamically set up blt ring functions and parameters Just for consistency. Reviewed-by: Eric Anholt Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ringbuffer.c | 40 ++++++++++++++++----------------- 1 file changed, 19 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 683d1f0fcb33..5b11c53a8ee0 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1350,26 +1350,6 @@ static int blt_ring_flush(struct intel_ring_buffer *ring, return 0; } -static const struct intel_ring_buffer gen6_blt_ring = { - .name = "blt ring", - .id = BCS, - .mmio_base = BLT_RING_BASE, - .init = init_ring_common, - .write_tail = ring_write_tail, - .flush = blt_ring_flush, - .add_request = gen6_add_request, - .get_seqno = gen6_ring_get_seqno, - .irq_get = gen6_ring_get_irq, - .irq_put = gen6_ring_put_irq, - .irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT, - .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, - .sync_to = gen6_blt_ring_sync_to, - .semaphore_register = {MI_SEMAPHORE_SYNC_BR, - MI_SEMAPHORE_SYNC_BV, - MI_SEMAPHORE_SYNC_INVALID}, - .signal_mbox = {GEN6_RBSYNC, GEN6_VBSYNC}, -}; - int intel_init_render_ring_buffer(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -1534,7 +1514,25 @@ int intel_init_blt_ring_buffer(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; - *ring = gen6_blt_ring; + ring->name = "blitter ring"; + ring->id = BCS; + + ring->mmio_base = BLT_RING_BASE; + ring->write_tail = ring_write_tail; + ring->flush = blt_ring_flush; + ring->add_request = gen6_add_request; + ring->get_seqno = gen6_ring_get_seqno; + ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT; + ring->irq_get = gen6_ring_get_irq; + ring->irq_put = gen6_ring_put_irq; + ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + ring->sync_to = gen6_blt_ring_sync_to; + ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR; + ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV; + ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID; + ring->signal_mbox[0] = GEN6_RBSYNC; + ring->signal_mbox[1] = GEN6_VBSYNC; + ring->init = init_ring_common; return intel_init_ring_buffer(dev, ring); } 
-- cgit v1.2.3-59-g8ed1b From b4178f8aaf84ae59d9b2a17a4a21aa19499f577a Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 11 Apr 2012 22:12:51 +0200 Subject: drm/i915: don't set up rings on gen6+ for non-kms It's not supported, and with the patch to refuse loading on gen6+ without kms enabled, there's also no way we can hit this. Reviewed-by: Eric Anholt Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ringbuffer.c | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 5b11c53a8ee0..be405f23dd5e 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1409,18 +1409,8 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) ring->mmio_base = RENDER_RING_BASE; if (INTEL_INFO(dev)->gen >= 6) { - ring->add_request = gen6_add_request; - ring->flush = gen6_render_ring_flush; - ring->irq_get = gen6_ring_get_irq; - ring->irq_put = gen6_ring_put_irq; - ring->irq_enable_mask = GT_USER_INTERRUPT; - ring->get_seqno = gen6_ring_get_seqno; - ring->sync_to = render_ring_sync_to; - ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV; - ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB; - ring->signal_mbox[0] = GEN6_VRSYNC; - ring->signal_mbox[1] = GEN6_BRSYNC; + /* non-kms not supported on gen6+ */ + return -ENODEV; } else if (IS_GEN5(dev)) { ring->add_request = pc_render_add_request; ring->flush = render_ring_flush; -- cgit v1.2.3-59-g8ed1b From 686cb5f9f5dc7db150c9f4e49cc0d45ef952450b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 11 Apr 2012 22:12:52 +0200 Subject: drm/i915: consolidate ring->sync-to functions The waiter is always the ring itself (otherwise we'd have a decent snafu in a callsite), so we can unify this easily. Also give it the usual gen6_ prefix, in case anyone is foolish enough to implement hw semaphores for gen5. 
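The reason one generic function can replace the three per-ring wrappers is that each wrapper only differed in which slot of signaller->semaphore_register[] it picked, and that slot is always the waiter's own ring id. A small standalone sketch of that idea, with invented names and register values rather than the driver's actual ones:

#include <assert.h>
#include <stdio.h>

enum demo_ring_id { DEMO_RCS, DEMO_VCS, DEMO_BCS, DEMO_NUM_RINGS };

#define DEMO_SYNC_INVALID (-1)

struct demo_ring {
	enum demo_ring_id id;
	/* mailbox to use when ring i wants to wait on this ring */
	int semaphore_register[DEMO_NUM_RINGS];
};

/* One generic sync helper: index the signaller's table with the
 * waiter's own id, so no per-ring wrapper is needed. */
static int demo_ring_sync(const struct demo_ring *waiter,
			  const struct demo_ring *signaller)
{
	int mbox = signaller->semaphore_register[waiter->id];

	/* a ring has no mailbox for waiting on itself */
	assert(mbox != DEMO_SYNC_INVALID);
	return mbox;
}

int main(void)
{
	struct demo_ring rcs = { DEMO_RCS, { DEMO_SYNC_INVALID, 0x40, 0x44 } };
	struct demo_ring vcs = { DEMO_VCS, { 0x60, DEMO_SYNC_INVALID, 0x64 } };

	printf("VCS waiting on RCS uses mailbox 0x%x\n", demo_ring_sync(&vcs, &rcs));
	printf("RCS waiting on VCS uses mailbox 0x%x\n", demo_ring_sync(&rcs, &vcs));
	return 0;
}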
Reviewed-by: Eric Anholt Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ringbuffer.c | 60 ++++++--------------------------- 1 file changed, 11 insertions(+), 49 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index be405f23dd5e..bc33f13783b8 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -472,10 +472,9 @@ gen6_add_request(struct intel_ring_buffer *ring, * @seqno - seqno which the waiter will block on */ static int -intel_ring_sync(struct intel_ring_buffer *waiter, - struct intel_ring_buffer *signaller, - int ring, - u32 seqno) +gen6_ring_sync(struct intel_ring_buffer *waiter, + struct intel_ring_buffer *signaller, + u32 seqno) { int ret; u32 dw1 = MI_SEMAPHORE_MBOX | @@ -488,11 +487,15 @@ intel_ring_sync(struct intel_ring_buffer *waiter, */ seqno -= 1; + WARN_ON(signaller->semaphore_register[waiter->id] == + MI_SEMAPHORE_SYNC_INVALID); + ret = intel_ring_begin(waiter, 4); if (ret) return ret; - intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]); + intel_ring_emit(waiter, + dw1 | signaller->semaphore_register[waiter->id]); intel_ring_emit(waiter, seqno); intel_ring_emit(waiter, 0); intel_ring_emit(waiter, MI_NOOP); @@ -501,47 +504,6 @@ intel_ring_sync(struct intel_ring_buffer *waiter, return 0; } -/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */ -int -render_ring_sync_to(struct intel_ring_buffer *waiter, - struct intel_ring_buffer *signaller, - u32 seqno) -{ - WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID); - return intel_ring_sync(waiter, - signaller, - RCS, - seqno); -} - -/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */ -int -gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter, - struct intel_ring_buffer *signaller, - u32 seqno) -{ - WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID); - return intel_ring_sync(waiter, - signaller, - VCS, - seqno); -} - -/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */ -int -gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter, - struct intel_ring_buffer *signaller, - u32 seqno) -{ - WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID); - return intel_ring_sync(waiter, - signaller, - BCS, - seqno); -} - - - #define PIPE_CONTROL_FLUSH(ring__, addr__) \ do { \ intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \ @@ -1366,7 +1328,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) ring->irq_put = gen6_ring_put_irq; ring->irq_enable_mask = GT_USER_INTERRUPT; ring->get_seqno = gen6_ring_get_seqno; - ring->sync_to = render_ring_sync_to; + ring->sync_to = gen6_ring_sync; ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID; ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV; ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB; @@ -1477,7 +1439,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) ring->irq_get = gen6_ring_get_irq; ring->irq_put = gen6_ring_put_irq; ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; - ring->sync_to = gen6_bsd_ring_sync_to; + ring->sync_to = gen6_ring_sync; ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR; ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID; ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB; @@ -1516,7 +1478,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev) ring->irq_get = gen6_ring_get_irq; ring->irq_put = gen6_ring_put_irq; ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; - ring->sync_to = gen6_blt_ring_sync_to; + 
ring->sync_to = gen6_ring_sync; ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR; ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV; ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID; -- cgit v1.2.3-59-g8ed1b From e367031966c3546b213f6699b83669739cb6fb1d Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 11 Apr 2012 22:12:53 +0200 Subject: drm/i915: abstract away ring-specific irq_get/put Inspired by Ben Widawsky's patch for gen6+. Now after restructuring how we set up the ring vtables and parameters, we can do this right. This kills the bsd specific get/put_irq functions, they're now the same. Reviewed-by: Eric Anholt Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ringbuffer.c | 77 ++++++++++----------------------- 1 file changed, 24 insertions(+), 53 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index bc33f13783b8..a09e8aa0db8d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -645,7 +645,7 @@ i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) } static bool -render_ring_get_irq(struct intel_ring_buffer *ring) +i9xx_ring_get_irq(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; @@ -657,9 +657,9 @@ render_ring_get_irq(struct intel_ring_buffer *ring) if (ring->irq_refcount++ == 0) { if (INTEL_INFO(dev)->gen >= 5) ironlake_enable_irq(dev_priv, - GT_PIPE_NOTIFY | GT_USER_INTERRUPT); + ring->irq_enable_mask); else - i915_enable_irq(dev_priv, I915_USER_INTERRUPT); + i915_enable_irq(dev_priv, ring->irq_enable_mask); } spin_unlock(&ring->irq_lock); @@ -667,7 +667,7 @@ render_ring_get_irq(struct intel_ring_buffer *ring) } static void -render_ring_put_irq(struct intel_ring_buffer *ring) +i9xx_ring_put_irq(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; @@ -676,10 +676,9 @@ render_ring_put_irq(struct intel_ring_buffer *ring) if (--ring->irq_refcount == 0) { if (INTEL_INFO(dev)->gen >= 5) ironlake_disable_irq(dev_priv, - GT_USER_INTERRUPT | - GT_PIPE_NOTIFY); + ring->irq_enable_mask); else - i915_disable_irq(dev_priv, I915_USER_INTERRUPT); + i915_disable_irq(dev_priv, ring->irq_enable_mask); } spin_unlock(&ring->irq_lock); } @@ -795,42 +794,6 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring) gen6_gt_force_wake_put(dev_priv); } -static bool -bsd_ring_get_irq(struct intel_ring_buffer *ring) -{ - struct drm_device *dev = ring->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - - if (!dev->irq_enabled) - return false; - - spin_lock(&ring->irq_lock); - if (ring->irq_refcount++ == 0) { - if (IS_G4X(dev)) - i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT); - else - ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT); - } - spin_unlock(&ring->irq_lock); - - return true; -} -static void -bsd_ring_put_irq(struct intel_ring_buffer *ring) -{ - struct drm_device *dev = ring->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - - spin_lock(&ring->irq_lock); - if (--ring->irq_refcount == 0) { - if (IS_G4X(dev)) - i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT); - else - ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT); - } - spin_unlock(&ring->irq_lock); -} - static int ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length) { @@ -1338,14 +1301,16 @@ int intel_init_render_ring_buffer(struct drm_device *dev) ring->add_request = pc_render_add_request; 
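What lets the bsd and render variants collapse into one is that the only per-ring difference, namely which bit to flip in the hardware interrupt mask, now lives in ring->irq_enable_mask. A rough standalone sketch of the refcount-plus-mask pattern, with invented names, no locking and no real register access:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Pretend interrupt mask register: a set bit means "masked off". */
static uint32_t demo_imr = 0xffffffff;

struct demo_ring {
	const char *name;
	uint32_t irq_enable_mask;	/* the only per-ring difference */
	int irq_refcount;
};

static bool demo_ring_get_irq(struct demo_ring *ring)
{
	if (ring->irq_refcount++ == 0)
		demo_imr &= ~ring->irq_enable_mask;	/* unmask on first get */
	return true;
}

static void demo_ring_put_irq(struct demo_ring *ring)
{
	if (--ring->irq_refcount == 0)
		demo_imr |= ring->irq_enable_mask;	/* mask again on last put */
}

int main(void)
{
	struct demo_ring bsd = { "bsd", 1u << 25, 0 };

	demo_ring_get_irq(&bsd);
	demo_ring_get_irq(&bsd);
	printf("IMR with the irq in use: 0x%08x\n", demo_imr);
	demo_ring_put_irq(&bsd);
	demo_ring_put_irq(&bsd);
	printf("IMR after the last put:  0x%08x\n", demo_imr);
	return 0;
}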
ring->flush = render_ring_flush; ring->get_seqno = pc_render_get_seqno; - ring->irq_get = render_ring_get_irq; - ring->irq_put = render_ring_put_irq; + ring->irq_get = i9xx_ring_get_irq; + ring->irq_put = i9xx_ring_put_irq; + ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY; } else { ring->add_request = render_ring_add_request; ring->flush = render_ring_flush; ring->get_seqno = ring_get_seqno; - ring->irq_get = render_ring_get_irq; - ring->irq_put = render_ring_put_irq; + ring->irq_get = i9xx_ring_get_irq; + ring->irq_put = i9xx_ring_put_irq; + ring->irq_enable_mask = I915_USER_INTERRUPT; } ring->write_tail = ring_write_tail; ring->dispatch_execbuffer = render_ring_dispatch_execbuffer; @@ -1377,14 +1342,16 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) ring->add_request = pc_render_add_request; ring->flush = render_ring_flush; ring->get_seqno = pc_render_get_seqno; - ring->irq_get = render_ring_get_irq; - ring->irq_put = render_ring_put_irq; + ring->irq_get = i9xx_ring_get_irq; + ring->irq_put = i9xx_ring_put_irq; + ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY; } else { ring->add_request = render_ring_add_request; ring->flush = render_ring_flush; ring->get_seqno = ring_get_seqno; - ring->irq_get = render_ring_get_irq; - ring->irq_put = render_ring_put_irq; + ring->irq_get = i9xx_ring_get_irq; + ring->irq_put = i9xx_ring_put_irq; + ring->irq_enable_mask = I915_USER_INTERRUPT; } ring->write_tail = ring_write_tail; ring->dispatch_execbuffer = render_ring_dispatch_execbuffer; @@ -1451,8 +1418,12 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) ring->flush = bsd_ring_flush; ring->add_request = ring_add_request; ring->get_seqno = ring_get_seqno; - ring->irq_get = bsd_ring_get_irq; - ring->irq_put = bsd_ring_put_irq; + ring->irq_get = i9xx_ring_get_irq; + ring->irq_put = i9xx_ring_put_irq; + if (IS_GEN5(dev)) + ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; + else + ring->irq_enable_mask = I915_BSD_USER_INTERRUPT; ring->dispatch_execbuffer = ring_dispatch_execbuffer; } ring->init = init_ring_common; -- cgit v1.2.3-59-g8ed1b From e48d86347c602c55159714f6ddcd88969a1b2f21 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 11 Apr 2012 22:12:54 +0200 Subject: drm/i915: split out the gen5 ring irq get/put functions Now that we have sensibly split up, we can nicely get rid of that ugly is_gen5 check. 
Reviewed-by: Eric Anholt Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ringbuffer.c | 66 ++++++++++++++++++++++----------- 1 file changed, 44 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index a09e8aa0db8d..7e1f2211ea2a 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -644,6 +644,35 @@ i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) POSTING_READ(IMR); } +static bool +gen5_ring_get_irq(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + + if (!dev->irq_enabled) + return false; + + spin_lock(&ring->irq_lock); + if (ring->irq_refcount++ == 0) + ironlake_enable_irq(dev_priv, ring->irq_enable_mask); + spin_unlock(&ring->irq_lock); + + return true; +} + +static void +gen5_ring_put_irq(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + + spin_lock(&ring->irq_lock); + if (--ring->irq_refcount == 0) + ironlake_disable_irq(dev_priv, ring->irq_enable_mask); + spin_unlock(&ring->irq_lock); +} + static bool i9xx_ring_get_irq(struct intel_ring_buffer *ring) { @@ -654,13 +683,8 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring) return false; spin_lock(&ring->irq_lock); - if (ring->irq_refcount++ == 0) { - if (INTEL_INFO(dev)->gen >= 5) - ironlake_enable_irq(dev_priv, - ring->irq_enable_mask); - else - i915_enable_irq(dev_priv, ring->irq_enable_mask); - } + if (ring->irq_refcount++ == 0) + i915_enable_irq(dev_priv, ring->irq_enable_mask); spin_unlock(&ring->irq_lock); return true; @@ -673,13 +697,8 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring) drm_i915_private_t *dev_priv = dev->dev_private; spin_lock(&ring->irq_lock); - if (--ring->irq_refcount == 0) { - if (INTEL_INFO(dev)->gen >= 5) - ironlake_disable_irq(dev_priv, - ring->irq_enable_mask); - else - i915_disable_irq(dev_priv, ring->irq_enable_mask); - } + if (--ring->irq_refcount == 0) + i915_disable_irq(dev_priv, ring->irq_enable_mask); spin_unlock(&ring->irq_lock); } @@ -1301,8 +1320,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev) ring->add_request = pc_render_add_request; ring->flush = render_ring_flush; ring->get_seqno = pc_render_get_seqno; - ring->irq_get = i9xx_ring_get_irq; - ring->irq_put = i9xx_ring_put_irq; + ring->irq_get = gen5_ring_get_irq; + ring->irq_put = gen5_ring_put_irq; ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY; } else { ring->add_request = render_ring_add_request; @@ -1342,8 +1361,8 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) ring->add_request = pc_render_add_request; ring->flush = render_ring_flush; ring->get_seqno = pc_render_get_seqno; - ring->irq_get = i9xx_ring_get_irq; - ring->irq_put = i9xx_ring_put_irq; + ring->irq_get = gen5_ring_get_irq; + ring->irq_put = gen5_ring_put_irq; ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY; } else { ring->add_request = render_ring_add_request; @@ -1418,12 +1437,15 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) ring->flush = bsd_ring_flush; ring->add_request = ring_add_request; ring->get_seqno = ring_get_seqno; - ring->irq_get = i9xx_ring_get_irq; - ring->irq_put = i9xx_ring_put_irq; - if (IS_GEN5(dev)) + if (IS_GEN5(dev)) { ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; - else + ring->irq_get = gen5_ring_get_irq; + ring->irq_put = gen5_ring_put_irq; + } else { 
ring->irq_enable_mask = I915_BSD_USER_INTERRUPT; + ring->irq_get = i9xx_ring_get_irq; + ring->irq_put = i9xx_ring_put_irq; + } ring->dispatch_execbuffer = ring_dispatch_execbuffer; } ring->init = init_ring_common; -- cgit v1.2.3-59-g8ed1b From 0fd2c201482e62492f2d7dc6c2798cf7f66c9570 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 11 Apr 2012 22:12:55 +0200 Subject: drm/i915: don't enable the gen6 bsd ring tail write enable on gen7 HW engineers have fixed this issue for ivb. Again, a nice cleanup possible thanks to the more flexible ring initialization. Reviewed-by: Eric Anholt Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ringbuffer.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 7e1f2211ea2a..68e1255452fa 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1415,9 +1415,12 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) ring->name = "bsd ring"; ring->id = VCS; + ring->write_tail = ring_write_tail; if (IS_GEN6(dev) || IS_GEN7(dev)) { ring->mmio_base = GEN6_BSD_RING_BASE; - ring->write_tail = gen6_bsd_ring_write_tail; + /* gen6 bsd needs a special wa for tail updates */ + if (IS_GEN6(dev)) + ring->write_tail = gen6_bsd_ring_write_tail; ring->flush = gen6_ring_flush; ring->add_request = gen6_add_request; ring->get_seqno = gen6_ring_get_seqno; @@ -1433,7 +1436,6 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) ring->signal_mbox[1] = GEN6_BVSYNC; } else { ring->mmio_base = BSD_RING_BASE; - ring->write_tail = ring_write_tail; ring->flush = bsd_ring_flush; ring->add_request = ring_add_request; ring->get_seqno = ring_get_seqno; -- cgit v1.2.3-59-g8ed1b From fb3256da8db2cb70be7d2c7f058b8676a9a85aaf Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 11 Apr 2012 22:12:56 +0200 Subject: drm/i915: split up ring->dispatch_execbuffer functions Now that we can, we should split them up in a way that makes some sense and banishes the IS_ checks into init code. 
Reviewed-by: Eric Anholt Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ringbuffer.c | 69 +++++++++++++++++++-------------- 1 file changed, 40 insertions(+), 29 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 68e1255452fa..8cffd0b9aaf3 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -814,7 +814,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring) } static int -ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length) +i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length) { int ret; @@ -832,37 +832,36 @@ ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length) } static int -render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, +i830_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 len) { - struct drm_device *dev = ring->dev; int ret; - if (IS_I830(dev) || IS_845G(dev)) { - ret = intel_ring_begin(ring, 4); - if (ret) - return ret; + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; - intel_ring_emit(ring, MI_BATCH_BUFFER); - intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); - intel_ring_emit(ring, offset + len - 8); - intel_ring_emit(ring, 0); - } else { - ret = intel_ring_begin(ring, 2); - if (ret) - return ret; + intel_ring_emit(ring, MI_BATCH_BUFFER); + intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); + intel_ring_emit(ring, offset + len - 8); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); - if (INTEL_INFO(dev)->gen >= 4) { - intel_ring_emit(ring, - MI_BATCH_BUFFER_START | (2 << 6) | - MI_BATCH_NON_SECURE_I965); - intel_ring_emit(ring, offset); - } else { - intel_ring_emit(ring, - MI_BATCH_BUFFER_START | (2 << 6)); - intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); - } - } + return 0; +} + +static int +i915_dispatch_execbuffer(struct intel_ring_buffer *ring, + u32 offset, u32 len) +{ + int ret; + + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; + + intel_ring_emit(ring, MI_BATCH_BUFFER_START | (2 << 6)); + intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); intel_ring_advance(ring); return 0; @@ -1332,7 +1331,14 @@ int intel_init_render_ring_buffer(struct drm_device *dev) ring->irq_enable_mask = I915_USER_INTERRUPT; } ring->write_tail = ring_write_tail; - ring->dispatch_execbuffer = render_ring_dispatch_execbuffer; + if (INTEL_INFO(dev)->gen >= 6) + ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + else if (INTEL_INFO(dev)->gen >= 4) + ring->dispatch_execbuffer = i965_dispatch_execbuffer; + else if (IS_I830(dev) || IS_845G(dev)) + ring->dispatch_execbuffer = i830_dispatch_execbuffer; + else + ring->dispatch_execbuffer = i915_dispatch_execbuffer; ring->init = init_render_ring; ring->cleanup = render_ring_cleanup; @@ -1373,7 +1379,12 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) ring->irq_enable_mask = I915_USER_INTERRUPT; } ring->write_tail = ring_write_tail; - ring->dispatch_execbuffer = render_ring_dispatch_execbuffer; + if (INTEL_INFO(dev)->gen >= 4) + ring->dispatch_execbuffer = i965_dispatch_execbuffer; + else if (IS_I830(dev) || IS_845G(dev)) + ring->dispatch_execbuffer = i830_dispatch_execbuffer; + else + ring->dispatch_execbuffer = i915_dispatch_execbuffer; ring->init = init_render_ring; ring->cleanup = render_ring_cleanup; @@ -1448,7 +1459,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) ring->irq_get = i9xx_ring_get_irq; 
ring->irq_put = i9xx_ring_put_irq; } - ring->dispatch_execbuffer = ring_dispatch_execbuffer; + ring->dispatch_execbuffer = i965_dispatch_execbuffer; } ring->init = init_ring_common; -- cgit v1.2.3-59-g8ed1b From 8620a3a908da34e1a0db9ad457136bc96340911f Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 11 Apr 2012 22:12:57 +0200 Subject: drm/i915: consolidate ring->add_request a bit They're identical, so just kill one. Also give the other a prefix to distinguish it from the gen6+ functions - this add_request function is not really generic code. v2: Fixup commit message as noted by Ben Widawsky. Reviewed-by: Eric Anholt Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ringbuffer.c | 29 ++++------------------------- 1 file changed, 4 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 8cffd0b9aaf3..ac05c749104a 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -565,27 +565,6 @@ pc_render_add_request(struct intel_ring_buffer *ring, return 0; } -static int -render_ring_add_request(struct intel_ring_buffer *ring, - u32 *result) -{ - u32 seqno = i915_gem_next_request_seqno(ring); - int ret; - - ret = intel_ring_begin(ring, 4); - if (ret) - return ret; - - intel_ring_emit(ring, MI_STORE_DWORD_INDEX); - intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - intel_ring_emit(ring, seqno); - intel_ring_emit(ring, MI_USER_INTERRUPT); - intel_ring_advance(ring); - - *result = seqno; - return 0; -} - static u32 gen6_ring_get_seqno(struct intel_ring_buffer *ring) { @@ -751,7 +730,7 @@ bsd_ring_flush(struct intel_ring_buffer *ring, } static int -ring_add_request(struct intel_ring_buffer *ring, +i9xx_add_request(struct intel_ring_buffer *ring, u32 *result) { u32 seqno; @@ -1323,7 +1302,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) ring->irq_put = gen5_ring_put_irq; ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY; } else { - ring->add_request = render_ring_add_request; + ring->add_request = i9xx_add_request; ring->flush = render_ring_flush; ring->get_seqno = ring_get_seqno; ring->irq_get = i9xx_ring_get_irq; @@ -1371,7 +1350,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) ring->irq_put = gen5_ring_put_irq; ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY; } else { - ring->add_request = render_ring_add_request; + ring->add_request = i9xx_add_request; ring->flush = render_ring_flush; ring->get_seqno = ring_get_seqno; ring->irq_get = i9xx_ring_get_irq; @@ -1448,7 +1427,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) } else { ring->mmio_base = BSD_RING_BASE; ring->flush = bsd_ring_flush; - ring->add_request = ring_add_request; + ring->add_request = i9xx_add_request; ring->get_seqno = ring_get_seqno; if (IS_GEN5(dev)) { ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; -- cgit v1.2.3-59-g8ed1b From 28f0cbf71f5ed9b080878c04158c754b090a674a Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 11 Apr 2012 22:12:58 +0200 Subject: drm/i915: don't set up gem ring functions on gen5 for !kms We already disallow initialization of gem in this case in the corresponding ioctl, so don't bother setting up the gem support ring functions in the legacy dri render ring init.
Reviewed-by: Eric Anholt Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ringbuffer.c | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index ac05c749104a..6b3ff37e752c 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1342,21 +1342,17 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) if (INTEL_INFO(dev)->gen >= 6) { /* non-kms not supported on gen6+ */ return -ENODEV; - } else if (IS_GEN5(dev)) { - ring->add_request = pc_render_add_request; - ring->flush = render_ring_flush; - ring->get_seqno = pc_render_get_seqno; - ring->irq_get = gen5_ring_get_irq; - ring->irq_put = gen5_ring_put_irq; - ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY; - } else { - ring->add_request = i9xx_add_request; - ring->flush = render_ring_flush; - ring->get_seqno = ring_get_seqno; - ring->irq_get = i9xx_ring_get_irq; - ring->irq_put = i9xx_ring_put_irq; - ring->irq_enable_mask = I915_USER_INTERRUPT; } + + /* Note: gem is not supported on gen5/ilk without kms (the corresponding + * gem_init ioctl returns with -ENODEV). Hence we do not need to set up + * the special gen5 functions. */ + ring->add_request = i9xx_add_request; + ring->flush = render_ring_flush; + ring->get_seqno = ring_get_seqno; + ring->irq_get = i9xx_ring_get_irq; + ring->irq_put = i9xx_ring_put_irq; + ring->irq_enable_mask = I915_USER_INTERRUPT; ring->write_tail = ring_write_tail; if (INTEL_INFO(dev)->gen >= 4) ring->dispatch_execbuffer = i965_dispatch_execbuffer; else if (IS_I830(dev) || IS_845G(dev)) ring->dispatch_execbuffer = i830_dispatch_execbuffer; else ring->dispatch_execbuffer = i915_dispatch_execbuffer; ring->init = init_render_ring; ring->cleanup = render_ring_cleanup; -- cgit v1.2.3-59-g8ed1b From f637fde434c9e3687798730c7ddd367e93666013 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 11 Apr 2012 22:12:59 +0200 Subject: drm/i915: inline enable/disable_irq into ring->get/put_irq Now that these are properly refactored, this additional indirection doesn't really buy us anything but confusion. Hence inline them. This duplicates the ironlake gt enable/disable code snippet, but we've already separated ilk from gen6+ gt irq in i915_irq.c, so I think this makes more sense.
Reviewed-by: Eric Anholt Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ringbuffer.c | 68 +++++++++++++-------------------- 1 file changed, 26 insertions(+), 42 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 6b3ff37e752c..6c144574a5b7 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -591,38 +591,6 @@ pc_render_get_seqno(struct intel_ring_buffer *ring) return pc->cpu_page[0]; } -static void -ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask) -{ - dev_priv->gt_irq_mask &= ~mask; - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - POSTING_READ(GTIMR); -} - -static void -ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask) -{ - dev_priv->gt_irq_mask |= mask; - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - POSTING_READ(GTIMR); -} - -static void -i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) -{ - dev_priv->irq_mask &= ~mask; - I915_WRITE(IMR, dev_priv->irq_mask); - POSTING_READ(IMR); -} - -static void -i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) -{ - dev_priv->irq_mask |= mask; - I915_WRITE(IMR, dev_priv->irq_mask); - POSTING_READ(IMR); -} - static bool gen5_ring_get_irq(struct intel_ring_buffer *ring) { @@ -633,8 +601,11 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring) return false; spin_lock(&ring->irq_lock); - if (ring->irq_refcount++ == 0) - ironlake_enable_irq(dev_priv, ring->irq_enable_mask); + if (ring->irq_refcount++ == 0) { + dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + POSTING_READ(GTIMR); + } spin_unlock(&ring->irq_lock); return true; @@ -647,8 +618,11 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring) drm_i915_private_t *dev_priv = dev->dev_private; spin_lock(&ring->irq_lock); - if (--ring->irq_refcount == 0) - ironlake_disable_irq(dev_priv, ring->irq_enable_mask); + if (--ring->irq_refcount == 0) { + dev_priv->gt_irq_mask |= ring->irq_enable_mask; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + POSTING_READ(GTIMR); + } spin_unlock(&ring->irq_lock); } @@ -662,8 +636,11 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring) return false; spin_lock(&ring->irq_lock); - if (ring->irq_refcount++ == 0) - i915_enable_irq(dev_priv, ring->irq_enable_mask); + if (ring->irq_refcount++ == 0) { + dev_priv->irq_mask &= ~ring->irq_enable_mask; + I915_WRITE(IMR, dev_priv->irq_mask); + POSTING_READ(IMR); + } spin_unlock(&ring->irq_lock); return true; @@ -676,8 +653,11 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring) drm_i915_private_t *dev_priv = dev->dev_private; spin_lock(&ring->irq_lock); - if (--ring->irq_refcount == 0) - i915_disable_irq(dev_priv, ring->irq_enable_mask); + if (--ring->irq_refcount == 0) { + dev_priv->irq_mask |= ring->irq_enable_mask; + I915_WRITE(IMR, dev_priv->irq_mask); + POSTING_READ(IMR); + } spin_unlock(&ring->irq_lock); } @@ -769,7 +749,9 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring) spin_lock(&ring->irq_lock); if (ring->irq_refcount++ == 0) { I915_WRITE_IMR(ring, ~ring->irq_enable_mask); - ironlake_enable_irq(dev_priv, ring->irq_enable_mask); + dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + POSTING_READ(GTIMR); } spin_unlock(&ring->irq_lock); @@ -785,7 +767,9 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring) spin_lock(&ring->irq_lock); if (--ring->irq_refcount == 0) { I915_WRITE_IMR(ring, ~0); - ironlake_disable_irq(dev_priv, ring->irq_enable_mask); + dev_priv->gt_irq_mask 
|= ring->irq_enable_mask; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + POSTING_READ(GTIMR); } spin_unlock(&ring->irq_lock); -- cgit v1.2.3-59-g8ed1b From 79985eee842ef146ed6307a29fdc2fa008036421 Mon Sep 17 00:00:00 2001 From: Daniel Kurtz Date: Fri, 13 Apr 2012 19:47:53 +0800 Subject: drm/i915/intel_i2c: handle zero-length reads A common method of probing an i2c bus is trying to do a zero-length read. Handle this case by checking the length first waiting for data to be read. This is actually important, since attempting a zero-length read is one of the ways that i2cdetect and i2c_new_probed_device detect whether there is device present on the bus with a given address. Reviewed-by: Chris Wilson Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=48269 Signed-off-by: Daniel Kurtz Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_i2c.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index cab879fedf3c..e24916042550 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -217,7 +217,7 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, (len << GMBUS_BYTE_COUNT_SHIFT) | (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ | GMBUS_SW_RDY); - do { + while (len) { int ret; u32 val, loop = 0; u32 gmbus2; @@ -235,7 +235,7 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, *buf++ = val & 0xff; val >>= 8; } while (--len && ++loop < 4); - } while (len); + } return 0; } -- cgit v1.2.3-59-g8ed1b From 56fa6d6ff76c7700f8dd131bee9ffa6c3c06dcd4 Mon Sep 17 00:00:00 2001 From: Daniel Kurtz Date: Fri, 13 Apr 2012 19:47:54 +0800 Subject: drm/i915/intel_i2c: reduce verbosity of some messages Some of these messages can be hit when userspace tries to probe the i2c with nothing connected or if the driver code tries to do the same. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=48248 Signed-off-by: Daniel Kurtz Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_i2c.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index e24916042550..e04255edc801 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -383,7 +383,7 @@ gmbus_xfer(struct i2c_adapter *adapter, */ if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, 10)) { - DRM_INFO("GMBUS [%s] timed out waiting for idle\n", + DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n", adapter->name); ret = -ETIMEDOUT; } @@ -399,7 +399,8 @@ clear_err: */ if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, 10)) - DRM_INFO("GMBUS [%s] timed out after NAK\n", adapter->name); + DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n", + adapter->name); /* Toggle the Software Clear Interrupt bit. This has the effect * of resetting the GMBUS controller and so clearing the @@ -409,7 +410,7 @@ clear_err: I915_WRITE(GMBUS1 + reg_offset, 0); I915_WRITE(GMBUS0 + reg_offset, 0); - DRM_DEBUG_DRIVER("GMBUS [%s] NAK for addr: %04x %c(%d)\n", + DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n", adapter->name, msgs[i].addr, (msgs[i].flags & I2C_M_RD) ? 
'r' : 'w', msgs[i].len); -- cgit v1.2.3-59-g8ed1b From d1e61e7fc4456c4cb9a33ed182edf40e34ddedea Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 10 Apr 2012 17:00:41 +0100 Subject: drm/i915: Trigger hangcheck if we detect more a repeating missed IRQ On the first instance we just wish to kick the waiters and see if that terminates the wait conditions. If it does not, then we do not want to keep retrying without ever making any forward progress and becoming stuck in a hangcheck loop. Reported-and-tested-by: Lukas Hejtmanek Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=48209 Reviewed-by: Ben Widawsky Signed-off-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 63 +++++++++++++++++++++++++---------------- 1 file changed, 38 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index febddc2952fb..39663f51e102 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1875,6 +1875,36 @@ static bool kick_ring(struct intel_ring_buffer *ring) return false; } +static bool i915_hangcheck_hung(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + if (dev_priv->hangcheck_count++ > 1) { + DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); + i915_handle_error(dev, true); + + if (!IS_GEN2(dev)) { + /* Is the chip hanging on a WAIT_FOR_EVENT? + * If so we can simply poke the RB_WAIT bit + * and break the hang. This should work on + * all but the second generation chipsets. + */ + if (kick_ring(&dev_priv->ring[RCS])) + return false; + + if (HAS_BSD(dev) && kick_ring(&dev_priv->ring[VCS])) + return false; + + if (HAS_BLT(dev) && kick_ring(&dev_priv->ring[BCS])) + return false; + } + + return true; + } + + return false; +} + /** * This is called when the chip hasn't reported back with completed * batchbuffers in a long time. The first time this is called we simply record @@ -1895,9 +1925,14 @@ void i915_hangcheck_elapsed(unsigned long data) if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) && i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) && i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) { - dev_priv->hangcheck_count = 0; - if (err) + if (err) { + if (i915_hangcheck_hung(dev)) + return; + goto repeat; + } + + dev_priv->hangcheck_count = 0; return; } @@ -1919,30 +1954,8 @@ void i915_hangcheck_elapsed(unsigned long data) dev_priv->last_acthd_blt == acthd_blt && dev_priv->last_instdone == instdone && dev_priv->last_instdone1 == instdone1) { - if (dev_priv->hangcheck_count++ > 1) { - DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); - i915_handle_error(dev, true); - - if (!IS_GEN2(dev)) { - /* Is the chip hanging on a WAIT_FOR_EVENT? - * If so we can simply poke the RB_WAIT bit - * and break the hang. This should work on - * all but the second generation chipsets. - */ - if (kick_ring(&dev_priv->ring[RCS])) - goto repeat; - - if (HAS_BSD(dev) && - kick_ring(&dev_priv->ring[VCS])) - goto repeat; - - if (HAS_BLT(dev) && - kick_ring(&dev_priv->ring[BCS])) - goto repeat; - } - + if (i915_hangcheck_hung(dev)) return; - } } else { dev_priv->hangcheck_count = 0; -- cgit v1.2.3-59-g8ed1b From c07496fa61f4c5cb2addd1c57f6b22fcaeea2eeb Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 13 Apr 2012 15:51:51 +0200 Subject: drm/i915: don't pwrite tiled objects through the gtt ... we will botch up the bit17 swizzling. Furthermore tiled pwrite is a (now) unused slowpath, so no one really cares. 
This fixes the last swizzling issues I have with i-g-t on my bit17 swizzling i915G. No regression, it's been broken since the dawn of gem, but it's nice for regression tracking when really _all_ i-g-t tests work. Actually this is not true, Chris Wilson noticed while reviewing this patch that the commit commit d9e86c0ee60f323e890484628f351bf50fa9a15d Author: Chris Wilson Date: Wed Nov 10 16:40:20 2010 +0000 drm/i915: Pipelined fencing [infrastructure] contained a functional change that broke things. Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 71934dd0ee43..9415c07b6285 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -876,6 +876,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, if (obj->gtt_space && obj->cache_level == I915_CACHE_NONE && + obj->tiling_mode == I915_TILING_NONE && obj->map_and_fenceable && obj->base.write_domain != I915_GEM_DOMAIN_CPU) { ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); -- cgit v1.2.3-59-g8ed1b From fc6826d1dcd65f3d1e9a5377678882e4e08f02be Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 15 Apr 2012 11:56:03 +0100 Subject: drm/i915: Refactor the deferred PM_IIR handling into a single function This function, along with the registers and deferred work handler, is shared with SandyBridge, IvyBridge and their variants. So consolidate the duplicate code into a single function. Signed-off-by: Chris Wilson Reviewed-by: Ben Widawsky Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 70 ++++++++++++++++++----------------------- 1 file changed, 31 insertions(+), 39 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 39663f51e102..967b92eaf797 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -451,6 +451,31 @@ static void snb_gt_irq_handler(struct drm_device *dev, } } +static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, + u32 pm_iir) +{ + unsigned long flags; + + /* + * IIR bits should never already be set because IMR should + * prevent an interrupt from being shown in IIR. The warning + * displays a case where we've unsafely cleared + * dev_priv->pm_iir. Although missing an interrupt of the same + * type is not a problem, it displays a problem in the logic. + * + * The mask bit in IMR is cleared by rps_work.
+ */ + + spin_lock_irqsave(&dev_priv->rps_lock, flags); + WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n"); + dev_priv->pm_iir |= pm_iir; + I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir); + POSTING_READ(GEN6_PMIMR); + spin_unlock_irqrestore(&dev_priv->rps_lock, flags); + + queue_work(dev_priv->wq, &dev_priv->rps_work); +} + static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; @@ -532,16 +557,8 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) blc_event = true; - if (pm_iir & GEN6_PM_DEFERRED_EVENTS) { - unsigned long flags; - spin_lock_irqsave(&dev_priv->rps_lock, flags); - WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n"); - dev_priv->pm_iir |= pm_iir; - I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir); - POSTING_READ(GEN6_PMIMR); - spin_unlock_irqrestore(&dev_priv->rps_lock, flags); - queue_work(dev_priv->wq, &dev_priv->rps_work); - } + if (pm_iir & GEN6_PM_DEFERRED_EVENTS) + gen6_queue_rps_work(dev_priv, pm_iir); I915_WRITE(GTIIR, gt_iir); I915_WRITE(GEN6_PMIIR, pm_iir); @@ -655,16 +672,8 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) pch_irq_handler(dev); } - if (pm_iir & GEN6_PM_DEFERRED_EVENTS) { - unsigned long flags; - spin_lock_irqsave(&dev_priv->rps_lock, flags); - WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n"); - dev_priv->pm_iir |= pm_iir; - I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir); - POSTING_READ(GEN6_PMIMR); - spin_unlock_irqrestore(&dev_priv->rps_lock, flags); - queue_work(dev_priv->wq, &dev_priv->rps_work); - } + if (pm_iir & GEN6_PM_DEFERRED_EVENTS) + gen6_queue_rps_work(dev_priv, pm_iir); /* should clear PCH hotplug event before clear CPU irq */ I915_WRITE(SDEIIR, pch_iir); @@ -764,25 +773,8 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) i915_handle_rps_change(dev); } - if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) { - /* - * IIR bits should never already be set because IMR should - * prevent an interrupt from being shown in IIR. The warning - * displays a case where we've unsafely cleared - * dev_priv->pm_iir. Although missing an interrupt of the same - * type is not a problem, it displays a problem in the logic. - * - * The mask bit in IMR is cleared by rps_work. - */ - unsigned long flags; - spin_lock_irqsave(&dev_priv->rps_lock, flags); - WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n"); - dev_priv->pm_iir |= pm_iir; - I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir); - POSTING_READ(GEN6_PMIMR); - spin_unlock_irqrestore(&dev_priv->rps_lock, flags); - queue_work(dev_priv->wq, &dev_priv->rps_work); - } + if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) + gen6_queue_rps_work(dev_priv, pm_iir); /* should clear PCH hotplug event before clear CPU irq */ I915_WRITE(SDEIIR, pch_iir); -- cgit v1.2.3-59-g8ed1b From f681fa235f931bcf41f6c8fa22aadde9d8833eec Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 14 Apr 2012 21:56:08 +0100 Subject: drm/i915: Export the generic, not arch specific, intel_update_watermarks() Rather than export every single architecture specific update_wm, just export the wrapper around the display vtable. 
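The point of exporting only the wrapper is that callers such as the sprite code stay generation-agnostic: they poke a single entry point and the vtable routes to the right implementation. A minimal standalone sketch of that shape, with invented names standing in for the driver's real types and functions:

#include <stdio.h>

struct demo_device {
	/* per-generation hook, filled in once at init time */
	void (*update_wm)(struct demo_device *dev);
};

/* Stand-in for the snb-specific implementation; it stays private. */
static void demo_snb_update_wm(struct demo_device *dev)
{
	(void)dev;
	printf("snb watermark update\n");
}

/* The only entry point callers see; they never name the snb variant. */
static void demo_update_watermarks(struct demo_device *dev)
{
	if (dev->update_wm)
		dev->update_wm(dev);
}

int main(void)
{
	struct demo_device dev = { .update_wm = demo_snb_update_wm };

	demo_update_watermarks(&dev);	/* what a sprite-style caller would do */
	return 0;
}

Callers in the sprite diff that follows use exactly this shape: they call the wrapper, never the snb-specific function.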
Signed-off-by: Chris Wilson Reviewed-by: Jesse Barnes Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 5 ++--- drivers/gpu/drm/i915/intel_drv.h | 2 +- drivers/gpu/drm/i915/intel_sprite.c | 4 ++-- 3 files changed, 5 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 33aaad30c0bc..fdf8c9f5cd6b 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -45,7 +45,6 @@ #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) bool intel_pipe_has_type(struct drm_crtc *crtc, int type); -static void intel_update_watermarks(struct drm_device *dev); static void intel_increase_pllclock(struct drm_crtc *crtc); static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); @@ -4820,7 +4819,7 @@ static void ironlake_update_wm(struct drm_device *dev) */ } -void sandybridge_update_wm(struct drm_device *dev) +static void sandybridge_update_wm(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ @@ -5125,7 +5124,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, * We don't use the sprite, so we can ignore that. And on Crestline we have * to set the non-SR watermarks to 8. */ -static void intel_update_watermarks(struct drm_device *dev) +void intel_update_watermarks(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 79cabf58d877..8748e5e500fc 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -422,7 +422,7 @@ extern void intel_write_eld(struct drm_encoder *encoder, extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe); /* For use by IVB LP watermark workaround in intel_sprite.c */ -extern void sandybridge_update_wm(struct drm_device *dev); +extern void intel_update_watermarks(struct drm_device *dev); extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, uint32_t sprite_width, int pixel_size); diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index da525b69f7bf..10dd1b6ec5f3 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -112,13 +112,13 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, */ if (crtc_w != src_w || crtc_h != src_h) { dev_priv->sprite_scaling_enabled = true; - sandybridge_update_wm(dev); + intel_update_watermarks(dev); intel_wait_for_vblank(dev, pipe); sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; } else { dev_priv->sprite_scaling_enabled = false; /* potentially re-enable LP watermarks */ - sandybridge_update_wm(dev); + intel_update_watermarks(dev); } I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); -- cgit v1.2.3-59-g8ed1b From 8aaa81a166d80ac9bf2813984e5b4c2503d0fe08 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 14 Apr 2012 22:14:26 +0100 Subject: drm/i915/sprite: Always enable the scaler on IronLake As I do not see the output update without the scaler enabled on my i3-330m, always enable it. 
Signed-off-by: Chris Wilson Acked-by: Jesse Barnes Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_sprite.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 10dd1b6ec5f3..749d04356306 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -219,7 +219,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_plane *intel_plane = to_intel_plane(plane); int pipe = intel_plane->pipe, pixel_size; - u32 dvscntr, dvsscale = 0; + u32 dvscntr, dvsscale; dvscntr = I915_READ(DVSCNTR(pipe)); @@ -275,7 +275,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size); - if (crtc_w != src_w || crtc_h != src_h) + dvsscale = 0; + if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h) dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); -- cgit v1.2.3-59-g8ed1b From 17038de5f16569a25343cf68668f3b657eafb00e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 16 Apr 2012 22:43:42 +0100 Subject: drm/i915/dp: Flush any outstanding work to turn the VDD off As we may kick off a delayed workqueue task to switch off the VDD lines, we need to complete that task prior to turning off the panel (which itself depends upon VDD being off). v2: Don't cancel the outstanding work as this may trigger a deadlock Signed-off-by: Chris Wilson Cc: Keith Packard Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_dp.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 6346b29ee934..38d097db696f 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1128,6 +1128,7 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp) DRM_DEBUG_KMS("Turn eDP power off\n"); WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n"); + ironlake_panel_vdd_off_sync(intel_dp); /* finish any pending work */ pp = ironlake_get_pp_control(dev_priv); pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); -- cgit v1.2.3-59-g8ed1b From de4a8bd16205283e9cb746e8fbfa7f9735c039b5 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 11 Apr 2012 20:42:38 +0200 Subject: drm/i915: implement a media hang w/a Contrary to the other clock gating w/a in GEN6_UCGCTL1, this one is actually documented in Bspec, vol1g "GT Interface Registers [SNB]", Section 1.5.1 "UCGCTL1 - Unit Level Clock Gating Control 1". Supposedly this can prevent hangs on the media ring.
Reviewed-by: Ben Widawsky Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_display.c | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3f7fc4629363..1124e4f594f5 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3893,6 +3893,7 @@ #define GEN6_UCGCTL1 0x9400 # define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5) +# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7) #define GEN6_UCGCTL2 0x9404 # define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 2d0dc27323c1..813cc3cda059 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -8880,7 +8880,8 @@ static void gen6_init_clock_gating(struct drm_device *dev) I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | - GEN6_BLBUNIT_CLOCK_GATE_DISABLE); + GEN6_BLBUNIT_CLOCK_GATE_DISABLE | + GEN6_CSUNIT_CLOCK_GATE_DISABLE); /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock * gating disable must be set. Failure to set it results in -- cgit v1.2.3-59-g8ed1b From be901a5a1bdb13c3390110d4b9780c03018d96a0 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 11 Apr 2012 20:42:39 +0200 Subject: drm/i915: set w/a bit for snb pagefaults Bspec says that we need to set this: vol1c.3 "Blitter Command Streamer", Section 1.1.2.1 "GAB_CTL_REG - GAB Unit Control Register". We don't really rely on pagefaults, but who knows what this all affects. Reviewed-by: Ben Widawsky Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 7 ++++++- drivers/gpu/drm/i915/i915_reg.h | 3 +++ 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ac8bc1df7c8f..92acc5f8e334 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3669,7 +3669,12 @@ void i915_gem_init_ppgtt(struct drm_device *dev) pd_offset <<= 16; if (INTEL_INFO(dev)->gen == 6) { - uint32_t ecochk = I915_READ(GAM_ECOCHK); + uint32_t ecochk, gab_ctl; + + gab_ctl = I915_READ(GAB_CTL); + I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); + + ecochk = I915_READ(GAM_ECOCHK); I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); I915_WRITE(GFX_MODE, GFX_MODE_ENABLE(GFX_PPGTT_ENABLE)); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 1124e4f594f5..d875fb19f62d 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -127,6 +127,9 @@ #define ECOCHK_PPGTT_CACHE64B (0x3<<3) #define ECOCHK_PPGTT_CACHE4B (0x0<<3) +#define GAB_CTL 0x24000 +#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8) + /* VGA stuff */ #define VGA_ST01_MDA 0x3ba -- cgit v1.2.3-59-g8ed1b From 48ecfa1090b65390b1cfa4c693ece6b171a407e4 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 11 Apr 2012 20:42:40 +0200 Subject: drm/i915: properly set ppgtt cacheability on snb For some reason snb has 2 fields to set ppgtt cacheability. This one here does not exist on gen7. This might explain why ppgtt wasn't a win on snb like on ivb - not enough pte caching. v2: Fixup rebase fail. 
Reviewed-by: Ben Widawsky Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 5 ++++- drivers/gpu/drm/i915/i915_reg.h | 4 ++++ 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 92acc5f8e334..aa44ff240147 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3669,7 +3669,10 @@ void i915_gem_init_ppgtt(struct drm_device *dev) pd_offset <<= 16; if (INTEL_INFO(dev)->gen == 6) { - uint32_t ecochk, gab_ctl; + uint32_t ecochk, gab_ctl, ecobits; + + ecobits = I915_READ(GAC_ECO_BITS); + I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); gab_ctl = I915_READ(GAB_CTL); I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index d875fb19f62d..a9030f852cf9 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -127,6 +127,10 @@ #define ECOCHK_PPGTT_CACHE64B (0x3<<3) #define ECOCHK_PPGTT_CACHE4B (0x0<<3) +#define GAC_ECO_BITS 0x14090 +#define ECOBITS_PPGTT_CACHE64B (3<<8) +#define ECOBITS_PPGTT_CACHE4B (0<<8) + #define GAB_CTL 0x24000 #define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8) -- cgit v1.2.3-59-g8ed1b From bf97b276ca04cee9ab65ffd378fa8e6aedd71ff6 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 11 Apr 2012 20:42:41 +0200 Subject: drm/i915: implement w/a for incorrect guarband clipping According to Bsepc, this should be set by default, but isn't. See vo1c.4 "Render Engine Command Streamer", Section 1.1.14.3 "3D_CHICKEN3" Bspec also says that we always need to set all mask bits. v2: Add comment about the mask bits wtf. Reviewed-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_display.c | 4 ++++ 2 files changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index a9030f852cf9..6d9205436121 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -497,6 +497,7 @@ */ # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) #define _3D_CHICKEN3 0x02090 +#define _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL (1 << 5) #define MI_MODE 0x0209c # define VS_TIMER_DISPATCH (1 << 6) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 813cc3cda059..1a6bb6101491 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -8897,6 +8897,10 @@ static void gen6_init_clock_gating(struct drm_device *dev) GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | GEN6_RCCUNIT_CLOCK_GATE_DISABLE); + /* Bspec says we need to always set all mask bits. */ + I915_WRITE(_3D_CHICKEN, (0xFFFF << 16) | + _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL); + /* * According to the spec the following bits should be * set in order to enable memory self-refresh and fbc: -- cgit v1.2.3-59-g8ed1b From 009be664ecc77d58d3c27fb22347b969745a329a Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 11 Apr 2012 20:42:42 +0200 Subject: drm/i915: set stc evict disable lra evict w/a Our workaround list kindly lists that this new default value needs to be updated in Bspec. Naturally, this did not happen. 
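CACHE_MODE_0 is a masked register: the upper 16 bits select which of the lower 16 bits a write actually changes, which is why the patch below can clear a single bit without a read-modify-write. A sketch of the two idioms, with bit names taken from i915_reg.h as shown in this series (the "set" example is purely illustrative, not something this patch does):

        /* Clear a bit: put it only in the mask half, leave the value half zero. */
        I915_WRITE(CACHE_MODE_0, CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);

        /* Set a bit: put it in both the mask half and the value half. */
        I915_WRITE(CACHE_MODE_0,
                   (CM0_IZ_OPT_DISABLE << CM0_MASK_SHIFT) | CM0_IZ_OPT_DISABLE);
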
Acked-by: Ben Widawsky Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_display.c | 4 ++++ 2 files changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 6d9205436121..02124a51edad 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -639,6 +639,7 @@ #define CM0_MASK_SHIFT 16 #define CM0_IZ_OPT_DISABLE (1<<6) #define CM0_ZR_OPT_DISABLE (1<<5) +#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5) #define CM0_DEPTH_EVICT_DISABLE (1<<4) #define CM0_COLOR_EVICT_DISABLE (1<<3) #define CM0_DEPTH_WRITE_DISABLE (1<<1) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 1a6bb6101491..7506a72ee882 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -8878,6 +8878,10 @@ static void gen6_init_clock_gating(struct drm_device *dev) I915_WRITE(WM2_LP_ILK, 0); I915_WRITE(WM1_LP_ILK, 0); + /* clear masked bit */ + I915_WRITE(CACHE_MODE_0, + CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT); + I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | GEN6_BLBUNIT_CLOCK_GATE_DISABLE | -- cgit v1.2.3-59-g8ed1b From dc04a61a0503613e17bc1405538fec52e74d4b43 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Fri, 13 Apr 2012 17:08:37 -0300 Subject: drm/i915: add definition of LPT FDI port width registers v2: change bits names to align better with other bits style Reviewed-by: Rodrigo Vivi Signed-off-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 02124a51edad..bc1a5c60822e 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3657,6 +3657,9 @@ #define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8) #define FDI_LINK_TRAIN_NORMAL_CPT (3<<8) #define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8) +/* LPT */ +#define FDI_PORT_WIDTH_2X_LPT (1<<19) +#define FDI_PORT_WIDTH_1X_LPT (0<<19) #define _FDI_RXA_MISC 0xf0010 #define _FDI_RXB_MISC 0xf1010 -- cgit v1.2.3-59-g8ed1b From ef4d084fae9d4719bc52f97e15e41e1602e3bc6e Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Fri, 13 Apr 2012 17:08:38 -0300 Subject: drm/i915: add WRPLL divider programming bits Those are used to program the WRPLL dividers correctly for each gives frequency. 
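The three helpers below just shift their argument into the right field of the WRPLL control value. A caller would combine them with the clock select bits roughly like this (the divider numbers are placeholders for illustration only, not values taken from this series; real values depend on the target link clock):

        u32 wrpll;

        /* Placeholder dividers -- for illustration of the packing only. */
        wrpll = WRPLL_PLL_SELECT_LCPLL_2700 |
                WRPLL_DIVIDER_REFERENCE(24) |
                WRPLL_DIVIDER_POST(8) |
                WRPLL_DIVIDER_FEEDBACK(120);
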
Signed-off-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index bc1a5c60822e..0668815d05d7 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4191,6 +4191,10 @@ #define WRPLL_PLL_SELECT_SSC (0x01<<28) #define WRPLL_PLL_SELECT_NON_SCC (0x02<<28) #define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28) +/* WRPLL divider programming */ +#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) +#define WRPLL_DIVIDER_POST(x) ((x)<<8) +#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16) /* Port clock selection */ #define PORT_CLK_SEL_A 0x46100 -- cgit v1.2.3-59-g8ed1b From 246bdbeb0f0fb14c4c085b6ed7edbab11afccd33 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Fri, 13 Apr 2012 17:08:44 -0300 Subject: drm/i915: share forcewaking code between IVB and HSW Reviewed-by: Rodrigo Vivi Signed-off-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 7506a72ee882..0ef44720af97 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9312,7 +9312,7 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.force_wake_put = __gen6_gt_force_wake_put; /* IVB configs may use multi-threaded forcewake */ - if (IS_IVYBRIDGE(dev)) { + if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { u32 ecobus; /* A small trick here - if the bios hasn't configured MT forcewake, -- cgit v1.2.3-59-g8ed1b From c51ed787f6c49de7c2180c0f78e1d0e1360c7e86 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Fri, 13 Apr 2012 17:08:45 -0300 Subject: drm/i915: haswell has 3 pipes as well They work differently, but the count is the same. 
Reviewed-by: Rodrigo Vivi Signed-off-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 333b7468c8ec..a813f652fa1f 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -2115,7 +2115,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) spin_lock_init(&dev_priv->error_lock); spin_lock_init(&dev_priv->rps_lock); - if (IS_IVYBRIDGE(dev)) + if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) dev_priv->num_pipe = 3; else if (IS_MOBILE(dev) || !IS_GEN2(dev)) dev_priv->num_pipe = 2; -- cgit v1.2.3-59-g8ed1b From 0cd83aa9a6aba8fec7d94d4fd5302172e14cebf7 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Fri, 13 Apr 2012 17:08:48 -0300 Subject: drm/i915: share IVB cursor routine with Haswell Reviewed-by: Rodrigo Vivi Signed-off-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 0ef44720af97..c67eb7784aa8 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6770,7 +6770,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, if (!visible && !intel_crtc->cursor_visible) return; - if (IS_IVYBRIDGE(dev)) { + if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { I915_WRITE(CURPOS_IVB(pipe), pos); ivb_update_cursor(crtc, base); } else { -- cgit v1.2.3-59-g8ed1b From 83de97c885b633ab6d12346a406911fadeb85f8c Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Fri, 13 Apr 2012 17:08:54 -0300 Subject: drm/i915: disable rc6 on haswell for now This needs proper enablement to avoid machine hangs, so let's just avoid it for now. Reviewed-by: Rodrigo Vivi Signed-off-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index c67eb7784aa8..02e9932c3774 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -8585,6 +8585,10 @@ int intel_enable_rc6(const struct drm_device *dev) if (INTEL_INFO(dev)->gen == 5) return 0; + /* Sorry Haswell, no RC6 for you for now. */ + if (IS_HASWELL(dev)) + return 0; + /* * Disable rc6 on Sandybridge */ -- cgit v1.2.3-59-g8ed1b From 446f254566ea8911c9e19c7bc8a162fc0e53cf31 Mon Sep 17 00:00:00 2001 From: Armin Reese Date: Fri, 30 Mar 2012 16:20:16 -0700 Subject: drm/i915: Mask reserved bits in display/sprite address registers The purpose of this patch is to avoid zeroing the lower 12 reserved bits of surface base address registers (framebuffer & sprite). There are bits in that range that may occasionally be set by BIOS or by other components. 
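The helper introduced below is a read-modify-write that keeps whatever the BIOS (or another component) left in the low 12 reserved bits and replaces only the page-aligned address. Expanded by hand, I915_MODIFY_DISPBASE(DSPSURF(plane), Start) is equivalent to:

        u32 cur = I915_READ(DSPSURF(plane));

        /* Preserve the reserved bits [11:0], update only the 4K-aligned base. */
        I915_WRITE(DSPSURF(plane), Start | I915_LO_DISPBASE(cur));
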
Signed-off-by: Armin Reese Reviewed-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 3 ++- drivers/gpu/drm/i915/i915_reg.h | 7 +++++++ drivers/gpu/drm/i915/intel_display.c | 4 ++-- drivers/gpu/drm/i915/intel_sprite.c | 8 ++++---- 4 files changed, 15 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 967b92eaf797..ab023ca73b45 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1368,7 +1368,8 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) obj = work->pending_flip_obj; if (INTEL_INFO(dev)->gen >= 4) { int dspsurf = DSPSURF(intel_crtc->plane); - stall_detected = I915_READ(dspsurf) == obj->gtt_offset; + stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == + obj->gtt_offset; } else { int dspaddr = DSPADDR(intel_crtc->plane); stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 0668815d05d7..d093dba8224b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2869,6 +2869,13 @@ #define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF) #define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF) +/* Display/Sprite base address macros */ +#define DISP_BASEADDR_MASK (0xfffff000) +#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK) +#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK) +#define I915_MODIFY_DISPBASE(reg, gfx_addr) \ + (I915_WRITE(reg, gfx_addr | I915_LO_DISPBASE(I915_READ(reg)))) + /* VBIOS flags */ #define SWF00 0x71410 #define SWF01 0x71414 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 02e9932c3774..eb7ebf49f97e 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2236,7 +2236,7 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, Start, Offset, x, y, fb->pitches[0]); I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); if (INTEL_INFO(dev)->gen >= 4) { - I915_WRITE(DSPSURF(plane), Start); + I915_MODIFY_DISPBASE(DSPSURF(plane), Start); I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); I915_WRITE(DSPADDR(plane), Offset); } else @@ -2316,7 +2316,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc, DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", Start, Offset, x, y, fb->pitches[0]); I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); - I915_WRITE(DSPSURF(plane), Start); + I915_MODIFY_DISPBASE(DSPSURF(plane), Start); I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); I915_WRITE(DSPADDR(plane), Offset); POSTING_READ(reg); diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 987800a0234f..fbf03b996587 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -133,7 +133,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w); I915_WRITE(SPRSCALE(pipe), sprscale); I915_WRITE(SPRCTL(pipe), sprctl); - I915_WRITE(SPRSURF(pipe), obj->gtt_offset); + I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset); POSTING_READ(SPRSURF(pipe)); } @@ -149,7 +149,7 @@ ivb_disable_plane(struct drm_plane *plane) /* Can't leave the scaler enabled... 
*/ I915_WRITE(SPRSCALE(pipe), 0); /* Activate double buffered register update */ - I915_WRITE(SPRSURF(pipe), 0); + I915_MODIFY_DISPBASE(SPRSURF(pipe), 0); POSTING_READ(SPRSURF(pipe)); } @@ -291,7 +291,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); I915_WRITE(DVSSCALE(pipe), dvsscale); I915_WRITE(DVSCNTR(pipe), dvscntr); - I915_WRITE(DVSSURF(pipe), obj->gtt_offset); + I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset); POSTING_READ(DVSSURF(pipe)); } @@ -307,7 +307,7 @@ ilk_disable_plane(struct drm_plane *plane) /* Disable the scaler */ I915_WRITE(DVSSCALE(pipe), 0); /* Flush double buffered register updates */ - I915_WRITE(DVSSURF(pipe), 0); + I915_MODIFY_DISPBASE(DVSSURF(pipe), 0); POSTING_READ(DVSSURF(pipe)); } -- cgit v1.2.3-59-g8ed1b From df0323c42ac35f81b49e25fe04e8e52b9c6f6467 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Tue, 17 Apr 2012 15:06:33 -0700 Subject: drm/i915: IBX+ doesn't have separate vsync/hsync controls on the VGA DAC When the PCH split occurred, hw dropped support for separate hsync and vsync disable in the VGA DAC. So add a PCH specific DPMS function that just uses the port enable bit for controlling DPMS states. Before this fix, when anything other than a full DPMS off occurred, the VGA port would be left enabled and scanning out while all the other heads would turn off as expected. v2: duplicate encoder helper vtable into pch and gmch versions (Daniel) Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=48491 Reviewed-by: Chris Wilson Signed-off-by: Jesse Barnes [danvet: s/intel_crt_dpms/gmch_crt_dpms as suggested by Chris.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_crt.c | 54 ++++++++++++++++++++++++++++++++-------- 1 file changed, 43 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 70b0f1abf149..0976137ab79a 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -55,18 +55,36 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector) struct intel_crt, base); } -static void intel_crt_dpms(struct drm_encoder *encoder, int mode) +static void pch_crt_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 temp, reg; + u32 temp; - if (HAS_PCH_SPLIT(dev)) - reg = PCH_ADPA; - else - reg = ADPA; + temp = I915_READ(PCH_ADPA); + temp &= ~ADPA_DAC_ENABLE; + + switch (mode) { + case DRM_MODE_DPMS_ON: + temp |= ADPA_DAC_ENABLE; + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + /* Just leave port enable cleared */ + break; + } + + I915_WRITE(PCH_ADPA, temp); +} - temp = I915_READ(reg); +static void gmch_crt_dpms(struct drm_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 temp; + + temp = I915_READ(ADPA); temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); temp &= ~ADPA_DAC_ENABLE; @@ -85,7 +103,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode) break; } - I915_WRITE(reg, temp); + I915_WRITE(ADPA, temp); } static int intel_crt_mode_valid(struct drm_connector *connector, @@ -516,12 +534,20 @@ static void intel_crt_reset(struct drm_connector *connector) * Routines for controlling stuff on the analog port */ -static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = { - 
.dpms = intel_crt_dpms, +static const struct drm_encoder_helper_funcs pch_encoder_funcs = { + .mode_fixup = intel_crt_mode_fixup, + .prepare = intel_encoder_prepare, + .commit = intel_encoder_commit, + .mode_set = intel_crt_mode_set, + .dpms = pch_crt_dpms, +}; + +static const struct drm_encoder_helper_funcs gmch_encoder_funcs = { .mode_fixup = intel_crt_mode_fixup, .prepare = intel_encoder_prepare, .commit = intel_encoder_commit, .mode_set = intel_crt_mode_set, + .dpms = gmch_crt_dpms, }; static const struct drm_connector_funcs intel_crt_connector_funcs = { @@ -567,6 +593,7 @@ void intel_crt_init(struct drm_device *dev) struct intel_crt *crt; struct intel_connector *intel_connector; struct drm_i915_private *dev_priv = dev->dev_private; + const struct drm_encoder_helper_funcs *encoder_helper_funcs; /* Skip machines without VGA that falsely report hotplug events */ if (dmi_check_system(intel_no_crt)) @@ -602,7 +629,12 @@ void intel_crt_init(struct drm_device *dev) connector->interlace_allowed = 1; connector->doublescan_allowed = 0; - drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs); + if (HAS_PCH_SPLIT(dev)) + encoder_helper_funcs = &pch_encoder_funcs; + else + encoder_helper_funcs = &gmch_encoder_funcs; + + drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs); drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); drm_sysfs_connector_add(connector); -- cgit v1.2.3-59-g8ed1b From c43b5634037ff00c83cde4ab7fc5e46831c846ef Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 16 Apr 2012 14:07:40 -0700 Subject: drm/i915: [sparse] trivial sparse fixes This should contain all the changes which require no thought to make sparse happy. Signed-off-by: Ben Widawsky Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 2 ++ drivers/gpu/drm/i915/i915_ioc32.c | 5 ++++- drivers/gpu/drm/i915/i915_trace_points.c | 2 ++ drivers/gpu/drm/i915/intel_acpi.c | 1 + drivers/gpu/drm/i915/intel_display.c | 2 +- drivers/gpu/drm/i915/intel_fb.c | 2 +- drivers/gpu/drm/i915/intel_ringbuffer.c | 4 ++-- drivers/gpu/drm/i915/intel_sdvo.c | 2 +- 9 files changed, 15 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 97c963082d4d..35462df7cefd 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1832,7 +1832,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file) return 0; } -int i915_forcewake_release(struct inode *inode, struct file *file) +static int i915_forcewake_release(struct inode *inode, struct file *file) { struct drm_device *dev = inode->i_private; struct drm_i915_private *dev_priv = dev->dev_private; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 422f424deb4c..303cee71b490 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1128,8 +1128,10 @@ extern void i915_driver_preclose(struct drm_device *dev, extern void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv); extern int i915_driver_device_is_agp(struct drm_device * dev); +#ifdef CONFIG_COMPAT extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +#endif extern int i915_emit_box(struct drm_device *dev, struct drm_clip_rect *box, int DR1, int DR4); diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c index 
13b028994b2b..0e72abb9f701 100644 --- a/drivers/gpu/drm/i915/i915_ioc32.c +++ b/drivers/gpu/drm/i915/i915_ioc32.c @@ -34,6 +34,7 @@ #include "drmP.h" #include "drm.h" #include "i915_drm.h" +#include "i915_drv.h" typedef struct _drm_i915_batchbuffer32 { int start; /* agp offset */ @@ -181,7 +182,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd, (unsigned long)request); } -drm_ioctl_compat_t *i915_compat_ioctls[] = { +static drm_ioctl_compat_t *i915_compat_ioctls[] = { [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer, [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer, [DRM_I915_GETPARAM] = compat_i915_getparam, @@ -189,6 +190,7 @@ drm_ioctl_compat_t *i915_compat_ioctls[] = { [DRM_I915_ALLOC] = compat_i915_alloc }; +#ifdef CONFIG_COMPAT /** * Called whenever a 32-bit process running under a 64-bit kernel * performs an ioctl on /dev/dri/card. @@ -217,3 +219,4 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return ret; } +#endif diff --git a/drivers/gpu/drm/i915/i915_trace_points.c b/drivers/gpu/drm/i915/i915_trace_points.c index ead876eb6ea0..f1df2bd4ecf4 100644 --- a/drivers/gpu/drm/i915/i915_trace_points.c +++ b/drivers/gpu/drm/i915/i915_trace_points.c @@ -7,5 +7,7 @@ #include "i915_drv.h" +#ifndef __CHECKER__ #define CREATE_TRACE_POINTS #include "i915_trace.h" +#endif diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c index f152b2a7fc54..f413899475e9 100644 --- a/drivers/gpu/drm/i915/intel_acpi.c +++ b/drivers/gpu/drm/i915/intel_acpi.c @@ -9,6 +9,7 @@ #include #include "drmP.h" +#include "i915_drv.h" #define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */ diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index eb7ebf49f97e..78179e03a901 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9503,7 +9503,7 @@ struct intel_quirk { void (*hook)(struct drm_device *dev); }; -struct intel_quirk intel_quirks[] = { +static struct intel_quirk intel_quirks[] = { /* HP Mini needs pipe A force quirk (LP: #322104) */ { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 19ecd78b8a2c..71ef2896be96 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -94,7 +94,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, mutex_lock(&dev->struct_mutex); /* Flush everything out, we'll be doing GTT only from now on */ - ret = intel_pin_and_fence_fb_obj(dev, obj, false); + ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); if (ret) { DRM_ERROR("failed to pin fb: %d\n", ret); goto out_unref; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 9d4e5f06edc3..492812db537b 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -891,8 +891,8 @@ err: return ret; } -int intel_init_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring) +static int intel_init_ring_buffer(struct drm_device *dev, + struct intel_ring_buffer *ring) { struct drm_i915_gem_object *obj; int ret; diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 6898145b44ce..aa1a0d5f0a5e 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1258,7 +1258,7 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector) dev_priv->crt_ddc_pin)); } -enum drm_connector_status +static enum drm_connector_status 
intel_sdvo_tmds_sink_detect(struct drm_connector *connector) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); -- cgit v1.2.3-59-g8ed1b From 3bf3f452362841404fe6d4589883f9471842ef8b Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 16 Apr 2012 14:07:41 -0700 Subject: drm/i915: [sparse] don't use variable size arrays Sparse doesn't like: "error: bad constant expression" Signed-off-by: Ben Widawsky [danvet: apply s/drm_malloc_ab/kcalloc bikeshed. If it's small enough for the stack, it's small enough for kmalloc.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_sdvo.c | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index aa1a0d5f0a5e..c330efd59a0e 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -443,9 +443,17 @@ static const char *cmd_status_names[] = { static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, const void *args, int args_len) { - u8 buf[args_len*2 + 2], status; - struct i2c_msg msgs[args_len + 3]; - int i, ret; + u8 *buf, status; + struct i2c_msg *msgs; + int i, ret = true; + + buf = (u8 *)kzalloc(args_len * 2 + 2, GFP_KERNEL); + if (!buf) + return false; + + msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL); + if (!msgs) + return false; intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len); @@ -479,15 +487,19 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3); if (ret < 0) { DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); - return false; + ret = false; + goto out; } if (ret != i+3) { /* failure in I2C transfer */ DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3); - return false; + ret = false; } - return true; +out: + kfree(msgs); + kfree(buf); + return ret; } static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, -- cgit v1.2.3-59-g8ed1b From 7b09638f45379fd1f8cbcb0a95ea2b11f0c8b850 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 14 Apr 2012 09:55:51 +0100 Subject: drm/i915: Always flush tiling changes before accessing through the GTT As we defer updating the fence register from set-tiling to the point of use, we need to declare every access through the GTT as either fenced or unfenced. This patches fixes an old bug in the execbuffer relocation processing which could conceivably be hit by a pathological userspace. 
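The rule the fix enforces can be stated as a small pattern: before the CPU writes a relocation through the GTT, the object has to be moved to the GTT write domain and any stale fence has to be dropped. In isolation (matching the hunk below):

        ret = i915_gem_object_set_to_gtt_domain(obj, true);
        if (ret)
                return ret;

        /* Declare the upcoming GTT access as unfenced. */
        ret = i915_gem_object_put_fence(obj);
        if (ret)
                return ret;
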
Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 2a24d0cd9b46..1a0d54f278c4 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -381,7 +381,11 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, uint32_t __iomem *reloc_entry; void __iomem *reloc_page; - ret = i915_gem_object_set_to_gtt_domain(obj, 1); + ret = i915_gem_object_set_to_gtt_domain(obj, true); + if (ret) + return ret; + + ret = i915_gem_object_put_fence(obj); if (ret) return ret; -- cgit v1.2.3-59-g8ed1b From 65f5687603ea6ede1cb01b3d6c16a8c1fac88541 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 17 Apr 2012 16:38:12 +0100 Subject: drm/i915: Replace open coded MI_BATCH_GTT The (2<<6) virtual memory space selector harks back to gen3 and is mandatory given our use of GTT space for batchbuffers. On gen4+, use of the GTT became mandatory and bit6 marked reserved. However the code must now explicitly set (1<<7), which conveniently is also (2<<6). To clarify the meaning for future readers, replace the open coded (2<<6) with MI_BATCH_GTT. Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_ringbuffer.c | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index d093dba8224b..0d3b97f01690 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -231,6 +231,7 @@ #define MI_BATCH_NON_SECURE (1) #define MI_BATCH_NON_SECURE_I965 (1<<8) #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) +#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */ #define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ #define MI_SEMAPHORE_GLOBAL_GTT (1<<22) #define MI_SEMAPHORE_UPDATE (1<<21) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 492812db537b..4ae651bb1c97 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -786,7 +786,8 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length) return ret; intel_ring_emit(ring, - MI_BATCH_BUFFER_START | (2 << 6) | + MI_BATCH_BUFFER_START | + MI_BATCH_GTT | MI_BATCH_NON_SECURE_I965); intel_ring_emit(ring, offset); intel_ring_advance(ring); @@ -823,7 +824,7 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring, if (ret) return ret; - intel_ring_emit(ring, MI_BATCH_BUFFER_START | (2 << 6)); + intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); intel_ring_advance(ring); -- cgit v1.2.3-59-g8ed1b From a1e969e0332de7a430e62822cee8f2ec8d83cd7c Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Sat, 14 Apr 2012 18:41:32 -0700 Subject: drm/i915: [GEN7] Use HW scheduler for fixed function shaders This originally started as a patch from Bernard as a way of simply setting the VS scheduler. After submitting the RFC patch, we decided to also modify the DS scheduler. To be most explicit, I've made the patch explicitly set all scheduler modes, and included the defines for other modes (in case someone feels frisky later). The rest of the story gets a bit weird. 
The first version of the patch showed an almost unbelievable performance improvement. Since rebasing my branch it appears the performance improvement has gone, unfortunately. But setting these bits seem to be the right thing to do given that the docs describe corruption that can occur with the default settings. In summary, I am seeing no more perf improvements (or regressions) in my limited testing, but we believe this should be set to prevent rendering corruption, therefore cc stable. v1: Clear bit 4 also (Ken + Eugeni) Do a full clear + set of the bits we want (Me). Cc: Bernard Kilarski Cc: stable Reviewed-by (RFC): Kenneth Graunke Signed-off-by: Ben Widawsky Reviewed-by: Eugeni Dodonov Reviewed-by: Kenneth Graunke Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 15 +++++++++++++++ drivers/gpu/drm/i915/intel_display.c | 14 ++++++++++++++ 2 files changed, 29 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 0d3b97f01690..5ac9837e49a5 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -692,6 +692,21 @@ #define GEN6_BSD_RNCID 0x12198 +#define GEN7_FF_THREAD_MODE 0x20a0 +#define GEN7_FF_SCHED_MASK 0x0077070 +#define GEN7_FF_TS_SCHED_HS1 (0x5<<16) +#define GEN7_FF_TS_SCHED_HS0 (0x3<<16) +#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16) +#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */ +#define GEN7_FF_VS_SCHED_HS1 (0x5<<12) +#define GEN7_FF_VS_SCHED_HS0 (0x3<<12) +#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */ +#define GEN7_FF_VS_SCHED_HW (0x0<<12) +#define GEN7_FF_DS_SCHED_HS1 (0x5<<4) +#define GEN7_FF_DS_SCHED_HS0 (0x3<<4) +#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1<<4) /* Default */ +#define GEN7_FF_DS_SCHED_HW (0x0<<4) + /* * Framebuffer compression (915+ only) */ diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 78179e03a901..96fc4679d438 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -8937,6 +8937,18 @@ static void gen6_init_clock_gating(struct drm_device *dev) } } +static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) +{ + uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE); + + reg &= ~GEN7_FF_SCHED_MASK; + reg |= GEN7_FF_TS_SCHED_HW; + reg |= GEN7_FF_VS_SCHED_HW; + reg |= GEN7_FF_DS_SCHED_HW; + + I915_WRITE(GEN7_FF_THREAD_MODE, reg); +} + static void ivybridge_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -8981,6 +8993,8 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) DISPPLANE_TRICKLE_FEED_DISABLE); intel_flush_display_plane(dev_priv, pipe); } + + gen7_setup_fixed_func_scheduler(dev_priv); } static void valleyview_init_clock_gating(struct drm_device *dev) -- cgit v1.2.3-59-g8ed1b From 83d4092b0381e5dd6f312b2ec57121dcf0fcbade Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 17 Apr 2012 19:35:53 +0100 Subject: drm/i915: Unpin the flip target if we fail to queue the flip Signed-off-by: Chris Wilson Cc: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 50 +++++++++++++++++++++++++----------- 1 file changed, 35 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 96fc4679d438..16930c5cc249 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -7658,14 +7658,14 @@ static int 
intel_gen2_queue_flip(struct drm_device *dev, ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); if (ret) - goto out; + goto err; /* Offset into the new buffer for cases of shared fbs between CRTCs */ offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8; ret = BEGIN_LP_RING(6); if (ret) - goto out; + goto err_unpin; /* Can't queue multiple flips, so wait for the previous * one to finish before executing the next. @@ -7682,7 +7682,11 @@ static int intel_gen2_queue_flip(struct drm_device *dev, OUT_RING(obj->gtt_offset + offset); OUT_RING(0); /* aux display base address, unused */ ADVANCE_LP_RING(); -out: + return 0; + +err_unpin: + intel_unpin_fb_obj(obj); +err: return ret; } @@ -7699,14 +7703,14 @@ static int intel_gen3_queue_flip(struct drm_device *dev, ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); if (ret) - goto out; + goto err; /* Offset into the new buffer for cases of shared fbs between CRTCs */ offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8; ret = BEGIN_LP_RING(6); if (ret) - goto out; + goto err_unpin; if (intel_crtc->plane) flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; @@ -7721,7 +7725,11 @@ static int intel_gen3_queue_flip(struct drm_device *dev, OUT_RING(MI_NOOP); ADVANCE_LP_RING(); -out: + return 0; + +err_unpin: + intel_unpin_fb_obj(obj); +err: return ret; } @@ -7737,11 +7745,11 @@ static int intel_gen4_queue_flip(struct drm_device *dev, ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); if (ret) - goto out; + goto err; ret = BEGIN_LP_RING(4); if (ret) - goto out; + goto err_unpin; /* i965+ uses the linear or tiled offsets from the * Display Registers (which do not change across a page-flip) @@ -7760,7 +7768,11 @@ static int intel_gen4_queue_flip(struct drm_device *dev, pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; OUT_RING(pf | pipesrc); ADVANCE_LP_RING(); -out: + return 0; + +err_unpin: + intel_unpin_fb_obj(obj); +err: return ret; } @@ -7776,11 +7788,11 @@ static int intel_gen6_queue_flip(struct drm_device *dev, ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); if (ret) - goto out; + goto err; ret = BEGIN_LP_RING(4); if (ret) - goto out; + goto err_unpin; OUT_RING(MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); @@ -7791,7 +7803,11 @@ static int intel_gen6_queue_flip(struct drm_device *dev, pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; OUT_RING(pf | pipesrc); ADVANCE_LP_RING(); -out: + return 0; + +err_unpin: + intel_unpin_fb_obj(obj); +err: return ret; } @@ -7813,18 +7829,22 @@ static int intel_gen7_queue_flip(struct drm_device *dev, ret = intel_pin_and_fence_fb_obj(dev, obj, ring); if (ret) - goto out; + goto err; ret = intel_ring_begin(ring, 4); if (ret) - goto out; + goto err_unpin; intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19)); intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); intel_ring_emit(ring, (obj->gtt_offset)); intel_ring_emit(ring, (MI_NOOP)); intel_ring_advance(ring); -out: + return 0; + +err_unpin: + intel_unpin_fb_obj(obj); +err: return ret; } -- cgit v1.2.3-59-g8ed1b From 6b8e6ed02a5b06435a6b1c7ddff08c11f3e2d5d1 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 17 Apr 2012 15:08:19 +0100 Subject: drm/i915: intel_update_fbc() requires struct_mutex, so no longer atomic As we need to manipulate our device structure and allocate queue a task, it is no longer a simple atomic operation and cannot be performed along the atomic modeset paths. 
Instead make sure that we disable FBC (which must be therefore kept as a set of simple register writes) when performing the atomic modeset and leave the heavy-weight intel_update_fbc() for the normal modeset. Signed-off-by: Chris Wilson Reviewed-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 16930c5cc249..a1a808047f12 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2331,16 +2331,12 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - int ret; - - ret = dev_priv->display.update_plane(crtc, fb, x, y); - if (ret) - return ret; - intel_update_fbc(dev); + if (dev_priv->display.disable_fbc) + dev_priv->display.disable_fbc(dev); intel_increase_pllclock(crtc); - return 0; + return dev_priv->display.update_plane(crtc, fb, x, y); } static int @@ -2375,6 +2371,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int ret; @@ -2411,8 +2408,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, if (old_fb) intel_finish_fb(old_fb); - ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, - LEAVE_ATOMIC_MODE_SET); + ret = dev_priv->display.update_plane(crtc, crtc->fb, x, y); if (ret) { intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj); mutex_unlock(&dev->struct_mutex); @@ -2425,6 +2421,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj); } + intel_update_fbc(dev); mutex_unlock(&dev->struct_mutex); if (!dev->primary->master) -- cgit v1.2.3-59-g8ed1b From 46f0f8d120c4afae53a5670bf3ac80a928340ff3 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 18 Apr 2012 11:12:11 +0100 Subject: drm/i915: Don't set a MBZ bit in gen2/3 MI_FLUSH On gen2 MI_EXE_FLUSH is actually an AGP flush bit and on gen3 marked as reserved. On both it is documented as being must-be-zero. So obey the documentation, and separate the gen2 flush into its own little routine and share with gen3. This means that we can rename the existing render_ring_flush() to reflect the generation from which it first applies and remove the code for handling earlier generations from it. v2: Applies to gen3 as well v3: Make it compile and improve the commit message. 
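On gen2/3 the flush command word may therefore only carry MI_NO_WRITE_FLUSH and MI_READ_FLUSH. Composed from the flush/invalidate domains, as the new gen2_render_ring_flush() below does, that is:

        u32 cmd = MI_FLUSH;

        if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
                cmd |= MI_NO_WRITE_FLUSH;
        if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
                cmd |= MI_READ_FLUSH;
        /* MI_EXE_FLUSH is must-be-zero on gen2/3, so it is never OR-ed in. */
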
Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ringbuffer.c | 55 +++++++++++++++++++++++---------- 1 file changed, 39 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 4ae651bb1c97..6f610f28b1be 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -53,9 +53,35 @@ static inline int ring_space(struct intel_ring_buffer *ring) } static int -render_ring_flush(struct intel_ring_buffer *ring, - u32 invalidate_domains, - u32 flush_domains) +gen2_render_ring_flush(struct intel_ring_buffer *ring, + u32 invalidate_domains, + u32 flush_domains) +{ + u32 cmd; + int ret; + + cmd = MI_FLUSH; + if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) + cmd |= MI_NO_WRITE_FLUSH; + + if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) + cmd |= MI_READ_FLUSH; + + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; + + intel_ring_emit(ring, cmd); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + + return 0; +} + +static int +gen4_render_ring_flush(struct intel_ring_buffer *ring, + u32 invalidate_domains, + u32 flush_domains) { struct drm_device *dev = ring->dev; u32 cmd; @@ -90,17 +116,8 @@ render_ring_flush(struct intel_ring_buffer *ring, */ cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; - if ((invalidate_domains|flush_domains) & - I915_GEM_DOMAIN_RENDER) + if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) cmd &= ~MI_NO_WRITE_FLUSH; - if (INTEL_INFO(dev)->gen < 4) { - /* - * On the 965, the sampler cache always gets flushed - * and this bit is reserved. - */ - if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) - cmd |= MI_READ_FLUSH; - } if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) cmd |= MI_EXE_FLUSH; @@ -1281,14 +1298,17 @@ int intel_init_render_ring_buffer(struct drm_device *dev) ring->signal_mbox[1] = GEN6_BRSYNC; } else if (IS_GEN5(dev)) { ring->add_request = pc_render_add_request; - ring->flush = render_ring_flush; + ring->flush = gen4_render_ring_flush; ring->get_seqno = pc_render_get_seqno; ring->irq_get = gen5_ring_get_irq; ring->irq_put = gen5_ring_put_irq; ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY; } else { ring->add_request = i9xx_add_request; - ring->flush = render_ring_flush; + if (INTEL_INFO(dev)->gen < 4) + ring->flush = gen2_render_ring_flush; + else + ring->flush = gen4_render_ring_flush; ring->get_seqno = ring_get_seqno; ring->irq_get = i9xx_ring_get_irq; ring->irq_put = i9xx_ring_put_irq; @@ -1333,7 +1353,10 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) * gem_init ioctl returns with -ENODEV). Hence we do not need to set up * the special gen5 functions. */ ring->add_request = i9xx_add_request; - ring->flush = render_ring_flush; + if (INTEL_INFO(dev)->gen < 4) + ring->flush = gen2_render_ring_flush; + else + ring->flush = gen4_render_ring_flush; ring->get_seqno = ring_get_seqno; ring->irq_get = i9xx_ring_get_irq; ring->irq_put = i9xx_ring_put_irq; -- cgit v1.2.3-59-g8ed1b From 0f91128d88bbb8b0a8e7bb93df2c40680871d45a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 17 Apr 2012 10:05:38 +0100 Subject: drm/i915: Wait for all pending operations to the fb before disabling the pipe During modeset we have to disable the pipe to reconfigure its timings and maybe its size. 
Userspace may have queued up command buffers that depend upon the pipe running in a certain configuration and so the commands may become confused across the modeset. At the moment, we use a less than satisfactory kick-scanline-waits should the GPU hang during the modeset. It should be more reliable to wait for the pending operations to complete first, even though we still have a window for userspace to submit a broken command buffer during the modeset. Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 27 ++++----------------------- 1 file changed, 4 insertions(+), 23 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a1a808047f12..39eb3e8bf1bc 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3014,16 +3014,14 @@ static void intel_clear_scanline_wait(struct drm_device *dev) static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) { - struct drm_i915_gem_object *obj; - struct drm_i915_private *dev_priv; + struct drm_device *dev = crtc->dev; if (crtc->fb == NULL) return; - obj = to_intel_framebuffer(crtc->fb)->obj; - dev_priv = crtc->dev->dev_private; - wait_event(dev_priv->pending_flip_queue, - atomic_read(&obj->pending_flip) == 0); + mutex_lock(&dev->struct_mutex); + intel_finish_fb(crtc->fb); + mutex_unlock(&dev->struct_mutex); } static bool intel_crtc_driving_pch(struct drm_crtc *crtc) @@ -3485,23 +3483,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc) struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; struct drm_device *dev = crtc->dev; - /* Flush any pending WAITs before we disable the pipe. Note that - * we need to drop the struct_mutex in order to acquire it again - * during the lowlevel dpms routines around a couple of the - * operations. It does not look trivial nor desirable to move - * that locking higher. So instead we leave a window for the - * submission of further commands on the fb before we can actually - * disable it. This race with userspace exists anyway, and we can - * only rely on the pipe being disabled by userspace after it - * receives the hotplug notification and has flushed any pending - * batches. - */ - if (crtc->fb) { - mutex_lock(&dev->struct_mutex); - intel_finish_fb(crtc->fb); - mutex_unlock(&dev->struct_mutex); - } - crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane); assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe); -- cgit v1.2.3-59-g8ed1b From 06d9813157cca181e3ca0aff769767669afe8adf Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 17 Apr 2012 15:31:24 +0100 Subject: drm/i915: Remove the pipelined parameter from get_fence() We never succeeded in getting pipelined fencing to work (unresolved spurious GPU hangs), so begin the process of dismantling and removal the broken code. Step 1 is the removal of the pipeline parameter to get_fence(). 
Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 3 +-- drivers/gpu/drm/i915/i915_gem.c | 7 +++---- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 +- drivers/gpu/drm/i915/intel_display.c | 2 +- 4 files changed, 6 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 303cee71b490..016ebc9af802 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1257,8 +1257,7 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2) u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring); -int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *pipelined); +int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); static inline bool diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index aa44ff240147..40e080865463 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1079,7 +1079,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (!obj->has_global_gtt_mapping) i915_gem_gtt_bind_object(obj, obj->cache_level); - ret = i915_gem_object_get_fence(obj, NULL); + ret = i915_gem_object_get_fence(obj); if (ret) goto unlock; @@ -2453,7 +2453,6 @@ i915_find_fence_reg(struct drm_device *dev, /** * i915_gem_object_get_fence - set up fencing for an object * @obj: object to map through a fence reg - * @pipelined: ring on which to queue the change, or NULL for CPU access * * When mapping objects through the GTT, userspace wants to be able to write * to them without having to worry about swizzling if the object is tiled. @@ -2466,11 +2465,11 @@ i915_find_fence_reg(struct drm_device *dev, * For an untiled surface, this removes any existing fence. */ int -i915_gem_object_get_fence(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *pipelined) +i915_gem_object_get_fence(struct drm_i915_gem_object *obj) { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *pipelined; struct drm_i915_fence_reg *reg; int ret; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 1a0d54f278c4..68ec0130a626 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -534,7 +534,7 @@ pin_and_fence_object(struct drm_i915_gem_object *obj, if (has_fenced_gpu_access) { if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { - ret = i915_gem_object_get_fence(obj, ring); + ret = i915_gem_object_get_fence(obj); if (ret) goto err_unpin; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 39eb3e8bf1bc..1f844c5f0460 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2151,7 +2151,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, * framebuffer compression. For simplicity, we always install * a fence as the cost is not that onerous. 
*/ - ret = i915_gem_object_get_fence(obj, pipelined); + ret = i915_gem_object_get_fence(obj); if (ret) goto err_unpin; -- cgit v1.2.3-59-g8ed1b From a360bb1a83279243a0945a0e646fd6c66521864e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 17 Apr 2012 15:31:25 +0100 Subject: drm/i915: Remove fence pipelining Step 2 is then to replace the pipelined parameter with NULL and perform constant folding to remove dead code. Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 155 ++++++++++------------------------------ 1 file changed, 36 insertions(+), 119 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 40e080865463..5a9d90f117d3 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2166,8 +2166,7 @@ int i915_gpu_idle(struct drm_device *dev, bool do_retire) return 0; } -static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *pipelined) +static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj) { struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; @@ -2185,26 +2184,12 @@ static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj, val |= 1 << I965_FENCE_TILING_Y_SHIFT; val |= I965_FENCE_REG_VALID; - if (pipelined) { - int ret = intel_ring_begin(pipelined, 6); - if (ret) - return ret; - - intel_ring_emit(pipelined, MI_NOOP); - intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2)); - intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8); - intel_ring_emit(pipelined, (u32)val); - intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4); - intel_ring_emit(pipelined, (u32)(val >> 32)); - intel_ring_advance(pipelined); - } else - I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val); + I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val); return 0; } -static int i965_write_fence_reg(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *pipelined) +static int i965_write_fence_reg(struct drm_i915_gem_object *obj) { struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; @@ -2220,26 +2205,12 @@ static int i965_write_fence_reg(struct drm_i915_gem_object *obj, val |= 1 << I965_FENCE_TILING_Y_SHIFT; val |= I965_FENCE_REG_VALID; - if (pipelined) { - int ret = intel_ring_begin(pipelined, 6); - if (ret) - return ret; - - intel_ring_emit(pipelined, MI_NOOP); - intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2)); - intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8); - intel_ring_emit(pipelined, (u32)val); - intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4); - intel_ring_emit(pipelined, (u32)(val >> 32)); - intel_ring_advance(pipelined); - } else - I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val); + I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val); return 0; } -static int i915_write_fence_reg(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *pipelined) +static int i915_write_fence_reg(struct drm_i915_gem_object *obj) { struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; @@ -2276,24 +2247,12 @@ static int i915_write_fence_reg(struct drm_i915_gem_object *obj, else fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4; - if (pipelined) { - int ret = intel_ring_begin(pipelined, 4); - if (ret) - return ret; - - intel_ring_emit(pipelined, MI_NOOP); - intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit(pipelined, fence_reg); - 
intel_ring_emit(pipelined, val); - intel_ring_advance(pipelined); - } else - I915_WRITE(fence_reg, val); + I915_WRITE(fence_reg, val); return 0; } -static int i830_write_fence_reg(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *pipelined) +static int i830_write_fence_reg(struct drm_i915_gem_object *obj) { struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; @@ -2319,18 +2278,7 @@ static int i830_write_fence_reg(struct drm_i915_gem_object *obj, val |= pitch_val << I830_FENCE_PITCH_SHIFT; val |= I830_FENCE_REG_VALID; - if (pipelined) { - int ret = intel_ring_begin(pipelined, 4); - if (ret) - return ret; - - intel_ring_emit(pipelined, MI_NOOP); - intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4); - intel_ring_emit(pipelined, val); - intel_ring_advance(pipelined); - } else - I915_WRITE(FENCE_REG_830_0 + regnum * 4, val); + I915_WRITE(FENCE_REG_830_0 + regnum * 4, val); return 0; } @@ -2341,8 +2289,7 @@ static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno) } static int -i915_gem_object_flush_fence(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *pipelined) +i915_gem_object_flush_fence(struct drm_i915_gem_object *obj) { int ret; @@ -2357,7 +2304,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj, obj->fenced_gpu_access = false; } - if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) { + if (obj->last_fenced_seqno && NULL != obj->last_fenced_ring) { if (!ring_passed_seqno(obj->last_fenced_ring, obj->last_fenced_seqno)) { ret = i915_wait_request(obj->last_fenced_ring, @@ -2388,7 +2335,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj) if (obj->tiling_mode) i915_gem_release_mmap(obj); - ret = i915_gem_object_flush_fence(obj, NULL); + ret = i915_gem_object_flush_fence(obj); if (ret) return ret; @@ -2406,8 +2353,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj) } static struct drm_i915_fence_reg * -i915_find_fence_reg(struct drm_device *dev, - struct intel_ring_buffer *pipelined) +i915_find_fence_reg(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_fence_reg *reg, *first, *avail; @@ -2436,9 +2382,7 @@ i915_find_fence_reg(struct drm_device *dev, if (first == NULL) first = reg; - if (!pipelined || - !reg->obj->last_fenced_ring || - reg->obj->last_fenced_ring == pipelined) { + if (reg->obj->last_fenced_ring == NULL) { avail = reg; break; } @@ -2469,67 +2413,46 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj) { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *pipelined; struct drm_i915_fence_reg *reg; int ret; if (obj->tiling_mode == I915_TILING_NONE) return i915_gem_object_put_fence(obj); - /* XXX disable pipelining. There are bugs. Shocking. */ - pipelined = NULL; - /* Just update our place in the LRU if our fence is getting reused. 
*/ if (obj->fence_reg != I915_FENCE_REG_NONE) { reg = &dev_priv->fence_regs[obj->fence_reg]; list_move_tail(®->lru_list, &dev_priv->mm.fence_list); if (obj->tiling_changed) { - ret = i915_gem_object_flush_fence(obj, pipelined); + ret = i915_gem_object_flush_fence(obj); if (ret) return ret; - if (!obj->fenced_gpu_access && !obj->last_fenced_seqno) - pipelined = NULL; - - if (pipelined) { - reg->setup_seqno = - i915_gem_next_request_seqno(pipelined); - obj->last_fenced_seqno = reg->setup_seqno; - obj->last_fenced_ring = pipelined; - } - goto update; } - if (!pipelined) { - if (reg->setup_seqno) { - if (!ring_passed_seqno(obj->last_fenced_ring, - reg->setup_seqno)) { - ret = i915_wait_request(obj->last_fenced_ring, - reg->setup_seqno, - true); - if (ret) - return ret; - } - - reg->setup_seqno = 0; + if (reg->setup_seqno) { + if (!ring_passed_seqno(obj->last_fenced_ring, + reg->setup_seqno)) { + ret = i915_wait_request(obj->last_fenced_ring, + reg->setup_seqno, + true); + if (ret) + return ret; } - } else if (obj->last_fenced_ring && - obj->last_fenced_ring != pipelined) { - ret = i915_gem_object_flush_fence(obj, pipelined); - if (ret) - return ret; + + reg->setup_seqno = 0; } return 0; } - reg = i915_find_fence_reg(dev, pipelined); + reg = i915_find_fence_reg(dev); if (reg == NULL) return -EDEADLK; - ret = i915_gem_object_flush_fence(obj, pipelined); + ret = i915_gem_object_flush_fence(obj); if (ret) return ret; @@ -2541,31 +2464,25 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj) if (old->tiling_mode) i915_gem_release_mmap(old); - ret = i915_gem_object_flush_fence(old, pipelined); + ret = i915_gem_object_flush_fence(old); if (ret) { drm_gem_object_unreference(&old->base); return ret; } - if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0) - pipelined = NULL; - old->fence_reg = I915_FENCE_REG_NONE; - old->last_fenced_ring = pipelined; - old->last_fenced_seqno = - pipelined ? i915_gem_next_request_seqno(pipelined) : 0; + old->last_fenced_ring = NULL; + old->last_fenced_seqno = 0; drm_gem_object_unreference(&old->base); - } else if (obj->last_fenced_seqno == 0) - pipelined = NULL; + } reg->obj = obj; list_move_tail(®->lru_list, &dev_priv->mm.fence_list); obj->fence_reg = reg - dev_priv->fence_regs; - obj->last_fenced_ring = pipelined; + obj->last_fenced_ring = NULL; - reg->setup_seqno = - pipelined ? i915_gem_next_request_seqno(pipelined) : 0; + reg->setup_seqno = 0; obj->last_fenced_seqno = reg->setup_seqno; update: @@ -2573,17 +2490,17 @@ update: switch (INTEL_INFO(dev)->gen) { case 7: case 6: - ret = sandybridge_write_fence_reg(obj, pipelined); + ret = sandybridge_write_fence_reg(obj); break; case 5: case 4: - ret = i965_write_fence_reg(obj, pipelined); + ret = i965_write_fence_reg(obj); break; case 3: - ret = i915_write_fence_reg(obj, pipelined); + ret = i915_write_fence_reg(obj); break; case 2: - ret = i830_write_fence_reg(obj, pipelined); + ret = i830_write_fence_reg(obj); break; } -- cgit v1.2.3-59-g8ed1b From 69963e7c76743bd9e8ef559f955165dd85d9ba72 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 17 Apr 2012 15:31:26 +0100 Subject: drm/i915: Remove unused ring->setup_seqno As we now no longer track a pipelined fence change, we never use ring->setup_seqno and can kill it. 
Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 1 - drivers/gpu/drm/i915/i915_gem.c | 17 ----------------- 2 files changed, 18 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 016ebc9af802..6a504f997251 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -146,7 +146,6 @@ struct drm_i915_master_private { struct drm_i915_fence_reg { struct list_head lru_list; struct drm_i915_gem_object *obj; - uint32_t setup_seqno; int pin_count; }; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 5a9d90f117d3..3a091f55fbcc 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2432,19 +2432,6 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj) goto update; } - if (reg->setup_seqno) { - if (!ring_passed_seqno(obj->last_fenced_ring, - reg->setup_seqno)) { - ret = i915_wait_request(obj->last_fenced_ring, - reg->setup_seqno, - true); - if (ret) - return ret; - } - - reg->setup_seqno = 0; - } - return 0; } @@ -2482,9 +2469,6 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj) obj->fence_reg = reg - dev_priv->fence_regs; obj->last_fenced_ring = NULL; - reg->setup_seqno = 0; - obj->last_fenced_seqno = reg->setup_seqno; - update: obj->tiling_changed = false; switch (INTEL_INFO(dev)->gen) { @@ -2543,7 +2527,6 @@ i915_gem_clear_fence_reg(struct drm_device *dev, list_del_init(®->lru_list); reg->obj = NULL; - reg->setup_seqno = 0; reg->pin_count = 0; } -- cgit v1.2.3-59-g8ed1b From 1c293ea3b1d1c72f1fc5f398e03232d8d302bd23 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 17 Apr 2012 15:31:27 +0100 Subject: drm/i915: Discard the unused obj->last_fenced_ring As we now never pipeline a fence update, obj->last_fenced_ring is always the same as the obj->ring whenever obj->last_fenced_seqno is active, so remove it. Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 5 ++--- drivers/gpu/drm/i915/i915_gem.c | 16 +++++----------- 2 files changed, 7 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6a504f997251..69e153956182 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -930,13 +930,12 @@ struct drm_i915_gem_object { */ uint32_t gtt_offset; - /** Breadcrumb of last rendering to the buffer. */ - uint32_t last_rendering_seqno; struct intel_ring_buffer *ring; + /** Breadcrumb of last rendering to the buffer. */ + uint32_t last_rendering_seqno; /** Breadcrumb of last fenced GPU access to the buffer. */ uint32_t last_fenced_seqno; - struct intel_ring_buffer *last_fenced_ring; /** Current tiling stride for the object, if it's tiled. 
*/ uint32_t stride; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3a091f55fbcc..b25d22971513 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1398,7 +1398,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, if (obj->fenced_gpu_access) { obj->last_fenced_seqno = seqno; - obj->last_fenced_ring = ring; /* Bump MRU to take account of the delayed flush */ if (obj->fence_reg != I915_FENCE_REG_NONE) { @@ -1445,7 +1444,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) BUG_ON(!list_empty(&obj->gpu_write_list)); BUG_ON(!obj->active); obj->ring = NULL; - obj->last_fenced_ring = NULL; i915_gem_object_move_off_active(obj); obj->fenced_gpu_access = false; @@ -1650,7 +1648,6 @@ static void i915_gem_reset_fences(struct drm_device *dev) reg->obj->fence_reg = I915_FENCE_REG_NONE; reg->obj->fenced_gpu_access = false; reg->obj->last_fenced_seqno = 0; - reg->obj->last_fenced_ring = NULL; i915_gem_clear_fence_reg(dev, reg); } } @@ -2295,7 +2292,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj) if (obj->fenced_gpu_access) { if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { - ret = i915_gem_flush_ring(obj->last_fenced_ring, + ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain); if (ret) return ret; @@ -2304,10 +2301,10 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj) obj->fenced_gpu_access = false; } - if (obj->last_fenced_seqno && NULL != obj->last_fenced_ring) { - if (!ring_passed_seqno(obj->last_fenced_ring, + if (obj->last_fenced_seqno) { + if (!ring_passed_seqno(obj->ring, obj->last_fenced_seqno)) { - ret = i915_wait_request(obj->last_fenced_ring, + ret = i915_wait_request(obj->ring, obj->last_fenced_seqno, true); if (ret) @@ -2315,7 +2312,6 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj) } obj->last_fenced_seqno = 0; - obj->last_fenced_ring = NULL; } /* Ensure that all CPU reads are completed before installing a fence @@ -2382,7 +2378,7 @@ i915_find_fence_reg(struct drm_device *dev) if (first == NULL) first = reg; - if (reg->obj->last_fenced_ring == NULL) { + if (reg->obj->last_fenced_seqno == 0) { avail = reg; break; } @@ -2458,7 +2454,6 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj) } old->fence_reg = I915_FENCE_REG_NONE; - old->last_fenced_ring = NULL; old->last_fenced_seqno = 0; drm_gem_object_unreference(&old->base); @@ -2467,7 +2462,6 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj) reg->obj = obj; list_move_tail(®->lru_list, &dev_priv->mm.fence_list); obj->fence_reg = reg - dev_priv->fence_regs; - obj->last_fenced_ring = NULL; update: obj->tiling_changed = false; -- cgit v1.2.3-59-g8ed1b From 8fe301add51206ca53576471059393b925f30124 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 17 Apr 2012 15:31:28 +0100 Subject: drm/i915: Simplify fence finding As the fences are stored in LRU order, we can simply reuse the oldest if we do not have an unused register. 
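A rough user-space sketch of the selection order this change settles on (prefer a completely free register, otherwise steal the least recently used unpinned one). The struct, the array-based LRU and the helper name are simplified stand-ins for the driver's drm_i915_fence_reg bookkeeping and mm.fence_list, not the real code; the hunk below shows the actual implementation.

    #include <stddef.h>

    /* Simplified stand-ins for the driver's fence bookkeeping. */
    struct fence_slot {
        void *owner;      /* NULL when the register is unused */
        int   pin_count;  /* non-zero while the fence may not be stolen */
    };

    /*
     * Prefer a free register; otherwise steal the least recently used
     * slot that is not pinned.  lru[] holds pointers to all n slots,
     * oldest first, playing the role of mm.fence_list.
     */
    static struct fence_slot *
    find_fence_slot(struct fence_slot *slots, struct fence_slot **lru, size_t n)
    {
        size_t i;

        for (i = 0; i < n; i++)
            if (slots[i].owner == NULL)
                return &slots[i];

        for (i = 0; i < n; i++)
            if (lru[i]->pin_count == 0)
                return lru[i];

        return NULL;  /* everything pinned: the caller reports -EDEADLK */
    }
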
Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b25d22971513..f7cd3461dc4f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2352,7 +2352,7 @@ static struct drm_i915_fence_reg * i915_find_fence_reg(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_fence_reg *reg, *first, *avail; + struct drm_i915_fence_reg *reg, *avail; int i; /* First try to find a free reg */ @@ -2370,24 +2370,14 @@ i915_find_fence_reg(struct drm_device *dev) return NULL; /* None available, try to steal one or wait for a user to finish */ - avail = first = NULL; list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) { if (reg->pin_count) continue; - if (first == NULL) - first = reg; - - if (reg->obj->last_fenced_seqno == 0) { - avail = reg; - break; - } + return reg; } - if (avail == NULL) - avail = first; - - return avail; + return NULL; } /** -- cgit v1.2.3-59-g8ed1b From 1899184547dec95ec8b7eb00e202d9b3a3b1c87b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 17 Apr 2012 15:31:29 +0100 Subject: drm/i915: Remove the unsightly "optimisation" from flush_fence() As i915_wait_request() will first check for an already passed seqno, doing it also in the caller is a waste of space for a cold path. Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index f7cd3461dc4f..bac3570d2c47 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2280,11 +2280,6 @@ static int i830_write_fence_reg(struct drm_i915_gem_object *obj) return 0; } -static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno) -{ - return i915_seqno_passed(ring->get_seqno(ring), seqno); -} - static int i915_gem_object_flush_fence(struct drm_i915_gem_object *obj) { @@ -2302,14 +2297,11 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj) } if (obj->last_fenced_seqno) { - if (!ring_passed_seqno(obj->ring, - obj->last_fenced_seqno)) { - ret = i915_wait_request(obj->ring, - obj->last_fenced_seqno, - true); - if (ret) - return ret; - } + ret = i915_wait_request(obj->ring, + obj->last_fenced_seqno, + true); + if (ret) + return ret; obj->last_fenced_seqno = 0; } -- cgit v1.2.3-59-g8ed1b From 9ce079e4812c41c5f4ee9ea116c768b8939197d6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 17 Apr 2012 15:31:30 +0100 Subject: drm/i915: Prepare to consolidate fence writing Update the existing architecture specific fence writing routines to either update the fence to point to a tiled object or to clear them in preparation to remove the other fence writing routes. 
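A compact sketch of the write-or-clear convention this patch introduces: one writer per hardware generation, each treating a NULL object as "clear this register", fronted by a single dispatcher keyed on the generation. The types, the register encoding and the printf stand-in for an MMIO write are illustrative assumptions rather than the driver's real definitions, which appear in the hunks below.

    #include <stdint.h>
    #include <stdio.h>

    struct object { uint64_t start, size; };

    static void gen6_write_fence(int reg, const struct object *obj)
    {
        uint64_t val = 0;

        if (obj) {
            /* build the enable value; tiling and pitch bits elided */
            val  = ((obj->start + obj->size - 4096) & 0xfffff000) << 32;
            val |= obj->start & 0xfffff000;
            val |= 1;  /* VALID bit */
        }
        /* stand-in for the 64-bit register write plus posting read */
        printf("FENCE[%d] <= 0x%016llx\n", reg, (unsigned long long)val);
    }

    static void write_fence(int gen, int reg, const struct object *obj)
    {
        switch (gen) {
        case 7:
        case 6:
            gen6_write_fence(reg, obj);
            break;
        default:
            /* older generations would call the i965/i915/i830 variants */
            break;
        }
    }

    int main(void)
    {
        const struct object obj = { .start = 0x100000, .size = 0x10000 };

        write_fence(6, 0, &obj);  /* point the register at a tiled object */
        write_fence(6, 0, NULL);  /* clear it again */
        return 0;
    }
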
Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 211 ++++++++++++++++++++-------------------- 1 file changed, 108 insertions(+), 103 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index bac3570d2c47..199306dfca85 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2163,121 +2163,142 @@ int i915_gpu_idle(struct drm_device *dev, bool do_retire) return 0; } -static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj) +static void sandybridge_write_fence_reg(struct drm_device *dev, int reg, + struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - u32 size = obj->gtt_space->size; - int regnum = obj->fence_reg; uint64_t val; - val = (uint64_t)((obj->gtt_offset + size - 4096) & - 0xfffff000) << 32; - val |= obj->gtt_offset & 0xfffff000; - val |= (uint64_t)((obj->stride / 128) - 1) << - SANDYBRIDGE_FENCE_PITCH_SHIFT; + if (obj) { + u32 size = obj->gtt_space->size; - if (obj->tiling_mode == I915_TILING_Y) - val |= 1 << I965_FENCE_TILING_Y_SHIFT; - val |= I965_FENCE_REG_VALID; + val = (uint64_t)((obj->gtt_offset + size - 4096) & + 0xfffff000) << 32; + val |= obj->gtt_offset & 0xfffff000; + val |= (uint64_t)((obj->stride / 128) - 1) << + SANDYBRIDGE_FENCE_PITCH_SHIFT; - I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val); + if (obj->tiling_mode == I915_TILING_Y) + val |= 1 << I965_FENCE_TILING_Y_SHIFT; + val |= I965_FENCE_REG_VALID; + } else + val = 0; - return 0; + I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val); + POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8); } -static int i965_write_fence_reg(struct drm_i915_gem_object *obj) +static void i965_write_fence_reg(struct drm_device *dev, int reg, + struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - u32 size = obj->gtt_space->size; - int regnum = obj->fence_reg; uint64_t val; - val = (uint64_t)((obj->gtt_offset + size - 4096) & - 0xfffff000) << 32; - val |= obj->gtt_offset & 0xfffff000; - val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; - if (obj->tiling_mode == I915_TILING_Y) - val |= 1 << I965_FENCE_TILING_Y_SHIFT; - val |= I965_FENCE_REG_VALID; + if (obj) { + u32 size = obj->gtt_space->size; - I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val); + val = (uint64_t)((obj->gtt_offset + size - 4096) & + 0xfffff000) << 32; + val |= obj->gtt_offset & 0xfffff000; + val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; + if (obj->tiling_mode == I915_TILING_Y) + val |= 1 << I965_FENCE_TILING_Y_SHIFT; + val |= I965_FENCE_REG_VALID; + } else + val = 0; - return 0; + I915_WRITE64(FENCE_REG_965_0 + reg * 8, val); + POSTING_READ(FENCE_REG_965_0 + reg * 8); } -static int i915_write_fence_reg(struct drm_i915_gem_object *obj) +static void i915_write_fence_reg(struct drm_device *dev, int reg, + struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - u32 size = obj->gtt_space->size; - u32 fence_reg, val, pitch_val; - int tile_width; - - if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) || - (size & -size) != size || - (obj->gtt_offset & (size - 1)), - "object 0x%08x [fenceable? 
%d] not 1M or pot-size (0x%08x) aligned\n", - obj->gtt_offset, obj->map_and_fenceable, size)) - return -EINVAL; + u32 val; - if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) - tile_width = 128; - else - tile_width = 512; - - /* Note: pitch better be a power of two tile widths */ - pitch_val = obj->stride / tile_width; - pitch_val = ffs(pitch_val) - 1; - - val = obj->gtt_offset; - if (obj->tiling_mode == I915_TILING_Y) - val |= 1 << I830_FENCE_TILING_Y_SHIFT; - val |= I915_FENCE_SIZE_BITS(size); - val |= pitch_val << I830_FENCE_PITCH_SHIFT; - val |= I830_FENCE_REG_VALID; - - fence_reg = obj->fence_reg; - if (fence_reg < 8) - fence_reg = FENCE_REG_830_0 + fence_reg * 4; - else - fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4; + if (obj) { + u32 size = obj->gtt_space->size; + int pitch_val; + int tile_width; - I915_WRITE(fence_reg, val); + WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) || + (size & -size) != size || + (obj->gtt_offset & (size - 1)), + "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n", + obj->gtt_offset, obj->map_and_fenceable, size); - return 0; + if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) + tile_width = 128; + else + tile_width = 512; + + /* Note: pitch better be a power of two tile widths */ + pitch_val = obj->stride / tile_width; + pitch_val = ffs(pitch_val) - 1; + + val = obj->gtt_offset; + if (obj->tiling_mode == I915_TILING_Y) + val |= 1 << I830_FENCE_TILING_Y_SHIFT; + val |= I915_FENCE_SIZE_BITS(size); + val |= pitch_val << I830_FENCE_PITCH_SHIFT; + val |= I830_FENCE_REG_VALID; + } else + val = 0; + + if (reg < 8) + reg = FENCE_REG_830_0 + reg * 4; + else + reg = FENCE_REG_945_8 + (reg - 8) * 4; + + I915_WRITE(reg, val); + POSTING_READ(reg); } -static int i830_write_fence_reg(struct drm_i915_gem_object *obj) +static void i830_write_fence_reg(struct drm_device *dev, int reg, + struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - u32 size = obj->gtt_space->size; - int regnum = obj->fence_reg; uint32_t val; - uint32_t pitch_val; - - if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) || - (size & -size) != size || - (obj->gtt_offset & (size - 1)), - "object 0x%08x not 512K or pot-size 0x%08x aligned\n", - obj->gtt_offset, size)) - return -EINVAL; - pitch_val = obj->stride / 128; - pitch_val = ffs(pitch_val) - 1; - - val = obj->gtt_offset; - if (obj->tiling_mode == I915_TILING_Y) - val |= 1 << I830_FENCE_TILING_Y_SHIFT; - val |= I830_FENCE_SIZE_BITS(size); - val |= pitch_val << I830_FENCE_PITCH_SHIFT; - val |= I830_FENCE_REG_VALID; + if (obj) { + u32 size = obj->gtt_space->size; + uint32_t pitch_val; + + WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) || + (size & -size) != size || + (obj->gtt_offset & (size - 1)), + "object 0x%08x not 512K or pot-size 0x%08x aligned\n", + obj->gtt_offset, size); + + pitch_val = obj->stride / 128; + pitch_val = ffs(pitch_val) - 1; + + val = obj->gtt_offset; + if (obj->tiling_mode == I915_TILING_Y) + val |= 1 << I830_FENCE_TILING_Y_SHIFT; + val |= I830_FENCE_SIZE_BITS(size); + val |= pitch_val << I830_FENCE_PITCH_SHIFT; + val |= I830_FENCE_REG_VALID; + } else + val = 0; - I915_WRITE(FENCE_REG_830_0 + regnum * 4, val); + I915_WRITE(FENCE_REG_830_0 + reg * 4, val); + POSTING_READ(FENCE_REG_830_0 + reg * 4); +} - return 0; +static void i915_gem_write_fence(struct drm_device *dev, int reg, + struct drm_i915_gem_object *obj) +{ + switch (INTEL_INFO(dev)->gen) { + case 7: + case 6: 
sandybridge_write_fence_reg(dev, reg, obj); break; + case 5: + case 4: i965_write_fence_reg(dev, reg, obj); break; + case 3: i915_write_fence_reg(dev, reg, obj); break; + case 2: i830_write_fence_reg(dev, reg, obj); break; + default: break; + } } static int @@ -2447,24 +2468,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj) update: obj->tiling_changed = false; - switch (INTEL_INFO(dev)->gen) { - case 7: - case 6: - ret = sandybridge_write_fence_reg(obj); - break; - case 5: - case 4: - ret = i965_write_fence_reg(obj); - break; - case 3: - ret = i915_write_fence_reg(obj); - break; - case 2: - ret = i830_write_fence_reg(obj); - break; - } - - return ret; + i915_gem_write_fence(dev, reg - dev_priv->fence_regs, obj); + return 0; } /** -- cgit v1.2.3-59-g8ed1b From 61050808bb019ebea966b7b5bfd357aaf219fb51 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 17 Apr 2012 15:31:31 +0100 Subject: drm/i915: Refactor put_fence() to use the common fence writing routine One clarification that we make is to the existing semantics of obj->tiling_changed to only mean that we need to update an associated fence register (including the NO_FENCE when executing an untiled but fenced GPU command). If we do not have a fence register or pending fenced GPU access for the object (after put_fence() for example), then we can clear the tiling_changed flag as any fence will necessarily be rewritten upon acquisition. Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 62 +++++++++++++++++++++++++++++++++-------- 1 file changed, 51 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 199306dfca85..3601b8b6e45f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -50,10 +50,28 @@ static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_file *file); static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj); +static void i915_gem_write_fence(struct drm_device *dev, int reg, + struct drm_i915_gem_object *obj); +static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, + struct drm_i915_fence_reg *fence, + bool enable); + static int i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc); static void i915_gem_object_truncate(struct drm_i915_gem_object *obj); +static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj) +{ + if (obj->tiling_mode) + i915_gem_release_mmap(obj); + + /* As we do not have an associated fence register, we will force + * a tiling change if we ever need to acquire one. + */ + obj->tiling_changed = false; + obj->fence_reg = I915_FENCE_REG_NONE; +} + /* some bookkeeping */ static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, size_t size) @@ -2301,6 +2319,32 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg, } } +static inline int fence_number(struct drm_i915_private *dev_priv, + struct drm_i915_fence_reg *fence) +{ + return fence - dev_priv->fence_regs; +} + +static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, + struct drm_i915_fence_reg *fence, + bool enable) +{ + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + int reg = fence_number(dev_priv, fence); + + i915_gem_write_fence(obj->base.dev, reg, enable ? 
obj : NULL); + + if (enable) { + obj->fence_reg = reg; + fence->obj = obj; + list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list); + } else { + obj->fence_reg = I915_FENCE_REG_NONE; + fence->obj = NULL; + list_del_init(&fence->lru_list); + } +} + static int i915_gem_object_flush_fence(struct drm_i915_gem_object *obj) { @@ -2339,24 +2383,20 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj) int i915_gem_object_put_fence(struct drm_i915_gem_object *obj) { + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; int ret; - if (obj->tiling_mode) - i915_gem_release_mmap(obj); - ret = i915_gem_object_flush_fence(obj); if (ret) return ret; - if (obj->fence_reg != I915_FENCE_REG_NONE) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; - - WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count); - i915_gem_clear_fence_reg(obj->base.dev, - &dev_priv->fence_regs[obj->fence_reg]); + if (obj->fence_reg == I915_FENCE_REG_NONE) + return 0; - obj->fence_reg = I915_FENCE_REG_NONE; - } + i915_gem_object_update_fence(obj, + &dev_priv->fence_regs[obj->fence_reg], + false); + i915_gem_object_fence_lost(obj); return 0; } -- cgit v1.2.3-59-g8ed1b From ada726c734e79c83f72676c02b5d68d4f9faead0 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 17 Apr 2012 15:31:32 +0100 Subject: drm/i915: Refactor fence clearing to use the common fence writing routine Now that we have a routine that is able to clear the fences as well as setup up the register for a tiled object, remove the surplus routines to clear the fences. Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 62 ++++++----------------------------------- 1 file changed, 9 insertions(+), 53 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3601b8b6e45f..e09ac3a95308 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -42,8 +42,6 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, unsigned alignment, bool map_and_fenceable); -static void i915_gem_clear_fence_reg(struct drm_device *dev, - struct drm_i915_fence_reg *reg); static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, @@ -1655,19 +1653,18 @@ static void i915_gem_reset_fences(struct drm_device *dev) for (i = 0; i < dev_priv->num_fence_regs; i++) { struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; - struct drm_i915_gem_object *obj = reg->obj; - if (!obj) - continue; + i915_gem_write_fence(dev, i, NULL); - if (obj->tiling_mode) - i915_gem_release_mmap(obj); + if (reg->obj) + i915_gem_object_fence_lost(reg->obj); - reg->obj->fence_reg = I915_FENCE_REG_NONE; - reg->obj->fenced_gpu_access = false; - reg->obj->last_fenced_seqno = 0; - i915_gem_clear_fence_reg(dev, reg); + reg->pin_count = 0; + reg->obj = NULL; + INIT_LIST_HEAD(®->lru_list); } + + INIT_LIST_HEAD(&dev_priv->mm.fence_list); } void i915_gem_reset(struct drm_device *dev) @@ -2512,45 +2509,6 @@ update: return 0; } -/** - * i915_gem_clear_fence_reg - clear out fence register info - * @obj: object to clear - * - * Zeroes out the fence register itself and clears out the associated - * data structures in dev_priv and obj. 
- */ -static void -i915_gem_clear_fence_reg(struct drm_device *dev, - struct drm_i915_fence_reg *reg) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - uint32_t fence_reg = reg - dev_priv->fence_regs; - - switch (INTEL_INFO(dev)->gen) { - case 7: - case 6: - I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0); - break; - case 5: - case 4: - I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0); - break; - case 3: - if (fence_reg >= 8) - fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4; - else - case 2: - fence_reg = FENCE_REG_830_0 + fence_reg * 4; - - I915_WRITE(fence_reg, 0); - break; - } - - list_del_init(®->lru_list); - reg->obj = NULL; - reg->pin_count = 0; -} - /** * Finds free space in the GTT aperture and binds the object there. */ @@ -3788,9 +3746,7 @@ i915_gem_load(struct drm_device *dev) dev_priv->num_fence_regs = 8; /* Initialize fence registers to zero */ - for (i = 0; i < dev_priv->num_fence_regs; i++) { - i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]); - } + i915_gem_reset_fences(dev); i915_gem_detect_bit_6_swizzle(dev); init_waitqueue_head(&dev_priv->pending_flip_queue); -- cgit v1.2.3-59-g8ed1b From 14415745b2518fc7309616a33ce8e656e79a4e05 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 17 Apr 2012 15:31:33 +0100 Subject: drm/i915: Refactor get_fence() to use the common fence writing routine We can also take advantage of the new 'no retire' mode for seqno waiting to avoid having to take a reference on the old fence object whilst flushing an existing fence. Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 70 ++++++++++++++++------------------------- 1 file changed, 27 insertions(+), 43 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e09ac3a95308..7bc4a40132ad 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2361,7 +2361,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj) if (obj->last_fenced_seqno) { ret = i915_wait_request(obj->ring, obj->last_fenced_seqno, - true); + false); if (ret) return ret; @@ -2449,63 +2449,47 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj) { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + bool enable = obj->tiling_mode != I915_TILING_NONE; struct drm_i915_fence_reg *reg; int ret; - if (obj->tiling_mode == I915_TILING_NONE) - return i915_gem_object_put_fence(obj); + /* Have we updated the tiling parameters upon the object and so + * will need to serialise the write to the associated fence register? + */ + if (obj->tiling_changed) { + ret = i915_gem_object_flush_fence(obj); + if (ret) + return ret; + } /* Just update our place in the LRU if our fence is getting reused. 
*/ if (obj->fence_reg != I915_FENCE_REG_NONE) { reg = &dev_priv->fence_regs[obj->fence_reg]; - list_move_tail(®->lru_list, &dev_priv->mm.fence_list); + if (!obj->tiling_changed) { + list_move_tail(®->lru_list, + &dev_priv->mm.fence_list); + return 0; + } + } else if (enable) { + reg = i915_find_fence_reg(dev); + if (reg == NULL) + return -EDEADLK; + + if (reg->obj) { + struct drm_i915_gem_object *old = reg->obj; - if (obj->tiling_changed) { - ret = i915_gem_object_flush_fence(obj); + ret = i915_gem_object_flush_fence(old); if (ret) return ret; - goto update; + i915_gem_object_fence_lost(old); } - + } else return 0; - } - - reg = i915_find_fence_reg(dev); - if (reg == NULL) - return -EDEADLK; - - ret = i915_gem_object_flush_fence(obj); - if (ret) - return ret; - - if (reg->obj) { - struct drm_i915_gem_object *old = reg->obj; - drm_gem_object_reference(&old->base); - - if (old->tiling_mode) - i915_gem_release_mmap(old); - - ret = i915_gem_object_flush_fence(old); - if (ret) { - drm_gem_object_unreference(&old->base); - return ret; - } - - old->fence_reg = I915_FENCE_REG_NONE; - old->last_fenced_seqno = 0; - - drm_gem_object_unreference(&old->base); - } - - reg->obj = obj; - list_move_tail(®->lru_list, &dev_priv->mm.fence_list); - obj->fence_reg = reg - dev_priv->fence_regs; - -update: + i915_gem_object_update_fence(obj, reg, enable); obj->tiling_changed = false; - i915_gem_write_fence(dev, reg - dev_priv->fence_regs, obj); + return 0; } -- cgit v1.2.3-59-g8ed1b From 85208be0154e73c8c902eb5bfb625f9188c87901 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Mon, 16 Apr 2012 22:20:34 -0300 Subject: drm/i915: move fbc-related functionality into intel_pm module This commit moves Frame Buffer Compression-related operations and support functions into the new intel_pm module. 
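For context, the moved intel_*_fbc() helpers stay platform-neutral by calling hooks on dev_priv->display that are installed during init. A minimal sketch of that dispatch style follows, with simplified types, hypothetical hook names and a made-up G4X example standing in for the driver's actual display vtable.

    #include <stdbool.h>
    #include <stdio.h>

    struct display_ops {
        void (*enable_fbc)(int plane);
        void (*disable_fbc)(void);
        bool (*fbc_enabled)(void);
    };

    static bool g4x_state;

    static void g4x_enable(int plane)
    {
        g4x_state = true;
        printf("g4x: FBC enabled on plane %d\n", plane);
    }

    static void g4x_disable(void)
    {
        g4x_state = false;
        printf("g4x: FBC disabled\n");
    }

    static bool g4x_is_enabled(void)
    {
        return g4x_state;
    }

    static struct display_ops display;  /* chosen per platform at init time */

    static bool fbc_enabled(void)
    {
        /* mirrors the generic wrapper: no hook means no FBC support */
        return display.fbc_enabled ? display.fbc_enabled() : false;
    }

    int main(void)
    {
        /* assume init code detected a G4X-class device */
        display = (struct display_ops){ g4x_enable, g4x_disable, g4x_is_enabled };

        display.enable_fbc(0);
        printf("fbc_enabled() -> %d\n", fbc_enabled());
        display.disable_fbc();
        return 0;
    }
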
Signed-off-by: Eugeni Dodonov Acked-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/intel_display.c | 484 -------------------------------- drivers/gpu/drm/i915/intel_drv.h | 15 + drivers/gpu/drm/i915/intel_pm.c | 521 +++++++++++++++++++++++++++++++++++ 4 files changed, 537 insertions(+), 484 deletions(-) create mode 100644 drivers/gpu/drm/i915/intel_pm.c (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index f8013302581f..b65c06f1a021 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -23,6 +23,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \ intel_sdvo.o \ intel_modes.o \ intel_panel.o \ + intel_pm.o \ intel_i2c.o \ intel_fb.o \ intel_tv.o \ diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 1f844c5f0460..6768d7551769 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1627,490 +1627,6 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv, disable_pch_hdmi(dev_priv, pipe, HDMID); } -static void i8xx_disable_fbc(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 fbc_ctl; - - /* Disable compression */ - fbc_ctl = I915_READ(FBC_CONTROL); - if ((fbc_ctl & FBC_CTL_EN) == 0) - return; - - fbc_ctl &= ~FBC_CTL_EN; - I915_WRITE(FBC_CONTROL, fbc_ctl); - - /* Wait for compressing bit to clear */ - if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) { - DRM_DEBUG_KMS("FBC idle timed out\n"); - return; - } - - DRM_DEBUG_KMS("disabled FBC\n"); -} - -static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_framebuffer *fb = crtc->fb; - struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj = intel_fb->obj; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int cfb_pitch; - int plane, i; - u32 fbc_ctl, fbc_ctl2; - - cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE; - if (fb->pitches[0] < cfb_pitch) - cfb_pitch = fb->pitches[0]; - - /* FBC_CTL wants 64B units */ - cfb_pitch = (cfb_pitch / 64) - 1; - plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; - - /* Clear old tags */ - for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) - I915_WRITE(FBC_TAG + (i * 4), 0); - - /* Set it up... */ - fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE; - fbc_ctl2 |= plane; - I915_WRITE(FBC_CONTROL2, fbc_ctl2); - I915_WRITE(FBC_FENCE_OFF, crtc->y); - - /* enable it... 
*/ - fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; - if (IS_I945GM(dev)) - fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ - fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; - fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; - fbc_ctl |= obj->fence_reg; - I915_WRITE(FBC_CONTROL, fbc_ctl); - - DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ", - cfb_pitch, crtc->y, intel_crtc->plane); -} - -static bool i8xx_fbc_enabled(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - return I915_READ(FBC_CONTROL) & FBC_CTL_EN; -} - -static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_framebuffer *fb = crtc->fb; - struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj = intel_fb->obj; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; - unsigned long stall_watermark = 200; - u32 dpfc_ctl; - - dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; - dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg; - I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); - - I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | - (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | - (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); - I915_WRITE(DPFC_FENCE_YOFF, crtc->y); - - /* enable it... */ - I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN); - - DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); -} - -static void g4x_disable_fbc(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 dpfc_ctl; - - /* Disable compression */ - dpfc_ctl = I915_READ(DPFC_CONTROL); - if (dpfc_ctl & DPFC_CTL_EN) { - dpfc_ctl &= ~DPFC_CTL_EN; - I915_WRITE(DPFC_CONTROL, dpfc_ctl); - - DRM_DEBUG_KMS("disabled FBC\n"); - } -} - -static bool g4x_fbc_enabled(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; -} - -static void sandybridge_blit_fbc_update(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 blt_ecoskpd; - - /* Make sure blitter notifies FBC of writes */ - gen6_gt_force_wake_get(dev_priv); - blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); - blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << - GEN6_BLITTER_LOCK_SHIFT; - I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); - blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY; - I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); - blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY << - GEN6_BLITTER_LOCK_SHIFT); - I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); - POSTING_READ(GEN6_BLITTER_ECOSKPD); - gen6_gt_force_wake_put(dev_priv); -} - -static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_framebuffer *fb = crtc->fb; - struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj = intel_fb->obj; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; - unsigned long stall_watermark = 200; - u32 dpfc_ctl; - - dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); - dpfc_ctl &= DPFC_RESERVED; - dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); - /* Set persistent mode for front-buffer rendering, ala X. 
*/ - dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE; - dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg); - I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); - - I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | - (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | - (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); - I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); - I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID); - /* enable it... */ - I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); - - if (IS_GEN6(dev)) { - I915_WRITE(SNB_DPFC_CTL_SA, - SNB_CPU_FENCE_ENABLE | obj->fence_reg); - I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); - sandybridge_blit_fbc_update(dev); - } - - DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); -} - -static void ironlake_disable_fbc(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 dpfc_ctl; - - /* Disable compression */ - dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); - if (dpfc_ctl & DPFC_CTL_EN) { - dpfc_ctl &= ~DPFC_CTL_EN; - I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); - - DRM_DEBUG_KMS("disabled FBC\n"); - } -} - -static bool ironlake_fbc_enabled(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN; -} - -bool intel_fbc_enabled(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (!dev_priv->display.fbc_enabled) - return false; - - return dev_priv->display.fbc_enabled(dev); -} - -static void intel_fbc_work_fn(struct work_struct *__work) -{ - struct intel_fbc_work *work = - container_of(to_delayed_work(__work), - struct intel_fbc_work, work); - struct drm_device *dev = work->crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - - mutex_lock(&dev->struct_mutex); - if (work == dev_priv->fbc_work) { - /* Double check that we haven't switched fb without cancelling - * the prior work. - */ - if (work->crtc->fb == work->fb) { - dev_priv->display.enable_fbc(work->crtc, - work->interval); - - dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane; - dev_priv->cfb_fb = work->crtc->fb->base.id; - dev_priv->cfb_y = work->crtc->y; - } - - dev_priv->fbc_work = NULL; - } - mutex_unlock(&dev->struct_mutex); - - kfree(work); -} - -static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv) -{ - if (dev_priv->fbc_work == NULL) - return; - - DRM_DEBUG_KMS("cancelling pending FBC enable\n"); - - /* Synchronisation is provided by struct_mutex and checking of - * dev_priv->fbc_work, so we can perform the cancellation - * entirely asynchronously. - */ - if (cancel_delayed_work(&dev_priv->fbc_work->work)) - /* tasklet was killed before being run, clean up */ - kfree(dev_priv->fbc_work); - - /* Mark the work as no longer wanted so that if it does - * wake-up (because the work was already running and waiting - * for our mutex), it will discover that is no longer - * necessary to run. 
- */ - dev_priv->fbc_work = NULL; -} - -static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) -{ - struct intel_fbc_work *work; - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - - if (!dev_priv->display.enable_fbc) - return; - - intel_cancel_fbc_work(dev_priv); - - work = kzalloc(sizeof *work, GFP_KERNEL); - if (work == NULL) { - dev_priv->display.enable_fbc(crtc, interval); - return; - } - - work->crtc = crtc; - work->fb = crtc->fb; - work->interval = interval; - INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn); - - dev_priv->fbc_work = work; - - DRM_DEBUG_KMS("scheduling delayed FBC enable\n"); - - /* Delay the actual enabling to let pageflipping cease and the - * display to settle before starting the compression. Note that - * this delay also serves a second purpose: it allows for a - * vblank to pass after disabling the FBC before we attempt - * to modify the control registers. - * - * A more complicated solution would involve tracking vblanks - * following the termination of the page-flipping sequence - * and indeed performing the enable as a co-routine and not - * waiting synchronously upon the vblank. - */ - schedule_delayed_work(&work->work, msecs_to_jiffies(50)); -} - -void intel_disable_fbc(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - intel_cancel_fbc_work(dev_priv); - - if (!dev_priv->display.disable_fbc) - return; - - dev_priv->display.disable_fbc(dev); - dev_priv->cfb_plane = -1; -} - -/** - * intel_update_fbc - enable/disable FBC as needed - * @dev: the drm_device - * - * Set up the framebuffer compression hardware at mode set time. We - * enable it if possible: - * - plane A only (on pre-965) - * - no pixel mulitply/line duplication - * - no alpha buffer discard - * - no dual wide - * - framebuffer <= 2048 in width, 1536 in height - * - * We can't assume that any compression will take place (worst case), - * so the compressed buffer has to be the same size as the uncompressed - * one. It also must reside (along with the line length buffer) in - * stolen memory. - * - * We need to enable/disable FBC on a global basis. - */ -static void intel_update_fbc(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc = NULL, *tmp_crtc; - struct intel_crtc *intel_crtc; - struct drm_framebuffer *fb; - struct intel_framebuffer *intel_fb; - struct drm_i915_gem_object *obj; - int enable_fbc; - - DRM_DEBUG_KMS("\n"); - - if (!i915_powersave) - return; - - if (!I915_HAS_FBC(dev)) - return; - - /* - * If FBC is already on, we just have to verify that we can - * keep it that way... - * Need to disable if: - * - more than one pipe is active - * - changing FBC params (stride, fence, mode) - * - new fb is too large to fit in compressed buffer - * - going to an unsupported config (interlace, pixel multiply, etc.) 
- */ - list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { - if (tmp_crtc->enabled && tmp_crtc->fb) { - if (crtc) { - DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); - dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; - goto out_disable; - } - crtc = tmp_crtc; - } - } - - if (!crtc || crtc->fb == NULL) { - DRM_DEBUG_KMS("no output, disabling\n"); - dev_priv->no_fbc_reason = FBC_NO_OUTPUT; - goto out_disable; - } - - intel_crtc = to_intel_crtc(crtc); - fb = crtc->fb; - intel_fb = to_intel_framebuffer(fb); - obj = intel_fb->obj; - - enable_fbc = i915_enable_fbc; - if (enable_fbc < 0) { - DRM_DEBUG_KMS("fbc set to per-chip default\n"); - enable_fbc = 1; - if (INTEL_INFO(dev)->gen <= 6) - enable_fbc = 0; - } - if (!enable_fbc) { - DRM_DEBUG_KMS("fbc disabled per module param\n"); - dev_priv->no_fbc_reason = FBC_MODULE_PARAM; - goto out_disable; - } - if (intel_fb->obj->base.size > dev_priv->cfb_size) { - DRM_DEBUG_KMS("framebuffer too large, disabling " - "compression\n"); - dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; - goto out_disable; - } - if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) || - (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) { - DRM_DEBUG_KMS("mode incompatible with compression, " - "disabling\n"); - dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE; - goto out_disable; - } - if ((crtc->mode.hdisplay > 2048) || - (crtc->mode.vdisplay > 1536)) { - DRM_DEBUG_KMS("mode too large for compression, disabling\n"); - dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE; - goto out_disable; - } - if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) { - DRM_DEBUG_KMS("plane not 0, disabling compression\n"); - dev_priv->no_fbc_reason = FBC_BAD_PLANE; - goto out_disable; - } - - /* The use of a CPU fence is mandatory in order to detect writes - * by the CPU to the scanout and trigger updates to the FBC. - */ - if (obj->tiling_mode != I915_TILING_X || - obj->fence_reg == I915_FENCE_REG_NONE) { - DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n"); - dev_priv->no_fbc_reason = FBC_NOT_TILED; - goto out_disable; - } - - /* If the kernel debugger is active, always disable compression */ - if (in_dbg_master()) - goto out_disable; - - /* If the scanout has not changed, don't modify the FBC settings. - * Note that we make the fundamental assumption that the fb->obj - * cannot be unpinned (and have its GTT offset and fence revoked) - * without first being decoupled from the scanout and FBC disabled. - */ - if (dev_priv->cfb_plane == intel_crtc->plane && - dev_priv->cfb_fb == fb->base.id && - dev_priv->cfb_y == crtc->y) - return; - - if (intel_fbc_enabled(dev)) { - /* We update FBC along two paths, after changing fb/crtc - * configuration (modeswitching) and after page-flipping - * finishes. For the latter, we know that not only did - * we disable the FBC at the start of the page-flip - * sequence, but also more than one vblank has passed. - * - * For the former case of modeswitching, it is possible - * to switch between two FBC valid configurations - * instantaneously so we do need to disable the FBC - * before we can modify its control registers. We also - * have to wait for the next vblank for that to take - * effect. However, since we delay enabling FBC we can - * assume that a vblank has passed since disabling and - * that we can safely alter the registers in the deferred - * callback. 
- * - * In the scenario that we go from a valid to invalid - * and then back to valid FBC configuration we have - * no strict enforcement that a vblank occurred since - * disabling the FBC. However, along all current pipe - * disabling paths we do need to wait for a vblank at - * some point. And we wait before enabling FBC anyway. - */ - DRM_DEBUG_KMS("disabling active FBC for update\n"); - intel_disable_fbc(dev); - } - - intel_enable_fbc(crtc, 500); - return; - -out_disable: - /* Multiple disables should be harmless */ - if (intel_fbc_enabled(dev)) { - DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); - intel_disable_fbc(dev); - } -} - int intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_i915_gem_object *obj, diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 8748e5e500fc..def112ee1a35 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -434,4 +434,19 @@ extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data, extern u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg); +/* Power-related functions, located in intel_pm.c */ +/* FBC */ +extern void i8xx_disable_fbc(struct drm_device *dev); +extern void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval); +extern bool i8xx_fbc_enabled(struct drm_device *dev); +extern void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval); +extern void g4x_disable_fbc(struct drm_device *dev); +extern bool g4x_fbc_enabled(struct drm_device *dev); +extern void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval); +extern void ironlake_disable_fbc(struct drm_device *dev); +extern bool ironlake_fbc_enabled(struct drm_device *dev); +extern bool intel_fbc_enabled(struct drm_device *dev); +extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); +extern void intel_update_fbc(struct drm_device *dev); + #endif /* __INTEL_DRV_H__ */ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c new file mode 100644 index 000000000000..7fbd305d3f8d --- /dev/null +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -0,0 +1,521 @@ +/* + * Copyright © 2012 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Eugeni Dodonov + * + */ + +#include "i915_drv.h" +#include "intel_drv.h" + +/* FBC, or Frame Buffer Compression, is a technique employed to compress the framebuffer contents in-memory, aiming at reducing the required bandwidth during in-memory transfers and, therefore, reduce the power packet. + * + * The benefits of FBC are mostly visible with solid backgrounds and variation-less patterns. + * + * FBC-related functionality can be enabled by the means of the i915.i915_enable_fbc parameter + */ + +void i8xx_disable_fbc(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 fbc_ctl; + + /* Disable compression */ + fbc_ctl = I915_READ(FBC_CONTROL); + if ((fbc_ctl & FBC_CTL_EN) == 0) + return; + + fbc_ctl &= ~FBC_CTL_EN; + I915_WRITE(FBC_CONTROL, fbc_ctl); + + /* Wait for compressing bit to clear */ + if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) { + DRM_DEBUG_KMS("FBC idle timed out\n"); + return; + } + + DRM_DEBUG_KMS("disabled FBC\n"); +} + +void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_framebuffer *fb = crtc->fb; + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct drm_i915_gem_object *obj = intel_fb->obj; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int cfb_pitch; + int plane, i; + u32 fbc_ctl, fbc_ctl2; + + cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE; + if (fb->pitches[0] < cfb_pitch) + cfb_pitch = fb->pitches[0]; + + /* FBC_CTL wants 64B units */ + cfb_pitch = (cfb_pitch / 64) - 1; + plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; + + /* Clear old tags */ + for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) + I915_WRITE(FBC_TAG + (i * 4), 0); + + /* Set it up... */ + fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE; + fbc_ctl2 |= plane; + I915_WRITE(FBC_CONTROL2, fbc_ctl2); + I915_WRITE(FBC_FENCE_OFF, crtc->y); + + /* enable it... */ + fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; + if (IS_I945GM(dev)) + fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ + fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; + fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; + fbc_ctl |= obj->fence_reg; + I915_WRITE(FBC_CONTROL, fbc_ctl); + + DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ", + cfb_pitch, crtc->y, intel_crtc->plane); +} + +bool i8xx_fbc_enabled(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + return I915_READ(FBC_CONTROL) & FBC_CTL_EN; +} + +void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_framebuffer *fb = crtc->fb; + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct drm_i915_gem_object *obj = intel_fb->obj; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; + unsigned long stall_watermark = 200; + u32 dpfc_ctl; + + dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; + dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg; + I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); + + I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | + (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | + (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); + I915_WRITE(DPFC_FENCE_YOFF, crtc->y); + + /* enable it... 
*/ + I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN); + + DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); +} + +void g4x_disable_fbc(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 dpfc_ctl; + + /* Disable compression */ + dpfc_ctl = I915_READ(DPFC_CONTROL); + if (dpfc_ctl & DPFC_CTL_EN) { + dpfc_ctl &= ~DPFC_CTL_EN; + I915_WRITE(DPFC_CONTROL, dpfc_ctl); + + DRM_DEBUG_KMS("disabled FBC\n"); + } +} + +bool g4x_fbc_enabled(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; +} + +static void sandybridge_blit_fbc_update(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 blt_ecoskpd; + + /* Make sure blitter notifies FBC of writes */ + gen6_gt_force_wake_get(dev_priv); + blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); + blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << + GEN6_BLITTER_LOCK_SHIFT; + I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); + blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY; + I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); + blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY << + GEN6_BLITTER_LOCK_SHIFT); + I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); + POSTING_READ(GEN6_BLITTER_ECOSKPD); + gen6_gt_force_wake_put(dev_priv); +} + +void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_framebuffer *fb = crtc->fb; + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct drm_i915_gem_object *obj = intel_fb->obj; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; + unsigned long stall_watermark = 200; + u32 dpfc_ctl; + + dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); + dpfc_ctl &= DPFC_RESERVED; + dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); + /* Set persistent mode for front-buffer rendering, ala X. */ + dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE; + dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg); + I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); + + I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | + (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | + (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); + I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); + I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID); + /* enable it... 
*/ + I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); + + if (IS_GEN6(dev)) { + I915_WRITE(SNB_DPFC_CTL_SA, + SNB_CPU_FENCE_ENABLE | obj->fence_reg); + I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); + sandybridge_blit_fbc_update(dev); + } + + DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); +} + +void ironlake_disable_fbc(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 dpfc_ctl; + + /* Disable compression */ + dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); + if (dpfc_ctl & DPFC_CTL_EN) { + dpfc_ctl &= ~DPFC_CTL_EN; + I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); + + DRM_DEBUG_KMS("disabled FBC\n"); + } +} + +bool ironlake_fbc_enabled(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN; +} + +bool intel_fbc_enabled(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (!dev_priv->display.fbc_enabled) + return false; + + return dev_priv->display.fbc_enabled(dev); +} + +static void intel_fbc_work_fn(struct work_struct *__work) +{ + struct intel_fbc_work *work = + container_of(to_delayed_work(__work), + struct intel_fbc_work, work); + struct drm_device *dev = work->crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + mutex_lock(&dev->struct_mutex); + if (work == dev_priv->fbc_work) { + /* Double check that we haven't switched fb without cancelling + * the prior work. + */ + if (work->crtc->fb == work->fb) { + dev_priv->display.enable_fbc(work->crtc, + work->interval); + + dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane; + dev_priv->cfb_fb = work->crtc->fb->base.id; + dev_priv->cfb_y = work->crtc->y; + } + + dev_priv->fbc_work = NULL; + } + mutex_unlock(&dev->struct_mutex); + + kfree(work); +} + +static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv) +{ + if (dev_priv->fbc_work == NULL) + return; + + DRM_DEBUG_KMS("cancelling pending FBC enable\n"); + + /* Synchronisation is provided by struct_mutex and checking of + * dev_priv->fbc_work, so we can perform the cancellation + * entirely asynchronously. + */ + if (cancel_delayed_work(&dev_priv->fbc_work->work)) + /* tasklet was killed before being run, clean up */ + kfree(dev_priv->fbc_work); + + /* Mark the work as no longer wanted so that if it does + * wake-up (because the work was already running and waiting + * for our mutex), it will discover that is no longer + * necessary to run. + */ + dev_priv->fbc_work = NULL; +} + +void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) +{ + struct intel_fbc_work *work; + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + if (!dev_priv->display.enable_fbc) + return; + + intel_cancel_fbc_work(dev_priv); + + work = kzalloc(sizeof *work, GFP_KERNEL); + if (work == NULL) { + dev_priv->display.enable_fbc(crtc, interval); + return; + } + + work->crtc = crtc; + work->fb = crtc->fb; + work->interval = interval; + INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn); + + dev_priv->fbc_work = work; + + DRM_DEBUG_KMS("scheduling delayed FBC enable\n"); + + /* Delay the actual enabling to let pageflipping cease and the + * display to settle before starting the compression. Note that + * this delay also serves a second purpose: it allows for a + * vblank to pass after disabling the FBC before we attempt + * to modify the control registers. 
+ * + * A more complicated solution would involve tracking vblanks + * following the termination of the page-flipping sequence + * and indeed performing the enable as a co-routine and not + * waiting synchronously upon the vblank. + */ + schedule_delayed_work(&work->work, msecs_to_jiffies(50)); +} + +void intel_disable_fbc(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + intel_cancel_fbc_work(dev_priv); + + if (!dev_priv->display.disable_fbc) + return; + + dev_priv->display.disable_fbc(dev); + dev_priv->cfb_plane = -1; +} + +/** + * intel_update_fbc - enable/disable FBC as needed + * @dev: the drm_device + * + * Set up the framebuffer compression hardware at mode set time. We + * enable it if possible: + * - plane A only (on pre-965) + * - no pixel mulitply/line duplication + * - no alpha buffer discard + * - no dual wide + * - framebuffer <= 2048 in width, 1536 in height + * + * We can't assume that any compression will take place (worst case), + * so the compressed buffer has to be the same size as the uncompressed + * one. It also must reside (along with the line length buffer) in + * stolen memory. + * + * We need to enable/disable FBC on a global basis. + */ +void intel_update_fbc(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc = NULL, *tmp_crtc; + struct intel_crtc *intel_crtc; + struct drm_framebuffer *fb; + struct intel_framebuffer *intel_fb; + struct drm_i915_gem_object *obj; + int enable_fbc; + + DRM_DEBUG_KMS("\n"); + + if (!i915_powersave) + return; + + if (!I915_HAS_FBC(dev)) + return; + + /* + * If FBC is already on, we just have to verify that we can + * keep it that way... + * Need to disable if: + * - more than one pipe is active + * - changing FBC params (stride, fence, mode) + * - new fb is too large to fit in compressed buffer + * - going to an unsupported config (interlace, pixel multiply, etc.) 
+ */ + list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { + if (tmp_crtc->enabled && tmp_crtc->fb) { + if (crtc) { + DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); + dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; + goto out_disable; + } + crtc = tmp_crtc; + } + } + + if (!crtc || crtc->fb == NULL) { + DRM_DEBUG_KMS("no output, disabling\n"); + dev_priv->no_fbc_reason = FBC_NO_OUTPUT; + goto out_disable; + } + + intel_crtc = to_intel_crtc(crtc); + fb = crtc->fb; + intel_fb = to_intel_framebuffer(fb); + obj = intel_fb->obj; + + enable_fbc = i915_enable_fbc; + if (enable_fbc < 0) { + DRM_DEBUG_KMS("fbc set to per-chip default\n"); + enable_fbc = 1; + if (INTEL_INFO(dev)->gen <= 6) + enable_fbc = 0; + } + if (!enable_fbc) { + DRM_DEBUG_KMS("fbc disabled per module param\n"); + dev_priv->no_fbc_reason = FBC_MODULE_PARAM; + goto out_disable; + } + if (intel_fb->obj->base.size > dev_priv->cfb_size) { + DRM_DEBUG_KMS("framebuffer too large, disabling " + "compression\n"); + dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; + goto out_disable; + } + if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) || + (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) { + DRM_DEBUG_KMS("mode incompatible with compression, " + "disabling\n"); + dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE; + goto out_disable; + } + if ((crtc->mode.hdisplay > 2048) || + (crtc->mode.vdisplay > 1536)) { + DRM_DEBUG_KMS("mode too large for compression, disabling\n"); + dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE; + goto out_disable; + } + if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) { + DRM_DEBUG_KMS("plane not 0, disabling compression\n"); + dev_priv->no_fbc_reason = FBC_BAD_PLANE; + goto out_disable; + } + + /* The use of a CPU fence is mandatory in order to detect writes + * by the CPU to the scanout and trigger updates to the FBC. + */ + if (obj->tiling_mode != I915_TILING_X || + obj->fence_reg == I915_FENCE_REG_NONE) { + DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n"); + dev_priv->no_fbc_reason = FBC_NOT_TILED; + goto out_disable; + } + + /* If the kernel debugger is active, always disable compression */ + if (in_dbg_master()) + goto out_disable; + + /* If the scanout has not changed, don't modify the FBC settings. + * Note that we make the fundamental assumption that the fb->obj + * cannot be unpinned (and have its GTT offset and fence revoked) + * without first being decoupled from the scanout and FBC disabled. + */ + if (dev_priv->cfb_plane == intel_crtc->plane && + dev_priv->cfb_fb == fb->base.id && + dev_priv->cfb_y == crtc->y) + return; + + if (intel_fbc_enabled(dev)) { + /* We update FBC along two paths, after changing fb/crtc + * configuration (modeswitching) and after page-flipping + * finishes. For the latter, we know that not only did + * we disable the FBC at the start of the page-flip + * sequence, but also more than one vblank has passed. + * + * For the former case of modeswitching, it is possible + * to switch between two FBC valid configurations + * instantaneously so we do need to disable the FBC + * before we can modify its control registers. We also + * have to wait for the next vblank for that to take + * effect. However, since we delay enabling FBC we can + * assume that a vblank has passed since disabling and + * that we can safely alter the registers in the deferred + * callback. 
+ * + * In the scenario that we go from a valid to invalid + * and then back to valid FBC configuration we have + * no strict enforcement that a vblank occurred since + * disabling the FBC. However, along all current pipe + * disabling paths we do need to wait for a vblank at + * some point. And we wait before enabling FBC anyway. + */ + DRM_DEBUG_KMS("disabling active FBC for update\n"); + intel_disable_fbc(dev); + } + + intel_enable_fbc(crtc, 500); + return; + +out_disable: + /* Multiple disables should be harmless */ + if (intel_fbc_enabled(dev)) { + DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); + intel_disable_fbc(dev); + } +} + -- cgit v1.2.3-59-g8ed1b From b445e3b013adfcb05322ebde7fb16488f0644579 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Mon, 16 Apr 2012 22:20:35 -0300 Subject: drm/i915: move watermarks settings into intel_pm module Move watermarks and helper functions (such as cxsr and fifo buffers) into intel_pm module. Signed-off-by: Eugeni Dodonov Acked-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 1476 ---------------------------------- drivers/gpu/drm/i915/intel_drv.h | 40 + drivers/gpu/drm/i915/intel_pm.c | 1456 +++++++++++++++++++++++++++++++++ 3 files changed, 1496 insertions(+), 1476 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 6768d7551769..03c015c3adb3 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3187,1482 +3187,6 @@ ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock, fdi_reduce_ratio(&m_n->link_m, &m_n->link_n); } - -struct intel_watermark_params { - unsigned long fifo_size; - unsigned long max_wm; - unsigned long default_wm; - unsigned long guard_size; - unsigned long cacheline_size; -}; - -/* Pineview has different values for various configs */ -static const struct intel_watermark_params pineview_display_wm = { - PINEVIEW_DISPLAY_FIFO, - PINEVIEW_MAX_WM, - PINEVIEW_DFT_WM, - PINEVIEW_GUARD_WM, - PINEVIEW_FIFO_LINE_SIZE -}; -static const struct intel_watermark_params pineview_display_hplloff_wm = { - PINEVIEW_DISPLAY_FIFO, - PINEVIEW_MAX_WM, - PINEVIEW_DFT_HPLLOFF_WM, - PINEVIEW_GUARD_WM, - PINEVIEW_FIFO_LINE_SIZE -}; -static const struct intel_watermark_params pineview_cursor_wm = { - PINEVIEW_CURSOR_FIFO, - PINEVIEW_CURSOR_MAX_WM, - PINEVIEW_CURSOR_DFT_WM, - PINEVIEW_CURSOR_GUARD_WM, - PINEVIEW_FIFO_LINE_SIZE, -}; -static const struct intel_watermark_params pineview_cursor_hplloff_wm = { - PINEVIEW_CURSOR_FIFO, - PINEVIEW_CURSOR_MAX_WM, - PINEVIEW_CURSOR_DFT_WM, - PINEVIEW_CURSOR_GUARD_WM, - PINEVIEW_FIFO_LINE_SIZE -}; -static const struct intel_watermark_params g4x_wm_info = { - G4X_FIFO_SIZE, - G4X_MAX_WM, - G4X_MAX_WM, - 2, - G4X_FIFO_LINE_SIZE, -}; -static const struct intel_watermark_params g4x_cursor_wm_info = { - I965_CURSOR_FIFO, - I965_CURSOR_MAX_WM, - I965_CURSOR_DFT_WM, - 2, - G4X_FIFO_LINE_SIZE, -}; -static const struct intel_watermark_params valleyview_wm_info = { - VALLEYVIEW_FIFO_SIZE, - VALLEYVIEW_MAX_WM, - VALLEYVIEW_MAX_WM, - 2, - G4X_FIFO_LINE_SIZE, -}; -static const struct intel_watermark_params valleyview_cursor_wm_info = { - I965_CURSOR_FIFO, - VALLEYVIEW_CURSOR_MAX_WM, - I965_CURSOR_DFT_WM, - 2, - G4X_FIFO_LINE_SIZE, -}; -static const struct intel_watermark_params i965_cursor_wm_info = { - I965_CURSOR_FIFO, - I965_CURSOR_MAX_WM, - I965_CURSOR_DFT_WM, - 2, - I915_FIFO_LINE_SIZE, -}; -static const struct intel_watermark_params 
i945_wm_info = { - I945_FIFO_SIZE, - I915_MAX_WM, - 1, - 2, - I915_FIFO_LINE_SIZE -}; -static const struct intel_watermark_params i915_wm_info = { - I915_FIFO_SIZE, - I915_MAX_WM, - 1, - 2, - I915_FIFO_LINE_SIZE -}; -static const struct intel_watermark_params i855_wm_info = { - I855GM_FIFO_SIZE, - I915_MAX_WM, - 1, - 2, - I830_FIFO_LINE_SIZE -}; -static const struct intel_watermark_params i830_wm_info = { - I830_FIFO_SIZE, - I915_MAX_WM, - 1, - 2, - I830_FIFO_LINE_SIZE -}; - -static const struct intel_watermark_params ironlake_display_wm_info = { - ILK_DISPLAY_FIFO, - ILK_DISPLAY_MAXWM, - ILK_DISPLAY_DFTWM, - 2, - ILK_FIFO_LINE_SIZE -}; -static const struct intel_watermark_params ironlake_cursor_wm_info = { - ILK_CURSOR_FIFO, - ILK_CURSOR_MAXWM, - ILK_CURSOR_DFTWM, - 2, - ILK_FIFO_LINE_SIZE -}; -static const struct intel_watermark_params ironlake_display_srwm_info = { - ILK_DISPLAY_SR_FIFO, - ILK_DISPLAY_MAX_SRWM, - ILK_DISPLAY_DFT_SRWM, - 2, - ILK_FIFO_LINE_SIZE -}; -static const struct intel_watermark_params ironlake_cursor_srwm_info = { - ILK_CURSOR_SR_FIFO, - ILK_CURSOR_MAX_SRWM, - ILK_CURSOR_DFT_SRWM, - 2, - ILK_FIFO_LINE_SIZE -}; - -static const struct intel_watermark_params sandybridge_display_wm_info = { - SNB_DISPLAY_FIFO, - SNB_DISPLAY_MAXWM, - SNB_DISPLAY_DFTWM, - 2, - SNB_FIFO_LINE_SIZE -}; -static const struct intel_watermark_params sandybridge_cursor_wm_info = { - SNB_CURSOR_FIFO, - SNB_CURSOR_MAXWM, - SNB_CURSOR_DFTWM, - 2, - SNB_FIFO_LINE_SIZE -}; -static const struct intel_watermark_params sandybridge_display_srwm_info = { - SNB_DISPLAY_SR_FIFO, - SNB_DISPLAY_MAX_SRWM, - SNB_DISPLAY_DFT_SRWM, - 2, - SNB_FIFO_LINE_SIZE -}; -static const struct intel_watermark_params sandybridge_cursor_srwm_info = { - SNB_CURSOR_SR_FIFO, - SNB_CURSOR_MAX_SRWM, - SNB_CURSOR_DFT_SRWM, - 2, - SNB_FIFO_LINE_SIZE -}; - - -/** - * intel_calculate_wm - calculate watermark level - * @clock_in_khz: pixel clock - * @wm: chip FIFO params - * @pixel_size: display pixel size - * @latency_ns: memory latency for the platform - * - * Calculate the watermark level (the level at which the display plane will - * start fetching from memory again). Each chip has a different display - * FIFO size and allocation, so the caller needs to figure that out and pass - * in the correct intel_watermark_params structure. - * - * As the pixel clock runs, the FIFO will be drained at a rate that depends - * on the pixel size. When it reaches the watermark level, it'll start - * fetching FIFO line sized based chunks from memory until the FIFO fills - * past the watermark point. If the FIFO drains completely, a FIFO underrun - * will occur, and a display engine hang could result. - */ -static unsigned long intel_calculate_wm(unsigned long clock_in_khz, - const struct intel_watermark_params *wm, - int fifo_size, - int pixel_size, - unsigned long latency_ns) -{ - long entries_required, wm_size; - - /* - * Note: we need to make sure we don't overflow for various clock & - * latency values. - * clocks go from a few thousand to several hundred thousand. - * latency is usually a few thousand - */ - entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) / - 1000; - entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size); - - DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required); - - wm_size = fifo_size - (entries_required + wm->guard_size); - - DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size); - - /* Don't promote wm_size to unsigned... 
*/ - if (wm_size > (long)wm->max_wm) - wm_size = wm->max_wm; - if (wm_size <= 0) - wm_size = wm->default_wm; - return wm_size; -} - -struct cxsr_latency { - int is_desktop; - int is_ddr3; - unsigned long fsb_freq; - unsigned long mem_freq; - unsigned long display_sr; - unsigned long display_hpll_disable; - unsigned long cursor_sr; - unsigned long cursor_hpll_disable; -}; - -static const struct cxsr_latency cxsr_latency_table[] = { - {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ - {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ - {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ - {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */ - {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */ - - {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ - {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ - {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ - {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */ - {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */ - - {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ - {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ - {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ - {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */ - {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */ - - {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ - {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ - {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ - {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */ - {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */ - - {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ - {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ - {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ - {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */ - {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */ - - {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ - {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ - {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ - {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */ - {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */ -}; - -static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, - int is_ddr3, - int fsb, - int mem) -{ - const struct cxsr_latency *latency; - int i; - - if (fsb == 0 || mem == 0) - return NULL; - - for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { - latency = &cxsr_latency_table[i]; - if (is_desktop == latency->is_desktop && - is_ddr3 == latency->is_ddr3 && - fsb == latency->fsb_freq && mem == latency->mem_freq) - return latency; - } - - DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); - - return NULL; -} - -static void pineview_disable_cxsr(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - /* deactivate cxsr */ - I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN); -} - -/* - * Latency for FIFO fetches is dependent on several factors: - * - memory configuration (speed, channels) - * - chipset - * - current MCH state - * It can be fairly high in some situations, so here we assume a fairly - * pessimal value. 
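As a quick illustration of the intel_calculate_wm() arithmetic removed just above (it reappears unchanged in intel_pm.c later in this patch), the standalone program below reproduces the same steps: entries drained during the latency window, rounded up to cachelines, then subtracted from the FIFO size together with the guard. The clock, FIFO, guard and limit values are made-up example numbers, not taken from any of the per-platform watermark tables.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static long calculate_wm(long clock_in_khz, long pixel_size, long latency_ns,
			 long fifo_size, long cacheline_size,
			 long guard_size, long max_wm, long default_wm)
{
	/* FIFO entries drained while waiting out the memory latency */
	long entries = ((clock_in_khz / 1000) * pixel_size * latency_ns) / 1000;
	long wm;

	entries = DIV_ROUND_UP(entries, cacheline_size);
	wm = fifo_size - (entries + guard_size);

	if (wm > max_wm)
		wm = max_wm;
	if (wm <= 0)
		wm = default_wm;
	return wm;
}

int main(void)
{
	/* hypothetical: 148.5 MHz dot clock, 32bpp, 5 us latency, 96-entry FIFO */
	long wm = calculate_wm(148500, 4, 5000, 96, 64, 2, 64, 1);

	printf("watermark level: %ld\n", wm);
	return 0;
}

With these example numbers the level comes out at 47 entries, i.e. the plane starts refetching while roughly half the FIFO is still full.
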
It's a tradeoff between extra memory fetches (if we - * set this value too high, the FIFO will fetch frequently to stay full) - * and power consumption (set it too low to save power and we might see - * FIFO underruns and display "flicker"). - * - * A value of 5us seems to be a good balance; safe for very low end - * platforms but not overly aggressive on lower latency configs. - */ -static const int latency_ns = 5000; - -static int i9xx_get_fifo_size(struct drm_device *dev, int plane) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t dsparb = I915_READ(DSPARB); - int size; - - size = dsparb & 0x7f; - if (plane) - size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; - - DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, - plane ? "B" : "A", size); - - return size; -} - -static int i85x_get_fifo_size(struct drm_device *dev, int plane) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t dsparb = I915_READ(DSPARB); - int size; - - size = dsparb & 0x1ff; - if (plane) - size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size; - size >>= 1; /* Convert to cachelines */ - - DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, - plane ? "B" : "A", size); - - return size; -} - -static int i845_get_fifo_size(struct drm_device *dev, int plane) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t dsparb = I915_READ(DSPARB); - int size; - - size = dsparb & 0x7f; - size >>= 2; /* Convert to cachelines */ - - DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, - plane ? "B" : "A", - size); - - return size; -} - -static int i830_get_fifo_size(struct drm_device *dev, int plane) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t dsparb = I915_READ(DSPARB); - int size; - - size = dsparb & 0x7f; - size >>= 1; /* Convert to cachelines */ - - DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, - plane ? 
"B" : "A", size); - - return size; -} - -static struct drm_crtc *single_enabled_crtc(struct drm_device *dev) -{ - struct drm_crtc *crtc, *enabled = NULL; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - if (crtc->enabled && crtc->fb) { - if (enabled) - return NULL; - enabled = crtc; - } - } - - return enabled; -} - -static void pineview_update_wm(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc; - const struct cxsr_latency *latency; - u32 reg; - unsigned long wm; - - latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, - dev_priv->fsb_freq, dev_priv->mem_freq); - if (!latency) { - DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); - pineview_disable_cxsr(dev); - return; - } - - crtc = single_enabled_crtc(dev); - if (crtc) { - int clock = crtc->mode.clock; - int pixel_size = crtc->fb->bits_per_pixel / 8; - - /* Display SR */ - wm = intel_calculate_wm(clock, &pineview_display_wm, - pineview_display_wm.fifo_size, - pixel_size, latency->display_sr); - reg = I915_READ(DSPFW1); - reg &= ~DSPFW_SR_MASK; - reg |= wm << DSPFW_SR_SHIFT; - I915_WRITE(DSPFW1, reg); - DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); - - /* cursor SR */ - wm = intel_calculate_wm(clock, &pineview_cursor_wm, - pineview_display_wm.fifo_size, - pixel_size, latency->cursor_sr); - reg = I915_READ(DSPFW3); - reg &= ~DSPFW_CURSOR_SR_MASK; - reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT; - I915_WRITE(DSPFW3, reg); - - /* Display HPLL off SR */ - wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm, - pineview_display_hplloff_wm.fifo_size, - pixel_size, latency->display_hpll_disable); - reg = I915_READ(DSPFW3); - reg &= ~DSPFW_HPLL_SR_MASK; - reg |= wm & DSPFW_HPLL_SR_MASK; - I915_WRITE(DSPFW3, reg); - - /* cursor HPLL off SR */ - wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, - pineview_display_hplloff_wm.fifo_size, - pixel_size, latency->cursor_hpll_disable); - reg = I915_READ(DSPFW3); - reg &= ~DSPFW_HPLL_CURSOR_MASK; - reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT; - I915_WRITE(DSPFW3, reg); - DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); - - /* activate cxsr */ - I915_WRITE(DSPFW3, - I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN); - DRM_DEBUG_KMS("Self-refresh is enabled\n"); - } else { - pineview_disable_cxsr(dev); - DRM_DEBUG_KMS("Self-refresh is disabled\n"); - } -} - -static bool g4x_compute_wm0(struct drm_device *dev, - int plane, - const struct intel_watermark_params *display, - int display_latency_ns, - const struct intel_watermark_params *cursor, - int cursor_latency_ns, - int *plane_wm, - int *cursor_wm) -{ - struct drm_crtc *crtc; - int htotal, hdisplay, clock, pixel_size; - int line_time_us, line_count; - int entries, tlb_miss; - - crtc = intel_get_crtc_for_plane(dev, plane); - if (crtc->fb == NULL || !crtc->enabled) { - *cursor_wm = cursor->guard_size; - *plane_wm = display->guard_size; - return false; - } - - htotal = crtc->mode.htotal; - hdisplay = crtc->mode.hdisplay; - clock = crtc->mode.clock; - pixel_size = crtc->fb->bits_per_pixel / 8; - - /* Use the small buffer method to calculate plane watermark */ - entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; - tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8; - if (tlb_miss > 0) - entries += tlb_miss; - entries = DIV_ROUND_UP(entries, display->cacheline_size); - *plane_wm = entries + display->guard_size; - if (*plane_wm > (int)display->max_wm) - *plane_wm = display->max_wm; - - /* Use the large buffer method 
to calculate cursor watermark */ - line_time_us = ((htotal * 1000) / clock); - line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; - entries = line_count * 64 * pixel_size; - tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8; - if (tlb_miss > 0) - entries += tlb_miss; - entries = DIV_ROUND_UP(entries, cursor->cacheline_size); - *cursor_wm = entries + cursor->guard_size; - if (*cursor_wm > (int)cursor->max_wm) - *cursor_wm = (int)cursor->max_wm; - - return true; -} - -/* - * Check the wm result. - * - * If any calculated watermark values is larger than the maximum value that - * can be programmed into the associated watermark register, that watermark - * must be disabled. - */ -static bool g4x_check_srwm(struct drm_device *dev, - int display_wm, int cursor_wm, - const struct intel_watermark_params *display, - const struct intel_watermark_params *cursor) -{ - DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n", - display_wm, cursor_wm); - - if (display_wm > display->max_wm) { - DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n", - display_wm, display->max_wm); - return false; - } - - if (cursor_wm > cursor->max_wm) { - DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n", - cursor_wm, cursor->max_wm); - return false; - } - - if (!(display_wm || cursor_wm)) { - DRM_DEBUG_KMS("SR latency is 0, disabling\n"); - return false; - } - - return true; -} - -static bool g4x_compute_srwm(struct drm_device *dev, - int plane, - int latency_ns, - const struct intel_watermark_params *display, - const struct intel_watermark_params *cursor, - int *display_wm, int *cursor_wm) -{ - struct drm_crtc *crtc; - int hdisplay, htotal, pixel_size, clock; - unsigned long line_time_us; - int line_count, line_size; - int small, large; - int entries; - - if (!latency_ns) { - *display_wm = *cursor_wm = 0; - return false; - } - - crtc = intel_get_crtc_for_plane(dev, plane); - hdisplay = crtc->mode.hdisplay; - htotal = crtc->mode.htotal; - clock = crtc->mode.clock; - pixel_size = crtc->fb->bits_per_pixel / 8; - - line_time_us = (htotal * 1000) / clock; - line_count = (latency_ns / line_time_us + 1000) / 1000; - line_size = hdisplay * pixel_size; - - /* Use the minimum of the small and large buffer method for primary */ - small = ((clock * pixel_size / 1000) * latency_ns) / 1000; - large = line_count * line_size; - - entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); - *display_wm = entries + display->guard_size; - - /* calculate the self-refresh watermark for display cursor */ - entries = line_count * pixel_size * 64; - entries = DIV_ROUND_UP(entries, cursor->cacheline_size); - *cursor_wm = entries + cursor->guard_size; - - return g4x_check_srwm(dev, - *display_wm, *cursor_wm, - display, cursor); -} - -static bool vlv_compute_drain_latency(struct drm_device *dev, - int plane, - int *plane_prec_mult, - int *plane_dl, - int *cursor_prec_mult, - int *cursor_dl) -{ - struct drm_crtc *crtc; - int clock, pixel_size; - int entries; - - crtc = intel_get_crtc_for_plane(dev, plane); - if (crtc->fb == NULL || !crtc->enabled) - return false; - - clock = crtc->mode.clock; /* VESA DOT Clock */ - pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */ - - entries = (clock / 1000) * pixel_size; - *plane_prec_mult = (entries > 256) ? 
- DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16; - *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) * - pixel_size); - - entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */ - *cursor_prec_mult = (entries > 256) ? - DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16; - *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4); - - return true; -} - -/* - * Update drain latency registers of memory arbiter - * - * Valleyview SoC has a new memory arbiter and needs drain latency registers - * to be programmed. Each plane has a drain latency multiplier and a drain - * latency value. - */ - -static void vlv_update_drain_latency(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int planea_prec, planea_dl, planeb_prec, planeb_dl; - int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl; - int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is - either 16 or 32 */ - - /* For plane A, Cursor A */ - if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl, - &cursor_prec_mult, &cursora_dl)) { - cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ? - DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16; - planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ? - DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16; - - I915_WRITE(VLV_DDL1, cursora_prec | - (cursora_dl << DDL_CURSORA_SHIFT) | - planea_prec | planea_dl); - } - - /* For plane B, Cursor B */ - if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl, - &cursor_prec_mult, &cursorb_dl)) { - cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ? - DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16; - planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ? - DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16; - - I915_WRITE(VLV_DDL2, cursorb_prec | - (cursorb_dl << DDL_CURSORB_SHIFT) | - planeb_prec | planeb_dl); - } -} - -#define single_plane_enabled(mask) is_power_of_2(mask) - -static void valleyview_update_wm(struct drm_device *dev) -{ - static const int sr_latency_ns = 12000; - struct drm_i915_private *dev_priv = dev->dev_private; - int planea_wm, planeb_wm, cursora_wm, cursorb_wm; - int plane_sr, cursor_sr; - unsigned int enabled = 0; - - vlv_update_drain_latency(dev); - - if (g4x_compute_wm0(dev, 0, - &valleyview_wm_info, latency_ns, - &valleyview_cursor_wm_info, latency_ns, - &planea_wm, &cursora_wm)) - enabled |= 1; - - if (g4x_compute_wm0(dev, 1, - &valleyview_wm_info, latency_ns, - &valleyview_cursor_wm_info, latency_ns, - &planeb_wm, &cursorb_wm)) - enabled |= 2; - - plane_sr = cursor_sr = 0; - if (single_plane_enabled(enabled) && - g4x_compute_srwm(dev, ffs(enabled) - 1, - sr_latency_ns, - &valleyview_wm_info, - &valleyview_cursor_wm_info, - &plane_sr, &cursor_sr)) - I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN); - else - I915_WRITE(FW_BLC_SELF_VLV, - I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN); - - DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", - planea_wm, cursora_wm, - planeb_wm, cursorb_wm, - plane_sr, cursor_sr); - - I915_WRITE(DSPFW1, - (plane_sr << DSPFW_SR_SHIFT) | - (cursorb_wm << DSPFW_CURSORB_SHIFT) | - (planeb_wm << DSPFW_PLANEB_SHIFT) | - planea_wm); - I915_WRITE(DSPFW2, - (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | - (cursora_wm << DSPFW_CURSORA_SHIFT)); - I915_WRITE(DSPFW3, - (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT))); -} - -static void g4x_update_wm(struct drm_device *dev) -{ - static 
const int sr_latency_ns = 12000; - struct drm_i915_private *dev_priv = dev->dev_private; - int planea_wm, planeb_wm, cursora_wm, cursorb_wm; - int plane_sr, cursor_sr; - unsigned int enabled = 0; - - if (g4x_compute_wm0(dev, 0, - &g4x_wm_info, latency_ns, - &g4x_cursor_wm_info, latency_ns, - &planea_wm, &cursora_wm)) - enabled |= 1; - - if (g4x_compute_wm0(dev, 1, - &g4x_wm_info, latency_ns, - &g4x_cursor_wm_info, latency_ns, - &planeb_wm, &cursorb_wm)) - enabled |= 2; - - plane_sr = cursor_sr = 0; - if (single_plane_enabled(enabled) && - g4x_compute_srwm(dev, ffs(enabled) - 1, - sr_latency_ns, - &g4x_wm_info, - &g4x_cursor_wm_info, - &plane_sr, &cursor_sr)) - I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); - else - I915_WRITE(FW_BLC_SELF, - I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN); - - DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", - planea_wm, cursora_wm, - planeb_wm, cursorb_wm, - plane_sr, cursor_sr); - - I915_WRITE(DSPFW1, - (plane_sr << DSPFW_SR_SHIFT) | - (cursorb_wm << DSPFW_CURSORB_SHIFT) | - (planeb_wm << DSPFW_PLANEB_SHIFT) | - planea_wm); - I915_WRITE(DSPFW2, - (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | - (cursora_wm << DSPFW_CURSORA_SHIFT)); - /* HPLL off in SR has some issues on G4x... disable it */ - I915_WRITE(DSPFW3, - (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) | - (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); -} - -static void i965_update_wm(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc; - int srwm = 1; - int cursor_sr = 16; - - /* Calc sr entries for one plane configs */ - crtc = single_enabled_crtc(dev); - if (crtc) { - /* self-refresh has much higher latency */ - static const int sr_latency_ns = 12000; - int clock = crtc->mode.clock; - int htotal = crtc->mode.htotal; - int hdisplay = crtc->mode.hdisplay; - int pixel_size = crtc->fb->bits_per_pixel / 8; - unsigned long line_time_us; - int entries; - - line_time_us = ((htotal * 1000) / clock); - - /* Use ns/us then divide to preserve precision */ - entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * - pixel_size * hdisplay; - entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE); - srwm = I965_FIFO_SIZE - entries; - if (srwm < 0) - srwm = 1; - srwm &= 0x1ff; - DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n", - entries, srwm); - - entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * - pixel_size * 64; - entries = DIV_ROUND_UP(entries, - i965_cursor_wm_info.cacheline_size); - cursor_sr = i965_cursor_wm_info.fifo_size - - (entries + i965_cursor_wm_info.guard_size); - - if (cursor_sr > i965_cursor_wm_info.max_wm) - cursor_sr = i965_cursor_wm_info.max_wm; - - DRM_DEBUG_KMS("self-refresh watermark: display plane %d " - "cursor %d\n", srwm, cursor_sr); - - if (IS_CRESTLINE(dev)) - I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); - } else { - /* Turn off self refresh if both pipes are enabled */ - if (IS_CRESTLINE(dev)) - I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) - & ~FW_BLC_SELF_EN); - } - - DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", - srwm); - - /* 965 has limitations... 
*/ - I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | - (8 << 16) | (8 << 8) | (8 << 0)); - I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); - /* update cursor SR watermark */ - I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); -} - -static void i9xx_update_wm(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - const struct intel_watermark_params *wm_info; - uint32_t fwater_lo; - uint32_t fwater_hi; - int cwm, srwm = 1; - int fifo_size; - int planea_wm, planeb_wm; - struct drm_crtc *crtc, *enabled = NULL; - - if (IS_I945GM(dev)) - wm_info = &i945_wm_info; - else if (!IS_GEN2(dev)) - wm_info = &i915_wm_info; - else - wm_info = &i855_wm_info; - - fifo_size = dev_priv->display.get_fifo_size(dev, 0); - crtc = intel_get_crtc_for_plane(dev, 0); - if (crtc->enabled && crtc->fb) { - planea_wm = intel_calculate_wm(crtc->mode.clock, - wm_info, fifo_size, - crtc->fb->bits_per_pixel / 8, - latency_ns); - enabled = crtc; - } else - planea_wm = fifo_size - wm_info->guard_size; - - fifo_size = dev_priv->display.get_fifo_size(dev, 1); - crtc = intel_get_crtc_for_plane(dev, 1); - if (crtc->enabled && crtc->fb) { - planeb_wm = intel_calculate_wm(crtc->mode.clock, - wm_info, fifo_size, - crtc->fb->bits_per_pixel / 8, - latency_ns); - if (enabled == NULL) - enabled = crtc; - else - enabled = NULL; - } else - planeb_wm = fifo_size - wm_info->guard_size; - - DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); - - /* - * Overlay gets an aggressive default since video jitter is bad. - */ - cwm = 2; - - /* Play safe and disable self-refresh before adjusting watermarks. */ - if (IS_I945G(dev) || IS_I945GM(dev)) - I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0); - else if (IS_I915GM(dev)) - I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN); - - /* Calc sr entries for one plane configs */ - if (HAS_FW_BLC(dev) && enabled) { - /* self-refresh has much higher latency */ - static const int sr_latency_ns = 6000; - int clock = enabled->mode.clock; - int htotal = enabled->mode.htotal; - int hdisplay = enabled->mode.hdisplay; - int pixel_size = enabled->fb->bits_per_pixel / 8; - unsigned long line_time_us; - int entries; - - line_time_us = (htotal * 1000) / clock; - - /* Use ns/us then divide to preserve precision */ - entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * - pixel_size * hdisplay; - entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); - DRM_DEBUG_KMS("self-refresh entries: %d\n", entries); - srwm = wm_info->fifo_size - entries; - if (srwm < 0) - srwm = 1; - - if (IS_I945G(dev) || IS_I945GM(dev)) - I915_WRITE(FW_BLC_SELF, - FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); - else if (IS_I915GM(dev)) - I915_WRITE(FW_BLC_SELF, srwm & 0x3f); - } - - DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", - planea_wm, planeb_wm, cwm, srwm); - - fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); - fwater_hi = (cwm & 0x1f); - - /* Set request length to 8 cachelines per fetch */ - fwater_lo = fwater_lo | (1 << 24) | (1 << 8); - fwater_hi = fwater_hi | (1 << 8); - - I915_WRITE(FW_BLC, fwater_lo); - I915_WRITE(FW_BLC2, fwater_hi); - - if (HAS_FW_BLC(dev)) { - if (enabled) { - if (IS_I945G(dev) || IS_I945GM(dev)) - I915_WRITE(FW_BLC_SELF, - FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); - else if (IS_I915GM(dev)) - I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN); - DRM_DEBUG_KMS("memory self refresh enabled\n"); - } else - DRM_DEBUG_KMS("memory self refresh disabled\n"); - } -} - -static void i830_update_wm(struct drm_device *dev) -{ - 
struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc; - uint32_t fwater_lo; - int planea_wm; - - crtc = single_enabled_crtc(dev); - if (crtc == NULL) - return; - - planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info, - dev_priv->display.get_fifo_size(dev, 0), - crtc->fb->bits_per_pixel / 8, - latency_ns); - fwater_lo = I915_READ(FW_BLC) & ~0xfff; - fwater_lo |= (3<<8) | planea_wm; - - DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm); - - I915_WRITE(FW_BLC, fwater_lo); -} - -#define ILK_LP0_PLANE_LATENCY 700 -#define ILK_LP0_CURSOR_LATENCY 1300 - -/* - * Check the wm result. - * - * If any calculated watermark values is larger than the maximum value that - * can be programmed into the associated watermark register, that watermark - * must be disabled. - */ -static bool ironlake_check_srwm(struct drm_device *dev, int level, - int fbc_wm, int display_wm, int cursor_wm, - const struct intel_watermark_params *display, - const struct intel_watermark_params *cursor) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d," - " cursor %d\n", level, display_wm, fbc_wm, cursor_wm); - - if (fbc_wm > SNB_FBC_MAX_SRWM) { - DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n", - fbc_wm, SNB_FBC_MAX_SRWM, level); - - /* fbc has it's own way to disable FBC WM */ - I915_WRITE(DISP_ARB_CTL, - I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS); - return false; - } - - if (display_wm > display->max_wm) { - DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n", - display_wm, SNB_DISPLAY_MAX_SRWM, level); - return false; - } - - if (cursor_wm > cursor->max_wm) { - DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n", - cursor_wm, SNB_CURSOR_MAX_SRWM, level); - return false; - } - - if (!(fbc_wm || display_wm || cursor_wm)) { - DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level); - return false; - } - - return true; -} - -/* - * Compute watermark values of WM[1-3], - */ -static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane, - int latency_ns, - const struct intel_watermark_params *display, - const struct intel_watermark_params *cursor, - int *fbc_wm, int *display_wm, int *cursor_wm) -{ - struct drm_crtc *crtc; - unsigned long line_time_us; - int hdisplay, htotal, pixel_size, clock; - int line_count, line_size; - int small, large; - int entries; - - if (!latency_ns) { - *fbc_wm = *display_wm = *cursor_wm = 0; - return false; - } - - crtc = intel_get_crtc_for_plane(dev, plane); - hdisplay = crtc->mode.hdisplay; - htotal = crtc->mode.htotal; - clock = crtc->mode.clock; - pixel_size = crtc->fb->bits_per_pixel / 8; - - line_time_us = (htotal * 1000) / clock; - line_count = (latency_ns / line_time_us + 1000) / 1000; - line_size = hdisplay * pixel_size; - - /* Use the minimum of the small and large buffer method for primary */ - small = ((clock * pixel_size / 1000) * latency_ns) / 1000; - large = line_count * line_size; - - entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); - *display_wm = entries + display->guard_size; - - /* - * Spec says: - * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2 - */ - *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2; - - /* calculate the self-refresh watermark for display cursor */ - entries = line_count * pixel_size * 64; - entries = DIV_ROUND_UP(entries, cursor->cacheline_size); - *cursor_wm = entries + cursor->guard_size; - - return 
ironlake_check_srwm(dev, level, - *fbc_wm, *display_wm, *cursor_wm, - display, cursor); -} - -static void ironlake_update_wm(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int fbc_wm, plane_wm, cursor_wm; - unsigned int enabled; - - enabled = 0; - if (g4x_compute_wm0(dev, 0, - &ironlake_display_wm_info, - ILK_LP0_PLANE_LATENCY, - &ironlake_cursor_wm_info, - ILK_LP0_CURSOR_LATENCY, - &plane_wm, &cursor_wm)) { - I915_WRITE(WM0_PIPEA_ILK, - (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); - DRM_DEBUG_KMS("FIFO watermarks For pipe A -" - " plane %d, " "cursor: %d\n", - plane_wm, cursor_wm); - enabled |= 1; - } - - if (g4x_compute_wm0(dev, 1, - &ironlake_display_wm_info, - ILK_LP0_PLANE_LATENCY, - &ironlake_cursor_wm_info, - ILK_LP0_CURSOR_LATENCY, - &plane_wm, &cursor_wm)) { - I915_WRITE(WM0_PIPEB_ILK, - (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); - DRM_DEBUG_KMS("FIFO watermarks For pipe B -" - " plane %d, cursor: %d\n", - plane_wm, cursor_wm); - enabled |= 2; - } - - /* - * Calculate and update the self-refresh watermark only when one - * display plane is used. - */ - I915_WRITE(WM3_LP_ILK, 0); - I915_WRITE(WM2_LP_ILK, 0); - I915_WRITE(WM1_LP_ILK, 0); - - if (!single_plane_enabled(enabled)) - return; - enabled = ffs(enabled) - 1; - - /* WM1 */ - if (!ironlake_compute_srwm(dev, 1, enabled, - ILK_READ_WM1_LATENCY() * 500, - &ironlake_display_srwm_info, - &ironlake_cursor_srwm_info, - &fbc_wm, &plane_wm, &cursor_wm)) - return; - - I915_WRITE(WM1_LP_ILK, - WM1_LP_SR_EN | - (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | - (fbc_wm << WM1_LP_FBC_SHIFT) | - (plane_wm << WM1_LP_SR_SHIFT) | - cursor_wm); - - /* WM2 */ - if (!ironlake_compute_srwm(dev, 2, enabled, - ILK_READ_WM2_LATENCY() * 500, - &ironlake_display_srwm_info, - &ironlake_cursor_srwm_info, - &fbc_wm, &plane_wm, &cursor_wm)) - return; - - I915_WRITE(WM2_LP_ILK, - WM2_LP_EN | - (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | - (fbc_wm << WM1_LP_FBC_SHIFT) | - (plane_wm << WM1_LP_SR_SHIFT) | - cursor_wm); - - /* - * WM3 is unsupported on ILK, probably because we don't have latency - * data for that power state - */ -} - -static void sandybridge_update_wm(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ - u32 val; - int fbc_wm, plane_wm, cursor_wm; - unsigned int enabled; - - enabled = 0; - if (g4x_compute_wm0(dev, 0, - &sandybridge_display_wm_info, latency, - &sandybridge_cursor_wm_info, latency, - &plane_wm, &cursor_wm)) { - val = I915_READ(WM0_PIPEA_ILK); - val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); - I915_WRITE(WM0_PIPEA_ILK, val | - ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); - DRM_DEBUG_KMS("FIFO watermarks For pipe A -" - " plane %d, " "cursor: %d\n", - plane_wm, cursor_wm); - enabled |= 1; - } - - if (g4x_compute_wm0(dev, 1, - &sandybridge_display_wm_info, latency, - &sandybridge_cursor_wm_info, latency, - &plane_wm, &cursor_wm)) { - val = I915_READ(WM0_PIPEB_ILK); - val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); - I915_WRITE(WM0_PIPEB_ILK, val | - ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); - DRM_DEBUG_KMS("FIFO watermarks For pipe B -" - " plane %d, cursor: %d\n", - plane_wm, cursor_wm); - enabled |= 2; - } - - /* IVB has 3 pipes */ - if (IS_IVYBRIDGE(dev) && - g4x_compute_wm0(dev, 2, - &sandybridge_display_wm_info, latency, - &sandybridge_cursor_wm_info, latency, - &plane_wm, &cursor_wm)) { - val = I915_READ(WM0_PIPEC_IVB); - val &= 
~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); - I915_WRITE(WM0_PIPEC_IVB, val | - ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); - DRM_DEBUG_KMS("FIFO watermarks For pipe C -" - " plane %d, cursor: %d\n", - plane_wm, cursor_wm); - enabled |= 3; - } - - /* - * Calculate and update the self-refresh watermark only when one - * display plane is used. - * - * SNB support 3 levels of watermark. - * - * WM1/WM2/WM2 watermarks have to be enabled in the ascending order, - * and disabled in the descending order - * - */ - I915_WRITE(WM3_LP_ILK, 0); - I915_WRITE(WM2_LP_ILK, 0); - I915_WRITE(WM1_LP_ILK, 0); - - if (!single_plane_enabled(enabled) || - dev_priv->sprite_scaling_enabled) - return; - enabled = ffs(enabled) - 1; - - /* WM1 */ - if (!ironlake_compute_srwm(dev, 1, enabled, - SNB_READ_WM1_LATENCY() * 500, - &sandybridge_display_srwm_info, - &sandybridge_cursor_srwm_info, - &fbc_wm, &plane_wm, &cursor_wm)) - return; - - I915_WRITE(WM1_LP_ILK, - WM1_LP_SR_EN | - (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | - (fbc_wm << WM1_LP_FBC_SHIFT) | - (plane_wm << WM1_LP_SR_SHIFT) | - cursor_wm); - - /* WM2 */ - if (!ironlake_compute_srwm(dev, 2, enabled, - SNB_READ_WM2_LATENCY() * 500, - &sandybridge_display_srwm_info, - &sandybridge_cursor_srwm_info, - &fbc_wm, &plane_wm, &cursor_wm)) - return; - - I915_WRITE(WM2_LP_ILK, - WM2_LP_EN | - (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | - (fbc_wm << WM1_LP_FBC_SHIFT) | - (plane_wm << WM1_LP_SR_SHIFT) | - cursor_wm); - - /* WM3 */ - if (!ironlake_compute_srwm(dev, 3, enabled, - SNB_READ_WM3_LATENCY() * 500, - &sandybridge_display_srwm_info, - &sandybridge_cursor_srwm_info, - &fbc_wm, &plane_wm, &cursor_wm)) - return; - - I915_WRITE(WM3_LP_ILK, - WM3_LP_EN | - (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) | - (fbc_wm << WM1_LP_FBC_SHIFT) | - (plane_wm << WM1_LP_SR_SHIFT) | - cursor_wm); -} - -static bool -sandybridge_compute_sprite_wm(struct drm_device *dev, int plane, - uint32_t sprite_width, int pixel_size, - const struct intel_watermark_params *display, - int display_latency_ns, int *sprite_wm) -{ - struct drm_crtc *crtc; - int clock; - int entries, tlb_miss; - - crtc = intel_get_crtc_for_plane(dev, plane); - if (crtc->fb == NULL || !crtc->enabled) { - *sprite_wm = display->guard_size; - return false; - } - - clock = crtc->mode.clock; - - /* Use the small buffer method to calculate the sprite watermark */ - entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; - tlb_miss = display->fifo_size*display->cacheline_size - - sprite_width * 8; - if (tlb_miss > 0) - entries += tlb_miss; - entries = DIV_ROUND_UP(entries, display->cacheline_size); - *sprite_wm = entries + display->guard_size; - if (*sprite_wm > (int)display->max_wm) - *sprite_wm = display->max_wm; - - return true; -} - -static bool -sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane, - uint32_t sprite_width, int pixel_size, - const struct intel_watermark_params *display, - int latency_ns, int *sprite_wm) -{ - struct drm_crtc *crtc; - unsigned long line_time_us; - int clock; - int line_count, line_size; - int small, large; - int entries; - - if (!latency_ns) { - *sprite_wm = 0; - return false; - } - - crtc = intel_get_crtc_for_plane(dev, plane); - clock = crtc->mode.clock; - if (!clock) { - *sprite_wm = 0; - return false; - } - - line_time_us = (sprite_width * 1000) / clock; - if (!line_time_us) { - *sprite_wm = 0; - return false; - } - - line_count = (latency_ns / line_time_us + 1000) / 1000; - line_size = sprite_width * pixel_size; - - /* Use the 
minimum of the small and large buffer method for primary */ - small = ((clock * pixel_size / 1000) * latency_ns) / 1000; - large = line_count * line_size; - - entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); - *sprite_wm = entries + display->guard_size; - - return *sprite_wm > 0x3ff ? false : true; -} - -static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, - uint32_t sprite_width, int pixel_size) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ - u32 val; - int sprite_wm, reg; - int ret; - - switch (pipe) { - case 0: - reg = WM0_PIPEA_ILK; - break; - case 1: - reg = WM0_PIPEB_ILK; - break; - case 2: - reg = WM0_PIPEC_IVB; - break; - default: - return; /* bad pipe */ - } - - ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size, - &sandybridge_display_wm_info, - latency, &sprite_wm); - if (!ret) { - DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n", - pipe); - return; - } - - val = I915_READ(reg); - val &= ~WM0_PIPE_SPRITE_MASK; - I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT)); - DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm); - - - ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, - pixel_size, - &sandybridge_display_srwm_info, - SNB_READ_WM1_LATENCY() * 500, - &sprite_wm); - if (!ret) { - DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n", - pipe); - return; - } - I915_WRITE(WM1S_LP_ILK, sprite_wm); - - /* Only IVB has two more LP watermarks for sprite */ - if (!IS_IVYBRIDGE(dev)) - return; - - ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, - pixel_size, - &sandybridge_display_srwm_info, - SNB_READ_WM2_LATENCY() * 500, - &sprite_wm); - if (!ret) { - DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n", - pipe); - return; - } - I915_WRITE(WM2S_LP_IVB, sprite_wm); - - ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, - pixel_size, - &sandybridge_display_srwm_info, - SNB_READ_WM3_LATENCY() * 500, - &sprite_wm); - if (!ret) { - DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n", - pipe); - return; - } - I915_WRITE(WM3S_LP_IVB, sprite_wm); -} - -/** - * intel_update_watermarks - update FIFO watermark values based on current modes - * - * Calculate watermark values for the various WM regs based on current mode - * and plane configuration. - * - * There are several cases to deal with here: - * - normal (i.e. non-self-refresh) - * - self-refresh (SR) mode - * - lines are large relative to FIFO size (buffer can hold up to 2) - * - lines are small relative to FIFO size (buffer can hold more than 2 - * lines), so need to account for TLB latency - * - * The normal calculation is: - * watermark = dotclock * bytes per pixel * latency - * where latency is platform & configuration dependent (we assume pessimal - * values here). - * - * The SR calculation is: - * watermark = (trunc(latency/line time)+1) * surface width * - * bytes per pixel - * where - * line time = htotal / dotclock - * surface width = hdisplay for normal plane and 64 for cursor - * and latency is assumed to be high, as above. - * - * The final value programmed to the register should always be rounded up, - * and include an extra 2 entries to account for clock crossings. - * - * We don't use the sprite, so we can ignore that. And on Crestline we have - * to set the non-SR watermarks to 8. 
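For the self-refresh case described in this comment (watermark = (trunc(latency / line time) + 1) * surface width * bytes per pixel, with line time = htotal / dotclock), the standalone sketch below walks through the same integer arithmetic used by i965_update_wm() earlier in this patch. The mode timings, FIFO size and FIFO line size are hypothetical example values, not the I965 constants.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	const int sr_latency_ns = 12000;	/* self-refresh latency assumption */
	int clock = 148500;			/* dot clock in kHz (assumed) */
	int htotal = 2200, hdisplay = 1920;	/* assumed 1080p-like timings */
	int pixel_size = 4;			/* 32bpp */
	int fifo_size = 512, fifo_line_size = 64;	/* illustrative FIFO geometry */
	long line_time_us, entries, srwm;

	/* line time = htotal / dotclock; keep ns/us then divide for precision */
	line_time_us = (htotal * 1000) / clock;
	entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
		  pixel_size * hdisplay;
	entries = DIV_ROUND_UP(entries, fifo_line_size);
	srwm = fifo_size - entries;
	if (srwm < 0)
		srwm = 1;

	printf("SR entries: %ld, watermark: %ld\n", entries, srwm);
	return 0;
}

The "+ 1000 ... / 1000" dance mirrors the driver code: it rounds the latency-to-line-time ratio up by one line, matching the "trunc(...) + 1" in the formula above.
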
- */ -void intel_update_watermarks(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (dev_priv->display.update_wm) - dev_priv->display.update_wm(dev); -} - -void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, - uint32_t sprite_width, int pixel_size) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (dev_priv->display.update_sprite_wm) - dev_priv->display.update_sprite_wm(dev, pipe, sprite_width, - pixel_size); -} - static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) { if (i915_panel_use_ssc >= 0) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index def112ee1a35..f1e27ce18f8a 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -204,6 +204,25 @@ struct intel_plane { struct drm_intel_sprite_colorkey *key); }; +struct intel_watermark_params { + unsigned long fifo_size; + unsigned long max_wm; + unsigned long default_wm; + unsigned long guard_size; + unsigned long cacheline_size; +}; + +struct cxsr_latency { + int is_desktop; + int is_ddr3; + unsigned long fsb_freq; + unsigned long mem_freq; + unsigned long display_sr; + unsigned long display_hpll_disable; + unsigned long cursor_sr; + unsigned long cursor_hpll_disable; +}; + #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) #define to_intel_connector(x) container_of(x, struct intel_connector, base) #define to_intel_encoder(x) container_of(x, struct intel_encoder, base) @@ -449,4 +468,25 @@ extern bool intel_fbc_enabled(struct drm_device *dev); extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); extern void intel_update_fbc(struct drm_device *dev); +/* Watermarks */ +extern void pineview_update_wm(struct drm_device *dev); +extern void valleyview_update_wm(struct drm_device *dev); +extern void g4x_update_wm(struct drm_device *dev); +extern void i965_update_wm(struct drm_device *dev); +extern void i9xx_update_wm(struct drm_device *dev); +extern void i830_update_wm(struct drm_device *dev); +extern void ironlake_update_wm(struct drm_device *dev); +extern void sandybridge_update_wm(struct drm_device *dev); +extern void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, + uint32_t sprite_width, int pixel_size); +extern const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, + int is_ddr3, + int fsb, + int mem); +extern void pineview_disable_cxsr(struct drm_device *dev); +extern int i9xx_get_fifo_size(struct drm_device *dev, int plane); +extern int i85x_get_fifo_size(struct drm_device *dev, int plane); +extern int i845_get_fifo_size(struct drm_device *dev, int plane); +extern int i830_get_fifo_size(struct drm_device *dev, int plane); + #endif /* __INTEL_DRV_H__ */ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 7fbd305d3f8d..b208165157b9 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -519,3 +519,1459 @@ out_disable: } } +static const struct cxsr_latency cxsr_latency_table[] = { + {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ + {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ + {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ + {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */ + {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */ + + {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ + {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ + {1, 0, 667, 800, 3386, 33386, 
3822, 33822}, /* DDR2-800 SC */ + {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */ + {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */ + + {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ + {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ + {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ + {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */ + {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */ + + {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ + {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ + {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ + {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */ + {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */ + + {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ + {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ + {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ + {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */ + {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */ + + {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ + {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ + {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ + {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */ + {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */ +}; + +const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, + int is_ddr3, + int fsb, + int mem) +{ + const struct cxsr_latency *latency; + int i; + + if (fsb == 0 || mem == 0) + return NULL; + + for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { + latency = &cxsr_latency_table[i]; + if (is_desktop == latency->is_desktop && + is_ddr3 == latency->is_ddr3 && + fsb == latency->fsb_freq && mem == latency->mem_freq) + return latency; + } + + DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); + + return NULL; +} + +void pineview_disable_cxsr(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* deactivate cxsr */ + I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN); +} + +/* + * Latency for FIFO fetches is dependent on several factors: + * - memory configuration (speed, channels) + * - chipset + * - current MCH state + * It can be fairly high in some situations, so here we assume a fairly + * pessimal value. It's a tradeoff between extra memory fetches (if we + * set this value too high, the FIFO will fetch frequently to stay full) + * and power consumption (set it too low to save power and we might see + * FIFO underruns and display "flicker"). + * + * A value of 5us seems to be a good balance; safe for very low end + * platforms but not overly aggressive on lower latency configs. + */ +static const int latency_ns = 5000; + +int i9xx_get_fifo_size(struct drm_device *dev, int plane) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dsparb = I915_READ(DSPARB); + int size; + + size = dsparb & 0x7f; + if (plane) + size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; + + DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, + plane ? 
"B" : "A", size); + + return size; +} + +int i85x_get_fifo_size(struct drm_device *dev, int plane) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dsparb = I915_READ(DSPARB); + int size; + + size = dsparb & 0x1ff; + if (plane) + size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size; + size >>= 1; /* Convert to cachelines */ + + DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, + plane ? "B" : "A", size); + + return size; +} + +int i845_get_fifo_size(struct drm_device *dev, int plane) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dsparb = I915_READ(DSPARB); + int size; + + size = dsparb & 0x7f; + size >>= 2; /* Convert to cachelines */ + + DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, + plane ? "B" : "A", + size); + + return size; +} + +int i830_get_fifo_size(struct drm_device *dev, int plane) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dsparb = I915_READ(DSPARB); + int size; + + size = dsparb & 0x7f; + size >>= 1; /* Convert to cachelines */ + + DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, + plane ? "B" : "A", size); + + return size; +} + +/* Pineview has different values for various configs */ +static const struct intel_watermark_params pineview_display_wm = { + PINEVIEW_DISPLAY_FIFO, + PINEVIEW_MAX_WM, + PINEVIEW_DFT_WM, + PINEVIEW_GUARD_WM, + PINEVIEW_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params pineview_display_hplloff_wm = { + PINEVIEW_DISPLAY_FIFO, + PINEVIEW_MAX_WM, + PINEVIEW_DFT_HPLLOFF_WM, + PINEVIEW_GUARD_WM, + PINEVIEW_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params pineview_cursor_wm = { + PINEVIEW_CURSOR_FIFO, + PINEVIEW_CURSOR_MAX_WM, + PINEVIEW_CURSOR_DFT_WM, + PINEVIEW_CURSOR_GUARD_WM, + PINEVIEW_FIFO_LINE_SIZE, +}; +static const struct intel_watermark_params pineview_cursor_hplloff_wm = { + PINEVIEW_CURSOR_FIFO, + PINEVIEW_CURSOR_MAX_WM, + PINEVIEW_CURSOR_DFT_WM, + PINEVIEW_CURSOR_GUARD_WM, + PINEVIEW_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params g4x_wm_info = { + G4X_FIFO_SIZE, + G4X_MAX_WM, + G4X_MAX_WM, + 2, + G4X_FIFO_LINE_SIZE, +}; +static const struct intel_watermark_params g4x_cursor_wm_info = { + I965_CURSOR_FIFO, + I965_CURSOR_MAX_WM, + I965_CURSOR_DFT_WM, + 2, + G4X_FIFO_LINE_SIZE, +}; +static const struct intel_watermark_params valleyview_wm_info = { + VALLEYVIEW_FIFO_SIZE, + VALLEYVIEW_MAX_WM, + VALLEYVIEW_MAX_WM, + 2, + G4X_FIFO_LINE_SIZE, +}; +static const struct intel_watermark_params valleyview_cursor_wm_info = { + I965_CURSOR_FIFO, + VALLEYVIEW_CURSOR_MAX_WM, + I965_CURSOR_DFT_WM, + 2, + G4X_FIFO_LINE_SIZE, +}; +static const struct intel_watermark_params i965_cursor_wm_info = { + I965_CURSOR_FIFO, + I965_CURSOR_MAX_WM, + I965_CURSOR_DFT_WM, + 2, + I915_FIFO_LINE_SIZE, +}; +static const struct intel_watermark_params i945_wm_info = { + I945_FIFO_SIZE, + I915_MAX_WM, + 1, + 2, + I915_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params i915_wm_info = { + I915_FIFO_SIZE, + I915_MAX_WM, + 1, + 2, + I915_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params i855_wm_info = { + I855GM_FIFO_SIZE, + I915_MAX_WM, + 1, + 2, + I830_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params i830_wm_info = { + I830_FIFO_SIZE, + I915_MAX_WM, + 1, + 2, + I830_FIFO_LINE_SIZE +}; + +static const struct intel_watermark_params ironlake_display_wm_info = { + ILK_DISPLAY_FIFO, + ILK_DISPLAY_MAXWM, + ILK_DISPLAY_DFTWM, + 2, + ILK_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params 
ironlake_cursor_wm_info = { + ILK_CURSOR_FIFO, + ILK_CURSOR_MAXWM, + ILK_CURSOR_DFTWM, + 2, + ILK_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params ironlake_display_srwm_info = { + ILK_DISPLAY_SR_FIFO, + ILK_DISPLAY_MAX_SRWM, + ILK_DISPLAY_DFT_SRWM, + 2, + ILK_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params ironlake_cursor_srwm_info = { + ILK_CURSOR_SR_FIFO, + ILK_CURSOR_MAX_SRWM, + ILK_CURSOR_DFT_SRWM, + 2, + ILK_FIFO_LINE_SIZE +}; + +static const struct intel_watermark_params sandybridge_display_wm_info = { + SNB_DISPLAY_FIFO, + SNB_DISPLAY_MAXWM, + SNB_DISPLAY_DFTWM, + 2, + SNB_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params sandybridge_cursor_wm_info = { + SNB_CURSOR_FIFO, + SNB_CURSOR_MAXWM, + SNB_CURSOR_DFTWM, + 2, + SNB_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params sandybridge_display_srwm_info = { + SNB_DISPLAY_SR_FIFO, + SNB_DISPLAY_MAX_SRWM, + SNB_DISPLAY_DFT_SRWM, + 2, + SNB_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params sandybridge_cursor_srwm_info = { + SNB_CURSOR_SR_FIFO, + SNB_CURSOR_MAX_SRWM, + SNB_CURSOR_DFT_SRWM, + 2, + SNB_FIFO_LINE_SIZE +}; + + +/** + * intel_calculate_wm - calculate watermark level + * @clock_in_khz: pixel clock + * @wm: chip FIFO params + * @pixel_size: display pixel size + * @latency_ns: memory latency for the platform + * + * Calculate the watermark level (the level at which the display plane will + * start fetching from memory again). Each chip has a different display + * FIFO size and allocation, so the caller needs to figure that out and pass + * in the correct intel_watermark_params structure. + * + * As the pixel clock runs, the FIFO will be drained at a rate that depends + * on the pixel size. When it reaches the watermark level, it'll start + * fetching FIFO line sized based chunks from memory until the FIFO fills + * past the watermark point. If the FIFO drains completely, a FIFO underrun + * will occur, and a display engine hang could result. + */ +static unsigned long intel_calculate_wm(unsigned long clock_in_khz, + const struct intel_watermark_params *wm, + int fifo_size, + int pixel_size, + unsigned long latency_ns) +{ + long entries_required, wm_size; + + /* + * Note: we need to make sure we don't overflow for various clock & + * latency values. + * clocks go from a few thousand to several hundred thousand. + * latency is usually a few thousand + */ + entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) / + 1000; + entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size); + + DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required); + + wm_size = fifo_size - (entries_required + wm->guard_size); + + DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size); + + /* Don't promote wm_size to unsigned... 
*/ + if (wm_size > (long)wm->max_wm) + wm_size = wm->max_wm; + if (wm_size <= 0) + wm_size = wm->default_wm; + return wm_size; +} + +static struct drm_crtc *single_enabled_crtc(struct drm_device *dev) +{ + struct drm_crtc *crtc, *enabled = NULL; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + if (crtc->enabled && crtc->fb) { + if (enabled) + return NULL; + enabled = crtc; + } + } + + return enabled; +} + +void pineview_update_wm(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc; + const struct cxsr_latency *latency; + u32 reg; + unsigned long wm; + + latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, + dev_priv->fsb_freq, dev_priv->mem_freq); + if (!latency) { + DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); + pineview_disable_cxsr(dev); + return; + } + + crtc = single_enabled_crtc(dev); + if (crtc) { + int clock = crtc->mode.clock; + int pixel_size = crtc->fb->bits_per_pixel / 8; + + /* Display SR */ + wm = intel_calculate_wm(clock, &pineview_display_wm, + pineview_display_wm.fifo_size, + pixel_size, latency->display_sr); + reg = I915_READ(DSPFW1); + reg &= ~DSPFW_SR_MASK; + reg |= wm << DSPFW_SR_SHIFT; + I915_WRITE(DSPFW1, reg); + DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); + + /* cursor SR */ + wm = intel_calculate_wm(clock, &pineview_cursor_wm, + pineview_display_wm.fifo_size, + pixel_size, latency->cursor_sr); + reg = I915_READ(DSPFW3); + reg &= ~DSPFW_CURSOR_SR_MASK; + reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT; + I915_WRITE(DSPFW3, reg); + + /* Display HPLL off SR */ + wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm, + pineview_display_hplloff_wm.fifo_size, + pixel_size, latency->display_hpll_disable); + reg = I915_READ(DSPFW3); + reg &= ~DSPFW_HPLL_SR_MASK; + reg |= wm & DSPFW_HPLL_SR_MASK; + I915_WRITE(DSPFW3, reg); + + /* cursor HPLL off SR */ + wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, + pineview_display_hplloff_wm.fifo_size, + pixel_size, latency->cursor_hpll_disable); + reg = I915_READ(DSPFW3); + reg &= ~DSPFW_HPLL_CURSOR_MASK; + reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT; + I915_WRITE(DSPFW3, reg); + DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); + + /* activate cxsr */ + I915_WRITE(DSPFW3, + I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN); + DRM_DEBUG_KMS("Self-refresh is enabled\n"); + } else { + pineview_disable_cxsr(dev); + DRM_DEBUG_KMS("Self-refresh is disabled\n"); + } +} + +static bool g4x_compute_wm0(struct drm_device *dev, + int plane, + const struct intel_watermark_params *display, + int display_latency_ns, + const struct intel_watermark_params *cursor, + int cursor_latency_ns, + int *plane_wm, + int *cursor_wm) +{ + struct drm_crtc *crtc; + int htotal, hdisplay, clock, pixel_size; + int line_time_us, line_count; + int entries, tlb_miss; + + crtc = intel_get_crtc_for_plane(dev, plane); + if (crtc->fb == NULL || !crtc->enabled) { + *cursor_wm = cursor->guard_size; + *plane_wm = display->guard_size; + return false; + } + + htotal = crtc->mode.htotal; + hdisplay = crtc->mode.hdisplay; + clock = crtc->mode.clock; + pixel_size = crtc->fb->bits_per_pixel / 8; + + /* Use the small buffer method to calculate plane watermark */ + entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; + tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8; + if (tlb_miss > 0) + entries += tlb_miss; + entries = DIV_ROUND_UP(entries, display->cacheline_size); + *plane_wm = entries + display->guard_size; + if (*plane_wm > 
(int)display->max_wm) + *plane_wm = display->max_wm; + + /* Use the large buffer method to calculate cursor watermark */ + line_time_us = ((htotal * 1000) / clock); + line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; + entries = line_count * 64 * pixel_size; + tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8; + if (tlb_miss > 0) + entries += tlb_miss; + entries = DIV_ROUND_UP(entries, cursor->cacheline_size); + *cursor_wm = entries + cursor->guard_size; + if (*cursor_wm > (int)cursor->max_wm) + *cursor_wm = (int)cursor->max_wm; + + return true; +} + +/* + * Check the wm result. + * + * If any calculated watermark values is larger than the maximum value that + * can be programmed into the associated watermark register, that watermark + * must be disabled. + */ +static bool g4x_check_srwm(struct drm_device *dev, + int display_wm, int cursor_wm, + const struct intel_watermark_params *display, + const struct intel_watermark_params *cursor) +{ + DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n", + display_wm, cursor_wm); + + if (display_wm > display->max_wm) { + DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n", + display_wm, display->max_wm); + return false; + } + + if (cursor_wm > cursor->max_wm) { + DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n", + cursor_wm, cursor->max_wm); + return false; + } + + if (!(display_wm || cursor_wm)) { + DRM_DEBUG_KMS("SR latency is 0, disabling\n"); + return false; + } + + return true; +} + +static bool g4x_compute_srwm(struct drm_device *dev, + int plane, + int latency_ns, + const struct intel_watermark_params *display, + const struct intel_watermark_params *cursor, + int *display_wm, int *cursor_wm) +{ + struct drm_crtc *crtc; + int hdisplay, htotal, pixel_size, clock; + unsigned long line_time_us; + int line_count, line_size; + int small, large; + int entries; + + if (!latency_ns) { + *display_wm = *cursor_wm = 0; + return false; + } + + crtc = intel_get_crtc_for_plane(dev, plane); + hdisplay = crtc->mode.hdisplay; + htotal = crtc->mode.htotal; + clock = crtc->mode.clock; + pixel_size = crtc->fb->bits_per_pixel / 8; + + line_time_us = (htotal * 1000) / clock; + line_count = (latency_ns / line_time_us + 1000) / 1000; + line_size = hdisplay * pixel_size; + + /* Use the minimum of the small and large buffer method for primary */ + small = ((clock * pixel_size / 1000) * latency_ns) / 1000; + large = line_count * line_size; + + entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); + *display_wm = entries + display->guard_size; + + /* calculate the self-refresh watermark for display cursor */ + entries = line_count * pixel_size * 64; + entries = DIV_ROUND_UP(entries, cursor->cacheline_size); + *cursor_wm = entries + cursor->guard_size; + + return g4x_check_srwm(dev, + *display_wm, *cursor_wm, + display, cursor); +} + +static bool vlv_compute_drain_latency(struct drm_device *dev, + int plane, + int *plane_prec_mult, + int *plane_dl, + int *cursor_prec_mult, + int *cursor_dl) +{ + struct drm_crtc *crtc; + int clock, pixel_size; + int entries; + + crtc = intel_get_crtc_for_plane(dev, plane); + if (crtc->fb == NULL || !crtc->enabled) + return false; + + clock = crtc->mode.clock; /* VESA DOT Clock */ + pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */ + + entries = (clock / 1000) * pixel_size; + *plane_prec_mult = (entries > 256) ? 
+ DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16; + *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) * + pixel_size); + + entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */ + *cursor_prec_mult = (entries > 256) ? + DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16; + *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4); + + return true; +} + +/* + * Update drain latency registers of memory arbiter + * + * Valleyview SoC has a new memory arbiter and needs drain latency registers + * to be programmed. Each plane has a drain latency multiplier and a drain + * latency value. + */ + +static void vlv_update_drain_latency(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int planea_prec, planea_dl, planeb_prec, planeb_dl; + int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl; + int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is + either 16 or 32 */ + + /* For plane A, Cursor A */ + if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl, + &cursor_prec_mult, &cursora_dl)) { + cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ? + DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16; + planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ? + DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16; + + I915_WRITE(VLV_DDL1, cursora_prec | + (cursora_dl << DDL_CURSORA_SHIFT) | + planea_prec | planea_dl); + } + + /* For plane B, Cursor B */ + if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl, + &cursor_prec_mult, &cursorb_dl)) { + cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ? + DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16; + planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ? + DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16; + + I915_WRITE(VLV_DDL2, cursorb_prec | + (cursorb_dl << DDL_CURSORB_SHIFT) | + planeb_prec | planeb_dl); + } +} + +#define single_plane_enabled(mask) is_power_of_2(mask) + +void valleyview_update_wm(struct drm_device *dev) +{ + static const int sr_latency_ns = 12000; + struct drm_i915_private *dev_priv = dev->dev_private; + int planea_wm, planeb_wm, cursora_wm, cursorb_wm; + int plane_sr, cursor_sr; + unsigned int enabled = 0; + + vlv_update_drain_latency(dev); + + if (g4x_compute_wm0(dev, 0, + &valleyview_wm_info, latency_ns, + &valleyview_cursor_wm_info, latency_ns, + &planea_wm, &cursora_wm)) + enabled |= 1; + + if (g4x_compute_wm0(dev, 1, + &valleyview_wm_info, latency_ns, + &valleyview_cursor_wm_info, latency_ns, + &planeb_wm, &cursorb_wm)) + enabled |= 2; + + plane_sr = cursor_sr = 0; + if (single_plane_enabled(enabled) && + g4x_compute_srwm(dev, ffs(enabled) - 1, + sr_latency_ns, + &valleyview_wm_info, + &valleyview_cursor_wm_info, + &plane_sr, &cursor_sr)) + I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN); + else + I915_WRITE(FW_BLC_SELF_VLV, + I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN); + + DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", + planea_wm, cursora_wm, + planeb_wm, cursorb_wm, + plane_sr, cursor_sr); + + I915_WRITE(DSPFW1, + (plane_sr << DSPFW_SR_SHIFT) | + (cursorb_wm << DSPFW_CURSORB_SHIFT) | + (planeb_wm << DSPFW_PLANEB_SHIFT) | + planea_wm); + I915_WRITE(DSPFW2, + (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | + (cursora_wm << DSPFW_CURSORA_SHIFT)); + I915_WRITE(DSPFW3, + (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT))); +} + +void g4x_update_wm(struct drm_device *dev) +{ + static const int 
sr_latency_ns = 12000; + struct drm_i915_private *dev_priv = dev->dev_private; + int planea_wm, planeb_wm, cursora_wm, cursorb_wm; + int plane_sr, cursor_sr; + unsigned int enabled = 0; + + if (g4x_compute_wm0(dev, 0, + &g4x_wm_info, latency_ns, + &g4x_cursor_wm_info, latency_ns, + &planea_wm, &cursora_wm)) + enabled |= 1; + + if (g4x_compute_wm0(dev, 1, + &g4x_wm_info, latency_ns, + &g4x_cursor_wm_info, latency_ns, + &planeb_wm, &cursorb_wm)) + enabled |= 2; + + plane_sr = cursor_sr = 0; + if (single_plane_enabled(enabled) && + g4x_compute_srwm(dev, ffs(enabled) - 1, + sr_latency_ns, + &g4x_wm_info, + &g4x_cursor_wm_info, + &plane_sr, &cursor_sr)) + I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); + else + I915_WRITE(FW_BLC_SELF, + I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN); + + DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", + planea_wm, cursora_wm, + planeb_wm, cursorb_wm, + plane_sr, cursor_sr); + + I915_WRITE(DSPFW1, + (plane_sr << DSPFW_SR_SHIFT) | + (cursorb_wm << DSPFW_CURSORB_SHIFT) | + (planeb_wm << DSPFW_PLANEB_SHIFT) | + planea_wm); + I915_WRITE(DSPFW2, + (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | + (cursora_wm << DSPFW_CURSORA_SHIFT)); + /* HPLL off in SR has some issues on G4x... disable it */ + I915_WRITE(DSPFW3, + (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) | + (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); +} + +void i965_update_wm(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc; + int srwm = 1; + int cursor_sr = 16; + + /* Calc sr entries for one plane configs */ + crtc = single_enabled_crtc(dev); + if (crtc) { + /* self-refresh has much higher latency */ + static const int sr_latency_ns = 12000; + int clock = crtc->mode.clock; + int htotal = crtc->mode.htotal; + int hdisplay = crtc->mode.hdisplay; + int pixel_size = crtc->fb->bits_per_pixel / 8; + unsigned long line_time_us; + int entries; + + line_time_us = ((htotal * 1000) / clock); + + /* Use ns/us then divide to preserve precision */ + entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * + pixel_size * hdisplay; + entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE); + srwm = I965_FIFO_SIZE - entries; + if (srwm < 0) + srwm = 1; + srwm &= 0x1ff; + DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n", + entries, srwm); + + entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * + pixel_size * 64; + entries = DIV_ROUND_UP(entries, + i965_cursor_wm_info.cacheline_size); + cursor_sr = i965_cursor_wm_info.fifo_size - + (entries + i965_cursor_wm_info.guard_size); + + if (cursor_sr > i965_cursor_wm_info.max_wm) + cursor_sr = i965_cursor_wm_info.max_wm; + + DRM_DEBUG_KMS("self-refresh watermark: display plane %d " + "cursor %d\n", srwm, cursor_sr); + + if (IS_CRESTLINE(dev)) + I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); + } else { + /* Turn off self refresh if both pipes are enabled */ + if (IS_CRESTLINE(dev)) + I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) + & ~FW_BLC_SELF_EN); + } + + DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", + srwm); + + /* 965 has limitations... 
*/ + I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | + (8 << 16) | (8 << 8) | (8 << 0)); + I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); + /* update cursor SR watermark */ + I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); +} + +void i9xx_update_wm(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + const struct intel_watermark_params *wm_info; + uint32_t fwater_lo; + uint32_t fwater_hi; + int cwm, srwm = 1; + int fifo_size; + int planea_wm, planeb_wm; + struct drm_crtc *crtc, *enabled = NULL; + + if (IS_I945GM(dev)) + wm_info = &i945_wm_info; + else if (!IS_GEN2(dev)) + wm_info = &i915_wm_info; + else + wm_info = &i855_wm_info; + + fifo_size = dev_priv->display.get_fifo_size(dev, 0); + crtc = intel_get_crtc_for_plane(dev, 0); + if (crtc->enabled && crtc->fb) { + planea_wm = intel_calculate_wm(crtc->mode.clock, + wm_info, fifo_size, + crtc->fb->bits_per_pixel / 8, + latency_ns); + enabled = crtc; + } else + planea_wm = fifo_size - wm_info->guard_size; + + fifo_size = dev_priv->display.get_fifo_size(dev, 1); + crtc = intel_get_crtc_for_plane(dev, 1); + if (crtc->enabled && crtc->fb) { + planeb_wm = intel_calculate_wm(crtc->mode.clock, + wm_info, fifo_size, + crtc->fb->bits_per_pixel / 8, + latency_ns); + if (enabled == NULL) + enabled = crtc; + else + enabled = NULL; + } else + planeb_wm = fifo_size - wm_info->guard_size; + + DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); + + /* + * Overlay gets an aggressive default since video jitter is bad. + */ + cwm = 2; + + /* Play safe and disable self-refresh before adjusting watermarks. */ + if (IS_I945G(dev) || IS_I945GM(dev)) + I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0); + else if (IS_I915GM(dev)) + I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN); + + /* Calc sr entries for one plane configs */ + if (HAS_FW_BLC(dev) && enabled) { + /* self-refresh has much higher latency */ + static const int sr_latency_ns = 6000; + int clock = enabled->mode.clock; + int htotal = enabled->mode.htotal; + int hdisplay = enabled->mode.hdisplay; + int pixel_size = enabled->fb->bits_per_pixel / 8; + unsigned long line_time_us; + int entries; + + line_time_us = (htotal * 1000) / clock; + + /* Use ns/us then divide to preserve precision */ + entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * + pixel_size * hdisplay; + entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); + DRM_DEBUG_KMS("self-refresh entries: %d\n", entries); + srwm = wm_info->fifo_size - entries; + if (srwm < 0) + srwm = 1; + + if (IS_I945G(dev) || IS_I945GM(dev)) + I915_WRITE(FW_BLC_SELF, + FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); + else if (IS_I915GM(dev)) + I915_WRITE(FW_BLC_SELF, srwm & 0x3f); + } + + DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", + planea_wm, planeb_wm, cwm, srwm); + + fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); + fwater_hi = (cwm & 0x1f); + + /* Set request length to 8 cachelines per fetch */ + fwater_lo = fwater_lo | (1 << 24) | (1 << 8); + fwater_hi = fwater_hi | (1 << 8); + + I915_WRITE(FW_BLC, fwater_lo); + I915_WRITE(FW_BLC2, fwater_hi); + + if (HAS_FW_BLC(dev)) { + if (enabled) { + if (IS_I945G(dev) || IS_I945GM(dev)) + I915_WRITE(FW_BLC_SELF, + FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); + else if (IS_I915GM(dev)) + I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN); + DRM_DEBUG_KMS("memory self refresh enabled\n"); + } else + DRM_DEBUG_KMS("memory self refresh disabled\n"); + } +} + +void i830_update_wm(struct drm_device *dev) +{ + struct 
drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc; + uint32_t fwater_lo; + int planea_wm; + + crtc = single_enabled_crtc(dev); + if (crtc == NULL) + return; + + planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info, + dev_priv->display.get_fifo_size(dev, 0), + crtc->fb->bits_per_pixel / 8, + latency_ns); + fwater_lo = I915_READ(FW_BLC) & ~0xfff; + fwater_lo |= (3<<8) | planea_wm; + + DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm); + + I915_WRITE(FW_BLC, fwater_lo); +} + +#define ILK_LP0_PLANE_LATENCY 700 +#define ILK_LP0_CURSOR_LATENCY 1300 + +/* + * Check the wm result. + * + * If any calculated watermark values is larger than the maximum value that + * can be programmed into the associated watermark register, that watermark + * must be disabled. + */ +static bool ironlake_check_srwm(struct drm_device *dev, int level, + int fbc_wm, int display_wm, int cursor_wm, + const struct intel_watermark_params *display, + const struct intel_watermark_params *cursor) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d," + " cursor %d\n", level, display_wm, fbc_wm, cursor_wm); + + if (fbc_wm > SNB_FBC_MAX_SRWM) { + DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n", + fbc_wm, SNB_FBC_MAX_SRWM, level); + + /* fbc has it's own way to disable FBC WM */ + I915_WRITE(DISP_ARB_CTL, + I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS); + return false; + } + + if (display_wm > display->max_wm) { + DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n", + display_wm, SNB_DISPLAY_MAX_SRWM, level); + return false; + } + + if (cursor_wm > cursor->max_wm) { + DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n", + cursor_wm, SNB_CURSOR_MAX_SRWM, level); + return false; + } + + if (!(fbc_wm || display_wm || cursor_wm)) { + DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level); + return false; + } + + return true; +} + +/* + * Compute watermark values of WM[1-3], + */ +static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane, + int latency_ns, + const struct intel_watermark_params *display, + const struct intel_watermark_params *cursor, + int *fbc_wm, int *display_wm, int *cursor_wm) +{ + struct drm_crtc *crtc; + unsigned long line_time_us; + int hdisplay, htotal, pixel_size, clock; + int line_count, line_size; + int small, large; + int entries; + + if (!latency_ns) { + *fbc_wm = *display_wm = *cursor_wm = 0; + return false; + } + + crtc = intel_get_crtc_for_plane(dev, plane); + hdisplay = crtc->mode.hdisplay; + htotal = crtc->mode.htotal; + clock = crtc->mode.clock; + pixel_size = crtc->fb->bits_per_pixel / 8; + + line_time_us = (htotal * 1000) / clock; + line_count = (latency_ns / line_time_us + 1000) / 1000; + line_size = hdisplay * pixel_size; + + /* Use the minimum of the small and large buffer method for primary */ + small = ((clock * pixel_size / 1000) * latency_ns) / 1000; + large = line_count * line_size; + + entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); + *display_wm = entries + display->guard_size; + + /* + * Spec says: + * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2 + */ + *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2; + + /* calculate the self-refresh watermark for display cursor */ + entries = line_count * pixel_size * 64; + entries = DIV_ROUND_UP(entries, cursor->cacheline_size); + *cursor_wm = entries + cursor->guard_size; + + return 
ironlake_check_srwm(dev, level, + *fbc_wm, *display_wm, *cursor_wm, + display, cursor); +} + +void ironlake_update_wm(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int fbc_wm, plane_wm, cursor_wm; + unsigned int enabled; + + enabled = 0; + if (g4x_compute_wm0(dev, 0, + &ironlake_display_wm_info, + ILK_LP0_PLANE_LATENCY, + &ironlake_cursor_wm_info, + ILK_LP0_CURSOR_LATENCY, + &plane_wm, &cursor_wm)) { + I915_WRITE(WM0_PIPEA_ILK, + (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); + DRM_DEBUG_KMS("FIFO watermarks For pipe A -" + " plane %d, " "cursor: %d\n", + plane_wm, cursor_wm); + enabled |= 1; + } + + if (g4x_compute_wm0(dev, 1, + &ironlake_display_wm_info, + ILK_LP0_PLANE_LATENCY, + &ironlake_cursor_wm_info, + ILK_LP0_CURSOR_LATENCY, + &plane_wm, &cursor_wm)) { + I915_WRITE(WM0_PIPEB_ILK, + (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); + DRM_DEBUG_KMS("FIFO watermarks For pipe B -" + " plane %d, cursor: %d\n", + plane_wm, cursor_wm); + enabled |= 2; + } + + /* + * Calculate and update the self-refresh watermark only when one + * display plane is used. + */ + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); + + if (!single_plane_enabled(enabled)) + return; + enabled = ffs(enabled) - 1; + + /* WM1 */ + if (!ironlake_compute_srwm(dev, 1, enabled, + ILK_READ_WM1_LATENCY() * 500, + &ironlake_display_srwm_info, + &ironlake_cursor_srwm_info, + &fbc_wm, &plane_wm, &cursor_wm)) + return; + + I915_WRITE(WM1_LP_ILK, + WM1_LP_SR_EN | + (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (fbc_wm << WM1_LP_FBC_SHIFT) | + (plane_wm << WM1_LP_SR_SHIFT) | + cursor_wm); + + /* WM2 */ + if (!ironlake_compute_srwm(dev, 2, enabled, + ILK_READ_WM2_LATENCY() * 500, + &ironlake_display_srwm_info, + &ironlake_cursor_srwm_info, + &fbc_wm, &plane_wm, &cursor_wm)) + return; + + I915_WRITE(WM2_LP_ILK, + WM2_LP_EN | + (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (fbc_wm << WM1_LP_FBC_SHIFT) | + (plane_wm << WM1_LP_SR_SHIFT) | + cursor_wm); + + /* + * WM3 is unsupported on ILK, probably because we don't have latency + * data for that power state + */ +} + +void sandybridge_update_wm(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ + u32 val; + int fbc_wm, plane_wm, cursor_wm; + unsigned int enabled; + + enabled = 0; + if (g4x_compute_wm0(dev, 0, + &sandybridge_display_wm_info, latency, + &sandybridge_cursor_wm_info, latency, + &plane_wm, &cursor_wm)) { + val = I915_READ(WM0_PIPEA_ILK); + val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); + I915_WRITE(WM0_PIPEA_ILK, val | + ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); + DRM_DEBUG_KMS("FIFO watermarks For pipe A -" + " plane %d, " "cursor: %d\n", + plane_wm, cursor_wm); + enabled |= 1; + } + + if (g4x_compute_wm0(dev, 1, + &sandybridge_display_wm_info, latency, + &sandybridge_cursor_wm_info, latency, + &plane_wm, &cursor_wm)) { + val = I915_READ(WM0_PIPEB_ILK); + val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); + I915_WRITE(WM0_PIPEB_ILK, val | + ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); + DRM_DEBUG_KMS("FIFO watermarks For pipe B -" + " plane %d, cursor: %d\n", + plane_wm, cursor_wm); + enabled |= 2; + } + + /* IVB has 3 pipes */ + if (IS_IVYBRIDGE(dev) && + g4x_compute_wm0(dev, 2, + &sandybridge_display_wm_info, latency, + &sandybridge_cursor_wm_info, latency, + &plane_wm, &cursor_wm)) { + val = I915_READ(WM0_PIPEC_IVB); + val &= ~(WM0_PIPE_PLANE_MASK | 
WM0_PIPE_CURSOR_MASK); + I915_WRITE(WM0_PIPEC_IVB, val | + ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); + DRM_DEBUG_KMS("FIFO watermarks For pipe C -" + " plane %d, cursor: %d\n", + plane_wm, cursor_wm); + enabled |= 3; + } + + /* + * Calculate and update the self-refresh watermark only when one + * display plane is used. + * + * SNB support 3 levels of watermark. + * + * WM1/WM2/WM2 watermarks have to be enabled in the ascending order, + * and disabled in the descending order + * + */ + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); + + if (!single_plane_enabled(enabled) || + dev_priv->sprite_scaling_enabled) + return; + enabled = ffs(enabled) - 1; + + /* WM1 */ + if (!ironlake_compute_srwm(dev, 1, enabled, + SNB_READ_WM1_LATENCY() * 500, + &sandybridge_display_srwm_info, + &sandybridge_cursor_srwm_info, + &fbc_wm, &plane_wm, &cursor_wm)) + return; + + I915_WRITE(WM1_LP_ILK, + WM1_LP_SR_EN | + (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (fbc_wm << WM1_LP_FBC_SHIFT) | + (plane_wm << WM1_LP_SR_SHIFT) | + cursor_wm); + + /* WM2 */ + if (!ironlake_compute_srwm(dev, 2, enabled, + SNB_READ_WM2_LATENCY() * 500, + &sandybridge_display_srwm_info, + &sandybridge_cursor_srwm_info, + &fbc_wm, &plane_wm, &cursor_wm)) + return; + + I915_WRITE(WM2_LP_ILK, + WM2_LP_EN | + (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (fbc_wm << WM1_LP_FBC_SHIFT) | + (plane_wm << WM1_LP_SR_SHIFT) | + cursor_wm); + + /* WM3 */ + if (!ironlake_compute_srwm(dev, 3, enabled, + SNB_READ_WM3_LATENCY() * 500, + &sandybridge_display_srwm_info, + &sandybridge_cursor_srwm_info, + &fbc_wm, &plane_wm, &cursor_wm)) + return; + + I915_WRITE(WM3_LP_ILK, + WM3_LP_EN | + (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (fbc_wm << WM1_LP_FBC_SHIFT) | + (plane_wm << WM1_LP_SR_SHIFT) | + cursor_wm); +} + +static bool +sandybridge_compute_sprite_wm(struct drm_device *dev, int plane, + uint32_t sprite_width, int pixel_size, + const struct intel_watermark_params *display, + int display_latency_ns, int *sprite_wm) +{ + struct drm_crtc *crtc; + int clock; + int entries, tlb_miss; + + crtc = intel_get_crtc_for_plane(dev, plane); + if (crtc->fb == NULL || !crtc->enabled) { + *sprite_wm = display->guard_size; + return false; + } + + clock = crtc->mode.clock; + + /* Use the small buffer method to calculate the sprite watermark */ + entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; + tlb_miss = display->fifo_size*display->cacheline_size - + sprite_width * 8; + if (tlb_miss > 0) + entries += tlb_miss; + entries = DIV_ROUND_UP(entries, display->cacheline_size); + *sprite_wm = entries + display->guard_size; + if (*sprite_wm > (int)display->max_wm) + *sprite_wm = display->max_wm; + + return true; +} + +static bool +sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane, + uint32_t sprite_width, int pixel_size, + const struct intel_watermark_params *display, + int latency_ns, int *sprite_wm) +{ + struct drm_crtc *crtc; + unsigned long line_time_us; + int clock; + int line_count, line_size; + int small, large; + int entries; + + if (!latency_ns) { + *sprite_wm = 0; + return false; + } + + crtc = intel_get_crtc_for_plane(dev, plane); + clock = crtc->mode.clock; + if (!clock) { + *sprite_wm = 0; + return false; + } + + line_time_us = (sprite_width * 1000) / clock; + if (!line_time_us) { + *sprite_wm = 0; + return false; + } + + line_count = (latency_ns / line_time_us + 1000) / 1000; + line_size = sprite_width * pixel_size; + + /* Use the minimum of the small and 
large buffer method for primary */ + small = ((clock * pixel_size / 1000) * latency_ns) / 1000; + large = line_count * line_size; + + entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); + *sprite_wm = entries + display->guard_size; + + return *sprite_wm > 0x3ff ? false : true; +} + +void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, + uint32_t sprite_width, int pixel_size) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ + u32 val; + int sprite_wm, reg; + int ret; + + switch (pipe) { + case 0: + reg = WM0_PIPEA_ILK; + break; + case 1: + reg = WM0_PIPEB_ILK; + break; + case 2: + reg = WM0_PIPEC_IVB; + break; + default: + return; /* bad pipe */ + } + + ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size, + &sandybridge_display_wm_info, + latency, &sprite_wm); + if (!ret) { + DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n", + pipe); + return; + } + + val = I915_READ(reg); + val &= ~WM0_PIPE_SPRITE_MASK; + I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT)); + DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm); + + + ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, + pixel_size, + &sandybridge_display_srwm_info, + SNB_READ_WM1_LATENCY() * 500, + &sprite_wm); + if (!ret) { + DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n", + pipe); + return; + } + I915_WRITE(WM1S_LP_ILK, sprite_wm); + + /* Only IVB has two more LP watermarks for sprite */ + if (!IS_IVYBRIDGE(dev)) + return; + + ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, + pixel_size, + &sandybridge_display_srwm_info, + SNB_READ_WM2_LATENCY() * 500, + &sprite_wm); + if (!ret) { + DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n", + pipe); + return; + } + I915_WRITE(WM2S_LP_IVB, sprite_wm); + + ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, + pixel_size, + &sandybridge_display_srwm_info, + SNB_READ_WM3_LATENCY() * 500, + &sprite_wm); + if (!ret) { + DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n", + pipe); + return; + } + I915_WRITE(WM3S_LP_IVB, sprite_wm); +} + +/** + * intel_update_watermarks - update FIFO watermark values based on current modes + * + * Calculate watermark values for the various WM regs based on current mode + * and plane configuration. + * + * There are several cases to deal with here: + * - normal (i.e. non-self-refresh) + * - self-refresh (SR) mode + * - lines are large relative to FIFO size (buffer can hold up to 2) + * - lines are small relative to FIFO size (buffer can hold more than 2 + * lines), so need to account for TLB latency + * + * The normal calculation is: + * watermark = dotclock * bytes per pixel * latency + * where latency is platform & configuration dependent (we assume pessimal + * values here). + * + * The SR calculation is: + * watermark = (trunc(latency/line time)+1) * surface width * + * bytes per pixel + * where + * line time = htotal / dotclock + * surface width = hdisplay for normal plane and 64 for cursor + * and latency is assumed to be high, as above. + * + * The final value programmed to the register should always be rounded up, + * and include an extra 2 entries to account for clock crossings. + * + * We don't use the sprite, so we can ignore that. And on Crestline we have + * to set the non-SR watermarks to 8. 
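The normal and self-refresh formulas summarized in the comment above can be exercised with a short, self-contained calculation. The sketch below mirrors intel_calculate_wm() and the i965/i9xx self-refresh estimate from this file; every input value is made up purely for illustration and does not come from any platform table in the driver.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Illustrative inputs only (not taken from any watermark table). */
	long clock_khz  = 148500;	/* pixel clock for a 1080p-like mode */
	long pixel_size = 4;		/* bytes per pixel */
	long latency_ns = 5000;		/* assumed memory latency */
	long fifo_size  = 512;		/* FIFO entries available */
	long cacheline  = 64;		/* bytes per FIFO entry */
	long guard      = 8;		/* guard entries */

	/* Normal method, as in intel_calculate_wm(): bytes fetched during
	 * the latency window, converted to FIFO entries. */
	long bytes   = ((clock_khz / 1000) * pixel_size * latency_ns) / 1000;
	long entries = DIV_ROUND_UP(bytes, cacheline);
	long wm      = fifo_size - (entries + guard);

	printf("entries required %ld, watermark level %ld\n", entries, wm);

	/* Self-refresh method: (trunc(latency / line time) + 1) lines of the
	 * surface, as in the i965/i9xx SR paths (latency in ns, line time in
	 * us, hence the +1000 / 1000 step). */
	long htotal = 2200, hdisplay = 1920;
	long sr_latency_ns = 12000;
	long line_time_us  = (htotal * 1000) / clock_khz;
	long lines = (sr_latency_ns / line_time_us + 1000) / 1000;
	long sr_entries = DIV_ROUND_UP(lines * hdisplay * pixel_size,
				       cacheline);

	printf("SR lines %ld, SR entries %ld\n", lines, sr_entries);
	return 0;
}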
+ */ +void intel_update_watermarks(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->display.update_wm) + dev_priv->display.update_wm(dev); +} + +void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, + uint32_t sprite_width, int pixel_size) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->display.update_sprite_wm) + dev_priv->display.update_sprite_wm(dev, pipe, sprite_width, + pixel_size); +} + -- cgit v1.2.3-59-g8ed1b From f6750b3cc6e9284f373a2fd155ec0bba38d02ad0 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Wed, 18 Apr 2012 11:51:14 -0300 Subject: drm/i915: fix line breaks in intel_pm The previous patch had way too long lines, this fixes them to fit into a reasonable screen space. Signed-off-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index b208165157b9..c5bc4c456baa 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -28,11 +28,15 @@ #include "i915_drv.h" #include "intel_drv.h" -/* FBC, or Frame Buffer Compression, is a technique employed to compress the framebuffer contents in-memory, aiming at reducing the required bandwidth during in-memory transfers and, therefore, reduce the power packet. +/* FBC, or Frame Buffer Compression, is a technique employed to compress the + * framebuffer contents in-memory, aiming at reducing the required bandwidth + * during in-memory transfers and, therefore, reduce the power packet. * - * The benefits of FBC are mostly visible with solid backgrounds and variation-less patterns. + * The benefits of FBC are mostly visible with solid backgrounds and + * variation-less patterns. * - * FBC-related functionality can be enabled by the means of the i915.i915_enable_fbc parameter + * FBC-related functionality can be enabled by the means of the + * i915.i915_enable_fbc parameter */ void i8xx_disable_fbc(struct drm_device *dev) -- cgit v1.2.3-59-g8ed1b From 2b4e57bd7a6a855dd1229f8cfbbdebfbc3f933be Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Wed, 18 Apr 2012 15:29:23 -0300 Subject: drm/i915: move drps, rps and rc6-related functions to intel_pm This moves DRPS, RPS and RC6-related functionality into intel_pm module. It also removes the linux/cpufreq.h include from intel_display, as its only user was the GPU turbo-related functionality in Gen6+ code path. v2: rebase on top of latest drm-intel-next-queued adding the bits that shifted around since the last patch. 
Acked-by: Jesse Barnes Acked-by: Ben Widawsky Signed-off-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 513 ----------------------------------- drivers/gpu/drm/i915/intel_drv.h | 1 + drivers/gpu/drm/i915/intel_pm.c | 513 +++++++++++++++++++++++++++++++++++ 3 files changed, 514 insertions(+), 513 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 03c015c3adb3..d3982e9c6ff6 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -25,7 +25,6 @@ */ #include -#include #include #include #include @@ -6352,177 +6351,6 @@ static const struct drm_mode_config_funcs intel_mode_funcs = { .output_poll_changed = intel_fb_output_poll_changed, }; -static struct drm_i915_gem_object * -intel_alloc_context_page(struct drm_device *dev) -{ - struct drm_i915_gem_object *ctx; - int ret; - - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - - ctx = i915_gem_alloc_object(dev, 4096); - if (!ctx) { - DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); - return NULL; - } - - ret = i915_gem_object_pin(ctx, 4096, true); - if (ret) { - DRM_ERROR("failed to pin power context: %d\n", ret); - goto err_unref; - } - - ret = i915_gem_object_set_to_gtt_domain(ctx, 1); - if (ret) { - DRM_ERROR("failed to set-domain on power context: %d\n", ret); - goto err_unpin; - } - - return ctx; - -err_unpin: - i915_gem_object_unpin(ctx); -err_unref: - drm_gem_object_unreference(&ctx->base); - mutex_unlock(&dev->struct_mutex); - return NULL; -} - -bool ironlake_set_drps(struct drm_device *dev, u8 val) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u16 rgvswctl; - - rgvswctl = I915_READ16(MEMSWCTL); - if (rgvswctl & MEMCTL_CMD_STS) { - DRM_DEBUG("gpu busy, RCS change rejected\n"); - return false; /* still busy with another command */ - } - - rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | - (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; - I915_WRITE16(MEMSWCTL, rgvswctl); - POSTING_READ16(MEMSWCTL); - - rgvswctl |= MEMCTL_CMD_STS; - I915_WRITE16(MEMSWCTL, rgvswctl); - - return true; -} - -void ironlake_enable_drps(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 rgvmodectl = I915_READ(MEMMODECTL); - u8 fmax, fmin, fstart, vstart; - - /* Enable temp reporting */ - I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); - I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); - - /* 100ms RC evaluation intervals */ - I915_WRITE(RCUPEI, 100000); - I915_WRITE(RCDNEI, 100000); - - /* Set max/min thresholds to 90ms and 80ms respectively */ - I915_WRITE(RCBMAXAVG, 90000); - I915_WRITE(RCBMINAVG, 80000); - - I915_WRITE(MEMIHYST, 1); - - /* Set up min, max, and cur for interrupt handling */ - fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; - fmin = (rgvmodectl & MEMMODE_FMIN_MASK); - fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> - MEMMODE_FSTART_SHIFT; - - vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> - PXVFREQ_PX_SHIFT; - - dev_priv->fmax = fmax; /* IPS callback will increase this */ - dev_priv->fstart = fstart; - - dev_priv->max_delay = fstart; - dev_priv->min_delay = fmin; - dev_priv->cur_delay = fstart; - - DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", - fmax, fmin, fstart); - - I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); - - /* - * Interrupts will be enabled in ironlake_irq_postinstall - */ - - I915_WRITE(VIDSTART, vstart); - POSTING_READ(VIDSTART); - - rgvmodectl |= 
MEMMODE_SWMODE_EN; - I915_WRITE(MEMMODECTL, rgvmodectl); - - if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) - DRM_ERROR("stuck trying to change perf mode\n"); - msleep(1); - - ironlake_set_drps(dev, fstart); - - dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) + - I915_READ(0x112e0); - dev_priv->last_time1 = jiffies_to_msecs(jiffies); - dev_priv->last_count2 = I915_READ(0x112f4); - getrawmonotonic(&dev_priv->last_time2); -} - -void ironlake_disable_drps(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u16 rgvswctl = I915_READ16(MEMSWCTL); - - /* Ack interrupts, disable EFC interrupt */ - I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); - I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG); - I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT); - I915_WRITE(DEIIR, DE_PCU_EVENT); - I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); - - /* Go back to the starting frequency */ - ironlake_set_drps(dev, dev_priv->fstart); - msleep(1); - rgvswctl |= MEMCTL_CMD_STS; - I915_WRITE(MEMSWCTL, rgvswctl); - msleep(1); - -} - -void gen6_set_rps(struct drm_device *dev, u8 val) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 swreq; - - swreq = (val & 0x3ff) << 25; - I915_WRITE(GEN6_RPNSWREQ, swreq); -} - -void gen6_disable_rps(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - I915_WRITE(GEN6_RPNSWREQ, 1 << 31); - I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); - I915_WRITE(GEN6_PMIER, 0); - /* Complete PM interrupt masking here doesn't race with the rps work - * item again unmasking PM interrupts because that is using a different - * register (PMIMR) to mask PM interrupts. The only risk is in leaving - * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */ - - spin_lock_irq(&dev_priv->rps_lock); - dev_priv->pm_iir = 0; - spin_unlock_irq(&dev_priv->rps_lock); - - I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); -} - static unsigned long intel_pxfreq(u32 vidfreq) { unsigned long freq; @@ -6609,232 +6437,6 @@ void intel_init_emon(struct drm_device *dev) dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); } -int intel_enable_rc6(const struct drm_device *dev) -{ - /* - * Respect the kernel parameter if it is set - */ - if (i915_enable_rc6 >= 0) - return i915_enable_rc6; - - /* - * Disable RC6 on Ironlake - */ - if (INTEL_INFO(dev)->gen == 5) - return 0; - - /* Sorry Haswell, no RC6 for you for now. */ - if (IS_HASWELL(dev)) - return 0; - - /* - * Disable rc6 on Sandybridge - */ - if (INTEL_INFO(dev)->gen == 6) { - DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n"); - return INTEL_RC6_ENABLE; - } - DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n"); - return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); -} - -void gen6_enable_rps(struct drm_i915_private *dev_priv) -{ - u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); - u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); - u32 pcu_mbox, rc6_mask = 0; - u32 gtfifodbg; - int cur_freq, min_freq, max_freq; - int rc6_mode; - int i; - - /* Here begins a magic sequence of register writes to enable - * auto-downclocking. - * - * Perhaps there might be some value in exposing these to - * userspace... 
- */ - I915_WRITE(GEN6_RC_STATE, 0); - mutex_lock(&dev_priv->dev->struct_mutex); - - /* Clear the DBG now so we don't confuse earlier errors */ - if ((gtfifodbg = I915_READ(GTFIFODBG))) { - DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg); - I915_WRITE(GTFIFODBG, gtfifodbg); - } - - gen6_gt_force_wake_get(dev_priv); - - /* disable the counters and set deterministic thresholds */ - I915_WRITE(GEN6_RC_CONTROL, 0); - - I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); - I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); - I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); - I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); - I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); - - for (i = 0; i < I915_NUM_RINGS; i++) - I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10); - - I915_WRITE(GEN6_RC_SLEEP, 0); - I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); - I915_WRITE(GEN6_RC6_THRESHOLD, 50000); - I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); - I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ - - rc6_mode = intel_enable_rc6(dev_priv->dev); - if (rc6_mode & INTEL_RC6_ENABLE) - rc6_mask |= GEN6_RC_CTL_RC6_ENABLE; - - if (rc6_mode & INTEL_RC6p_ENABLE) - rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; - - if (rc6_mode & INTEL_RC6pp_ENABLE) - rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; - - DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", - (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off", - (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off", - (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off"); - - I915_WRITE(GEN6_RC_CONTROL, - rc6_mask | - GEN6_RC_CTL_EI_MODE(1) | - GEN6_RC_CTL_HW_ENABLE); - - I915_WRITE(GEN6_RPNSWREQ, - GEN6_FREQUENCY(10) | - GEN6_OFFSET(0) | - GEN6_AGGRESSIVE_TURBO); - I915_WRITE(GEN6_RC_VIDEO_FREQ, - GEN6_FREQUENCY(12)); - - I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); - I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, - 18 << 24 | - 6 << 16); - I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000); - I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000); - I915_WRITE(GEN6_RP_UP_EI, 100000); - I915_WRITE(GEN6_RP_DOWN_EI, 5000000); - I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); - I915_WRITE(GEN6_RP_CONTROL, - GEN6_RP_MEDIA_TURBO | - GEN6_RP_MEDIA_HW_MODE | - GEN6_RP_MEDIA_IS_GFX | - GEN6_RP_ENABLE | - GEN6_RP_UP_BUSY_AVG | - GEN6_RP_DOWN_IDLE_CONT); - - if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, - 500)) - DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); - - I915_WRITE(GEN6_PCODE_DATA, 0); - I915_WRITE(GEN6_PCODE_MAILBOX, - GEN6_PCODE_READY | - GEN6_PCODE_WRITE_MIN_FREQ_TABLE); - if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, - 500)) - DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); - - min_freq = (rp_state_cap & 0xff0000) >> 16; - max_freq = rp_state_cap & 0xff; - cur_freq = (gt_perf_status & 0xff00) >> 8; - - /* Check for overclock support */ - if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, - 500)) - DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); - I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS); - pcu_mbox = I915_READ(GEN6_PCODE_DATA); - if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, - 500)) - DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); - if (pcu_mbox & (1<<31)) { /* OC supported */ - max_freq = pcu_mbox & 0xff; - DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); - } - - /* In units of 100MHz */ - dev_priv->max_delay = max_freq; - dev_priv->min_delay = min_freq; - dev_priv->cur_delay = cur_freq; - - /* requires MSI enabled */ - 
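The GEN6_PCODE_MAILBOX handshake used above for GEN6_PCODE_WRITE_MIN_FREQ_TABLE follows a fixed pattern: wait for READY to clear, load GEN6_PCODE_DATA, issue the command with READY set, then wait for the PCU to clear READY again. The helper below only restates that sequence for readability; it is not a function in the driver, and it assumes the I915_READ/I915_WRITE accessors and the wait_for() macro used throughout this patch.

static int pcode_mailbox_write(struct drm_i915_private *dev_priv,
			       u32 cmd, u32 data)
{
	/* The mailbox must be idle (READY clear) before a new request. */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		return -EBUSY;

	/* Load the payload, then raise READY together with the command. */
	I915_WRITE(GEN6_PCODE_DATA, data);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | cmd);

	/* The PCU clears READY once it has consumed the request. */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		return -ETIMEDOUT;

	return 0;
}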
I915_WRITE(GEN6_PMIER, - GEN6_PM_MBOX_EVENT | - GEN6_PM_THERMAL_EVENT | - GEN6_PM_RP_DOWN_TIMEOUT | - GEN6_PM_RP_UP_THRESHOLD | - GEN6_PM_RP_DOWN_THRESHOLD | - GEN6_PM_RP_UP_EI_EXPIRED | - GEN6_PM_RP_DOWN_EI_EXPIRED); - spin_lock_irq(&dev_priv->rps_lock); - WARN_ON(dev_priv->pm_iir != 0); - I915_WRITE(GEN6_PMIMR, 0); - spin_unlock_irq(&dev_priv->rps_lock); - /* enable all PM interrupts */ - I915_WRITE(GEN6_PMINTRMSK, 0); - - gen6_gt_force_wake_put(dev_priv); - mutex_unlock(&dev_priv->dev->struct_mutex); -} - -void gen6_update_ring_freq(struct drm_i915_private *dev_priv) -{ - int min_freq = 15; - int gpu_freq, ia_freq, max_ia_freq; - int scaling_factor = 180; - - max_ia_freq = cpufreq_quick_get_max(0); - /* - * Default to measured freq if none found, PCU will ensure we don't go - * over - */ - if (!max_ia_freq) - max_ia_freq = tsc_khz; - - /* Convert from kHz to MHz */ - max_ia_freq /= 1000; - - mutex_lock(&dev_priv->dev->struct_mutex); - - /* - * For each potential GPU frequency, load a ring frequency we'd like - * to use for memory access. We do this by specifying the IA frequency - * the PCU should use as a reference to determine the ring frequency. - */ - for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay; - gpu_freq--) { - int diff = dev_priv->max_delay - gpu_freq; - - /* - * For GPU frequencies less than 750MHz, just use the lowest - * ring freq. - */ - if (gpu_freq < min_freq) - ia_freq = 800; - else - ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); - ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); - - I915_WRITE(GEN6_PCODE_DATA, - (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) | - gpu_freq); - I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | - GEN6_PCODE_WRITE_MIN_FREQ_TABLE); - if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & - GEN6_PCODE_READY) == 0, 10)) { - DRM_ERROR("pcode write of freq table timed out\n"); - continue; - } - } - - mutex_unlock(&dev_priv->dev->struct_mutex); -} - static void ironlake_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -7178,121 +6780,6 @@ static void cpt_init_clock_gating(struct drm_device *dev) I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS); } -static void ironlake_teardown_rc6(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (dev_priv->renderctx) { - i915_gem_object_unpin(dev_priv->renderctx); - drm_gem_object_unreference(&dev_priv->renderctx->base); - dev_priv->renderctx = NULL; - } - - if (dev_priv->pwrctx) { - i915_gem_object_unpin(dev_priv->pwrctx); - drm_gem_object_unreference(&dev_priv->pwrctx->base); - dev_priv->pwrctx = NULL; - } -} - -static void ironlake_disable_rc6(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (I915_READ(PWRCTXA)) { - /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ - I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); - wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), - 50); - - I915_WRITE(PWRCTXA, 0); - POSTING_READ(PWRCTXA); - - I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); - POSTING_READ(RSTDBYCTL); - } - - ironlake_teardown_rc6(dev); -} - -static int ironlake_setup_rc6(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (dev_priv->renderctx == NULL) - dev_priv->renderctx = intel_alloc_context_page(dev); - if (!dev_priv->renderctx) - return -ENOMEM; - - if (dev_priv->pwrctx == NULL) - dev_priv->pwrctx = intel_alloc_context_page(dev); - if (!dev_priv->pwrctx) { - 
ironlake_teardown_rc6(dev); - return -ENOMEM; - } - - return 0; -} - -void ironlake_enable_rc6(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int ret; - - /* rc6 disabled by default due to repeated reports of hanging during - * boot and resume. - */ - if (!intel_enable_rc6(dev)) - return; - - mutex_lock(&dev->struct_mutex); - ret = ironlake_setup_rc6(dev); - if (ret) { - mutex_unlock(&dev->struct_mutex); - return; - } - - /* - * GPU can automatically power down the render unit if given a page - * to save state. - */ - ret = BEGIN_LP_RING(6); - if (ret) { - ironlake_teardown_rc6(dev); - mutex_unlock(&dev->struct_mutex); - return; - } - - OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); - OUT_RING(MI_SET_CONTEXT); - OUT_RING(dev_priv->renderctx->gtt_offset | - MI_MM_SPACE_GTT | - MI_SAVE_EXT_STATE_EN | - MI_RESTORE_EXT_STATE_EN | - MI_RESTORE_INHIBIT); - OUT_RING(MI_SUSPEND_FLUSH); - OUT_RING(MI_NOOP); - OUT_RING(MI_FLUSH); - ADVANCE_LP_RING(); - - /* - * Wait for the command parser to advance past MI_SET_CONTEXT. The HW - * does an implicit flush, combined with MI_FLUSH above, it should be - * safe to assume that renderctx is valid - */ - ret = intel_wait_ring_idle(LP_RING(dev_priv)); - if (ret) { - DRM_ERROR("failed to enable ironlake power power savings\n"); - ironlake_teardown_rc6(dev); - mutex_unlock(&dev->struct_mutex); - return; - } - - I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN); - I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); - mutex_unlock(&dev->struct_mutex); -} - void intel_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index f1e27ce18f8a..c87f29a2aeba 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -396,6 +396,7 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, int regno); extern void intel_enable_clock_gating(struct drm_device *dev); +extern void ironlake_disable_rc6(struct drm_device *dev); extern void ironlake_enable_drps(struct drm_device *dev); extern void ironlake_disable_drps(struct drm_device *dev); extern void gen6_enable_rps(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index c5bc4c456baa..2f45de3339bf 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -25,6 +25,7 @@ * */ +#include #include "i915_drv.h" #include "intel_drv.h" @@ -1979,3 +1980,515 @@ void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, pixel_size); } +static struct drm_i915_gem_object * +intel_alloc_context_page(struct drm_device *dev) +{ + struct drm_i915_gem_object *ctx; + int ret; + + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + + ctx = i915_gem_alloc_object(dev, 4096); + if (!ctx) { + DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); + return NULL; + } + + ret = i915_gem_object_pin(ctx, 4096, true); + if (ret) { + DRM_ERROR("failed to pin power context: %d\n", ret); + goto err_unref; + } + + ret = i915_gem_object_set_to_gtt_domain(ctx, 1); + if (ret) { + DRM_ERROR("failed to set-domain on power context: %d\n", ret); + goto err_unpin; + } + + return ctx; + +err_unpin: + i915_gem_object_unpin(ctx); +err_unref: + drm_gem_object_unreference(&ctx->base); + mutex_unlock(&dev->struct_mutex); + return NULL; +} + 
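intel_enable_rc6(), which appears in both the removed and the added hunks of this patch, reduces to a small policy table: the i915_enable_rc6 module parameter wins when set, Ironlake and Haswell get no RC6, Sandybridge gets plain RC6 only, and everything else gets RC6 plus deep RC6. The self-contained sketch below restates that decision; the bit values are stand-ins, since the real INTEL_RC6*_ENABLE defines are not reproduced in this patch.

#include <stdio.h>

/* Stand-in flag values for illustration only. */
#define RC6_ENABLE   (1 << 0)
#define RC6p_ENABLE  (1 << 1)

/* Same decision tree as intel_enable_rc6(): 'param' plays the role of the
 * i915_enable_rc6 module parameter, 'gen' the device generation. */
static int rc6_policy(int param, int gen, int is_haswell)
{
	if (param >= 0)
		return param;		/* kernel parameter overrides */
	if (gen == 5)
		return 0;		/* no RC6 on Ironlake */
	if (is_haswell)
		return 0;		/* not enabled on Haswell yet */
	if (gen == 6)
		return RC6_ENABLE;	/* Sandybridge: deep RC6 disabled */
	return RC6_ENABLE | RC6p_ENABLE;
}

int main(void)
{
	printf("gen6 auto:  %d\n", rc6_policy(-1, 6, 0));
	printf("gen7 auto:  %d\n", rc6_policy(-1, 7, 0));
	printf("forced off: %d\n", rc6_policy(0, 7, 0));
	return 0;
}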
+bool ironlake_set_drps(struct drm_device *dev, u8 val) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u16 rgvswctl; + + rgvswctl = I915_READ16(MEMSWCTL); + if (rgvswctl & MEMCTL_CMD_STS) { + DRM_DEBUG("gpu busy, RCS change rejected\n"); + return false; /* still busy with another command */ + } + + rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | + (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; + I915_WRITE16(MEMSWCTL, rgvswctl); + POSTING_READ16(MEMSWCTL); + + rgvswctl |= MEMCTL_CMD_STS; + I915_WRITE16(MEMSWCTL, rgvswctl); + + return true; +} + +void ironlake_enable_drps(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 rgvmodectl = I915_READ(MEMMODECTL); + u8 fmax, fmin, fstart, vstart; + + /* Enable temp reporting */ + I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); + I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); + + /* 100ms RC evaluation intervals */ + I915_WRITE(RCUPEI, 100000); + I915_WRITE(RCDNEI, 100000); + + /* Set max/min thresholds to 90ms and 80ms respectively */ + I915_WRITE(RCBMAXAVG, 90000); + I915_WRITE(RCBMINAVG, 80000); + + I915_WRITE(MEMIHYST, 1); + + /* Set up min, max, and cur for interrupt handling */ + fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; + fmin = (rgvmodectl & MEMMODE_FMIN_MASK); + fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> + MEMMODE_FSTART_SHIFT; + + vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> + PXVFREQ_PX_SHIFT; + + dev_priv->fmax = fmax; /* IPS callback will increase this */ + dev_priv->fstart = fstart; + + dev_priv->max_delay = fstart; + dev_priv->min_delay = fmin; + dev_priv->cur_delay = fstart; + + DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", + fmax, fmin, fstart); + + I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); + + /* + * Interrupts will be enabled in ironlake_irq_postinstall + */ + + I915_WRITE(VIDSTART, vstart); + POSTING_READ(VIDSTART); + + rgvmodectl |= MEMMODE_SWMODE_EN; + I915_WRITE(MEMMODECTL, rgvmodectl); + + if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) + DRM_ERROR("stuck trying to change perf mode\n"); + msleep(1); + + ironlake_set_drps(dev, fstart); + + dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) + + I915_READ(0x112e0); + dev_priv->last_time1 = jiffies_to_msecs(jiffies); + dev_priv->last_count2 = I915_READ(0x112f4); + getrawmonotonic(&dev_priv->last_time2); +} + +void ironlake_disable_drps(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u16 rgvswctl = I915_READ16(MEMSWCTL); + + /* Ack interrupts, disable EFC interrupt */ + I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); + I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG); + I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT); + I915_WRITE(DEIIR, DE_PCU_EVENT); + I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); + + /* Go back to the starting frequency */ + ironlake_set_drps(dev, dev_priv->fstart); + msleep(1); + rgvswctl |= MEMCTL_CMD_STS; + I915_WRITE(MEMSWCTL, rgvswctl); + msleep(1); + +} + +void gen6_set_rps(struct drm_device *dev, u8 val) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 swreq; + + swreq = (val & 0x3ff) << 25; + I915_WRITE(GEN6_RPNSWREQ, swreq); +} + +void gen6_disable_rps(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE(GEN6_RPNSWREQ, 1 << 31); + I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); + I915_WRITE(GEN6_PMIER, 0); + /* Complete PM interrupt masking here doesn't race with the rps work + * item 
again unmasking PM interrupts because that is using a different + * register (PMIMR) to mask PM interrupts. The only risk is in leaving + * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */ + + spin_lock_irq(&dev_priv->rps_lock); + dev_priv->pm_iir = 0; + spin_unlock_irq(&dev_priv->rps_lock); + + I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); +} + +int intel_enable_rc6(const struct drm_device *dev) +{ + /* + * Respect the kernel parameter if it is set + */ + if (i915_enable_rc6 >= 0) + return i915_enable_rc6; + + /* + * Disable RC6 on Ironlake + */ + if (INTEL_INFO(dev)->gen == 5) + return 0; + + /* Sorry Haswell, no RC6 for you for now. */ + if (IS_HASWELL(dev)) + return 0; + + /* + * Disable rc6 on Sandybridge + */ + if (INTEL_INFO(dev)->gen == 6) { + DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n"); + return INTEL_RC6_ENABLE; + } + DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n"); + return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); +} + +void gen6_enable_rps(struct drm_i915_private *dev_priv) +{ + u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); + u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); + u32 pcu_mbox, rc6_mask = 0; + u32 gtfifodbg; + int cur_freq, min_freq, max_freq; + int rc6_mode; + int i; + + /* Here begins a magic sequence of register writes to enable + * auto-downclocking. + * + * Perhaps there might be some value in exposing these to + * userspace... + */ + I915_WRITE(GEN6_RC_STATE, 0); + mutex_lock(&dev_priv->dev->struct_mutex); + + /* Clear the DBG now so we don't confuse earlier errors */ + if ((gtfifodbg = I915_READ(GTFIFODBG))) { + DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg); + I915_WRITE(GTFIFODBG, gtfifodbg); + } + + gen6_gt_force_wake_get(dev_priv); + + /* disable the counters and set deterministic thresholds */ + I915_WRITE(GEN6_RC_CONTROL, 0); + + I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); + I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); + I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); + I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); + I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); + + for (i = 0; i < I915_NUM_RINGS; i++) + I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10); + + I915_WRITE(GEN6_RC_SLEEP, 0); + I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); + I915_WRITE(GEN6_RC6_THRESHOLD, 50000); + I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); + I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ + + rc6_mode = intel_enable_rc6(dev_priv->dev); + if (rc6_mode & INTEL_RC6_ENABLE) + rc6_mask |= GEN6_RC_CTL_RC6_ENABLE; + + if (rc6_mode & INTEL_RC6p_ENABLE) + rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; + + if (rc6_mode & INTEL_RC6pp_ENABLE) + rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; + + DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", + (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off", + (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off", + (rc6_mode & INTEL_RC6pp_ENABLE) ? 
"on" : "off"); + + I915_WRITE(GEN6_RC_CONTROL, + rc6_mask | + GEN6_RC_CTL_EI_MODE(1) | + GEN6_RC_CTL_HW_ENABLE); + + I915_WRITE(GEN6_RPNSWREQ, + GEN6_FREQUENCY(10) | + GEN6_OFFSET(0) | + GEN6_AGGRESSIVE_TURBO); + I915_WRITE(GEN6_RC_VIDEO_FREQ, + GEN6_FREQUENCY(12)); + + I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); + I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, + 18 << 24 | + 6 << 16); + I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000); + I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000); + I915_WRITE(GEN6_RP_UP_EI, 100000); + I915_WRITE(GEN6_RP_DOWN_EI, 5000000); + I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); + I915_WRITE(GEN6_RP_CONTROL, + GEN6_RP_MEDIA_TURBO | + GEN6_RP_MEDIA_HW_MODE | + GEN6_RP_MEDIA_IS_GFX | + GEN6_RP_ENABLE | + GEN6_RP_UP_BUSY_AVG | + GEN6_RP_DOWN_IDLE_CONT); + + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, + 500)) + DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); + + I915_WRITE(GEN6_PCODE_DATA, 0); + I915_WRITE(GEN6_PCODE_MAILBOX, + GEN6_PCODE_READY | + GEN6_PCODE_WRITE_MIN_FREQ_TABLE); + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, + 500)) + DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); + + min_freq = (rp_state_cap & 0xff0000) >> 16; + max_freq = rp_state_cap & 0xff; + cur_freq = (gt_perf_status & 0xff00) >> 8; + + /* Check for overclock support */ + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, + 500)) + DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); + I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS); + pcu_mbox = I915_READ(GEN6_PCODE_DATA); + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, + 500)) + DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); + if (pcu_mbox & (1<<31)) { /* OC supported */ + max_freq = pcu_mbox & 0xff; + DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); + } + + /* In units of 100MHz */ + dev_priv->max_delay = max_freq; + dev_priv->min_delay = min_freq; + dev_priv->cur_delay = cur_freq; + + /* requires MSI enabled */ + I915_WRITE(GEN6_PMIER, + GEN6_PM_MBOX_EVENT | + GEN6_PM_THERMAL_EVENT | + GEN6_PM_RP_DOWN_TIMEOUT | + GEN6_PM_RP_UP_THRESHOLD | + GEN6_PM_RP_DOWN_THRESHOLD | + GEN6_PM_RP_UP_EI_EXPIRED | + GEN6_PM_RP_DOWN_EI_EXPIRED); + spin_lock_irq(&dev_priv->rps_lock); + WARN_ON(dev_priv->pm_iir != 0); + I915_WRITE(GEN6_PMIMR, 0); + spin_unlock_irq(&dev_priv->rps_lock); + /* enable all PM interrupts */ + I915_WRITE(GEN6_PMINTRMSK, 0); + + gen6_gt_force_wake_put(dev_priv); + mutex_unlock(&dev_priv->dev->struct_mutex); +} + +void gen6_update_ring_freq(struct drm_i915_private *dev_priv) +{ + int min_freq = 15; + int gpu_freq, ia_freq, max_ia_freq; + int scaling_factor = 180; + + max_ia_freq = cpufreq_quick_get_max(0); + /* + * Default to measured freq if none found, PCU will ensure we don't go + * over + */ + if (!max_ia_freq) + max_ia_freq = tsc_khz; + + /* Convert from kHz to MHz */ + max_ia_freq /= 1000; + + mutex_lock(&dev_priv->dev->struct_mutex); + + /* + * For each potential GPU frequency, load a ring frequency we'd like + * to use for memory access. We do this by specifying the IA frequency + * the PCU should use as a reference to determine the ring frequency. + */ + for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay; + gpu_freq--) { + int diff = dev_priv->max_delay - gpu_freq; + + /* + * For GPU frequencies less than 750MHz, just use the lowest + * ring freq. 
+ */ + if (gpu_freq < min_freq) + ia_freq = 800; + else + ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); + ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); + + I915_WRITE(GEN6_PCODE_DATA, + (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) | + gpu_freq); + I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | + GEN6_PCODE_WRITE_MIN_FREQ_TABLE); + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & + GEN6_PCODE_READY) == 0, 10)) { + DRM_ERROR("pcode write of freq table timed out\n"); + continue; + } + } + + mutex_unlock(&dev_priv->dev->struct_mutex); +} + +static void ironlake_teardown_rc6(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->renderctx) { + i915_gem_object_unpin(dev_priv->renderctx); + drm_gem_object_unreference(&dev_priv->renderctx->base); + dev_priv->renderctx = NULL; + } + + if (dev_priv->pwrctx) { + i915_gem_object_unpin(dev_priv->pwrctx); + drm_gem_object_unreference(&dev_priv->pwrctx->base); + dev_priv->pwrctx = NULL; + } +} + +void ironlake_disable_rc6(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (I915_READ(PWRCTXA)) { + /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ + I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); + wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), + 50); + + I915_WRITE(PWRCTXA, 0); + POSTING_READ(PWRCTXA); + + I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); + POSTING_READ(RSTDBYCTL); + } + + ironlake_teardown_rc6(dev); +} + +static int ironlake_setup_rc6(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->renderctx == NULL) + dev_priv->renderctx = intel_alloc_context_page(dev); + if (!dev_priv->renderctx) + return -ENOMEM; + + if (dev_priv->pwrctx == NULL) + dev_priv->pwrctx = intel_alloc_context_page(dev); + if (!dev_priv->pwrctx) { + ironlake_teardown_rc6(dev); + return -ENOMEM; + } + + return 0; +} + +void ironlake_enable_rc6(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + /* rc6 disabled by default due to repeated reports of hanging during + * boot and resume. + */ + if (!intel_enable_rc6(dev)) + return; + + mutex_lock(&dev->struct_mutex); + ret = ironlake_setup_rc6(dev); + if (ret) { + mutex_unlock(&dev->struct_mutex); + return; + } + + /* + * GPU can automatically power down the render unit if given a page + * to save state. + */ + ret = BEGIN_LP_RING(6); + if (ret) { + ironlake_teardown_rc6(dev); + mutex_unlock(&dev->struct_mutex); + return; + } + + OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); + OUT_RING(MI_SET_CONTEXT); + OUT_RING(dev_priv->renderctx->gtt_offset | + MI_MM_SPACE_GTT | + MI_SAVE_EXT_STATE_EN | + MI_RESTORE_EXT_STATE_EN | + MI_RESTORE_INHIBIT); + OUT_RING(MI_SUSPEND_FLUSH); + OUT_RING(MI_NOOP); + OUT_RING(MI_FLUSH); + ADVANCE_LP_RING(); + + /* + * Wait for the command parser to advance past MI_SET_CONTEXT. 
The HW + * does an implicit flush, combined with MI_FLUSH above, it should be + * safe to assume that renderctx is valid + */ + ret = intel_wait_ring_idle(LP_RING(dev_priv)); + if (ret) { + DRM_ERROR("failed to enable ironlake power power savings\n"); + ironlake_teardown_rc6(dev); + mutex_unlock(&dev->struct_mutex); + return; + } + + I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN); + I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); + mutex_unlock(&dev->struct_mutex); +} + -- cgit v1.2.3-59-g8ed1b From dde18883def89af28bd0c01aff3c7962b82dda18 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Wed, 18 Apr 2012 15:29:24 -0300 Subject: drm/i915: move emon functionality into intel_pm module This moves the Ironlake energy monitoring functionality into intel_pm module. Acked-by: Jesse Barnes Acked-by: Ben Widawsky Signed-off-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 86 ------------------------------------ drivers/gpu/drm/i915/intel_pm.c | 86 ++++++++++++++++++++++++++++++++++++ 2 files changed, 86 insertions(+), 86 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d3982e9c6ff6..4fb1982475d2 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6351,92 +6351,6 @@ static const struct drm_mode_config_funcs intel_mode_funcs = { .output_poll_changed = intel_fb_output_poll_changed, }; -static unsigned long intel_pxfreq(u32 vidfreq) -{ - unsigned long freq; - int div = (vidfreq & 0x3f0000) >> 16; - int post = (vidfreq & 0x3000) >> 12; - int pre = (vidfreq & 0x7); - - if (!pre) - return 0; - - freq = ((div * 133333) / ((1<dev_private; - u32 lcfuse; - u8 pxw[16]; - int i; - - /* Disable to program */ - I915_WRITE(ECR, 0); - POSTING_READ(ECR); - - /* Program energy weights for various events */ - I915_WRITE(SDEW, 0x15040d00); - I915_WRITE(CSIEW0, 0x007f0000); - I915_WRITE(CSIEW1, 0x1e220004); - I915_WRITE(CSIEW2, 0x04000004); - - for (i = 0; i < 5; i++) - I915_WRITE(PEW + (i * 4), 0); - for (i = 0; i < 3; i++) - I915_WRITE(DEW + (i * 4), 0); - - /* Program P-state weights to account for frequency power adjustment */ - for (i = 0; i < 16; i++) { - u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4)); - unsigned long freq = intel_pxfreq(pxvidfreq); - unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >> - PXVFREQ_PX_SHIFT; - unsigned long val; - - val = vid * vid; - val *= (freq / 1000); - val *= 255; - val /= (127*127*900); - if (val > 0xff) - DRM_ERROR("bad pxval: %ld\n", val); - pxw[i] = val; - } - /* Render standby states get 0 weight */ - pxw[14] = 0; - pxw[15] = 0; - - for (i = 0; i < 4; i++) { - u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) | - (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]); - I915_WRITE(PXW + (i * 4), val); - } - - /* Adjust magic regs to magic values (more experimental results) */ - I915_WRITE(OGW0, 0); - I915_WRITE(OGW1, 0); - I915_WRITE(EG0, 0x00007f00); - I915_WRITE(EG1, 0x0000000e); - I915_WRITE(EG2, 0x000e0000); - I915_WRITE(EG3, 0x68000300); - I915_WRITE(EG4, 0x42000000); - I915_WRITE(EG5, 0x00140031); - I915_WRITE(EG6, 0); - I915_WRITE(EG7, 0); - - for (i = 0; i < 8; i++) - I915_WRITE(PXWL + (i * 4), 0); - - /* Enable PMON + select events */ - I915_WRITE(ECR, 0x80000019); - - lcfuse = I915_READ(LCFUSE02); - - dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); -} - static void ironlake_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; diff --git 
a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 2f45de3339bf..832130e57731 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2492,3 +2492,89 @@ void ironlake_enable_rc6(struct drm_device *dev) mutex_unlock(&dev->struct_mutex); } +static unsigned long intel_pxfreq(u32 vidfreq) +{ + unsigned long freq; + int div = (vidfreq & 0x3f0000) >> 16; + int post = (vidfreq & 0x3000) >> 12; + int pre = (vidfreq & 0x7); + + if (!pre) + return 0; + + freq = ((div * 133333) / ((1<dev_private; + u32 lcfuse; + u8 pxw[16]; + int i; + + /* Disable to program */ + I915_WRITE(ECR, 0); + POSTING_READ(ECR); + + /* Program energy weights for various events */ + I915_WRITE(SDEW, 0x15040d00); + I915_WRITE(CSIEW0, 0x007f0000); + I915_WRITE(CSIEW1, 0x1e220004); + I915_WRITE(CSIEW2, 0x04000004); + + for (i = 0; i < 5; i++) + I915_WRITE(PEW + (i * 4), 0); + for (i = 0; i < 3; i++) + I915_WRITE(DEW + (i * 4), 0); + + /* Program P-state weights to account for frequency power adjustment */ + for (i = 0; i < 16; i++) { + u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4)); + unsigned long freq = intel_pxfreq(pxvidfreq); + unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >> + PXVFREQ_PX_SHIFT; + unsigned long val; + + val = vid * vid; + val *= (freq / 1000); + val *= 255; + val /= (127*127*900); + if (val > 0xff) + DRM_ERROR("bad pxval: %ld\n", val); + pxw[i] = val; + } + /* Render standby states get 0 weight */ + pxw[14] = 0; + pxw[15] = 0; + + for (i = 0; i < 4; i++) { + u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) | + (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]); + I915_WRITE(PXW + (i * 4), val); + } + + /* Adjust magic regs to magic values (more experimental results) */ + I915_WRITE(OGW0, 0); + I915_WRITE(OGW1, 0); + I915_WRITE(EG0, 0x00007f00); + I915_WRITE(EG1, 0x0000000e); + I915_WRITE(EG2, 0x000e0000); + I915_WRITE(EG3, 0x68000300); + I915_WRITE(EG4, 0x42000000); + I915_WRITE(EG5, 0x00140031); + I915_WRITE(EG6, 0); + I915_WRITE(EG7, 0); + + for (i = 0; i < 8; i++) + I915_WRITE(PXWL + (i * 4), 0); + + /* Enable PMON + select events */ + I915_WRITE(ECR, 0x80000019); + + lcfuse = I915_READ(LCFUSE02); + + dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); +} + -- cgit v1.2.3-59-g8ed1b From 6f1d69b04fcd7ba16791165e8287d95e88bef848 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Wed, 18 Apr 2012 15:29:25 -0300 Subject: drm/i915: move clock gating functionality into intel_pm module This moves the clock gating-related functions into intel_pm module. Also, please note that we do change the function type from static to non-static in this patch for the move, to prevent breaking bisecting with non-working intermediate commit. Those are returned back to static form in the following patch which setups a generic PM initialization function, which was split into a different one to simplify review. v2: rebase on top of latest drm-intel-next-queued to incorporate all the changes that went there meanwhile. 
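[Illustrative aside, not part of the patch: a minimal sketch of the bisect-safe two-step move described above. The function name example_init_clock_gating and its one-line body are placeholders, not symbols from the driver; the real patches below do the same thing with the *_init_clock_gating functions.]

/*
 * Step 1 -- this patch: the definition moves to intel_pm.c and drops
 * "static"; intel_drv.h gains a declaration so intel_display.c, which
 * still owns the caller, keeps linking at every intermediate commit.
 */

/* intel_drv.h */
extern void example_init_clock_gating(struct drm_device *dev);

/* intel_pm.c */
void example_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(WM1_LP_ILK, 0);	/* stands in for the moved register writes */
}

/*
 * Step 2 -- the follow-up patch: the code that picks the function
 * (intel_init_pm) now lives in intel_pm.c as well, so the extern
 * declaration is dropped again and the definition goes back to static.
 */

/* intel_pm.c */
static void example_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(WM1_LP_ILK, 0);	/* unchanged body */
}

Either half of the split builds and links on its own, which is what keeps git bisect landing only on working intermediate commits.
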
Acked-by: Jesse Barnes Acked-by: Ben Widawsky Signed-off-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 355 +---------------------------------- drivers/gpu/drm/i915/intel_drv.h | 16 ++ drivers/gpu/drm/i915/intel_pm.c | 353 ++++++++++++++++++++++++++++++++++ 3 files changed, 370 insertions(+), 354 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 4fb1982475d2..dec67aafbdc6 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1515,7 +1515,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv, * Plane regs are double buffered, going from enabled->disabled needs a * trigger in order to latch. The display address reg provides this. */ -static void intel_flush_display_plane(struct drm_i915_private *dev_priv, +void intel_flush_display_plane(struct drm_i915_private *dev_priv, enum plane plane) { I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); @@ -6351,359 +6351,6 @@ static const struct drm_mode_config_funcs intel_mode_funcs = { .output_poll_changed = intel_fb_output_poll_changed, }; -static void ironlake_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; - - /* Required for FBC */ - dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE | - DPFCRUNIT_CLOCK_GATE_DISABLE | - DPFDUNIT_CLOCK_GATE_DISABLE; - /* Required for CxSR */ - dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; - - I915_WRITE(PCH_3DCGDIS0, - MARIUNIT_CLOCK_GATE_DISABLE | - SVSMUNIT_CLOCK_GATE_DISABLE); - I915_WRITE(PCH_3DCGDIS1, - VFMUNIT_CLOCK_GATE_DISABLE); - - I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); - - /* - * According to the spec the following bits should be set in - * order to enable memory self-refresh - * The bit 22/21 of 0x42004 - * The bit 5 of 0x42020 - * The bit 15 of 0x45000 - */ - I915_WRITE(ILK_DISPLAY_CHICKEN2, - (I915_READ(ILK_DISPLAY_CHICKEN2) | - ILK_DPARB_GATE | ILK_VSDPFD_FULL)); - I915_WRITE(ILK_DSPCLK_GATE, - (I915_READ(ILK_DSPCLK_GATE) | - ILK_DPARB_CLK_GATE)); - I915_WRITE(DISP_ARB_CTL, - (I915_READ(DISP_ARB_CTL) | - DISP_FBC_WM_DIS)); - I915_WRITE(WM3_LP_ILK, 0); - I915_WRITE(WM2_LP_ILK, 0); - I915_WRITE(WM1_LP_ILK, 0); - - /* - * Based on the document from hardware guys the following bits - * should be set unconditionally in order to enable FBC. - * The bit 22 of 0x42000 - * The bit 22 of 0x42004 - * The bit 7,8,9 of 0x42020. 
- */ - if (IS_IRONLAKE_M(dev)) { - I915_WRITE(ILK_DISPLAY_CHICKEN1, - I915_READ(ILK_DISPLAY_CHICKEN1) | - ILK_FBCQ_DIS); - I915_WRITE(ILK_DISPLAY_CHICKEN2, - I915_READ(ILK_DISPLAY_CHICKEN2) | - ILK_DPARB_GATE); - I915_WRITE(ILK_DSPCLK_GATE, - I915_READ(ILK_DSPCLK_GATE) | - ILK_DPFC_DIS1 | - ILK_DPFC_DIS2 | - ILK_CLK_FBC); - } - - I915_WRITE(ILK_DISPLAY_CHICKEN2, - I915_READ(ILK_DISPLAY_CHICKEN2) | - ILK_ELPIN_409_SELECT); - I915_WRITE(_3D_CHICKEN2, - _3D_CHICKEN2_WM_READ_PIPELINED << 16 | - _3D_CHICKEN2_WM_READ_PIPELINED); -} - -static void gen6_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int pipe; - uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; - - I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); - - I915_WRITE(ILK_DISPLAY_CHICKEN2, - I915_READ(ILK_DISPLAY_CHICKEN2) | - ILK_ELPIN_409_SELECT); - - I915_WRITE(WM3_LP_ILK, 0); - I915_WRITE(WM2_LP_ILK, 0); - I915_WRITE(WM1_LP_ILK, 0); - - /* clear masked bit */ - I915_WRITE(CACHE_MODE_0, - CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT); - - I915_WRITE(GEN6_UCGCTL1, - I915_READ(GEN6_UCGCTL1) | - GEN6_BLBUNIT_CLOCK_GATE_DISABLE | - GEN6_CSUNIT_CLOCK_GATE_DISABLE); - - /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock - * gating disable must be set. Failure to set it results in - * flickering pixels due to Z write ordering failures after - * some amount of runtime in the Mesa "fire" demo, and Unigine - * Sanctuary and Tropics, and apparently anything else with - * alpha test or pixel discard. - * - * According to the spec, bit 11 (RCCUNIT) must also be set, - * but we didn't debug actual testcases to find it out. - */ - I915_WRITE(GEN6_UCGCTL2, - GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | - GEN6_RCCUNIT_CLOCK_GATE_DISABLE); - - /* Bspec says we need to always set all mask bits. */ - I915_WRITE(_3D_CHICKEN, (0xFFFF << 16) | - _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL); - - /* - * According to the spec the following bits should be - * set in order to enable memory self-refresh and fbc: - * The bit21 and bit22 of 0x42000 - * The bit21 and bit22 of 0x42004 - * The bit5 and bit7 of 0x42020 - * The bit14 of 0x70180 - * The bit14 of 0x71180 - */ - I915_WRITE(ILK_DISPLAY_CHICKEN1, - I915_READ(ILK_DISPLAY_CHICKEN1) | - ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); - I915_WRITE(ILK_DISPLAY_CHICKEN2, - I915_READ(ILK_DISPLAY_CHICKEN2) | - ILK_DPARB_GATE | ILK_VSDPFD_FULL); - I915_WRITE(ILK_DSPCLK_GATE, - I915_READ(ILK_DSPCLK_GATE) | - ILK_DPARB_CLK_GATE | - ILK_DPFD_CLK_GATE); - - for_each_pipe(pipe) { - I915_WRITE(DSPCNTR(pipe), - I915_READ(DSPCNTR(pipe)) | - DISPPLANE_TRICKLE_FEED_DISABLE); - intel_flush_display_plane(dev_priv, pipe); - } -} - -static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) -{ - uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE); - - reg &= ~GEN7_FF_SCHED_MASK; - reg |= GEN7_FF_TS_SCHED_HW; - reg |= GEN7_FF_VS_SCHED_HW; - reg |= GEN7_FF_DS_SCHED_HW; - - I915_WRITE(GEN7_FF_THREAD_MODE, reg); -} - -static void ivybridge_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int pipe; - uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; - - I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); - - I915_WRITE(WM3_LP_ILK, 0); - I915_WRITE(WM2_LP_ILK, 0); - I915_WRITE(WM1_LP_ILK, 0); - - /* According to the spec, bit 13 (RCZUNIT) must be set on IVB. - * This implements the WaDisableRCZUnitClockGating workaround. 
- */ - I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); - - I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); - - I915_WRITE(IVB_CHICKEN3, - CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | - CHICKEN3_DGMG_DONE_FIX_DISABLE); - - /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ - I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, - GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); - - /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ - I915_WRITE(GEN7_L3CNTLREG1, - GEN7_WA_FOR_GEN7_L3_CONTROL); - I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, - GEN7_WA_L3_CHICKEN_MODE); - - /* This is required by WaCatErrorRejectionIssue */ - I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, - I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | - GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); - - for_each_pipe(pipe) { - I915_WRITE(DSPCNTR(pipe), - I915_READ(DSPCNTR(pipe)) | - DISPPLANE_TRICKLE_FEED_DISABLE); - intel_flush_display_plane(dev_priv, pipe); - } - - gen7_setup_fixed_func_scheduler(dev_priv); -} - -static void valleyview_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int pipe; - uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; - - I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); - - I915_WRITE(WM3_LP_ILK, 0); - I915_WRITE(WM2_LP_ILK, 0); - I915_WRITE(WM1_LP_ILK, 0); - - /* According to the spec, bit 13 (RCZUNIT) must be set on IVB. - * This implements the WaDisableRCZUnitClockGating workaround. - */ - I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); - - I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); - - I915_WRITE(IVB_CHICKEN3, - CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | - CHICKEN3_DGMG_DONE_FIX_DISABLE); - - /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ - I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, - GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); - - /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ - I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL); - I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE); - - /* This is required by WaCatErrorRejectionIssue */ - I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, - I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | - GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); - - for_each_pipe(pipe) { - I915_WRITE(DSPCNTR(pipe), - I915_READ(DSPCNTR(pipe)) | - DISPPLANE_TRICKLE_FEED_DISABLE); - intel_flush_display_plane(dev_priv, pipe); - } - - I915_WRITE(CACHE_MODE_1, I915_READ(CACHE_MODE_1) | - (PIXEL_SUBSPAN_COLLECT_OPT_DISABLE << 16) | - PIXEL_SUBSPAN_COLLECT_OPT_DISABLE); -} - -static void g4x_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t dspclk_gate; - - I915_WRITE(RENCLK_GATE_D1, 0); - I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | - GS_UNIT_CLOCK_GATE_DISABLE | - CL_UNIT_CLOCK_GATE_DISABLE); - I915_WRITE(RAMCLK_GATE_D, 0); - dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | - OVRUNIT_CLOCK_GATE_DISABLE | - OVCUNIT_CLOCK_GATE_DISABLE; - if (IS_GM45(dev)) - dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; - I915_WRITE(DSPCLK_GATE_D, dspclk_gate); -} - -static void crestline_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); - I915_WRITE(RENCLK_GATE_D2, 0); - I915_WRITE(DSPCLK_GATE_D, 0); - I915_WRITE(RAMCLK_GATE_D, 0); - I915_WRITE16(DEUC, 0); -} - -static void broadwater_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - I915_WRITE(RENCLK_GATE_D1, 
I965_RCZ_CLOCK_GATE_DISABLE | - I965_RCC_CLOCK_GATE_DISABLE | - I965_RCPB_CLOCK_GATE_DISABLE | - I965_ISC_CLOCK_GATE_DISABLE | - I965_FBC_CLOCK_GATE_DISABLE); - I915_WRITE(RENCLK_GATE_D2, 0); -} - -static void gen3_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 dstate = I915_READ(D_STATE); - - dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | - DSTATE_DOT_CLOCK_GATING; - I915_WRITE(D_STATE, dstate); -} - -static void i85x_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); -} - -static void i830_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); -} - -static void ibx_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - /* - * On Ibex Peak and Cougar Point, we need to disable clock - * gating for the panel power sequencer or it will fail to - * start up when no ports are active. - */ - I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); -} - -static void cpt_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int pipe; - - /* - * On Ibex Peak and Cougar Point, we need to disable clock - * gating for the panel power sequencer or it will fail to - * start up when no ports are active. - */ - I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); - I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | - DPLS_EDP_PPS_FIX_DIS); - /* Without this, mode sets may fail silently on FDI */ - for_each_pipe(pipe) - I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS); -} - -void intel_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - dev_priv->display.init_clock_gating(dev); - - if (dev_priv->display.init_pch_clock_gating) - dev_priv->display.init_pch_clock_gating(dev); -} - /* Set up chip specific display functions */ static void intel_init_display(struct drm_device *dev) { diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index c87f29a2aeba..771075cedefa 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -339,6 +339,8 @@ extern bool intel_dpd_is_edp(struct drm_device *dev); extern void intel_edp_link_config(struct intel_encoder *, int *, int *); extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder); extern int intel_plane_init(struct drm_device *dev, enum pipe pipe); +extern void intel_flush_display_plane(struct drm_i915_private *dev_priv, + enum plane plane); /* intel_panel.c */ extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, @@ -490,4 +492,18 @@ extern int i85x_get_fifo_size(struct drm_device *dev, int plane); extern int i845_get_fifo_size(struct drm_device *dev, int plane); extern int i830_get_fifo_size(struct drm_device *dev, int plane); +/* Clock gating */ +extern void ironlake_init_clock_gating(struct drm_device *dev); +extern void gen6_init_clock_gating(struct drm_device *dev); +extern void ivybridge_init_clock_gating(struct drm_device *dev); +extern void valleyview_init_clock_gating(struct drm_device *dev); +extern void g4x_init_clock_gating(struct drm_device *dev); +extern void crestline_init_clock_gating(struct drm_device *dev); +extern void broadwater_init_clock_gating(struct drm_device *dev); +extern void 
gen3_init_clock_gating(struct drm_device *dev); +extern void i85x_init_clock_gating(struct drm_device *dev); +extern void i830_init_clock_gating(struct drm_device *dev); +extern void ibx_init_clock_gating(struct drm_device *dev); +extern void cpt_init_clock_gating(struct drm_device *dev); + #endif /* __INTEL_DRV_H__ */ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 832130e57731..4f35df22a435 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2578,3 +2578,356 @@ void intel_init_emon(struct drm_device *dev) dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); } +void ironlake_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; + + /* Required for FBC */ + dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE | + DPFCRUNIT_CLOCK_GATE_DISABLE | + DPFDUNIT_CLOCK_GATE_DISABLE; + /* Required for CxSR */ + dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; + + I915_WRITE(PCH_3DCGDIS0, + MARIUNIT_CLOCK_GATE_DISABLE | + SVSMUNIT_CLOCK_GATE_DISABLE); + I915_WRITE(PCH_3DCGDIS1, + VFMUNIT_CLOCK_GATE_DISABLE); + + I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); + + /* + * According to the spec the following bits should be set in + * order to enable memory self-refresh + * The bit 22/21 of 0x42004 + * The bit 5 of 0x42020 + * The bit 15 of 0x45000 + */ + I915_WRITE(ILK_DISPLAY_CHICKEN2, + (I915_READ(ILK_DISPLAY_CHICKEN2) | + ILK_DPARB_GATE | ILK_VSDPFD_FULL)); + I915_WRITE(ILK_DSPCLK_GATE, + (I915_READ(ILK_DSPCLK_GATE) | + ILK_DPARB_CLK_GATE)); + I915_WRITE(DISP_ARB_CTL, + (I915_READ(DISP_ARB_CTL) | + DISP_FBC_WM_DIS)); + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); + + /* + * Based on the document from hardware guys the following bits + * should be set unconditionally in order to enable FBC. + * The bit 22 of 0x42000 + * The bit 22 of 0x42004 + * The bit 7,8,9 of 0x42020. + */ + if (IS_IRONLAKE_M(dev)) { + I915_WRITE(ILK_DISPLAY_CHICKEN1, + I915_READ(ILK_DISPLAY_CHICKEN1) | + ILK_FBCQ_DIS); + I915_WRITE(ILK_DISPLAY_CHICKEN2, + I915_READ(ILK_DISPLAY_CHICKEN2) | + ILK_DPARB_GATE); + I915_WRITE(ILK_DSPCLK_GATE, + I915_READ(ILK_DSPCLK_GATE) | + ILK_DPFC_DIS1 | + ILK_DPFC_DIS2 | + ILK_CLK_FBC); + } + + I915_WRITE(ILK_DISPLAY_CHICKEN2, + I915_READ(ILK_DISPLAY_CHICKEN2) | + ILK_ELPIN_409_SELECT); + I915_WRITE(_3D_CHICKEN2, + _3D_CHICKEN2_WM_READ_PIPELINED << 16 | + _3D_CHICKEN2_WM_READ_PIPELINED); +} + +void gen6_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe; + uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; + + I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); + + I915_WRITE(ILK_DISPLAY_CHICKEN2, + I915_READ(ILK_DISPLAY_CHICKEN2) | + ILK_ELPIN_409_SELECT); + + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); + + /* clear masked bit */ + I915_WRITE(CACHE_MODE_0, + CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT); + + I915_WRITE(GEN6_UCGCTL1, + I915_READ(GEN6_UCGCTL1) | + GEN6_BLBUNIT_CLOCK_GATE_DISABLE | + GEN6_CSUNIT_CLOCK_GATE_DISABLE); + + /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock + * gating disable must be set. Failure to set it results in + * flickering pixels due to Z write ordering failures after + * some amount of runtime in the Mesa "fire" demo, and Unigine + * Sanctuary and Tropics, and apparently anything else with + * alpha test or pixel discard. 
+ * + * According to the spec, bit 11 (RCCUNIT) must also be set, + * but we didn't debug actual testcases to find it out. + */ + I915_WRITE(GEN6_UCGCTL2, + GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | + GEN6_RCCUNIT_CLOCK_GATE_DISABLE); + + /* Bspec says we need to always set all mask bits. */ + I915_WRITE(_3D_CHICKEN, (0xFFFF << 16) | + _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL); + + /* + * According to the spec the following bits should be + * set in order to enable memory self-refresh and fbc: + * The bit21 and bit22 of 0x42000 + * The bit21 and bit22 of 0x42004 + * The bit5 and bit7 of 0x42020 + * The bit14 of 0x70180 + * The bit14 of 0x71180 + */ + I915_WRITE(ILK_DISPLAY_CHICKEN1, + I915_READ(ILK_DISPLAY_CHICKEN1) | + ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); + I915_WRITE(ILK_DISPLAY_CHICKEN2, + I915_READ(ILK_DISPLAY_CHICKEN2) | + ILK_DPARB_GATE | ILK_VSDPFD_FULL); + I915_WRITE(ILK_DSPCLK_GATE, + I915_READ(ILK_DSPCLK_GATE) | + ILK_DPARB_CLK_GATE | + ILK_DPFD_CLK_GATE); + + for_each_pipe(pipe) { + I915_WRITE(DSPCNTR(pipe), + I915_READ(DSPCNTR(pipe)) | + DISPPLANE_TRICKLE_FEED_DISABLE); + intel_flush_display_plane(dev_priv, pipe); + } +} + +static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) +{ + uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE); + + reg &= ~GEN7_FF_SCHED_MASK; + reg |= GEN7_FF_TS_SCHED_HW; + reg |= GEN7_FF_VS_SCHED_HW; + reg |= GEN7_FF_DS_SCHED_HW; + + I915_WRITE(GEN7_FF_THREAD_MODE, reg); +} + +void ivybridge_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe; + uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; + + I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); + + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); + + /* According to the spec, bit 13 (RCZUNIT) must be set on IVB. + * This implements the WaDisableRCZUnitClockGating workaround. + */ + I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); + + I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); + + I915_WRITE(IVB_CHICKEN3, + CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | + CHICKEN3_DGMG_DONE_FIX_DISABLE); + + /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ + I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, + GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); + + /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ + I915_WRITE(GEN7_L3CNTLREG1, + GEN7_WA_FOR_GEN7_L3_CONTROL); + I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, + GEN7_WA_L3_CHICKEN_MODE); + + /* This is required by WaCatErrorRejectionIssue */ + I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, + I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | + GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); + + for_each_pipe(pipe) { + I915_WRITE(DSPCNTR(pipe), + I915_READ(DSPCNTR(pipe)) | + DISPPLANE_TRICKLE_FEED_DISABLE); + intel_flush_display_plane(dev_priv, pipe); + } + + gen7_setup_fixed_func_scheduler(dev_priv); +} + +void valleyview_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe; + uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; + + I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); + + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); + + /* According to the spec, bit 13 (RCZUNIT) must be set on IVB. + * This implements the WaDisableRCZUnitClockGating workaround. 
+ */ + I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); + + I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); + + I915_WRITE(IVB_CHICKEN3, + CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | + CHICKEN3_DGMG_DONE_FIX_DISABLE); + + /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ + I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, + GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); + + /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ + I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL); + I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE); + + /* This is required by WaCatErrorRejectionIssue */ + I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, + I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | + GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); + + for_each_pipe(pipe) { + I915_WRITE(DSPCNTR(pipe), + I915_READ(DSPCNTR(pipe)) | + DISPPLANE_TRICKLE_FEED_DISABLE); + intel_flush_display_plane(dev_priv, pipe); + } + + I915_WRITE(CACHE_MODE_1, I915_READ(CACHE_MODE_1) | + (PIXEL_SUBSPAN_COLLECT_OPT_DISABLE << 16) | + PIXEL_SUBSPAN_COLLECT_OPT_DISABLE); +} + +void g4x_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dspclk_gate; + + I915_WRITE(RENCLK_GATE_D1, 0); + I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | + GS_UNIT_CLOCK_GATE_DISABLE | + CL_UNIT_CLOCK_GATE_DISABLE); + I915_WRITE(RAMCLK_GATE_D, 0); + dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | + OVRUNIT_CLOCK_GATE_DISABLE | + OVCUNIT_CLOCK_GATE_DISABLE; + if (IS_GM45(dev)) + dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; + I915_WRITE(DSPCLK_GATE_D, dspclk_gate); +} + +void crestline_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); + I915_WRITE(RENCLK_GATE_D2, 0); + I915_WRITE(DSPCLK_GATE_D, 0); + I915_WRITE(RAMCLK_GATE_D, 0); + I915_WRITE16(DEUC, 0); +} + +void broadwater_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | + I965_RCC_CLOCK_GATE_DISABLE | + I965_RCPB_CLOCK_GATE_DISABLE | + I965_ISC_CLOCK_GATE_DISABLE | + I965_FBC_CLOCK_GATE_DISABLE); + I915_WRITE(RENCLK_GATE_D2, 0); +} + +void gen3_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 dstate = I915_READ(D_STATE); + + dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | + DSTATE_DOT_CLOCK_GATING; + I915_WRITE(D_STATE, dstate); +} + +void i85x_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); +} + +void i830_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); +} + +void ibx_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* + * On Ibex Peak and Cougar Point, we need to disable clock + * gating for the panel power sequencer or it will fail to + * start up when no ports are active. + */ + I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); +} + +void cpt_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe; + + /* + * On Ibex Peak and Cougar Point, we need to disable clock + * gating for the panel power sequencer or it will fail to + * start up when no ports are active. 
+ */ + I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); + I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | + DPLS_EDP_PPS_FIX_DIS); + /* Without this, mode sets may fail silently on FDI */ + for_each_pipe(pipe) + I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS); +} + +void intel_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + dev_priv->display.init_clock_gating(dev); + + if (dev_priv->display.init_pch_clock_gating) + dev_priv->display.init_pch_clock_gating(dev); +} + -- cgit v1.2.3-59-g8ed1b From 1fa611065080cf86d783a8e3acaa3de0f04eacd3 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Wed, 18 Apr 2012 15:29:26 -0300 Subject: drm/i915: add generic power management initialization This adds intel_pm routine for generic power-related infrastructure initialization. v2: now that all the platform-specific stuff is initialized in one place, we can also add back the static definitions to platform-specific functions which we abstract now. Acked-by: Jesse Barnes Acked-by: Ben Widawsky Signed-off-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 125 +-------------------- drivers/gpu/drm/i915/intel_drv.h | 45 +------- drivers/gpu/drm/i915/intel_pm.c | 212 +++++++++++++++++++++++++++++------ 3 files changed, 180 insertions(+), 202 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index dec67aafbdc6..aa647c654f85 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6367,23 +6367,6 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.update_plane = i9xx_update_plane; } - if (I915_HAS_FBC(dev)) { - if (HAS_PCH_SPLIT(dev)) { - dev_priv->display.fbc_enabled = ironlake_fbc_enabled; - dev_priv->display.enable_fbc = ironlake_enable_fbc; - dev_priv->display.disable_fbc = ironlake_disable_fbc; - } else if (IS_GM45(dev)) { - dev_priv->display.fbc_enabled = g4x_fbc_enabled; - dev_priv->display.enable_fbc = g4x_enable_fbc; - dev_priv->display.disable_fbc = g4x_disable_fbc; - } else if (IS_CRESTLINE(dev)) { - dev_priv->display.fbc_enabled = i8xx_fbc_enabled; - dev_priv->display.enable_fbc = i8xx_enable_fbc; - dev_priv->display.disable_fbc = i8xx_disable_fbc; - } - /* 855GM needs testing */ - } - /* Returns the core display clock speed */ if (IS_VALLEYVIEW(dev)) dev_priv->display.get_display_clock_speed = @@ -6410,130 +6393,24 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.get_display_clock_speed = i830_get_display_clock_speed; - /* For FIFO watermark updates */ if (HAS_PCH_SPLIT(dev)) { - dev_priv->display.force_wake_get = __gen6_gt_force_wake_get; - dev_priv->display.force_wake_put = __gen6_gt_force_wake_put; - - /* IVB configs may use multi-threaded forcewake */ - if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { - u32 ecobus; - - /* A small trick here - if the bios hasn't configured MT forcewake, - * and if the device is in RC6, then force_wake_mt_get will not wake - * the device and the ECOBUS read will return zero. Which will be - * (correctly) interpreted by the test below as MT forcewake being - * disabled. 
- */ - mutex_lock(&dev->struct_mutex); - __gen6_gt_force_wake_mt_get(dev_priv); - ecobus = I915_READ_NOTRACE(ECOBUS); - __gen6_gt_force_wake_mt_put(dev_priv); - mutex_unlock(&dev->struct_mutex); - - if (ecobus & FORCEWAKE_MT_ENABLE) { - DRM_DEBUG_KMS("Using MT version of forcewake\n"); - dev_priv->display.force_wake_get = - __gen6_gt_force_wake_mt_get; - dev_priv->display.force_wake_put = - __gen6_gt_force_wake_mt_put; - } - } - - if (HAS_PCH_IBX(dev)) - dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating; - else if (HAS_PCH_CPT(dev)) - dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating; - if (IS_GEN5(dev)) { - if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) - dev_priv->display.update_wm = ironlake_update_wm; - else { - DRM_DEBUG_KMS("Failed to get proper latency. " - "Disable CxSR\n"); - dev_priv->display.update_wm = NULL; - } dev_priv->display.fdi_link_train = ironlake_fdi_link_train; - dev_priv->display.init_clock_gating = ironlake_init_clock_gating; dev_priv->display.write_eld = ironlake_write_eld; } else if (IS_GEN6(dev)) { - if (SNB_READ_WM0_LATENCY()) { - dev_priv->display.update_wm = sandybridge_update_wm; - dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; - } else { - DRM_DEBUG_KMS("Failed to read display plane latency. " - "Disable CxSR\n"); - dev_priv->display.update_wm = NULL; - } dev_priv->display.fdi_link_train = gen6_fdi_link_train; - dev_priv->display.init_clock_gating = gen6_init_clock_gating; dev_priv->display.write_eld = ironlake_write_eld; } else if (IS_IVYBRIDGE(dev)) { /* FIXME: detect B0+ stepping and use auto training */ dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; - if (SNB_READ_WM0_LATENCY()) { - dev_priv->display.update_wm = sandybridge_update_wm; - dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; - } else { - DRM_DEBUG_KMS("Failed to read display plane latency. " - "Disable CxSR\n"); - dev_priv->display.update_wm = NULL; - } - dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; dev_priv->display.write_eld = ironlake_write_eld; } else dev_priv->display.update_wm = NULL; } else if (IS_VALLEYVIEW(dev)) { - dev_priv->display.update_wm = valleyview_update_wm; - dev_priv->display.init_clock_gating = - valleyview_init_clock_gating; dev_priv->display.force_wake_get = vlv_force_wake_get; dev_priv->display.force_wake_put = vlv_force_wake_put; - } else if (IS_PINEVIEW(dev)) { - if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), - dev_priv->is_ddr3, - dev_priv->fsb_freq, - dev_priv->mem_freq)) { - DRM_INFO("failed to find known CxSR latency " - "(found ddr%s fsb freq %d, mem freq %d), " - "disabling CxSR\n", - (dev_priv->is_ddr3 == 1) ? 
"3" : "2", - dev_priv->fsb_freq, dev_priv->mem_freq); - /* Disable CxSR and never update its watermark again */ - pineview_disable_cxsr(dev); - dev_priv->display.update_wm = NULL; - } else - dev_priv->display.update_wm = pineview_update_wm; - dev_priv->display.init_clock_gating = gen3_init_clock_gating; } else if (IS_G4X(dev)) { dev_priv->display.write_eld = g4x_write_eld; - dev_priv->display.update_wm = g4x_update_wm; - dev_priv->display.init_clock_gating = g4x_init_clock_gating; - } else if (IS_GEN4(dev)) { - dev_priv->display.update_wm = i965_update_wm; - if (IS_CRESTLINE(dev)) - dev_priv->display.init_clock_gating = crestline_init_clock_gating; - else if (IS_BROADWATER(dev)) - dev_priv->display.init_clock_gating = broadwater_init_clock_gating; - } else if (IS_GEN3(dev)) { - dev_priv->display.update_wm = i9xx_update_wm; - dev_priv->display.get_fifo_size = i9xx_get_fifo_size; - dev_priv->display.init_clock_gating = gen3_init_clock_gating; - } else if (IS_I865G(dev)) { - dev_priv->display.update_wm = i830_update_wm; - dev_priv->display.init_clock_gating = i85x_init_clock_gating; - dev_priv->display.get_fifo_size = i830_get_fifo_size; - } else if (IS_I85X(dev)) { - dev_priv->display.update_wm = i9xx_update_wm; - dev_priv->display.get_fifo_size = i85x_get_fifo_size; - dev_priv->display.init_clock_gating = i85x_init_clock_gating; - } else { - dev_priv->display.update_wm = i830_update_wm; - dev_priv->display.init_clock_gating = i830_init_clock_gating; - if (IS_845G(dev)) - dev_priv->display.get_fifo_size = i845_get_fifo_size; - else - dev_priv->display.get_fifo_size = i830_get_fifo_size; } /* Default just returns -ENODEV to indicate unsupported */ @@ -6723,6 +6600,8 @@ void intel_modeset_init(struct drm_device *dev) intel_init_quirks(dev); + intel_init_pm(dev); + intel_init_display(dev); if (IS_GEN2(dev)) { diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 771075cedefa..c5bf8bebf0b0 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -457,53 +457,10 @@ extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data, extern u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg); /* Power-related functions, located in intel_pm.c */ +extern void intel_init_pm(struct drm_device *dev); /* FBC */ -extern void i8xx_disable_fbc(struct drm_device *dev); -extern void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval); -extern bool i8xx_fbc_enabled(struct drm_device *dev); -extern void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval); -extern void g4x_disable_fbc(struct drm_device *dev); -extern bool g4x_fbc_enabled(struct drm_device *dev); -extern void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval); -extern void ironlake_disable_fbc(struct drm_device *dev); -extern bool ironlake_fbc_enabled(struct drm_device *dev); extern bool intel_fbc_enabled(struct drm_device *dev); extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); extern void intel_update_fbc(struct drm_device *dev); -/* Watermarks */ -extern void pineview_update_wm(struct drm_device *dev); -extern void valleyview_update_wm(struct drm_device *dev); -extern void g4x_update_wm(struct drm_device *dev); -extern void i965_update_wm(struct drm_device *dev); -extern void i9xx_update_wm(struct drm_device *dev); -extern void i830_update_wm(struct drm_device *dev); -extern void ironlake_update_wm(struct drm_device *dev); -extern void sandybridge_update_wm(struct drm_device *dev); -extern void 
sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, - uint32_t sprite_width, int pixel_size); -extern const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, - int is_ddr3, - int fsb, - int mem); -extern void pineview_disable_cxsr(struct drm_device *dev); -extern int i9xx_get_fifo_size(struct drm_device *dev, int plane); -extern int i85x_get_fifo_size(struct drm_device *dev, int plane); -extern int i845_get_fifo_size(struct drm_device *dev, int plane); -extern int i830_get_fifo_size(struct drm_device *dev, int plane); - -/* Clock gating */ -extern void ironlake_init_clock_gating(struct drm_device *dev); -extern void gen6_init_clock_gating(struct drm_device *dev); -extern void ivybridge_init_clock_gating(struct drm_device *dev); -extern void valleyview_init_clock_gating(struct drm_device *dev); -extern void g4x_init_clock_gating(struct drm_device *dev); -extern void crestline_init_clock_gating(struct drm_device *dev); -extern void broadwater_init_clock_gating(struct drm_device *dev); -extern void gen3_init_clock_gating(struct drm_device *dev); -extern void i85x_init_clock_gating(struct drm_device *dev); -extern void i830_init_clock_gating(struct drm_device *dev); -extern void ibx_init_clock_gating(struct drm_device *dev); -extern void cpt_init_clock_gating(struct drm_device *dev); - #endif /* __INTEL_DRV_H__ */ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 4f35df22a435..36940a390ef2 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -40,7 +40,7 @@ * i915.i915_enable_fbc parameter */ -void i8xx_disable_fbc(struct drm_device *dev) +static void i8xx_disable_fbc(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 fbc_ctl; @@ -62,7 +62,7 @@ void i8xx_disable_fbc(struct drm_device *dev) DRM_DEBUG_KMS("disabled FBC\n"); } -void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) +static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -105,14 +105,14 @@ void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) cfb_pitch, crtc->y, intel_crtc->plane); } -bool i8xx_fbc_enabled(struct drm_device *dev) +static bool i8xx_fbc_enabled(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; return I915_READ(FBC_CONTROL) & FBC_CTL_EN; } -void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) +static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -139,7 +139,7 @@ void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); } -void g4x_disable_fbc(struct drm_device *dev) +static void g4x_disable_fbc(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 dpfc_ctl; @@ -154,7 +154,7 @@ void g4x_disable_fbc(struct drm_device *dev) } } -bool g4x_fbc_enabled(struct drm_device *dev) +static bool g4x_fbc_enabled(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -181,7 +181,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev) gen6_gt_force_wake_put(dev_priv); } -void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) +static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) { struct drm_device *dev = crtc->dev; struct 
drm_i915_private *dev_priv = dev->dev_private; @@ -219,7 +219,7 @@ void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); } -void ironlake_disable_fbc(struct drm_device *dev) +static void ironlake_disable_fbc(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 dpfc_ctl; @@ -234,7 +234,7 @@ void ironlake_disable_fbc(struct drm_device *dev) } } -bool ironlake_fbc_enabled(struct drm_device *dev) +static bool ironlake_fbc_enabled(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -586,7 +586,7 @@ const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, return NULL; } -void pineview_disable_cxsr(struct drm_device *dev) +static void pineview_disable_cxsr(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -610,7 +610,7 @@ void pineview_disable_cxsr(struct drm_device *dev) */ static const int latency_ns = 5000; -int i9xx_get_fifo_size(struct drm_device *dev, int plane) +static int i9xx_get_fifo_size(struct drm_device *dev, int plane) { struct drm_i915_private *dev_priv = dev->dev_private; uint32_t dsparb = I915_READ(DSPARB); @@ -626,7 +626,7 @@ int i9xx_get_fifo_size(struct drm_device *dev, int plane) return size; } -int i85x_get_fifo_size(struct drm_device *dev, int plane) +static int i85x_get_fifo_size(struct drm_device *dev, int plane) { struct drm_i915_private *dev_priv = dev->dev_private; uint32_t dsparb = I915_READ(DSPARB); @@ -643,7 +643,7 @@ int i85x_get_fifo_size(struct drm_device *dev, int plane) return size; } -int i845_get_fifo_size(struct drm_device *dev, int plane) +static int i845_get_fifo_size(struct drm_device *dev, int plane) { struct drm_i915_private *dev_priv = dev->dev_private; uint32_t dsparb = I915_READ(DSPARB); @@ -659,7 +659,7 @@ int i845_get_fifo_size(struct drm_device *dev, int plane) return size; } -int i830_get_fifo_size(struct drm_device *dev, int plane) +static int i830_get_fifo_size(struct drm_device *dev, int plane) { struct drm_i915_private *dev_priv = dev->dev_private; uint32_t dsparb = I915_READ(DSPARB); @@ -891,7 +891,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev) return enabled; } -void pineview_update_wm(struct drm_device *dev) +static void pineview_update_wm(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc; @@ -1169,7 +1169,7 @@ static void vlv_update_drain_latency(struct drm_device *dev) #define single_plane_enabled(mask) is_power_of_2(mask) -void valleyview_update_wm(struct drm_device *dev) +static void valleyview_update_wm(struct drm_device *dev) { static const int sr_latency_ns = 12000; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1220,7 +1220,7 @@ void valleyview_update_wm(struct drm_device *dev) (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT))); } -void g4x_update_wm(struct drm_device *dev) +static void g4x_update_wm(struct drm_device *dev) { static const int sr_latency_ns = 12000; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1271,7 +1271,7 @@ void g4x_update_wm(struct drm_device *dev) (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); } -void i965_update_wm(struct drm_device *dev) +static void i965_update_wm(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc; @@ -1336,7 +1336,7 @@ void i965_update_wm(struct drm_device *dev) I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); } -void i9xx_update_wm(struct 
drm_device *dev) +static void i9xx_update_wm(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; const struct intel_watermark_params *wm_info; @@ -1447,7 +1447,7 @@ void i9xx_update_wm(struct drm_device *dev) } } -void i830_update_wm(struct drm_device *dev) +static void i830_update_wm(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc; @@ -1574,7 +1574,7 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane, display, cursor); } -void ironlake_update_wm(struct drm_device *dev) +static void ironlake_update_wm(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int fbc_wm, plane_wm, cursor_wm; @@ -1657,7 +1657,7 @@ void ironlake_update_wm(struct drm_device *dev) */ } -void sandybridge_update_wm(struct drm_device *dev) +static void sandybridge_update_wm(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ @@ -1851,7 +1851,7 @@ sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane, return *sprite_wm > 0x3ff ? false : true; } -void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, +static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, uint32_t sprite_width, int pixel_size) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -2578,7 +2578,7 @@ void intel_init_emon(struct drm_device *dev) dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); } -void ironlake_init_clock_gating(struct drm_device *dev) +static void ironlake_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; @@ -2647,7 +2647,7 @@ void ironlake_init_clock_gating(struct drm_device *dev) _3D_CHICKEN2_WM_READ_PIPELINED); } -void gen6_init_clock_gating(struct drm_device *dev) +static void gen6_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int pipe; @@ -2730,7 +2730,7 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) I915_WRITE(GEN7_FF_THREAD_MODE, reg); } -void ivybridge_init_clock_gating(struct drm_device *dev) +static void ivybridge_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int pipe; @@ -2778,7 +2778,7 @@ void ivybridge_init_clock_gating(struct drm_device *dev) gen7_setup_fixed_func_scheduler(dev_priv); } -void valleyview_init_clock_gating(struct drm_device *dev) +static void valleyview_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int pipe; @@ -2826,7 +2826,7 @@ void valleyview_init_clock_gating(struct drm_device *dev) PIXEL_SUBSPAN_COLLECT_OPT_DISABLE); } -void g4x_init_clock_gating(struct drm_device *dev) +static void g4x_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; uint32_t dspclk_gate; @@ -2844,7 +2844,7 @@ void g4x_init_clock_gating(struct drm_device *dev) I915_WRITE(DSPCLK_GATE_D, dspclk_gate); } -void crestline_init_clock_gating(struct drm_device *dev) +static void crestline_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -2855,7 +2855,7 @@ void crestline_init_clock_gating(struct drm_device *dev) I915_WRITE16(DEUC, 0); } -void broadwater_init_clock_gating(struct drm_device *dev) +static void broadwater_init_clock_gating(struct drm_device *dev) { struct drm_i915_private 
*dev_priv = dev->dev_private; @@ -2867,7 +2867,7 @@ void broadwater_init_clock_gating(struct drm_device *dev) I915_WRITE(RENCLK_GATE_D2, 0); } -void gen3_init_clock_gating(struct drm_device *dev) +static void gen3_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 dstate = I915_READ(D_STATE); @@ -2877,21 +2877,21 @@ void gen3_init_clock_gating(struct drm_device *dev) I915_WRITE(D_STATE, dstate); } -void i85x_init_clock_gating(struct drm_device *dev) +static void i85x_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); } -void i830_init_clock_gating(struct drm_device *dev) +static void i830_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); } -void ibx_init_clock_gating(struct drm_device *dev) +static void ibx_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -2903,7 +2903,7 @@ void ibx_init_clock_gating(struct drm_device *dev) I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); } -void cpt_init_clock_gating(struct drm_device *dev) +static void cpt_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int pipe; @@ -2931,3 +2931,145 @@ void intel_init_clock_gating(struct drm_device *dev) dev_priv->display.init_pch_clock_gating(dev); } +/* Set up chip specific power management-related functions */ +void intel_init_pm(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (I915_HAS_FBC(dev)) { + if (HAS_PCH_SPLIT(dev)) { + dev_priv->display.fbc_enabled = ironlake_fbc_enabled; + dev_priv->display.enable_fbc = ironlake_enable_fbc; + dev_priv->display.disable_fbc = ironlake_disable_fbc; + } else if (IS_GM45(dev)) { + dev_priv->display.fbc_enabled = g4x_fbc_enabled; + dev_priv->display.enable_fbc = g4x_enable_fbc; + dev_priv->display.disable_fbc = g4x_disable_fbc; + } else if (IS_CRESTLINE(dev)) { + dev_priv->display.fbc_enabled = i8xx_fbc_enabled; + dev_priv->display.enable_fbc = i8xx_enable_fbc; + dev_priv->display.disable_fbc = i8xx_disable_fbc; + } + /* 855GM needs testing */ + } + + /* For FIFO watermark updates */ + if (HAS_PCH_SPLIT(dev)) { + dev_priv->display.force_wake_get = __gen6_gt_force_wake_get; + dev_priv->display.force_wake_put = __gen6_gt_force_wake_put; + + /* IVB configs may use multi-threaded forcewake */ + if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { + u32 ecobus; + + /* A small trick here - if the bios hasn't configured MT forcewake, + * and if the device is in RC6, then force_wake_mt_get will not wake + * the device and the ECOBUS read will return zero. Which will be + * (correctly) interpreted by the test below as MT forcewake being + * disabled. 
+ */ + mutex_lock(&dev->struct_mutex); + __gen6_gt_force_wake_mt_get(dev_priv); + ecobus = I915_READ_NOTRACE(ECOBUS); + __gen6_gt_force_wake_mt_put(dev_priv); + mutex_unlock(&dev->struct_mutex); + + if (ecobus & FORCEWAKE_MT_ENABLE) { + DRM_DEBUG_KMS("Using MT version of forcewake\n"); + dev_priv->display.force_wake_get = + __gen6_gt_force_wake_mt_get; + dev_priv->display.force_wake_put = + __gen6_gt_force_wake_mt_put; + } + } + + if (HAS_PCH_IBX(dev)) + dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating; + else if (HAS_PCH_CPT(dev)) + dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating; + + if (IS_GEN5(dev)) { + if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) + dev_priv->display.update_wm = ironlake_update_wm; + else { + DRM_DEBUG_KMS("Failed to get proper latency. " + "Disable CxSR\n"); + dev_priv->display.update_wm = NULL; + } + dev_priv->display.init_clock_gating = ironlake_init_clock_gating; + } else if (IS_GEN6(dev)) { + if (SNB_READ_WM0_LATENCY()) { + dev_priv->display.update_wm = sandybridge_update_wm; + dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; + } else { + DRM_DEBUG_KMS("Failed to read display plane latency. " + "Disable CxSR\n"); + dev_priv->display.update_wm = NULL; + } + dev_priv->display.init_clock_gating = gen6_init_clock_gating; + } else if (IS_IVYBRIDGE(dev)) { + /* FIXME: detect B0+ stepping and use auto training */ + if (SNB_READ_WM0_LATENCY()) { + dev_priv->display.update_wm = sandybridge_update_wm; + dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; + } else { + DRM_DEBUG_KMS("Failed to read display plane latency. " + "Disable CxSR\n"); + dev_priv->display.update_wm = NULL; + } + dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; + } else + dev_priv->display.update_wm = NULL; + } else if (IS_VALLEYVIEW(dev)) { + dev_priv->display.update_wm = valleyview_update_wm; + dev_priv->display.init_clock_gating = + valleyview_init_clock_gating; + dev_priv->display.force_wake_get = vlv_force_wake_get; + dev_priv->display.force_wake_put = vlv_force_wake_put; + } else if (IS_PINEVIEW(dev)) { + if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), + dev_priv->is_ddr3, + dev_priv->fsb_freq, + dev_priv->mem_freq)) { + DRM_INFO("failed to find known CxSR latency " + "(found ddr%s fsb freq %d, mem freq %d), " + "disabling CxSR\n", + (dev_priv->is_ddr3 == 1) ? 
"3" : "2", + dev_priv->fsb_freq, dev_priv->mem_freq); + /* Disable CxSR and never update its watermark again */ + pineview_disable_cxsr(dev); + dev_priv->display.update_wm = NULL; + } else + dev_priv->display.update_wm = pineview_update_wm; + dev_priv->display.init_clock_gating = gen3_init_clock_gating; + } else if (IS_G4X(dev)) { + dev_priv->display.update_wm = g4x_update_wm; + dev_priv->display.init_clock_gating = g4x_init_clock_gating; + } else if (IS_GEN4(dev)) { + dev_priv->display.update_wm = i965_update_wm; + if (IS_CRESTLINE(dev)) + dev_priv->display.init_clock_gating = crestline_init_clock_gating; + else if (IS_BROADWATER(dev)) + dev_priv->display.init_clock_gating = broadwater_init_clock_gating; + } else if (IS_GEN3(dev)) { + dev_priv->display.update_wm = i9xx_update_wm; + dev_priv->display.get_fifo_size = i9xx_get_fifo_size; + dev_priv->display.init_clock_gating = gen3_init_clock_gating; + } else if (IS_I865G(dev)) { + dev_priv->display.update_wm = i830_update_wm; + dev_priv->display.init_clock_gating = i85x_init_clock_gating; + dev_priv->display.get_fifo_size = i830_get_fifo_size; + } else if (IS_I85X(dev)) { + dev_priv->display.update_wm = i9xx_update_wm; + dev_priv->display.get_fifo_size = i85x_get_fifo_size; + dev_priv->display.init_clock_gating = i85x_init_clock_gating; + } else { + dev_priv->display.update_wm = i830_update_wm; + dev_priv->display.init_clock_gating = i830_init_clock_gating; + if (IS_845G(dev)) + dev_priv->display.get_fifo_size = i845_get_fifo_size; + else + dev_priv->display.get_fifo_size = i830_get_fifo_size; + } +} + -- cgit v1.2.3-59-g8ed1b From 284d5df5716aad3c729d1ac4a2a93b8d0ac33ecc Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 14 Apr 2012 17:41:59 +0100 Subject: drm/i915: Silence the change of LVDS sync polarity When the change to start adjusting the sync polarity of the LVDS mode was introduced in commit aa9b500ddf1a6318e7cf8b1754696edddae86db9 Author: Bryan Freed Date: Wed Jan 12 13:43:19 2011 -0800 drm/i915: Honour LVDS sync polarity from EDID we made the change in state verbose so that we could quickly spot any regressions that made have also been introduced with it. As there do not appear to have been any, remove the extra logging. v2: Remove the no longer used variables. 
Signed-off-by: Chris Wilson Acked-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 37 +++++++----------------------------- 1 file changed, 7 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index aa647c654f85..4c844c68ec80 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3416,7 +3416,7 @@ static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - u32 temp, lvds_sync = 0; + u32 temp; temp = I915_READ(LVDS); temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; @@ -3446,22 +3446,11 @@ static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock, else temp &= ~LVDS_ENABLE_DITHER; } + temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) - lvds_sync |= LVDS_HSYNC_POLARITY; + temp |= LVDS_HSYNC_POLARITY; if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) - lvds_sync |= LVDS_VSYNC_POLARITY; - if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY)) - != lvds_sync) { - char flags[2] = "-+"; - DRM_INFO("Changing LVDS panel from " - "(%chsync, %cvsync) to (%chsync, %cvsync)\n", - flags[!(temp & LVDS_HSYNC_POLARITY)], - flags[!(temp & LVDS_VSYNC_POLARITY)], - flags[!(lvds_sync & LVDS_HSYNC_POLARITY)], - flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]); - temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); - temp |= lvds_sync; - } + temp |= LVDS_VSYNC_POLARITY; I915_WRITE(LVDS, temp); } @@ -4012,7 +4001,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, int ret; struct fdi_m_n m_n = {0}; u32 temp; - u32 lvds_sync = 0; int target_clock, pixel_multiplier, lane, link_bw, factor; unsigned int pipe_bpp; bool dither; @@ -4305,22 +4293,11 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, * appropriately here, but we need to look more thoroughly into how * panels behave in the two modes. */ + temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) - lvds_sync |= LVDS_HSYNC_POLARITY; + temp |= LVDS_HSYNC_POLARITY; if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) - lvds_sync |= LVDS_VSYNC_POLARITY; - if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY)) - != lvds_sync) { - char flags[2] = "-+"; - DRM_INFO("Changing LVDS panel from " - "(%chsync, %cvsync) to (%chsync, %cvsync)\n", - flags[!(temp & LVDS_HSYNC_POLARITY)], - flags[!(temp & LVDS_VSYNC_POLARITY)], - flags[!(lvds_sync & LVDS_HSYNC_POLARITY)], - flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]); - temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); - temp |= lvds_sync; - } + temp |= LVDS_VSYNC_POLARITY; I915_WRITE(PCH_LVDS, temp); } -- cgit v1.2.3-59-g8ed1b From 31b14c9fc596a68a2a982d7e3c136922f81a2e25 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 19 Apr 2012 16:45:22 +0200 Subject: drm/i915: invalidate render cache on gen2 It looks like we also need to flush the render cache when we just invalidate it. This fixes a regression in i-g-t/gem_tiled_blits on my i855gm. I guess the render cache there is virtually indexed, so we need to clean it when changing gtt mappings. 
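
For illustration, the fix below can be modelled in isolation: the MI_FLUSH command word only gains MI_NO_WRITE_FLUSH when neither the invalidate domains nor the flush domains touch the render domain, so a render-cache invalidate alone now keeps the write flush. The constants in this standalone sketch are placeholders for the real MI_* and I915_GEM_DOMAIN_* values.

#include <assert.h>
#include <stdint.h>

#define MI_FLUSH                (0x04u << 23)  /* placeholder encoding */
#define MI_NO_WRITE_FLUSH       (1u << 2)      /* placeholder bit */
#define I915_GEM_DOMAIN_RENDER  0x00000002u    /* placeholder domain bit */

/* Models the post-fix command construction in gen2_render_ring_flush(). */
static uint32_t gen2_flush_cmd(uint32_t invalidate_domains, uint32_t flush_domains)
{
	uint32_t cmd = MI_FLUSH;

	/* Skip the write flush only when neither side touches the render domain. */
	if (((invalidate_domains | flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
		cmd |= MI_NO_WRITE_FLUSH;
	return cmd;
}

int main(void)
{
	/* A render-domain invalidate alone now keeps the write flush. */
	assert(!(gen2_flush_cmd(I915_GEM_DOMAIN_RENDER, 0) & MI_NO_WRITE_FLUSH));
	/* Touching no relevant domain still suppresses it. */
	assert(gen2_flush_cmd(0, 0) & MI_NO_WRITE_FLUSH);
	return 0;
}
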
This regression has been introduce in commit 46f0f8d120c4afae53a5670bf3ac80a928340ff3 Author: Chris Wilson Date: Wed Apr 18 11:12:11 2012 +0100 drm/i915: Don't set a MBZ bit in gen2/3 MI_FLUSH Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ringbuffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 6f610f28b1be..12d9bc789dfb 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -61,7 +61,7 @@ gen2_render_ring_flush(struct intel_ring_buffer *ring, int ret; cmd = MI_FLUSH; - if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) + if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0) cmd |= MI_NO_WRITE_FLUSH; if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) -- cgit v1.2.3-59-g8ed1b From 141670e9b4356b59b5b39a99e10ac0118d12b16d Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 5 Apr 2012 21:35:15 +0300 Subject: drm: Move drm_format_num_planes() to drm_crtc.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There will be a need for this function in drm_crtc.c later. This avoids making drm_crtc.c depend on drm_crtc_helper.c. Signed-off-by: Ville Syrjälä Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc.c | 32 ++++++++++++++++++++++++++++++++ drivers/gpu/drm/drm_crtc_helper.c | 33 --------------------------------- include/drm/drm_crtc.h | 2 ++ include/drm/drm_crtc_helper.h | 2 -- 4 files changed, 34 insertions(+), 35 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index d3aaeb6ae236..32ab669f4aed 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -3466,3 +3466,35 @@ void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, } } EXPORT_SYMBOL(drm_fb_get_bpp_depth); + +/** + * drm_format_num_planes - get the number of planes for format + * @format: pixel format (DRM_FORMAT_*) + * + * RETURNS: + * The number of planes used by the specified pixel format. + */ +int drm_format_num_planes(uint32_t format) +{ + switch (format) { + case DRM_FORMAT_YUV410: + case DRM_FORMAT_YVU410: + case DRM_FORMAT_YUV411: + case DRM_FORMAT_YVU411: + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: + case DRM_FORMAT_YUV422: + case DRM_FORMAT_YVU422: + case DRM_FORMAT_YUV444: + case DRM_FORMAT_YVU444: + return 3; + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + return 2; + default: + return 1; + } +} +EXPORT_SYMBOL(drm_format_num_planes); diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 81118893264c..974196ab7b22 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -1023,36 +1023,3 @@ void drm_helper_hpd_irq_event(struct drm_device *dev) queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0); } EXPORT_SYMBOL(drm_helper_hpd_irq_event); - - -/** - * drm_format_num_planes - get the number of planes for format - * @format: pixel format (DRM_FORMAT_*) - * - * RETURNS: - * The number of planes used by the specified pixel format. 
- */ -int drm_format_num_planes(uint32_t format) -{ - switch (format) { - case DRM_FORMAT_YUV410: - case DRM_FORMAT_YVU410: - case DRM_FORMAT_YUV411: - case DRM_FORMAT_YVU411: - case DRM_FORMAT_YUV420: - case DRM_FORMAT_YVU420: - case DRM_FORMAT_YUV422: - case DRM_FORMAT_YVU422: - case DRM_FORMAT_YUV444: - case DRM_FORMAT_YVU444: - return 3; - case DRM_FORMAT_NV12: - case DRM_FORMAT_NV21: - case DRM_FORMAT_NV16: - case DRM_FORMAT_NV61: - return 2; - default: - return 1; - } -} -EXPORT_SYMBOL(drm_format_num_planes); diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index e250eda4e3a8..9dd3ed85547d 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -1026,4 +1026,6 @@ extern int drm_mode_destroy_dumb_ioctl(struct drm_device *dev, extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, int *bpp); +extern int drm_format_num_planes(uint32_t format); + #endif /* __DRM_CRTC_H__ */ diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index 37515d1afab3..3add00e03388 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -145,6 +145,4 @@ extern void drm_helper_hpd_irq_event(struct drm_device *dev); extern void drm_kms_helper_poll_disable(struct drm_device *dev); extern void drm_kms_helper_poll_enable(struct drm_device *dev); -extern int drm_format_num_planes(uint32_t format); - #endif -- cgit v1.2.3-59-g8ed1b From 5a86bd552407bd6b3e0df4e88636797484d06430 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 5 Apr 2012 21:35:16 +0300 Subject: drm: Add drm_format_plane_cpp() utility function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This function returns the bytes per pixel value based on the pixel format and plane index. Signed-off-by: Ville Syrjälä Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc.c | 45 +++++++++++++++++++++++++++++++++++++++++++++ include/drm/drm_crtc.h | 1 + 2 files changed, 46 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 32ab669f4aed..2c4e9cf2a1d2 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -3498,3 +3498,48 @@ int drm_format_num_planes(uint32_t format) } } EXPORT_SYMBOL(drm_format_num_planes); + +/** + * drm_format_plane_cpp - determine the bytes per pixel value + * @format: pixel format (DRM_FORMAT_*) + * @plane: plane index + * + * RETURNS: + * The bytes per pixel value for the specified plane. + */ +int drm_format_plane_cpp(uint32_t format, int plane) +{ + unsigned int depth; + int bpp; + + if (plane >= drm_format_num_planes(format)) + return 0; + + switch (format) { + case DRM_FORMAT_YUYV: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_VYUY: + return 2; + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + return plane ? 
2 : 1; + case DRM_FORMAT_YUV410: + case DRM_FORMAT_YVU410: + case DRM_FORMAT_YUV411: + case DRM_FORMAT_YVU411: + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: + case DRM_FORMAT_YUV422: + case DRM_FORMAT_YVU422: + case DRM_FORMAT_YUV444: + case DRM_FORMAT_YVU444: + return 1; + default: + drm_fb_get_bpp_depth(format, &depth, &bpp); + return bpp >> 3; + } +} +EXPORT_SYMBOL(drm_format_plane_cpp); diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 9dd3ed85547d..2d128eb4293f 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -1027,5 +1027,6 @@ extern int drm_mode_destroy_dumb_ioctl(struct drm_device *dev, extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, int *bpp); extern int drm_format_num_planes(uint32_t format); +extern int drm_format_plane_cpp(uint32_t format, int plane); #endif /* __DRM_CRTC_H__ */ -- cgit v1.2.3-59-g8ed1b From 01b68b0483627631c738dcfca0dee7e22892c420 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 5 Apr 2012 21:35:17 +0300 Subject: drm: Add drm_format_{horz, vert}_chroma_subsampling() utility functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These functions return the chroma subsampling factors for the specified pixel format. Signed-off-by: Ville Syrjälä Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++ include/drm/drm_crtc.h | 2 ++ 2 files changed, 62 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 2c4e9cf2a1d2..1b79c953b4cc 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -3543,3 +3543,63 @@ int drm_format_plane_cpp(uint32_t format, int plane) } } EXPORT_SYMBOL(drm_format_plane_cpp); + +/** + * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor + * @format: pixel format (DRM_FORMAT_*) + * + * RETURNS: + * The horizontal chroma subsampling factor for the + * specified pixel format. + */ +int drm_format_horz_chroma_subsampling(uint32_t format) +{ + switch (format) { + case DRM_FORMAT_YUV411: + case DRM_FORMAT_YVU411: + case DRM_FORMAT_YUV410: + case DRM_FORMAT_YVU410: + return 4; + case DRM_FORMAT_YUYV: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_VYUY: + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + case DRM_FORMAT_YUV422: + case DRM_FORMAT_YVU422: + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: + return 2; + default: + return 1; + } +} +EXPORT_SYMBOL(drm_format_horz_chroma_subsampling); + +/** + * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor + * @format: pixel format (DRM_FORMAT_*) + * + * RETURNS: + * The vertical chroma subsampling factor for the + * specified pixel format. 
+ */ +int drm_format_vert_chroma_subsampling(uint32_t format) +{ + switch (format) { + case DRM_FORMAT_YUV410: + case DRM_FORMAT_YVU410: + return 4; + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + return 2; + default: + return 1; + } +} +EXPORT_SYMBOL(drm_format_vert_chroma_subsampling); diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 2d128eb4293f..2d63a02571ff 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -1028,5 +1028,7 @@ extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, int *bpp); extern int drm_format_num_planes(uint32_t format); extern int drm_format_plane_cpp(uint32_t format, int plane); +extern int drm_format_horz_chroma_subsampling(uint32_t format); +extern int drm_format_vert_chroma_subsampling(uint32_t format); #endif /* __DRM_CRTC_H__ */ -- cgit v1.2.3-59-g8ed1b From d1b45d5f0586041fe750d90a62ba09cffb3eace1 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 5 Apr 2012 21:35:18 +0300 Subject: drm: Add sanity checks to framebuffer creation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Perform some basic sanity check on some of the parameters in drm_mode_fb_cmd2. Signed-off-by: Ville Syrjälä Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc.c | 47 ++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 43 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 1b79c953b4cc..4d4e8b055731 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -2185,6 +2185,47 @@ static int format_check(struct drm_mode_fb_cmd2 *r) } } +static int framebuffer_check(struct drm_mode_fb_cmd2 *r) +{ + int ret, hsub, vsub, num_planes, i; + + ret = format_check(r); + if (ret) { + DRM_ERROR("bad framebuffer format 0x%08x\n", r->pixel_format); + return ret; + } + + hsub = drm_format_horz_chroma_subsampling(r->pixel_format); + vsub = drm_format_vert_chroma_subsampling(r->pixel_format); + num_planes = drm_format_num_planes(r->pixel_format); + + if (r->width == 0 || r->width % hsub) { + DRM_ERROR("bad framebuffer width %u\n", r->height); + return -EINVAL; + } + + if (r->height == 0 || r->height % vsub) { + DRM_ERROR("bad framebuffer height %u\n", r->height); + return -EINVAL; + } + + for (i = 0; i < num_planes; i++) { + unsigned int width = r->width / (i != 0 ? hsub : 1); + + if (!r->handles[i]) { + DRM_ERROR("no buffer object handle for plane %d\n", i); + return -EINVAL; + } + + if (r->pitches[i] < drm_format_plane_cpp(r->pixel_format, i) * width) { + DRM_ERROR("bad pitch %u for plane %d\n", r->pitches[i], i); + return -EINVAL; + } + } + + return 0; +} + /** * drm_mode_addfb2 - add an FB to the graphics configuration * @inode: inode from the ioctl @@ -2224,11 +2265,9 @@ int drm_mode_addfb2(struct drm_device *dev, return -EINVAL; } - ret = format_check(r); - if (ret) { - DRM_ERROR("bad framebuffer format 0x%08x\n", r->pixel_format); + ret = framebuffer_check(r); + if (ret) return ret; - } mutex_lock(&dev->mode_config.mutex); -- cgit v1.2.3-59-g8ed1b From ee58808deca66bd0879562abf606e171752e8e15 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Mon, 16 Apr 2012 15:16:18 +0200 Subject: drm: Fix EDID color format parsing The code should obviously check the EDID feature field for EDID feature flags and not the color_formats field of the drm_display_info struct. 
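
For illustration only, a standalone sketch of the corrected parse: the EDID feature byte is consulted directly and each supported format is accumulated with |= instead of overwriting the field. All flag values here are placeholders for the DRM_EDID_FEATURE_* and DRM_COLOR_FORMAT_* definitions.

#include <stdint.h>
#include <stdio.h>

#define DRM_EDID_FEATURE_RGB_YCRCB444  (1u << 3)  /* placeholder */
#define DRM_EDID_FEATURE_RGB_YCRCB422  (1u << 4)  /* placeholder */
#define DRM_COLOR_FORMAT_RGB444        (1u << 0)  /* placeholder */
#define DRM_COLOR_FORMAT_YCRCB444      (1u << 1)  /* placeholder */
#define DRM_COLOR_FORMAT_YCRCB422      (1u << 2)  /* placeholder */

/* Hypothetical helper mirroring the corrected drm_add_display_info() logic. */
static uint32_t parse_color_formats(uint8_t edid_features)
{
	uint32_t formats = DRM_COLOR_FORMAT_RGB444;	/* digital sinks always support RGB */

	if (edid_features & DRM_EDID_FEATURE_RGB_YCRCB444)
		formats |= DRM_COLOR_FORMAT_YCRCB444;
	if (edid_features & DRM_EDID_FEATURE_RGB_YCRCB422)
		formats |= DRM_COLOR_FORMAT_YCRCB422;
	return formats;
}

int main(void)
{
	uint8_t features = DRM_EDID_FEATURE_RGB_YCRCB444 | DRM_EDID_FEATURE_RGB_YCRCB422;

	printf("color_formats: 0x%x\n", (unsigned int)parse_color_formats(features));
	return 0;
}
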
Also update the color_formats field with new modes instead of overwriting the current mode. Signed-off-by: Lars-Peter Clausen Reviewed-by: Jesse Barnes Reviewed-by: Adam Jackson Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 5a18b0df8285..8a4580c2a1d0 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1699,10 +1699,10 @@ static void drm_add_display_info(struct edid *edid, } info->color_formats = DRM_COLOR_FORMAT_RGB444; - if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB444) - info->color_formats = DRM_COLOR_FORMAT_YCRCB444; - if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB422) - info->color_formats = DRM_COLOR_FORMAT_YCRCB422; + if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444) + info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; + if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422) + info->color_formats |= DRM_COLOR_FORMAT_YCRCB422; /* Get data from CEA blocks if present */ edid_ext = drm_find_cea_extension(edid); -- cgit v1.2.3-59-g8ed1b From a988bc728f93b58d7d4c6657513c89a5f0eb4706 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Mon, 16 Apr 2012 15:16:19 +0200 Subject: drm: Parse color format information in CEA blocks The CEA extension block has a field which describes which YCbCr modes are supported by the device, use it to fill the drm_display_info color_formats fields. Also the existence of a CEA extension block is used as indication that the device supports RGB. Signed-off-by: Lars-Peter Clausen Reviewed-by: Jesse Barnes Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid.c | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 8a4580c2a1d0..e07491088400 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1312,6 +1312,8 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid, #define VENDOR_BLOCK 0x03 #define SPEAKER_BLOCK 0x04 #define EDID_BASIC_AUDIO (1 << 6) +#define EDID_CEA_YCRCB444 (1 << 5) +#define EDID_CEA_YCRCB422 (1 << 4) /** * Search EDID for CEA extension block. 
@@ -1666,13 +1668,29 @@ static void drm_add_display_info(struct edid *edid, info->bpc = 0; info->color_formats = 0; - /* Only defined for 1.4 with digital displays */ - if (edid->revision < 4) + if (edid->revision < 3) return; if (!(edid->input & DRM_EDID_INPUT_DIGITAL)) return; + /* Get data from CEA blocks if present */ + edid_ext = drm_find_cea_extension(edid); + if (edid_ext) { + info->cea_rev = edid_ext[1]; + + /* The existence of a CEA block should imply RGB support */ + info->color_formats = DRM_COLOR_FORMAT_RGB444; + if (edid_ext[3] & EDID_CEA_YCRCB444) + info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; + if (edid_ext[3] & EDID_CEA_YCRCB422) + info->color_formats |= DRM_COLOR_FORMAT_YCRCB422; + } + + /* Only defined for 1.4 with digital displays */ + if (edid->revision < 4) + return; + switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) { case DRM_EDID_DIGITAL_DEPTH_6: info->bpc = 6; @@ -1698,18 +1716,11 @@ static void drm_add_display_info(struct edid *edid, break; } - info->color_formats = DRM_COLOR_FORMAT_RGB444; + info->color_formats |= DRM_COLOR_FORMAT_RGB444; if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444) info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422) info->color_formats |= DRM_COLOR_FORMAT_YCRCB422; - - /* Get data from CEA blocks if present */ - edid_ext = drm_find_cea_extension(edid); - if (!edid_ext) - return; - - info->cea_rev = edid_ext[1]; } /** -- cgit v1.2.3-59-g8ed1b From 099e014b056665c8271a1cd613b5126896c4dac5 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Fri, 13 Apr 2012 16:31:38 -0300 Subject: drm: add the VIC number to the CEA EDID modes The specification defines a VIC (Video Identification Code) for each mode. When we're browsing drm_edid_modes.h, it really helps to have the number available (otherwise we have to count...). These numbers are also used in the EDID data (by the CEA-EXT extension block). Signed-off-by: Paulo Zanoni Reviewed-by: Adam Jackson Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid_modes.h | 128 +++++++++++++++++++-------------------- 1 file changed, 64 insertions(+), 64 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h index a91ffb117220..712786a1256b 100644 --- a/drivers/gpu/drm/drm_edid_modes.h +++ b/drivers/gpu/drm/drm_edid_modes.h @@ -384,278 +384,278 @@ static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]); * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c. 
*/ static const struct drm_display_mode edid_cea_modes[] = { - /* 640x480@60Hz */ + /* 1 - 640x480@60Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, 752, 800, 0, 480, 490, 492, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 720x480@60Hz */ + /* 2 - 720x480@60Hz */ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736, 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 720x480@60Hz */ + /* 3 - 720x480@60Hz */ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736, 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 1280x720@60Hz */ + /* 4 - 1280x720@60Hz */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390, 1430, 1650, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1920x1080i@60Hz */ + /* 5 - 1920x1080i@60Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, - /* 1440x480i@60Hz */ + /* 6 - 1440x480i@60Hz */ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE) }, - /* 1440x480i@60Hz */ + /* 7 - 1440x480i@60Hz */ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE) }, - /* 1440x240@60Hz */ + /* 8 - 1440x240@60Hz */ { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, 1602, 1716, 0, 240, 244, 247, 262, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 1440x240@60Hz */ + /* 9 - 1440x240@60Hz */ { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, 1602, 1716, 0, 240, 244, 247, 262, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 2880x480i@60Hz */ + /* 10 - 2880x480i@60Hz */ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, 3204, 3432, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE) }, - /* 2880x480i@60Hz */ + /* 11 - 2880x480i@60Hz */ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, 3204, 3432, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE) }, - /* 2880x240@60Hz */ + /* 12 - 2880x240@60Hz */ { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, 3204, 3432, 0, 240, 244, 247, 262, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 2880x240@60Hz */ + /* 13 - 2880x240@60Hz */ { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, 3204, 3432, 0, 240, 244, 247, 262, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 1440x480@60Hz */ + /* 14 - 1440x480@60Hz */ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472, 1596, 1716, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 1440x480@60Hz */ + /* 15 - 1440x480@60Hz */ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472, 1596, 1716, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 1920x1080@60Hz */ + /* 16 - 1920x1080@60Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 720x576@50Hz */ + /* 17 - 720x576@50Hz */ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732, 796, 864, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | 
DRM_MODE_FLAG_NVSYNC) }, - /* 720x576@50Hz */ + /* 18 - 720x576@50Hz */ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732, 796, 864, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 1280x720@50Hz */ + /* 19 - 1280x720@50Hz */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720, 1760, 1980, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1920x1080i@50Hz */ + /* 20 - 1920x1080i@50Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, - /* 1440x576i@50Hz */ + /* 21 - 1440x576i@50Hz */ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE) }, - /* 1440x576i@50Hz */ + /* 22 - 1440x576i@50Hz */ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE) }, - /* 1440x288@50Hz */ + /* 23 - 1440x288@50Hz */ { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 1590, 1728, 0, 288, 290, 293, 312, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 1440x288@50Hz */ + /* 24 - 1440x288@50Hz */ { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 1590, 1728, 0, 288, 290, 293, 312, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 2880x576i@50Hz */ + /* 25 - 2880x576i@50Hz */ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 3180, 3456, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE) }, - /* 2880x576i@50Hz */ + /* 26 - 2880x576i@50Hz */ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 3180, 3456, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE) }, - /* 2880x288@50Hz */ + /* 27 - 2880x288@50Hz */ { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 3180, 3456, 0, 288, 290, 293, 312, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 2880x288@50Hz */ + /* 28 - 2880x288@50Hz */ { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 3180, 3456, 0, 288, 290, 293, 312, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 1440x576@50Hz */ + /* 29 - 1440x576@50Hz */ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, 1592, 1728, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 1440x576@50Hz */ + /* 30 - 1440x576@50Hz */ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, 1592, 1728, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 1920x1080@50Hz */ + /* 31 - 1920x1080@50Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1920x1080@24Hz */ + /* 32 - 1920x1080@24Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558, 2602, 2750, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1920x1080@25Hz */ + /* 33 - 1920x1080@25Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1920x1080@30Hz */ + /* 34 - 1920x1080@30Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | 
DRM_MODE_FLAG_PVSYNC) }, - /* 2880x480@60Hz */ + /* 35 - 2880x480@60Hz */ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944, 3192, 3432, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 2880x480@60Hz */ + /* 36 - 2880x480@60Hz */ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944, 3192, 3432, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 2880x576@50Hz */ + /* 37 - 2880x576@50Hz */ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928, 3184, 3456, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 2880x576@50Hz */ + /* 38 - 2880x576@50Hz */ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928, 3184, 3456, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 1920x1080i@50Hz */ + /* 39 - 1920x1080i@50Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952, 2120, 2304, 0, 1080, 1126, 1136, 1250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE) }, - /* 1920x1080i@100Hz */ + /* 40 - 1920x1080i@100Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, - /* 1280x720@100Hz */ + /* 41 - 1280x720@100Hz */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720, 1760, 1980, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 720x576@100Hz */ + /* 42 - 720x576@100Hz */ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732, 796, 864, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 720x576@100Hz */ + /* 43 - 720x576@100Hz */ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732, 796, 864, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 1440x576i@100Hz */ + /* 44 - 1440x576i@100Hz */ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 1440x576i@100Hz */ + /* 45 - 1440x576i@100Hz */ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 1920x1080i@120Hz */ + /* 46 - 1920x1080i@120Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, - /* 1280x720@120Hz */ + /* 47 - 1280x720@120Hz */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390, 1430, 1650, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 720x480@120Hz */ + /* 48 - 720x480@120Hz */ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736, 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 720x480@120Hz */ + /* 49 - 720x480@120Hz */ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736, 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 1440x480i@120Hz */ + /* 50 - 1440x480i@120Hz */ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE) }, - /* 1440x480i@120Hz */ + /* 51 - 1440x480i@120Hz */ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 
DRM_MODE_FLAG_INTERLACE) }, - /* 720x576@200Hz */ + /* 52 - 720x576@200Hz */ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732, 796, 864, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 720x576@200Hz */ + /* 53 - 720x576@200Hz */ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732, 796, 864, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 1440x576i@200Hz */ + /* 54 - 1440x576i@200Hz */ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE) }, - /* 1440x576i@200Hz */ + /* 55 - 1440x576i@200Hz */ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE) }, - /* 720x480@240Hz */ + /* 56 - 720x480@240Hz */ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736, 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 720x480@240Hz */ + /* 57 - 720x480@240Hz */ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736, 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 1440x480i@240 */ + /* 58 - 1440x480i@240 */ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE) }, - /* 1440x480i@240 */ + /* 59 - 1440x480i@240 */ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE) }, - /* 1280x720@24Hz */ + /* 60 - 1280x720@24Hz */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040, 3080, 3300, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1280x720@25Hz */ + /* 61 - 1280x720@25Hz */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700, 3740, 3960, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1280x720@30Hz */ + /* 62 - 1280x720@30Hz */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040, 3080, 3300, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1920x1080@120Hz */ + /* 63 - 1920x1080@120Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1920x1080@100Hz */ + /* 64 - 1920x1080@100Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, -- cgit v1.2.3-59-g8ed1b From a31546e25563b13f2a5d7216ec83da93ecf6d1ca Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Fri, 13 Apr 2012 16:31:39 -0300 Subject: drm: add DRM_MODE_FLAG_DBLCLK to CEA modes requiring it CEA modes 6, 7, 8, 9, 21, 22, 23, 24, 44, 45, 50, 51, 54, 55, 58 and 59 require sending pixel data 2 times. This doesn't mean the modes will work yet, but now the drivers know they're different. 
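
As a sketch of what the flag makes possible (this is not part of the patch; the struct, enum and helper below are invented for the example, only DRM_MODE_FLAG_DBLCLK is meant to mirror drm_mode.h): a driver that cannot drive pixel-repeated timings can now filter such modes out instead of silently mishandling them.

#include <stdio.h>

#define DRM_MODE_FLAG_DBLCLK  (1 << 12)   /* matches drm_mode.h */

enum sketch_mode_status { SKETCH_MODE_OK, SKETCH_MODE_BAD };

struct sketch_mode {
	const char *name;
	unsigned int flags;
};

/* A connector without pixel-repetition support rejects double-clocked modes. */
static enum sketch_mode_status sketch_mode_valid(const struct sketch_mode *mode)
{
	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return SKETCH_MODE_BAD;
	return SKETCH_MODE_OK;
}

int main(void)
{
	struct sketch_mode m = { "1440x480i", DRM_MODE_FLAG_DBLCLK };

	printf("%s: %s\n", m.name,
	       sketch_mode_valid(&m) == SKETCH_MODE_OK ? "ok" : "rejected");
	return 0;
}
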
Signed-off-by: Paulo Zanoni Reviewed-by: Adam Jackson Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid_modes.h | 38 ++++++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h index 712786a1256b..96190aed2174 100644 --- a/drivers/gpu/drm/drm_edid_modes.h +++ b/drivers/gpu/drm/drm_edid_modes.h @@ -409,20 +409,22 @@ static const struct drm_display_mode edid_cea_modes[] = { { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | - DRM_MODE_FLAG_INTERLACE) }, + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, /* 7 - 1440x480i@60Hz */ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | - DRM_MODE_FLAG_INTERLACE) }, + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, /* 8 - 1440x240@60Hz */ { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, 1602, 1716, 0, 240, 244, 247, 262, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_DBLCLK) }, /* 9 - 1440x240@60Hz */ { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, 1602, 1716, 0, 240, 244, 247, 262, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_DBLCLK) }, /* 10 - 2880x480i@60Hz */ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, 3204, 3432, 0, 480, 488, 494, 525, 0, @@ -474,20 +476,22 @@ static const struct drm_display_mode edid_cea_modes[] = { { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | - DRM_MODE_FLAG_INTERLACE) }, + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, /* 22 - 1440x576i@50Hz */ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | - DRM_MODE_FLAG_INTERLACE) }, + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, /* 23 - 1440x288@50Hz */ { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 1590, 1728, 0, 288, 290, 293, 312, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_DBLCLK) }, /* 24 - 1440x288@50Hz */ { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 1590, 1728, 0, 288, 290, 293, 312, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_DBLCLK) }, /* 25 - 2880x576i@50Hz */ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 3180, 3456, 0, 576, 580, 586, 625, 0, @@ -571,11 +575,13 @@ static const struct drm_display_mode edid_cea_modes[] = { /* 44 - 1440x576i@100Hz */ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_DBLCLK) }, /* 45 - 1440x576i@100Hz */ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_DBLCLK) }, /* 46 - 1920x1080i@120Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1094, 1125, 0, 
@@ -597,12 +603,12 @@ static const struct drm_display_mode edid_cea_modes[] = { { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | - DRM_MODE_FLAG_INTERLACE) }, + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, /* 51 - 1440x480i@120Hz */ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | - DRM_MODE_FLAG_INTERLACE) }, + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, /* 52 - 720x576@200Hz */ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732, 796, 864, 0, 576, 581, 586, 625, 0, @@ -615,12 +621,12 @@ static const struct drm_display_mode edid_cea_modes[] = { { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | - DRM_MODE_FLAG_INTERLACE) }, + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, /* 55 - 1440x576i@200Hz */ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | - DRM_MODE_FLAG_INTERLACE) }, + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, /* 56 - 720x480@240Hz */ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736, 798, 858, 0, 480, 489, 495, 525, 0, @@ -633,12 +639,12 @@ static const struct drm_display_mode edid_cea_modes[] = { { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | - DRM_MODE_FLAG_INTERLACE) }, + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, /* 59 - 1440x480i@240 */ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | - DRM_MODE_FLAG_INTERLACE) }, + DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, /* 60 - 1280x720@24Hz */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040, 3080, 3300, 0, 720, 725, 730, 750, 0, -- cgit v1.2.3-59-g8ed1b From 33c7531df850869a7b623d4dfcfcb7e2ab873844 Mon Sep 17 00:00:00 2001 From: Adam Jackson Date: Fri, 13 Apr 2012 16:33:29 -0400 Subject: drm/edid: Document drm_mode_find_dmt Signed-off-by: Adam Jackson Tested-by: Takashi Iwai Reviewed-by: Rodrigo Vivi Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index e07491088400..a75aefef528e 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -486,6 +486,16 @@ static void edid_fixup_preferred(struct drm_connector *connector, preferred_mode->type |= DRM_MODE_TYPE_PREFERRED; } +/* + * drm_mode_find_dmt - Create a copy of a mode if present in DMT + * @dev: Device to duplicate against + * @hsize: Mode width + * @vsize: Mode height + * @fresh: Mode refresh rate + * + * Walk the DMT mode list looking for a match for the given parameters. + * Return a newly allocated copy of the mode, or NULL if not found. + */ struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, int hsize, int vsize, int fresh) { -- cgit v1.2.3-59-g8ed1b From f8b46a05e6ced02e75cd782c015a57e67d5c644d Mon Sep 17 00:00:00 2001 From: Adam Jackson Date: Fri, 13 Apr 2012 16:33:30 -0400 Subject: drm/edid: Rewrite drm_mode_find_dmt search loop No functional change, but will make an upcoming change clearer. 
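
The shape of the rewritten loop is easier to see in isolation: one rejection test per criterion, each ending in continue, so the follow-up change can add a reduced-blanking test as one more line. The standalone sketch below models that structure over a made-up mode table and is not the kernel function itself.

#include <stddef.h>
#include <stdio.h>

struct sketch_dmt { int hdisplay, vdisplay, vrefresh; };

static const struct sketch_dmt sketch_dmt_modes[] = {
	{  800,  600, 60 },
	{ 1024,  768, 60 },
	{ 1280, 1024, 75 },
};

static const struct sketch_dmt *sketch_find_dmt(int hsize, int vsize, int fresh)
{
	size_t i;

	for (i = 0; i < sizeof(sketch_dmt_modes) / sizeof(sketch_dmt_modes[0]); i++) {
		const struct sketch_dmt *ptr = &sketch_dmt_modes[i];

		if (hsize != ptr->hdisplay)
			continue;
		if (vsize != ptr->vdisplay)
			continue;
		if (fresh != ptr->vrefresh)
			continue;

		return ptr;	/* the real code duplicates the mode here */
	}

	return NULL;
}

int main(void)
{
	const struct sketch_dmt *m = sketch_find_dmt(1024, 768, 60);

	printf("found: %s\n", m ? "yes" : "no");
	return 0;
}
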
Signed-off-by: Adam Jackson Tested-by: Takashi Iwai Reviewed-by: Rodrigo Vivi Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index a75aefef528e..9c8fa8860f6b 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -499,20 +499,21 @@ static void edid_fixup_preferred(struct drm_connector *connector, struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, int hsize, int vsize, int fresh) { - struct drm_display_mode *mode = NULL; int i; for (i = 0; i < drm_num_dmt_modes; i++) { const struct drm_display_mode *ptr = &drm_dmt_modes[i]; - if (hsize == ptr->hdisplay && - vsize == ptr->vdisplay && - fresh == drm_mode_vrefresh(ptr)) { - /* get the expected default mode */ - mode = drm_mode_duplicate(dev, ptr); - break; - } + if (hsize != ptr->hdisplay) + continue; + if (vsize != ptr->vdisplay) + continue; + if (fresh != drm_mode_vrefresh(ptr)) + continue; + + return drm_mode_duplicate(dev, ptr); } - return mode; + + return NULL; } EXPORT_SYMBOL(drm_mode_find_dmt); -- cgit v1.2.3-59-g8ed1b From f6e252bac45cab5edc30c2ede971def51e272c9b Mon Sep 17 00:00:00 2001 From: Adam Jackson Date: Fri, 13 Apr 2012 16:33:31 -0400 Subject: drm/edid: Allow drm_mode_find_dmt to hunt for reduced-blanking modes It won't find any, yet. Fix up callers to match: standard mode codes will look prefer r-b modes for a given size if present, EST3 mode codes will look for exactly the r-b-ness mentioned in the mode code. This might mean fewer modes matched for EST3 mode codes between now and when the DMT mode list regrows the r-b modes, but practically speaking EST3 codes don't exist in the wild. Signed-off-by: Adam Jackson Tested-by: Takashi Iwai Reviewed-by: Rodrigo Vivi Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid.c | 37 ++++++++++++++++++++++++------------- drivers/gpu/drm/drm_fb_helper.c | 2 +- include/drm/drm_crtc.h | 3 ++- 3 files changed, 27 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 9c8fa8860f6b..ec0464c91847 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -486,18 +486,29 @@ static void edid_fixup_preferred(struct drm_connector *connector, preferred_mode->type |= DRM_MODE_TYPE_PREFERRED; } +static bool +mode_is_rb(const struct drm_display_mode *mode) +{ + return (mode->htotal - mode->hdisplay == 160) && + (mode->hsync_end - mode->hdisplay == 80) && + (mode->hsync_end - mode->hsync_start == 32) && + (mode->vsync_start - mode->vdisplay == 3); +} + /* * drm_mode_find_dmt - Create a copy of a mode if present in DMT * @dev: Device to duplicate against * @hsize: Mode width * @vsize: Mode height * @fresh: Mode refresh rate + * @rb: Mode reduced-blanking-ness * * Walk the DMT mode list looking for a match for the given parameters. * Return a newly allocated copy of the mode, or NULL if not found. 
*/ struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, - int hsize, int vsize, int fresh) + int hsize, int vsize, int fresh, + bool rb) { int i; @@ -509,6 +520,8 @@ struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, continue; if (fresh != drm_mode_vrefresh(ptr)) continue; + if (rb != mode_is_rb(ptr)) + continue; return drm_mode_duplicate(dev, ptr); } @@ -742,10 +755,17 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid, } /* check whether it can be found in default mode table */ - mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate); + if (drm_monitor_supports_rb(edid)) { + mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate, + true); + if (mode) + return mode; + } + mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate, false); if (mode) return mode; + /* okay, generate it */ switch (timing_level) { case LEVEL_DMT: break; @@ -919,15 +939,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, return mode; } -static bool -mode_is_rb(const struct drm_display_mode *mode) -{ - return (mode->htotal - mode->hdisplay == 160) && - (mode->hsync_end - mode->hdisplay == 80) && - (mode->hsync_end - mode->hsync_start == 32) && - (mode->vsync_start - mode->vdisplay == 3); -} - static bool mode_in_hsync_range(const struct drm_display_mode *mode, struct edid *edid, u8 *t) @@ -1073,8 +1084,8 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing) mode = drm_mode_find_dmt(connector->dev, est3_modes[m].w, est3_modes[m].h, - est3_modes[m].r - /*, est3_modes[m].rb */); + est3_modes[m].r, + est3_modes[m].rb); if (mode) { drm_mode_probed_add(connector, mode); modes++; diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index a0d6e894d97c..6e19dd156be0 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1083,7 +1083,7 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper, /* try and find a 1024x768 mode on each connector */ can_clone = true; - dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60); + dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60, false); for (i = 0; i < fb_helper->connector_count; i++) { diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 2d63a02571ff..6f5faf669959 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -1015,7 +1015,8 @@ extern int drm_edid_header_is_valid(const u8 *raw_edid); extern bool drm_edid_block_valid(u8 *raw_edid); extern bool drm_edid_is_valid(struct edid *edid); struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, - int hsize, int vsize, int fresh); + int hsize, int vsize, int fresh, + bool rb); extern int drm_mode_create_dumb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -- cgit v1.2.3-59-g8ed1b From 9a225c9ce2b03ed70c13fc49ee8ee332cefe4e3a Mon Sep 17 00:00:00 2001 From: Adam Jackson Date: Fri, 13 Apr 2012 16:33:32 -0400 Subject: drm/edid: Remove a misleading comment mode_in_range() handles what this was warning about. 
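
A small standalone sketch of the kind of range filtering meant here, with invented field names and limit values: every candidate mode, reduced blanking or not, still has to fall inside the refresh and sync ranges the monitor advertises, which is why the extra caveat was unnecessary.

#include <stdbool.h>
#include <stdio.h>

struct sketch_range { int min_vrefresh, max_vrefresh, min_hkhz, max_hkhz; };
struct sketch_mode  { int vrefresh, hkhz; };

/* Keep a mode only when it fits the monitor's advertised limits. */
static bool sketch_mode_in_range(const struct sketch_mode *m,
				 const struct sketch_range *r)
{
	if (m->vrefresh < r->min_vrefresh || m->vrefresh > r->max_vrefresh)
		return false;
	if (m->hkhz < r->min_hkhz || m->hkhz > r->max_hkhz)
		return false;
	return true;
}

int main(void)
{
	struct sketch_range limits = { 50, 75, 30, 83 };
	struct sketch_mode rb_mode = { 60, 64 };

	printf("kept: %d\n", sketch_mode_in_range(&rb_mode, &limits));
	return 0;
}
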
Signed-off-by: Adam Jackson Tested-by: Takashi Iwai Reviewed-by: Rodrigo Vivi Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index ec0464c91847..043fa5a2412c 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1016,10 +1016,6 @@ mode_in_range(const struct drm_display_mode *mode, struct edid *edid, return true; } -/* - * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will - * need to account for them. - */ static int drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid, struct detailed_timing *timing) -- cgit v1.2.3-59-g8ed1b From cd4cd3ded8efc49de6f5056dfb0d60e69b388b71 Mon Sep 17 00:00:00 2001 From: Adam Jackson Date: Fri, 13 Apr 2012 16:33:33 -0400 Subject: drm/edid: s/drm_gtf_modes_for_range/drm_dmt_modes_for_range/ Slightly more honest naming. Signed-off-by: Adam Jackson Tested-by: Takashi Iwai Reviewed-by: Rodrigo Vivi Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 043fa5a2412c..cb40611a1d1d 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1017,7 +1017,7 @@ mode_in_range(const struct drm_display_mode *mode, struct edid *edid, } static int -drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid, +drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid, struct detailed_timing *timing) { int i, modes = 0; @@ -1045,7 +1045,7 @@ do_inferred_modes(struct detailed_timing *timing, void *c) int gtf = (closure->edid->features & DRM_EDID_FEATURE_DEFAULT_GTF); if (gtf && data->type == EDID_DETAIL_MONITOR_RANGE) - closure->modes += drm_gtf_modes_for_range(closure->connector, + closure->modes += drm_dmt_modes_for_range(closure->connector, closure->edid, timing); } -- cgit v1.2.3-59-g8ed1b From 383a6d5fdd9a36832a7f35e2ba17de95a8a10eba Mon Sep 17 00:00:00 2001 From: Adam Jackson Date: Fri, 13 Apr 2012 16:33:34 -0400 Subject: drm/edid: Add the reduced blanking DMT modes to the DMT list Copied from the list in xserver. Signed-off-by: Adam Jackson Tested-by: Takashi Iwai Reviewed-by: Rodrigo Vivi Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid_modes.h | 94 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 93 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h index 96190aed2174..a8f604a6bcb3 100644 --- a/drivers/gpu/drm/drm_edid_modes.h +++ b/drivers/gpu/drm/drm_edid_modes.h @@ -30,7 +30,6 @@ /* * Autogenerated from the DMT spec. * This table is copied from xfree86/modes/xf86EdidModes.c. - * But the mode with Reduced blank feature is deleted. 
*/ static const struct drm_display_mode drm_dmt_modes[] = { /* 640x350@85Hz */ @@ -81,6 +80,10 @@ static const struct drm_display_mode drm_dmt_modes[] = { { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832, 896, 1048, 0, 600, 601, 604, 631, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 800x600@120Hz RB */ + { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848, + 880, 960, 0, 600, 603, 607, 636, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 848x480@60Hz */ { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864, 976, 1088, 0, 480, 486, 494, 517, 0, @@ -106,10 +109,18 @@ static const struct drm_display_mode drm_dmt_modes[] = { { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072, 1168, 1376, 0, 768, 769, 772, 808, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1024x768@120Hz RB */ + { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072, + 1104, 1184, 0, 768, 771, 775, 813, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1152x864@75Hz */ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, 1344, 1600, 0, 864, 865, 868, 900, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1280x768@60Hz RB */ + { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328, + 1360, 1440, 0, 768, 771, 778, 790, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1280x768@60Hz */ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344, 1472, 1664, 0, 768, 771, 778, 798, 0, @@ -122,6 +133,14 @@ static const struct drm_display_mode drm_dmt_modes[] = { { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360, 1496, 1712, 0, 768, 771, 778, 809, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1280x768@120Hz RB */ + { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328, + 1360, 1440, 0, 768, 771, 778, 813, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 1280x800@60Hz RB */ + { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328, + 1360, 1440, 0, 800, 803, 809, 823, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1280x800@60Hz */ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352, 1480, 1680, 0, 800, 803, 809, 831, 0, @@ -134,6 +153,10 @@ static const struct drm_display_mode drm_dmt_modes[] = { { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360, 1496, 1712, 0, 800, 803, 809, 843, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1280x800@120Hz RB */ + { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328, + 1360, 1440, 0, 800, 803, 809, 847, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1280x960@60Hz */ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376, 1488, 1800, 0, 960, 961, 964, 1000, 0, @@ -142,6 +165,10 @@ static const struct drm_display_mode drm_dmt_modes[] = { { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344, 1504, 1728, 0, 960, 961, 964, 1011, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1280x960@120Hz RB */ + { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328, + 1360, 1440, 0, 960, 963, 967, 1017, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1280x1024@60Hz */ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328, 1440, 1688, 0, 1024, 1025, 1028, 1066, 0, @@ -154,10 +181,22 @@ static const struct drm_display_mode drm_dmt_modes[] = { { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344, 1504, 1728, 0, 1024, 1025, 1028, 1072, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1280x1024@120Hz RB */ + { 
DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328, + 1360, 1440, 0, 1024, 1027, 1034, 1084, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1360x768@60Hz */ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424, 1536, 1792, 0, 768, 771, 777, 795, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1360x768@120Hz RB */ + { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408, + 1440, 1520, 0, 768, 771, 776, 813, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 1400x1050@60Hz RB */ + { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448, + 1480, 1560, 0, 1050, 1053, 1057, 1080, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1440x1050@60Hz */ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488, 1632, 1864, 0, 1050, 1053, 1057, 1089, 0, @@ -170,6 +209,14 @@ static const struct drm_display_mode drm_dmt_modes[] = { { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504, 1656, 1912, 0, 1050, 1053, 1057, 1105, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1400x1050@120Hz RB */ + { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448, + 1480, 1560, 0, 1050, 1053, 1057, 1112, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 1440x900@60Hz RB */ + { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488, + 1520, 1600, 0, 900, 903, 909, 926, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1440x900@60Hz */ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520, 1672, 1904, 0, 900, 903, 909, 934, 0, @@ -182,6 +229,10 @@ static const struct drm_display_mode drm_dmt_modes[] = { { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544, 1696, 1952, 0, 900, 903, 909, 948, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1440x900@120Hz RB */ + { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488, + 1520, 1600, 0, 900, 903, 909, 953, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1600x1200@60Hz */ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664, 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, @@ -202,6 +253,14 @@ static const struct drm_display_mode drm_dmt_modes[] = { { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664, 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1600x1200@120Hz RB */ + { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648, + 1680, 1760, 0, 1200, 1203, 1207, 1271, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 1680x1050@60Hz RB */ + { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728, + 1760, 1840, 0, 1050, 1053, 1059, 1080, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1680x1050@60Hz */ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784, 1960, 2240, 0, 1050, 1053, 1059, 1089, 0, @@ -214,6 +273,10 @@ static const struct drm_display_mode drm_dmt_modes[] = { { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808, 1984, 2288, 0, 1050, 1053, 1059, 1105, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1680x1050@120Hz RB */ + { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728, + 1760, 1840, 0, 1050, 1053, 1059, 1112, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1792x1344@60Hz */ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920, 2120, 2448, 0, 1344, 1345, 1348, 1394, 0, @@ -222,6 +285,10 @@ static const struct drm_display_mode drm_dmt_modes[] = { { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888, 2104, 2456, 0, 1344, 
1345, 1348, 1417, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1792x1344@120Hz RB */ + { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840, + 1872, 1952, 0, 1344, 1347, 1351, 1423, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1853x1392@60Hz */ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952, 2176, 2528, 0, 1392, 1393, 1396, 1439, 0, @@ -230,6 +297,14 @@ static const struct drm_display_mode drm_dmt_modes[] = { { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984, 2208, 2560, 0, 1392, 1395, 1399, 1500, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1856x1392@120Hz RB */ + { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904, + 1936, 2016, 0, 1392, 1395, 1399, 1474, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 1920x1200@60Hz RB */ + { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968, + 2000, 2080, 0, 1200, 1203, 1209, 1235, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1920x1200@60Hz */ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056, 2256, 2592, 0, 1200, 1203, 1209, 1245, 0, @@ -242,6 +317,10 @@ static const struct drm_display_mode drm_dmt_modes[] = { { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064, 2272, 2624, 0, 1200, 1203, 1209, 1262, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1920x1200@120Hz RB */ + { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968, + 2000, 2080, 0, 1200, 1203, 1209, 1271, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1920x1440@60Hz */ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048, 2256, 2600, 0, 1440, 1441, 1444, 1500, 0, @@ -250,6 +329,14 @@ static const struct drm_display_mode drm_dmt_modes[] = { { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064, 2288, 2640, 0, 1440, 1441, 1444, 1500, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1920x1440@120Hz RB */ + { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968, + 2000, 2080, 0, 1440, 1443, 1447, 1525, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 2560x1600@60Hz RB */ + { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608, + 2640, 2720, 0, 1600, 1603, 1609, 1646, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 2560x1600@60Hz */ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752, 3032, 3504, 0, 1600, 1603, 1609, 1658, 0, @@ -262,6 +349,11 @@ static const struct drm_display_mode drm_dmt_modes[] = { { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768, 3048, 3536, 0, 1600, 1603, 1609, 1682, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 2560x1600@120Hz RB */ + { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608, + 2640, 2720, 0, 1600, 1603, 1609, 1694, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + }; static const int drm_num_dmt_modes = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode); -- cgit v1.2.3-59-g8ed1b From 6201ee39263e9ae251648e90e0cbb2496de92016 Mon Sep 17 00:00:00 2001 From: Adam Jackson Date: Fri, 13 Apr 2012 16:33:35 -0400 Subject: drm/edid: Fix some comment typos in the DMT mode list Signed-off-by: Adam Jackson Tested-by: Takashi Iwai Reviewed-by: Rodrigo Vivi Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid_modes.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h index a8f604a6bcb3..8bf8235f8ba4 100644 --- a/drivers/gpu/drm/drm_edid_modes.h +++ 
b/drivers/gpu/drm/drm_edid_modes.h @@ -197,15 +197,15 @@ static const struct drm_display_mode drm_dmt_modes[] = { { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448, 1480, 1560, 0, 1050, 1053, 1057, 1080, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 1440x1050@60Hz */ + /* 1400x1050@60Hz */ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488, 1632, 1864, 0, 1050, 1053, 1057, 1089, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1440x1050@75Hz */ + /* 1400x1050@75Hz */ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504, 1648, 1896, 0, 1050, 1053, 1057, 1099, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1440x1050@85Hz */ + /* 1400x1050@85Hz */ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504, 1656, 1912, 0, 1050, 1053, 1057, 1105, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, @@ -281,7 +281,7 @@ static const struct drm_display_mode drm_dmt_modes[] = { { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920, 2120, 2448, 0, 1344, 1345, 1348, 1394, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1729x1344@75Hz */ + /* 1792x1344@75Hz */ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888, 2104, 2456, 0, 1344, 1345, 1348, 1417, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, @@ -289,7 +289,7 @@ static const struct drm_display_mode drm_dmt_modes[] = { { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840, 1872, 1952, 0, 1344, 1347, 1351, 1423, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 1853x1392@60Hz */ + /* 1856x1392@60Hz */ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952, 2176, 2528, 0, 1392, 1393, 1396, 1439, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, -- cgit v1.2.3-59-g8ed1b From cb21aafe121b1c3ad4c77cc5c22320163f16ba42 Mon Sep 17 00:00:00 2001 From: Adam Jackson Date: Fri, 13 Apr 2012 16:33:36 -0400 Subject: drm/edid: Do drm_dmt_modes_for_range() for all range descriptor types EDID 1.4 retcons the meaning of the "GTF feature" bit to mean "is continuous frequency", and moves the set of supported timing formulas into the range descriptor itself. In any event, the range descriptor can act as a filter on the DMT list without regard to a specific timing formula. Signed-off-by: Adam Jackson Tested-by: Takashi Iwai Reviewed-by: Rodrigo Vivi Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index cb40611a1d1d..9363349fa034 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1042,12 +1042,13 @@ do_inferred_modes(struct detailed_timing *timing, void *c) { struct detailed_mode_closure *closure = c; struct detailed_non_pixel *data = &timing->data.other_data; - int gtf = (closure->edid->features & DRM_EDID_FEATURE_DEFAULT_GTF); - if (gtf && data->type == EDID_DETAIL_MONITOR_RANGE) - closure->modes += drm_dmt_modes_for_range(closure->connector, - closure->edid, - timing); + if (data->type != EDID_DETAIL_MONITOR_RANGE) + return; + + closure->modes += drm_dmt_modes_for_range(closure->connector, + closure->edid, + timing); } static int -- cgit v1.2.3-59-g8ed1b From cffd75480ceb1cefffb5595b03ce8383d0ba40ad Mon Sep 17 00:00:00 2001 From: Adam Jackson Date: Fri, 13 Apr 2012 16:33:38 -0400 Subject: drm/edid: Give the est3 mode struct a real name We want the same type for extra modes inferred from ranges. 
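A sketch of why the shared type helps (illustrative, mirroring the loop drm_est3_modes() already uses above): any table of struct minimode entries can be walked the same way.

static int add_minimode_table(struct drm_connector *connector,
			      const struct minimode *table, int count)
{
	int i, modes = 0;

	for (i = 0; i < count; i++) {
		/* look the entry up in the DMT list, honouring its
		 * reduced-blanking flag */
		struct drm_display_mode *mode =
			drm_mode_find_dmt(connector->dev, table[i].w,
					  table[i].h, table[i].r,
					  table[i].rb);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			modes++;
		}
	}
	return modes;
}

The helper name is invented for illustration; the extra_modes[] walkers added later in the series generate modes with drm_gtf_mode()/drm_cvt_mode() rather than looking them up, but they iterate the same kind of minimode table.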
Signed-off-by: Adam Jackson Tested-by: Takashi Iwai Reviewed-by: Rodrigo Vivi Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid_modes.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h index 8bf8235f8ba4..c212133ca36c 100644 --- a/drivers/gpu/drm/drm_edid_modes.h +++ b/drivers/gpu/drm/drm_edid_modes.h @@ -412,12 +412,14 @@ static const struct drm_display_mode edid_est_modes[] = { DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */ }; -static const struct { +struct minimode { short w; short h; short r; short rb; -} est3_modes[] = { +}; + +static const struct minimode est3_modes[] = { /* byte 6 */ { 640, 350, 85, 0 }, { 640, 400, 85, 0 }, -- cgit v1.2.3-59-g8ed1b From b61b2140feaa6aca51c63db94aa5217cd82705d1 Mon Sep 17 00:00:00 2001 From: Adam Jackson Date: Fri, 13 Apr 2012 16:33:39 -0400 Subject: drm/edid: Add extra_modes Some common sizes that don't show up in DMT. Signed-off-by: Adam Jackson Tested-by: Takashi Iwai Reviewed-by: Rodrigo Vivi Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid_modes.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h index c212133ca36c..b027fd50cf51 100644 --- a/drivers/gpu/drm/drm_edid_modes.h +++ b/drivers/gpu/drm/drm_edid_modes.h @@ -473,6 +473,17 @@ static const struct minimode est3_modes[] = { }; static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]); +static const struct minimode extra_modes[] = { + { 1024, 576, 60, 0 }, + { 1366, 768, 60, 0 }, + { 1600, 900, 60, 0 }, + { 1680, 945, 60, 0 }, + { 1920, 1080, 60, 0 }, + { 2048, 1152, 60, 0 }, + { 2048, 1536, 60, 0 }, +}; +static const int num_extra_modes = sizeof(extra_modes) / sizeof(extra_modes[0]); + /* * Probably taken from CEA-861 spec. * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c. 
-- cgit v1.2.3-59-g8ed1b From b309bd37a1357bd4391dace247cceb9d9121d20a Mon Sep 17 00:00:00 2001 From: Adam Jackson Date: Fri, 13 Apr 2012 16:33:40 -0400 Subject: drm/edid: Generate modes from extra_modes for range descriptors Signed-off-by: Adam Jackson Tested-by: Takashi Iwai Reviewed-by: Rodrigo Vivi Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid.c | 73 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 9363349fa034..38ee2f22304c 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1037,11 +1037,61 @@ drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid, return modes; } +static int +drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid, + struct detailed_timing *timing) +{ + int i, modes = 0; + struct drm_display_mode *newmode; + struct drm_device *dev = connector->dev; + + for (i = 0; i < num_extra_modes; i++) { + const struct minimode *m = &extra_modes[i]; + newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0); + + if (!mode_in_range(newmode, edid, timing)) { + drm_mode_destroy(dev, newmode); + continue; + } + + drm_mode_probed_add(connector, newmode); + modes++; + } + + return modes; +} + +static int +drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid, + struct detailed_timing *timing) +{ + int i, modes = 0; + struct drm_display_mode *newmode; + struct drm_device *dev = connector->dev; + bool rb = drm_monitor_supports_rb(edid); + + for (i = 0; i < num_extra_modes; i++) { + const struct minimode *m = &extra_modes[i]; + newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0); + + if (!mode_in_range(newmode, edid, timing)) { + drm_mode_destroy(dev, newmode); + continue; + } + + drm_mode_probed_add(connector, newmode); + modes++; + } + + return modes; +} + static void do_inferred_modes(struct detailed_timing *timing, void *c) { struct detailed_mode_closure *closure = c; struct detailed_non_pixel *data = &timing->data.other_data; + struct detailed_data_monitor_range *range = &data->data.range; if (data->type != EDID_DETAIL_MONITOR_RANGE) return; @@ -1049,6 +1099,29 @@ do_inferred_modes(struct detailed_timing *timing, void *c) closure->modes += drm_dmt_modes_for_range(closure->connector, closure->edid, timing); + + if (!version_greater(closure->edid, 1, 1)) + return; /* GTF not defined yet */ + + switch (range->flags) { + case 0x02: /* secondary gtf, XXX could do more */ + case 0x00: /* default gtf */ + closure->modes += drm_gtf_modes_for_range(closure->connector, + closure->edid, + timing); + break; + case 0x04: /* cvt, only in 1.4+ */ + if (!version_greater(closure->edid, 1, 3)) + break; + + closure->modes += drm_cvt_modes_for_range(closure->connector, + closure->edid, + timing); + break; + case 0x01: /* just the ranges, no formula */ + default: + break; + } } static int -- cgit v1.2.3-59-g8ed1b From fc48f169dd2e461e687a63c3a69ade57b4ece59e Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Fri, 20 Apr 2012 12:59:33 +0100 Subject: drm/edid: add missing NULL checks. 
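The failure being guarded against, spelled out (this just restates what the hunks below do): drm_gtf_mode() and drm_cvt_mode() allocate a fresh drm_display_mode and return NULL if that allocation fails, so each call site has to bail out before handing the pointer to mode_in_range(), drm_mode_hsync() or similar:

	newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0);
	if (!newmode)
		return modes;	/* keep the modes gathered so far */

	/* only a non-NULL mode reaches the range check */
	if (!mode_in_range(newmode, edid, timing)) {
		drm_mode_destroy(dev, newmode);
		continue;
	}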
Reviewed-by: Dave Airlie Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 38ee2f22304c..bad2f11aa224 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -779,6 +779,8 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid, * secondary GTF curve. Please don't do that. */ mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); + if (!mode) + return NULL; if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) { drm_mode_destroy(dev, mode); mode = drm_gtf_mode_complex(dev, hsize, vsize, @@ -1048,6 +1050,8 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid, for (i = 0; i < num_extra_modes; i++) { const struct minimode *m = &extra_modes[i]; newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0); + if (!newmode) + return modes; if (!mode_in_range(newmode, edid, timing)) { drm_mode_destroy(dev, newmode); @@ -1073,6 +1077,8 @@ drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid, for (i = 0; i < num_extra_modes; i++) { const struct minimode *m = &extra_modes[i]; newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0); + if (!newmode) + return modes; if (!mode_in_range(newmode, edid, timing)) { drm_mode_destroy(dev, newmode); -- cgit v1.2.3-59-g8ed1b From 02809179bb10f73be38cb2d221f3cc9b871daaa5 Mon Sep 17 00:00:00 2001 From: Jim Cromie Date: Fri, 20 Apr 2012 13:12:16 +0100 Subject: drm: replace open-coded ARRAY_SIZE with macro [airlied: fixed one more new one added since] Signed-off-by: Jim Cromie Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid_modes.h | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h index b027fd50cf51..ff98a7eb38dd 100644 --- a/drivers/gpu/drm/drm_edid_modes.h +++ b/drivers/gpu/drm/drm_edid_modes.h @@ -471,7 +471,7 @@ static const struct minimode est3_modes[] = { { 1920, 1440, 60, 0 }, { 1920, 1440, 75, 0 }, }; -static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]); +static const int num_est3_modes = ARRAY_SIZE(est3_modes); static const struct minimode extra_modes[] = { { 1024, 576, 60, 0 }, @@ -482,7 +482,7 @@ static const struct minimode extra_modes[] = { { 2048, 1152, 60, 0 }, { 2048, 1536, 60, 0 }, }; -static const int num_extra_modes = sizeof(extra_modes) / sizeof(extra_modes[0]); +static const int num_extra_modes = ARRAY_SIZE(extra_modes); /* * Probably taken from CEA-861 spec. @@ -771,5 +771,4 @@ static const struct drm_display_mode edid_cea_modes[] = { 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, }; -static const int drm_num_cea_modes = - sizeof (edid_cea_modes) / sizeof (edid_cea_modes[0]); +static const int drm_num_cea_modes = ARRAY_SIZE(edid_cea_modes); -- cgit v1.2.3-59-g8ed1b From 3801a7fd8615365a58960561d3bd3479b485ed3e Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Fri, 20 Apr 2012 13:13:54 +0100 Subject: drm/i915/tv: fix open-coded ARRAY_SIZE. 
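For reference, since the macro itself is outside the diff: ARRAY_SIZE() from include/linux/kernel.h evaluates, apart from an extra compile-time check that its argument really is an array and not a pointer, to the same division the open-coded sites spell out:

	/* the core of the macro; the real one also adds __must_be_array() */
	#define ARRAY_SIZE_SKETCH(arr)	(sizeof(arr) / sizeof((arr)[0]))

so sizeof(tv_modes)/sizeof(tv_modes[0]) and ARRAY_SIZE(tv_modes) yield the same element count, the macro just being harder to get wrong when the array's element type changes.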
Signed-off-by: Dave Airlie Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_tv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index ca12c709f3eb..67f444d632fb 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -811,7 +811,7 @@ intel_tv_mode_lookup(const char *tv_format) { int i; - for (i = 0; i < sizeof(tv_modes) / sizeof(tv_modes[0]); i++) { + for (i = 0; i < ARRAY_SIZE(tv_modes); i++) { const struct tv_mode *tv_mode = &tv_modes[i]; if (!strcmp(tv_format, tv_mode->name)) -- cgit v1.2.3-59-g8ed1b From a85d4bcb8a0cd5b3c754f98ff91ef2b9b3a73bc5 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Fri, 20 Apr 2012 11:50:01 -0700 Subject: drm/i915: rc6 residency (fix the fix) Chris' fix for my 32b breakage was incorrect. do_div returns a remainder. Go back to a divide macro which is more 32b friendly. Tested on x86-64. This has only been compile tested on 32b systems. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=48756 Cc: Chris Wilson Sincere-apologies: Chris Wilson Signed-off-by: Ben Widawsky [danvet: fixup 32bit compile-fail.] Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_sysfs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index f1b5108931fe..79f83445afa0 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -39,8 +39,8 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg) if (!intel_enable_rc6(dev)) return 0; - raw_time = I915_READ(reg) * 128ULL + 500; - return do_div(raw_time, 100000); + raw_time = I915_READ(reg) * 128ULL; + return DIV_ROUND_UP_ULL(raw_time, 100000); } static ssize_t -- cgit v1.2.3-59-g8ed1b From c09dedb7a50e23f0166e0bbae61c75c7ec23cf7f Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Mon, 23 Apr 2012 17:40:33 +0100 Subject: drm/edid: Add a workaround for 1366x768 HD panel HD panel (1366x768) found most commonly on laptops can't be represented exactly in CVT/DMT expression, which leads to 1368x768 instead, because 1366 can't be divided by 8. Add a hack to convert to 1366x768 manually as an exception. 
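A quick check of the arithmetic behind that explanation: GTF/CVT build horizontal timings from 8-pixel character cells, and

	1366 / 8 = 170.75	(not a whole number of cells)
	 171 * 8 = 1368		(the width the generated mode ends up with)

so the generated timing comes out as 1368x768, and the fixup below rewrites hdisplay back to 1366, pulling hsync_start/hsync_end in with it.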
Signed-off-by: Takashi Iwai Acked-by: Adam Jackson Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index bad2f11aa224..c6366e90041e 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1039,6 +1039,19 @@ drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid, return modes; } +/* fix up 1366x768 mode from 1368x768; + * GFT/CVT can't express 1366 width which isn't dividable by 8 + */ +static void fixup_mode_1366x768(struct drm_display_mode *mode) +{ + if (mode->hdisplay == 1368 && mode->vdisplay == 768) { + mode->hdisplay = 1366; + mode->hsync_start--; + mode->hsync_end--; + drm_mode_set_name(mode); + } +} + static int drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid, struct detailed_timing *timing) @@ -1053,6 +1066,7 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid, if (!newmode) return modes; + fixup_mode_1366x768(newmode); if (!mode_in_range(newmode, edid, timing)) { drm_mode_destroy(dev, newmode); continue; @@ -1080,6 +1094,7 @@ drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid, if (!newmode) return modes; + fixup_mode_1366x768(newmode); if (!mode_in_range(newmode, edid, timing)) { drm_mode_destroy(dev, newmode); continue; -- cgit v1.2.3-59-g8ed1b From eccea7920cfb009c2fa40e9ecdce8c36f61cab66 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 26 Mar 2012 15:12:54 -0400 Subject: drm/radeon/kms: improve bpc handling (v2) Improve handling of bpc (bits per color) in radeon. In most cases we want 8 except for HDMI, DP, LVDS, and eDP. v2: handle DP better. Signed-off-by: Alex Deucher Tested-by: Lennert Buytenhek Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/atombios_crtc.c | 7 ++-- drivers/gpu/drm/radeon/atombios_dp.c | 7 ++-- drivers/gpu/drm/radeon/atombios_encoders.c | 4 +-- drivers/gpu/drm/radeon/radeon_connectors.c | 56 ++++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/radeon_mode.h | 1 + 5 files changed, 63 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index b5ff1f7b6f7e..2fab38f5a08e 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -588,8 +588,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, if (encoder->crtc == crtc) { radeon_encoder = to_radeon_encoder(encoder); connector = radeon_get_connector_for_encoder(encoder); - /* if (connector && connector->display_info.bpc) - bpc = connector->display_info.bpc; */ + bpc = radeon_get_monitor_bpc(connector); encoder_mode = atombios_get_encoder_mode(encoder); is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock); if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || @@ -965,9 +964,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; int dp_clock; - - /* if (connector->display_info.bpc) - bpc = connector->display_info.bpc; */ + bpc = radeon_get_monitor_bpc(connector); switch (encoder_mode) { case ATOM_ENCODER_MODE_DP_MST: diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index c57d85664e77..cadbb107c803 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -405,13 
+405,10 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE], /* get bpc from the EDID */ static int convert_bpc_to_bpp(int bpc) { -#if 0 if (bpc == 0) return 24; else return bpc * 3; -#endif - return 24; } /* get the max pix clock supported by the link rate and lane num */ @@ -463,7 +460,7 @@ static int radeon_dp_get_dp_lane_number(struct drm_connector *connector, u8 dpcd[DP_DPCD_SIZE], int pix_clock) { - int bpp = convert_bpc_to_bpp(connector->display_info.bpc); + int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector)); int max_link_rate = dp_get_max_link_rate(dpcd); int max_lane_num = dp_get_max_lane_number(dpcd); int lane_num; @@ -482,7 +479,7 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector, u8 dpcd[DP_DPCD_SIZE], int pix_clock) { - int bpp = convert_bpc_to_bpp(connector->display_info.bpc); + int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector)); int lane_num, max_pix_clock; if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index e607c4d7dd98..06b209b2e229 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -541,7 +541,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo dp_clock = dig_connector->dp_clock; dp_lane_count = dig_connector->dp_lane_count; hpd_id = radeon_connector->hpd.hpd; - /* bpc = connector->display_info.bpc; */ + bpc = radeon_get_monitor_bpc(connector); } /* no dig encoder assigned */ @@ -1159,7 +1159,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder, dp_lane_count = dig_connector->dp_lane_count; connector_object_id = (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; - /* bpc = connector->display_info.bpc; */ + bpc = radeon_get_monitor_bpc(connector); } memset(&args, 0, sizeof(args)); diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index bd05156edbdb..71fa389e10fe 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -84,6 +84,62 @@ static void radeon_property_change_mode(struct drm_encoder *encoder) crtc->x, crtc->y, crtc->fb); } } + +int radeon_get_monitor_bpc(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct radeon_connector_atom_dig *dig_connector; + int bpc = 8; + + switch (connector->connector_type) { + case DRM_MODE_CONNECTOR_DVII: + case DRM_MODE_CONNECTOR_HDMIB: + if (radeon_connector->use_digital) { + if (drm_detect_hdmi_monitor(radeon_connector->edid)) { + if (connector->display_info.bpc) + bpc = connector->display_info.bpc; + } + } + break; + case DRM_MODE_CONNECTOR_DVID: + case DRM_MODE_CONNECTOR_HDMIA: + if (drm_detect_hdmi_monitor(radeon_connector->edid)) { + if (connector->display_info.bpc) + bpc = connector->display_info.bpc; + } + break; + case DRM_MODE_CONNECTOR_DisplayPort: + dig_connector = radeon_connector->con_priv; + if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || + (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) || + drm_detect_hdmi_monitor(radeon_connector->edid)) { + if (connector->display_info.bpc) + bpc = connector->display_info.bpc; + } + break; + case DRM_MODE_CONNECTOR_eDP: + case DRM_MODE_CONNECTOR_LVDS: + if 
(connector->display_info.bpc) + bpc = connector->display_info.bpc; + else if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { + struct drm_connector_helper_funcs *connector_funcs = + connector->helper_private; + struct drm_encoder *encoder = connector_funcs->best_encoder(connector); + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + + if (dig->lcd_misc & ATOM_PANEL_MISC_V13_6BIT_PER_COLOR) + bpc = 6; + else if (dig->lcd_misc & ATOM_PANEL_MISC_V13_8BIT_PER_COLOR) + bpc = 8; + } + break; + } + return bpc; +} + static void radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_connector_status status) { diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index f7eb5d8b9fd3..b2cca6a2395c 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -476,6 +476,7 @@ extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder); extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector); extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector); extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector); +extern int radeon_get_monitor_bpc(struct drm_connector *connector); extern void radeon_connector_hotplug(struct drm_connector *connector); extern int radeon_dp_mode_valid_helper(struct drm_connector *connector, -- cgit v1.2.3-59-g8ed1b From 3a2a67aa28725bb500505087067e7830cfa9c137 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 28 Mar 2012 13:19:06 -0400 Subject: drm/radeon/kms: add register definitions for audio This adds register definitions for HDMI/DP audio on DCE2/3/4/5 hardware. 
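To show how these definitions are meant to be consumed, a hypothetical fragment using the DCE 3.2 names from the rv770d.h hunk below (the chosen values and the offset variable are illustrative, not taken from a real call site): the parameterised macros encode register fields, so driver code ORs them together and writes the result with the usual radeon accessor, with rdev in scope as WREG32() expects.

	/* pick the 48 kHz ACR table and let the hardware send ACR
	 * packets on its own; 'offset' is HDMI_OFFSET0 or HDMI_OFFSET1
	 * depending on which HDMI block is being programmed */
	u32 acr = HDMI_ACR_SELECT(HDMI_ACR_48) | HDMI_ACR_AUTO_SEND;
	WREG32(HDMI_ACR_PACKET_CONTROL + offset, acr);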
Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreend.h | 220 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/r600d.h | 236 ++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/rs600d.h | 14 +++ drivers/gpu/drm/radeon/rv770d.h | 191 +++++++++++++++++++++++++++++ 4 files changed, 661 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index b4eefc355f16..79130bfd1d6f 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -112,6 +112,226 @@ #define CP_SEM_INCOMPLETE_TIMER_CNTL 0x85C8 #define CP_DEBUG 0xC1FC +/* Audio clocks */ +#define DCCG_AUDIO_DTO_SOURCE 0x05ac +# define DCCG_AUDIO_DTO0_SOURCE_SEL(x) ((x) << 0) /* crtc0 - crtc5 */ +# define DCCG_AUDIO_DTO_SEL (1 << 4) /* 0=dto0 1=dto1 */ + +#define DCCG_AUDIO_DTO0_PHASE 0x05b0 +#define DCCG_AUDIO_DTO0_MODULE 0x05b4 +#define DCCG_AUDIO_DTO0_LOAD 0x05b8 +#define DCCG_AUDIO_DTO0_CNTL 0x05bc + +#define DCCG_AUDIO_DTO1_PHASE 0x05c0 +#define DCCG_AUDIO_DTO1_MODULE 0x05c4 +#define DCCG_AUDIO_DTO1_LOAD 0x05c8 +#define DCCG_AUDIO_DTO1_CNTL 0x05cc + +/* DCE 4.0 AFMT */ +#define HDMI_CONTROL 0x7030 +# define HDMI_KEEPOUT_MODE (1 << 0) +# define HDMI_PACKET_GEN_VERSION (1 << 4) /* 0 = r6xx compat */ +# define HDMI_ERROR_ACK (1 << 8) +# define HDMI_ERROR_MASK (1 << 9) +# define HDMI_DEEP_COLOR_ENABLE (1 << 24) +# define HDMI_DEEP_COLOR_DEPTH (((x) & 3) << 28) +# define HDMI_24BIT_DEEP_COLOR 0 +# define HDMI_30BIT_DEEP_COLOR 1 +# define HDMI_36BIT_DEEP_COLOR 2 +#define HDMI_STATUS 0x7034 +# define HDMI_ACTIVE_AVMUTE (1 << 0) +# define HDMI_AUDIO_PACKET_ERROR (1 << 16) +# define HDMI_VBI_PACKET_ERROR (1 << 20) +#define HDMI_AUDIO_PACKET_CONTROL 0x7038 +# define HDMI_AUDIO_DELAY_EN(x) (((x) & 3) << 4) +# define HDMI_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16) +#define HDMI_ACR_PACKET_CONTROL 0x703c +# define HDMI_ACR_SEND (1 << 0) +# define HDMI_ACR_CONT (1 << 1) +# define HDMI_ACR_SELECT(x) (((x) & 3) << 4) +# define HDMI_ACR_HW 0 +# define HDMI_ACR_32 1 +# define HDMI_ACR_44 2 +# define HDMI_ACR_48 3 +# define HDMI_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */ +# define HDMI_ACR_AUTO_SEND (1 << 12) +# define HDMI_ACR_N_MULTIPLE(x) (((x) & 7) << 16) +# define HDMI_ACR_X1 1 +# define HDMI_ACR_X2 2 +# define HDMI_ACR_X4 4 +# define HDMI_ACR_AUDIO_PRIORITY (1 << 31) +#define HDMI_VBI_PACKET_CONTROL 0x7040 +# define HDMI_NULL_SEND (1 << 0) +# define HDMI_GC_SEND (1 << 4) +# define HDMI_GC_CONT (1 << 5) /* 0 - once; 1 - every frame */ +#define HDMI_INFOFRAME_CONTROL0 0x7044 +# define HDMI_AVI_INFO_SEND (1 << 0) +# define HDMI_AVI_INFO_CONT (1 << 1) +# define HDMI_AUDIO_INFO_SEND (1 << 4) +# define HDMI_AUDIO_INFO_CONT (1 << 5) +# define HDMI_MPEG_INFO_SEND (1 << 8) +# define HDMI_MPEG_INFO_CONT (1 << 9) +#define HDMI_INFOFRAME_CONTROL1 0x7048 +# define HDMI_AVI_INFO_LINE(x) (((x) & 0x3f) << 0) +# define HDMI_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8) +# define HDMI_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16) +#define HDMI_GENERIC_PACKET_CONTROL 0x704c +# define HDMI_GENERIC0_SEND (1 << 0) +# define HDMI_GENERIC0_CONT (1 << 1) +# define HDMI_GENERIC1_SEND (1 << 4) +# define HDMI_GENERIC1_CONT (1 << 5) +# define HDMI_GENERIC0_LINE(x) (((x) & 0x3f) << 16) +# define HDMI_GENERIC1_LINE(x) (((x) & 0x3f) << 24) +#define HDMI_GC 0x7058 +# define HDMI_GC_AVMUTE (1 << 0) +# define HDMI_GC_AVMUTE_CONT (1 << 2) +#define AFMT_AUDIO_PACKET_CONTROL2 0x705c +# define AFMT_AUDIO_LAYOUT_OVRD (1 << 0) +# define 
AFMT_AUDIO_LAYOUT_SELECT (1 << 1) +# define AFMT_60958_CS_SOURCE (1 << 4) +# define AFMT_AUDIO_CHANNEL_ENABLE(x) (((x) & 0xff) << 8) +# define AFMT_DP_AUDIO_STREAM_ID(x) (((x) & 0xff) << 16) +#define AFMT_AVI_INFO0 0x7084 +# define AFMT_AVI_INFO_CHECKSUM(x) (((x) & 0xff) << 0) +# define AFMT_AVI_INFO_S(x) (((x) & 3) << 8) +# define AFMT_AVI_INFO_B(x) (((x) & 3) << 10) +# define AFMT_AVI_INFO_A(x) (((x) & 1) << 12) +# define AFMT_AVI_INFO_Y(x) (((x) & 3) << 13) +# define AFMT_AVI_INFO_Y_RGB 0 +# define AFMT_AVI_INFO_Y_YCBCR422 1 +# define AFMT_AVI_INFO_Y_YCBCR444 2 +# define AFMT_AVI_INFO_Y_A_B_S(x) (((x) & 0xff) << 8) +# define AFMT_AVI_INFO_R(x) (((x) & 0xf) << 16) +# define AFMT_AVI_INFO_M(x) (((x) & 0x3) << 20) +# define AFMT_AVI_INFO_C(x) (((x) & 0x3) << 22) +# define AFMT_AVI_INFO_C_M_R(x) (((x) & 0xff) << 16) +# define AFMT_AVI_INFO_SC(x) (((x) & 0x3) << 24) +# define AFMT_AVI_INFO_Q(x) (((x) & 0x3) << 26) +# define AFMT_AVI_INFO_EC(x) (((x) & 0x3) << 28) +# define AFMT_AVI_INFO_ITC(x) (((x) & 0x1) << 31) +# define AFMT_AVI_INFO_ITC_EC_Q_SC(x) (((x) & 0xff) << 24) +#define AFMT_AVI_INFO1 0x7088 +# define AFMT_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */ +# define AFMT_AVI_INFO_PR(x) (((x) & 0xf) << 8) /* don't use avi infoframe v1 */ +# define AFMT_AVI_INFO_CN(x) (((x) & 0x3) << 12) +# define AFMT_AVI_INFO_YQ(x) (((x) & 0x3) << 14) +# define AFMT_AVI_INFO_TOP(x) (((x) & 0xffff) << 16) +#define AFMT_AVI_INFO2 0x708c +# define AFMT_AVI_INFO_BOTTOM(x) (((x) & 0xffff) << 0) +# define AFMT_AVI_INFO_LEFT(x) (((x) & 0xffff) << 16) +#define AFMT_AVI_INFO3 0x7090 +# define AFMT_AVI_INFO_RIGHT(x) (((x) & 0xffff) << 0) +# define AFMT_AVI_INFO_VERSION(x) (((x) & 3) << 24) +#define AFMT_MPEG_INFO0 0x7094 +# define AFMT_MPEG_INFO_CHECKSUM(x) (((x) & 0xff) << 0) +# define AFMT_MPEG_INFO_MB0(x) (((x) & 0xff) << 8) +# define AFMT_MPEG_INFO_MB1(x) (((x) & 0xff) << 16) +# define AFMT_MPEG_INFO_MB2(x) (((x) & 0xff) << 24) +#define AFMT_MPEG_INFO1 0x7098 +# define AFMT_MPEG_INFO_MB3(x) (((x) & 0xff) << 0) +# define AFMT_MPEG_INFO_MF(x) (((x) & 3) << 8) +# define AFMT_MPEG_INFO_FR(x) (((x) & 1) << 12) +#define AFMT_GENERIC0_HDR 0x709c +#define AFMT_GENERIC0_0 0x70a0 +#define AFMT_GENERIC0_1 0x70a4 +#define AFMT_GENERIC0_2 0x70a8 +#define AFMT_GENERIC0_3 0x70ac +#define AFMT_GENERIC0_4 0x70b0 +#define AFMT_GENERIC0_5 0x70b4 +#define AFMT_GENERIC0_6 0x70b8 +#define AFMT_GENERIC1_HDR 0x70bc +#define AFMT_GENERIC1_0 0x70c0 +#define AFMT_GENERIC1_1 0x70c4 +#define AFMT_GENERIC1_2 0x70c8 +#define AFMT_GENERIC1_3 0x70cc +#define AFMT_GENERIC1_4 0x70d0 +#define AFMT_GENERIC1_5 0x70d4 +#define AFMT_GENERIC1_6 0x70d8 +#define HDMI_ACR_32_0 0x70dc +# define HDMI_ACR_CTS_32(x) (((x) & 0xfffff) << 12) +#define HDMI_ACR_32_1 0x70e0 +# define HDMI_ACR_N_32(x) (((x) & 0xfffff) << 0) +#define HDMI_ACR_44_0 0x70e4 +# define HDMI_ACR_CTS_44(x) (((x) & 0xfffff) << 12) +#define HDMI_ACR_44_1 0x70e8 +# define HDMI_ACR_N_44(x) (((x) & 0xfffff) << 0) +#define HDMI_ACR_48_0 0x70ec +# define HDMI_ACR_CTS_48(x) (((x) & 0xfffff) << 12) +#define HDMI_ACR_48_1 0x70f0 +# define HDMI_ACR_N_48(x) (((x) & 0xfffff) << 0) +#define HDMI_ACR_STATUS_0 0x70f4 +#define HDMI_ACR_STATUS_1 0x70f8 +#define AFMT_AUDIO_INFO0 0x70fc +# define AFMT_AUDIO_INFO_CHECKSUM(x) (((x) & 0xff) << 0) +# define AFMT_AUDIO_INFO_CC(x) (((x) & 7) << 8) +# define AFMT_AUDIO_INFO_CT(x) (((x) & 0xf) << 11) +# define AFMT_AUDIO_INFO_CHECKSUM_OFFSET(x) (((x) & 0xff) << 16) +# define AFMT_AUDIO_INFO_CXT(x) (((x) & 0x1f) << 24) +#define 
AFMT_AUDIO_INFO1 0x7100 +# define AFMT_AUDIO_INFO_CA(x) (((x) & 0xff) << 0) +# define AFMT_AUDIO_INFO_LSV(x) (((x) & 0xf) << 11) +# define AFMT_AUDIO_INFO_DM_INH(x) (((x) & 1) << 15) +# define AFMT_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8) +# define AFMT_AUDIO_INFO_LFEBPL(x) (((x) & 3) << 16) +#define AFMT_60958_0 0x7104 +# define AFMT_60958_CS_A(x) (((x) & 1) << 0) +# define AFMT_60958_CS_B(x) (((x) & 1) << 1) +# define AFMT_60958_CS_C(x) (((x) & 1) << 2) +# define AFMT_60958_CS_D(x) (((x) & 3) << 3) +# define AFMT_60958_CS_MODE(x) (((x) & 3) << 6) +# define AFMT_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8) +# define AFMT_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16) +# define AFMT_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20) +# define AFMT_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24) +# define AFMT_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28) +#define AFMT_60958_1 0x7108 +# define AFMT_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0) +# define AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4) +# define AFMT_60958_CS_VALID_L(x) (((x) & 1) << 16) +# define AFMT_60958_CS_VALID_R(x) (((x) & 1) << 18) +# define AFMT_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20) +#define AFMT_AUDIO_CRC_CONTROL 0x710c +# define AFMT_AUDIO_CRC_EN (1 << 0) +#define AFMT_RAMP_CONTROL0 0x7110 +# define AFMT_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0) +# define AFMT_RAMP_DATA_SIGN (1 << 31) +#define AFMT_RAMP_CONTROL1 0x7114 +# define AFMT_RAMP_MIN_COUNT(x) (((x) & 0xffffff) << 0) +# define AFMT_AUDIO_TEST_CH_DISABLE(x) (((x) & 0xff) << 24) +#define AFMT_RAMP_CONTROL2 0x7118 +# define AFMT_RAMP_INC_COUNT(x) (((x) & 0xffffff) << 0) +#define AFMT_RAMP_CONTROL3 0x711c +# define AFMT_RAMP_DEC_COUNT(x) (((x) & 0xffffff) << 0) +#define AFMT_60958_2 0x7120 +# define AFMT_60958_CS_CHANNEL_NUMBER_2(x) (((x) & 0xf) << 0) +# define AFMT_60958_CS_CHANNEL_NUMBER_3(x) (((x) & 0xf) << 4) +# define AFMT_60958_CS_CHANNEL_NUMBER_4(x) (((x) & 0xf) << 8) +# define AFMT_60958_CS_CHANNEL_NUMBER_5(x) (((x) & 0xf) << 12) +# define AFMT_60958_CS_CHANNEL_NUMBER_6(x) (((x) & 0xf) << 16) +# define AFMT_60958_CS_CHANNEL_NUMBER_7(x) (((x) & 0xf) << 20) +#define AFMT_STATUS 0x7128 +# define AFMT_AUDIO_ENABLE (1 << 4) +# define AFMT_AUDIO_HBR_ENABLE (1 << 8) +# define AFMT_AZ_FORMAT_WTRIG (1 << 28) +# define AFMT_AZ_FORMAT_WTRIG_INT (1 << 29) +# define AFMT_AZ_AUDIO_ENABLE_CHG (1 << 30) +#define AFMT_AUDIO_PACKET_CONTROL 0x712c +# define AFMT_AUDIO_SAMPLE_SEND (1 << 0) +# define AFMT_RESET_FIFO_WHEN_AUDIO_DIS (1 << 11) /* set to 1 */ +# define AFMT_AUDIO_TEST_EN (1 << 12) +# define AFMT_AUDIO_CHANNEL_SWAP (1 << 24) +# define AFMT_60958_CS_UPDATE (1 << 26) +# define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27) +# define AFMT_AZ_FORMAT_WTRIG_MASK (1 << 28) +# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29) +# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30) +#define AFMT_VBI_PACKET_CONTROL 0x7130 +# define AFMT_GENERIC0_UPDATE (1 << 2) +#define AFMT_INFOFRAME_CONTROL0 0x7134 +# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - afmt regs */ +# define AFMT_AUDIO_INFO_UPDATE (1 << 7) +# define AFMT_MPEG_INFO_UPDATE (1 << 10) +#define AFMT_GENERIC0_7 0x7138 #define GC_USER_SHADER_PIPE_CONFIG 0x8954 #define INACTIVE_QD_PIPES(x) ((x) << 8) diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 59f9c993cc31..426e5a7de778 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -824,6 +824,242 @@ # define TARGET_LINK_SPEED_MASK (0xf << 0) # define SELECTABLE_DEEMPHASIS (1 << 6) +/* 
Audio clocks */ +#define DCCG_AUDIO_DTO0_PHASE 0x0514 +#define DCCG_AUDIO_DTO0_MODULE 0x0518 +#define DCCG_AUDIO_DTO0_LOAD 0x051c +# define DTO_LOAD (1 << 31) +#define DCCG_AUDIO_DTO0_CNTL 0x0520 + +#define DCCG_AUDIO_DTO1_PHASE 0x0524 +#define DCCG_AUDIO_DTO1_MODULE 0x0528 +#define DCCG_AUDIO_DTO1_LOAD 0x052c +#define DCCG_AUDIO_DTO1_CNTL 0x0530 + +#define DCCG_AUDIO_DTO_SELECT 0x0534 + +/* digital blocks */ +#define TMDSA_CNTL 0x7880 +# define TMDSA_HDMI_EN (1 << 2) +#define LVTMA_CNTL 0x7a80 +# define LVTMA_HDMI_EN (1 << 2) +#define DDIA_CNTL 0x7200 +# define DDIA_HDMI_EN (1 << 2) +#define DIG0_CNTL 0x75a0 +# define DIG_MODE(x) (((x) & 7) << 8) +# define DIG_MODE_DP 0 +# define DIG_MODE_LVDS 1 +# define DIG_MODE_TMDS_DVI 2 +# define DIG_MODE_TMDS_HDMI 3 +# define DIG_MODE_SDVO 4 +#define DIG1_CNTL 0x79a0 + +/* rs6xx/rs740 and r6xx share the same HDMI blocks, however, rs6xx has only one + * instance of the blocks while r6xx has 2. DCE 3.0 cards are slightly + * different due to the new DIG blocks, but also have 2 instances. + * DCE 3.0 HDMI blocks are part of each DIG encoder. + */ + +/* rs6xx/rs740/r6xx/dce3 */ +#define HDMI0_CONTROL 0x7400 +/* rs6xx/rs740/r6xx */ +# define HDMI0_ENABLE (1 << 0) +# define HDMI0_STREAM(x) (((x) & 3) << 2) +# define HDMI0_STREAM_TMDSA 0 +# define HDMI0_STREAM_LVTMA 1 +# define HDMI0_STREAM_DVOA 2 +# define HDMI0_STREAM_DDIA 3 +/* rs6xx/r6xx/dce3 */ +# define HDMI0_ERROR_ACK (1 << 8) +# define HDMI0_ERROR_MASK (1 << 9) +#define HDMI0_STATUS 0x7404 +# define HDMI0_ACTIVE_AVMUTE (1 << 0) +# define HDMI0_AUDIO_ENABLE (1 << 4) +# define HDMI0_AZ_FORMAT_WTRIG (1 << 28) +# define HDMI0_AZ_FORMAT_WTRIG_INT (1 << 29) +#define HDMI0_AUDIO_PACKET_CONTROL 0x7408 +# define HDMI0_AUDIO_SAMPLE_SEND (1 << 0) +# define HDMI0_AUDIO_DELAY_EN(x) (((x) & 3) << 4) +# define HDMI0_AUDIO_SEND_MAX_PACKETS (1 << 8) +# define HDMI0_AUDIO_TEST_EN (1 << 12) +# define HDMI0_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16) +# define HDMI0_AUDIO_CHANNEL_SWAP (1 << 24) +# define HDMI0_60958_CS_UPDATE (1 << 26) +# define HDMI0_AZ_FORMAT_WTRIG_MASK (1 << 28) +# define HDMI0_AZ_FORMAT_WTRIG_ACK (1 << 29) +#define HDMI0_AUDIO_CRC_CONTROL 0x740c +# define HDMI0_AUDIO_CRC_EN (1 << 0) +#define HDMI0_VBI_PACKET_CONTROL 0x7410 +# define HDMI0_NULL_SEND (1 << 0) +# define HDMI0_GC_SEND (1 << 4) +# define HDMI0_GC_CONT (1 << 5) /* 0 - once; 1 - every frame */ +#define HDMI0_INFOFRAME_CONTROL0 0x7414 +# define HDMI0_AVI_INFO_SEND (1 << 0) +# define HDMI0_AVI_INFO_CONT (1 << 1) +# define HDMI0_AUDIO_INFO_SEND (1 << 4) +# define HDMI0_AUDIO_INFO_CONT (1 << 5) +# define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hmdi regs */ +# define HDMI0_AUDIO_INFO_UPDATE (1 << 7) +# define HDMI0_MPEG_INFO_SEND (1 << 8) +# define HDMI0_MPEG_INFO_CONT (1 << 9) +# define HDMI0_MPEG_INFO_UPDATE (1 << 10) +#define HDMI0_INFOFRAME_CONTROL1 0x7418 +# define HDMI0_AVI_INFO_LINE(x) (((x) & 0x3f) << 0) +# define HDMI0_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8) +# define HDMI0_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16) +#define HDMI0_GENERIC_PACKET_CONTROL 0x741c +# define HDMI0_GENERIC0_SEND (1 << 0) +# define HDMI0_GENERIC0_CONT (1 << 1) +# define HDMI0_GENERIC0_UPDATE (1 << 2) +# define HDMI0_GENERIC1_SEND (1 << 4) +# define HDMI0_GENERIC1_CONT (1 << 5) +# define HDMI0_GENERIC0_LINE(x) (((x) & 0x3f) << 16) +# define HDMI0_GENERIC1_LINE(x) (((x) & 0x3f) << 24) +#define HDMI0_GC 0x7428 +# define HDMI0_GC_AVMUTE (1 << 0) +#define HDMI0_AVI_INFO0 0x7454 +# define HDMI0_AVI_INFO_CHECKSUM(x) (((x) & 0xff) << 0) +# define 
HDMI0_AVI_INFO_S(x) (((x) & 3) << 8) +# define HDMI0_AVI_INFO_B(x) (((x) & 3) << 10) +# define HDMI0_AVI_INFO_A(x) (((x) & 1) << 12) +# define HDMI0_AVI_INFO_Y(x) (((x) & 3) << 13) +# define HDMI0_AVI_INFO_Y_RGB 0 +# define HDMI0_AVI_INFO_Y_YCBCR422 1 +# define HDMI0_AVI_INFO_Y_YCBCR444 2 +# define HDMI0_AVI_INFO_Y_A_B_S(x) (((x) & 0xff) << 8) +# define HDMI0_AVI_INFO_R(x) (((x) & 0xf) << 16) +# define HDMI0_AVI_INFO_M(x) (((x) & 0x3) << 20) +# define HDMI0_AVI_INFO_C(x) (((x) & 0x3) << 22) +# define HDMI0_AVI_INFO_C_M_R(x) (((x) & 0xff) << 16) +# define HDMI0_AVI_INFO_SC(x) (((x) & 0x3) << 24) +# define HDMI0_AVI_INFO_ITC_EC_Q_SC(x) (((x) & 0xff) << 24) +#define HDMI0_AVI_INFO1 0x7458 +# define HDMI0_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */ +# define HDMI0_AVI_INFO_PR(x) (((x) & 0xf) << 8) /* don't use avi infoframe v1 */ +# define HDMI0_AVI_INFO_TOP(x) (((x) & 0xffff) << 16) +#define HDMI0_AVI_INFO2 0x745c +# define HDMI0_AVI_INFO_BOTTOM(x) (((x) & 0xffff) << 0) +# define HDMI0_AVI_INFO_LEFT(x) (((x) & 0xffff) << 16) +#define HDMI0_AVI_INFO3 0x7460 +# define HDMI0_AVI_INFO_RIGHT(x) (((x) & 0xffff) << 0) +# define HDMI0_AVI_INFO_VERSION(x) (((x) & 3) << 24) +#define HDMI0_MPEG_INFO0 0x7464 +# define HDMI0_MPEG_INFO_CHECKSUM(x) (((x) & 0xff) << 0) +# define HDMI0_MPEG_INFO_MB0(x) (((x) & 0xff) << 8) +# define HDMI0_MPEG_INFO_MB1(x) (((x) & 0xff) << 16) +# define HDMI0_MPEG_INFO_MB2(x) (((x) & 0xff) << 24) +#define HDMI0_MPEG_INFO1 0x7468 +# define HDMI0_MPEG_INFO_MB3(x) (((x) & 0xff) << 0) +# define HDMI0_MPEG_INFO_MF(x) (((x) & 3) << 8) +# define HDMI0_MPEG_INFO_FR(x) (((x) & 1) << 12) +#define HDMI0_GENERIC0_HDR 0x746c +#define HDMI0_GENERIC0_0 0x7470 +#define HDMI0_GENERIC0_1 0x7474 +#define HDMI0_GENERIC0_2 0x7478 +#define HDMI0_GENERIC0_3 0x747c +#define HDMI0_GENERIC0_4 0x7480 +#define HDMI0_GENERIC0_5 0x7484 +#define HDMI0_GENERIC0_6 0x7488 +#define HDMI0_GENERIC1_HDR 0x748c +#define HDMI0_GENERIC1_0 0x7490 +#define HDMI0_GENERIC1_1 0x7494 +#define HDMI0_GENERIC1_2 0x7498 +#define HDMI0_GENERIC1_3 0x749c +#define HDMI0_GENERIC1_4 0x74a0 +#define HDMI0_GENERIC1_5 0x74a4 +#define HDMI0_GENERIC1_6 0x74a8 +#define HDMI0_ACR_32_0 0x74ac +# define HDMI0_ACR_CTS_32(x) (((x) & 0xfffff) << 12) +#define HDMI0_ACR_32_1 0x74b0 +# define HDMI0_ACR_N_32(x) (((x) & 0xfffff) << 0) +#define HDMI0_ACR_44_0 0x74b4 +# define HDMI0_ACR_CTS_44(x) (((x) & 0xfffff) << 12) +#define HDMI0_ACR_44_1 0x74b8 +# define HDMI0_ACR_N_44(x) (((x) & 0xfffff) << 0) +#define HDMI0_ACR_48_0 0x74bc +# define HDMI0_ACR_CTS_48(x) (((x) & 0xfffff) << 12) +#define HDMI0_ACR_48_1 0x74c0 +# define HDMI0_ACR_N_48(x) (((x) & 0xfffff) << 0) +#define HDMI0_ACR_STATUS_0 0x74c4 +#define HDMI0_ACR_STATUS_1 0x74c8 +#define HDMI0_AUDIO_INFO0 0x74cc +# define HDMI0_AUDIO_INFO_CHECKSUM(x) (((x) & 0xff) << 0) +# define HDMI0_AUDIO_INFO_CC(x) (((x) & 7) << 8) +#define HDMI0_AUDIO_INFO1 0x74d0 +# define HDMI0_AUDIO_INFO_CA(x) (((x) & 0xff) << 0) +# define HDMI0_AUDIO_INFO_LSV(x) (((x) & 0xf) << 11) +# define HDMI0_AUDIO_INFO_DM_INH(x) (((x) & 1) << 15) +# define HDMI0_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8) +#define HDMI0_60958_0 0x74d4 +# define HDMI0_60958_CS_A(x) (((x) & 1) << 0) +# define HDMI0_60958_CS_B(x) (((x) & 1) << 1) +# define HDMI0_60958_CS_C(x) (((x) & 1) << 2) +# define HDMI0_60958_CS_D(x) (((x) & 3) << 3) +# define HDMI0_60958_CS_MODE(x) (((x) & 3) << 6) +# define HDMI0_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8) +# define HDMI0_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16) +# define 
HDMI0_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20) +# define HDMI0_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24) +# define HDMI0_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28) +#define HDMI0_60958_1 0x74d8 +# define HDMI0_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0) +# define HDMI0_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4) +# define HDMI0_60958_CS_VALID_L(x) (((x) & 1) << 16) +# define HDMI0_60958_CS_VALID_R(x) (((x) & 1) << 18) +# define HDMI0_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20) +#define HDMI0_ACR_PACKET_CONTROL 0x74dc +# define HDMI0_ACR_SEND (1 << 0) +# define HDMI0_ACR_CONT (1 << 1) +# define HDMI0_ACR_SELECT(x) (((x) & 3) << 4) +# define HDMI0_ACR_HW 0 +# define HDMI0_ACR_32 1 +# define HDMI0_ACR_44 2 +# define HDMI0_ACR_48 3 +# define HDMI0_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */ +# define HDMI0_ACR_AUTO_SEND (1 << 12) +#define HDMI0_RAMP_CONTROL0 0x74e0 +# define HDMI0_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0) +#define HDMI0_RAMP_CONTROL1 0x74e4 +# define HDMI0_RAMP_MIN_COUNT(x) (((x) & 0xffffff) << 0) +#define HDMI0_RAMP_CONTROL2 0x74e8 +# define HDMI0_RAMP_INC_COUNT(x) (((x) & 0xffffff) << 0) +#define HDMI0_RAMP_CONTROL3 0x74ec +# define HDMI0_RAMP_DEC_COUNT(x) (((x) & 0xffffff) << 0) +/* HDMI0_60958_2 is r7xx only */ +#define HDMI0_60958_2 0x74f0 +# define HDMI0_60958_CS_CHANNEL_NUMBER_2(x) (((x) & 0xf) << 0) +# define HDMI0_60958_CS_CHANNEL_NUMBER_3(x) (((x) & 0xf) << 4) +# define HDMI0_60958_CS_CHANNEL_NUMBER_4(x) (((x) & 0xf) << 8) +# define HDMI0_60958_CS_CHANNEL_NUMBER_5(x) (((x) & 0xf) << 12) +# define HDMI0_60958_CS_CHANNEL_NUMBER_6(x) (((x) & 0xf) << 16) +# define HDMI0_60958_CS_CHANNEL_NUMBER_7(x) (((x) & 0xf) << 20) +/* r6xx only; second instance starts at 0x7700 */ +#define HDMI1_CONTROL 0x7700 +#define HDMI1_STATUS 0x7704 +#define HDMI1_AUDIO_PACKET_CONTROL 0x7708 +/* DCE3; second instance starts at 0x7800 NOT 0x7700 */ +#define DCE3_HDMI1_CONTROL 0x7800 +#define DCE3_HDMI1_STATUS 0x7804 +#define DCE3_HDMI1_AUDIO_PACKET_CONTROL 0x7808 +/* DCE3.2 (for interrupts) */ +#define AFMT_STATUS 0x7600 +# define AFMT_AUDIO_ENABLE (1 << 4) +# define AFMT_AZ_FORMAT_WTRIG (1 << 28) +# define AFMT_AZ_FORMAT_WTRIG_INT (1 << 29) +# define AFMT_AZ_AUDIO_ENABLE_CHG (1 << 30) +#define AFMT_AUDIO_PACKET_CONTROL 0x7604 +# define AFMT_AUDIO_SAMPLE_SEND (1 << 0) +# define AFMT_AUDIO_TEST_EN (1 << 12) +# define AFMT_AUDIO_CHANNEL_SWAP (1 << 24) +# define AFMT_60958_CS_UPDATE (1 << 26) +# define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27) +# define AFMT_AZ_FORMAT_WTRIG_MASK (1 << 28) +# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29) +# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30) +/* DCE3.2 second instance starts at 0x7800 */ +#define HDMI_OFFSET0 (0x7400 - 0x7400) +#define HDMI_OFFSET1 (0x7800 - 0x7400) + /* * PM4 */ diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h index a27c13ac47c3..f1f89414dc63 100644 --- a/drivers/gpu/drm/radeon/rs600d.h +++ b/drivers/gpu/drm/radeon/rs600d.h @@ -485,6 +485,20 @@ #define S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) & 0x1) << 16) #define G_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) >> 16) & 0x1) #define C_007D18_DC_HOT_PLUG_DETECT2_INT_EN 0xFFFEFFFF +#define R_007404_HDMI0_STATUS 0x007404 +#define S_007404_HDMI0_AZ_FORMAT_WTRIG(x) (((x) & 0x1) << 28) +#define G_007404_HDMI0_AZ_FORMAT_WTRIG(x) (((x) >> 28) & 0x1) +#define C_007404_HDMI0_AZ_FORMAT_WTRIG 0xEFFFFFFF +#define S_007404_HDMI0_AZ_FORMAT_WTRIG_INT(x) (((x) & 0x1) << 29) +#define G_007404_HDMI0_AZ_FORMAT_WTRIG_INT(x) (((x) >> 29) & 
0x1) +#define C_007404_HDMI0_AZ_FORMAT_WTRIG_INT 0xDFFFFFFF +#define R_007408_HDMI0_AUDIO_PACKET_CONTROL 0x007408 +#define S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(x) (((x) & 0x1) << 28) +#define G_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(x) (((x) >> 28) & 0x1) +#define C_007408_HDMI0_AZ_FORMAT_WTRIG_MASK 0xEFFFFFFF +#define S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(x) (((x) & 0x1) << 29) +#define G_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(x) (((x) >> 29) & 0x1) +#define C_007408_HDMI0_AZ_FORMAT_WTRIG_ACK 0xDFFFFFFF /* MC registers */ #define R_000000_MC_STATUS 0x000000 diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index 79fa588e9ed5..9c549f702f2f 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h @@ -353,6 +353,197 @@ #define SRBM_STATUS 0x0E50 +/* DCE 3.2 HDMI */ +#define HDMI_CONTROL 0x7400 +# define HDMI_KEEPOUT_MODE (1 << 0) +# define HDMI_PACKET_GEN_VERSION (1 << 4) /* 0 = r6xx compat */ +# define HDMI_ERROR_ACK (1 << 8) +# define HDMI_ERROR_MASK (1 << 9) +#define HDMI_STATUS 0x7404 +# define HDMI_ACTIVE_AVMUTE (1 << 0) +# define HDMI_AUDIO_PACKET_ERROR (1 << 16) +# define HDMI_VBI_PACKET_ERROR (1 << 20) +#define HDMI_AUDIO_PACKET_CONTROL 0x7408 +# define HDMI_AUDIO_DELAY_EN(x) (((x) & 3) << 4) +# define HDMI_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16) +#define HDMI_ACR_PACKET_CONTROL 0x740c +# define HDMI_ACR_SEND (1 << 0) +# define HDMI_ACR_CONT (1 << 1) +# define HDMI_ACR_SELECT(x) (((x) & 3) << 4) +# define HDMI_ACR_HW 0 +# define HDMI_ACR_32 1 +# define HDMI_ACR_44 2 +# define HDMI_ACR_48 3 +# define HDMI_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */ +# define HDMI_ACR_AUTO_SEND (1 << 12) +#define HDMI_VBI_PACKET_CONTROL 0x7410 +# define HDMI_NULL_SEND (1 << 0) +# define HDMI_GC_SEND (1 << 4) +# define HDMI_GC_CONT (1 << 5) /* 0 - once; 1 - every frame */ +#define HDMI_INFOFRAME_CONTROL0 0x7414 +# define HDMI_AVI_INFO_SEND (1 << 0) +# define HDMI_AVI_INFO_CONT (1 << 1) +# define HDMI_AUDIO_INFO_SEND (1 << 4) +# define HDMI_AUDIO_INFO_CONT (1 << 5) +# define HDMI_MPEG_INFO_SEND (1 << 8) +# define HDMI_MPEG_INFO_CONT (1 << 9) +#define HDMI_INFOFRAME_CONTROL1 0x7418 +# define HDMI_AVI_INFO_LINE(x) (((x) & 0x3f) << 0) +# define HDMI_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8) +# define HDMI_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16) +#define HDMI_GENERIC_PACKET_CONTROL 0x741c +# define HDMI_GENERIC0_SEND (1 << 0) +# define HDMI_GENERIC0_CONT (1 << 1) +# define HDMI_GENERIC1_SEND (1 << 4) +# define HDMI_GENERIC1_CONT (1 << 5) +# define HDMI_GENERIC0_LINE(x) (((x) & 0x3f) << 16) +# define HDMI_GENERIC1_LINE(x) (((x) & 0x3f) << 24) +#define HDMI_GC 0x7428 +# define HDMI_GC_AVMUTE (1 << 0) +#define AFMT_AUDIO_PACKET_CONTROL2 0x742c +# define AFMT_AUDIO_LAYOUT_OVRD (1 << 0) +# define AFMT_AUDIO_LAYOUT_SELECT (1 << 1) +# define AFMT_60958_CS_SOURCE (1 << 4) +# define AFMT_AUDIO_CHANNEL_ENABLE(x) (((x) & 0xff) << 8) +# define AFMT_DP_AUDIO_STREAM_ID(x) (((x) & 0xff) << 16) +#define AFMT_AVI_INFO0 0x7454 +# define AFMT_AVI_INFO_CHECKSUM(x) (((x) & 0xff) << 0) +# define AFMT_AVI_INFO_S(x) (((x) & 3) << 8) +# define AFMT_AVI_INFO_B(x) (((x) & 3) << 10) +# define AFMT_AVI_INFO_A(x) (((x) & 1) << 12) +# define AFMT_AVI_INFO_Y(x) (((x) & 3) << 13) +# define AFMT_AVI_INFO_Y_RGB 0 +# define AFMT_AVI_INFO_Y_YCBCR422 1 +# define AFMT_AVI_INFO_Y_YCBCR444 2 +# define AFMT_AVI_INFO_Y_A_B_S(x) (((x) & 0xff) << 8) +# define AFMT_AVI_INFO_R(x) (((x) & 0xf) << 16) +# define AFMT_AVI_INFO_M(x) (((x) & 0x3) << 20) +# define AFMT_AVI_INFO_C(x) (((x) & 0x3) << 22) +# define 
AFMT_AVI_INFO_C_M_R(x) (((x) & 0xff) << 16) +# define AFMT_AVI_INFO_SC(x) (((x) & 0x3) << 24) +# define AFMT_AVI_INFO_Q(x) (((x) & 0x3) << 26) +# define AFMT_AVI_INFO_EC(x) (((x) & 0x3) << 28) +# define AFMT_AVI_INFO_ITC(x) (((x) & 0x1) << 31) +# define AFMT_AVI_INFO_ITC_EC_Q_SC(x) (((x) & 0xff) << 24) +#define AFMT_AVI_INFO1 0x7458 +# define AFMT_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */ +# define AFMT_AVI_INFO_PR(x) (((x) & 0xf) << 8) /* don't use avi infoframe v1 */ +# define AFMT_AVI_INFO_TOP(x) (((x) & 0xffff) << 16) +#define AFMT_AVI_INFO2 0x745c +# define AFMT_AVI_INFO_BOTTOM(x) (((x) & 0xffff) << 0) +# define AFMT_AVI_INFO_LEFT(x) (((x) & 0xffff) << 16) +#define AFMT_AVI_INFO3 0x7460 +# define AFMT_AVI_INFO_RIGHT(x) (((x) & 0xffff) << 0) +# define AFMT_AVI_INFO_VERSION(x) (((x) & 3) << 24) +#define AFMT_MPEG_INFO0 0x7464 +# define AFMT_MPEG_INFO_CHECKSUM(x) (((x) & 0xff) << 0) +# define AFMT_MPEG_INFO_MB0(x) (((x) & 0xff) << 8) +# define AFMT_MPEG_INFO_MB1(x) (((x) & 0xff) << 16) +# define AFMT_MPEG_INFO_MB2(x) (((x) & 0xff) << 24) +#define AFMT_MPEG_INFO1 0x7468 +# define AFMT_MPEG_INFO_MB3(x) (((x) & 0xff) << 0) +# define AFMT_MPEG_INFO_MF(x) (((x) & 3) << 8) +# define AFMT_MPEG_INFO_FR(x) (((x) & 1) << 12) +#define AFMT_GENERIC0_HDR 0x746c +#define AFMT_GENERIC0_0 0x7470 +#define AFMT_GENERIC0_1 0x7474 +#define AFMT_GENERIC0_2 0x7478 +#define AFMT_GENERIC0_3 0x747c +#define AFMT_GENERIC0_4 0x7480 +#define AFMT_GENERIC0_5 0x7484 +#define AFMT_GENERIC0_6 0x7488 +#define AFMT_GENERIC1_HDR 0x748c +#define AFMT_GENERIC1_0 0x7490 +#define AFMT_GENERIC1_1 0x7494 +#define AFMT_GENERIC1_2 0x7498 +#define AFMT_GENERIC1_3 0x749c +#define AFMT_GENERIC1_4 0x74a0 +#define AFMT_GENERIC1_5 0x74a4 +#define AFMT_GENERIC1_6 0x74a8 +#define HDMI_ACR_32_0 0x74ac +# define HDMI_ACR_CTS_32(x) (((x) & 0xfffff) << 12) +#define HDMI_ACR_32_1 0x74b0 +# define HDMI_ACR_N_32(x) (((x) & 0xfffff) << 0) +#define HDMI_ACR_44_0 0x74b4 +# define HDMI_ACR_CTS_44(x) (((x) & 0xfffff) << 12) +#define HDMI_ACR_44_1 0x74b8 +# define HDMI_ACR_N_44(x) (((x) & 0xfffff) << 0) +#define HDMI_ACR_48_0 0x74bc +# define HDMI_ACR_CTS_48(x) (((x) & 0xfffff) << 12) +#define HDMI_ACR_48_1 0x74c0 +# define HDMI_ACR_N_48(x) (((x) & 0xfffff) << 0) +#define HDMI_ACR_STATUS_0 0x74c4 +#define HDMI_ACR_STATUS_1 0x74c8 +#define AFMT_AUDIO_INFO0 0x74cc +# define AFMT_AUDIO_INFO_CHECKSUM(x) (((x) & 0xff) << 0) +# define AFMT_AUDIO_INFO_CC(x) (((x) & 7) << 8) +# define AFMT_AUDIO_INFO_CHECKSUM_OFFSET(x) (((x) & 0xff) << 16) +#define AFMT_AUDIO_INFO1 0x74d0 +# define AFMT_AUDIO_INFO_CA(x) (((x) & 0xff) << 0) +# define AFMT_AUDIO_INFO_LSV(x) (((x) & 0xf) << 11) +# define AFMT_AUDIO_INFO_DM_INH(x) (((x) & 1) << 15) +# define AFMT_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8) +#define AFMT_60958_0 0x74d4 +# define AFMT_60958_CS_A(x) (((x) & 1) << 0) +# define AFMT_60958_CS_B(x) (((x) & 1) << 1) +# define AFMT_60958_CS_C(x) (((x) & 1) << 2) +# define AFMT_60958_CS_D(x) (((x) & 3) << 3) +# define AFMT_60958_CS_MODE(x) (((x) & 3) << 6) +# define AFMT_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8) +# define AFMT_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16) +# define AFMT_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20) +# define AFMT_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24) +# define AFMT_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28) +#define AFMT_60958_1 0x74d8 +# define AFMT_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0) +# define AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4) +# define 
AFMT_60958_CS_VALID_L(x) (((x) & 1) << 16) +# define AFMT_60958_CS_VALID_R(x) (((x) & 1) << 18) +# define AFMT_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20) +#define AFMT_AUDIO_CRC_CONTROL 0x74dc +# define AFMT_AUDIO_CRC_EN (1 << 0) +#define AFMT_RAMP_CONTROL0 0x74e0 +# define AFMT_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0) +# define AFMT_RAMP_DATA_SIGN (1 << 31) +#define AFMT_RAMP_CONTROL1 0x74e4 +# define AFMT_RAMP_MIN_COUNT(x) (((x) & 0xffffff) << 0) +# define AFMT_AUDIO_TEST_CH_DISABLE(x) (((x) & 0xff) << 24) +#define AFMT_RAMP_CONTROL2 0x74e8 +# define AFMT_RAMP_INC_COUNT(x) (((x) & 0xffffff) << 0) +#define AFMT_RAMP_CONTROL3 0x74ec +# define AFMT_RAMP_DEC_COUNT(x) (((x) & 0xffffff) << 0) +#define AFMT_60958_2 0x74f0 +# define AFMT_60958_CS_CHANNEL_NUMBER_2(x) (((x) & 0xf) << 0) +# define AFMT_60958_CS_CHANNEL_NUMBER_3(x) (((x) & 0xf) << 4) +# define AFMT_60958_CS_CHANNEL_NUMBER_4(x) (((x) & 0xf) << 8) +# define AFMT_60958_CS_CHANNEL_NUMBER_5(x) (((x) & 0xf) << 12) +# define AFMT_60958_CS_CHANNEL_NUMBER_6(x) (((x) & 0xf) << 16) +# define AFMT_60958_CS_CHANNEL_NUMBER_7(x) (((x) & 0xf) << 20) +#define AFMT_STATUS 0x7600 +# define AFMT_AUDIO_ENABLE (1 << 4) +# define AFMT_AZ_FORMAT_WTRIG (1 << 28) +# define AFMT_AZ_FORMAT_WTRIG_INT (1 << 29) +# define AFMT_AZ_AUDIO_ENABLE_CHG (1 << 30) +#define AFMT_AUDIO_PACKET_CONTROL 0x7604 +# define AFMT_AUDIO_SAMPLE_SEND (1 << 0) +# define AFMT_AUDIO_TEST_EN (1 << 12) +# define AFMT_AUDIO_CHANNEL_SWAP (1 << 24) +# define AFMT_60958_CS_UPDATE (1 << 26) +# define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27) +# define AFMT_AZ_FORMAT_WTRIG_MASK (1 << 28) +# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29) +# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30) +#define AFMT_VBI_PACKET_CONTROL 0x7608 +# define AFMT_GENERIC0_UPDATE (1 << 2) +#define AFMT_INFOFRAME_CONTROL0 0x760c +# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hmdi regs */ +# define AFMT_AUDIO_INFO_UPDATE (1 << 7) +# define AFMT_MPEG_INFO_UPDATE (1 << 10) +#define AFMT_GENERIC0_7 0x7610 +/* second instance starts at 0x7800 */ +#define HDMI_OFFSET0 (0x7400 - 0x7400) +#define HDMI_OFFSET1 (0x7800 - 0x7400) + #define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110 #define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914 #define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114 -- cgit v1.2.3-59-g8ed1b From f122c6109b1a79153cfb1e188c665ce3f312a886 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 30 Mar 2012 08:59:57 -0400 Subject: drm/radeon/kms: fix up audio interrupt handling - add support for rs6xx - add support for DCE4/5 - fixup 6xx/7xx Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen.c | 128 ++++++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/r600.c | 115 +++++++++++++++++++++------- drivers/gpu/drm/radeon/r600_audio.c | 63 ++-------------- drivers/gpu/drm/radeon/r600_hdmi.c | 17 +---- drivers/gpu/drm/radeon/radeon.h | 17 ++++- drivers/gpu/drm/radeon/radeon_asic.h | 3 - drivers/gpu/drm/radeon/radeon_irq_kms.c | 3 + drivers/gpu/drm/radeon/rs600.c | 40 +++++++++- 8 files changed, 281 insertions(+), 105 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index cfa372cb1cb3..eed7acefb492 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2594,6 +2594,7 @@ int evergreen_irq_set(struct radeon_device *rdev) u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; u32 grbm_int_cntl = 0; u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; + u32 afmt1 = 0, 
afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0; if (!rdev->irq.installed) { WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); @@ -2614,6 +2615,13 @@ int evergreen_irq_set(struct radeon_device *rdev) hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; + afmt1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK; + afmt2 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK; + afmt3 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK; + afmt4 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK; + afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK; + afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK; + if (rdev->family >= CHIP_CAYMAN) { /* enable CP interrupts on all rings */ if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) { @@ -2690,6 +2698,30 @@ int evergreen_irq_set(struct radeon_device *rdev) DRM_DEBUG("evergreen_irq_set: hpd 6\n"); hpd6 |= DC_HPDx_INT_EN; } + if (rdev->irq.afmt[0]) { + DRM_DEBUG("evergreen_irq_set: hdmi 0\n"); + afmt1 |= AFMT_AZ_FORMAT_WTRIG_MASK; + } + if (rdev->irq.afmt[1]) { + DRM_DEBUG("evergreen_irq_set: hdmi 1\n"); + afmt2 |= AFMT_AZ_FORMAT_WTRIG_MASK; + } + if (rdev->irq.afmt[2]) { + DRM_DEBUG("evergreen_irq_set: hdmi 2\n"); + afmt3 |= AFMT_AZ_FORMAT_WTRIG_MASK; + } + if (rdev->irq.afmt[3]) { + DRM_DEBUG("evergreen_irq_set: hdmi 3\n"); + afmt4 |= AFMT_AZ_FORMAT_WTRIG_MASK; + } + if (rdev->irq.afmt[4]) { + DRM_DEBUG("evergreen_irq_set: hdmi 4\n"); + afmt5 |= AFMT_AZ_FORMAT_WTRIG_MASK; + } + if (rdev->irq.afmt[5]) { + DRM_DEBUG("evergreen_irq_set: hdmi 5\n"); + afmt6 |= AFMT_AZ_FORMAT_WTRIG_MASK; + } if (rdev->irq.gui_idle) { DRM_DEBUG("gui idle\n"); grbm_int_cntl |= GUI_IDLE_INT_ENABLE; @@ -2732,6 +2764,13 @@ int evergreen_irq_set(struct radeon_device *rdev) WREG32(DC_HPD5_INT_CONTROL, hpd5); WREG32(DC_HPD6_INT_CONTROL, hpd6); + WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, afmt1); + WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, afmt2); + WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, afmt3); + WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, afmt4); + WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5); + WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6); + return 0; } @@ -2756,6 +2795,13 @@ static void evergreen_irq_ack(struct radeon_device *rdev) rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET); } + rdev->irq.stat_regs.evergreen.afmt_status1 = RREG32(AFMT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET); + rdev->irq.stat_regs.evergreen.afmt_status2 = RREG32(AFMT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET); + rdev->irq.stat_regs.evergreen.afmt_status3 = RREG32(AFMT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET); + rdev->irq.stat_regs.evergreen.afmt_status4 = RREG32(AFMT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET); + rdev->irq.stat_regs.evergreen.afmt_status5 = RREG32(AFMT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET); + rdev->irq.stat_regs.evergreen.afmt_status6 = RREG32(AFMT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET); + if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED) WREG32(GRPH_INT_STATUS + 
EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED) @@ -2829,6 +2875,36 @@ static void evergreen_irq_ack(struct radeon_device *rdev) tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD6_INT_CONTROL, tmp); } + if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) { + tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET); + tmp |= AFMT_AZ_FORMAT_WTRIG_ACK; + WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, tmp); + } + if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) { + tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); + tmp |= AFMT_AZ_FORMAT_WTRIG_ACK; + WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, tmp); + } + if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) { + tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET); + tmp |= AFMT_AZ_FORMAT_WTRIG_ACK; + WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, tmp); + } + if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) { + tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET); + tmp |= AFMT_AZ_FORMAT_WTRIG_ACK; + WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, tmp); + } + if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) { + tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET); + tmp |= AFMT_AZ_FORMAT_WTRIG_ACK; + WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, tmp); + } + if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) { + tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); + tmp |= AFMT_AZ_FORMAT_WTRIG_ACK; + WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, tmp); + } } void evergreen_irq_disable(struct radeon_device *rdev) @@ -2878,6 +2954,7 @@ int evergreen_irq_process(struct radeon_device *rdev) u32 ring_index; unsigned long flags; bool queue_hotplug = false; + bool queue_hdmi = false; if (!rdev->ih.enabled || rdev->shutdown) return IRQ_NONE; @@ -3111,6 +3188,55 @@ restart_ih: break; } break; + case 44: /* hdmi */ + switch (src_data) { + case 0: + if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) { + rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG; + queue_hdmi = true; + DRM_DEBUG("IH: HDMI0\n"); + } + break; + case 1: + if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) { + rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG; + queue_hdmi = true; + DRM_DEBUG("IH: HDMI1\n"); + } + break; + case 2: + if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) { + rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG; + queue_hdmi = true; + DRM_DEBUG("IH: HDMI2\n"); + } + break; + case 3: + if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) { + rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG; + queue_hdmi = true; + DRM_DEBUG("IH: HDMI3\n"); + } + break; + case 4: + if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) { + rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG; + queue_hdmi = true; + DRM_DEBUG("IH: HDMI4\n"); + } + break; + case 5: + if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) { + rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG; + queue_hdmi = true; + DRM_DEBUG("IH: HDMI5\n"); + } + 
break; + default: + DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); + break; + } + break; case 176: /* CP_INT in ring buffer */ case 177: /* CP_INT in IB1 */ case 178: /* CP_INT in IB2 */ @@ -3154,6 +3280,8 @@ restart_ih: goto restart_ih; if (queue_hotplug) schedule_work(&rdev->hotplug_work); + if (queue_hdmi) + schedule_work(&rdev->audio_work); rdev->ih.rptr = rptr; WREG32(IH_RB_RPTR, rdev->ih.rptr); spin_unlock_irqrestore(&rdev->ih.lock, flags); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 4added1c7ee9..ba637d95965b 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2968,6 +2968,15 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev) WREG32(DC_HPD5_INT_CONTROL, tmp); tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; WREG32(DC_HPD6_INT_CONTROL, tmp); + tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; + WREG32(AFMT_AUDIO_PACKET_CONTROL + HDMI_OFFSET0, tmp); + tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; + WREG32(AFMT_AUDIO_PACKET_CONTROL + HDMI_OFFSET1, tmp); + } else { + tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; + WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp); + tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; + WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp); } } else { WREG32(DACA_AUTODETECT_INT_CONTROL, 0); @@ -2978,6 +2987,10 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev) WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); + tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; + WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp); + tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; + WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp); } } @@ -3074,7 +3087,7 @@ int r600_irq_set(struct radeon_device *rdev) u32 mode_int = 0; u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; u32 grbm_int_cntl = 0; - u32 hdmi1, hdmi2; + u32 hdmi0, hdmi1; u32 d1grph = 0, d2grph = 0; if (!rdev->irq.installed) { @@ -3089,9 +3102,7 @@ int r600_irq_set(struct radeon_device *rdev) return 0; } - hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN; if (ASIC_IS_DCE3(rdev)) { - hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN; hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; @@ -3099,12 +3110,18 @@ int r600_irq_set(struct radeon_device *rdev) if (ASIC_IS_DCE32(rdev)) { hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; + hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK; + hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK; + } else { + hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; + hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; } } else { - hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN; hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN; hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN; hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN; + hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; + hdmi1 
= RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; } if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) { @@ -3146,13 +3163,13 @@ int r600_irq_set(struct radeon_device *rdev) DRM_DEBUG("r600_irq_set: hpd 6\n"); hpd6 |= DC_HPDx_INT_EN; } - if (rdev->irq.hdmi[0]) { - DRM_DEBUG("r600_irq_set: hdmi 1\n"); - hdmi1 |= R600_HDMI_INT_EN; + if (rdev->irq.afmt[0]) { + DRM_DEBUG("r600_irq_set: hdmi 0\n"); + hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK; } - if (rdev->irq.hdmi[1]) { - DRM_DEBUG("r600_irq_set: hdmi 2\n"); - hdmi2 |= R600_HDMI_INT_EN; + if (rdev->irq.afmt[1]) { + DRM_DEBUG("r600_irq_set: hdmi 0\n"); + hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK; } if (rdev->irq.gui_idle) { DRM_DEBUG("gui idle\n"); @@ -3164,9 +3181,7 @@ int r600_irq_set(struct radeon_device *rdev) WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph); WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph); WREG32(GRBM_INT_CNTL, grbm_int_cntl); - WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1); if (ASIC_IS_DCE3(rdev)) { - WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2); WREG32(DC_HPD1_INT_CONTROL, hpd1); WREG32(DC_HPD2_INT_CONTROL, hpd2); WREG32(DC_HPD3_INT_CONTROL, hpd3); @@ -3174,12 +3189,18 @@ int r600_irq_set(struct radeon_device *rdev) if (ASIC_IS_DCE32(rdev)) { WREG32(DC_HPD5_INT_CONTROL, hpd5); WREG32(DC_HPD6_INT_CONTROL, hpd6); + WREG32(AFMT_AUDIO_PACKET_CONTROL + HDMI_OFFSET0, hdmi0); + WREG32(AFMT_AUDIO_PACKET_CONTROL + HDMI_OFFSET1, hdmi1); + } else { + WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0); + WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1); } } else { - WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2); WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1); WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3); + WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0); + WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1); } return 0; @@ -3193,10 +3214,19 @@ static void r600_irq_ack(struct radeon_device *rdev) rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS); rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE); rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2); + if (ASIC_IS_DCE32(rdev)) { + rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + HDMI_OFFSET0); + rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + HDMI_OFFSET1); + } else { + rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS); + rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS); + } } else { rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS); rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); rdev->irq.stat_regs.r600.disp_int_cont2 = 0; + rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS); + rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS); } rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS); rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS); @@ -3262,17 +3292,32 @@ static void r600_irq_ack(struct radeon_device *rdev) tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD6_INT_CONTROL, tmp); } - } - if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) { - WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK); - } - if (ASIC_IS_DCE3(rdev)) { - if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) { - WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK); + if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) { + tmp = 
RREG32(AFMT_AUDIO_PACKET_CONTROL + HDMI_OFFSET0); + tmp |= AFMT_AZ_FORMAT_WTRIG_ACK; + WREG32(AFMT_AUDIO_PACKET_CONTROL + HDMI_OFFSET0, tmp); + } + if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) { + tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + HDMI_OFFSET1); + tmp |= AFMT_AZ_FORMAT_WTRIG_ACK; + WREG32(AFMT_AUDIO_PACKET_CONTROL + HDMI_OFFSET1, tmp); } } else { - if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) { - WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK); + if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) { + tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL); + tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK; + WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp); + } + if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) { + if (ASIC_IS_DCE3(rdev)) { + tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL); + tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK; + WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp); + } else { + tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL); + tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK; + WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp); + } } } } @@ -3348,6 +3393,7 @@ int r600_irq_process(struct radeon_device *rdev) u32 ring_index; unsigned long flags; bool queue_hotplug = false; + bool queue_hdmi = false; if (!rdev->ih.enabled || rdev->shutdown) return IRQ_NONE; @@ -3483,9 +3529,26 @@ restart_ih: break; } break; - case 21: /* HDMI */ - DRM_DEBUG("IH: HDMI: 0x%x\n", src_data); - r600_audio_schedule_polling(rdev); + case 21: /* hdmi */ + switch (src_data) { + case 4: + if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) { + rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG; + queue_hdmi = true; + DRM_DEBUG("IH: HDMI0\n"); + } + break; + case 5: + if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) { + rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG; + queue_hdmi = true; + DRM_DEBUG("IH: HDMI1\n"); + } + break; + default: + DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); + break; + } break; case 176: /* CP_INT in ring buffer */ case 177: /* CP_INT in IB1 */ @@ -3517,6 +3580,8 @@ restart_ih: goto restart_ih; if (queue_hotplug) schedule_work(&rdev->hotplug_work); + if (queue_hdmi) + schedule_work(&rdev->audio_work); rdev->ih.rptr = rptr; WREG32(IH_RB_RPTR, rdev->ih.rptr); spin_unlock_irqrestore(&rdev->ih.lock, flags); diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c index ba66f3093d46..a7c06c330fe1 100644 --- a/drivers/gpu/drm/radeon/r600_audio.c +++ b/drivers/gpu/drm/radeon/r600_audio.c @@ -29,8 +29,6 @@ #include "radeon_asic.h" #include "atom.h" -#define AUDIO_TIMER_INTERVALL 100 /* 1/10 sekund should be enough */ - /* * check if the chipset is supported */ @@ -105,21 +103,13 @@ uint8_t r600_audio_category_code(struct radeon_device *rdev) return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff; } -/* - * schedule next audio update event - */ -void r600_audio_schedule_polling(struct radeon_device *rdev) -{ - mod_timer(&rdev->audio_timer, - jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL)); -} - /* * update all hdmi interfaces with current audio parameters */ -static void r600_audio_update_hdmi(unsigned long param) +void r600_audio_update_hdmi(struct work_struct *work) { - struct radeon_device *rdev = (struct radeon_device *)param; + struct radeon_device *rdev = container_of(work, struct radeon_device, + audio_work); struct drm_device *dev = rdev->ddev; int channels = r600_audio_channels(rdev); @@ -127,9 +117,8 @@ static void 
r600_audio_update_hdmi(unsigned long param) int bps = r600_audio_bits_per_sample(rdev); uint8_t status_bits = r600_audio_status_bits(rdev); uint8_t category_code = r600_audio_category_code(rdev); - struct drm_encoder *encoder; - int changes = 0, still_going = 0; + int changes = 0; changes |= channels != rdev->audio_channels; changes |= rate != rdev->audio_rate; @@ -146,14 +135,9 @@ static void r600_audio_update_hdmi(unsigned long param) } list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - still_going |= radeon_encoder->audio_polling_active; if (changes || r600_hdmi_buffer_status_changed(encoder)) r600_hdmi_update_audio_settings(encoder); } - - if (still_going) - r600_audio_schedule_polling(rdev); } /* @@ -177,7 +161,7 @@ static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable) } /* - * initialize the audio vars and register the update timer + * initialize the audio vars */ int r600_audio_init(struct radeon_device *rdev) { @@ -192,44 +176,9 @@ int r600_audio_init(struct radeon_device *rdev) rdev->audio_status_bits = 0; rdev->audio_category_code = 0; - setup_timer( - &rdev->audio_timer, - r600_audio_update_hdmi, - (unsigned long)rdev); - return 0; } -/* - * enable the polling timer, to check for status changes - */ -void r600_audio_enable_polling(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - - DRM_DEBUG("r600_audio_enable_polling: %d\n", - radeon_encoder->audio_polling_active); - if (radeon_encoder->audio_polling_active) - return; - - radeon_encoder->audio_polling_active = 1; - if (rdev->audio_enabled) - mod_timer(&rdev->audio_timer, jiffies + 1); -} - -/* - * disable the polling timer, so we get no more status updates - */ -void r600_audio_disable_polling(struct drm_encoder *encoder) -{ - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - DRM_DEBUG("r600_audio_disable_polling: %d\n", - radeon_encoder->audio_polling_active); - radeon_encoder->audio_polling_active = 0; -} - /* * atach the audio codec to the clock source of the encoder */ @@ -297,7 +246,5 @@ void r600_audio_fini(struct radeon_device *rdev) if (!rdev->audio_enabled) return; - del_timer(&rdev->audio_timer); - r600_audio_engine_enable(rdev, false); } diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index 0b5920671450..37ac1b06753e 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -548,19 +548,10 @@ void r600_hdmi_enable(struct drm_encoder *encoder) } } - if (rdev->irq.installed - && rdev->family != CHIP_RS600 - && rdev->family != CHIP_RS690 - && rdev->family != CHIP_RS740 - && !ASIC_IS_DCE4(rdev)) { + if (rdev->irq.installed) { /* if irq is available use it */ - rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true; + rdev->irq.afmt[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true; radeon_irq_set(rdev); - - r600_audio_disable_polling(encoder); - } else { - /* if not fallback to polling */ - r600_audio_enable_polling(encoder); } DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n", @@ -590,11 +581,9 @@ void r600_hdmi_disable(struct drm_encoder *encoder) offset, radeon_encoder->encoder_id); /* disable irq */ - rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = false; + rdev->irq.afmt[offset == R600_HDMI_BLOCK1 ? 
0 : 1] = false; radeon_irq_set(rdev); - /* disable polling */ - r600_audio_disable_polling(encoder); if (ASIC_IS_DCE5(rdev)) { /* TODO */ diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 138b95216d8d..566ca3b3c873 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -560,6 +560,7 @@ struct radeon_unpin_work { struct r500_irq_stat_regs { u32 disp_int; + u32 hdmi0_status; }; struct r600_irq_stat_regs { @@ -568,6 +569,8 @@ struct r600_irq_stat_regs { u32 disp_int_cont2; u32 d1grph_int; u32 d2grph_int; + u32 hdmi0_status; + u32 hdmi1_status; }; struct evergreen_irq_stat_regs { @@ -583,6 +586,12 @@ struct evergreen_irq_stat_regs { u32 d4grph_int; u32 d5grph_int; u32 d6grph_int; + u32 afmt_status1; + u32 afmt_status2; + u32 afmt_status3; + u32 afmt_status4; + u32 afmt_status5; + u32 afmt_status6; }; union radeon_irq_stat_regs { @@ -593,7 +602,7 @@ union radeon_irq_stat_regs { #define RADEON_MAX_HPD_PINS 6 #define RADEON_MAX_CRTCS 6 -#define RADEON_MAX_HDMI_BLOCKS 2 +#define RADEON_MAX_AFMT_BLOCKS 6 struct radeon_irq { bool installed; @@ -605,7 +614,7 @@ struct radeon_irq { bool gui_idle; bool gui_idle_acked; wait_queue_head_t idle_queue; - bool hdmi[RADEON_MAX_HDMI_BLOCKS]; + bool afmt[RADEON_MAX_AFMT_BLOCKS]; spinlock_t sw_lock; int sw_refcount[RADEON_NUM_RINGS]; union radeon_irq_stat_regs stat_regs; @@ -1546,13 +1555,13 @@ struct radeon_device { struct r600_ih ih; /* r6/700 interrupt ring */ struct si_rlc rlc; struct work_struct hotplug_work; + struct work_struct audio_work; int num_crtc; /* number of crtcs */ struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ struct mutex vram_mutex; /* audio stuff */ bool audio_enabled; - struct timer_list audio_timer; int audio_channels; int audio_rate; int audio_bits_per_sample; @@ -1828,6 +1837,8 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev, struct radeon_vm *vm, struct radeon_bo *bo); +/* audio */ +void r600_audio_update_hdmi(struct work_struct *work); /* * R600 vram scratch functions diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 3d9f9f1d8f90..b135bec649d1 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -369,9 +369,6 @@ int r600_audio_bits_per_sample(struct radeon_device *rdev); int r600_audio_rate(struct radeon_device *rdev); uint8_t r600_audio_status_bits(struct radeon_device *rdev); uint8_t r600_audio_category_code(struct radeon_device *rdev); -void r600_audio_schedule_polling(struct radeon_device *rdev); -void r600_audio_enable_polling(struct drm_encoder *encoder); -void r600_audio_disable_polling(struct drm_encoder *encoder); void r600_audio_fini(struct radeon_device *rdev); void r600_hdmi_init(struct drm_encoder *encoder); int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 66d5fe1c8174..170f1718d92a 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -73,6 +73,7 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev) for (i = 0; i < RADEON_MAX_CRTCS; i++) { rdev->irq.crtc_vblank_int[i] = false; rdev->irq.pflip[i] = false; + rdev->irq.afmt[i] = false; } radeon_irq_set(rdev); /* Clear bits */ @@ -108,6 +109,7 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) for (i = 0; i < RADEON_MAX_CRTCS; i++) { rdev->irq.crtc_vblank_int[i] = false; rdev->irq.pflip[i] = false; + 
rdev->irq.afmt[i] = false; } radeon_irq_set(rdev); } @@ -164,6 +166,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev) int r = 0; INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); + INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); spin_lock_init(&rdev->irq.sw_lock); for (i = 0; i < rdev->num_crtc; i++) diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index d25cf869d08d..10706c66b84b 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -553,6 +553,12 @@ int rs600_irq_set(struct radeon_device *rdev) ~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1); u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1); + u32 hdmi0; + if (ASIC_IS_DCE2(rdev)) + hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) & + ~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1); + else + hdmi0 = 0; if (!rdev->irq.installed) { WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); @@ -579,10 +585,15 @@ int rs600_irq_set(struct radeon_device *rdev) if (rdev->irq.hpd[1]) { hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1); } + if (rdev->irq.afmt[0]) { + hdmi0 |= S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1); + } WREG32(R_000040_GEN_INT_CNTL, tmp); WREG32(R_006540_DxMODE_INT_MASK, mode_int); WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1); WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); + if (ASIC_IS_DCE2(rdev)) + WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0); return 0; } @@ -622,6 +633,17 @@ static inline u32 rs600_irq_ack(struct radeon_device *rdev) rdev->irq.stat_regs.r500.disp_int = 0; } + if (ASIC_IS_DCE2(rdev)) { + rdev->irq.stat_regs.r500.hdmi0_status = RREG32(R_007404_HDMI0_STATUS) & + S_007404_HDMI0_AZ_FORMAT_WTRIG(1); + if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) { + tmp = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL); + tmp |= S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(1); + WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, tmp); + } + } else + rdev->irq.stat_regs.r500.hdmi0_status = 0; + if (irqs) { WREG32(R_000044_GEN_INT_STATUS, irqs); } @@ -630,6 +652,9 @@ static inline u32 rs600_irq_ack(struct radeon_device *rdev) void rs600_irq_disable(struct radeon_device *rdev) { + u32 hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) & + ~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1); + WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0); WREG32(R_000040_GEN_INT_CNTL, 0); WREG32(R_006540_DxMODE_INT_MASK, 0); /* Wait and acknowledge irq */ @@ -641,15 +666,20 @@ int rs600_irq_process(struct radeon_device *rdev) { u32 status, msi_rearm; bool queue_hotplug = false; + bool queue_hdmi = false; /* reset gui idle ack. the status bit is broken */ rdev->irq.gui_idle_acked = false; status = rs600_irq_ack(rdev); - if (!status && !rdev->irq.stat_regs.r500.disp_int) { + if (!status && + !rdev->irq.stat_regs.r500.disp_int && + !rdev->irq.stat_regs.r500.hdmi0_status) { return IRQ_NONE; } - while (status || rdev->irq.stat_regs.r500.disp_int) { + while (status || + rdev->irq.stat_regs.r500.disp_int || + rdev->irq.stat_regs.r500.hdmi0_status) { /* SW interrupt */ if (G_000044_SW_INT(status)) { radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); @@ -687,12 +717,18 @@ int rs600_irq_process(struct radeon_device *rdev) queue_hotplug = true; DRM_DEBUG("HPD2\n"); } + if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) { + queue_hdmi = true; + DRM_DEBUG("HDMI0\n"); + } status = rs600_irq_ack(rdev); } /* reset gui idle ack. 
the status bit is broken */ rdev->irq.gui_idle_acked = false; if (queue_hotplug) schedule_work(&rdev->hotplug_work); + if (queue_hdmi) + schedule_work(&rdev->audio_work); if (rdev->msi_enabled) { switch (rdev->family) { case CHIP_RS600: -- cgit v1.2.3-59-g8ed1b From 1a39b310e920bb7098067d96411b31e459ae8f32 Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Mon, 16 Apr 2012 16:26:02 -0400 Subject: vgaarb: Add support for setting the default video device (v2) The default VGA device is a somewhat fluid concept on platforms with multiple GPUs. Add support for setting it so switching code can update things appropriately, and make sure that the sysfs code returns the right device if it's changed. v2: Updated to fix builds when __ARCH_HAS_VGA_DEFAULT_DEVICE is false. Signed-off-by: Matthew Garrett Acked-by: H. Peter Anvin Acked-by: benh@kernel.crashing.org Cc: airlied@redhat.com Signed-off-by: Dave Airlie --- drivers/gpu/vga/vgaarb.c | 7 +++++++ drivers/pci/pci-sysfs.c | 5 +++++ include/linux/vgaarb.h | 2 ++ 3 files changed, 14 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index 111d956d8e7d..e223b96fa6a0 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -136,6 +136,11 @@ struct pci_dev *vga_default_device(void) { return vga_default; } + +void vga_set_default_device(struct pci_dev *pdev) +{ + vga_default = pdev; +} #endif static inline void vga_irq_set_state(struct vga_device *vgadev, bool state) @@ -605,10 +610,12 @@ static bool vga_arbiter_del_pci_device(struct pci_dev *pdev) goto bail; } +#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE if (vga_default == pdev) { pci_dev_put(vga_default); vga_default = NULL; } +#endif if (vgadev->decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)) vga_decode_count--; diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index a55e248618cd..86c63fe45d11 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -27,6 +27,7 @@ #include #include #include +#include #include "pci.h" static int sysfs_initialized; /* = 0 */ @@ -417,6 +418,10 @@ static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); + struct pci_dev *vga_dev = vga_default_device(); + + if (vga_dev) + return sprintf(buf, "%u\n", (pdev == vga_dev)); return sprintf(buf, "%u\n", !!(pdev->resource[PCI_ROM_RESOURCE].flags & diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h index 9c3120dca294..759a25ba0539 100644 --- a/include/linux/vgaarb.h +++ b/include/linux/vgaarb.h @@ -31,6 +31,7 @@ #ifndef LINUX_VGA_H #define LINUX_VGA_H +#include
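For context, the short sketch below is not part of the patch above; it only illustrates how a GPU mux/switcheroo-style driver might be expected to use the new vga_set_default_device() hook together with the existing vga_default_device() accessor after handing the outputs to another GPU, so that the boot_vga sysfs attribute keeps reporting the device that actually owns the console. The function name example_mux_switch_to() and its active_pdev parameter are hypothetical; only the two vgaarb calls come from the commit itself.

#include <linux/pci.h>
#include <linux/vgaarb.h>

/* Hypothetical hand-over path in a GPU mux driver (illustration only). */
static void example_mux_switch_to(struct pci_dev *active_pdev)
{
	/*
	 * The setter added by this commit lives in the arbiter's
	 * !__ARCH_HAS_VGA_DEFAULT_DEVICE path, i.e. where the default VGA
	 * device is tracked by the arbiter rather than fixed by the platform.
	 */
	if (vga_default_device() != active_pdev)
		vga_set_default_device(active_pdev);
}

With this in place, a subsequent read of /sys/bus/pci/devices/.../boot_vga would report 1 for active_pdev and 0 for the other GPUs, matching the updated boot_vga_show() logic in pci-sysfs.c.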