Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 2
-rw-r--r--  drivers/gpu/drm/drm_context.c | 73
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 2
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 21
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 10
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_buf.c | 7
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 5
-rw-r--r--  drivers/gpu/drm/gma500/gtt.c | 1
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 164
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 137
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 41
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 91
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 40
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 107
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 99
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 80
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 9
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 10
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4_kms.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 58
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 8
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 41
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 24
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 24
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/init.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 35
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 23
-rw-r--r--  drivers/gpu/drm/radeon/btc_dpm.c | 63
-rw-r--r--  drivers/gpu/drm/radeon/btc_dpm.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 32
-rw-r--r--  drivers/gpu/drm/radeon/ci_smc.c | 39
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 59
-rw-r--r--  drivers/gpu/drm/radeon/cypress_dpm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/dce6_afmt.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/kv_dpm.c | 164
-rw-r--r--  drivers/gpu/drm/radeon/kv_dpm.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/kv_smc.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/ni_dpm.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/ppsmc.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 19
-rw-r--r--  drivers/gpu/drm/radeon/r600_dpm.c | 40
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 41
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 82
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 66
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 69
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 92
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_trace.h | 27
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/rs780_dpm.c | 112
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/rv6xx_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dpm.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/rv770_smc.c | 44
-rw-r--r--  drivers/gpu/drm/radeon/rv770_smc.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 31
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 36
-rw-r--r--  drivers/gpu/drm/radeon/si_smc.c | 43
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/sumo_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/trinity_dpm.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/trinity_dpm.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/trinity_smc.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v1_0.c | 4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 44
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 51
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 1
-rw-r--r--  drivers/gpu/vga/vgaarb.c | 51
114 files changed, 1875 insertions, 954 deletions
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 796dbb212a41..8492b68e873c 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -177,7 +177,7 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
static inline void ast_open_key(struct ast_private *ast)
{
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xA1, 0xFF, 0x04);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8);
}
#define AST_VIDMEM_SIZE_8M 0x00800000
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index b4fb86d89850..224ff965bcf7 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -42,6 +42,10 @@
#include <drm/drmP.h>
+/******************************************************************/
+/** \name Context bitmap support */
+/*@{*/
+
/**
* Free a handle from the context bitmap.
*
@@ -52,48 +56,13 @@
* in drm_device::ctx_idr, while holding the drm_device::struct_mutex
* lock.
*/
-static void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
+void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
{
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
mutex_lock(&dev->struct_mutex);
idr_remove(&dev->ctx_idr, ctx_handle);
mutex_unlock(&dev->struct_mutex);
}
-/******************************************************************/
-/** \name Context bitmap support */
-/*@{*/
-
-void drm_legacy_ctxbitmap_release(struct drm_device *dev,
- struct drm_file *file_priv)
-{
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- mutex_lock(&dev->ctxlist_mutex);
- if (!list_empty(&dev->ctxlist)) {
- struct drm_ctx_list *pos, *n;
-
- list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
- if (pos->tag == file_priv &&
- pos->handle != DRM_KERNEL_CONTEXT) {
- if (dev->driver->context_dtor)
- dev->driver->context_dtor(dev,
- pos->handle);
-
- drm_ctxbitmap_free(dev, pos->handle);
-
- list_del(&pos->head);
- kfree(pos);
- --dev->ctx_count;
- }
- }
- }
- mutex_unlock(&dev->ctxlist_mutex);
-}
-
/**
* Context bitmap allocation.
*
@@ -121,12 +90,10 @@ static int drm_ctxbitmap_next(struct drm_device * dev)
*
* Initialise the drm_device::ctx_idr
*/
-void drm_legacy_ctxbitmap_init(struct drm_device * dev)
+int drm_ctxbitmap_init(struct drm_device * dev)
{
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
idr_init(&dev->ctx_idr);
+ return 0;
}
/**
@@ -137,7 +104,7 @@ void drm_legacy_ctxbitmap_init(struct drm_device * dev)
* Free all idr members using drm_ctx_sarea_free helper function
* while holding the drm_device::struct_mutex lock.
*/
-void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
+void drm_ctxbitmap_cleanup(struct drm_device * dev)
{
mutex_lock(&dev->struct_mutex);
idr_destroy(&dev->ctx_idr);
@@ -169,9 +136,6 @@ int drm_getsareactx(struct drm_device *dev, void *data,
struct drm_local_map *map;
struct drm_map_list *_entry;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
mutex_lock(&dev->struct_mutex);
map = idr_find(&dev->ctx_idr, request->ctx_id);
@@ -216,9 +180,6 @@ int drm_setsareactx(struct drm_device *dev, void *data,
struct drm_local_map *map = NULL;
struct drm_map_list *r_list = NULL;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
mutex_lock(&dev->struct_mutex);
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map
@@ -319,9 +280,6 @@ int drm_resctx(struct drm_device *dev, void *data,
struct drm_ctx ctx;
int i;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
if (res->count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
@@ -352,9 +310,6 @@ int drm_addctx(struct drm_device *dev, void *data,
struct drm_ctx_list *ctx_entry;
struct drm_ctx *ctx = data;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
ctx->handle = drm_ctxbitmap_next(dev);
if (ctx->handle == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
@@ -398,9 +353,6 @@ int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
/* This is 0, because we don't handle any context flags */
ctx->flags = 0;
@@ -423,9 +375,6 @@ int drm_switchctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
DRM_DEBUG("%d\n", ctx->handle);
return drm_context_switch(dev, dev->last_context, ctx->handle);
}
@@ -446,9 +395,6 @@ int drm_newctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
DRM_DEBUG("%d\n", ctx->handle);
drm_context_switch_complete(dev, file_priv, ctx->handle);
@@ -471,9 +417,6 @@ int drm_rmctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
DRM_DEBUG("%d\n", ctx->handle);
if (ctx->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor)
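
The drm_context.c hunks above drop the DRIVER_MODESET early-outs and re-export the context-bitmap helpers; the handle bookkeeping they wrap is the kernel's idr allocator. A minimal sketch of that allocation pattern, with illustrative example_ names and an assumed reserved-handle floor of 1 (the driver uses DRM_RESERVED_CONTEXTS):

    #include <linux/idr.h>
    #include <linux/gfp.h>

    static DEFINE_IDR(example_ctx_idr);

    static int example_ctx_alloc(void *ctx)
    {
            /* Grab the lowest free handle at or above 1 (0 stays reserved). */
            return idr_alloc(&example_ctx_idr, ctx, 1, 0, GFP_KERNEL);
    }

    static void example_ctx_free(int handle)
    {
            idr_remove(&example_ctx_idr, handle);
    }
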
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 1688ff500513..830f7501cb4d 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2925,6 +2925,8 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
/* Speaker Allocation Data Block */
if (dbl == 3) {
*sadb = kmalloc(dbl, GFP_KERNEL);
+ if (!*sadb)
+ return -ENOMEM;
memcpy(*sadb, &db[1], dbl);
count = dbl;
break;
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 4be8e09a32ef..3f84277d7036 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -439,7 +439,26 @@ int drm_release(struct inode *inode, struct file *filp)
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_release(dev, file_priv);
- drm_legacy_ctxbitmap_release(dev, file_priv);
+ mutex_lock(&dev->ctxlist_mutex);
+ if (!list_empty(&dev->ctxlist)) {
+ struct drm_ctx_list *pos, *n;
+
+ list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
+ if (pos->tag == file_priv &&
+ pos->handle != DRM_KERNEL_CONTEXT) {
+ if (dev->driver->context_dtor)
+ dev->driver->context_dtor(dev,
+ pos->handle);
+
+ drm_ctxbitmap_free(dev, pos->handle);
+
+ list_del(&pos->head);
+ kfree(pos);
+ --dev->ctx_count;
+ }
+ }
+ }
+ mutex_unlock(&dev->ctxlist_mutex);
mutex_lock(&dev->struct_mutex);
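
The teardown loop moved into drm_release() above frees context entries while walking the list, which only works because list_for_each_entry_safe() caches the next node before the body runs. A self-contained sketch of that idiom, with hypothetical types:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct example_ctx {
            struct list_head head;
            int handle;
    };

    static void example_release_all(struct list_head *ctxlist)
    {
            struct example_ctx *pos, *n;

            /* The _safe variant caches the next node, so pos may be freed. */
            list_for_each_entry_safe(pos, n, ctxlist, head) {
                    list_del(&pos->head);
                    kfree(pos);
            }
    }
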
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index e7eb0276f7f1..39d864576be4 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -292,7 +292,13 @@ int drm_fill_in_dev(struct drm_device *dev,
goto error_out_unreg;
}
- drm_legacy_ctxbitmap_init(dev);
+
+
+ retcode = drm_ctxbitmap_init(dev);
+ if (retcode) {
+ DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+ goto error_out_unreg;
+ }
if (driver->driver_features & DRIVER_GEM) {
retcode = drm_gem_init(dev);
@@ -446,7 +452,7 @@ void drm_put_dev(struct drm_device *dev)
drm_rmmap(dev, r_list->map);
drm_ht_remove(&dev->map_hash);
- drm_legacy_ctxbitmap_cleanup(dev);
+ drm_ctxbitmap_cleanup(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_put_minor(&dev->control);
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 4752f223e5b2..45b6ef595965 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -56,7 +56,7 @@ config DRM_EXYNOS_IPP
config DRM_EXYNOS_FIMC
bool "Exynos DRM FIMC"
- depends on DRM_EXYNOS_IPP && MFD_SYSCON && OF
+ depends on DRM_EXYNOS_IPP && MFD_SYSCON
help
Choose this option if you want to use Exynos FIMC for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 3445a0f3a6b2..9c8088462c26 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -63,7 +63,8 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
return -ENOMEM;
}
- buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
+ buf->kvaddr = (void __iomem *)dma_alloc_attrs(dev->dev,
+ buf->size,
&buf->dma_addr, GFP_KERNEL,
&buf->dma_attrs);
if (!buf->kvaddr) {
@@ -90,9 +91,9 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
}
buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
- if (!buf->sgt) {
+ if (IS_ERR(buf->sgt)) {
DRM_ERROR("failed to get sg table.\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(buf->sgt);
goto err_free_attrs;
}
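
The exynos_drm_buf.c fix matters because drm_prime_pages_to_sg() reports failure through the pointer itself: on error it returns an ERR_PTR-encoded errno, never NULL, so the old NULL test could never fire. A hypothetical caller showing the convention:

    #include <linux/err.h>
    #include <linux/scatterlist.h>
    #include <drm/drmP.h>

    static int example_attach(struct page **pages, int nr_pages,
                              struct sg_table **out)
    {
            struct sg_table *sgt = drm_prime_pages_to_sg(pages, nr_pages);

            if (IS_ERR(sgt))
                    return PTR_ERR(sgt);    /* decode the -Exxx in the pointer */

            *out = sgt;
            return 0;
    }
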
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 78e868bcf1ec..e7c2f2d07f19 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -99,12 +99,13 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
if (is_drm_iommu_supported(dev)) {
unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
- buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
+ buffer->kvaddr = (void __iomem *) vmap(buffer->pages,
+ nr_pages, VM_MAP,
pgprot_writecombine(PAGE_KERNEL));
} else {
phys_addr_t dma_addr = buffer->dma_addr;
if (dma_addr)
- buffer->kvaddr = phys_to_virt(dma_addr);
+ buffer->kvaddr = (void __iomem *)phys_to_virt(dma_addr);
else
buffer->kvaddr = (void __iomem *)NULL;
}
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 92babac362ec..2db731f00930 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -204,6 +204,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
if (IS_ERR(pages))
return PTR_ERR(pages);
+ gt->npage = gt->gem.size / PAGE_SIZE;
gt->pages = pages;
return 0;
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index b1f8fc69023f..60e84043aa34 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -707,8 +707,7 @@ tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
break;
case DRM_MODE_DPMS_OFF:
- /* disable audio and video ports */
- reg_write(encoder, REG_ENA_AP, 0x00);
+ /* disable video ports */
reg_write(encoder, REG_ENA_VP_0, 0x00);
reg_write(encoder, REG_ENA_VP_1, 0x00);
reg_write(encoder, REG_ENA_VP_2, 0x00);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 55ab9246e1b9..a6f4cb5af185 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -857,7 +857,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- u32 rpstat, cagf;
+ u32 rpstat, cagf, reqf;
u32 rpupei, rpcurup, rpprevup;
u32 rpdownei, rpcurdown, rpprevdown;
int max_freq;
@@ -869,6 +869,14 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
gen6_gt_force_wake_get(dev_priv);
+ reqf = I915_READ(GEN6_RPNSWREQ);
+ reqf &= ~GEN6_TURBO_DISABLE;
+ if (IS_HASWELL(dev))
+ reqf >>= 24;
+ else
+ reqf >>= 25;
+ reqf *= GT_FREQUENCY_MULTIPLIER;
+
rpstat = I915_READ(GEN6_RPSTAT1);
rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
rpcurup = I915_READ(GEN6_RP_CUR_UP);
@@ -893,6 +901,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
gt_perf_status & 0xff);
seq_printf(m, "Render p-state limit: %d\n",
rp_state_limits & 0xff);
+ seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
seq_printf(m, "CAGF: %dMHz\n", cagf);
seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
GEN6_CURICONT_MASK);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index fdaa0915ce56..d5c784d48671 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1667,7 +1667,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
return 0;
out_gem_unload:
- if (dev_priv->mm.inactive_shrinker.shrink)
+ if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
if (dev->pdev->msi_enabled)
@@ -1706,7 +1706,7 @@ int i915_driver_unload(struct drm_device *dev)
i915_teardown_sysfs(dev);
- if (dev_priv->mm.inactive_shrinker.shrink)
+ if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ccb28ead3501..69d8ed5416c3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -157,25 +157,6 @@ MODULE_PARM_DESC(prefault_disable,
static struct drm_driver driver;
extern int intel_agp_enabled;
-#define INTEL_VGA_DEVICE(id, info) { \
- .class = PCI_BASE_CLASS_DISPLAY << 16, \
- .class_mask = 0xff0000, \
- .vendor = 0x8086, \
- .device = id, \
- .subvendor = PCI_ANY_ID, \
- .subdevice = PCI_ANY_ID, \
- .driver_data = (unsigned long) info }
-
-#define INTEL_QUANTA_VGA_DEVICE(info) { \
- .class = PCI_BASE_CLASS_DISPLAY << 16, \
- .class_mask = 0xff0000, \
- .vendor = 0x8086, \
- .device = 0x16a, \
- .subvendor = 0x152d, \
- .subdevice = 0x8990, \
- .driver_data = (unsigned long) info }
-
-
static const struct intel_device_info intel_i830_info = {
.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
@@ -350,118 +331,41 @@ static const struct intel_device_info intel_haswell_m_info = {
.has_vebox_ring = 1,
};
+/*
+ * Make sure the device matches here are ordered from most specific to most
+ * general. For example, since the Quanta match is based on the subsystem
+ * and subvendor IDs, we need it to come before the more general IVB
+ * PCI ID matches, otherwise we'll use the wrong info struct above.
+ */
+#define INTEL_PCI_IDS \
+ INTEL_I830_IDS(&intel_i830_info), \
+ INTEL_I845G_IDS(&intel_845g_info), \
+ INTEL_I85X_IDS(&intel_i85x_info), \
+ INTEL_I865G_IDS(&intel_i865g_info), \
+ INTEL_I915G_IDS(&intel_i915g_info), \
+ INTEL_I915GM_IDS(&intel_i915gm_info), \
+ INTEL_I945G_IDS(&intel_i945g_info), \
+ INTEL_I945GM_IDS(&intel_i945gm_info), \
+ INTEL_I965G_IDS(&intel_i965g_info), \
+ INTEL_G33_IDS(&intel_g33_info), \
+ INTEL_I965GM_IDS(&intel_i965gm_info), \
+ INTEL_GM45_IDS(&intel_gm45_info), \
+ INTEL_G45_IDS(&intel_g45_info), \
+ INTEL_PINEVIEW_IDS(&intel_pineview_info), \
+ INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), \
+ INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), \
+ INTEL_SNB_D_IDS(&intel_sandybridge_d_info), \
+ INTEL_SNB_M_IDS(&intel_sandybridge_m_info), \
+ INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
+ INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \
+ INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \
+ INTEL_HSW_D_IDS(&intel_haswell_d_info), \
+ INTEL_HSW_M_IDS(&intel_haswell_m_info), \
+ INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
+ INTEL_VLV_D_IDS(&intel_valleyview_d_info)
+
static const struct pci_device_id pciidlist[] = { /* aka */
- INTEL_VGA_DEVICE(0x3577, &intel_i830_info), /* I830_M */
- INTEL_VGA_DEVICE(0x2562, &intel_845g_info), /* 845_G */
- INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), /* I855_GM */
- INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
- INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), /* I865_G */
- INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), /* I915_G */
- INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), /* E7221_G */
- INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), /* I915_GM */
- INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), /* I945_G */
- INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), /* I945_GM */
- INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), /* I945_GME */
- INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), /* I946_GZ */
- INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), /* G35_G */
- INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), /* I965_Q */
- INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), /* I965_G */
- INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), /* Q35_G */
- INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), /* G33_G */
- INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), /* Q33_G */
- INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), /* I965_GM */
- INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), /* I965_GME */
- INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), /* GM45_G */
- INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), /* IGD_E_G */
- INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), /* Q45_G */
- INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */
- INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */
- INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */
- INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */
- INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
- INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
- INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
- INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
- INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
- INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
- INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
- INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
- INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
- INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
- INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
- INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
- INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
- INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
- INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
- INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
- INTEL_QUANTA_VGA_DEVICE(&intel_ivybridge_q_info), /* Quanta transcode */
- INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
- INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
- INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
- INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */
- INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
- INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
- INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */
- INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
- INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
- INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
- INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */
- INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */
- INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */
- INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */
- INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */
- INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */
- INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
- INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
- INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */
- INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
- INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
- INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */
- INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
- INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
- INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */
- INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */
- INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */
- INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */
- INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */
- INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */
- INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */
- INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
- INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
- INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */
- INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
- INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
- INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */
- INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
- INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
- INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */
- INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */
- INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */
- INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */
- INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */
- INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */
- INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */
- INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
- INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
- INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */
- INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
- INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
- INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */
- INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
- INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
- INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */
- INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */
- INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */
- INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */
- INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */
- INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */
- INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */
- INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
- INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info),
- INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info),
- INTEL_VGA_DEVICE(0x0f33, &intel_valleyview_m_info),
- INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
- INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
+ INTEL_PCI_IDS,
{0, 0, 0}
};
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 52a3785a3fdf..35874b3a86dc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1236,6 +1236,13 @@ typedef struct drm_i915_private {
unsigned int fsb_freq, mem_freq, is_ddr3;
+ /**
+ * wq - Driver workqueue for GEM.
+ *
+ * NOTE: Work items scheduled here are not allowed to grab any modeset
+ * locks, for otherwise the flushing done in the pageflip code will
+ * result in deadlocks.
+ */
struct workqueue_struct *wq;
/* Display functions */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2d1cb10d846f..cdfb9da0e4ce 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -57,10 +57,12 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable);
-static int i915_gem_inactive_shrink(struct shrinker *shrinker,
- struct shrink_control *sc);
+static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
+ struct shrink_control *sc);
+static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
+ struct shrink_control *sc);
static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
-static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
+static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -212,7 +214,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
void *i915_gem_object_alloc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
+ return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
}
void i915_gem_object_free(struct drm_i915_gem_object *obj)
@@ -1390,14 +1392,11 @@ out:
if (i915_terminally_wedged(&dev_priv->gpu_error))
return VM_FAULT_SIGBUS;
case -EAGAIN:
- /* Give the error handler a chance to run and move the
- * objects off the GPU active list. Next time we service the
- * fault, we should be able to transition the page into the
- * GTT without touching the GPU (and so avoid further
- * EIO/EGAIN). If the GPU is wedged, then there is no issue
- * with coherency, just lost writes.
+ /*
+ * EAGAIN means the gpu is hung and we'll wait for the error
+ * handler to reset everything when re-faulting in
+ * i915_mutex_lock_interruptible.
*/
- set_need_resched();
case 0:
case -ERESTARTSYS:
case -EINTR:
@@ -1695,6 +1694,7 @@ static long
__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
bool purgeable_only)
{
+ struct list_head still_bound_list;
struct drm_i915_gem_object *obj, *next;
long count = 0;
@@ -1709,23 +1709,55 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
}
}
- list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
- global_list) {
+ /*
+ * As we may completely rewrite the bound list whilst unbinding
+ * (due to retiring requests) we have to strictly process only
+ * one element of the list at the time, and recheck the list
+ * on every iteration.
+ */
+ INIT_LIST_HEAD(&still_bound_list);
+ while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
struct i915_vma *vma, *v;
+ obj = list_first_entry(&dev_priv->mm.bound_list,
+ typeof(*obj), global_list);
+ list_move_tail(&obj->global_list, &still_bound_list);
+
if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
continue;
+ /*
+ * Hold a reference whilst we unbind this object, as we may
+ * end up waiting for and retiring requests. This might
+ * release the final reference (held by the active list)
+ * and result in the object being freed from under us.
+ *
+ * Note 1: Shrinking the bound list is special since only active
+ * (and hence bound objects) can contain such limbo objects, so
+ * we don't need special tricks for shrinking the unbound list.
+ * The only other place where we have to be careful with active
+ * objects suddenly disappearing due to retiring requests is the
+ * eviction code.
+ *
+ * Note 2: Even though the bound list doesn't hold a reference
+ * to the object we can safely grab one here: The final object
+ * unreferencing and the bound_list are both protected by the
+ * dev->struct_mutex and so we won't ever be able to observe an
+ * object on the bound_list with a reference count equals 0.
+ */
+ drm_gem_object_reference(&obj->base);
+
list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
if (i915_vma_unbind(vma))
break;
- if (!i915_gem_object_put_pages(obj)) {
+ if (i915_gem_object_put_pages(obj) == 0)
count += obj->base.size >> PAGE_SHIFT;
- if (count >= target)
- return count;
- }
+
+ drm_gem_object_unreference(&obj->base);
}
+ list_splice(&still_bound_list, &dev_priv->mm.bound_list);
return count;
}
@@ -1736,16 +1768,21 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
return __i915_gem_shrink(dev_priv, target, true);
}
-static void
+static long
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
struct drm_i915_gem_object *obj, *next;
+ long freed = 0;
i915_gem_evict_everything(dev_priv->dev);
list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
- global_list)
+ global_list) {
+ if (obj->pages_pin_count == 0)
+ freed += obj->base.size >> PAGE_SHIFT;
i915_gem_object_put_pages(obj);
+ }
+ return freed;
}
static int
@@ -1774,7 +1811,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
page_count = obj->base.size / PAGE_SIZE;
if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
- sg_free_table(st);
kfree(st);
return -ENOMEM;
}
@@ -4526,7 +4562,8 @@ i915_gem_load(struct drm_device *dev)
dev_priv->mm.interruptible = true;
- dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
+ dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
+ dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
register_shrinker(&dev_priv->mm.inactive_shrinker);
}
@@ -4749,8 +4786,8 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
#endif
}
-static int
-i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
+static unsigned long
+i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
{
struct drm_i915_private *dev_priv =
container_of(shrinker,
@@ -4758,9 +4795,8 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
mm.inactive_shrinker);
struct drm_device *dev = dev_priv->dev;
struct drm_i915_gem_object *obj;
- int nr_to_scan = sc->nr_to_scan;
bool unlock = true;
- int cnt;
+ unsigned long count;
if (!mutex_trylock(&dev->struct_mutex)) {
if (!mutex_is_locked_by(&dev->struct_mutex, current))
@@ -4772,31 +4808,22 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
unlock = false;
}
- if (nr_to_scan) {
- nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
- if (nr_to_scan > 0)
- nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
- false);
- if (nr_to_scan > 0)
- i915_gem_shrink_all(dev_priv);
- }
-
- cnt = 0;
+ count = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
if (obj->pages_pin_count == 0)
- cnt += obj->base.size >> PAGE_SHIFT;
+ count += obj->base.size >> PAGE_SHIFT;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if (obj->active)
continue;
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
- cnt += obj->base.size >> PAGE_SHIFT;
+ count += obj->base.size >> PAGE_SHIFT;
}
if (unlock)
mutex_unlock(&dev->struct_mutex);
- return cnt;
+ return count;
}
/* All the new VM stuff */
@@ -4860,6 +4887,40 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
return 0;
}
+static unsigned long
+i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(shrinker,
+ struct drm_i915_private,
+ mm.inactive_shrinker);
+ struct drm_device *dev = dev_priv->dev;
+ int nr_to_scan = sc->nr_to_scan;
+ unsigned long freed;
+ bool unlock = true;
+
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ if (!mutex_is_locked_by(&dev->struct_mutex, current))
+ return SHRINK_STOP;
+
+ if (dev_priv->mm.shrinker_no_lock_stealing)
+ return SHRINK_STOP;
+
+ unlock = false;
+ }
+
+ freed = i915_gem_purge(dev_priv, nr_to_scan);
+ if (freed < nr_to_scan)
+ freed += __i915_gem_shrink(dev_priv, nr_to_scan,
+ false);
+ if (freed < nr_to_scan)
+ freed += i915_gem_shrink_all(dev_priv);
+
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+ return freed;
+}
+
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
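
The i915_gem.c rework above tracks the 3.12 shrinker API change: the single .shrink callback is split into .count_objects (a cheap estimate of reclaimable pages) and .scan_objects (the actual reclaim, which may return SHRINK_STOP to tell vmscan to back off, e.g. when a lock cannot be taken). A self-contained sketch of the new contract, with the driver state reduced to hypothetical example_ variables:

    #include <linux/kernel.h>
    #include <linux/mutex.h>
    #include <linux/shrinker.h>

    static DEFINE_MUTEX(example_lock);
    static unsigned long example_freeable;  /* pages we could release */

    static unsigned long example_count(struct shrinker *shrinker,
                                       struct shrink_control *sc)
    {
            return example_freeable;        /* may be 0: nothing to reclaim */
    }

    static unsigned long example_scan(struct shrinker *shrinker,
                                      struct shrink_control *sc)
    {
            unsigned long freed;

            if (!mutex_trylock(&example_lock))
                    return SHRINK_STOP;     /* tell vmscan to back off */

            freed = min_t(unsigned long, example_freeable, sc->nr_to_scan);
            example_freeable -= freed;
            mutex_unlock(&example_lock);

            return freed;
    }

    static struct shrinker example_shrinker = {
            .count_objects = example_count,
            .scan_objects  = example_scan,
            .seeks         = DEFAULT_SEEKS,
    };
    /* register_shrinker(&example_shrinker) at init, unregister at teardown. */
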
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index e918b05fcbdd..7d5752fda5f1 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -42,27 +42,24 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
ret = i915_mutex_lock_interruptible(obj->base.dev);
if (ret)
- return ERR_PTR(ret);
+ goto err;
ret = i915_gem_object_get_pages(obj);
- if (ret) {
- st = ERR_PTR(ret);
- goto out;
- }
+ if (ret)
+ goto err_unlock;
+
+ i915_gem_object_pin_pages(obj);
/* Copy sg so that we make an independent mapping */
st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (st == NULL) {
- st = ERR_PTR(-ENOMEM);
- goto out;
+ ret = -ENOMEM;
+ goto err_unpin;
}
ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
- if (ret) {
- kfree(st);
- st = ERR_PTR(ret);
- goto out;
- }
+ if (ret)
+ goto err_free;
src = obj->pages->sgl;
dst = st->sgl;
@@ -73,17 +70,23 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
}
if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
- sg_free_table(st);
- kfree(st);
- st = ERR_PTR(-ENOMEM);
- goto out;
+ ret = -ENOMEM;
+ goto err_free_sg;
}
- i915_gem_object_pin_pages(obj);
-
-out:
mutex_unlock(&obj->base.dev->struct_mutex);
return st;
+
+err_free_sg:
+ sg_free_table(st);
+err_free:
+ kfree(st);
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+err_unlock:
+ mutex_unlock(&obj->base.dev->struct_mutex);
+err:
+ return ERR_PTR(ret);
}
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
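
The map_dma_buf rework above replaces the single shared exit path with the kernel's goto-unwind ladder, where each label releases exactly what had been acquired before the failing step, in reverse order. A sketch of that shape, using real kernel primitives in a hypothetical function:

    #include <linux/err.h>
    #include <linux/mutex.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    static struct sg_table *example_map(struct mutex *lock, unsigned int nents)
    {
            struct sg_table *st;
            int ret;

            ret = mutex_lock_interruptible(lock);
            if (ret)
                    goto err;

            st = kmalloc(sizeof(*st), GFP_KERNEL);
            if (!st) {
                    ret = -ENOMEM;
                    goto err_unlock;
            }

            ret = sg_alloc_table(st, nents, GFP_KERNEL);
            if (ret)
                    goto err_free;

            mutex_unlock(lock);
            return st;

    err_free:
            kfree(st);              /* undo the allocation ... */
    err_unlock:
            mutex_unlock(lock);     /* ... then the lock, in reverse order */
    err:
            return ERR_PTR(ret);
    }
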
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 792c52a235ee..bf345777ae9f 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -310,6 +310,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
else
ret = relocate_entry_gtt(obj, reloc);
+ if (ret)
+ return ret;
+
/* and update the user's relocation entry */
reloc->presumed_offset = target_offset;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 9969d10b80f5..e15a1d90037d 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -201,6 +201,9 @@ int i915_gem_init_stolen(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int bios_reserved = 0;
+ if (dev_priv->gtt.stolen_size == 0)
+ return 0;
+
dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
if (dev_priv->mm.stolen_base == 0)
return 0;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 558e568d5b45..dae364f0028c 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -143,8 +143,10 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
/* Seek to the first printf that hits the start position */
if (e->pos < e->start) {
- len = vsnprintf(NULL, 0, f, args);
- if (!__i915_error_seek(e, len))
+ va_list tmp;
+
+ va_copy(tmp, args);
+ if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp)))
return;
}
@@ -641,7 +643,7 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
if (WARN_ON(ring->id != RCS))
return NULL;
- obj = ring->private;
+ obj = ring->scratch.obj;
if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
return i915_error_object_create(dev_priv, obj);
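
The va_copy fix above is needed because a va_list may only be traversed once: the dry-run vsnprintf(NULL, 0, ...) that measures the output length would consume the list and corrupt the later real print. A small sketch of the idiom (hypothetical helper, same shape as i915_error_vprintf):

    #include <linux/kernel.h>
    #include <stdarg.h>

    static void example_vprintf(char *buf, size_t len, const char *f,
                                va_list args)
    {
            va_list tmp;
            int need;

            va_copy(tmp, args);             /* private copy for the dry run */
            need = vsnprintf(NULL, 0, f, tmp);
            va_end(tmp);

            if (need < (int)len)
                    vsnprintf(buf, len, f, args);   /* original still valid */
    }
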
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a03b445ceb5f..4b91228fd9bd 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1027,8 +1027,13 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
dev_priv->display.hpd_irq_setup(dev);
spin_unlock(&dev_priv->irq_lock);
- queue_work(dev_priv->wq,
- &dev_priv->hotplug_work);
+ /*
+ * Our hotplug handler can grab modeset locks (by calling down into the
+ * fb helpers). Hence it must not be run on our own dev_priv->wq work
+ * queue for otherwise the flush_work in the pageflip code will
+ * deadlock.
+ */
+ schedule_work(&dev_priv->hotplug_work);
}
static void gmbus_irq_handler(struct drm_device *dev)
@@ -1464,6 +1469,34 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
return ret;
}
+static void i915_error_wake_up(struct drm_i915_private *dev_priv,
+ bool reset_completed)
+{
+ struct intel_ring_buffer *ring;
+ int i;
+
+ /*
+ * Notify all waiters for GPU completion events that reset state has
+ * been changed, and that they need to restart their wait after
+ * checking for potential errors (and bail out to drop locks if there is
+ * a gpu reset pending so that i915_error_work_func can acquire them).
+ */
+
+ /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
+ for_each_ring(ring, dev_priv, i)
+ wake_up_all(&ring->irq_queue);
+
+ /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
+ wake_up_all(&dev_priv->pending_flip_queue);
+
+ /*
+ * Signal tasks blocked in i915_gem_wait_for_error that the pending
+ * reset state is cleared.
+ */
+ if (reset_completed)
+ wake_up_all(&dev_priv->gpu_error.reset_queue);
+}
+
/**
* i915_error_work_func - do process context error handling work
* @work: work struct
@@ -1478,11 +1511,10 @@ static void i915_error_work_func(struct work_struct *work)
drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
gpu_error);
struct drm_device *dev = dev_priv->dev;
- struct intel_ring_buffer *ring;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
- int i, ret;
+ int ret;
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
@@ -1501,8 +1533,16 @@ static void i915_error_work_func(struct work_struct *work)
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
reset_event);
+ /*
+ * All state reset _must_ be completed before we update the
+ * reset counter, for otherwise waiters might miss the reset
+ * pending state and not properly drop locks, resulting in
+ * deadlocks with the reset work.
+ */
ret = i915_reset(dev);
+ intel_display_handle_reset(dev);
+
if (ret == 0) {
/*
* After all the gem state is reset, increment the reset
@@ -1523,12 +1563,11 @@ static void i915_error_work_func(struct work_struct *work)
atomic_set(&error->reset_counter, I915_WEDGED);
}
- for_each_ring(ring, dev_priv, i)
- wake_up_all(&ring->irq_queue);
-
- intel_display_handle_reset(dev);
-
- wake_up_all(&dev_priv->gpu_error.reset_queue);
+ /*
+ * Note: The wake_up also serves as a memory barrier so that
+ * waiters see the updated value of the reset counter atomic_t.
+ */
+ i915_error_wake_up(dev_priv, true);
}
}
@@ -1637,8 +1676,6 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
void i915_handle_error(struct drm_device *dev, bool wedged)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
- int i;
i915_capture_error_state(dev);
i915_report_and_clear_eir(dev);
@@ -1648,14 +1685,28 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
&dev_priv->gpu_error.reset_counter);
/*
- * Wakeup waiting processes so that the reset work item
- * doesn't deadlock trying to grab various locks.
+ * Wakeup waiting processes so that the reset work function
+ * i915_error_work_func doesn't deadlock trying to grab various
+ * locks. By bumping the reset counter first, the woken
+ * processes will see a reset in progress and back off,
+ * releasing their locks and then wait for the reset completion.
+ * We must do this for _all_ gpu waiters that might hold locks
+ * that the reset work needs to acquire.
+ *
+ * Note: The wake_up serves as the required memory barrier to
+ * ensure that the waiters see the updated value of the reset
+ * counter atomic_t.
*/
- for_each_ring(ring, dev_priv, i)
- wake_up_all(&ring->irq_queue);
+ i915_error_wake_up(dev_priv, false);
}
- queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
+ /*
+ * Our reset work can grab modeset locks (since it needs to reset the
+ * state of outstanding pageflips). Hence it must not be run on our own
+ * dev_priv->wq work queue for otherwise the flush_work in the pageflip
+ * code will deadlock.
+ */
+ schedule_work(&dev_priv->gpu_error.work);
}
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
@@ -2027,9 +2078,9 @@ static void i915_hangcheck_elapsed(unsigned long data)
for_each_ring(ring, dev_priv, i) {
if (ring->hangcheck.score > FIRE) {
- DRM_ERROR("%s on %s\n",
- stuck[i] ? "stuck" : "no progress",
- ring->name);
+ DRM_INFO("%s on %s\n",
+ stuck[i] ? "stuck" : "no progress",
+ ring->name);
rings_hung++;
}
}
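
Both schedule_work() conversions in i915_irq.c follow the rule documented in the new i915_drv.h comment: work that can take modeset locks must not run on dev_priv->wq, because the pageflip path flushes that queue while already holding those locks. A sketch of the lock cycle being avoided, with an illustrative example_dev:

    #include <linux/mutex.h>
    #include <linux/workqueue.h>

    struct example_dev {
            struct workqueue_struct *wq;    /* driver-private queue */
            struct mutex modeset_lock;
            struct work_struct hotplug_work;
    };

    static void example_hotplug_func(struct work_struct *work)
    {
            struct example_dev *dev =
                    container_of(work, struct example_dev, hotplug_work);

            mutex_lock(&dev->modeset_lock); /* the work grabs a modeset lock */
            /* ... re-probe outputs ... */
            mutex_unlock(&dev->modeset_lock);
    }

    static void example_finish_flip(struct example_dev *dev)
    {
            /*
             * If hotplug_work were queued on dev->wq, this flush would wait
             * on work that waits on modeset_lock we hold: deadlock.
             */
            mutex_lock(&dev->modeset_lock);
            flush_workqueue(dev->wq);
            mutex_unlock(&dev->modeset_lock);
    }

    /* Safe: run lock-taking work on the system workqueue instead. */
    static void example_hotplug_irq(struct example_dev *dev)
    {
            schedule_work(&dev->hotplug_work);
    }
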
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b6a58f720f9a..38f96f65d87a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -33,21 +33,6 @@
#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a) ((a) << 16)
-/*
- * The Bridge device's PCI config space has information about the
- * fb aperture size and the amount of pre-reserved memory.
- * This is all handled in the intel-gtt.ko module. i915.ko only
- * cares about the vga bit for the vga rbiter.
- */
-#define INTEL_GMCH_CTRL 0x52
-#define INTEL_GMCH_VGA_DISABLE (1 << 1)
-#define SNB_GMCH_CTRL 0x50
-#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
-#define SNB_GMCH_GGMS_MASK 0x3
-#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
-#define SNB_GMCH_GMS_MASK 0x1f
-
-
/* PCI config space */
#define HPLLCC 0xc0 /* 855 only */
@@ -245,6 +230,7 @@
* address/value pairs. Don't overdo it, though, x <= 2^4 must hold!
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
+#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
#define MI_FLUSH_DW_STORE_INDEX (1<<21)
#define MI_INVALIDATE_TLB (1<<18)
@@ -693,6 +679,23 @@
#define FPGA_DBG_RM_NOCLAIM (1<<31)
#define DERRMR 0x44050
+#define DERRMR_PIPEA_SCANLINE (1<<0)
+#define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1)
+#define DERRMR_PIPEA_SPR_FLIP_DONE (1<<2)
+#define DERRMR_PIPEA_VBLANK (1<<3)
+#define DERRMR_PIPEA_HBLANK (1<<5)
+#define DERRMR_PIPEB_SCANLINE (1<<8)
+#define DERRMR_PIPEB_PRI_FLIP_DONE (1<<9)
+#define DERRMR_PIPEB_SPR_FLIP_DONE (1<<10)
+#define DERRMR_PIPEB_VBLANK (1<<11)
+#define DERRMR_PIPEB_HBLANK (1<<13)
+/* Note that PIPEC is not a simple translation of PIPEA/PIPEB */
+#define DERRMR_PIPEC_SCANLINE (1<<14)
+#define DERRMR_PIPEC_PRI_FLIP_DONE (1<<15)
+#define DERRMR_PIPEC_SPR_FLIP_DONE (1<<20)
+#define DERRMR_PIPEC_VBLANK (1<<21)
+#define DERRMR_PIPEC_HBLANK (1<<22)
+
/* GM45+ chicken bits -- debug workaround bits that may be required
* for various sorts of correct behavior. The top 16 bits of each are
@@ -3310,6 +3313,7 @@
#define MCURSOR_PIPE_A 0x00
#define MCURSOR_PIPE_B (1 << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
+#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14)
#define _CURABASE (dev_priv->info->display_mmio_offset + 0x70084)
#define _CURAPOS (dev_priv->info->display_mmio_offset + 0x70088)
#define CURSOR_POS_MASK 0x007FF
@@ -3877,6 +3881,9 @@
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
+#define HSW_SCRATCH1 0xb038
+#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27)
+
#define HSW_FUSE_STRAP 0x42014
#define HSW_CDCLK_LIMIT (1 << 24)
@@ -4724,6 +4731,9 @@
#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
#define DOP_CLOCK_GATING_DISABLE (1<<0)
+#define HSW_ROW_CHICKEN3 0xe49c
+#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
+
#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020)
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index a777e7f3b0df..c8c4112de110 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -224,6 +224,18 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
+static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ vlv_gpu_freq(dev_priv->mem_freq,
+ dev_priv->rps.rpe_delay));
+}
+
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
@@ -366,6 +378,7 @@ static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
+static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
@@ -409,6 +422,14 @@ static const struct attribute *gen6_attrs[] = {
NULL,
};
+static const struct attribute *vlv_attrs[] = {
+ &dev_attr_gt_cur_freq_mhz.attr,
+ &dev_attr_gt_max_freq_mhz.attr,
+ &dev_attr_gt_min_freq_mhz.attr,
+ &dev_attr_vlv_rpe_freq_mhz.attr,
+ NULL,
+};
+
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
@@ -492,11 +513,13 @@ void i915_setup_sysfs(struct drm_device *dev)
DRM_ERROR("l3 parity sysfs setup failed\n");
}
- if (INTEL_INFO(dev)->gen >= 6) {
+ ret = 0;
+ if (IS_VALLEYVIEW(dev))
+ ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs);
+ else if (INTEL_INFO(dev)->gen >= 6)
ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
- if (ret)
- DRM_ERROR("gen6 sysfs setup failed\n");
- }
+ if (ret)
+ DRM_ERROR("RPS sysfs setup failed\n");
ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
&error_state_attr);
@@ -507,7 +530,10 @@ void i915_setup_sysfs(struct drm_device *dev)
void i915_teardown_sysfs(struct drm_device *dev)
{
sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
- sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
+ if (IS_VALLEYVIEW(dev))
+ sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs);
+ else
+ sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
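
The sysfs changes above follow the standard device-attribute pattern: DEVICE_ATTR() builds a struct device_attribute whose ->show callback runs on read, and sysfs_create_files()/sysfs_remove_files() publish and retire a NULL-terminated attribute array, which is how the Valleyview-only vlv_rpe_freq_mhz file can be swapped in for the gen6 set. A sketch with illustrative names:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static int example_freq;        /* stands in for the hardware readback */

    static ssize_t example_freq_show(struct device *kdev,
                                     struct device_attribute *attr, char *buf)
    {
            return snprintf(buf, PAGE_SIZE, "%d\n", example_freq);
    }
    static DEVICE_ATTR(example_freq_mhz, S_IRUGO, example_freq_show, NULL);

    static const struct attribute *example_attrs[] = {
            &dev_attr_example_freq_mhz.attr,
            NULL,
    };

    /* At setup:    sysfs_create_files(&kdev->kobj, example_attrs);
     * At teardown: sysfs_remove_files(&kdev->kobj, example_attrs);
     */
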
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index b5a3875f22c7..ea9022ef15d5 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -688,7 +688,7 @@ static void intel_crt_reset(struct drm_connector *connector)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_attached_crt(connector);
- if (HAS_PCH_SPLIT(dev)) {
+ if (INTEL_INFO(dev)->gen >= 5) {
u32 adpa;
adpa = I915_READ(crt->adpa_reg);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 63aca49d11a8..63de2701b974 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -778,7 +778,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
/* Can only use the always-on power well for eDP when
* not using the panel fitter, and when not using motion
* blur mitigation (which we don't support). */
- if (intel_crtc->config.pch_pfit.size)
+ if (intel_crtc->config.pch_pfit.enabled)
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
else
temp |= TRANS_DDI_EDP_INPUT_A_ON;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 38452d82ac7d..581fb4b2f766 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2077,8 +2077,10 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
else
dspcntr &= ~DISPPLANE_TILED;
- /* must disable */
- dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+ if (IS_HASWELL(dev))
+ dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
+ else
+ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
I915_WRITE(reg, dspcntr);
@@ -2247,7 +2249,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
I915_WRITE(PIPESRC(intel_crtc->pipe),
((crtc->mode.hdisplay - 1) << 16) |
(crtc->mode.vdisplay - 1));
- if (!intel_crtc->config.pch_pfit.size &&
+ if (!intel_crtc->config.pch_pfit.enabled &&
(intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
@@ -3201,7 +3203,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
- if (crtc->config.pch_pfit.size) {
+ if (crtc->config.pch_pfit.enabled) {
/* Force use of hard-coded filter coefficients
* as some pre-programmed values are broken,
* e.g. x201.
@@ -3426,7 +3428,7 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc)
/* To avoid upsetting the power well on haswell only disable the pfit if
* it's in use. The hw state code will make sure we get this right. */
- if (crtc->config.pch_pfit.size) {
+ if (crtc->config.pch_pfit.enabled) {
I915_WRITE(PF_CTL(pipe), 0);
I915_WRITE(PF_WIN_POS(pipe), 0);
I915_WRITE(PF_WIN_SZ(pipe), 0);
@@ -3939,8 +3941,6 @@ static void intel_connector_check_state(struct intel_connector *connector)
* consider. */
void intel_connector_dpms(struct drm_connector *connector, int mode)
{
- struct intel_encoder *encoder = intel_attached_encoder(connector);
-
/* All the simple cases only support two dpms states. */
if (mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
@@ -3951,10 +3951,8 @@ void intel_connector_dpms(struct drm_connector *connector, int mode)
connector->dpms = mode;
/* Only need to change hw state when actually enabled */
- if (encoder->base.crtc)
- intel_encoder_dpms(encoder, mode);
- else
- WARN_ON(encoder->connectors_active != false);
+ if (connector->encoder)
+ intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
intel_modeset_check_state(connector->dev);
}
@@ -4773,6 +4771,10 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
pipeconf = 0;
+ if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
+ I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
+ pipeconf |= PIPECONF_ENABLE;
+
if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
/* Enable pixel doubling when the dot clock is > 90% of the (display)
* core speed.
@@ -4875,9 +4877,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
return -EINVAL;
}
- /* Ensure that the cursor is valid for the new mode before changing... */
- intel_crtc_update_cursor(crtc, true);
-
if (is_lvds && dev_priv->lvds_downclock_avail) {
/*
* Ensure we match the reduced clock's P to the target clock.
@@ -5766,9 +5765,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
intel_crtc->config.dpll.p2 = clock.p2;
}
- /* Ensure that the cursor is valid for the new mode before changing... */
- intel_crtc_update_cursor(crtc, true);
-
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
if (intel_crtc->config.has_pch_encoder) {
fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
@@ -5857,6 +5853,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
tmp = I915_READ(PF_CTL(crtc->pipe));
if (tmp & PF_ENABLE) {
+ pipe_config->pch_pfit.enabled = true;
pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
@@ -6234,7 +6231,7 @@ static void haswell_modeset_global_resources(struct drm_device *dev)
if (!crtc->base.enabled)
continue;
- if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.size ||
+ if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.enabled ||
crtc->config.cpu_transcoder != TRANSCODER_EDP)
enable = true;
}
@@ -6257,9 +6254,6 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
if (!intel_ddi_pll_mode_set(crtc))
return -EINVAL;
- /* Ensure that the cursor is valid for the new mode before changing... */
- intel_crtc_update_cursor(crtc, true);
-
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
@@ -6492,15 +6486,15 @@ static void haswell_write_eld(struct drm_connector *connector,
/* Set ELD valid state */
tmp = I915_READ(aud_cntrl_st2);
- DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp);
+ DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
I915_WRITE(aud_cntrl_st2, tmp);
tmp = I915_READ(aud_cntrl_st2);
- DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp);
+ DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);
/* Enable HDMI mode */
tmp = I915_READ(aud_config);
- DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp);
+ DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
/* clear N_programing_enable and N_value_index */
tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
I915_WRITE(aud_config, tmp);
@@ -6762,8 +6756,10 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
cntl |= CURSOR_MODE_DISABLE;
}
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev)) {
cntl |= CURSOR_PIPE_CSC_ENABLE;
+ cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
+ }
I915_WRITE(CURCNTR_IVB(pipe), cntl);
intel_crtc->cursor_visible = visible;
@@ -6933,7 +6929,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
intel_crtc->cursor_width = width;
intel_crtc->cursor_height = height;
- intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
+ if (intel_crtc->active)
+ intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
return 0;
fail_unpin:
@@ -6952,7 +6949,8 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
intel_crtc->cursor_x = x;
intel_crtc->cursor_y = y;
- intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
+ if (intel_crtc->active)
+ intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
return 0;
}
@@ -7309,8 +7307,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
}
}
- pipe_config->adjusted_mode.clock = clock.dot *
- pipe_config->pixel_multiplier;
+ pipe_config->adjusted_mode.clock = clock.dot;
}
static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
@@ -7828,12 +7825,6 @@ err:
return ret;
}
-/*
- * On gen7 we currently use the blit ring because (in early silicon at least)
- * the render ring doesn't give us interrupts for page flip completion, which
- * means clients will hang after the first flip is queued. Fortunately the
- * blit ring generates interrupts properly, so use it instead.
- */
static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -7842,9 +7833,13 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+ struct intel_ring_buffer *ring;
uint32_t plane_bit = 0;
- int ret;
+ int len, ret;
+
+ ring = obj->ring;
+ if (IS_VALLEYVIEW(dev) || ring == NULL || ring->id != RCS)
+ ring = &dev_priv->ring[BCS];
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
@@ -7866,10 +7861,34 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
goto err_unpin;
}
- ret = intel_ring_begin(ring, 4);
+ len = 4;
+ if (ring->id == RCS)
+ len += 6;
+
+ ret = intel_ring_begin(ring, len);
if (ret)
goto err_unpin;
+ /* Unmask the flip-done completion message. Note that the bspec says that
+ * we should do this for both the BCS and RCS, and that we must not unmask
+ * more than one flip event at any time (or ensure that one flip message
+ * can be sent by waiting for flip-done prior to queueing new flips).
+ * Experimentation says that BCS works despite DERRMR masking all
+ * flip-done completion events and that unmasking all planes at once
+ * for the RCS also doesn't appear to drop events. Setting the DERRMR
+ * to zero does lead to lockups within MI_DISPLAY_FLIP.
+ */
+ if (ring->id == RCS) {
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, DERRMR);
+ intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+ DERRMR_PIPEB_PRI_FLIP_DONE |
+ DERRMR_PIPEC_PRI_FLIP_DONE));
+ intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1));
+ intel_ring_emit(ring, DERRMR);
+ intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
+ }
+
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
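The RCS path above is easier to follow as a flat dword stream. A minimal standalone sketch of the six extra dwords (the MI opcode encodings, the DERRMR offset, and the per-pipe bits are illustrative stand-ins, not the hardware's real values):

#include <stdio.h>
#include <stdint.h>

#define MI_LRI_1    0x22000001u /* stand-in for MI_LOAD_REGISTER_IMM(1) */
#define MI_SRM_1    0x24000001u /* stand-in for MI_STORE_REGISTER_MEM(1) */
#define DERRMR_MMIO 0x00044050u /* stand-in register offset */

int main(void)
{
    uint32_t flip_done_bits = 0x1u | 0x10u | 0x100u; /* stand-in pipe A/B/C bits */
    uint32_t scratch = 0x1000u + 256u;               /* ring scratch slot */

    /* LRI unmasks the flip-done events; SRM parks DERRMR in scratch */
    uint32_t cmds[] = {
        MI_LRI_1, DERRMR_MMIO, ~flip_done_bits,
        MI_SRM_1, DERRMR_MMIO, scratch,
    };

    for (unsigned i = 0; i < sizeof(cmds) / sizeof(cmds[0]); i++)
        printf("dword %u: 0x%08x\n", i, cmds[i]);
    return 0;
}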
@@ -8180,9 +8199,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->gmch_pfit.control,
pipe_config->gmch_pfit.pgm_ratios,
pipe_config->gmch_pfit.lvds_border_bits);
- DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x\n",
+ DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
pipe_config->pch_pfit.pos,
- pipe_config->pch_pfit.size);
+ pipe_config->pch_pfit.size,
+ pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
}
@@ -8578,8 +8598,11 @@ intel_pipe_config_compare(struct drm_device *dev,
if (INTEL_INFO(dev)->gen < 4)
PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
- PIPE_CONF_CHECK_I(pch_pfit.pos);
- PIPE_CONF_CHECK_I(pch_pfit.size);
+ PIPE_CONF_CHECK_I(pch_pfit.enabled);
+ if (current_config->pch_pfit.enabled) {
+ PIPE_CONF_CHECK_I(pch_pfit.pos);
+ PIPE_CONF_CHECK_I(pch_pfit.size);
+ }
PIPE_CONF_CHECK_I(ips_enabled);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 2151d13772b8..2c555f91bfae 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -588,7 +588,18 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
DRM_DEBUG_KMS("aux_ch native nack\n");
return -EREMOTEIO;
case AUX_NATIVE_REPLY_DEFER:
- udelay(100);
+ /*
+ * For now, just give more slack to branch devices. We
+ * could check the DPCD for I2C bit rate capabilities,
+ * and if available, adjust the interval. We could also
+ * be more careful with DP-to-Legacy adapters where a
+ * long legacy cable may force very low I2C bit rates.
+ */
+ if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+ DP_DWN_STRM_PORT_PRESENT)
+ usleep_range(500, 600);
+ else
+ usleep_range(300, 400);
continue;
default:
DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
@@ -1456,7 +1467,7 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
/* Avoid continuous PSR exit by masking memup and hpd */
I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD);
+ EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
intel_dp->psr_setup_done = true;
}
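A rough userspace model of the AUX defer back-off added above: the sleep values mirror the patch's usleep_range() bounds, while the boolean stands in for the DP_DWN_STRM_PORT_PRESENT test on the sink's DPCD.

#include <stdbool.h>
#include <unistd.h>

static void aux_defer_backoff(bool has_branch_device)
{
    if (has_branch_device)
        usleep(500); /* branch devices get more slack before the retry */
    else
        usleep(300);
}

int main(void)
{
    aux_defer_backoff(true);
    aux_defer_backoff(false);
    return 0;
}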
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 176080822a74..9b7b68fd5d47 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -280,6 +280,7 @@ struct intel_crtc_config {
struct {
u32 pos;
u32 size;
+ bool enabled;
} pch_pfit;
/* FDI configuration, only valid if has_pch_encoder is set. */
@@ -551,7 +552,7 @@ extern int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode);
extern void intel_panel_fini(struct intel_panel *panel);
-extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+extern void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
extern void intel_pch_panel_fitting(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config,
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 406303b509c1..7fa7df546c1e 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -263,6 +263,8 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
C(vtotal);
C(clock);
#undef C
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
}
if (intel_dvo->dev.dev_ops->mode_fixup)
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 4d33278e31fb..831a5c021c4b 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -128,8 +128,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- struct drm_display_mode *fixed_mode =
- lvds_encoder->attached_connector->base.panel.fixed_mode;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc->config.adjusted_mode;
int pipe = crtc->pipe;
u32 temp;
@@ -183,9 +183,9 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
temp &= ~LVDS_ENABLE_DITHER;
}
temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
- if (fixed_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
temp |= LVDS_HSYNC_POLARITY;
- if (fixed_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
temp |= LVDS_VSYNC_POLARITY;
I915_WRITE(lvds_encoder->reg, temp);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index cfb8fb68f09c..119771ff46ab 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -173,7 +173,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
return ASLE_BACKLIGHT_FAILED;
intel_panel_set_backlight(dev, bclp, 255);
- iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv);
+ iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
return 0;
}
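The rounding change above matters at the low end of the range. A self-contained check (DIV_ROUND_UP is re-derived here; the bclp value is illustrative):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned bclp = 1; /* minimum non-zero request */

    printf("old: %u%%\n", (bclp * 0x64) / 0xff);          /* truncates to 0 */
    printf("new: %u%%\n", DIV_ROUND_UP(bclp * 100, 255)); /* rounds up to 1 */
    return 0;
}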
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index a43c33bc4a35..293564a2896a 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -36,20 +36,12 @@
#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
void
-intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode)
{
- adjusted_mode->hdisplay = fixed_mode->hdisplay;
- adjusted_mode->hsync_start = fixed_mode->hsync_start;
- adjusted_mode->hsync_end = fixed_mode->hsync_end;
- adjusted_mode->htotal = fixed_mode->htotal;
+ drm_mode_copy(adjusted_mode, fixed_mode);
- adjusted_mode->vdisplay = fixed_mode->vdisplay;
- adjusted_mode->vsync_start = fixed_mode->vsync_start;
- adjusted_mode->vsync_end = fixed_mode->vsync_end;
- adjusted_mode->vtotal = fixed_mode->vtotal;
-
- adjusted_mode->clock = fixed_mode->clock;
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
}
/* adjusted_mode has been preset to be the panel's fixed mode */
@@ -120,6 +112,7 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
done:
pipe_config->pch_pfit.pos = (x << 16) | y;
pipe_config->pch_pfit.size = (width << 16) | height;
+ pipe_config->pch_pfit.enabled = pipe_config->pch_pfit.size != 0;
}
static void
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 46056820d1d2..f4c5e95b2d6f 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2096,16 +2096,16 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- uint32_t pixel_rate, pfit_size;
+ uint32_t pixel_rate;
pixel_rate = intel_crtc->config.adjusted_mode.clock;
/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
* adjust the pixel_rate here. */
- pfit_size = intel_crtc->config.pch_pfit.size;
- if (pfit_size) {
+ if (intel_crtc->config.pch_pfit.enabled) {
uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
+ uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
pipe_w = intel_crtc->config.requested_mode.hdisplay;
pipe_h = intel_crtc->config.requested_mode.vdisplay;
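pch_pfit.size packs the panel-fitter output size into one register word, width in the high 16 bits and height in the low 16 (the same encoding intel_pch_panel_fitting writes in the intel_panel.c hunk above). A quick standalone model of the unpack, with illustrative dimensions:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t pfit_size = (1920u << 16) | 1080u;

    uint32_t pfit_w = (pfit_size >> 16) & 0xffff;
    uint32_t pfit_h = pfit_size & 0xffff;
    printf("%ux%u\n", pfit_w, pfit_h); /* 1920x1080 */
    return 0;
}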
@@ -3447,14 +3447,24 @@ int intel_enable_rc6(const struct drm_device *dev)
static void gen6_enable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 enabled_intrs;
spin_lock_irq(&dev_priv->irq_lock);
WARN_ON(dev_priv->rps.pm_iir);
snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
spin_unlock_irq(&dev_priv->irq_lock);
+
/* only unmask PM interrupts we need. Mask all others. */
- I915_WRITE(GEN6_PMINTRMSK, ~GEN6_PM_RPS_EVENTS);
+ enabled_intrs = GEN6_PM_RPS_EVENTS;
+
+ /* IVB and SNB hang hard on a looping batchbuffer
+ * if GEN6_PM_UP_EI_EXPIRED is masked.
+ */
+ if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+ enabled_intrs |= GEN6_PM_RP_UP_EI_EXPIRED;
+
+ I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs);
}
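As a model of the PMINTRMSK write above: a set bit in the mask register disables that event, so the driver assembles the events it wants and writes the complement. The bit values here are illustrative stand-ins, not the real register layout.

#include <stdio.h>
#include <stdint.h>

#define PM_RPS_EVENTS       0x00000070u /* stand-in for GEN6_PM_RPS_EVENTS */
#define PM_RP_UP_EI_EXPIRED 0x00000004u /* stand-in for GEN6_PM_RP_UP_EI_EXPIRED */

int main(void)
{
    int pre_hsw_gen7 = 1; /* SNB/IVB: keep the EI-expired event unmasked */
    uint32_t enabled = PM_RPS_EVENTS;

    if (pre_hsw_gen7)
        enabled |= PM_RP_UP_EI_EXPIRED;

    printf("PMINTRMSK = 0x%08x\n", ~enabled);
    return 0;
}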
static void gen6_enable_rps(struct drm_device *dev)
@@ -3854,8 +3864,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
dev_priv->rps.rpe_delay),
dev_priv->rps.rpe_delay);
- INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
-
valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
gen6_enable_rps_interrupts(dev);
@@ -4945,13 +4953,16 @@ static void haswell_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
GEN7_WA_L3_CHICKEN_MODE);
+ /* L3 caching of data atomics doesn't work -- disable it. */
+ I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
+ I915_WRITE(HSW_ROW_CHICKEN3,
+ _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
+
/* This is required by WaCatErrorRejectionIssue:hsw */
I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
- g4x_disable_trickle_feed(dev);
-
/* WaVSRefCountFullforceMissDisable:hsw */
gen7_setup_fixed_func_scheduler(dev_priv);
@@ -5673,5 +5684,7 @@ void intel_pm_init(struct drm_device *dev)
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
intel_gen6_powersave_work);
+
+ INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f05cceac5a52..460ee1026fca 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -33,16 +33,6 @@
#include "i915_trace.h"
#include "intel_drv.h"
-/*
- * 965+ support PIPE_CONTROL commands, which provide finer grained control
- * over cache flushing.
- */
-struct pipe_control {
- struct drm_i915_gem_object *obj;
- volatile u32 *cpu_page;
- u32 gtt_offset;
-};
-
static inline int ring_space(struct intel_ring_buffer *ring)
{
int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
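The ring_space() arithmetic is a plain circular-buffer gap: the distance from tail (plus a reserved slack) back to head, folded into the ring when negative. A standalone model with hypothetical sizes (the slack constant stands in for I915_RING_FREE_SPACE):

#include <stdio.h>

#define RING_SIZE  4096
#define FREE_SPACE 64 /* stand-in for I915_RING_FREE_SPACE */

static int ring_space_model(int head, int tail)
{
    int space = head - (tail + FREE_SPACE);

    if (space < 0)
        space += RING_SIZE;
    return space;
}

int main(void)
{
    /* tail ahead of head: 100 - 3064 + 4096 = 1132 bytes free */
    printf("%d\n", ring_space_model(100, 3000));
    return 0;
}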
@@ -175,8 +165,7 @@ gen4_render_ring_flush(struct intel_ring_buffer *ring,
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
- struct pipe_control *pc = ring->private;
- u32 scratch_addr = pc->gtt_offset + 128;
+ u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
@@ -213,8 +202,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
- struct pipe_control *pc = ring->private;
- u32 scratch_addr = pc->gtt_offset + 128;
+ u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -306,8 +294,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
- struct pipe_control *pc = ring->private;
- u32 scratch_addr = pc->gtt_offset + 128;
+ u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
/*
@@ -481,68 +468,43 @@ out:
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
- struct pipe_control *pc;
- struct drm_i915_gem_object *obj;
int ret;
- if (ring->private)
+ if (ring->scratch.obj)
return 0;
- pc = kmalloc(sizeof(*pc), GFP_KERNEL);
- if (!pc)
- return -ENOMEM;
-
- obj = i915_gem_alloc_object(ring->dev, 4096);
- if (obj == NULL) {
+ ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
+ if (ring->scratch.obj == NULL) {
DRM_ERROR("Failed to allocate seqno page\n");
ret = -ENOMEM;
goto err;
}
- i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
- ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
+ ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
if (ret)
goto err_unref;
- pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
- pc->cpu_page = kmap(sg_page(obj->pages->sgl));
- if (pc->cpu_page == NULL) {
+ ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
+ ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
+ if (ring->scratch.cpu_page == NULL) {
ret = -ENOMEM;
goto err_unpin;
}
DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
- ring->name, pc->gtt_offset);
-
- pc->obj = obj;
- ring->private = pc;
+ ring->name, ring->scratch.gtt_offset);
return 0;
err_unpin:
- i915_gem_object_unpin(obj);
+ i915_gem_object_unpin(ring->scratch.obj);
err_unref:
- drm_gem_object_unreference(&obj->base);
+ drm_gem_object_unreference(&ring->scratch.obj->base);
err:
- kfree(pc);
return ret;
}
-static void
-cleanup_pipe_control(struct intel_ring_buffer *ring)
-{
- struct pipe_control *pc = ring->private;
- struct drm_i915_gem_object *obj;
-
- obj = pc->obj;
-
- kunmap(sg_page(obj->pages->sgl));
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(&obj->base);
-
- kfree(pc);
-}
-
static int init_render_ring(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
@@ -607,16 +569,16 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
- if (!ring->private)
+ if (ring->scratch.obj == NULL)
return;
- if (HAS_BROKEN_CS_TLB(dev))
- drm_gem_object_unreference(to_gem_object(ring->private));
-
- if (INTEL_INFO(dev)->gen >= 5)
- cleanup_pipe_control(ring);
+ if (INTEL_INFO(dev)->gen >= 5) {
+ kunmap(sg_page(ring->scratch.obj->pages->sgl));
+ i915_gem_object_unpin(ring->scratch.obj);
+ }
- ring->private = NULL;
+ drm_gem_object_unreference(&ring->scratch.obj->base);
+ ring->scratch.obj = NULL;
}
static void
@@ -742,8 +704,7 @@ do { \
static int
pc_render_add_request(struct intel_ring_buffer *ring)
{
- struct pipe_control *pc = ring->private;
- u32 scratch_addr = pc->gtt_offset + 128;
+ u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
@@ -761,7 +722,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
- intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, 0);
PIPE_CONTROL_FLUSH(ring, scratch_addr);
@@ -780,7 +741,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY);
- intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
@@ -814,15 +775,13 @@ ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
static u32
pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
- struct pipe_control *pc = ring->private;
- return pc->cpu_page[0];
+ return ring->scratch.cpu_page[0];
}
static void
pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
- struct pipe_control *pc = ring->private;
- pc->cpu_page[0] = seqno;
+ ring->scratch.cpu_page[0] = seqno;
}
static bool
@@ -1141,8 +1100,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
} else {
- struct drm_i915_gem_object *obj = ring->private;
- u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
+ u32 cs_offset = ring->scratch.gtt_offset;
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
@@ -1835,7 +1793,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
return ret;
}
- ring->private = obj;
+ ring->scratch.obj = obj;
+ ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
}
return intel_init_ring_buffer(dev, ring);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 432ad5311ba6..68b1ca974d59 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -155,7 +155,11 @@ struct intel_ring_buffer {
struct intel_ring_hangcheck hangcheck;
- void *private;
+ struct {
+ struct drm_i915_gem_object *obj;
+ u32 gtt_offset;
+ volatile u32 *cpu_page;
+ } scratch;
};
static inline bool
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 317e058fb3cf..49482fd5b76c 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -788,6 +788,8 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
uint16_t h_sync_offset, v_sync_offset;
int mode_clock;
+ memset(dtd, 0, sizeof(*dtd));
+
width = mode->hdisplay;
height = mode->vdisplay;
@@ -830,44 +832,51 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
- dtd->part2.sdvo_flags = 0;
dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
- dtd->part2.reserved = 0;
}
-static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
+static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode *pmode,
const struct intel_sdvo_dtd *dtd)
{
- mode->hdisplay = dtd->part1.h_active;
- mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
- mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
- mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
- mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
- mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
- mode->htotal = mode->hdisplay + dtd->part1.h_blank;
- mode->htotal += (dtd->part1.h_high & 0xf) << 8;
-
- mode->vdisplay = dtd->part1.v_active;
- mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
- mode->vsync_start = mode->vdisplay;
- mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
- mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
- mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
- mode->vsync_end = mode->vsync_start +
+ struct drm_display_mode mode = {};
+
+ mode.hdisplay = dtd->part1.h_active;
+ mode.hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
+ mode.hsync_start = mode.hdisplay + dtd->part2.h_sync_off;
+ mode.hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
+ mode.hsync_end = mode.hsync_start + dtd->part2.h_sync_width;
+ mode.hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
+ mode.htotal = mode.hdisplay + dtd->part1.h_blank;
+ mode.htotal += (dtd->part1.h_high & 0xf) << 8;
+
+ mode.vdisplay = dtd->part1.v_active;
+ mode.vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
+ mode.vsync_start = mode.vdisplay;
+ mode.vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
+ mode.vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
+ mode.vsync_start += dtd->part2.v_sync_off_high & 0xc0;
+ mode.vsync_end = mode.vsync_start +
(dtd->part2.v_sync_off_width & 0xf);
- mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
- mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
- mode->vtotal += (dtd->part1.v_high & 0xf) << 8;
+ mode.vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
+ mode.vtotal = mode.vdisplay + dtd->part1.v_blank;
+ mode.vtotal += (dtd->part1.v_high & 0xf) << 8;
- mode->clock = dtd->part1.clock * 10;
+ mode.clock = dtd->part1.clock * 10;
- mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
- mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ mode.flags |= DRM_MODE_FLAG_INTERLACE;
if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
- mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ mode.flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ mode.flags |= DRM_MODE_FLAG_NHSYNC;
if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
- mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ mode.flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ mode.flags |= DRM_MODE_FLAG_NVSYNC;
+
+ drm_mode_set_crtcinfo(&mode, 0);
+
+ drm_mode_copy(pmode, &mode);
}
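The DTD-to-mode conversion above is mostly 12-bit timing values stitched back together from a low byte plus a packed high nibble. A self-contained model of the hdisplay case (field values are illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint8_t h_active = 0x80; /* low 8 bits of hdisplay */
    uint8_t h_high   = 0x70; /* hdisplay bits 11:8 live in bits 7:4 */

    unsigned hdisplay = h_active | (((h_high >> 4) & 0x0f) << 8);
    printf("hdisplay = %u\n", hdisplay); /* 0x780 = 1920 */
    return 0;
}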
static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
@@ -1151,11 +1160,10 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = intel_encoder->base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
struct drm_display_mode *adjusted_mode =
- &intel_crtc->config.adjusted_mode;
- struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
+ &crtc->config.adjusted_mode;
+ struct drm_display_mode *mode = &crtc->config.requested_mode;
struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
u32 sdvox;
struct intel_sdvo_in_out_map in_out;
@@ -1213,13 +1221,15 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
* adjusted_mode.
*/
intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+ input_dtd.part1.clock /= crtc->config.pixel_multiplier;
+
if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
DRM_INFO("Setting input timings on %s failed\n",
SDVO_NAME(intel_sdvo));
- switch (intel_crtc->config.pixel_multiplier) {
+ switch (crtc->config.pixel_multiplier) {
default:
WARN(1, "unknown pixel mutlipler specified\n");
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
@@ -1252,9 +1262,9 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
}
if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
- sdvox |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
+ sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else
- sdvox |= SDVO_PIPE_SEL(intel_crtc->pipe);
+ sdvox |= SDVO_PIPE_SEL(crtc->pipe);
if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
@@ -1264,7 +1274,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
/* done in crtc_mode_set as it lives inside the dpll register */
} else {
- sdvox |= (intel_crtc->config.pixel_multiplier - 1)
+ sdvox |= (crtc->config.pixel_multiplier - 1)
<< SDVO_PORT_MULTIPLY_SHIFT;
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 78b621cdd108..ad6ec4b39005 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -260,8 +260,11 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (obj->tiling_mode != I915_TILING_NONE)
sprctl |= SPRITE_TILED;
- /* must disable */
- sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+ if (IS_HASWELL(dev))
+ sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
+ else
+ sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+
sprctl |= SPRITE_ENABLE;
if (IS_HASWELL(dev))
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index f2c6d7909ae2..dd6f84bf6c22 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -916,6 +916,14 @@ intel_tv_compute_config(struct intel_encoder *encoder,
DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
pipe_config->pipe_bpp = 8*3;
+ /* TV has its own notion of sync and other mode flags, so clear them. */
+ pipe_config->adjusted_mode.flags = 0;
+
+ /*
+ * FIXME: We don't check whether the input mode is actually what we want
+ * or whether userspace is doing something stupid.
+ */
+
return true;
}
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 8f5bc869c023..8649f1c36b00 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -261,7 +261,7 @@ void intel_uncore_init(struct drm_device *dev)
}
}
-void intel_uncore_sanitize(struct drm_device *dev)
+static void intel_uncore_forcewake_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -272,6 +272,11 @@ void intel_uncore_sanitize(struct drm_device *dev)
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
__gen6_gt_force_wake_mt_reset(dev_priv);
}
+}
+
+void intel_uncore_sanitize(struct drm_device *dev)
+{
+ intel_uncore_forcewake_reset(dev);
/* BIOS often leaves RC6 enabled, but disable it for hw init */
intel_disable_gt_powersave(dev);
@@ -549,6 +554,8 @@ static int gen6_do_reset(struct drm_device *dev)
/* Spin waiting for the device to ack the reset request */
ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+ intel_uncore_forcewake_reset(dev);
+
/* If reset with a user forcewake, try to restore, otherwise turn it off */
if (dev_priv->uncore.forcewake_count)
dev_priv->uncore.funcs.force_wake_get(dev_priv);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index a60584763b61..a0b9d8a95b16 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -124,6 +124,8 @@ void adreno_recover(struct msm_gpu *gpu)
/* reset completed fence seqno, just discard anything pending: */
adreno_gpu->memptrs->fence = gpu->submitted_fence;
+ adreno_gpu->memptrs->rptr = 0;
+ adreno_gpu->memptrs->wptr = 0;
gpu->funcs->pm_resume(gpu);
ret = gpu->funcs->hw_init(gpu);
@@ -229,7 +231,7 @@ void adreno_idle(struct msm_gpu *gpu)
return;
} while(time_before(jiffies, t));
- DRM_ERROR("timeout waiting for %s to drain ringbuffer!\n", gpu->name);
+ DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
/* TODO maybe we need to reset GPU here to recover from hang? */
}
@@ -256,11 +258,17 @@ void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
uint32_t freedwords;
+ unsigned long t = jiffies + ADRENO_IDLE_TIMEOUT;
do {
uint32_t size = gpu->rb->size / 4;
uint32_t wptr = get_wptr(gpu->rb);
uint32_t rptr = adreno_gpu->memptrs->rptr;
freedwords = (rptr + (size - 1) - wptr) % size;
+
+ if (time_after(jiffies, t)) {
+ DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
+ break;
+ }
} while(freedwords < ndwords);
}
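The freedwords expression above is the usual one-slot-reserved ring computation: the writer may never catch up to the reader, hence size - 1. A worked standalone check with hypothetical pointers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t size = 1024; /* ring size in dwords */
    uint32_t rptr = 100, wptr = 900;

    uint32_t freedwords = (rptr + (size - 1) - wptr) % size;
    printf("%u free dwords\n", freedwords); /* (100 + 1023 - 900) % 1024 = 223 */
    return 0;
}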
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
index 5db5bbaedae2..bc7fd11ad8be 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
@@ -19,8 +19,6 @@
#include "msm_drv.h"
#include "mdp4_kms.h"
-#include <mach/iommu.h>
-
static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);
static int mdp4_hw_init(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 864c9773636b..b3a2f1629041 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -18,8 +18,6 @@
#include "msm_drv.h"
#include "msm_gpu.h"
-#include <mach/iommu.h>
-
static void msm_fb_output_poll_changed(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -62,6 +60,8 @@ int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
int i, ret;
for (i = 0; i < cnt; i++) {
+ /* TODO maybe some day msm iommu won't require this hack: */
+ struct device *msm_iommu_get_ctx(const char *ctx_name);
struct device *ctx = msm_iommu_get_ctx(names[i]);
if (!ctx)
continue;
@@ -199,7 +199,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
* imx drm driver on iMX5
*/
dev_err(dev->dev, "failed to load kms\n");
- ret = PTR_ERR(priv->kms);
+ ret = PTR_ERR(kms);
goto fail;
}
@@ -499,25 +499,41 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
struct timespec *timeout)
{
struct msm_drm_private *priv = dev->dev_private;
- unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
- unsigned long start_jiffies = jiffies;
- unsigned long remaining_jiffies;
int ret;
- if (time_after(start_jiffies, timeout_jiffies))
- remaining_jiffies = 0;
- else
- remaining_jiffies = timeout_jiffies - start_jiffies;
-
- ret = wait_event_interruptible_timeout(priv->fence_event,
- priv->completed_fence >= fence,
- remaining_jiffies);
- if (ret == 0) {
- DBG("timeout waiting for fence: %u (completed: %u)",
- fence, priv->completed_fence);
- ret = -ETIMEDOUT;
- } else if (ret != -ERESTARTSYS) {
- ret = 0;
+ if (!priv->gpu)
+ return 0;
+
+ if (fence > priv->gpu->submitted_fence) {
+ DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
+ fence, priv->gpu->submitted_fence);
+ return -EINVAL;
+ }
+
+ if (!timeout) {
+ /* no-wait: */
+ ret = fence_completed(dev, fence) ? 0 : -EBUSY;
+ } else {
+ unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
+ unsigned long start_jiffies = jiffies;
+ unsigned long remaining_jiffies;
+
+ if (time_after(start_jiffies, timeout_jiffies))
+ remaining_jiffies = 0;
+ else
+ remaining_jiffies = timeout_jiffies - start_jiffies;
+
+ ret = wait_event_interruptible_timeout(priv->fence_event,
+ fence_completed(dev, fence),
+ remaining_jiffies);
+
+ if (ret == 0) {
+ DBG("timeout waiting for fence: %u (completed: %u)",
+ fence, priv->completed_fence);
+ ret = -ETIMEDOUT;
+ } else if (ret != -ERESTARTSYS) {
+ ret = 0;
+ }
}
return ret;
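A minimal model of the remaining-time computation in the wait path above: the caller supplies an absolute deadline in jiffies, the wait helper needs a relative count, and an already-expired deadline clamps to zero. Plain integers stand in for jiffies and time_after().

#include <stdio.h>

static unsigned long remaining(unsigned long now, unsigned long deadline)
{
    if (now > deadline) /* stand-in for time_after(now, deadline) */
        return 0;
    return deadline - now;
}

int main(void)
{
    printf("%lu\n", remaining(1000, 1500)); /* 500 ticks left */
    printf("%lu\n", remaining(2000, 1500)); /* deadline passed: 0 */
    return 0;
}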
@@ -681,7 +697,7 @@ static struct drm_driver msm_driver = {
.gem_vm_ops = &vm_ops,
.dumb_create = msm_gem_dumb_create,
.dumb_map_offset = msm_gem_dumb_map_offset,
- .dumb_destroy = msm_gem_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = msm_debugfs_init,
.debugfs_cleanup = msm_debugfs_cleanup,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 80d75094bf0a..df8f1d084bc1 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -153,7 +153,7 @@ void *msm_gem_vaddr(struct drm_gem_object *obj);
int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
struct work_struct *work);
void msm_gem_move_to_active(struct drm_gem_object *obj,
- struct msm_gpu *gpu, uint32_t fence);
+ struct msm_gpu *gpu, bool write, uint32_t fence);
void msm_gem_move_to_inactive(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
struct timespec *timeout);
@@ -191,6 +191,12 @@ u32 msm_readl(const void __iomem *addr);
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+static inline bool fence_completed(struct drm_device *dev, uint32_t fence)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ return priv->completed_fence >= fence;
+}
+
static inline int align_pitch(int width, int bpp)
{
int bytespp = (bpp + 7) / 8;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 6b5a6c8c7658..2bae46c66a30 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -40,9 +40,9 @@ static struct page **get_pages(struct drm_gem_object *obj)
}
msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
- if (!msm_obj->sgt) {
+ if (IS_ERR(msm_obj->sgt)) {
dev_err(dev->dev, "failed to allocate sgt\n");
- return ERR_PTR(-ENOMEM);
+ return ERR_CAST(msm_obj->sgt);
}
msm_obj->pages = p;
@@ -159,7 +159,6 @@ out_unlock:
out:
switch (ret) {
case -EAGAIN:
- set_need_resched();
case 0:
case -ERESTARTSYS:
case -EINTR:
@@ -320,13 +319,6 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}
-int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
- uint32_t handle)
-{
- /* No special work needed, drop the reference and see what falls out */
- return drm_gem_handle_delete(file, handle);
-}
-
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset)
{
@@ -393,11 +385,14 @@ int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
}
void msm_gem_move_to_active(struct drm_gem_object *obj,
- struct msm_gpu *gpu, uint32_t fence)
+ struct msm_gpu *gpu, bool write, uint32_t fence)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_obj->gpu = gpu;
- msm_obj->fence = fence;
+ if (write)
+ msm_obj->write_fence = fence;
+ else
+ msm_obj->read_fence = fence;
list_del_init(&msm_obj->mm_list);
list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}
@@ -411,7 +406,8 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
msm_obj->gpu = NULL;
- msm_obj->fence = 0;
+ msm_obj->read_fence = 0;
+ msm_obj->write_fence = 0;
list_del_init(&msm_obj->mm_list);
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
@@ -433,8 +429,18 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret = 0;
- if (is_active(msm_obj) && !(op & MSM_PREP_NOSYNC))
- ret = msm_wait_fence_interruptable(dev, msm_obj->fence, timeout);
+ if (is_active(msm_obj)) {
+ uint32_t fence = 0;
+
+ if (op & MSM_PREP_READ)
+ fence = msm_obj->write_fence;
+ if (op & MSM_PREP_WRITE)
+ fence = max(fence, msm_obj->read_fence);
+ if (op & MSM_PREP_NOSYNC)
+ timeout = NULL;
+
+ ret = msm_wait_fence_interruptable(dev, fence, timeout);
+ }
/* TODO cache maintenance */
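The fence selection above encodes the usual read/write dependency rule: a CPU read only needs the last GPU write to retire, while a CPU write must also wait out outstanding GPU reads. A standalone sketch (the flag values are stand-ins for MSM_PREP_READ/MSM_PREP_WRITE):

#include <stdio.h>
#include <stdint.h>

#define PREP_READ  0x1
#define PREP_WRITE 0x2

static uint32_t fence_to_wait(uint32_t op, uint32_t read_fence,
                              uint32_t write_fence)
{
    uint32_t fence = 0;

    if (op & PREP_READ)  /* reader waits on the last writer */
        fence = write_fence;
    if (op & PREP_WRITE) /* writer also waits on the last reader */
        fence = fence > read_fence ? fence : read_fence;
    return fence;
}

int main(void)
{
    /* read_fence = 7, write_fence = 5: a write must wait for fence 7 */
    printf("%u\n", fence_to_wait(PREP_READ | PREP_WRITE, 7, 5));
    return 0;
}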
@@ -455,9 +461,10 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
uint64_t off = drm_vma_node_start(&obj->vma_node);
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- seq_printf(m, "%08x: %c(%d) %2d (%2d) %08llx %p %d\n",
+ seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n",
msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
- msm_obj->fence, obj->name, obj->refcount.refcount.counter,
+ msm_obj->read_fence, msm_obj->write_fence,
+ obj->name, obj->refcount.refcount.counter,
off, msm_obj->vaddr, obj->size);
}
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index d746f13d283c..0676f32e2c6a 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -36,7 +36,7 @@ struct msm_gem_object {
*/
struct list_head mm_list;
struct msm_gpu *gpu; /* non-null if active */
- uint32_t fence;
+ uint32_t read_fence, write_fence;
/* Transiently in the process of submit ioctl, objects associated
* with the submit are on submit->bo_list.. this only lasts for
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 3e1ef3a00f60..5281d4bc37f7 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -78,7 +78,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
}
if (submit_bo.flags & BO_INVALID_FLAGS) {
- DBG("invalid flags: %x", submit_bo.flags);
+ DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
ret = -EINVAL;
goto out_unlock;
}
@@ -92,7 +92,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
*/
obj = idr_find(&file->object_idr, submit_bo.handle);
if (!obj) {
- DBG("invalid handle %u at index %u", submit_bo.handle, i);
+ DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
ret = -EINVAL;
goto out_unlock;
}
@@ -100,7 +100,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
msm_obj = to_msm_bo(obj);
if (!list_empty(&msm_obj->submit_entry)) {
- DBG("handle %u at index %u already on submit list",
+ DRM_ERROR("handle %u at index %u already on submit list\n",
submit_bo.handle, i);
ret = -EINVAL;
goto out_unlock;
@@ -216,8 +216,9 @@ static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
struct msm_gem_object **obj, uint32_t *iova, bool *valid)
{
if (idx >= submit->nr_bos) {
- DBG("invalid buffer index: %u (out of %u)", idx, submit->nr_bos);
- return EINVAL;
+ DRM_ERROR("invalid buffer index: %u (out of %u)\n",
+ idx, submit->nr_bos);
+ return -EINVAL;
}
if (obj)
@@ -239,7 +240,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
int ret;
if (offset % 4) {
- DBG("non-aligned cmdstream buffer: %u", offset);
+ DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
return -EINVAL;
}
@@ -266,7 +267,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
return -EFAULT;
if (submit_reloc.submit_offset % 4) {
- DBG("non-aligned reloc offset: %u",
+ DRM_ERROR("non-aligned reloc offset: %u\n",
submit_reloc.submit_offset);
return -EINVAL;
}
@@ -276,7 +277,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
if ((off >= (obj->base.size / 4)) ||
(off < last_offset)) {
- DBG("invalid offset %u at reloc %u", off, i);
+ DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
return -EINVAL;
}
@@ -374,14 +375,15 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out;
if (submit_cmd.size % 4) {
- DBG("non-aligned cmdstream buffer size: %u",
+ DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
submit_cmd.size);
ret = -EINVAL;
goto out;
}
- if (submit_cmd.size >= msm_obj->base.size) {
- DBG("invalid cmdstream size: %u", submit_cmd.size);
+ if ((submit_cmd.size + submit_cmd.submit_offset) >=
+ msm_obj->base.size) {
+ DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
ret = -EINVAL;
goto out;
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index e1e1ec9321ff..3bab937965d1 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -29,13 +29,14 @@
static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev)
{
struct drm_device *dev = gpu->dev;
- struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
+ struct kgsl_device_platform_data *pdata;
if (!pdev) {
dev_err(dev->dev, "could not find dtv pdata\n");
return;
}
+ pdata = pdev->dev.platform_data;
if (pdata->bus_scale_table) {
gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table);
DBG("bus scale client: %08x", gpu->bsc);
@@ -230,6 +231,8 @@ static void hangcheck_timer_reset(struct msm_gpu *gpu)
static void hangcheck_handler(unsigned long data)
{
struct msm_gpu *gpu = (struct msm_gpu *)data;
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
uint32_t fence = gpu->funcs->last_fence(gpu);
if (fence != gpu->hangcheck_fence) {
@@ -237,14 +240,22 @@ static void hangcheck_handler(unsigned long data)
gpu->hangcheck_fence = fence;
} else if (fence < gpu->submitted_fence) {
/* no progress and not done.. hung! */
- struct msm_drm_private *priv = gpu->dev->dev_private;
gpu->hangcheck_fence = fence;
+ dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
+ gpu->name);
+ dev_err(dev->dev, "%s: completed fence: %u\n",
+ gpu->name, fence);
+ dev_err(dev->dev, "%s: submitted fence: %u\n",
+ gpu->name, gpu->submitted_fence);
queue_work(priv->wq, &gpu->recover_work);
}
/* if still more pending work, reset the hangcheck timer: */
if (gpu->submitted_fence > gpu->hangcheck_fence)
hangcheck_timer_reset(gpu);
+
+ /* workaround for missing irq: */
+ queue_work(priv->wq, &gpu->retire_work);
}
/*
@@ -265,7 +276,8 @@ static void retire_worker(struct work_struct *work)
obj = list_first_entry(&gpu->active_list,
struct msm_gem_object, mm_list);
- if (obj->fence <= fence) {
+ if ((obj->read_fence <= fence) &&
+ (obj->write_fence <= fence)) {
/* move to inactive: */
msm_gem_move_to_inactive(&obj->base);
msm_gem_put_iova(&obj->base, gpu->id);
@@ -321,7 +333,11 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
submit->gpu->id, &iova);
}
- msm_gem_move_to_active(&msm_obj->base, gpu, submit->fence);
+ if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
+ msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
+
+ if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
+ msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
}
hangcheck_timer_reset(gpu);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 2e11ea02cf87..57cda2a1437b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -579,8 +579,22 @@ static void
init_reserved(struct nvbios_init *init)
{
u8 opcode = nv_ro08(init->bios, init->offset);
- trace("RESERVED\t0x%02x\n", opcode);
- init->offset += 1;
+ u8 length, i;
+
+ switch (opcode) {
+ case 0xaa:
+ length = 4;
+ break;
+ default:
+ length = 1;
+ break;
+ }
+
+ trace("RESERVED 0x%02x\t", opcode);
+ for (i = 1; i < length; i++)
+ cont(" 0x%02x", nv_ro08(init->bios, init->offset + i));
+ cont("\n");
+ init->offset += length;
}
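The point of the change above is that reserved opcodes are no longer assumed to be a single byte: 0xaa carries three operand bytes, so the parser must skip four to stay aligned with the rest of the script. A standalone model of the length lookup:

#include <stdio.h>
#include <stdint.h>

static unsigned reserved_opcode_len(uint8_t opcode)
{
    switch (opcode) {
    case 0xaa:
        return 4; /* opcode byte plus three data bytes */
    default:
        return 1; /* plain one-byte reserved opcode */
    }
}

int main(void)
{
    printf("%u %u\n", reserved_opcode_len(0xaa), reserved_opcode_len(0x42));
    return 0;
}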
/**
@@ -1437,7 +1451,7 @@ init_configure_mem(struct nvbios_init *init)
data = init_rdvgai(init, 0x03c4, 0x01);
init_wrvgai(init, 0x03c4, 0x01, data | 0x20);
- while ((addr = nv_ro32(bios, sdata)) != 0xffffffff) {
+ for (; (addr = nv_ro32(bios, sdata)) != 0xffffffff; sdata += 4) {
switch (addr) {
case 0x10021c: /* CKE_NORMAL */
case 0x1002d0: /* CMD_REFRESH */
@@ -2135,6 +2149,7 @@ static struct nvbios_init_opcode {
[0x99] = { init_zm_auxch },
[0x9a] = { init_i2c_long_if },
[0xa9] = { init_gpio_ne },
+ [0xaa] = { init_reserved },
};
#define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0]))
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index 37712a6df923..e290cfa4acee 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -113,7 +113,7 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
pmc->use_msi = false;
break;
default:
- pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", true);
+ pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", false);
if (pmc->use_msi) {
pmc->use_msi = pci_enable_msi(device->pdev) == 0;
if (pmc->use_msi) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index d2712e6e5d31..7848590f5568 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -278,7 +278,6 @@ nouveau_display_create(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_display *disp;
- u32 pclass = dev->pdev->class >> 8;
int ret, gen;
disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
@@ -340,29 +339,25 @@ nouveau_display_create(struct drm_device *dev)
drm_kms_helper_poll_init(dev);
drm_kms_helper_poll_disable(dev);
- if (nouveau_modeset == 1 ||
- (nouveau_modeset < 0 && pclass == PCI_CLASS_DISPLAY_VGA)) {
- if (drm->vbios.dcb.entries) {
- if (nv_device(drm->device)->card_type < NV_50)
- ret = nv04_display_create(dev);
- else
- ret = nv50_display_create(dev);
- } else {
- ret = 0;
- }
-
- if (ret)
- goto disp_create_err;
+ if (drm->vbios.dcb.entries) {
+ if (nv_device(drm->device)->card_type < NV_50)
+ ret = nv04_display_create(dev);
+ else
+ ret = nv50_display_create(dev);
+ } else {
+ ret = 0;
+ }
- if (dev->mode_config.num_crtc) {
- ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
- if (ret)
- goto vblank_err;
- }
+ if (ret)
+ goto disp_create_err;
- nouveau_backlight_init(dev);
+ if (dev->mode_config.num_crtc) {
+ ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+ if (ret)
+ goto vblank_err;
}
+ nouveau_backlight_init(dev);
return 0;
vblank_err:
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 8863644024b7..e893c5362402 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -636,7 +636,8 @@ int nouveau_pmops_resume(struct device *dev)
nouveau_fbcon_set_suspend(drm_dev, 0);
nouveau_fbcon_zfill_all(drm_dev);
- nouveau_display_resume(drm_dev);
+ if (drm_dev->mode_config.num_crtc)
+ nouveau_display_resume(drm_dev);
nv_suspend_set_printk_level(NV_DBG_DEBUG);
return 0;
}
@@ -671,7 +672,8 @@ static int nouveau_pmops_thaw(struct device *dev)
if (drm_dev->mode_config.num_crtc)
nouveau_fbcon_set_suspend(drm_dev, 0);
nouveau_fbcon_zfill_all(drm_dev);
- nouveau_display_resume(drm_dev);
+ if (drm_dev->mode_config.num_crtc)
+ nouveau_display_resume(drm_dev);
nv_suspend_set_printk_level(NV_DBG_DEBUG);
return 0;
}
@@ -906,7 +908,8 @@ static int nouveau_pmops_runtime_resume(struct device *dev)
pci_set_master(pdev);
ret = nouveau_do_resume(drm_dev);
- nouveau_display_resume(drm_dev);
+ if (drm_dev->mode_config.num_crtc)
+ nouveau_display_resume(drm_dev);
drm_kms_helper_poll_enable(drm_dev);
/* do magic */
nv_mask(device, 0x88488, (1 << 25), (1 << 25));
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 8f6d63d7edd3..a86ecf65c164 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -454,7 +454,8 @@ nouveau_fbcon_init(struct drm_device *dev)
int preferred_bpp;
int ret;
- if (!dev->mode_config.num_crtc)
+ if (!dev->mode_config.num_crtc ||
+ (dev->pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
return 0;
fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index ca5492ac2da5..0843ebc910d4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -104,9 +104,7 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
else
nvbe->ttm.ttm.func = &nv50_sgdma_backend;
- if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
- kfree(nvbe);
+ if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page))
return NULL;
- }
return &nvbe->ttm.ttm;
}
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index dfac7965ea28..32923d2f6002 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -707,8 +707,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
- if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio)
+ if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
+ (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ (radeon_connector->audio == RADEON_AUDIO_AUTO)))
return ATOM_ENCODER_MODE_HDMI;
else if (radeon_connector->use_digital)
return ATOM_ENCODER_MODE_DVI;
@@ -718,8 +719,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
default:
- if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio)
+ if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
+ (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ (radeon_connector->audio == RADEON_AUDIO_AUTO)))
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
@@ -732,8 +734,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
- else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio)
+ else if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
+ (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ (radeon_connector->audio == RADEON_AUDIO_AUTO)))
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
@@ -1647,8 +1650,12 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
- /* some early dce3.2 boards have a bug in their transmitter control table */
- if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730))
+ /* some dce3.x boards have a bug in their transmitter control table.
+ * ACTION_ENABLE_OUTPUT can probably be dropped since ACTION_ENABLE
+ * does the same thing and more.
+ */
+ if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
+ (rdev->family != CHIP_RS880))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
}
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 084e69414fd1..9b6950d9b3c0 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -1168,6 +1168,23 @@ static const struct radeon_blacklist_clocks btc_blacklist_clocks[] =
{ 25000, 30000, RADEON_SCLK_UP }
};
+void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
+ u32 *max_clock)
+{
+ u32 i, clock = 0;
+
+ if ((table == NULL) || (table->count == 0)) {
+ *max_clock = clock;
+ return;
+ }
+
+ for (i = 0; i < table->count; i++) {
+ if (clock < table->entries[i].clk)
+ clock = table->entries[i].clk;
+ }
+ *max_clock = clock;
+}
+
void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
u32 clock, u16 max_voltage, u16 *voltage)
{
@@ -1913,7 +1930,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
}
j++;
- if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
+ if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;
tmp = RREG32(MC_PMG_CMD_MRS);
@@ -1928,7 +1945,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
}
j++;
- if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
+ if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;
break;
case MC_SEQ_RESERVE_M >> 2:
@@ -1942,7 +1959,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
}
j++;
- if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
+ if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;
break;
default:
@@ -2080,6 +2097,7 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
bool disable_mclk_switching;
u32 mclk, sclk;
u16 vddc, vddci;
+ u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
btc_dpm_vblank_too_short(rdev))
@@ -2121,6 +2139,39 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
ps->low.vddci = max_limits->vddci;
}
+ /* limit clocks to max supported clocks based on voltage dependency tables */
+ btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+ &max_sclk_vddc);
+ btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ &max_mclk_vddci);
+ btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ &max_mclk_vddc);
+
+ if (max_sclk_vddc) {
+ if (ps->low.sclk > max_sclk_vddc)
+ ps->low.sclk = max_sclk_vddc;
+ if (ps->medium.sclk > max_sclk_vddc)
+ ps->medium.sclk = max_sclk_vddc;
+ if (ps->high.sclk > max_sclk_vddc)
+ ps->high.sclk = max_sclk_vddc;
+ }
+ if (max_mclk_vddci) {
+ if (ps->low.mclk > max_mclk_vddci)
+ ps->low.mclk = max_mclk_vddci;
+ if (ps->medium.mclk > max_mclk_vddci)
+ ps->medium.mclk = max_mclk_vddci;
+ if (ps->high.mclk > max_mclk_vddci)
+ ps->high.mclk = max_mclk_vddci;
+ }
+ if (max_mclk_vddc) {
+ if (ps->low.mclk > max_mclk_vddc)
+ ps->low.mclk = max_mclk_vddc;
+ if (ps->medium.mclk > max_mclk_vddc)
+ ps->medium.mclk = max_mclk_vddc;
+ if (ps->high.mclk > max_mclk_vddc)
+ ps->high.mclk = max_mclk_vddc;
+ }
+
/* XXX validate the min clocks required for display */
if (disable_mclk_switching) {
@@ -2340,12 +2391,6 @@ int btc_dpm_set_power_state(struct radeon_device *rdev)
return ret;
}
- ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
- if (ret) {
- DRM_ERROR("rv770_dpm_force_performance_level failed\n");
- return ret;
- }
-
return 0;
}
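The three j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE checks earlier in btc_set_mc_special_registers fix a classic off-by-one: for an array of N slots the last valid index is N - 1, so the guard must reject j == N as well. A tiny standalone demonstration (N is arbitrary here):

#include <assert.h>

#define N 16 /* stand-in array size */

static int index_ok(int j)
{
    return j < N; /* j >= N is out of bounds */
}

int main(void)
{
    assert(index_ok(N - 1)); /* last valid slot */
    assert(!index_ok(N));    /* the old `j > N` check wrongly allowed this */
    return 0;
}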
diff --git a/drivers/gpu/drm/radeon/btc_dpm.h b/drivers/gpu/drm/radeon/btc_dpm.h
index 1a15e0e41950..3b6f12b7760b 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.h
+++ b/drivers/gpu/drm/radeon/btc_dpm.h
@@ -46,6 +46,8 @@ void btc_adjust_clock_combinations(struct radeon_device *rdev,
struct rv7xx_pl *pl);
void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
u32 clock, u16 max_voltage, u16 *voltage);
+void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
+ u32 *max_clock);
void btc_apply_voltage_delta_rules(struct radeon_device *rdev,
u16 max_vddc, u16 max_vddci,
u16 *vddc, u16 *vddci);
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 3cce533397c6..51e947a97edf 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -146,6 +146,8 @@ static const struct ci_pt_config_reg didt_config_ci[] =
};
extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
+extern void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
+ u32 *max_clock);
extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
u32 arb_freq_src, u32 arb_freq_dest);
extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
@@ -712,6 +714,7 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
struct radeon_clock_and_voltage_limits *max_limits;
bool disable_mclk_switching;
u32 sclk, mclk;
+ u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
int i;
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -739,6 +742,29 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
}
}
+ /* limit clocks to max supported clocks based on voltage dependency tables */
+ btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+ &max_sclk_vddc);
+ btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ &max_mclk_vddci);
+ btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ &max_mclk_vddc);
+
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (max_sclk_vddc) {
+ if (ps->performance_levels[i].sclk > max_sclk_vddc)
+ ps->performance_levels[i].sclk = max_sclk_vddc;
+ }
+ if (max_mclk_vddci) {
+ if (ps->performance_levels[i].mclk > max_mclk_vddci)
+ ps->performance_levels[i].mclk = max_mclk_vddci;
+ }
+ if (max_mclk_vddc) {
+ if (ps->performance_levels[i].mclk > max_mclk_vddc)
+ ps->performance_levels[i].mclk = max_mclk_vddc;
+ }
+ }
+
/* XXX validate the min clocks required for display */
if (disable_mclk_switching) {
@@ -4748,12 +4774,6 @@ int ci_dpm_set_power_state(struct radeon_device *rdev)
if (pi->pcie_performance_request)
ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
- ret = ci_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
- if (ret) {
- DRM_ERROR("ci_dpm_force_performance_level failed\n");
- return ret;
- }
-
cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
RADEON_CG_BLOCK_MC |
RADEON_CG_BLOCK_SDMA |
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index 53b43dd3cf1e..252e10a41cf5 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -47,10 +47,11 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
u32 smc_start_address,
const u8 *src, u32 byte_count, u32 limit)
{
+ unsigned long flags;
u32 data, original_data;
u32 addr;
u32 extra_shift;
- int ret;
+ int ret = 0;
if (smc_start_address & 3)
return -EINVAL;
@@ -59,13 +60,14 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
addr = smc_start_address;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
while (byte_count >= 4) {
/* SMC address space is BE */
data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
ret = ci_set_smc_sram_address(rdev, addr, limit);
if (ret)
- return ret;
+ goto done;
WREG32(SMC_IND_DATA_0, data);
@@ -80,7 +82,7 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
ret = ci_set_smc_sram_address(rdev, addr, limit);
if (ret)
- return ret;
+ goto done;
original_data = RREG32(SMC_IND_DATA_0);
@@ -97,11 +99,15 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
ret = ci_set_smc_sram_address(rdev, addr, limit);
if (ret)
- return ret;
+ goto done;
WREG32(SMC_IND_DATA_0, data);
}
- return 0;
+
+done:
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+ return ret;
}
void ci_start_smc(struct radeon_device *rdev)
@@ -197,6 +203,7 @@ PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev)
int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
{
+ unsigned long flags;
u32 ucode_start_address;
u32 ucode_size;
const u8 *src;
@@ -219,6 +226,7 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
return -EINVAL;
src = (const u8 *)rdev->smc_fw->data;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
WREG32(SMC_IND_INDEX_0, ucode_start_address);
WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
while (ucode_size >= 4) {
@@ -231,6 +239,7 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
ucode_size -= 4;
}
WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
return 0;
}
@@ -238,25 +247,29 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
int ci_read_smc_sram_dword(struct radeon_device *rdev,
u32 smc_address, u32 *value, u32 limit)
{
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
ret = ci_set_smc_sram_address(rdev, smc_address, limit);
- if (ret)
- return ret;
+ if (ret == 0)
+ *value = RREG32(SMC_IND_DATA_0);
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
- *value = RREG32(SMC_IND_DATA_0);
- return 0;
+ return ret;
}
int ci_write_smc_sram_dword(struct radeon_device *rdev,
u32 smc_address, u32 value, u32 limit)
{
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
ret = ci_set_smc_sram_address(rdev, smc_address, limit);
- if (ret)
- return ret;
+ if (ret == 0)
+ WREG32(SMC_IND_DATA_0, value);
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
- WREG32(SMC_IND_DATA_0, value);
- return 0;
+ return ret;
}
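/* The recurring pattern in this file: SMC SRAM is reached through an
 * INDEX/DATA register pair, so selecting the address and touching the
 * data window must not be interleaved with another user, including one
 * running in interrupt context — hence spin_lock_irqsave() on the new
 * rdev->smc_idx_lock rather than a plain spin_lock().  Minimal sketch
 * of the shape (illustration only):
 */
static u32 smc_ind_rreg_sketch(struct radeon_device *rdev, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&rdev->smc_idx_lock, flags);
        WREG32(SMC_IND_INDEX_0, reg);   /* select the SRAM address */
        r = RREG32(SMC_IND_DATA_0);     /* read through the data window */
        spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);

        return r;
}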
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index a3bba0587276..b874ccdf52f7 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -77,6 +77,10 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev);
static void cik_program_aspm(struct radeon_device *rdev);
static void cik_init_pg(struct radeon_device *rdev);
static void cik_init_cg(struct radeon_device *rdev);
+static void cik_fini_pg(struct radeon_device *rdev);
+static void cik_fini_cg(struct radeon_device *rdev);
+static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
+ bool enable);
/* get temperature in millidegrees */
int ci_get_temp(struct radeon_device *rdev)
@@ -120,20 +124,27 @@ int kv_get_temp(struct radeon_device *rdev)
*/
u32 cik_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
WREG32(PCIE_INDEX, reg);
(void)RREG32(PCIE_INDEX);
r = RREG32(PCIE_DATA);
+ spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
return r;
}
void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
WREG32(PCIE_INDEX, reg);
(void)RREG32(PCIE_INDEX);
WREG32(PCIE_DATA, v);
(void)RREG32(PCIE_DATA);
+ spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
}
static const u32 spectre_rlc_save_restore_register_list[] =
@@ -2722,7 +2733,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
} else if ((rdev->pdev->device == 0x1309) ||
(rdev->pdev->device == 0x130A) ||
(rdev->pdev->device == 0x130D) ||
- (rdev->pdev->device == 0x1313)) {
+ (rdev->pdev->device == 0x1313) ||
+ (rdev->pdev->device == 0x131D)) {
rdev->config.cik.max_cu_per_sh = 6;
rdev->config.cik.max_backends_per_se = 2;
} else if ((rdev->pdev->device == 0x1306) ||
@@ -2835,10 +2847,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
rdev->config.cik.tile_config |= (3 << 0);
break;
}
- if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
- rdev->config.cik.tile_config |= 1 << 4;
- else
- rdev->config.cik.tile_config |= 0 << 4;
+ rdev->config.cik.tile_config |=
+ ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
rdev->config.cik.tile_config |=
((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
rdev->config.cik.tile_config |=
@@ -4013,6 +4023,8 @@ static int cik_cp_resume(struct radeon_device *rdev)
{
int r;
+ cik_enable_gui_idle_interrupt(rdev, false);
+
r = cik_cp_load_microcode(rdev);
if (r)
return r;
@@ -4024,6 +4036,8 @@ static int cik_cp_resume(struct radeon_device *rdev)
if (r)
return r;
+ cik_enable_gui_idle_interrupt(rdev, true);
+
return 0;
}
@@ -4173,6 +4187,10 @@ static void cik_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+ /* disable CG/PG */
+ cik_fini_pg(rdev);
+ cik_fini_cg(rdev);
+
/* stop the rlc */
cik_rlc_stop(rdev);
@@ -4442,8 +4460,8 @@ static int cik_mc_init(struct radeon_device *rdev)
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* size in MB on si */
- rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
- rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
rdev->mc.visible_vram_size = rdev->mc.aper_size;
si_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
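/* Why the ULL suffixes above matter: RREG32(CONFIG_MEMSIZE) returns the
 * VRAM size in MB as a u32, and "size * 1024 * 1024" is then a 32-bit
 * multiply that wraps to 0 for boards with 4 GB of VRAM.  Promoting one
 * operand forces the whole multiply into 64 bits.  Illustration:
 */
static u64 mb_to_bytes_sketch(u32 mb)
{
        /* 4096 MB: the 32-bit multiply wraps to 0, 64-bit yields 0x100000000 */
        return (u64)mb * 1024ULL * 1024ULL;
}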
@@ -4721,12 +4739,13 @@ static void cik_vm_decode_fault(struct radeon_device *rdev,
u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
- char *block = (char *)&mc_client;
+ char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
+ (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
- printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
+ printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
protections, vmid, addr,
(status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
- block, mc_id);
+ block, mc_client, mc_id);
}
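/* mc_client is four ASCII characters packed big-endian into a u32; the
 * old char* cast printed them in host memory order (reversed on
 * little-endian) and without a terminating NUL.  The replacement above
 * unpacks explicitly.  A self-contained illustration of the same
 * unpacking:
 */
static void mc_client_to_string_sketch(u32 mc_client, char out[5])
{
        out[0] = mc_client >> 24;          /* most significant byte first */
        out[1] = (mc_client >> 16) & 0xff;
        out[2] = (mc_client >> 8) & 0xff;
        out[3] = mc_client & 0xff;
        out[4] = '\0';                     /* safe to print with %s */
}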
/**
@@ -5376,7 +5395,9 @@ static void cik_enable_hdp_ls(struct radeon_device *rdev,
void cik_update_cg(struct radeon_device *rdev,
u32 block, bool enable)
{
+
if (block & RADEON_CG_BLOCK_GFX) {
+ cik_enable_gui_idle_interrupt(rdev, false);
/* order matters! */
if (enable) {
cik_enable_mgcg(rdev, true);
@@ -5385,6 +5406,7 @@ void cik_update_cg(struct radeon_device *rdev,
cik_enable_cgcg(rdev, false);
cik_enable_mgcg(rdev, false);
}
+ cik_enable_gui_idle_interrupt(rdev, true);
}
if (block & RADEON_CG_BLOCK_MC) {
@@ -5541,7 +5563,7 @@ static void cik_enable_gfx_cgpg(struct radeon_device *rdev,
{
u32 data, orig;
- if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) {
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
orig = data = RREG32(RLC_PG_CNTL);
data |= GFX_PG_ENABLE;
if (orig != data)
@@ -5805,7 +5827,7 @@ static void cik_init_pg(struct radeon_device *rdev)
if (rdev->pg_flags) {
cik_enable_sck_slowdown_on_pu(rdev, true);
cik_enable_sck_slowdown_on_pd(rdev, true);
- if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+ if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
cik_init_gfx_cgpg(rdev);
cik_enable_cp_pg(rdev, true);
cik_enable_gds_pg(rdev, true);
@@ -5819,7 +5841,7 @@ static void cik_fini_pg(struct radeon_device *rdev)
{
if (rdev->pg_flags) {
cik_update_gfx_pg(rdev, false);
- if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+ if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
cik_enable_cp_pg(rdev, false);
cik_enable_gds_pg(rdev, false);
}
@@ -5895,7 +5917,9 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev)
u32 tmp;
/* gfx ring */
- WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ tmp = RREG32(CP_INT_CNTL_RING0) &
+ (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ WREG32(CP_INT_CNTL_RING0, tmp);
/* sdma */
tmp = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, tmp);
@@ -6036,8 +6060,7 @@ static int cik_irq_init(struct radeon_device *rdev)
*/
int cik_irq_set(struct radeon_device *rdev)
{
- u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE |
- PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
+ u32 cp_int_cntl;
u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3;
u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3;
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
@@ -6058,6 +6081,10 @@ int cik_irq_set(struct radeon_device *rdev)
return 0;
}
+ cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
+ (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ cp_int_cntl |= PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
+
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
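/* The cik_irq_set()/cik_disable_interrupt_state() changes above switch
 * CP_INT_CNTL_RING0 from a blind write to a read-modify-write so the
 * CNTX_BUSY/CNTX_EMPTY enables, now toggled separately by
 * cik_enable_gui_idle_interrupt(), are preserved rather than forced on.
 * Generic shape of the idiom, as an illustration:
 */
static void cp_int_cntl_rmw_sketch(struct radeon_device *rdev)
{
        u32 v = RREG32(CP_INT_CNTL_RING0);

        v &= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); /* bits kept as-is */
        v |= PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;    /* bits this path owns */
        WREG32(CP_INT_CNTL_RING0, v);
}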
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 95a66db08d9b..91bb470de0a3 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -2014,12 +2014,6 @@ int cypress_dpm_set_power_state(struct radeon_device *rdev)
if (eg_pi->pcie_performance_request)
cypress_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
- ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
- if (ret) {
- DRM_ERROR("rv770_dpm_force_performance_level failed\n");
- return ret;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 8953255e894b..85a69d2ea3d2 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -28,22 +28,30 @@
static u32 dce6_endpoint_rreg(struct radeon_device *rdev,
u32 block_offset, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->end_idx_lock, flags);
WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
r = RREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset);
+ spin_unlock_irqrestore(&rdev->end_idx_lock, flags);
+
return r;
}
static void dce6_endpoint_wreg(struct radeon_device *rdev,
u32 block_offset, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->end_idx_lock, flags);
if (ASIC_IS_DCE8(rdev))
WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
else
WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset,
AZ_ENDPOINT_REG_WRITE_EN | AZ_ENDPOINT_REG_INDEX(reg));
WREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset, v);
+ spin_unlock_irqrestore(&rdev->end_idx_lock, flags);
}
#define RREG32_ENDPOINT(block, reg) dce6_endpoint_rreg(rdev, (block), (reg))
@@ -86,12 +94,12 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u32 offset = dig->afmt->offset;
- u32 id = dig->afmt->pin->id;
if (!dig->afmt->pin)
return;
- WREG32(AFMT_AUDIO_SRC_CONTROL + offset, AFMT_AUDIO_SRC_SELECT(id));
+ WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
+ AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
}
void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
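/* The dce6_afmt_select_pin() hunk is a use-before-check fix: the old
 * code read dig->afmt->pin->id into a local before testing
 * dig->afmt->pin for NULL, so the check could never prevent the
 * dereference.  Corrected ordering, sketched with a hypothetical
 * helper:
 */
static void select_pin_sketch(struct radeon_device *rdev,
                              struct radeon_encoder_atom_dig *dig, u32 offset)
{
        if (!dig->afmt->pin)    /* validate first ... */
                return;
        WREG32(AFMT_AUDIO_SRC_CONTROL + offset, /* ... dereference after */
               AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
}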
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 555164e270a7..b5c67a99dda9 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3131,7 +3131,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sx_max_export_size = 256;
rdev->config.evergreen.sx_max_export_pos_size = 64;
rdev->config.evergreen.sx_max_export_smx_size = 192;
- rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.max_hw_contexts = 4;
rdev->config.evergreen.sq_num_cf_insts = 2;
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index f71ce390aebe..f815c20640bd 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -288,8 +288,7 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
/* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
WREG32(HDMI_ACR_PACKET_CONTROL + offset,
- HDMI_ACR_AUTO_SEND | /* allow hw to sent ACR packets when required */
- HDMI_ACR_SOURCE); /* select SW CTS value */
+ HDMI_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */
evergreen_hdmi_update_ACR(encoder, mode->clock);
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 8768fd6a1e27..4f6d2962767d 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -1501,7 +1501,7 @@
* 6. COMMAND [29:22] | BYTE_COUNT [20:0]
*/
# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
- /* 0 - SRC_ADDR
+ /* 0 - DST_ADDR
* 1 - GDS
*/
# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
@@ -1516,7 +1516,7 @@
# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
/* COMMAND */
# define PACKET3_CP_DMA_DIS_WC (1 << 21)
-# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
/* 0 - none
* 1 - 8 in 16
* 2 - 8 in 32
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index ecd60809db4e..71399065db04 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -40,6 +40,7 @@ static int kv_calculate_dpm_settings(struct radeon_device *rdev);
static void kv_enable_new_levels(struct radeon_device *rdev);
static void kv_program_nbps_index_settings(struct radeon_device *rdev,
struct radeon_ps *new_rps);
+static int kv_set_enabled_level(struct radeon_device *rdev, u32 level);
static int kv_set_enabled_levels(struct radeon_device *rdev);
static int kv_force_dpm_highest(struct radeon_device *rdev);
static int kv_force_dpm_lowest(struct radeon_device *rdev);
@@ -519,7 +520,7 @@ static int kv_set_dpm_boot_state(struct radeon_device *rdev)
static void kv_program_vc(struct radeon_device *rdev)
{
- WREG32_SMC(CG_FTV_0, 0x3FFFC000);
+ WREG32_SMC(CG_FTV_0, 0x3FFFC100);
}
static void kv_clear_vc(struct radeon_device *rdev)
@@ -638,7 +639,10 @@ static int kv_force_lowest_valid(struct radeon_device *rdev)
static int kv_unforce_levels(struct radeon_device *rdev)
{
- return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
+ if (rdev->family == CHIP_KABINI)
+ return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
+ else
+ return kv_set_enabled_levels(rdev);
}
static int kv_update_sclk_t(struct radeon_device *rdev)
@@ -667,9 +671,8 @@ static int kv_program_bootup_state(struct radeon_device *rdev)
&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
if (table && table->count) {
- for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
- if ((table->entries[i].clk == pi->boot_pl.sclk) ||
- (i == 0))
+ for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+ if (table->entries[i].clk == pi->boot_pl.sclk)
break;
}
@@ -682,9 +685,8 @@ static int kv_program_bootup_state(struct radeon_device *rdev)
if (table->num_max_dpm_entries == 0)
return -EINVAL;
- for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
- if ((table->entries[i].sclk_frequency == pi->boot_pl.sclk) ||
- (i == 0))
+ for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+ if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
break;
}
@@ -1078,6 +1080,13 @@ static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
}
+static void kv_reset_acp_boot_level(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->acp_boot_level = 0xff;
+}
+
static void kv_update_current_ps(struct radeon_device *rdev,
struct radeon_ps *rps)
{
@@ -1100,6 +1109,18 @@ static void kv_update_requested_ps(struct radeon_device *rdev,
pi->requested_rps.ps_priv = &pi->requested_ps;
}
+void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret;
+
+ if (pi->bapm_enable) {
+ ret = kv_smc_bapm_enable(rdev, enable);
+ if (ret)
+ DRM_ERROR("kv_smc_bapm_enable failed\n");
+ }
+}
+
int kv_dpm_enable(struct radeon_device *rdev)
{
struct kv_power_info *pi = kv_get_pi(rdev);
@@ -1192,6 +1213,8 @@ int kv_dpm_enable(struct radeon_device *rdev)
return ret;
}
+ kv_reset_acp_boot_level(rdev);
+
if (rdev->irq.installed &&
r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
@@ -1203,6 +1226,12 @@ int kv_dpm_enable(struct radeon_device *rdev)
radeon_irq_set(rdev);
}
+ ret = kv_smc_bapm_enable(rdev, false);
+ if (ret) {
+ DRM_ERROR("kv_smc_bapm_enable failed\n");
+ return ret;
+ }
+
/* powerdown unused blocks for now */
kv_dpm_powergate_acp(rdev, true);
kv_dpm_powergate_samu(rdev, true);
@@ -1226,6 +1255,8 @@ void kv_dpm_disable(struct radeon_device *rdev)
RADEON_CG_BLOCK_BIF |
RADEON_CG_BLOCK_HDP), false);
+ kv_smc_bapm_enable(rdev, false);
+
/* powerup blocks */
kv_dpm_powergate_acp(rdev, false);
kv_dpm_powergate_samu(rdev, false);
@@ -1450,6 +1481,39 @@ static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
return kv_enable_samu_dpm(rdev, !gate);
}
+static u8 kv_get_acp_boot_level(struct radeon_device *rdev)
+{
+ u8 i;
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
+
+ for (i = 0; i < table->count; i++) {
+ if (table->entries[i].clk >= 0) /* XXX */
+ break;
+ }
+
+ if (i >= table->count)
+ i = table->count - 1;
+
+ return i;
+}
+
+static void kv_update_acp_boot_level(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u8 acp_boot_level;
+
+ if (!pi->caps_stable_p_state) {
+ acp_boot_level = kv_get_acp_boot_level(rdev);
+ if (acp_boot_level != pi->acp_boot_level) {
+ pi->acp_boot_level = acp_boot_level;
+ kv_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_ACPDPM_SetEnabledMask,
+ (1 << pi->acp_boot_level));
+ }
+ }
+}
+
static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
{
struct kv_power_info *pi = kv_get_pi(rdev);
@@ -1461,7 +1525,7 @@ static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
if (pi->caps_stable_p_state)
pi->acp_boot_level = table->count - 1;
else
- pi->acp_boot_level = 0;
+ pi->acp_boot_level = kv_get_acp_boot_level(rdev);
ret = kv_copy_bytes_to_smc(rdev,
pi->dpm_table_start +
@@ -1588,13 +1652,11 @@ static void kv_set_valid_clock_range(struct radeon_device *rdev,
}
}
- for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
- if ((table->entries[i].clk <= new_ps->levels[new_ps->num_levels -1].sclk) ||
- (i == 0)) {
- pi->highest_valid = i;
+ for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+ if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
break;
- }
}
+ pi->highest_valid = i;
if (pi->lowest_valid > pi->highest_valid) {
if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
@@ -1615,14 +1677,12 @@ static void kv_set_valid_clock_range(struct radeon_device *rdev,
}
}
- for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
+ for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
if (table->entries[i].sclk_frequency <=
- new_ps->levels[new_ps->num_levels - 1].sclk ||
- i == 0) {
- pi->highest_valid = i;
+ new_ps->levels[new_ps->num_levels - 1].sclk)
break;
- }
}
+ pi->highest_valid = i;
if (pi->lowest_valid > pi->highest_valid) {
if ((new_ps->levels[0].sclk -
@@ -1724,6 +1784,14 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
RADEON_CG_BLOCK_BIF |
RADEON_CG_BLOCK_HDP), false);
+ if (pi->bapm_enable) {
+ ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power);
+ if (ret) {
+ DRM_ERROR("kv_smc_bapm_enable failed\n");
+ return ret;
+ }
+ }
+
if (rdev->family == CHIP_KABINI) {
if (pi->enable_dpm) {
kv_set_valid_clock_range(rdev, new_ps);
@@ -1775,6 +1843,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
return ret;
}
#endif
+ kv_update_acp_boot_level(rdev);
kv_update_sclk_t(rdev);
kv_enable_nb_dpm(rdev);
}
@@ -1785,7 +1854,6 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
RADEON_CG_BLOCK_BIF |
RADEON_CG_BLOCK_HDP), true);
- rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
return 0;
}
@@ -1806,12 +1874,23 @@ void kv_dpm_setup_asic(struct radeon_device *rdev)
void kv_dpm_reset_asic(struct radeon_device *rdev)
{
- kv_force_lowest_valid(rdev);
- kv_init_graphics_levels(rdev);
- kv_program_bootup_state(rdev);
- kv_upload_dpm_settings(rdev);
- kv_force_lowest_valid(rdev);
- kv_unforce_levels(rdev);
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ if (rdev->family == CHIP_KABINI) {
+ kv_force_lowest_valid(rdev);
+ kv_init_graphics_levels(rdev);
+ kv_program_bootup_state(rdev);
+ kv_upload_dpm_settings(rdev);
+ kv_force_lowest_valid(rdev);
+ kv_unforce_levels(rdev);
+ } else {
+ kv_init_graphics_levels(rdev);
+ kv_program_bootup_state(rdev);
+ kv_freeze_sclk_dpm(rdev, true);
+ kv_upload_dpm_settings(rdev);
+ kv_freeze_sclk_dpm(rdev, false);
+ kv_set_enabled_level(rdev, pi->graphics_boot_level);
+ }
}
//XXX use sumo_dpm_display_configuration_changed
@@ -1871,12 +1950,15 @@ static int kv_force_dpm_highest(struct radeon_device *rdev)
if (ret)
return ret;
- for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i >= 0; i--) {
+ for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
if (enable_mask & (1 << i))
break;
}
- return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+ if (rdev->family == CHIP_KABINI)
+ return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+ else
+ return kv_set_enabled_level(rdev, i);
}
static int kv_force_dpm_lowest(struct radeon_device *rdev)
@@ -1893,7 +1975,10 @@ static int kv_force_dpm_lowest(struct radeon_device *rdev)
break;
}
- return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+ if (rdev->family == CHIP_KABINI)
+ return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+ else
+ return kv_set_enabled_level(rdev, i);
}
static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
@@ -1911,9 +1996,9 @@ static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
if (!pi->caps_sclk_ds)
return 0;
- for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i <= 0; i--) {
+ for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
temp = sclk / sumo_get_sleep_divider_from_id(i);
- if ((temp >= min) || (i == 0))
+ if (temp >= min)
break;
}
@@ -2039,12 +2124,12 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
ps->dpmx_nb_ps_lo = 0x1;
ps->dpmx_nb_ps_hi = 0x0;
} else {
- ps->dpm0_pg_nb_ps_lo = 0x1;
+ ps->dpm0_pg_nb_ps_lo = 0x3;
ps->dpm0_pg_nb_ps_hi = 0x0;
- ps->dpmx_nb_ps_lo = 0x2;
- ps->dpmx_nb_ps_hi = 0x1;
+ ps->dpmx_nb_ps_lo = 0x3;
+ ps->dpmx_nb_ps_hi = 0x0;
- if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
+ if (pi->sys_info.nb_dpm_enable) {
force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
pi->disable_nb_ps3_in_battery;
@@ -2210,6 +2295,15 @@ static void kv_enable_new_levels(struct radeon_device *rdev)
}
}
+static int kv_set_enabled_level(struct radeon_device *rdev, u32 level)
+{
+ u32 new_mask = (1 << level);
+
+ return kv_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ new_mask);
+}
+
static int kv_set_enabled_levels(struct radeon_device *rdev)
{
struct kv_power_info *pi = kv_get_pi(rdev);
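/* On non-Kabini parts this series stops using PPSMC_MSG_DPM_ForceState
 * and instead drives forcing through the SCLK DPM enabled-level mask: a
 * single-bit mask pins the SMU to one state, and restoring the full
 * valid range un-forces it.  Hedged sketch of the two masks (the range
 * helper is illustrative, not from the patch):
 */
static u32 dpm_force_mask_sketch(u32 level)
{
        return 1u << level;             /* exactly one enabled state */
}

static u32 dpm_range_mask_sketch(u32 lowest, u32 highest)
{
        /* e.g. lowest=1, highest=4 -> 0x1e: levels 1..4 enabled */
        return ((1u << (highest + 1)) - 1) & ~((1u << lowest) - 1);
}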
diff --git a/drivers/gpu/drm/radeon/kv_dpm.h b/drivers/gpu/drm/radeon/kv_dpm.h
index 32bb079572d7..8cef7525d7a8 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.h
+++ b/drivers/gpu/drm/radeon/kv_dpm.h
@@ -192,6 +192,7 @@ int kv_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
u32 *value, u32 limit);
int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable);
+int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable);
int kv_copy_bytes_to_smc(struct radeon_device *rdev,
u32 smc_start_address,
const u8 *src, u32 byte_count, u32 limit);
diff --git a/drivers/gpu/drm/radeon/kv_smc.c b/drivers/gpu/drm/radeon/kv_smc.c
index 34a226d7e34a..0000b59a6d05 100644
--- a/drivers/gpu/drm/radeon/kv_smc.c
+++ b/drivers/gpu/drm/radeon/kv_smc.c
@@ -107,6 +107,14 @@ int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable)
return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Disable);
}
+int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable)
+{
+ if (enable)
+ return kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM);
+ else
+ return kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM);
+}
+
int kv_copy_bytes_to_smc(struct radeon_device *rdev,
u32 smc_start_address,
const u8 *src, u32 byte_count, u32 limit)
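/* Usage pattern for the new kv_smc_bapm_enable()/enable_bapm hook wired
 * up in this series (see kv_dpm_set_power_state() above): BAPM is only
 * kept enabled on AC power, so a state change simply forwards the
 * current power source.  Illustration:
 *
 *      if (pi->bapm_enable)
 *              kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power);
 */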
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index f7b625c9e0e9..f26339028154 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -787,6 +787,7 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
bool disable_mclk_switching;
u32 mclk, sclk;
u16 vddc, vddci;
+ u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
int i;
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -813,6 +814,29 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
}
}
+ /* limit clocks to max supported clocks based on voltage dependency tables */
+ btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+ &max_sclk_vddc);
+ btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ &max_mclk_vddci);
+ btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ &max_mclk_vddc);
+
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (max_sclk_vddc) {
+ if (ps->performance_levels[i].sclk > max_sclk_vddc)
+ ps->performance_levels[i].sclk = max_sclk_vddc;
+ }
+ if (max_mclk_vddci) {
+ if (ps->performance_levels[i].mclk > max_mclk_vddci)
+ ps->performance_levels[i].mclk = max_mclk_vddci;
+ }
+ if (max_mclk_vddc) {
+ if (ps->performance_levels[i].mclk > max_mclk_vddc)
+ ps->performance_levels[i].mclk = max_mclk_vddc;
+ }
+ }
+
/* XXX validate the min clocks required for display */
if (disable_mclk_switching) {
@@ -3865,12 +3889,6 @@ int ni_dpm_set_power_state(struct radeon_device *rdev)
return ret;
}
- ret = ni_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
- if (ret) {
- DRM_ERROR("ni_dpm_force_performance_level failed\n");
- return ret;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/radeon/ppsmc.h b/drivers/gpu/drm/radeon/ppsmc.h
index 682842804bce..5670b8291285 100644
--- a/drivers/gpu/drm/radeon/ppsmc.h
+++ b/drivers/gpu/drm/radeon/ppsmc.h
@@ -163,6 +163,8 @@ typedef uint8_t PPSMC_Result;
#define PPSMC_MSG_VCEPowerON ((uint32_t) 0x10f)
#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d)
#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e)
+#define PPSMC_MSG_EnableBAPM ((uint32_t) 0x120)
+#define PPSMC_MSG_DisableBAPM ((uint32_t) 0x121)
#define PPSMC_MSG_UVD_DPM_Config ((uint32_t) 0x124)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 9fc61dd68bc0..d71333033b2b 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2853,21 +2853,28 @@ static void r100_pll_errata_after_data(struct radeon_device *rdev)
uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
{
+ unsigned long flags;
uint32_t data;
+ spin_lock_irqsave(&rdev->pll_idx_lock, flags);
WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
r100_pll_errata_after_index(rdev);
data = RREG32(RADEON_CLOCK_CNTL_DATA);
r100_pll_errata_after_data(rdev);
+ spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
return data;
}
void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->pll_idx_lock, flags);
WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
r100_pll_errata_after_index(rdev);
WREG32(RADEON_CLOCK_CNTL_DATA, v);
r100_pll_errata_after_data(rdev);
+ spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
}
static void r100_set_safe_registers(struct radeon_device *rdev)
@@ -2926,9 +2933,11 @@ static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
- for (j = 0; j <= count; j++) {
- i = (rdp + j) & ring->ptr_mask;
- seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+ if (ring->ready) {
+ for (j = 0; j <= count; j++) {
+ i = (rdp + j) & ring->ptr_mask;
+ seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+ }
}
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 4e796ecf9ea4..6edf2b3a52b4 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -160,18 +160,25 @@ void r420_pipes_init(struct radeon_device *rdev)
u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg));
r = RREG32(R_0001FC_MC_IND_DATA);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
return r;
}
void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) |
S_0001F8_MC_IND_WR_EN(1));
WREG32(R_0001FC_MC_IND_DATA, v);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
static void r420_debugfs(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index ea4d3734e6d9..2a1b1876b431 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -119,6 +119,11 @@ u32 r600_get_xclk(struct radeon_device *rdev)
return rdev->clock.spll.reference_freq;
}
+int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+ return 0;
+}
+
/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
@@ -1045,20 +1050,27 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev)
uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
+ unsigned long flags;
uint32_t r;
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
r = RREG32(R_0028FC_MC_DATA);
WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
return r;
}
void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
S_0028F8_MC_IND_WR_EN(1));
WREG32(R_0028FC_MC_DATA, v);
WREG32(R_0028F8_MC_INDEX, 0x7F);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
static void r600_mc_program(struct radeon_device *rdev)
@@ -2092,20 +2104,27 @@ static void r600_gpu_init(struct radeon_device *rdev)
*/
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
(void)RREG32(PCIE_PORT_INDEX);
r = RREG32(PCIE_PORT_DATA);
+ spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
return r;
}
void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
(void)RREG32(PCIE_PORT_INDEX);
WREG32(PCIE_PORT_DATA, (v));
(void)RREG32(PCIE_PORT_DATA);
+ spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
}
/*
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index fa0de46fcc0d..5513d8f06252 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -1084,7 +1084,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
- le16_to_cpu(limits->entries[i].usVoltage);
+ le16_to_cpu(entry->usVoltage);
entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
}
@@ -1219,30 +1219,20 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
void r600_free_extended_power_table(struct radeon_device *rdev)
{
- if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries)
- kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
- if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries)
- kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
- if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries)
- kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
- if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries)
- kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries);
- if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries)
- kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries);
- if (rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
- kfree(rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries);
- if (rdev->pm.dpm.dyn_state.ppm_table)
- kfree(rdev->pm.dpm.dyn_state.ppm_table);
- if (rdev->pm.dpm.dyn_state.cac_tdp_table)
- kfree(rdev->pm.dpm.dyn_state.cac_tdp_table);
- if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries)
- kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries);
- if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries)
- kfree(rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries);
- if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries)
- kfree(rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries);
- if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries)
- kfree(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries);
+ struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state;
+
+ kfree(dyn_state->vddc_dependency_on_sclk.entries);
+ kfree(dyn_state->vddci_dependency_on_mclk.entries);
+ kfree(dyn_state->vddc_dependency_on_mclk.entries);
+ kfree(dyn_state->mvdd_dependency_on_mclk.entries);
+ kfree(dyn_state->cac_leakage_table.entries);
+ kfree(dyn_state->phase_shedding_limits_table.entries);
+ kfree(dyn_state->ppm_table);
+ kfree(dyn_state->cac_tdp_table);
+ kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
+ kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
+ kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
+ kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
}
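/* The rewrite above leans on kfree(NULL) being a defined no-op, which
 * makes every "if (ptr) kfree(ptr)" guard redundant; the dyn_state
 * local is just to shorten the lines.  Illustration of the idiom:
 */
static void free_one_table_sketch(struct radeon_dpm_dynamic_state *dyn_state)
{
        kfree(dyn_state->ppm_table);    /* safe even when ppm_table is NULL */
        dyn_state->ppm_table = NULL;    /* optional: avoid a dangling pointer */
}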
enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index f443010ce90b..5b729319f27b 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -57,15 +57,15 @@ enum r600_hdmi_iec_status_bits {
static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
/* 32kHz 44.1kHz 48kHz */
/* Clock N CTS N CTS N CTS */
- { 25174, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */
+ { 25175, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25.20/1.001 MHz */
{ 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */
{ 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */
{ 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */
{ 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */
{ 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */
- { 74175, 11648, 210937, 17836, 234375, 11648, 140625 }, /* 74.25/1.001 MHz */
+ { 74176, 11648, 210937, 17836, 234375, 11648, 140625 }, /* 74.25/1.001 MHz */
{ 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */
- { 148351, 11648, 421875, 8918, 234375, 5824, 140625 }, /* 148.50/1.001 MHz */
+ { 148352, 11648, 421875, 8918, 234375, 5824, 140625 }, /* 148.50/1.001 MHz */
{ 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */
{ 0, 4096, 0, 6272, 0, 6144, 0 } /* Other */
};
@@ -75,8 +75,15 @@ static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
*/
static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq)
{
- if (*CTS == 0)
- *CTS = clock * N / (128 * freq) * 1000;
+ u64 n;
+ u32 d;
+
+ if (*CTS == 0) {
+ n = (u64)clock * (u64)N * 1000ULL;
+ d = 128 * freq;
+ do_div(n, d);
+ *CTS = n;
+ }
DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
N, *CTS, freq);
}
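/* Worked example of the widened CTS computation above, using the HDMI
 * relation CTS = f_TMDS * N / (128 * f_audio) with the pixel clock
 * supplied in kHz (hence the extra * 1000): for a 74.25 MHz mode at
 * 48 kHz, N = 6144 and
 *
 *      CTS = 74250 * 6144 * 1000 / (128 * 48000) = 74250
 *
 * matching the predefined table entry.  The intermediate product is
 * about 4.6e11, which overflows 32 bits — that is why the code now
 * accumulates in a u64 and divides with do_div().
 */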
@@ -257,10 +264,7 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
* number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
*/
- if (ASIC_IS_DCE3(rdev)) {
- /* according to the reg specs, this should DCE3.2 only, but in
- * practice it seems to cover DCE3.0 as well.
- */
+ if (ASIC_IS_DCE32(rdev)) {
if (dig->dig_encoder == 0) {
dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
@@ -276,8 +280,21 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
}
+ } else if (ASIC_IS_DCE3(rdev)) {
+ /* according to the reg specs, this should be DCE3.2 only, but in
+ * practice it seems to cover DCE3.0/3.1 as well.
+ */
+ if (dig->dig_encoder == 0) {
+ WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
+ WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
+ WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+ } else {
+ WREG32(DCCG_AUDIO_DTO1_PHASE, base_rate * 100);
+ WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
+ WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
+ }
} else {
- /* according to the reg specs, this should be DCE2.0 and DCE3.0 */
+ /* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */
WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
AUDIO_DTO_MODULE(clock / 10));
}
@@ -434,8 +451,8 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
}
WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
- HDMI0_ACR_AUTO_SEND | /* allow hw to sent ACR packets when required */
- HDMI0_ACR_SOURCE); /* select SW CTS value */
+ HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */
+ HDMI0_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */
WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
HDMI0_NULL_SEND | /* send null packets when required */
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 454f90a849e4..7b3c7b5932c5 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1040,7 +1040,7 @@
# define HDMI0_AVI_INFO_CONT (1 << 1)
# define HDMI0_AUDIO_INFO_SEND (1 << 4)
# define HDMI0_AUDIO_INFO_CONT (1 << 5)
-# define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hmdi regs */
+# define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
# define HDMI0_AUDIO_INFO_UPDATE (1 << 7)
# define HDMI0_MPEG_INFO_SEND (1 << 8)
# define HDMI0_MPEG_INFO_CONT (1 << 9)
@@ -1523,7 +1523,7 @@
*/
# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
/* COMMAND */
-# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
/* 0 - none
* 1 - 8 in 16
* 2 - 8 in 32
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index ff8b564ce2b2..a400ac1c4147 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -181,7 +181,7 @@ extern int radeon_aspm;
#define RADEON_CG_SUPPORT_HDP_MGCG (1 << 16)
/* PG flags */
-#define RADEON_PG_SUPPORT_GFX_CG (1 << 0)
+#define RADEON_PG_SUPPORT_GFX_PG (1 << 0)
#define RADEON_PG_SUPPORT_GFX_SMG (1 << 1)
#define RADEON_PG_SUPPORT_GFX_DMG (1 << 2)
#define RADEON_PG_SUPPORT_UVD (1 << 3)
@@ -1778,6 +1778,7 @@ struct radeon_asic {
int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level);
bool (*vblank_too_short)(struct radeon_device *rdev);
void (*powergate_uvd)(struct radeon_device *rdev, bool gate);
+ void (*enable_bapm)(struct radeon_device *rdev, bool enable);
} dpm;
/* pageflipping */
struct {
@@ -2110,6 +2111,28 @@ struct radeon_device {
resource_size_t rmmio_size;
/* protects concurrent MM_INDEX/DATA based register access */
spinlock_t mmio_idx_lock;
+ /* protects concurrent SMC based register access */
+ spinlock_t smc_idx_lock;
+ /* protects concurrent PLL register access */
+ spinlock_t pll_idx_lock;
+ /* protects concurrent MC register access */
+ spinlock_t mc_idx_lock;
+ /* protects concurrent PCIE register access */
+ spinlock_t pcie_idx_lock;
+ /* protects concurrent PCIE_PORT register access */
+ spinlock_t pciep_idx_lock;
+ /* protects concurrent PIF register access */
+ spinlock_t pif_idx_lock;
+ /* protects concurrent CG register access */
+ spinlock_t cg_idx_lock;
+ /* protects concurrent UVD register access */
+ spinlock_t uvd_idx_lock;
+ /* protects concurrent RCU register access */
+ spinlock_t rcu_idx_lock;
+ /* protects concurrent DIDT register access */
+ spinlock_t didt_idx_lock;
+ /* protects concurrent ENDPOINT (audio) register access */
+ spinlock_t end_idx_lock;
void __iomem *rmmio;
radeon_rreg_t mc_rreg;
radeon_wreg_t mc_wreg;
@@ -2277,123 +2300,179 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
*/
static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
{
+ unsigned long flags;
uint32_t r;
+ spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
r = RREG32(RADEON_PCIE_DATA);
+ spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
return r;
}
static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
WREG32(RADEON_PCIE_DATA, (v));
+ spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
}
static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
WREG32(TN_SMC_IND_INDEX_0, (reg));
r = RREG32(TN_SMC_IND_DATA_0);
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
return r;
}
static inline void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
WREG32(TN_SMC_IND_INDEX_0, (reg));
WREG32(TN_SMC_IND_DATA_0, (v));
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
}
static inline u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
r = RREG32(R600_RCU_DATA);
+ spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
return r;
}
static inline void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
WREG32(R600_RCU_DATA, (v));
+ spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
}
static inline u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->cg_idx_lock, flags);
WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
r = RREG32(EVERGREEN_CG_IND_DATA);
+ spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
return r;
}
static inline void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->cg_idx_lock, flags);
WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
WREG32(EVERGREEN_CG_IND_DATA, (v));
+ spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
}
static inline u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->pif_idx_lock, flags);
WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
r = RREG32(EVERGREEN_PIF_PHY0_DATA);
+ spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
return r;
}
static inline void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->pif_idx_lock, flags);
WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
WREG32(EVERGREEN_PIF_PHY0_DATA, (v));
+ spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
}
static inline u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->pif_idx_lock, flags);
WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
r = RREG32(EVERGREEN_PIF_PHY1_DATA);
+ spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
return r;
}
static inline void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->pif_idx_lock, flags);
WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
WREG32(EVERGREEN_PIF_PHY1_DATA, (v));
+ spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
}
static inline u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
r = RREG32(R600_UVD_CTX_DATA);
+ spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
return r;
}
static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
WREG32(R600_UVD_CTX_DATA, (v));
+ spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
}
static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->didt_idx_lock, flags);
WREG32(CIK_DIDT_IND_INDEX, (reg));
r = RREG32(CIK_DIDT_IND_DATA);
+ spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
return r;
}
static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->didt_idx_lock, flags);
WREG32(CIK_DIDT_IND_INDEX, (reg));
WREG32(CIK_DIDT_IND_DATA, (v));
+ spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
}
void r100_pll_errata_after_index(struct radeon_device *rdev);
@@ -2569,6 +2648,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l))
#define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev))
#define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g))
+#define radeon_dpm_enable_bapm(rdev, e) rdev->asic->dpm.enable_bapm((rdev), (e))
/* Common functions */
/* AGP */
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 630853b96841..8f7e04538fd6 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1004,6 +1004,8 @@ static struct radeon_asic rv6xx_asic = {
.wait_for_vblank = &avivo_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &r600_hdmi_enable,
+ .hdmi_setmode = &r600_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_cpdma,
@@ -1037,6 +1039,7 @@ static struct radeon_asic rv6xx_asic = {
.set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL,
.get_temperature = &rv6xx_get_temp,
+ .set_uvd_clocks = &r600_set_uvd_clocks,
},
.dpm = {
.init = &rv6xx_dpm_init,
@@ -1126,6 +1129,7 @@ static struct radeon_asic rs780_asic = {
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.get_temperature = &rv6xx_get_temp,
+ .set_uvd_clocks = &r600_set_uvd_clocks,
},
.dpm = {
.init = &rs780_dpm_init,
@@ -1141,6 +1145,7 @@ static struct radeon_asic rs780_asic = {
.get_mclk = &rs780_dpm_get_mclk,
.print_power_state = &rs780_dpm_print_power_state,
.debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level,
+ .force_performance_level = &rs780_dpm_force_performance_level,
},
.pflip = {
.pre_page_flip = &rs600_pre_page_flip,
@@ -1791,6 +1796,7 @@ static struct radeon_asic trinity_asic = {
.print_power_state = &trinity_dpm_print_power_state,
.debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level,
.force_performance_level = &trinity_dpm_force_performance_level,
+ .enable_bapm = &trinity_dpm_enable_bapm,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
@@ -2166,6 +2172,7 @@ static struct radeon_asic kv_asic = {
.debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
.force_performance_level = &kv_dpm_force_performance_level,
.powergate_uvd = &kv_dpm_powergate_uvd,
+ .enable_bapm = &kv_dpm_enable_bapm,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
@@ -2390,7 +2397,7 @@ int radeon_asic_init(struct radeon_device *rdev)
RADEON_CG_SUPPORT_HDP_LS |
RADEON_CG_SUPPORT_HDP_MGCG;
rdev->pg_flags = 0 |
- /*RADEON_PG_SUPPORT_GFX_CG | */
+ /*RADEON_PG_SUPPORT_GFX_PG | */
RADEON_PG_SUPPORT_SDMA;
break;
case CHIP_OLAND:
@@ -2479,7 +2486,7 @@ int radeon_asic_init(struct radeon_device *rdev)
RADEON_CG_SUPPORT_HDP_LS |
RADEON_CG_SUPPORT_HDP_MGCG;
rdev->pg_flags = 0;
- /*RADEON_PG_SUPPORT_GFX_CG |
+ /*RADEON_PG_SUPPORT_GFX_PG |
RADEON_PG_SUPPORT_GFX_SMG |
RADEON_PG_SUPPORT_GFX_DMG |
RADEON_PG_SUPPORT_UVD |
@@ -2507,7 +2514,7 @@ int radeon_asic_init(struct radeon_device *rdev)
RADEON_CG_SUPPORT_HDP_LS |
RADEON_CG_SUPPORT_HDP_MGCG;
rdev->pg_flags = 0;
- /*RADEON_PG_SUPPORT_GFX_CG |
+ /*RADEON_PG_SUPPORT_GFX_PG |
RADEON_PG_SUPPORT_GFX_SMG |
RADEON_PG_SUPPORT_UVD |
RADEON_PG_SUPPORT_VCE |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 818bbe6b884b..70c29d5e080d 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -389,6 +389,7 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
u32 r600_get_xclk(struct radeon_device *rdev);
uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
int rv6xx_get_temp(struct radeon_device *rdev);
+int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
int r600_dpm_pre_set_power_state(struct radeon_device *rdev);
void r600_dpm_post_set_power_state(struct radeon_device *rdev);
/* r600 dma */
@@ -428,6 +429,8 @@ void rs780_dpm_print_power_state(struct radeon_device *rdev,
struct radeon_ps *ps);
void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m);
+int rs780_dpm_force_performance_level(struct radeon_device *rdev,
+ enum radeon_dpm_forced_level level);
/*
* rv770,rv730,rv710,rv740
@@ -625,6 +628,7 @@ void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *r
struct seq_file *m);
int trinity_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
+void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable);
/* DCE6 - SI */
void dce6_bandwidth_update(struct radeon_device *rdev);
@@ -781,6 +785,7 @@ void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
int kv_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
+void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable);
/* uvd v1.0 */
uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 404e25d285ba..f79ee184ffd5 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1367,6 +1367,7 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
uint16_t data_offset, size;
struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
+ struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT *ss_assign;
uint8_t frev, crev;
int i, num_indices;
@@ -1378,18 +1379,21 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
-
+ ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
+ ((u8 *)&ss_info->asSS_Info[0]);
for (i = 0; i < num_indices; i++) {
- if (ss_info->asSS_Info[i].ucSS_Id == id) {
+ if (ss_assign->ucSS_Id == id) {
ss->percentage =
- le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
- ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType;
- ss->step = ss_info->asSS_Info[i].ucSS_Step;
- ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
- ss->range = ss_info->asSS_Info[i].ucSS_Range;
- ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
+ le16_to_cpu(ss_assign->usSpreadSpectrumPercentage);
+ ss->type = ss_assign->ucSpreadSpectrumType;
+ ss->step = ss_assign->ucSS_Step;
+ ss->delay = ss_assign->ucSS_Delay;
+ ss->range = ss_assign->ucSS_Range;
+ ss->refdiv = ss_assign->ucRecommendedRef_Div;
return true;
}
+ ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
+ ((u8 *)ss_assign + sizeof(struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT));
}
}
return false;
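/* Why the loops above switch from array indexing to explicit pointer
 * arithmetic: the ATOM spread-spectrum tables are packed arrays whose
 * record size depends on the table revision, so indexing
 * asSpreadSpectrum[i] through one fixed struct type over-strides on
 * other revisions.  Stepping by the revision's record size is correct
 * for all of them.  Generic sketch:
 */
static void *nth_record_sketch(void *base, unsigned int idx, size_t stride)
{
        /* stride = sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2) for a v2 table, etc. */
        return (u8 *)base + (size_t)idx * stride;
}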
@@ -1477,6 +1481,12 @@ union asic_ss_info {
struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
};
+union asic_ss_assignment {
+ struct _ATOM_ASIC_SS_ASSIGNMENT v1;
+ struct _ATOM_ASIC_SS_ASSIGNMENT_V2 v2;
+ struct _ATOM_ASIC_SS_ASSIGNMENT_V3 v3;
+};
+
bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
struct radeon_atom_ss *ss,
int id, u32 clock)
@@ -1485,6 +1495,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
uint16_t data_offset, size;
union asic_ss_info *ss_info;
+ union asic_ss_assignment *ss_assign;
uint8_t frev, crev;
int i, num_indices;
@@ -1509,45 +1520,52 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_ASIC_SS_ASSIGNMENT);
+ ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info.asSpreadSpectrum[0]);
for (i = 0; i < num_indices; i++) {
- if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) &&
- (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) {
+ if ((ss_assign->v1.ucClockIndication == id) &&
+ (clock <= le32_to_cpu(ss_assign->v1.ulTargetClockRange))) {
ss->percentage =
- le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
- ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
- ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz);
+ le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage);
+ ss->type = ss_assign->v1.ucSpreadSpectrumMode;
+ ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz);
return true;
}
+ ss_assign = (union asic_ss_assignment *)
+ ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT));
}
break;
case 2:
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
+ ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_2.asSpreadSpectrum[0]);
for (i = 0; i < num_indices; i++) {
- if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) &&
- (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) {
+ if ((ss_assign->v2.ucClockIndication == id) &&
+ (clock <= le32_to_cpu(ss_assign->v2.ulTargetClockRange))) {
ss->percentage =
- le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
- ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
- ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+ le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage);
+ ss->type = ss_assign->v2.ucSpreadSpectrumMode;
+ ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz);
if ((crev == 2) &&
((id == ASIC_INTERNAL_ENGINE_SS) ||
(id == ASIC_INTERNAL_MEMORY_SS)))
ss->rate /= 100;
return true;
}
+ ss_assign = (union asic_ss_assignment *)
+ ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2));
}
break;
case 3:
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
+ ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_3.asSpreadSpectrum[0]);
for (i = 0; i < num_indices; i++) {
- if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) &&
- (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) {
+ if ((ss_assign->v3.ucClockIndication == id) &&
+ (clock <= le32_to_cpu(ss_assign->v3.ulTargetClockRange))) {
ss->percentage =
- le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
- ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
- ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+ le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage);
+ ss->type = ss_assign->v3.ucSpreadSpectrumMode;
+ ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz);
if ((id == ASIC_INTERNAL_ENGINE_SS) ||
(id == ASIC_INTERNAL_MEMORY_SS))
ss->rate /= 100;
@@ -1555,6 +1573,8 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
radeon_atombios_get_igp_ss_overrides(rdev, ss, id);
return true;
}
+ ss_assign = (union asic_ss_assignment *)
+ ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3));
}
break;
default:
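
The hunks above stop indexing the spread-spectrum tables as a fixed-stride array (asSS_Info[i] / asSpreadSpectrum[i]) and instead walk a byte pointer advanced by the per-revision entry size, since the V1/V2/V3 assignment structs differ in size while the flexible array in the header is declared with only one of them. A minimal sketch of the pattern, with illustrative names only:

	/* Sketch: iterate a packed table whose entry size depends on a
	 * revision field; entry_size would be sizeof(ATOM_ASIC_SS_ASSIGNMENT),
	 * _V2 or _V3 depending on frev. All names here are placeholders. */
	#include <stdint.h>
	#include <stddef.h>

	struct entry_v1 { uint8_t id; uint32_t clock; };

	static const struct entry_v1 *find_entry(const uint8_t *table,
						 size_t entry_size,
						 int num_entries, uint8_t id)
	{
		const uint8_t *p = table;
		int i;

		for (i = 0; i < num_entries; i++) {
			const struct entry_v1 *e = (const struct entry_v1 *)p;

			if (e->id == id)
				return e;
			p += entry_size;	/* stride by the real entry size */
		}
		return NULL;
	}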
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 2399f25ec037..79159b5da05b 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -396,6 +396,21 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
}
}
+ if (property == rdev->mode_info.audio_property) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ /* need to find digital encoder on connector */
+ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+ if (!encoder)
+ return 0;
+
+ radeon_encoder = to_radeon_encoder(encoder);
+
+ if (radeon_connector->audio != val) {
+ radeon_connector->audio = val;
+ radeon_property_change_mode(&radeon_encoder->base);
+ }
+ }
+
if (property == rdev->mode_info.underscan_property) {
/* need to find digital encoder on connector */
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
@@ -1420,7 +1435,7 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (radeon_dp_getdpcd(radeon_connector))
ret = connector_status_connected;
} else {
- /* try non-aux ddc (DP to DVI/HMDI/etc. adapter) */
+ /* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
if (radeon_ddc_probe(radeon_connector, false))
ret = connector_status_connected;
}
@@ -1489,6 +1504,24 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = {
.force = radeon_dvi_force,
};
+static const struct drm_connector_funcs radeon_edp_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = radeon_dp_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = radeon_lvds_set_property,
+ .destroy = radeon_dp_connector_destroy,
+ .force = radeon_dvi_force,
+};
+
+static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = radeon_dp_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = radeon_lvds_set_property,
+ .destroy = radeon_dp_connector_destroy,
+ .force = radeon_dvi_force,
+};
+
void
radeon_add_atom_connector(struct drm_device *dev,
uint32_t connector_id,
@@ -1580,8 +1613,6 @@ radeon_add_atom_connector(struct drm_device *dev,
goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
- drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (i2c_bus->valid) {
/* add DP i2c bus */
if (connector_type == DRM_MODE_CONNECTOR_eDP)
@@ -1598,6 +1629,10 @@ radeon_add_atom_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_VGA:
case DRM_MODE_CONNECTOR_DVIA:
default:
+ drm_connector_init(dev, &radeon_connector->base,
+ &radeon_dp_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base,
+ &radeon_dp_connector_helper_funcs);
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
radeon_connector->dac_load_detect = true;
@@ -1610,6 +1645,10 @@ radeon_add_atom_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
case DRM_MODE_CONNECTOR_DisplayPort:
+ drm_connector_init(dev, &radeon_connector->base,
+ &radeon_dp_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base,
+ &radeon_dp_connector_helper_funcs);
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_property,
UNDERSCAN_OFF);
@@ -1619,6 +1658,9 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.audio_property,
+ RADEON_AUDIO_DISABLE);
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1634,6 +1676,10 @@ radeon_add_atom_connector(struct drm_device *dev,
break;
case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_eDP:
+ drm_connector_init(dev, &radeon_connector->base,
+ &radeon_lvds_bridge_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base,
+ &radeon_dp_connector_helper_funcs);
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
@@ -1708,6 +1754,11 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.underscan_vborder_property,
0);
}
+ if (ASIC_IS_DCE2(rdev)) {
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.audio_property,
+ RADEON_AUDIO_DISABLE);
+ }
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
drm_object_attach_property(&radeon_connector->base.base,
@@ -1748,6 +1799,11 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.underscan_vborder_property,
0);
}
+ if (ASIC_IS_DCE2(rdev)) {
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.audio_property,
+ RADEON_AUDIO_DISABLE);
+ }
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1787,6 +1843,11 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.underscan_vborder_property,
0);
}
+ if (ASIC_IS_DCE2(rdev)) {
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.audio_property,
+ RADEON_AUDIO_DISABLE);
+ }
connector->interlace_allowed = true;
/* in theory with a DP to VGA converter... */
connector->doublescan_allowed = false;
@@ -1797,7 +1858,7 @@ radeon_add_atom_connector(struct drm_device *dev,
goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
- drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
+ drm_connector_init(dev, &radeon_connector->base, &radeon_edp_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (i2c_bus->valid) {
/* add DP i2c bus */
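
The new per-connector "audio" property is handled in radeon_connector_set_property above and created in the radeon_display.c hunk below. From userspace it can be flipped through libdrm's generic property interface; a hedged usage sketch (error handling trimmed, and the value 1 corresponds to RADEON_AUDIO_ENABLE in the enum added below):

	/* Sketch: set the radeon "audio" property on a connector via libdrm.
	 * Assumes fd is an open DRM device node and conn_id a valid
	 * connector id. */
	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>

	static int set_audio(int fd, uint32_t conn_id, uint64_t value)
	{
		drmModeConnector *conn = drmModeGetConnector(fd, conn_id);
		int i, ret = -1;

		for (i = 0; conn && i < conn->count_props; i++) {
			drmModePropertyRes *prop =
				drmModeGetProperty(fd, conn->props[i]);

			if (prop && !strcmp(prop->name, "audio"))
				ret = drmModeConnectorSetProperty(fd, conn_id,
								  prop->prop_id,
								  value);
			drmModeFreeProperty(prop);
		}
		drmModeFreeConnector(conn);
		return ret;
	}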
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index a56084410372..66c222836631 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -28,6 +28,7 @@
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
+#include "radeon_trace.h"
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
@@ -80,10 +81,13 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->relocs[i].lobj.bo = p->relocs[i].robj;
p->relocs[i].lobj.written = !!r->write_domain;
- /* the first reloc of an UVD job is the
- msg and that must be in VRAM */
- if (p->ring == R600_RING_TYPE_UVD_INDEX && i == 0) {
- /* TODO: is this still needed for NI+ ? */
+ /* the first reloc of a UVD job is the msg and that must be in
VRAM; also put everything into VRAM on AGP cards to avoid
image corruption */
+ if (p->ring == R600_RING_TYPE_UVD_INDEX &&
+ p->rdev->family < CHIP_PALM &&
+ (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
+
p->relocs[i].lobj.domain =
RADEON_GEM_DOMAIN_VRAM;
@@ -559,6 +563,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return r;
}
+ trace_radeon_cs(&parser);
+
r = radeon_cs_ib_chunk(rdev, &parser);
if (r) {
goto out;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 16cb8792b1e6..841d0e09be3e 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1249,6 +1249,17 @@ int radeon_device_init(struct radeon_device *rdev,
/* Registers mapping */
/* TODO: block userspace mapping of io register */
spin_lock_init(&rdev->mmio_idx_lock);
+ spin_lock_init(&rdev->smc_idx_lock);
+ spin_lock_init(&rdev->pll_idx_lock);
+ spin_lock_init(&rdev->mc_idx_lock);
+ spin_lock_init(&rdev->pcie_idx_lock);
+ spin_lock_init(&rdev->pciep_idx_lock);
+ spin_lock_init(&rdev->pif_idx_lock);
+ spin_lock_init(&rdev->cg_idx_lock);
+ spin_lock_init(&rdev->uvd_idx_lock);
+ spin_lock_init(&rdev->rcu_idx_lock);
+ spin_lock_init(&rdev->didt_idx_lock);
+ spin_lock_init(&rdev->end_idx_lock);
if (rdev->family >= CHIP_BONAIRE) {
rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
@@ -1309,13 +1320,22 @@ int radeon_device_init(struct radeon_device *rdev,
return r;
}
if ((radeon_testing & 1)) {
- radeon_test_moves(rdev);
+ if (rdev->accel_working)
+ radeon_test_moves(rdev);
+ else
+ DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
}
if ((radeon_testing & 2)) {
- radeon_test_syncing(rdev);
+ if (rdev->accel_working)
+ radeon_test_syncing(rdev);
+ else
+ DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
}
if (radeon_benchmarking) {
- radeon_benchmark(rdev, radeon_benchmarking);
+ if (rdev->accel_working)
+ radeon_benchmark(rdev, radeon_benchmarking);
+ else
+ DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
}
return 0;
}
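
The batch of spin_lock_init() calls above backs the index/data register pairs that the rs400/rs600/rs690/rv515 and SMC hunks below wrap in spin_lock_irqsave(). Without the lock, two contexts can interleave the index write and the data access and read or clobber the wrong register. The shape of the pattern, reduced to a sketch:

	/* Sketch of the indirect register access the new locks protect.
	 * INDEX/DATA stand in for the per-block registers used below
	 * (e.g. RS480_NB_MC_INDEX/RS480_NB_MC_DATA under mc_idx_lock). */
	static uint32_t ind_rreg(struct radeon_device *rdev, uint32_t reg)
	{
		unsigned long flags;
		uint32_t r;

		spin_lock_irqsave(&rdev->mc_idx_lock, flags);
		WREG32(INDEX, reg);	/* select the register... */
		r = RREG32(DATA);	/* ...then read it, atomically w.r.t.
					 * other index users */
		spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
		return r;
	}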
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index b055bddaa94c..0d1aa050d41d 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1172,6 +1172,12 @@ static struct drm_prop_enum_list radeon_underscan_enum_list[] =
{ UNDERSCAN_AUTO, "auto" },
};
+static struct drm_prop_enum_list radeon_audio_enum_list[] =
+{ { RADEON_AUDIO_DISABLE, "off" },
+ { RADEON_AUDIO_ENABLE, "on" },
+ { RADEON_AUDIO_AUTO, "auto" },
+};
+
static int radeon_modeset_create_props(struct radeon_device *rdev)
{
int sz;
@@ -1222,6 +1228,12 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
if (!rdev->mode_info.underscan_vborder_property)
return -ENOMEM;
+ sz = ARRAY_SIZE(radeon_audio_enum_list);
+ rdev->mode_info.audio_property =
+ drm_property_create_enum(rdev->ddev, 0,
+ "audio",
+ radeon_audio_enum_list, sz);
+
return 0;
}
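
One nit the hunk leaves open: unlike the underscan properties created just above it, audio_property is not checked for allocation failure. Following the file's own convention, the checked form would be (a sketch, reusing the names from the hunk):

	/* Sketch: same enum property creation, with the -ENOMEM check the
	 * neighbouring underscan properties perform;
	 * drm_property_create_enum() can return NULL. */
	sz = ARRAY_SIZE(radeon_audio_enum_list);
	rdev->mode_info.audio_property =
		drm_property_create_enum(rdev->ddev, 0,
					 "audio",
					 radeon_audio_enum_list, sz);
	if (!rdev->mode_info.audio_property)
		return -ENOMEM;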
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index cb4445f55a96..cdd12dcd988b 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -153,7 +153,7 @@ int radeon_benchmarking = 0;
int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
-int radeon_audio = 0;
+int radeon_audio = 1;
int radeon_disp_priority = 0;
int radeon_hw_i2c = 0;
int radeon_pcie_gen2 = -1;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index d908d8d68f6b..ef63d3f00b2f 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -247,6 +247,8 @@ struct radeon_mode_info {
struct drm_property *underscan_property;
struct drm_property *underscan_hborder_property;
struct drm_property *underscan_vborder_property;
+ /* audio */
+ struct drm_property *audio_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
int bios_hardcoded_edid_size;
@@ -471,6 +473,12 @@ struct radeon_router {
u8 cd_mux_state;
};
+enum radeon_connector_audio {
+ RADEON_AUDIO_DISABLE = 0,
+ RADEON_AUDIO_ENABLE = 1,
+ RADEON_AUDIO_AUTO = 2
+};
+
struct radeon_connector {
struct drm_connector base;
uint32_t connector_id;
@@ -489,6 +497,7 @@ struct radeon_connector {
struct radeon_hpd hpd;
struct radeon_router router;
struct radeon_i2c_chan *router_bus;
+ enum radeon_connector_audio audio;
};
struct radeon_framebuffer {
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index d7555369a3e5..4f6b7fc7ad3c 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -67,7 +67,16 @@ int radeon_pm_get_type_index(struct radeon_device *rdev,
void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
{
- if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+ mutex_lock(&rdev->pm.mutex);
+ if (power_supply_is_system_supplied() > 0)
+ rdev->pm.dpm.ac_power = true;
+ else
+ rdev->pm.dpm.ac_power = false;
+ if (rdev->asic->dpm.enable_bapm)
+ radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
+ mutex_unlock(&rdev->pm.mutex);
+ } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (rdev->pm.profile == PM_PROFILE_AUTO) {
mutex_lock(&rdev->pm.mutex);
radeon_pm_update_profile(rdev);
@@ -333,7 +342,7 @@ static ssize_t radeon_get_pm_profile(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
int cp = rdev->pm.profile;
@@ -349,7 +358,7 @@ static ssize_t radeon_set_pm_profile(struct device *dev,
const char *buf,
size_t count)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
mutex_lock(&rdev->pm.mutex);
@@ -383,7 +392,7 @@ static ssize_t radeon_get_pm_method(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
int pm = rdev->pm.pm_method;
@@ -397,7 +406,7 @@ static ssize_t radeon_set_pm_method(struct device *dev,
const char *buf,
size_t count)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
/* we don't support the legacy modes with dpm */
@@ -433,7 +442,7 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
@@ -447,7 +456,7 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
const char *buf,
size_t count)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
mutex_lock(&rdev->pm.mutex);
@@ -472,7 +481,7 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
@@ -486,7 +495,7 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
const char *buf,
size_t count)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
enum radeon_dpm_forced_level level;
int ret = 0;
@@ -524,7 +533,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
int temp;
@@ -536,6 +545,23 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
+static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct radeon_device *rdev = ddev->dev_private;
+ int hyst = to_sensor_dev_attr(attr)->index;
+ int temp;
+
+ if (hyst)
+ temp = rdev->pm.dpm.thermal.min_temp;
+ else
+ temp = rdev->pm.dpm.thermal.max_temp;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+}
+
static ssize_t radeon_hwmon_show_name(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -544,16 +570,37 @@ static ssize_t radeon_hwmon_show_name(struct device *dev,
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
&sensor_dev_attr_name.dev_attr.attr,
NULL
};
+static umode_t hwmon_attributes_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct radeon_device *rdev = ddev->dev_private;
+
+ /* Skip limit attributes if DPM is not enabled */
+ if (rdev->pm.pm_method != PM_METHOD_DPM &&
+ (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
+ return 0;
+
+ return attr->mode;
+}
+
static const struct attribute_group hwmon_attrgroup = {
.attrs = hwmon_attributes,
+ .is_visible = hwmon_attributes_visible,
};
static int radeon_hwmon_init(struct radeon_device *rdev)
@@ -870,10 +917,13 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
radeon_dpm_post_set_power_state(rdev);
- /* force low perf level for thermal */
- if (rdev->pm.dpm.thermal_active &&
- rdev->asic->dpm.force_performance_level) {
- radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
+ if (rdev->asic->dpm.force_performance_level) {
+ if (rdev->pm.dpm.thermal_active)
+ /* force low perf level for thermal */
+ radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
+ else
+ /* otherwise, enable auto */
+ radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
}
done:
@@ -895,6 +945,8 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
if (enable) {
mutex_lock(&rdev->pm.mutex);
rdev->pm.dpm.uvd_active = true;
+ /* disable this for now */
+#if 0
if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
@@ -904,6 +956,7 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
else
+#endif
dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
rdev->pm.dpm.state = dpm_state;
mutex_unlock(&rdev->pm.mutex);
@@ -952,7 +1005,7 @@ static void radeon_pm_resume_old(struct radeon_device *rdev)
{
/* set up the default clocks if the MC ucode is loaded */
if ((rdev->family >= CHIP_BARTS) &&
- (rdev->family <= CHIP_HAINAN) &&
+ (rdev->family <= CHIP_CAYMAN) &&
rdev->mc_fw) {
if (rdev->pm.default_vddc)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -996,7 +1049,7 @@ static void radeon_pm_resume_dpm(struct radeon_device *rdev)
if (ret) {
DRM_ERROR("radeon: dpm resume failed\n");
if ((rdev->family >= CHIP_BARTS) &&
- (rdev->family <= CHIP_HAINAN) &&
+ (rdev->family <= CHIP_CAYMAN) &&
rdev->mc_fw) {
if (rdev->pm.default_vddc)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -1047,7 +1100,7 @@ static int radeon_pm_init_old(struct radeon_device *rdev)
radeon_pm_init_profile(rdev);
/* set up the default clocks if the MC ucode is loaded */
if ((rdev->family >= CHIP_BARTS) &&
- (rdev->family <= CHIP_HAINAN) &&
+ (rdev->family <= CHIP_CAYMAN) &&
rdev->mc_fw) {
if (rdev->pm.default_vddc)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -1102,9 +1155,10 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
{
int ret;
- /* default to performance state */
+ /* default to balanced state */
rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
+ rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
rdev->pm.default_sclk = rdev->clock.default_sclk;
rdev->pm.default_mclk = rdev->clock.default_mclk;
rdev->pm.current_sclk = rdev->clock.default_sclk;
@@ -1132,7 +1186,7 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
if (ret) {
rdev->pm.dpm_enabled = false;
if ((rdev->family >= CHIP_BARTS) &&
- (rdev->family <= CHIP_HAINAN) &&
+ (rdev->family <= CHIP_CAYMAN) &&
rdev->mc_fw) {
if (rdev->pm.default_vddc)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
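
The two hwmon attributes added above expose the DPM thermal trip points as temp1_crit and temp1_crit_hyst (millidegrees Celsius, per hwmon convention), and hwmon_attributes_visible() hides them when DPM is not in use. A hedged userspace sketch reading one of them; the hwmon instance number varies per system, so "hwmon0" is an assumption:

	/* Sketch: read the new DPM critical-temperature limit from sysfs. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_crit", "r");
		long millideg;

		if (!f)
			return 1;
		if (fscanf(f, "%ld", &millideg) != 1) {
			fclose(f);
			return 1;
		}
		printf("crit: %ld.%03ld C\n", millideg / 1000, millideg % 1000);
		fclose(f);
		return 0;
	}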
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 46a25f037b84..18254e1c3e71 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -839,9 +839,11 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
* packet that is the root issue
*/
i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
- for (j = 0; j <= (count + 32); j++) {
- seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
- i = (i + 1) & ring->ptr_mask;
+ if (ring->ready) {
+ for (j = 0; j <= (count + 32); j++) {
+ seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
+ i = (i + 1) & ring->ptr_mask;
+ }
}
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index f4d6bcee9006..12e8099a0823 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -36,8 +36,8 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
struct radeon_bo *vram_obj = NULL;
struct radeon_bo **gtt_obj = NULL;
uint64_t gtt_addr, vram_addr;
- unsigned i, n, size;
- int r, ring;
+ unsigned n, size;
+ int i, r, ring;
switch (flag) {
case RADEON_TEST_COPY_DMA:
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index eafd8160a155..f7e367815964 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -27,6 +27,26 @@ TRACE_EVENT(radeon_bo_create,
TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
);
+TRACE_EVENT(radeon_cs,
+ TP_PROTO(struct radeon_cs_parser *p),
+ TP_ARGS(p),
+ TP_STRUCT__entry(
+ __field(u32, ring)
+ __field(u32, dw)
+ __field(u32, fences)
+ ),
+
+ TP_fast_assign(
+ __entry->ring = p->ring;
+ __entry->dw = p->chunks[p->chunk_ib_idx].length_dw;
+ __entry->fences = radeon_fence_count_emitted(
+ p->rdev, p->ring);
+ ),
+ TP_printk("ring=%u, dw=%u, fences=%u",
+ __entry->ring, __entry->dw,
+ __entry->fences)
+);
+
DECLARE_EVENT_CLASS(radeon_fence_request,
TP_PROTO(struct drm_device *dev, u32 seqno),
@@ -53,13 +73,6 @@ DEFINE_EVENT(radeon_fence_request, radeon_fence_emit,
TP_ARGS(dev, seqno)
);
-DEFINE_EVENT(radeon_fence_request, radeon_fence_retire,
-
- TP_PROTO(struct drm_device *dev, u32 seqno),
-
- TP_ARGS(dev, seqno)
-);
-
DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin,
TP_PROTO(struct drm_device *dev, u32 seqno),
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 1a01bbff9bfa..4f2e73f79638 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -476,8 +476,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
return -EINVAL;
}
- /* TODO: is this still necessary on NI+ ? */
- if ((cmd == 0 || cmd == 0x3) &&
+ if (p->rdev->family < CHIP_PALM && (cmd == 0 || cmd == 0x3) &&
(start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
start, end);
@@ -799,7 +798,8 @@ void radeon_uvd_note_usage(struct radeon_device *rdev)
(rdev->pm.dpm.hd != hd)) {
rdev->pm.dpm.sd = sd;
rdev->pm.dpm.hd = hd;
- streams_changed = true;
+ /* disable this for now */
+ /*streams_changed = true;*/
}
}
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index b8074a8ec75a..9566b5940a5a 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -274,19 +274,26 @@ static void rs400_mc_init(struct radeon_device *rdev)
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
+ unsigned long flags;
uint32_t r;
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(RS480_NB_MC_INDEX, reg & 0xff);
r = RREG32(RS480_NB_MC_DATA);
WREG32(RS480_NB_MC_INDEX, 0xff);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
return r;
}
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
WREG32(RS480_NB_MC_DATA, (v));
WREG32(RS480_NB_MC_INDEX, 0xff);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 670b555d2ca2..6acba8017b9a 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -847,16 +847,26 @@ void rs600_bandwidth_update(struct radeon_device *rdev)
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
S_000070_MC_IND_CITF_ARB0(1));
- return RREG32(R_000074_MC_IND_DATA);
+ r = RREG32(R_000074_MC_IND_DATA);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
+ return r;
}
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
WREG32(R_000074_MC_IND_DATA, v);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
static void rs600_debugfs(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index d8ddfb34545d..1447d794c22a 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -631,20 +631,27 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
+ unsigned long flags;
uint32_t r;
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
r = RREG32(R_00007C_MC_DATA);
WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
return r;
}
void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) |
S_000078_MC_IND_WR_EN(1));
WREG32(R_00007C_MC_DATA, v);
WREG32(R_000078_MC_INDEX, 0x7F);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
static void rs690_mc_program(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index d1a1ce73bd45..6af8505cf4d2 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -62,9 +62,7 @@ static void rs780_get_pm_mode_parameters(struct radeon_device *rdev)
radeon_crtc = to_radeon_crtc(crtc);
pi->crtc_id = radeon_crtc->crtc_id;
if (crtc->mode.htotal && crtc->mode.vtotal)
- pi->refresh_rate =
- (crtc->mode.clock * 1000) /
- (crtc->mode.htotal * crtc->mode.vtotal);
+ pi->refresh_rate = drm_mode_vrefresh(&crtc->mode);
break;
}
}
@@ -376,9 +374,8 @@ static void rs780_disable_vbios_powersaving(struct radeon_device *rdev)
WREG32_P(CG_INTGFX_MISC, 0, ~0xFFF00000);
}
-static void rs780_force_voltage_to_high(struct radeon_device *rdev)
+static void rs780_force_voltage(struct radeon_device *rdev, u16 voltage)
{
- struct igp_power_info *pi = rs780_get_pi(rdev);
struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps);
if ((current_state->max_voltage == RS780_VDDC_LEVEL_HIGH) &&
@@ -390,7 +387,7 @@ static void rs780_force_voltage_to_high(struct radeon_device *rdev)
udelay(1);
WREG32_P(FVTHROT_PWM_CTRL_REG0,
- STARTING_PWM_HIGHTIME(pi->max_voltage),
+ STARTING_PWM_HIGHTIME(voltage),
~STARTING_PWM_HIGHTIME_MASK);
WREG32_P(FVTHROT_PWM_CTRL_REG0,
@@ -404,6 +401,26 @@ static void rs780_force_voltage_to_high(struct radeon_device *rdev)
WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
}
+static void rs780_force_fbdiv(struct radeon_device *rdev, u32 fb_div)
+{
+ struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps);
+
+ if (current_state->sclk_low == current_state->sclk_high)
+ return;
+
+ WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL);
+
+ WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(fb_div),
+ ~FORCED_FEEDBACK_DIV_MASK);
+ WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(fb_div),
+ ~STARTING_FEEDBACK_DIV_MASK);
+ WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV);
+
+ udelay(100);
+
+ WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
+}
+
static int rs780_set_engine_clock_scaling(struct radeon_device *rdev,
struct radeon_ps *new_ps,
struct radeon_ps *old_ps)
@@ -432,17 +449,13 @@ static int rs780_set_engine_clock_scaling(struct radeon_device *rdev,
if (ret)
return ret;
- WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL);
-
- WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(max_dividers.fb_div),
- ~FORCED_FEEDBACK_DIV_MASK);
- WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(max_dividers.fb_div),
- ~STARTING_FEEDBACK_DIV_MASK);
- WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV);
-
- udelay(100);
+ if ((min_dividers.ref_div != max_dividers.ref_div) ||
+ (min_dividers.post_div != max_dividers.post_div) ||
+ (max_dividers.ref_div != current_max_dividers.ref_div) ||
+ (max_dividers.post_div != current_max_dividers.post_div))
+ return -EINVAL;
- WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
+ rs780_force_fbdiv(rdev, max_dividers.fb_div);
if (max_dividers.fb_div > min_dividers.fb_div) {
WREG32_P(FVTHROT_FBDIV_REG0,
@@ -486,6 +499,9 @@ static void rs780_activate_engine_clk_scaling(struct radeon_device *rdev,
(new_state->sclk_low == old_state->sclk_low))
return;
+ if (new_state->sclk_high == new_state->sclk_low)
+ return;
+
rs780_clk_scaling_enable(rdev, true);
}
@@ -649,7 +665,7 @@ int rs780_dpm_set_power_state(struct radeon_device *rdev)
rs780_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
if (pi->voltage_control) {
- rs780_force_voltage_to_high(rdev);
+ rs780_force_voltage(rdev, pi->max_voltage);
mdelay(5);
}
@@ -717,14 +733,18 @@ static void rs780_parse_pplib_non_clock_info(struct radeon_device *rdev,
if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
- } else if (r600_is_uvd_state(rps->class, rps->class2)) {
- rps->vclk = RS780_DEFAULT_VCLK_FREQ;
- rps->dclk = RS780_DEFAULT_DCLK_FREQ;
} else {
rps->vclk = 0;
rps->dclk = 0;
}
+ if (r600_is_uvd_state(rps->class, rps->class2)) {
+ if ((rps->vclk == 0) || (rps->dclk == 0)) {
+ rps->vclk = RS780_DEFAULT_VCLK_FREQ;
+ rps->dclk = RS780_DEFAULT_DCLK_FREQ;
+ }
+ }
+
if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
rdev->pm.dpm.boot_ps = rps;
if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
@@ -986,3 +1006,55 @@ void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rde
seq_printf(m, "power level 1 sclk: %u vddc_index: %d\n",
ps->sclk_high, ps->max_voltage);
}
+
+int rs780_dpm_force_performance_level(struct radeon_device *rdev,
+ enum radeon_dpm_forced_level level)
+{
+ struct igp_power_info *pi = rs780_get_pi(rdev);
+ struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct igp_ps *ps = rs780_get_ps(rps);
+ struct atom_clock_dividers dividers;
+ int ret;
+
+ rs780_clk_scaling_enable(rdev, false);
+ rs780_voltage_scaling_enable(rdev, false);
+
+ if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
+ if (pi->voltage_control)
+ rs780_force_voltage(rdev, pi->max_voltage);
+
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ ps->sclk_high, false, &dividers);
+ if (ret)
+ return ret;
+
+ rs780_force_fbdiv(rdev, dividers.fb_div);
+ } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ ps->sclk_low, false, &dividers);
+ if (ret)
+ return ret;
+
+ rs780_force_fbdiv(rdev, dividers.fb_div);
+
+ if (pi->voltage_control)
+ rs780_force_voltage(rdev, pi->min_voltage);
+ } else {
+ if (pi->voltage_control)
+ rs780_force_voltage(rdev, pi->max_voltage);
+
+ if (ps->sclk_high != ps->sclk_low) {
+ WREG32_P(FVTHROT_FBDIV_REG1, 0, ~FORCE_FEEDBACK_DIV);
+ rs780_clk_scaling_enable(rdev, true);
+ }
+
+ if (pi->voltage_control) {
+ rs780_voltage_scaling_enable(rdev, true);
+ rs780_enable_voltage_scaling(rdev, rps);
+ }
+ }
+
+ rdev->pm.dpm.forced_level = level;
+
+ return 0;
+}
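
rs780_get_pm_mode_parameters above now uses drm_mode_vrefresh() instead of open-coding the refresh computation. For a plain progressive mode the helper performs essentially the arithmetic the removed lines did (mode->clock is in kHz, hence the factor of 1000), while also handling interlace, doublescan and vscan that the open-coded version ignored. The core arithmetic, as a sketch:

	/* Sketch of the refresh-rate arithmetic drm_mode_vrefresh() performs
	 * for a simple progressive mode; the real helper also adjusts for
	 * interlaced, doublescanned and vscan'd modes. */
	static int vrefresh_hz(const struct drm_display_mode *mode)
	{
		if (!mode->htotal || !mode->vtotal)
			return 0;
		return (mode->clock * 1000) / (mode->htotal * mode->vtotal);
	}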
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 8ea1573ae820..873eb4b193b4 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -209,19 +209,27 @@ static void rv515_mc_init(struct radeon_device *rdev)
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
+ unsigned long flags;
uint32_t r;
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
r = RREG32(MC_IND_DATA);
WREG32(MC_IND_INDEX, 0);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
+
return r;
}
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
WREG32(MC_IND_DATA, (v));
WREG32(MC_IND_INDEX, 0);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index ab1f2016f21e..5811d277a36a 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -1758,8 +1758,6 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev)
rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
- rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
-
return 0;
}
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 8cbb85dae5aa..913b025ae9b3 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2064,12 +2064,6 @@ int rv770_dpm_set_power_state(struct radeon_device *rdev)
rv770_program_dcodt_after_state_switch(rdev, new_ps, old_ps);
rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
- ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
- if (ret) {
- DRM_ERROR("rv770_dpm_force_performance_level failed\n");
- return ret;
- }
-
return 0;
}
@@ -2147,14 +2141,18 @@ static void rv7xx_parse_pplib_non_clock_info(struct radeon_device *rdev,
if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
- } else if (r600_is_uvd_state(rps->class, rps->class2)) {
- rps->vclk = RV770_DEFAULT_VCLK_FREQ;
- rps->dclk = RV770_DEFAULT_DCLK_FREQ;
} else {
rps->vclk = 0;
rps->dclk = 0;
}
+ if (r600_is_uvd_state(rps->class, rps->class2)) {
+ if ((rps->vclk == 0) || (rps->dclk == 0)) {
+ rps->vclk = RV770_DEFAULT_VCLK_FREQ;
+ rps->dclk = RV770_DEFAULT_DCLK_FREQ;
+ }
+ }
+
if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
rdev->pm.dpm.boot_ps = rps;
if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
diff --git a/drivers/gpu/drm/radeon/rv770_smc.c b/drivers/gpu/drm/radeon/rv770_smc.c
index ab95da570215..b2a224407365 100644
--- a/drivers/gpu/drm/radeon/rv770_smc.c
+++ b/drivers/gpu/drm/radeon/rv770_smc.c
@@ -274,8 +274,8 @@ static const u8 cayman_smc_int_vectors[] =
0x08, 0x72, 0x08, 0x72
};
-int rv770_set_smc_sram_address(struct radeon_device *rdev,
- u16 smc_address, u16 limit)
+static int rv770_set_smc_sram_address(struct radeon_device *rdev,
+ u16 smc_address, u16 limit)
{
u32 addr;
@@ -296,9 +296,10 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
u16 smc_start_address, const u8 *src,
u16 byte_count, u16 limit)
{
+ unsigned long flags;
u32 data, original_data, extra_shift;
u16 addr;
- int ret;
+ int ret = 0;
if (smc_start_address & 3)
return -EINVAL;
@@ -307,13 +308,14 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
addr = smc_start_address;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
while (byte_count >= 4) {
/* SMC address space is BE */
data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
ret = rv770_set_smc_sram_address(rdev, addr, limit);
if (ret)
- return ret;
+ goto done;
WREG32(SMC_SRAM_DATA, data);
@@ -328,7 +330,7 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
ret = rv770_set_smc_sram_address(rdev, addr, limit);
if (ret)
- return ret;
+ goto done;
original_data = RREG32(SMC_SRAM_DATA);
@@ -346,12 +348,15 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
ret = rv770_set_smc_sram_address(rdev, addr, limit);
if (ret)
- return ret;
+ goto done;
WREG32(SMC_SRAM_DATA, data);
}
- return 0;
+done:
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+ return ret;
}
static int rv770_program_interrupt_vectors(struct radeon_device *rdev,
@@ -461,12 +466,15 @@ PPSMC_Result rv770_wait_for_smc_inactive(struct radeon_device *rdev)
static void rv770_clear_smc_sram(struct radeon_device *rdev, u16 limit)
{
+ unsigned long flags;
u16 i;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
for (i = 0; i < limit; i += 4) {
rv770_set_smc_sram_address(rdev, i, limit);
WREG32(SMC_SRAM_DATA, 0);
}
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
}
int rv770_load_smc_ucode(struct radeon_device *rdev,
@@ -595,27 +603,29 @@ int rv770_load_smc_ucode(struct radeon_device *rdev,
int rv770_read_smc_sram_dword(struct radeon_device *rdev,
u16 smc_address, u32 *value, u16 limit)
{
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
ret = rv770_set_smc_sram_address(rdev, smc_address, limit);
- if (ret)
- return ret;
-
- *value = RREG32(SMC_SRAM_DATA);
+ if (ret == 0)
+ *value = RREG32(SMC_SRAM_DATA);
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
- return 0;
+ return ret;
}
int rv770_write_smc_sram_dword(struct radeon_device *rdev,
u16 smc_address, u32 value, u16 limit)
{
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
ret = rv770_set_smc_sram_address(rdev, smc_address, limit);
- if (ret)
- return ret;
+ if (ret == 0)
+ WREG32(SMC_SRAM_DATA, value);
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
- WREG32(SMC_SRAM_DATA, value);
-
- return 0;
+ return ret;
}
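
The SMC SRAM helpers above now hold smc_idx_lock across the whole set-address/access sequence and funnel every failure through the single unlock at done:, so no error path can return with the lock held. The same conversion is applied to si_smc.c below. The error-path shape, reduced to a sketch with placeholder arguments:

	/* Sketch of the single-exit unlock pattern introduced above. Every
	 * early "return ret" inside the locked region becomes "goto done"
	 * so the spinlock is released exactly once. */
	static int smc_write_words(struct radeon_device *rdev, u16 addr,
				   const u32 *src, u16 count, u16 limit)
	{
		unsigned long flags;
		int ret = 0;

		spin_lock_irqsave(&rdev->smc_idx_lock, flags);
		while (count--) {
			ret = rv770_set_smc_sram_address(rdev, addr, limit);
			if (ret)
				goto done;	/* never return with the lock held */
			WREG32(SMC_SRAM_DATA, *src++);
			addr += 4;
		}
	done:
		spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
		return ret;
	}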
diff --git a/drivers/gpu/drm/radeon/rv770_smc.h b/drivers/gpu/drm/radeon/rv770_smc.h
index f78d92a4b325..3b2c963c4880 100644
--- a/drivers/gpu/drm/radeon/rv770_smc.h
+++ b/drivers/gpu/drm/radeon/rv770_smc.h
@@ -187,8 +187,6 @@ typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE;
#define RV770_SMC_SOFT_REGISTER_uvd_enabled 0x9C
#define RV770_SMC_SOFT_REGISTER_is_asic_lombok 0xA0
-int rv770_set_smc_sram_address(struct radeon_device *rdev,
- u16 smc_address, u16 limit);
int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
u16 smc_start_address, const u8 *src,
u16 byte_count, u16 limit);
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 9fe60e542922..1ae277152cc7 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -852,7 +852,7 @@
#define AFMT_VBI_PACKET_CONTROL 0x7608
# define AFMT_GENERIC0_UPDATE (1 << 2)
#define AFMT_INFOFRAME_CONTROL0 0x760c
-# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hmdi regs */
+# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
# define AFMT_AUDIO_INFO_UPDATE (1 << 7)
# define AFMT_MPEG_INFO_UPDATE (1 << 10)
#define AFMT_GENERIC0_7 0x7610
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 3e23b757dcfa..d4652af425b8 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -83,6 +83,11 @@ extern void si_dma_vm_set_page(struct radeon_device *rdev,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
+static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
+ bool enable);
+static void si_fini_pg(struct radeon_device *rdev);
+static void si_fini_cg(struct radeon_device *rdev);
+static void si_rlc_stop(struct radeon_device *rdev);
static const u32 verde_rlc_save_restore_register_list[] =
{
@@ -3386,6 +3391,8 @@ static int si_cp_resume(struct radeon_device *rdev)
u32 rb_bufsz;
int r;
+ si_enable_gui_idle_interrupt(rdev, false);
+
WREG32(CP_SEM_WAIT_TIMER, 0x0);
WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
@@ -3501,6 +3508,8 @@ static int si_cp_resume(struct radeon_device *rdev)
rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
}
+ si_enable_gui_idle_interrupt(rdev, true);
+
return 0;
}
@@ -3602,6 +3611,13 @@ static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+ /* disable PG/CG */
+ si_fini_pg(rdev);
+ si_fini_cg(rdev);
+
+ /* stop the rlc */
+ si_rlc_stop(rdev);
+
/* Disable CP parsing/prefetching */
WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
@@ -4888,7 +4904,7 @@ static void si_enable_gfx_cgpg(struct radeon_device *rdev,
{
u32 tmp;
- if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) {
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
WREG32(RLC_TTOP_D, tmp);
@@ -5250,6 +5266,7 @@ void si_update_cg(struct radeon_device *rdev,
u32 block, bool enable)
{
if (block & RADEON_CG_BLOCK_GFX) {
+ si_enable_gui_idle_interrupt(rdev, false);
/* order matters! */
if (enable) {
si_enable_mgcg(rdev, true);
@@ -5258,6 +5275,7 @@ void si_update_cg(struct radeon_device *rdev,
si_enable_cgcg(rdev, false);
si_enable_mgcg(rdev, false);
}
+ si_enable_gui_idle_interrupt(rdev, true);
}
if (block & RADEON_CG_BLOCK_MC) {
@@ -5408,7 +5426,7 @@ static void si_init_pg(struct radeon_device *rdev)
si_init_dma_pg(rdev);
}
si_init_ao_cu_mask(rdev);
- if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+ if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
si_init_gfx_cgpg(rdev);
}
si_enable_dma_pg(rdev, true);
@@ -5560,7 +5578,9 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
{
u32 tmp;
- WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ tmp = RREG32(CP_INT_CNTL_RING0) &
+ (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ WREG32(CP_INT_CNTL_RING0, tmp);
WREG32(CP_INT_CNTL_RING1, 0);
WREG32(CP_INT_CNTL_RING2, 0);
tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
@@ -5685,7 +5705,7 @@ static int si_irq_init(struct radeon_device *rdev)
int si_irq_set(struct radeon_device *rdev)
{
- u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+ u32 cp_int_cntl;
u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
@@ -5706,6 +5726,9 @@ int si_irq_set(struct radeon_device *rdev)
return 0;
}
+ cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
+ (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+
if (!ASIC_IS_NODCE(rdev)) {
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 5be9b4e72350..2332aa1bf93c 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2910,6 +2910,7 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
bool disable_sclk_switching = false;
u32 mclk, sclk;
u16 vddc, vddci;
+ u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
int i;
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -2943,6 +2944,29 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
}
}
+ /* limit clocks to max supported clocks based on voltage dependency tables */
+ btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+ &max_sclk_vddc);
+ btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ &max_mclk_vddci);
+ btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ &max_mclk_vddc);
+
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (max_sclk_vddc) {
+ if (ps->performance_levels[i].sclk > max_sclk_vddc)
+ ps->performance_levels[i].sclk = max_sclk_vddc;
+ }
+ if (max_mclk_vddci) {
+ if (ps->performance_levels[i].mclk > max_mclk_vddci)
+ ps->performance_levels[i].mclk = max_mclk_vddci;
+ }
+ if (max_mclk_vddc) {
+ if (ps->performance_levels[i].mclk > max_mclk_vddc)
+ ps->performance_levels[i].mclk = max_mclk_vddc;
+ }
+ }
+
/* XXX validate the min clocks required for display */
if (disable_mclk_switching) {
@@ -5184,7 +5208,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
}
j++;
- if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+ if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;
if (!pi->mem_gddr5) {
@@ -5194,7 +5218,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
table->mc_reg_table_entry[k].mc_data[j] =
(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
j++;
- if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+ if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;
}
break;
@@ -5207,7 +5231,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
(temp_reg & 0xffff0000) |
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
j++;
- if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+ if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;
break;
default:
@@ -6075,12 +6099,6 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
return ret;
}
- ret = si_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
- if (ret) {
- DRM_ERROR("si_dpm_force_performance_level failed\n");
- return ret;
- }
-
si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
RADEON_CG_BLOCK_MC |
RADEON_CG_BLOCK_SDMA |
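
Besides clamping the performance levels against the voltage dependency tables, the three bounds checks in si_set_mc_special_registers() change from > to >= because j indexes mc_data[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE]: after j++, j == ARRAY_SIZE must already be rejected, since the next iteration writes mc_data[j]. A tiny sketch of the invariant:

	/* Sketch of the off-by-one fixed above; MC_REG_ARRAY_SIZE stands in
	 * for SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE. */
	#define MC_REG_ARRAY_SIZE 16

	static int append_mc_data(u32 *data, unsigned int *j, u32 v)
	{
		/* '*j > MC_REG_ARRAY_SIZE' would still accept
		 * *j == MC_REG_ARRAY_SIZE and write one element
		 * past the end of the array */
		if (*j >= MC_REG_ARRAY_SIZE)
			return -EINVAL;
		data[(*j)++] = v;
		return 0;
	}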
diff --git a/drivers/gpu/drm/radeon/si_smc.c b/drivers/gpu/drm/radeon/si_smc.c
index 5f524c0a541e..d422a1cbf727 100644
--- a/drivers/gpu/drm/radeon/si_smc.c
+++ b/drivers/gpu/drm/radeon/si_smc.c
@@ -29,8 +29,8 @@
#include "ppsmc.h"
#include "radeon_ucode.h"
-int si_set_smc_sram_address(struct radeon_device *rdev,
- u32 smc_address, u32 limit)
+static int si_set_smc_sram_address(struct radeon_device *rdev,
+ u32 smc_address, u32 limit)
{
if (smc_address & 3)
return -EINVAL;
@@ -47,7 +47,8 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
u32 smc_start_address,
const u8 *src, u32 byte_count, u32 limit)
{
- int ret;
+ unsigned long flags;
+ int ret = 0;
u32 data, original_data, addr, extra_shift;
if (smc_start_address & 3)
@@ -57,13 +58,14 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
addr = smc_start_address;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
while (byte_count >= 4) {
/* SMC address space is BE */
data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
ret = si_set_smc_sram_address(rdev, addr, limit);
if (ret)
- return ret;
+ goto done;
WREG32(SMC_IND_DATA_0, data);
@@ -78,7 +80,7 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
ret = si_set_smc_sram_address(rdev, addr, limit);
if (ret)
- return ret;
+ goto done;
original_data = RREG32(SMC_IND_DATA_0);
@@ -96,11 +98,15 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
ret = si_set_smc_sram_address(rdev, addr, limit);
if (ret)
- return ret;
+ goto done;
WREG32(SMC_IND_DATA_0, data);
}
- return 0;
+
+done:
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+ return ret;
}
void si_start_smc(struct radeon_device *rdev)
@@ -203,6 +209,7 @@ PPSMC_Result si_wait_for_smc_inactive(struct radeon_device *rdev)
int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
{
+ unsigned long flags;
u32 ucode_start_address;
u32 ucode_size;
const u8 *src;
@@ -241,6 +248,7 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
return -EINVAL;
src = (const u8 *)rdev->smc_fw->data;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
WREG32(SMC_IND_INDEX_0, ucode_start_address);
WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
while (ucode_size >= 4) {
@@ -253,6 +261,7 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
ucode_size -= 4;
}
WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
return 0;
}
@@ -260,25 +269,29 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
int si_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
u32 *value, u32 limit)
{
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
ret = si_set_smc_sram_address(rdev, smc_address, limit);
- if (ret)
- return ret;
+ if (ret == 0)
+ *value = RREG32(SMC_IND_DATA_0);
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
- *value = RREG32(SMC_IND_DATA_0);
- return 0;
+ return ret;
}
int si_write_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
u32 value, u32 limit)
{
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
ret = si_set_smc_sram_address(rdev, smc_address, limit);
- if (ret)
- return ret;
+ if (ret == 0)
+ WREG32(SMC_IND_DATA_0, value);
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
- WREG32(SMC_IND_DATA_0, value);
- return 0;
+ return ret;
}
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 52d2ab6b67a0..7e2e0ea66a00 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -1553,7 +1553,7 @@
* 6. COMMAND [30:21] | BYTE_COUNT [20:0]
*/
# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
- /* 0 - SRC_ADDR
+ /* 0 - DST_ADDR
* 1 - GDS
*/
# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
@@ -1568,7 +1568,7 @@
# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
/* COMMAND */
# define PACKET3_CP_DMA_DIS_WC (1 << 21)
-# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
/* 0 - none
* 1 - 8 in 16
* 2 - 8 in 32
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 864761c0120e..96ea6db8bf57 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1319,8 +1319,6 @@ int sumo_dpm_set_power_state(struct radeon_device *rdev)
if (pi->enable_dpm)
sumo_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
- rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
-
return 0;
}
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index b07b7b8f1aff..9364129ba292 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1068,6 +1068,17 @@ static void trinity_update_requested_ps(struct radeon_device *rdev,
pi->requested_rps.ps_priv = &pi->requested_ps;
}
+void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
+{
+ struct trinity_power_info *pi = trinity_get_pi(rdev);
+
+ if (pi->enable_bapm) {
+ trinity_acquire_mutex(rdev);
+ trinity_dpm_bapm_enable(rdev, enable);
+ trinity_release_mutex(rdev);
+ }
+}
+
int trinity_dpm_enable(struct radeon_device *rdev)
{
struct trinity_power_info *pi = trinity_get_pi(rdev);
@@ -1091,6 +1102,7 @@ int trinity_dpm_enable(struct radeon_device *rdev)
trinity_program_sclk_dpm(rdev);
trinity_start_dpm(rdev);
trinity_wait_for_dpm_enabled(rdev);
+ trinity_dpm_bapm_enable(rdev, false);
trinity_release_mutex(rdev);
if (rdev->irq.installed &&
@@ -1116,6 +1128,7 @@ void trinity_dpm_disable(struct radeon_device *rdev)
trinity_release_mutex(rdev);
return;
}
+ trinity_dpm_bapm_enable(rdev, false);
trinity_disable_clock_power_gating(rdev);
sumo_clear_vc(rdev);
trinity_wait_for_level_0(rdev);
@@ -1212,6 +1225,8 @@ int trinity_dpm_set_power_state(struct radeon_device *rdev)
trinity_acquire_mutex(rdev);
if (pi->enable_dpm) {
+ if (pi->enable_bapm)
+ trinity_dpm_bapm_enable(rdev, rdev->pm.dpm.ac_power);
trinity_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
trinity_enable_power_level_0(rdev);
trinity_force_level_0(rdev);
@@ -1221,7 +1236,6 @@ int trinity_dpm_set_power_state(struct radeon_device *rdev)
trinity_force_level_0(rdev);
trinity_unforce_levels(rdev);
trinity_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
- rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
}
trinity_release_mutex(rdev);
@@ -1854,6 +1868,7 @@ int trinity_dpm_init(struct radeon_device *rdev)
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
pi->at[i] = TRINITY_AT_DFLT;
+ pi->enable_bapm = false;
pi->enable_nbps_policy = true;
pi->enable_sclk_ds = true;
pi->enable_gfx_power_gating = true;
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.h b/drivers/gpu/drm/radeon/trinity_dpm.h
index e82df071f8b3..c261657750ca 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.h
+++ b/drivers/gpu/drm/radeon/trinity_dpm.h
@@ -108,6 +108,7 @@ struct trinity_power_info {
bool enable_auto_thermal_throttling;
bool enable_dpm;
bool enable_sclk_ds;
+ bool enable_bapm;
bool uvd_dpm;
struct radeon_ps current_rps;
struct trinity_ps current_ps;
@@ -118,6 +119,7 @@ struct trinity_power_info {
#define TRINITY_AT_DFLT 30
/* trinity_smc.c */
+int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable);
int trinity_dpm_config(struct radeon_device *rdev, bool enable);
int trinity_uvd_dpm_config(struct radeon_device *rdev);
int trinity_dpm_force_state(struct radeon_device *rdev, u32 n);
diff --git a/drivers/gpu/drm/radeon/trinity_smc.c b/drivers/gpu/drm/radeon/trinity_smc.c
index a42d89f1830c..9672bcbc7312 100644
--- a/drivers/gpu/drm/radeon/trinity_smc.c
+++ b/drivers/gpu/drm/radeon/trinity_smc.c
@@ -56,6 +56,14 @@ static int trinity_notify_message_to_smu(struct radeon_device *rdev, u32 id)
return 0;
}
+int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable)
+{
+ if (enable)
+ return trinity_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM);
+ else
+ return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM);
+}
+
int trinity_dpm_config(struct radeon_device *rdev, bool enable)
{
if (enable)
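
The helper maps the bool onto the two PPSMC messages and returns the SMU
handshake result; the callers added above ignore that result. A hedged sketch
of propagating it instead (not in the patch):

	int ret = trinity_dpm_bapm_enable(rdev, true);
	if (ret)
		DRM_ERROR("SMU rejected BAPM enable: %d\n", ret);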
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index 7266805d9786..3100fa9cb52f 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -212,8 +212,8 @@ int uvd_v1_0_start(struct radeon_device *rdev)
/* enable VCPU clock */
WREG32(UVD_VCPU_CNTL, 1 << 9);
- /* enable UMC */
- WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
+ /* enable UMC and NC0 */
+ WREG32_P(UVD_LMI_CTRL2, 1 << 13, ~((1 << 8) | (1 << 13)));
/* boot up the VCPU */
WREG32(UVD_SOFT_RESET, 0);
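
The replacement line leans on WREG32_P()'s read-modify-write semantics.
Expanded by hand, assuming radeon's usual definition of the macro (where the
mask selects the bits to preserve), the new call amounts to:

/* Sketch of what WREG32_P(UVD_LMI_CTRL2, 1 << 13, ~((1 << 8) | (1 << 13)))
 * does; this expansion is illustrative, not code from the patch. */
u32 tmp = RREG32(UVD_LMI_CTRL2);
tmp &= ~((1 << 8) | (1 << 13));	/* drop the old UMC and NC0 bits */
tmp |= 1 << 13;			/* set bit 13: enable NC0 */
WREG32(UVD_LMI_CTRL2, tmp);	/* bit 8 stays clear: UMC remains enabled */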
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 58a5f3261c0b..a868176c258a 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -218,7 +218,7 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
uint32_t key)
{
struct ttm_object_device *tdev = tfile->tdev;
- struct ttm_base_object *base;
+ struct ttm_base_object *uninitialized_var(base);
struct drm_hash_item *hash;
int ret;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index bd2a3b40cd12..863bef9f9234 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -377,28 +377,26 @@ out:
return nr_free;
}
-/* Get good estimation how many pages are free in pools */
-static int ttm_pool_get_num_unused_pages(void)
-{
- unsigned i;
- int total = 0;
- for (i = 0; i < NUM_POOLS; ++i)
- total += _manager->pools[i].npages;
-
- return total;
-}
-
/**
 * Callback for mm to request pool to reduce number of pages held.
+ *
+ * XXX: (dchinner) Deadlock warning!
+ *
+ * ttm_page_pool_free() does memory allocation using GFP_KERNEL. That means
+ * this can deadlock when called with an sc->gfp_mask that is not equal to
+ * GFP_KERNEL.
+ *
+ * This code is crying out for a shrinker per pool....
*/
-static int ttm_pool_mm_shrink(struct shrinker *shrink,
- struct shrink_control *sc)
+static unsigned long
+ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
static atomic_t start_pool = ATOMIC_INIT(0);
unsigned i;
unsigned pool_offset = atomic_add_return(1, &start_pool);
struct ttm_page_pool *pool;
int shrink_pages = sc->nr_to_scan;
+ unsigned long freed = 0;
pool_offset = pool_offset % NUM_POOLS;
/* select start pool in round robin fashion */
@@ -408,14 +406,28 @@ static int ttm_pool_mm_shrink(struct shrinker *shrink,
break;
pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
shrink_pages = ttm_page_pool_free(pool, nr_free);
+ freed += nr_free - shrink_pages;
}
- /* return estimated number of unused pages in pool */
- return ttm_pool_get_num_unused_pages();
+ return freed;
+}
+
+static unsigned long
+ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ unsigned i;
+ unsigned long count = 0;
+
+ for (i = 0; i < NUM_POOLS; ++i)
+ count += _manager->pools[i].npages;
+
+ return count;
}
static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
- manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
+ manager->mm_shrink.count_objects = ttm_pool_shrink_count;
+ manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
manager->mm_shrink.seeks = 1;
register_shrinker(&manager->mm_shrink);
}
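
The conversion above follows the split-shrinker API introduced in this era:
count_objects() gives the reclaim core a cheap estimate, scan_objects() does
the actual freeing of up to sc->nr_to_scan items and returns how many went
away. A self-contained toy showing the registration pattern (the toy_pool_*
names are hypothetical stand-ins, not TTM code):

/* Toy shrinker under the count/scan split. */
extern unsigned long toy_pool_trim(unsigned long nr);	/* hypothetical */
static atomic_long_t toy_pool_size = ATOMIC_LONG_INIT(0);

static unsigned long toy_shrink_count(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	return atomic_long_read(&toy_pool_size);	/* cheap, lock-free */
}

static unsigned long toy_shrink_scan(struct shrinker *shrink,
				     struct shrink_control *sc)
{
	unsigned long freed = toy_pool_trim(sc->nr_to_scan);

	atomic_long_sub(freed, &toy_pool_size);
	return freed;	/* objects actually reclaimed, not an estimate */
}

static struct shrinker toy_shrinker = {
	.count_objects = toy_shrink_count,
	.scan_objects  = toy_shrink_scan,
	.seeks         = DEFAULT_SEEKS,
};
/* register_shrinker(&toy_shrinker) at init,
 * unregister_shrinker(&toy_shrinker) on teardown. */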
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index b8b394319b45..7957beeeaf73 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -918,19 +918,6 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);
-/* Get good estimation how many pages are free in pools */
-static int ttm_dma_pool_get_num_unused_pages(void)
-{
- struct device_pools *p;
- unsigned total = 0;
-
- mutex_lock(&_manager->lock);
- list_for_each_entry(p, &_manager->pools, pools)
- total += p->pool->npages_free;
- mutex_unlock(&_manager->lock);
- return total;
-}
-
/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
@@ -1002,18 +989,29 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
/**
 * Callback for mm to request pool to reduce number of pages held.
+ *
+ * XXX: (dchinner) Deadlock warning!
+ *
+ * ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention
+ * needs to be paid to sc->gfp_mask to determine if this can be done or not.
+ * GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context would be really
+ * bad.
+ *
+ * I'm getting sadder as I hear more pathetic whimpers about needing per-pool
+ * shrinkers.
*/
-static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
- struct shrink_control *sc)
+static unsigned long
+ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
static atomic_t start_pool = ATOMIC_INIT(0);
unsigned idx = 0;
unsigned pool_offset = atomic_add_return(1, &start_pool);
unsigned shrink_pages = sc->nr_to_scan;
struct device_pools *p;
+ unsigned long freed = 0;
if (list_empty(&_manager->pools))
- return 0;
+ return SHRINK_STOP;
mutex_lock(&_manager->lock);
pool_offset = pool_offset % _manager->npools;
@@ -1029,18 +1027,33 @@ static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
continue;
nr_free = shrink_pages;
shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
+ freed += nr_free - shrink_pages;
+
pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
p->pool->dev_name, p->pool->name, current->pid,
nr_free, shrink_pages);
}
mutex_unlock(&_manager->lock);
- /* return estimated number of unused pages in pool */
- return ttm_dma_pool_get_num_unused_pages();
+ return freed;
+}
+
+static unsigned long
+ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ struct device_pools *p;
+ unsigned long count = 0;
+
+ mutex_lock(&_manager->lock);
+ list_for_each_entry(p, &_manager->pools, pools)
+ count += p->pool->npages_free;
+ mutex_unlock(&_manager->lock);
+ return count;
}
static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
- manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
+ manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
+ manager->mm_shrink.scan_objects = ttm_dma_pool_shrink_scan;
manager->mm_shrink.seeks = 1;
register_shrinker(&manager->mm_shrink);
}
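
One hedged way to honour the deadlock warning in the XXX comment above would
be to bail out of the scan whenever the reclaim context cannot sleep; whether
TTM later adopted exactly this check is not shown here. __GFP_WAIT is the
era-appropriate sleepable-allocation flag:

/* Hedged sketch of a gfp guard for the XXX above; not part of this patch. */
static unsigned long
guarded_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	/* GFP_KERNEL allocations inside the scan would deadlock if the
	 * caller itself cannot sleep, so refuse such contexts outright. */
	if (!(sc->gfp_mask & __GFP_WAIT))
		return SHRINK_STOP;

	return ttm_dma_pool_shrink_scan(shrink, sc);
}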
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 5e93a52d4f2c..210d50365162 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -170,7 +170,7 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
ttm_tt_unbind(ttm);
}
- if (likely(ttm->pages != NULL)) {
+ if (ttm->state == tt_unbound) {
ttm->bdev->driver->ttm_tt_unpopulate(ttm);
}
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 8dbe9d0ae9a7..8bf646183bac 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -97,7 +97,6 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
switch (ret) {
case -EAGAIN:
- set_need_resched();
case 0:
case -ERESTARTSYS:
return VM_FAULT_NOPAGE;
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index e893f6e1937d..af0259708358 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -257,9 +257,9 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
if (!conflict->bridge_has_one_vga) {
vga_irq_set_state(conflict, false);
flags |= PCI_VGA_STATE_CHANGE_DECODES;
- if (lwants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
+ if (match & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
pci_bits |= PCI_COMMAND_MEMORY;
- if (lwants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
+ if (match & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
pci_bits |= PCI_COMMAND_IO;
}
@@ -267,11 +267,11 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
flags |= PCI_VGA_STATE_CHANGE_BRIDGE;
pci_set_vga_state(conflict->pdev, false, pci_bits, flags);
- conflict->owns &= ~lwants;
+ conflict->owns &= ~match;
/* If it also owned non-legacy resources, that is no longer the case */
- if (lwants & VGA_RSRC_LEGACY_MEM)
+ if (match & VGA_RSRC_LEGACY_MEM)
conflict->owns &= ~VGA_RSRC_NORMAL_MEM;
- if (lwants & VGA_RSRC_LEGACY_IO)
+ if (match & VGA_RSRC_LEGACY_IO)
conflict->owns &= ~VGA_RSRC_NORMAL_IO;
}
@@ -644,10 +644,12 @@ bail:
static inline void vga_update_device_decodes(struct vga_device *vgadev,
int new_decodes)
{
- int old_decodes;
- struct vga_device *new_vgadev, *conflict;
+ int old_decodes, decodes_removed, decodes_unlocked;
old_decodes = vgadev->decodes;
+ decodes_removed = ~new_decodes & old_decodes;
+ decodes_unlocked = vgadev->locks & decodes_removed;
+ vgadev->owns &= ~decodes_removed;
vgadev->decodes = new_decodes;
pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
@@ -656,31 +658,22 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
vga_iostate_to_str(vgadev->decodes),
vga_iostate_to_str(vgadev->owns));
-
- /* if we own the decodes we should move them along to
- another card */
- if ((vgadev->owns & old_decodes) && (vga_count > 1)) {
- /* set us to own nothing */
- vgadev->owns &= ~old_decodes;
- list_for_each_entry(new_vgadev, &vga_list, list) {
- if ((new_vgadev != vgadev) &&
- (new_vgadev->decodes & VGA_RSRC_LEGACY_MASK)) {
- pr_info("vgaarb: transferring owner from PCI:%s to PCI:%s\n", pci_name(vgadev->pdev), pci_name(new_vgadev->pdev));
- conflict = __vga_tryget(new_vgadev, VGA_RSRC_LEGACY_MASK);
- if (!conflict)
- __vga_put(new_vgadev, VGA_RSRC_LEGACY_MASK);
- break;
- }
- }
+ /* if we removed locked decodes, the lock counts go to zero and we release */
+ if (decodes_unlocked) {
+ if (decodes_unlocked & VGA_RSRC_LEGACY_IO)
+ vgadev->io_lock_cnt = 0;
+ if (decodes_unlocked & VGA_RSRC_LEGACY_MEM)
+ vgadev->mem_lock_cnt = 0;
+ __vga_put(vgadev, decodes_unlocked);
}
/* change decodes counter */
- if (old_decodes != new_decodes) {
- if (new_decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
- vga_decode_count++;
- else
- vga_decode_count--;
- }
+ if (old_decodes & VGA_RSRC_LEGACY_MASK &&
+ !(new_decodes & VGA_RSRC_LEGACY_MASK))
+ vga_decode_count--;
+ if (!(old_decodes & VGA_RSRC_LEGACY_MASK) &&
+ new_decodes & VGA_RSRC_LEGACY_MASK)
+ vga_decode_count++;
pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
}
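
The reworked counter only moves when a device crosses the legacy-decode
boundary, instead of drifting on every unrelated decode change. An equivalent
restatement of the hunk's logic (the helper name is hypothetical):

/* Equivalent restatement of the counter update above. */
static void example_update_decode_count(int old_decodes, int new_decodes)
{
	bool was_legacy = old_decodes & VGA_RSRC_LEGACY_MASK;
	bool now_legacy = new_decodes & VGA_RSRC_LEGACY_MASK;

	if (was_legacy && !now_legacy)
		vga_decode_count--;	/* device stopped decoding legacy */
	else if (!was_legacy && now_legacy)
		vga_decode_count++;	/* device started decoding legacy */
}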