Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_drv.c')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c  93
1 file changed, 67 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 184340d486c3..70e1a8820a7c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -301,6 +301,8 @@ static void vmw_print_capabilities(uint32_t capabilities)
DRM_INFO(" Guest Backed Resources.\n");
if (capabilities & SVGA_CAP_DX)
DRM_INFO(" DX Features.\n");
+ if (capabilities & SVGA_CAP_HP_CMD_QUEUE)
+ DRM_INFO(" HP Command Queue.\n");
}
/**
@@ -1277,8 +1279,7 @@ static void vmw_master_drop(struct drm_device *dev,
ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
ttm_vt_unlock(&dev_priv->fbdev_master.lock);
- if (dev_priv->enable_fb)
- vmw_fb_on(dev_priv);
+ vmw_fb_refresh(dev_priv);
}
/**
@@ -1337,6 +1338,19 @@ static void __vmw_svga_disable(struct vmw_private *dev_priv)
*/
void vmw_svga_disable(struct vmw_private *dev_priv)
{
+ /*
+ * Disabling SVGA will turn off device modesetting capabilities, so
+ * notify KMS about that so that it doesn't cache atomic state that
+ * isn't valid anymore, for example crtcs turned on.
+ * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
+ * but vmw_kms_lost_device() takes the reservation sem and thus we'll
+ * end up with lock order reversal. Thus, a master may actually perform
+ * a new modeset just after we call vmw_kms_lost_device() and race with
+ * vmw_svga_disable(), but that should at worst cause atomic KMS state
+ * to be inconsistent with the device, causing modesetting problems.
+ *
+ */
+ vmw_kms_lost_device(dev_priv->dev);
ttm_write_lock(&dev_priv->reservation_sem, false);
spin_lock(&dev_priv->svga_lock);
if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
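
The locking comment added in this hunk can be pictured with a minimal, hypothetical user-space sketch: a pthread mutex stands in for the reservation sem, and kms_lost_device()/svga_disable() are illustrative stand-ins for vmw_kms_lost_device()/vmw_svga_disable(), not the driver's real API. Because the KMS notification itself needs the reservation sem, it has to run before vmw_svga_disable() takes that lock; the price is the benign race the comment describes.

    #include <pthread.h>

    static pthread_mutex_t reservation_sem = PTHREAD_MUTEX_INITIALIZER;

    /* stand-in for vmw_kms_lost_device(): needs the reservation sem itself */
    static void kms_lost_device(void)
    {
            pthread_mutex_lock(&reservation_sem);
            /* invalidate cached atomic KMS state here */
            pthread_mutex_unlock(&reservation_sem);
    }

    /* stand-in for vmw_svga_disable() */
    static void svga_disable(void)
    {
            kms_lost_device();      /* must run before the lock below is held */
            pthread_mutex_lock(&reservation_sem);
            /* evict VRAM and turn off SVGA modesetting */
            pthread_mutex_unlock(&reservation_sem);
    }

    int main(void)
    {
            svga_disable();
            return 0;
    }

Taking the lock first and then calling kms_lost_device() would deadlock (or invert the lock order against other paths), which is exactly why the notification is issued up front.
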
@@ -1368,28 +1382,23 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
switch (val) {
case PM_HIBERNATION_PREPARE:
- if (dev_priv->enable_fb)
- vmw_fb_off(dev_priv);
- ttm_suspend_lock(&dev_priv->reservation_sem);
-
/*
- * This empties VRAM and unbinds all GMR bindings.
- * Buffer contents is moved to swappable memory.
+ * Take the reservation sem in write mode, which will make sure
+ * there are no other processes holding a buffer object
+ * reservation, meaning we should be able to evict all buffer
+ * objects if needed.
+ * Once user-space processes have been frozen, we can release
+ * the lock again.
*/
- vmw_execbuf_release_pinned_bo(dev_priv);
- vmw_resource_evict_all(dev_priv);
- vmw_release_device_early(dev_priv);
- ttm_bo_swapout_all(&dev_priv->bdev);
- vmw_fence_fifo_down(dev_priv->fman);
+ ttm_suspend_lock(&dev_priv->reservation_sem);
+ dev_priv->suspend_locked = true;
break;
case PM_POST_HIBERNATION:
case PM_POST_RESTORE:
- vmw_fence_fifo_up(dev_priv->fman);
- ttm_suspend_unlock(&dev_priv->reservation_sem);
- if (dev_priv->enable_fb)
- vmw_fb_on(dev_priv);
- break;
- case PM_RESTORE_PREPARE:
+ if (READ_ONCE(dev_priv->suspend_locked)) {
+ dev_priv->suspend_locked = false;
+ ttm_suspend_unlock(&dev_priv->reservation_sem);
+ }
break;
default:
break;
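
As a rough analogy for the new notifier flow (again not the driver's real API), the handshake between the PM notifier and the freeze callback could look like the hypothetical pthread sketch below: the prepare step takes an rwlock in write mode and records that in a flag, and the post-hibernation step only unlocks if the flag says the lock is still held, since vmw_pm_freeze() may have dropped it on a failure path. reservation_sem and suspend_locked here are illustrative stand-ins for the driver's fields.

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_rwlock_t reservation_sem = PTHREAD_RWLOCK_INITIALIZER;
    static bool suspend_locked;

    /* PM_HIBERNATION_PREPARE: block out all other reservation holders */
    static void hibernation_prepare(void)
    {
            pthread_rwlock_wrlock(&reservation_sem);
            suspend_locked = true;
    }

    /* PM_POST_HIBERNATION / PM_POST_RESTORE: unlock only if still held */
    static void post_hibernation(void)
    {
            if (suspend_locked) {
                    suspend_locked = false;
                    pthread_rwlock_unlock(&reservation_sem);
            }
    }

    int main(void)
    {
            hibernation_prepare();
            post_hibernation();
            return 0;
    }
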
@@ -1440,25 +1449,48 @@ static int vmw_pm_freeze(struct device *kdev)
struct pci_dev *pdev = to_pci_dev(kdev);
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
+ int ret;
- dev_priv->suspended = true;
+ /*
+ * Unlock for vmw_kms_suspend.
+ * No user-space processes should be running now.
+ */
+ ttm_suspend_unlock(&dev_priv->reservation_sem);
+ ret = vmw_kms_suspend(dev_priv->dev);
+ if (ret) {
+ ttm_suspend_lock(&dev_priv->reservation_sem);
+ DRM_ERROR("Failed to freeze modesetting.\n");
+ return ret;
+ }
if (dev_priv->enable_fb)
- vmw_fifo_resource_dec(dev_priv);
+ vmw_fb_off(dev_priv);
+ ttm_suspend_lock(&dev_priv->reservation_sem);
+ vmw_execbuf_release_pinned_bo(dev_priv);
+ vmw_resource_evict_all(dev_priv);
+ vmw_release_device_early(dev_priv);
+ ttm_bo_swapout_all(&dev_priv->bdev);
+ if (dev_priv->enable_fb)
+ vmw_fifo_resource_dec(dev_priv);
if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
DRM_ERROR("Can't hibernate while 3D resources are active.\n");
if (dev_priv->enable_fb)
vmw_fifo_resource_inc(dev_priv);
WARN_ON(vmw_request_device_late(dev_priv));
- dev_priv->suspended = false;
+ dev_priv->suspend_locked = false;
+ ttm_suspend_unlock(&dev_priv->reservation_sem);
+ if (dev_priv->suspend_state)
+ vmw_kms_resume(dev);
+ if (dev_priv->enable_fb)
+ vmw_fb_on(dev_priv);
+ vmw_fb_refresh(dev_priv);
return -EBUSY;
}
- if (dev_priv->enable_fb)
- __vmw_svga_disable(dev_priv);
+ vmw_fence_fifo_down(dev_priv->fman);
+ __vmw_svga_disable(dev_priv);
vmw_release_device_late(dev_priv);
-
return 0;
}
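
The reordered freeze path above boils down to: temporarily drop the lock taken at PM_HIBERNATION_PREPARE so modesetting/fbdev teardown can reserve buffer objects (safe because user space is already frozen), retake it for eviction, and unwind in reverse order if 3D resources are still active. A compile-only sketch of that shape follows; every name below is a stand-in rather than the driver's real API.

    #include <stdbool.h>

    struct dev_sketch {
            bool enable_fb;
            bool suspend_locked;
            int num_fifo_resources;
    };

    /* stand-ins for ttm_suspend_lock/unlock plus the suspend_locked flag */
    static void suspend_lock(struct dev_sketch *d)   { d->suspend_locked = true; }
    static void suspend_unlock(struct dev_sketch *d) { d->suspend_locked = false; }
    /* stand-ins for KMS suspend/resume, fbdev on/off and the eviction calls */
    static int kms_suspend(struct dev_sketch *d)     { (void)d; return 0; }
    static void kms_resume(struct dev_sketch *d)     { (void)d; }
    static void fb_off(struct dev_sketch *d)         { (void)d; }
    static void fb_on(struct dev_sketch *d)          { (void)d; }
    static void evict_all(struct dev_sketch *d)      { (void)d; }

    static int pm_freeze_sketch(struct dev_sketch *d)
    {
            int ret;

            suspend_unlock(d);              /* KMS suspend needs to reserve BOs */
            ret = kms_suspend(d);
            if (ret) {
                    suspend_lock(d);        /* leave things as the notifier set them up */
                    return ret;
            }
            if (d->enable_fb)
                    fb_off(d);
            suspend_lock(d);                /* exclusive again for eviction */
            evict_all(d);

            if (d->num_fifo_resources != 0) {
                    /* cannot hibernate: undo in reverse order */
                    suspend_unlock(d);
                    kms_resume(d);
                    if (d->enable_fb)
                            fb_on(d);
                    return -1;              /* -EBUSY in the real code */
            }
            return 0;
    }

    int main(void)
    {
            struct dev_sketch d = { .enable_fb = true };

            return pm_freeze_sketch(&d);
    }
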
@@ -1482,7 +1514,16 @@ static int vmw_pm_restore(struct device *kdev)
if (dev_priv->enable_fb)
__vmw_svga_enable(dev_priv);
- dev_priv->suspended = false;
+ vmw_fence_fifo_up(dev_priv->fman);
+ dev_priv->suspend_locked = false;
+ ttm_suspend_unlock(&dev_priv->reservation_sem);
+ if (dev_priv->suspend_state)
+ vmw_kms_resume(dev_priv->dev);
+
+ if (dev_priv->enable_fb)
+ vmw_fb_on(dev_priv);
+
+ vmw_fb_refresh(dev_priv);
return 0;
}