Diffstat (limited to 'drivers/gpu/drm/vmwgfx')
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c         | 11
 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c     | 11
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c        | 83
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h        |  9
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c    |  2
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c        |  6
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.h        |  2
 drivers/gpu/drm/vmwgfx/vmwgfx_mob.c        | 21
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c   |  9
 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c |  8
 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c |  6
 11 files changed, 83 insertions(+), 85 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 7ce1c2f87d9a..5d5c2bce01f3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -534,7 +534,6 @@ static void vmw_user_bo_release(struct ttm_base_object **p_base)
{
struct vmw_user_buffer_object *vmw_user_bo;
struct ttm_base_object *base = *p_base;
- struct ttm_buffer_object *bo;
*p_base = NULL;
@@ -543,8 +542,7 @@ static void vmw_user_bo_release(struct ttm_base_object **p_base)
vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
prime.base);
- bo = &vmw_user_bo->vbo.base;
- ttm_bo_unref(&bo);
+ ttm_bo_put(&vmw_user_bo->vbo.base);
}
@@ -597,7 +595,6 @@ int vmw_user_bo_alloc(struct vmw_private *dev_priv,
struct ttm_base_object **p_base)
{
struct vmw_user_buffer_object *user_bo;
- struct ttm_buffer_object *tmp;
int ret;
user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
@@ -614,7 +611,7 @@ int vmw_user_bo_alloc(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- tmp = ttm_bo_reference(&user_bo->vbo.base);
+ ttm_bo_get(&user_bo->vbo.base);
ret = ttm_prime_object_init(tfile,
size,
&user_bo->prime,
@@ -623,7 +620,7 @@ int vmw_user_bo_alloc(struct vmw_private *dev_priv,
&vmw_user_bo_release,
&vmw_user_bo_ref_obj_release);
if (unlikely(ret != 0)) {
- ttm_bo_unref(&tmp);
+ ttm_bo_put(&user_bo->vbo.base);
goto out_no_base_object;
}
@@ -911,7 +908,7 @@ int vmw_user_bo_lookup(struct ttm_object_file *tfile,
vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
prime.base);
- (void)ttm_bo_reference(&vmw_user_bo->vbo.base);
+ ttm_bo_get(&vmw_user_bo->vbo.base);
if (p_base)
*p_base = base;
else
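
The vmwgfx_bo.c hunks above, like the similar conversions in vmwgfx_cmdbuf.c, vmwgfx_drv.h, vmwgfx_mob.c, vmwgfx_resource.c and vmwgfx_validation.c further down, switch from ttm_bo_reference()/ttm_bo_unref() to the kref-style ttm_bo_get()/ttm_bo_put(). The new put helper takes a plain pointer and does not clear it, so callers that relied on the old NULL-ing behaviour now do that themselves. A minimal sketch of the pattern, assuming a hypothetical wrapper struct my_obj rather than the actual vmwgfx structures:

#include <drm/ttm/ttm_bo_api.h>

struct my_obj {
	struct ttm_buffer_object base;
};

static struct my_obj *my_obj_get(struct my_obj *obj)
{
	/* ttm_bo_get() returns void, unlike the old ttm_bo_reference() */
	ttm_bo_get(&obj->base);
	return obj;
}

static void my_obj_put(struct my_obj **obj)
{
	/* ttm_bo_put() takes the object itself; clearing the caller's
	 * pointer, which ttm_bo_unref() used to do, is now explicit.
	 */
	ttm_bo_put(&(*obj)->base);
	*obj = NULL;
}

The vmw_bo_reference()/vmw_bo_unreference() helpers in the vmwgfx_drv.h hunk below follow the same shape.
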
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 48d1380a952e..70dab55e7888 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -765,7 +765,7 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
if (info->done)
return true;
-
+
memset(info->node, 0, sizeof(*info->node));
spin_lock(&man->lock);
ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
@@ -1276,8 +1276,10 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
return 0;
out_no_map:
- if (man->using_mob)
- ttm_bo_unref(&man->cmd_space);
+ if (man->using_mob) {
+ ttm_bo_put(man->cmd_space);
+ man->cmd_space = NULL;
+ }
return ret;
}
@@ -1380,7 +1382,8 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
(void) vmw_cmdbuf_idle(man, false, 10*HZ);
if (man->using_mob) {
(void) ttm_bo_kunmap(&man->map_obj);
- ttm_bo_unref(&man->cmd_space);
+ ttm_bo_put(man->cmd_space);
+ man->cmd_space = NULL;
} else {
dma_free_coherent(&man->dev_priv->dev->pdev->dev,
man->size, man->map, man->handle);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 25afb1d594e3..6165fe2c4504 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -26,6 +26,7 @@
**************************************************************************/
#include <linux/module.h>
#include <linux/console.h>
+#include <linux/dma-mapping.h>
#include <drm/drmP.h>
#include "vmwgfx_drv.h"
@@ -34,7 +35,6 @@
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>
-#include <linux/intel-iommu.h>
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
@@ -546,6 +546,21 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
}
/**
+ * vmw_assume_iommu - Figure out whether coherent dma-remapping might be
+ * taking place.
+ * @dev: Pointer to the struct drm_device.
+ *
+ * Return: true if iommu present, false otherwise.
+ */
+static bool vmw_assume_iommu(struct drm_device *dev)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev->dev);
+
+ return !dma_is_direct(ops) && ops &&
+ ops->map_page != dma_direct_map_page;
+}
+
+/**
* vmw_dma_select_mode - Determine how DMA mappings should be set up for this
* system.
*
@@ -565,55 +580,27 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
[vmw_dma_map_populate] = "Keeping DMA mappings.",
[vmw_dma_map_bind] = "Giving up DMA mappings early."};
-#ifdef CONFIG_X86
- const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
-#ifdef CONFIG_INTEL_IOMMU
- if (intel_iommu_enabled) {
+ if (vmw_force_coherent)
+ dev_priv->map_mode = vmw_dma_alloc_coherent;
+ else if (vmw_assume_iommu(dev_priv->dev))
dev_priv->map_mode = vmw_dma_map_populate;
- goto out_fixup;
- }
-#endif
-
- if (!(vmw_force_iommu || vmw_force_coherent)) {
+ else if (!vmw_force_iommu)
dev_priv->map_mode = vmw_dma_phys;
- DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
- return 0;
- }
-
- dev_priv->map_mode = vmw_dma_map_populate;
-
- if (dma_ops && dma_ops->sync_single_for_cpu)
+ else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl())
dev_priv->map_mode = vmw_dma_alloc_coherent;
-#ifdef CONFIG_SWIOTLB
- if (swiotlb_nr_tbl() == 0)
+ else
dev_priv->map_mode = vmw_dma_map_populate;
-#endif
-#ifdef CONFIG_INTEL_IOMMU
-out_fixup:
-#endif
- if (dev_priv->map_mode == vmw_dma_map_populate &&
- vmw_restrict_iommu)
+ if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu)
dev_priv->map_mode = vmw_dma_map_bind;
- if (vmw_force_coherent)
- dev_priv->map_mode = vmw_dma_alloc_coherent;
-
-#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
- /*
- * No coherent page pool
- */
- if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+ /* No TTM coherent page pool? FIXME: Ask TTM instead! */
+ if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
+ (dev_priv->map_mode == vmw_dma_alloc_coherent))
return -EINVAL;
-#endif
-
-#else /* CONFIG_X86 */
- dev_priv->map_mode = vmw_dma_map_populate;
-#endif /* CONFIG_X86 */
DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
-
return 0;
}
@@ -625,24 +612,20 @@ out_fixup:
* With 32-bit we can only handle 32 bit PFNs. Optionally set that
* restriction also for 64-bit systems.
*/
-#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
+ int ret = 0;
- if (intel_iommu_enabled &&
+ ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
+ if (dev_priv->map_mode != vmw_dma_phys &&
(sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
DRM_INFO("Restricting DMA addresses to 44 bits.\n");
- return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
+ return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
}
- return 0;
-}
-#else
-static int vmw_dma_masks(struct vmw_private *dev_priv)
-{
- return 0;
+
+ return ret;
}
-#endif
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
@@ -1582,7 +1565,7 @@ static const struct file_operations vmwgfx_driver_fops = {
};
static struct drm_driver driver = {
- .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
+ .driver_features =
DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
.load = vmw_driver_load,
.unload = vmw_driver_unload,
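
The vmw_dma_masks() change above replaces dma_set_mask() with dma_set_mask_and_coherent(), which programs the streaming and coherent DMA masks together, and the 44-bit restriction now keys off the selected map mode instead of intel_iommu_enabled. A minimal sketch of the idiom on a generic struct device, with example_set_dma_masks() and restrict_to_44bit as illustrative names only:

#include <linux/dma-mapping.h>

static int example_set_dma_masks(struct device *dev, bool restrict_to_44bit)
{
	int ret;

	/* Set both the streaming and the coherent mask in one call,
	 * instead of separate dma_set_mask()/dma_set_coherent_mask().
	 */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	/* Optionally fall back to a narrower mask, e.g. on 32-bit kernels. */
	if (restrict_to_44bit)
		return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(44));

	return 0;
}
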
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index cd607ba9c2fe..accb2fafe2f1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1337,18 +1337,15 @@ static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
*buf = NULL;
if (tmp_buf != NULL) {
- struct ttm_buffer_object *bo = &tmp_buf->base;
-
- ttm_bo_unref(&bo);
+ ttm_bo_put(&tmp_buf->base);
}
}
static inline struct vmw_buffer_object *
vmw_bo_reference(struct vmw_buffer_object *buf)
{
- if (ttm_bo_reference(&buf->base))
- return buf;
- return NULL;
+ ttm_bo_get(&buf->base);
+ return buf;
}
static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index f2d13a72c05d..88b8178d4687 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3570,7 +3570,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
*p_fence = NULL;
}
- return 0;
+ return ret;
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index b351fb5214d3..ed2f67822f45 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1646,7 +1646,7 @@ static int vmw_kms_check_topology(struct drm_device *dev,
struct drm_connector_state *conn_state;
struct vmw_connector_state *vmw_conn_state;
- if (!du->pref_active) {
+ if (!du->pref_active && new_crtc_state->enable) {
ret = -EINVAL;
goto clean;
}
@@ -2554,8 +2554,8 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
user_fence_rep)
{
struct vmw_fence_obj *fence = NULL;
- uint32_t handle;
- int ret;
+ uint32_t handle = 0;
+ int ret = 0;
if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
out_fence)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 655abbcd4058..535b03599e55 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -29,8 +29,8 @@
#define VMWGFX_KMS_H_
#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_probe_helper.h>
#include "vmwgfx_drv.h"
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 7ed179d30ec5..d83cc66e1210 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -300,7 +300,8 @@ out_no_setup:
&batch->otables[i]);
}
- ttm_bo_unref(&batch->otable_bo);
+ ttm_bo_put(batch->otable_bo);
+ batch->otable_bo = NULL;
out_no_bo:
return ret;
}
@@ -365,7 +366,8 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
vmw_bo_fence_single(bo, NULL);
ttm_bo_unreserve(bo);
- ttm_bo_unref(&batch->otable_bo);
+ ttm_bo_put(batch->otable_bo);
+ batch->otable_bo = NULL;
}
/*
@@ -463,7 +465,8 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
out_unreserve:
ttm_bo_unreserve(mob->pt_bo);
- ttm_bo_unref(&mob->pt_bo);
+ ttm_bo_put(mob->pt_bo);
+ mob->pt_bo = NULL;
return ret;
}
@@ -580,8 +583,10 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
*/
void vmw_mob_destroy(struct vmw_mob *mob)
{
- if (mob->pt_bo)
- ttm_bo_unref(&mob->pt_bo);
+ if (mob->pt_bo) {
+ ttm_bo_put(mob->pt_bo);
+ mob->pt_bo = NULL;
+ }
kfree(mob);
}
@@ -698,8 +703,10 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
out_no_cmd_space:
vmw_fifo_resource_dec(dev_priv);
- if (pt_set_up)
- ttm_bo_unref(&mob->pt_bo);
+ if (pt_set_up) {
+ ttm_bo_put(mob->pt_bo);
+ mob->pt_bo = NULL;
+ }
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 3025bfc001a1..a7c30e567f09 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -461,7 +461,8 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
}
INIT_LIST_HEAD(&val_list);
- val_buf->bo = ttm_bo_reference(&res->backup->base);
+ ttm_bo_get(&res->backup->base);
+ val_buf->bo = &res->backup->base;
val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &val_list);
ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
@@ -484,7 +485,8 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
out_no_validate:
ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
- ttm_bo_unref(&val_buf->bo);
+ ttm_bo_put(val_buf->bo);
+ val_buf->bo = NULL;
if (backup_dirty)
vmw_bo_unreference(&res->backup);
@@ -544,7 +546,8 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
INIT_LIST_HEAD(&val_list);
list_add_tail(&val_buf->head, &val_list);
ttm_eu_backoff_reservation(ticket, &val_list);
- ttm_bo_unref(&val_buf->bo);
+ ttm_bo_put(val_buf->bo);
+ val_buf->bo = NULL;
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 31786b200afc..a3357ff7540d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -311,7 +311,13 @@ static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
- return sg_page_iter_dma_address(&viter->iter);
+ /*
+ * FIXME: This driver wrongly mixes DMA and CPU SG list iteration and
+ * needs revision. See
+ * https://lore.kernel.org/lkml/20190104223531.GA1705@ziepe.ca/
+ */
+ return sg_page_iter_dma_address(
+ container_of(&viter->iter, struct sg_dma_page_iter, base));
}
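
The FIXME added above points at the split between CPU-side and DMA-side scatterlist iteration: sg_page_iter_dma_address() now expects a struct sg_dma_page_iter, while struct sg_page_iter is meant for walking CPU pages. A minimal sketch of the two iterator types on an already dma_map_sg()-mapped table, with example_walk_sg() and dma_nents as illustrative names:

#include <linux/scatterlist.h>

static void example_walk_sg(struct sg_table *sgt, int dma_nents)
{
	struct sg_page_iter piter;
	struct sg_dma_page_iter diter;

	/* CPU pages: walk orig_nents entries with a struct sg_page_iter. */
	for_each_sg_page(sgt->sgl, &piter, sgt->orig_nents, 0) {
		struct page *page = sg_page_iter_page(&piter);

		(void)page;	/* access the page through the CPU here */
	}

	/* Bus addresses: walk the count returned by dma_map_sg() with a
	 * struct sg_dma_page_iter and sg_page_iter_dma_address().
	 */
	for_each_sg_dma_page(sgt->sgl, &diter, dma_nents, 0) {
		dma_addr_t addr = sg_page_iter_dma_address(&diter);

		(void)addr;	/* program the device with addr here */
	}
}
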
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index b3f547fc5d3d..e9944ac2e057 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -628,8 +628,10 @@ void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
struct vmw_validation_bo_node *entry;
struct vmw_validation_res_node *val;
- list_for_each_entry(entry, &ctx->bo_list, base.head)
- ttm_bo_unref(&entry->base.bo);
+ list_for_each_entry(entry, &ctx->bo_list, base.head) {
+ ttm_bo_put(entry->base.bo);
+ entry->base.bo = NULL;
+ }
list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
list_for_each_entry(val, &ctx->resource_list, head)