Diffstat (limited to 'drivers/gpu/drm/vmwgfx')
-rw-r--r--  drivers/gpu/drm/vmwgfx/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile | 5
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h | 12
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h | 7
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga_escape.h | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga_reg.h | 14
-rw-r--r--  drivers/gpu/drm/vmwgfx/ttm_memory.c | 683
-rw-r--r--  drivers/gpu/drm/vmwgfx/ttm_memory.h | 96
-rw-r--r--  drivers/gpu/drm/vmwgfx/ttm_object.c | 180
-rw-r--r--  drivers/gpu/drm/vmwgfx/ttm_object.h | 59
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_binding.c | 45
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_binding.h | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 582
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c | 7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c | 43
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 30
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 26
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 84
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 146
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 84
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 13
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 43
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gem.c | 294
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 16
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c | 199
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.h | 83
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 45
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_mob.c | 13
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c | 20
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_prime.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 17
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 91
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c | 29
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_so.c | 21
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_so.h | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c | 21
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 158
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c | 90
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 135
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | 74
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_va.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.c | 35
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.h | 53
53 files changed, 1307 insertions(+), 2320 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index c9ce47c448e0..a4fabe208d9f 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -4,6 +4,7 @@ config DRM_VMWGFX
depends on DRM && PCI && MMU
depends on X86 || ARM64
select DRM_TTM
+ select DRM_TTM_HELPER
select MAPPING_DIRTY_HELPERS
# Only needed for the transitional use of drm_crtc_init - can be removed
# again once vmwgfx sets up the primary plane itself.
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index bc323f7d4032..e9d13f155d8c 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
+vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_hashtab.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
vmwgfx_cmd.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o vmwgfx_gmrid_manager.o vmwgfx_fence.o \
@@ -9,7 +9,8 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \
- vmwgfx_devcaps.o ttm_object.o ttm_memory.o
+ vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o \
+ vmwgfx_gem.o
vmwgfx-$(CONFIG_DRM_FBDEV_EMULATION) += vmwgfx_fb.o
vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
index 945c84b27e81..d90d940ad3f4 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
@@ -1,6 +1,6 @@
-/**********************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
* Copyright 2012-2021 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -22,7 +22,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- **********************************************************/
+ */
/*
* svga3d_cmd.h --
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
index 379ec15c7758..815d0ab0053f 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
@@ -1,6 +1,6 @@
-/**********************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
* Copyright 1998-2021 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -22,7 +22,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- **********************************************************/
+ */
/*
* svga3d_devcaps.h --
@@ -347,6 +347,10 @@ typedef uint32 SVGA3dDevCapIndex;
#define SVGA3D_DEVCAP_SM5 258
#define SVGA3D_DEVCAP_MULTISAMPLE_8X 259
+#define SVGA3D_DEVCAP_MAX_FORCED_SAMPLE_COUNT 260
+
+#define SVGA3D_DEVCAP_GL43 261
+
#define SVGA3D_DEVCAP_MAX 262
#define SVGA3D_DXFMT_SUPPORTED (1 << 0)
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
index 5af442dad542..925bf4b93f01 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
@@ -1,6 +1,6 @@
-/**********************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
* Copyright 2012-2021 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -22,7 +22,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- **********************************************************/
+ */
/*
* svga3d_dx.h --
@@ -508,11 +508,11 @@ typedef struct SVGA3dCmdDXSetPredication {
#pragma pack(pop)
#pragma pack(push, 1)
-typedef struct MKS3dDXSOState {
+typedef struct SVGA3dDXSOState {
uint32 offset;
uint32 intOffset;
- uint32 vertexCount;
- uint32 dead;
+ uint32 dead1;
+ uint32 dead2;
} SVGA3dDXSOState;
#pragma pack(pop)
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
index 35494a728c7a..6103b41fe92b 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
@@ -1,6 +1,6 @@
-/**********************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
* Copyright 2012-2021 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -22,7 +22,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- **********************************************************/
+ */
/*
* svga3d_limits.h --
@@ -82,4 +82,6 @@
#define SVGA3D_MIN_SBX_DATA_SIZE (GBYTES_2_BYTES(1))
#define SVGA3D_MAX_SBX_DATA_SIZE (GBYTES_2_BYTES(4))
+#define SVGA3D_MIN_SBX_DATA_SIZE_DVM (MBYTES_2_BYTES(900))
+#define SVGA3D_MAX_SBX_DATA_SIZE_DVM (MBYTES_2_BYTES(910))
#endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
index 988d8509c472..b24b4f55c941 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
@@ -1,6 +1,6 @@
-/**********************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
* Copyright 1998-2015 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -22,7 +22,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- **********************************************************/
+ */
/*
* svga3d_reg.h --
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
index 70b88ee16cf6..e9219eb380a2 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
@@ -1,6 +1,6 @@
-/**********************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
* Copyright 2012-2021 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -22,7 +22,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- **********************************************************/
+ */
/*
* svga3d_types.h --
@@ -370,7 +370,6 @@ typedef enum SVGA3dSurfaceFormat {
#define SVGA3D_SURFACE_TRANSFER_FROM_BUFFER (CONST64U(1) << 30)
#define SVGA3D_SURFACE_RESERVED1 (CONST64U(1) << 31)
-#define SVGA3D_SURFACE_VADECODE SVGA3D_SURFACE_RESERVED1
#define SVGA3D_SURFACE_MULTISAMPLE (CONST64U(1) << 32)
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
index bf242c21f352..405f20fc26c2 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
@@ -1,6 +1,6 @@
-/**********************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
* Copyright 2007,2020 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -22,7 +22,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- **********************************************************/
+ */
/*
* svga_escape.h --
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
index aec17c3c6c29..691f48f77e33 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
@@ -1,6 +1,6 @@
-/**********************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
* Copyright 2007-2021 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -22,7 +22,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- **********************************************************/
+ */
/*
* svga_overlay.h --
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
index b3602557de2e..acabdb550c10 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
@@ -1,6 +1,6 @@
-/**********************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
* Copyright 1998-2021 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -22,7 +22,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- **********************************************************/
+ */
/*
* svga_reg.h --
@@ -442,6 +442,7 @@ typedef struct {
#define SVGA_CAP2_TRACE_FULL_FB 0x00002000
#define SVGA_CAP2_EXTRA_REGS 0x00004000
#define SVGA_CAP2_LO_STAGING 0x00008000
+#define SVGA_CAP2_VIDEO_BLT 0x00010000
#define SVGA_CAP2_RESERVED 0x80000000
typedef enum {
@@ -450,9 +451,10 @@ typedef enum {
SVGABackdoorCap3dHWVersion = 2,
SVGABackdoorCapDeviceCaps2 = 3,
SVGABackdoorCapDevelCaps = 4,
- SVGABackdoorDevelRenderer = 5,
- SVGABackdoorDevelUsingISB = 6,
- SVGABackdoorCapMax = 7,
+ SVGABackdoorCapDevCaps = 5,
+ SVGABackdoorDevelRenderer = 6,
+ SVGABackdoorDevelUsingISB = 7,
+ SVGABackdoorCapMax = 8,
} SVGABackdoorCapType;
enum {
diff --git a/drivers/gpu/drm/vmwgfx/ttm_memory.c b/drivers/gpu/drm/vmwgfx/ttm_memory.c
deleted file mode 100644
index 7f7fe35fc21d..000000000000
--- a/drivers/gpu/drm/vmwgfx/ttm_memory.c
+++ /dev/null
@@ -1,683 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
-/**************************************************************************
- *
- * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#define pr_fmt(fmt) "[TTM] " fmt
-
-#include <linux/spinlock.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/swap.h>
-
-#include <drm/drm_device.h>
-#include <drm/drm_file.h>
-#include <drm/ttm/ttm_device.h>
-
-#include "ttm_memory.h"
-
-#define TTM_MEMORY_ALLOC_RETRIES 4
-
-struct ttm_mem_global ttm_mem_glob;
-EXPORT_SYMBOL(ttm_mem_glob);
-
-struct ttm_mem_zone {
- struct kobject kobj;
- struct ttm_mem_global *glob;
- const char *name;
- uint64_t zone_mem;
- uint64_t emer_mem;
- uint64_t max_mem;
- uint64_t swap_limit;
- uint64_t used_mem;
-};
-
-static struct attribute ttm_mem_sys = {
- .name = "zone_memory",
- .mode = S_IRUGO
-};
-static struct attribute ttm_mem_emer = {
- .name = "emergency_memory",
- .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_mem_max = {
- .name = "available_memory",
- .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_mem_swap = {
- .name = "swap_limit",
- .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_mem_used = {
- .name = "used_memory",
- .mode = S_IRUGO
-};
-
-static void ttm_mem_zone_kobj_release(struct kobject *kobj)
-{
- struct ttm_mem_zone *zone =
- container_of(kobj, struct ttm_mem_zone, kobj);
-
- pr_info("Zone %7s: Used memory at exit: %llu KiB\n",
- zone->name, (unsigned long long)zone->used_mem >> 10);
- kfree(zone);
-}
-
-static ssize_t ttm_mem_zone_show(struct kobject *kobj,
- struct attribute *attr,
- char *buffer)
-{
- struct ttm_mem_zone *zone =
- container_of(kobj, struct ttm_mem_zone, kobj);
- uint64_t val = 0;
-
- spin_lock(&zone->glob->lock);
- if (attr == &ttm_mem_sys)
- val = zone->zone_mem;
- else if (attr == &ttm_mem_emer)
- val = zone->emer_mem;
- else if (attr == &ttm_mem_max)
- val = zone->max_mem;
- else if (attr == &ttm_mem_swap)
- val = zone->swap_limit;
- else if (attr == &ttm_mem_used)
- val = zone->used_mem;
- spin_unlock(&zone->glob->lock);
-
- return snprintf(buffer, PAGE_SIZE, "%llu\n",
- (unsigned long long) val >> 10);
-}
-
-static void ttm_check_swapping(struct ttm_mem_global *glob);
-
-static ssize_t ttm_mem_zone_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t size)
-{
- struct ttm_mem_zone *zone =
- container_of(kobj, struct ttm_mem_zone, kobj);
- int chars;
- unsigned long val;
- uint64_t val64;
-
- chars = sscanf(buffer, "%lu", &val);
- if (chars == 0)
- return size;
-
- val64 = val;
- val64 <<= 10;
-
- spin_lock(&zone->glob->lock);
- if (val64 > zone->zone_mem)
- val64 = zone->zone_mem;
- if (attr == &ttm_mem_emer) {
- zone->emer_mem = val64;
- if (zone->max_mem > val64)
- zone->max_mem = val64;
- } else if (attr == &ttm_mem_max) {
- zone->max_mem = val64;
- if (zone->emer_mem < val64)
- zone->emer_mem = val64;
- } else if (attr == &ttm_mem_swap)
- zone->swap_limit = val64;
- spin_unlock(&zone->glob->lock);
-
- ttm_check_swapping(zone->glob);
-
- return size;
-}
-
-static struct attribute *ttm_mem_zone_attrs[] = {
- &ttm_mem_sys,
- &ttm_mem_emer,
- &ttm_mem_max,
- &ttm_mem_swap,
- &ttm_mem_used,
- NULL
-};
-
-static const struct sysfs_ops ttm_mem_zone_ops = {
- .show = &ttm_mem_zone_show,
- .store = &ttm_mem_zone_store
-};
-
-static struct kobj_type ttm_mem_zone_kobj_type = {
- .release = &ttm_mem_zone_kobj_release,
- .sysfs_ops = &ttm_mem_zone_ops,
- .default_attrs = ttm_mem_zone_attrs,
-};
-
-static struct attribute ttm_mem_global_lower_mem_limit = {
- .name = "lower_mem_limit",
- .mode = S_IRUGO | S_IWUSR
-};
-
-static ssize_t ttm_mem_global_show(struct kobject *kobj,
- struct attribute *attr,
- char *buffer)
-{
- struct ttm_mem_global *glob =
- container_of(kobj, struct ttm_mem_global, kobj);
- uint64_t val = 0;
-
- spin_lock(&glob->lock);
- val = glob->lower_mem_limit;
- spin_unlock(&glob->lock);
- /* convert from number of pages to KB */
- val <<= (PAGE_SHIFT - 10);
- return snprintf(buffer, PAGE_SIZE, "%llu\n",
- (unsigned long long) val);
-}
-
-static ssize_t ttm_mem_global_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t size)
-{
- int chars;
- uint64_t val64;
- unsigned long val;
- struct ttm_mem_global *glob =
- container_of(kobj, struct ttm_mem_global, kobj);
-
- chars = sscanf(buffer, "%lu", &val);
- if (chars == 0)
- return size;
-
- val64 = val;
- /* convert from KB to number of pages */
- val64 >>= (PAGE_SHIFT - 10);
-
- spin_lock(&glob->lock);
- glob->lower_mem_limit = val64;
- spin_unlock(&glob->lock);
-
- return size;
-}
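
For reference, the shift by (PAGE_SHIFT - 10) in the two handlers above converts between page counts and KiB. A minimal illustration of the arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12); the variable names are hypothetical:

	uint64_t pages = 256;
	uint64_t kib  = pages << (PAGE_SHIFT - 10);	/* 256 pages -> 1024 KiB */
	uint64_t back = kib >> (PAGE_SHIFT - 10);	/* 1024 KiB -> 256 pages */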
-
-static struct attribute *ttm_mem_global_attrs[] = {
- &ttm_mem_global_lower_mem_limit,
- NULL
-};
-
-static const struct sysfs_ops ttm_mem_global_ops = {
- .show = &ttm_mem_global_show,
- .store = &ttm_mem_global_store,
-};
-
-static struct kobj_type ttm_mem_glob_kobj_type = {
- .sysfs_ops = &ttm_mem_global_ops,
- .default_attrs = ttm_mem_global_attrs,
-};
-
-static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
- bool from_wq, uint64_t extra)
-{
- unsigned int i;
- struct ttm_mem_zone *zone;
- uint64_t target;
-
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
-
- if (from_wq)
- target = zone->swap_limit;
- else if (capable(CAP_SYS_ADMIN))
- target = zone->emer_mem;
- else
- target = zone->max_mem;
-
- target = (extra > target) ? 0ULL : target;
-
- if (zone->used_mem > target)
- return true;
- }
- return false;
-}
-
-/*
- * At this point we only support a single shrink callback.
- * Extend this if needed, perhaps using a linked list of callbacks.
- * Note that this function is reentrant:
- * many threads may try to swap out at any given time.
- */
-
-static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
- uint64_t extra, struct ttm_operation_ctx *ctx)
-{
- int ret;
-
- spin_lock(&glob->lock);
-
- while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
- spin_unlock(&glob->lock);
- ret = ttm_global_swapout(ctx, GFP_KERNEL);
- spin_lock(&glob->lock);
- if (unlikely(ret <= 0))
- break;
- }
-
- spin_unlock(&glob->lock);
-}
-
-static void ttm_shrink_work(struct work_struct *work)
-{
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
- struct ttm_mem_global *glob =
- container_of(work, struct ttm_mem_global, work);
-
- ttm_shrink(glob, true, 0ULL, &ctx);
-}
-
-static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
- const struct sysinfo *si)
-{
- struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
- uint64_t mem;
- int ret;
-
- if (unlikely(!zone))
- return -ENOMEM;
-
- mem = si->totalram - si->totalhigh;
- mem *= si->mem_unit;
-
- zone->name = "kernel";
- zone->zone_mem = mem;
- zone->max_mem = mem >> 1;
- zone->emer_mem = (mem >> 1) + (mem >> 2);
- zone->swap_limit = zone->max_mem - (mem >> 3);
- zone->used_mem = 0;
- zone->glob = glob;
- glob->zone_kernel = zone;
- ret = kobject_init_and_add(
- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
- if (unlikely(ret != 0)) {
- kobject_put(&zone->kobj);
- return ret;
- }
- glob->zones[glob->num_zones++] = zone;
- return 0;
-}
-
-#ifdef CONFIG_HIGHMEM
-static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
- const struct sysinfo *si)
-{
- struct ttm_mem_zone *zone;
- uint64_t mem;
- int ret;
-
- if (si->totalhigh == 0)
- return 0;
-
- zone = kzalloc(sizeof(*zone), GFP_KERNEL);
- if (unlikely(!zone))
- return -ENOMEM;
-
- mem = si->totalram;
- mem *= si->mem_unit;
-
- zone->name = "highmem";
- zone->zone_mem = mem;
- zone->max_mem = mem >> 1;
- zone->emer_mem = (mem >> 1) + (mem >> 2);
- zone->swap_limit = zone->max_mem - (mem >> 3);
- zone->used_mem = 0;
- zone->glob = glob;
- glob->zone_highmem = zone;
- ret = kobject_init_and_add(
- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
- zone->name);
- if (unlikely(ret != 0)) {
- kobject_put(&zone->kobj);
- return ret;
- }
- glob->zones[glob->num_zones++] = zone;
- return 0;
-}
-#else
-static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
- const struct sysinfo *si)
-{
- struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
- uint64_t mem;
- int ret;
-
- if (unlikely(!zone))
- return -ENOMEM;
-
- mem = si->totalram;
- mem *= si->mem_unit;
-
- /**
- * No special dma32 zone needed.
- */
-
- if (mem <= ((uint64_t) 1ULL << 32)) {
- kfree(zone);
- return 0;
- }
-
- /*
- * Limit max dma32 memory to 4GB for now
- * until we can figure out how big this
- * zone really is.
- */
-
- mem = ((uint64_t) 1ULL << 32);
- zone->name = "dma32";
- zone->zone_mem = mem;
- zone->max_mem = mem >> 1;
- zone->emer_mem = (mem >> 1) + (mem >> 2);
- zone->swap_limit = zone->max_mem - (mem >> 3);
- zone->used_mem = 0;
- zone->glob = glob;
- glob->zone_dma32 = zone;
- ret = kobject_init_and_add(
- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
- if (unlikely(ret != 0)) {
- kobject_put(&zone->kobj);
- return ret;
- }
- glob->zones[glob->num_zones++] = zone;
- return 0;
-}
-#endif
-
-int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev)
-{
- struct sysinfo si;
- int ret;
- int i;
- struct ttm_mem_zone *zone;
-
- spin_lock_init(&glob->lock);
- glob->swap_queue = create_singlethread_workqueue("ttm_swap");
- INIT_WORK(&glob->work, ttm_shrink_work);
-
- ret = kobject_init_and_add(&glob->kobj, &ttm_mem_glob_kobj_type,
- &dev->kobj, "memory_accounting");
- if (unlikely(ret != 0)) {
- kobject_put(&glob->kobj);
- return ret;
- }
-
- si_meminfo(&si);
-
- spin_lock(&glob->lock);
- /* set it as 0 by default to keep original behavior of OOM */
- glob->lower_mem_limit = 0;
- spin_unlock(&glob->lock);
-
- ret = ttm_mem_init_kernel_zone(glob, &si);
- if (unlikely(ret != 0))
- goto out_no_zone;
-#ifdef CONFIG_HIGHMEM
- ret = ttm_mem_init_highmem_zone(glob, &si);
- if (unlikely(ret != 0))
- goto out_no_zone;
-#else
- ret = ttm_mem_init_dma32_zone(glob, &si);
- if (unlikely(ret != 0))
- goto out_no_zone;
-#endif
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- pr_info("Zone %7s: Available graphics memory: %llu KiB\n",
- zone->name, (unsigned long long)zone->max_mem >> 10);
- }
- return 0;
-out_no_zone:
- ttm_mem_global_release(glob);
- return ret;
-}
-
-void ttm_mem_global_release(struct ttm_mem_global *glob)
-{
- struct ttm_mem_zone *zone;
- unsigned int i;
-
- destroy_workqueue(glob->swap_queue);
- glob->swap_queue = NULL;
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- kobject_del(&zone->kobj);
- kobject_put(&zone->kobj);
- }
- kobject_del(&glob->kobj);
- kobject_put(&glob->kobj);
- memset(glob, 0, sizeof(*glob));
-}
-
-static void ttm_check_swapping(struct ttm_mem_global *glob)
-{
- bool needs_swapping = false;
- unsigned int i;
- struct ttm_mem_zone *zone;
-
- spin_lock(&glob->lock);
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- if (zone->used_mem > zone->swap_limit) {
- needs_swapping = true;
- break;
- }
- }
-
- spin_unlock(&glob->lock);
-
- if (unlikely(needs_swapping))
- (void)queue_work(glob->swap_queue, &glob->work);
-
-}
-
-static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
- struct ttm_mem_zone *single_zone,
- uint64_t amount)
-{
- unsigned int i;
- struct ttm_mem_zone *zone;
-
- spin_lock(&glob->lock);
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- if (single_zone && zone != single_zone)
- continue;
- zone->used_mem -= amount;
- }
- spin_unlock(&glob->lock);
-}
-
-void ttm_mem_global_free(struct ttm_mem_global *glob,
- uint64_t amount)
-{
- return ttm_mem_global_free_zone(glob, glob->zone_kernel, amount);
-}
-EXPORT_SYMBOL(ttm_mem_global_free);
-
-/*
- * check if the available mem is under lower memory limit
- *
- * a. if no swap disk at all or free swap space is under swap_mem_limit
- * but available system mem is bigger than sys_mem_limit, allow TTM
- * allocation;
- *
- * b. if the available system mem is less than sys_mem_limit but free
- * swap disk is bigger than swap_mem_limit, allow TTM allocation.
- */
-bool
-ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
- uint64_t num_pages,
- struct ttm_operation_ctx *ctx)
-{
- int64_t available;
-
- /* We allow over commit during suspend */
- if (ctx->force_alloc)
- return false;
-
- available = get_nr_swap_pages() + si_mem_available();
- available -= num_pages;
- if (available < glob->lower_mem_limit)
- return true;
-
- return false;
-}
-
-static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
- struct ttm_mem_zone *single_zone,
- uint64_t amount, bool reserve)
-{
- uint64_t limit;
- int ret = -ENOMEM;
- unsigned int i;
- struct ttm_mem_zone *zone;
-
- spin_lock(&glob->lock);
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- if (single_zone && zone != single_zone)
- continue;
-
- limit = (capable(CAP_SYS_ADMIN)) ?
- zone->emer_mem : zone->max_mem;
-
- if (zone->used_mem > limit)
- goto out_unlock;
- }
-
- if (reserve) {
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- if (single_zone && zone != single_zone)
- continue;
- zone->used_mem += amount;
- }
- }
-
- ret = 0;
-out_unlock:
- spin_unlock(&glob->lock);
- ttm_check_swapping(glob);
-
- return ret;
-}
-
-
-static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
- struct ttm_mem_zone *single_zone,
- uint64_t memory,
- struct ttm_operation_ctx *ctx)
-{
- int count = TTM_MEMORY_ALLOC_RETRIES;
-
- while (unlikely(ttm_mem_global_reserve(glob,
- single_zone,
- memory, true)
- != 0)) {
- if (ctx->no_wait_gpu)
- return -ENOMEM;
- if (unlikely(count-- == 0))
- return -ENOMEM;
- ttm_shrink(glob, false, memory + (memory >> 2) + 16, ctx);
- }
-
- return 0;
-}
-
-int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
- struct ttm_operation_ctx *ctx)
-{
- /**
- * Normal allocations of kernel memory are registered in
- * the kernel zone.
- */
-
- return ttm_mem_global_alloc_zone(glob, glob->zone_kernel, memory, ctx);
-}
-EXPORT_SYMBOL(ttm_mem_global_alloc);
-
-int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
- struct page *page, uint64_t size,
- struct ttm_operation_ctx *ctx)
-{
- struct ttm_mem_zone *zone = NULL;
-
- /**
-	 * Page allocations may be registered in a single zone
- * only if highmem or !dma32.
- */
-
-#ifdef CONFIG_HIGHMEM
- if (PageHighMem(page) && glob->zone_highmem != NULL)
- zone = glob->zone_highmem;
-#else
- if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
- zone = glob->zone_kernel;
-#endif
- return ttm_mem_global_alloc_zone(glob, zone, size, ctx);
-}
-
-void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,
- uint64_t size)
-{
- struct ttm_mem_zone *zone = NULL;
-
-#ifdef CONFIG_HIGHMEM
- if (PageHighMem(page) && glob->zone_highmem != NULL)
- zone = glob->zone_highmem;
-#else
- if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
- zone = glob->zone_kernel;
-#endif
- ttm_mem_global_free_zone(glob, zone, size);
-}
-
-size_t ttm_round_pot(size_t size)
-{
- if ((size & (size - 1)) == 0)
- return size;
- else if (size > PAGE_SIZE)
- return PAGE_ALIGN(size);
- else {
- size_t tmp_size = 4;
-
- while (tmp_size < size)
- tmp_size <<= 1;
-
- return tmp_size;
- }
- return 0;
-}
-EXPORT_SYMBOL(ttm_round_pot);
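
For reference, the removed ttm_round_pot() rounded an allocation size up to the accounting granularity: sizes at or below PAGE_SIZE go to the next power of two, larger sizes to a whole page multiple. A userspace-style sketch of the behavior, assuming 4 KiB pages; the asserts are illustrative only:

	assert(ttm_round_pot(24) == 32);	/* next power of two */
	assert(ttm_round_pot(4096) == 4096);	/* already a power of two */
	assert(ttm_round_pot(5000) == 8192);	/* > PAGE_SIZE: PAGE_ALIGN() */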
diff --git a/drivers/gpu/drm/vmwgfx/ttm_memory.h b/drivers/gpu/drm/vmwgfx/ttm_memory.h
deleted file mode 100644
index c50dba774485..000000000000
--- a/drivers/gpu/drm/vmwgfx/ttm_memory.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#ifndef TTM_MEMORY_H
-#define TTM_MEMORY_H
-
-#include <linux/workqueue.h>
-#include <linux/spinlock.h>
-#include <linux/bug.h>
-#include <linux/wait.h>
-#include <linux/errno.h>
-#include <linux/kobject.h>
-#include <linux/mm.h>
-
-#include <drm/ttm/ttm_bo_api.h>
-
-/**
- * struct ttm_mem_global - Global memory accounting structure.
- *
- * @shrink: A single callback to shrink TTM memory usage. Extend this
- * to a linked list to be able to handle multiple callbacks when needed.
- * @swap_queue: A workqueue to handle shrinking in low memory situations. We
- * need a separate workqueue since it will spend a lot of time waiting
- * for the GPU, and this will otherwise block other workqueue tasks(?)
- * At this point we use only a single-threaded workqueue.
- * @work: The workqueue callback for the shrink queue.
- * @lock: Lock to protect the @shrink - and the memory accounting members,
- * that is, essentially the whole structure with some exceptions.
- * @lower_mem_limit: include lower limit of swap space and lower limit of
- * system memory.
- * @zones: Array of pointers to accounting zones.
- * @num_zones: Number of populated entries in the @zones array.
- * @zone_kernel: Pointer to the kernel zone.
- * @zone_highmem: Pointer to the highmem zone if there is one.
- * @zone_dma32: Pointer to the dma32 zone if there is one.
- *
- * Note that this structure is not per device. It should be global for all
- * graphics devices.
- */
-
-#define TTM_MEM_MAX_ZONES 2
-struct ttm_mem_zone;
-extern struct ttm_mem_global {
- struct kobject kobj;
- struct workqueue_struct *swap_queue;
- struct work_struct work;
- spinlock_t lock;
- uint64_t lower_mem_limit;
- struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
- unsigned int num_zones;
- struct ttm_mem_zone *zone_kernel;
-#ifdef CONFIG_HIGHMEM
- struct ttm_mem_zone *zone_highmem;
-#else
- struct ttm_mem_zone *zone_dma32;
-#endif
-} ttm_mem_glob;
-
-int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev);
-void ttm_mem_global_release(struct ttm_mem_global *glob);
-int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
- struct ttm_operation_ctx *ctx);
-void ttm_mem_global_free(struct ttm_mem_global *glob, uint64_t amount);
-int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
- struct page *page, uint64_t size,
- struct ttm_operation_ctx *ctx);
-void ttm_mem_global_free_page(struct ttm_mem_global *glob,
- struct page *page, uint64_t size);
-size_t ttm_round_pot(size_t size);
-bool ttm_check_under_lowerlimit(struct ttm_mem_global *glob, uint64_t num_pages,
- struct ttm_operation_ctx *ctx);
-#endif
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c
index 899945f54dc7..26a55fef1ab5 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.c
@@ -50,6 +50,7 @@
#include <linux/atomic.h>
#include <linux/module.h>
#include "ttm_object.h"
+#include "vmwgfx_drv.h"
MODULE_IMPORT_NS(DMA_BUF);
@@ -73,7 +74,7 @@ struct ttm_object_file {
struct ttm_object_device *tdev;
spinlock_t lock;
struct list_head ref_list;
- struct drm_open_hash ref_hash[TTM_REF_NUM];
+ struct vmwgfx_open_hash ref_hash;
struct kref refcount;
};
@@ -91,12 +92,10 @@ struct ttm_object_file {
struct ttm_object_device {
spinlock_t object_lock;
- struct drm_open_hash object_hash;
+ struct vmwgfx_open_hash object_hash;
atomic_t object_count;
- struct ttm_mem_global *mem_glob;
struct dma_buf_ops ops;
void (*dmabuf_release)(struct dma_buf *dma_buf);
- size_t dma_buf_size;
struct idr idr;
};
@@ -123,10 +122,9 @@ struct ttm_object_device {
struct ttm_ref_object {
struct rcu_head rcu_head;
- struct drm_hash_item hash;
+ struct vmwgfx_hash_item hash;
struct list_head head;
struct kref kref;
- enum ttm_ref_type ref_type;
struct ttm_base_object *obj;
struct ttm_object_file *tfile;
};
@@ -162,9 +160,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
struct ttm_base_object *base,
bool shareable,
enum ttm_object_type object_type,
- void (*refcount_release) (struct ttm_base_object **),
- void (*ref_obj_release) (struct ttm_base_object *,
- enum ttm_ref_type ref_type))
+ void (*refcount_release) (struct ttm_base_object **))
{
struct ttm_object_device *tdev = tfile->tdev;
int ret;
@@ -172,7 +168,6 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
base->shareable = shareable;
base->tfile = ttm_object_file_ref(tfile);
base->refcount_release = refcount_release;
- base->ref_obj_release = ref_obj_release;
base->object_type = object_type;
kref_init(&base->refcount);
idr_preload(GFP_KERNEL);
@@ -184,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
return ret;
base->handle = ret;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
+ ret = ttm_ref_object_add(tfile, base, NULL, false);
if (unlikely(ret != 0))
goto out_err1;
@@ -247,12 +242,12 @@ void ttm_base_object_unref(struct ttm_base_object **p_base)
struct ttm_base_object *
ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key)
{
- struct drm_hash_item *hash;
- struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
+ struct vmwgfx_hash_item *hash;
+ struct vmwgfx_open_hash *ht = &tfile->ref_hash;
int ret;
rcu_read_lock();
- ret = drm_ht_find_item_rcu(ht, key, &hash);
+ ret = vmwgfx_ht_find_item_rcu(ht, key, &hash);
if (ret) {
rcu_read_unlock();
return NULL;
@@ -267,12 +262,12 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
uint32_t key)
{
struct ttm_base_object *base = NULL;
- struct drm_hash_item *hash;
- struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
+ struct vmwgfx_hash_item *hash;
+ struct vmwgfx_open_hash *ht = &tfile->ref_hash;
int ret;
rcu_read_lock();
- ret = drm_ht_find_item_rcu(ht, key, &hash);
+ ret = vmwgfx_ht_find_item_rcu(ht, key, &hash);
if (likely(ret == 0)) {
base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
@@ -299,64 +294,14 @@ ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
return base;
}
-/**
- * ttm_ref_object_exists - Check whether a caller has a valid ref object
- * (has opened) a base object.
- *
- * @tfile: Pointer to a struct ttm_object_file identifying the caller.
- * @base: Pointer to a struct base object.
- *
- * Checks whether the caller identified by @tfile has put a valid USAGE
- * reference object on the base object identified by @base.
- */
-bool ttm_ref_object_exists(struct ttm_object_file *tfile,
- struct ttm_base_object *base)
-{
- struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
- struct drm_hash_item *hash;
- struct ttm_ref_object *ref;
-
- rcu_read_lock();
- if (unlikely(drm_ht_find_item_rcu(ht, base->handle, &hash) != 0))
- goto out_false;
-
- /*
- * Verify that the ref object is really pointing to our base object.
- * Our base object could actually be dead, and the ref object pointing
- * to another base object with the same handle.
- */
- ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
- if (unlikely(base != ref->obj))
- goto out_false;
-
- /*
- * Verify that the ref->obj pointer was actually valid!
- */
- rmb();
- if (unlikely(kref_read(&ref->kref) == 0))
- goto out_false;
-
- rcu_read_unlock();
- return true;
-
- out_false:
- rcu_read_unlock();
- return false;
-}
-
int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
- enum ttm_ref_type ref_type, bool *existed,
+ bool *existed,
bool require_existed)
{
- struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+ struct vmwgfx_open_hash *ht = &tfile->ref_hash;
struct ttm_ref_object *ref;
- struct drm_hash_item *hash;
- struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
+ struct vmwgfx_hash_item *hash;
int ret = -EINVAL;
if (base->tfile != tfile && !base->shareable)
@@ -367,7 +312,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
while (ret == -EINVAL) {
rcu_read_lock();
- ret = drm_ht_find_item_rcu(ht, base->handle, &hash);
+ ret = vmwgfx_ht_find_item_rcu(ht, base->handle, &hash);
if (ret == 0) {
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
@@ -381,24 +326,18 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
if (require_existed)
return -EPERM;
- ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
- &ctx);
- if (unlikely(ret != 0))
- return ret;
ref = kmalloc(sizeof(*ref), GFP_KERNEL);
if (unlikely(ref == NULL)) {
- ttm_mem_global_free(mem_glob, sizeof(*ref));
return -ENOMEM;
}
ref->hash.key = base->handle;
ref->obj = base;
ref->tfile = tfile;
- ref->ref_type = ref_type;
kref_init(&ref->kref);
spin_lock(&tfile->lock);
- ret = drm_ht_insert_item_rcu(ht, &ref->hash);
+ ret = vmwgfx_ht_insert_item_rcu(ht, &ref->hash);
if (likely(ret == 0)) {
list_add_tail(&ref->head, &tfile->ref_list);
@@ -412,7 +351,6 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
spin_unlock(&tfile->lock);
BUG_ON(ret != -EINVAL);
- ttm_mem_global_free(mem_glob, sizeof(*ref));
kfree(ref);
}
@@ -424,35 +362,29 @@ ttm_ref_object_release(struct kref *kref)
{
struct ttm_ref_object *ref =
container_of(kref, struct ttm_ref_object, kref);
- struct ttm_base_object *base = ref->obj;
struct ttm_object_file *tfile = ref->tfile;
- struct drm_open_hash *ht;
- struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
+ struct vmwgfx_open_hash *ht;
- ht = &tfile->ref_hash[ref->ref_type];
- (void)drm_ht_remove_item_rcu(ht, &ref->hash);
+ ht = &tfile->ref_hash;
+ (void)vmwgfx_ht_remove_item_rcu(ht, &ref->hash);
list_del(&ref->head);
spin_unlock(&tfile->lock);
- if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
- base->ref_obj_release(base, ref->ref_type);
-
ttm_base_object_unref(&ref->obj);
- ttm_mem_global_free(mem_glob, sizeof(*ref));
kfree_rcu(ref, rcu_head);
spin_lock(&tfile->lock);
}
int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
- unsigned long key, enum ttm_ref_type ref_type)
+ unsigned long key)
{
- struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+ struct vmwgfx_open_hash *ht = &tfile->ref_hash;
struct ttm_ref_object *ref;
- struct drm_hash_item *hash;
+ struct vmwgfx_hash_item *hash;
int ret;
spin_lock(&tfile->lock);
- ret = drm_ht_find_item(ht, key, &hash);
+ ret = vmwgfx_ht_find_item(ht, key, &hash);
if (unlikely(ret != 0)) {
spin_unlock(&tfile->lock);
return -EINVAL;
@@ -467,7 +399,6 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
struct ttm_ref_object *ref;
struct list_head *list;
- unsigned int i;
struct ttm_object_file *tfile = *p_tfile;
*p_tfile = NULL;
@@ -485,8 +416,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
}
spin_unlock(&tfile->lock);
- for (i = 0; i < TTM_REF_NUM; ++i)
- drm_ht_remove(&tfile->ref_hash[i]);
+ vmwgfx_ht_remove(&tfile->ref_hash);
ttm_object_file_unref(&tfile);
}
@@ -495,8 +425,6 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
unsigned int hash_order)
{
struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
- unsigned int i;
- unsigned int j = 0;
int ret;
if (unlikely(tfile == NULL))
@@ -507,18 +435,13 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
kref_init(&tfile->refcount);
INIT_LIST_HEAD(&tfile->ref_list);
- for (i = 0; i < TTM_REF_NUM; ++i) {
- ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
- if (ret) {
- j = i;
- goto out_err;
- }
- }
+ ret = vmwgfx_ht_create(&tfile->ref_hash, hash_order);
+ if (ret)
+ goto out_err;
return tfile;
out_err:
- for (i = 0; i < j; ++i)
- drm_ht_remove(&tfile->ref_hash[i]);
+ vmwgfx_ht_remove(&tfile->ref_hash);
kfree(tfile);
@@ -526,8 +449,7 @@ out_err:
}
struct ttm_object_device *
-ttm_object_device_init(struct ttm_mem_global *mem_glob,
- unsigned int hash_order,
+ttm_object_device_init(unsigned int hash_order,
const struct dma_buf_ops *ops)
{
struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
@@ -536,19 +458,24 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob,
if (unlikely(tdev == NULL))
return NULL;
- tdev->mem_glob = mem_glob;
spin_lock_init(&tdev->object_lock);
atomic_set(&tdev->object_count, 0);
- ret = drm_ht_create(&tdev->object_hash, hash_order);
+ ret = vmwgfx_ht_create(&tdev->object_hash, hash_order);
if (ret != 0)
goto out_no_object_hash;
- idr_init_base(&tdev->idr, 1);
+ /*
+ * Our base is at VMWGFX_NUM_MOB + 1 because we want to create
+ * a seperate namespace for GEM handles (which are
+	 * a separate namespace for GEM handles (which are
+	 * 1..VMWGFX_NUM_MOB) and the surface handles. Some ioctls
+ * easily be able to tell whether the handle refers to a
+ * GEM buffer or a surface.
+ */
+ idr_init_base(&tdev->idr, VMWGFX_NUM_MOB + 1);
tdev->ops = *ops;
tdev->dmabuf_release = tdev->ops.release;
tdev->ops.release = ttm_prime_dmabuf_release;
- tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
- ttm_round_pot(sizeof(struct file));
return tdev;
out_no_object_hash:
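
Given the idr base chosen above, the handle value alone tells the two namespaces apart. A hypothetical helper sketch building on that split (vmw_handle_is_gem is not part of this patch):

	static inline bool vmw_handle_is_gem(u32 handle)
	{
		return handle >= 1 && handle <= VMWGFX_NUM_MOB;
	}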
@@ -564,7 +491,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
idr_destroy(&tdev->idr);
- drm_ht_remove(&tdev->object_hash);
+ vmwgfx_ht_remove(&tdev->object_hash);
kfree(tdev);
}
@@ -633,7 +560,6 @@ static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
if (prime->dma_buf == dma_buf)
prime->dma_buf = NULL;
mutex_unlock(&prime->mutex);
- ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
ttm_base_object_unref(&base);
}
@@ -667,7 +593,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
prime = (struct ttm_prime_object *) dma_buf->priv;
base = &prime->base;
*handle = base->handle;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
+ ret = ttm_ref_object_add(tfile, base, NULL, false);
dma_buf_put(dma_buf);
@@ -715,30 +641,18 @@ int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
dma_buf = prime->dma_buf;
if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
- struct ttm_operation_ctx ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
exp_info.ops = &tdev->ops;
exp_info.size = prime->size;
exp_info.flags = flags;
exp_info.priv = prime;
/*
- * Need to create a new dma_buf, with memory accounting.
+ * Need to create a new dma_buf
*/
- ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
- &ctx);
- if (unlikely(ret != 0)) {
- mutex_unlock(&prime->mutex);
- goto out_unref;
- }
dma_buf = dma_buf_export(&exp_info);
if (IS_ERR(dma_buf)) {
ret = PTR_ERR(dma_buf);
- ttm_mem_global_free(tdev->mem_glob,
- tdev->dma_buf_size);
mutex_unlock(&prime->mutex);
goto out_unref;
}
@@ -773,7 +687,6 @@ out_unref:
* @shareable: See ttm_base_object_init
* @type: See ttm_base_object_init
* @refcount_release: See ttm_base_object_init
- * @ref_obj_release: See ttm_base_object_init
*
* Initializes an object which is compatible with the drm_prime model
* for data sharing between processes and devices.
@@ -781,9 +694,7 @@ out_unref:
int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
struct ttm_prime_object *prime, bool shareable,
enum ttm_object_type type,
- void (*refcount_release) (struct ttm_base_object **),
- void (*ref_obj_release) (struct ttm_base_object *,
- enum ttm_ref_type ref_type))
+ void (*refcount_release) (struct ttm_base_object **))
{
mutex_init(&prime->mutex);
prime->size = PAGE_ALIGN(size);
@@ -792,6 +703,5 @@ int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
prime->refcount_release = refcount_release;
return ttm_base_object_init(tfile, &prime->base, shareable,
ttm_prime_type,
- ttm_prime_refcount_release,
- ref_obj_release);
+ ttm_prime_refcount_release);
}
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.h b/drivers/gpu/drm/vmwgfx/ttm_object.h
index 49b064f0cb19..4c8700027c6d 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.h
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.h
@@ -42,31 +42,7 @@
#include <linux/list.h>
#include <linux/rcupdate.h>
-#include <drm/drm_hashtab.h>
-
-#include "ttm_memory.h"
-
-/**
- * enum ttm_ref_type
- *
- * Describes what type of reference a ref object holds.
- *
- * TTM_REF_USAGE is a simple refcount on a base object.
- *
- * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
- * buffer object.
- *
- * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
- * buffer object.
- *
- */
-
-enum ttm_ref_type {
- TTM_REF_USAGE,
- TTM_REF_SYNCCPU_READ,
- TTM_REF_SYNCCPU_WRITE,
- TTM_REF_NUM
-};
+#include "vmwgfx_hashtab.h"
/**
* enum ttm_object_type
@@ -78,7 +54,6 @@ enum ttm_ref_type {
enum ttm_object_type {
ttm_fence_type,
- ttm_buffer_type,
ttm_lock_type,
ttm_prime_type,
ttm_driver_type0 = 256,
@@ -129,8 +104,6 @@ struct ttm_base_object {
struct ttm_object_file *tfile;
struct kref refcount;
void (*refcount_release) (struct ttm_base_object **base);
- void (*ref_obj_release) (struct ttm_base_object *base,
- enum ttm_ref_type ref_type);
u32 handle;
enum ttm_object_type object_type;
u32 shareable;
@@ -179,11 +152,7 @@ extern int ttm_base_object_init(struct ttm_object_file *tfile,
bool shareable,
enum ttm_object_type type,
void (*refcount_release) (struct ttm_base_object
- **),
- void (*ref_obj_release) (struct ttm_base_object
- *,
- enum ttm_ref_type
- ref_type));
+ **));
/**
* ttm_base_object_lookup
@@ -247,12 +216,9 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
*/
extern int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
- enum ttm_ref_type ref_type, bool *existed,
+ bool *existed,
bool require_existed);
-extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
- struct ttm_base_object *base);
-
/**
* ttm_ref_object_base_unref
*
@@ -265,8 +231,7 @@ extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
* will be unreferenced.
*/
extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
- unsigned long key,
- enum ttm_ref_type ref_type);
+ unsigned long key);
/**
* ttm_object_file_init - initialize a struct ttm_object file
@@ -297,7 +262,6 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
/**
* ttm_object device init - initialize a struct ttm_object_device
*
- * @mem_glob: struct ttm_mem_global for memory accounting.
* @hash_order: Order of hash table used to hash the base objects.
* @ops: DMA buf ops for prime objects of this device.
*
@@ -306,8 +270,7 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
*/
extern struct ttm_object_device *
-ttm_object_device_init(struct ttm_mem_global *mem_glob,
- unsigned int hash_order,
+ttm_object_device_init(unsigned int hash_order,
const struct dma_buf_ops *ops);
/**
@@ -332,10 +295,7 @@ extern int ttm_prime_object_init(struct ttm_object_file *tfile,
bool shareable,
enum ttm_object_type type,
void (*refcount_release)
- (struct ttm_base_object **),
- void (*ref_obj_release)
- (struct ttm_base_object *,
- enum ttm_ref_type ref_type));
+ (struct ttm_base_object **));
static inline enum ttm_object_type
ttm_base_object_type(struct ttm_base_object *base)
@@ -353,13 +313,6 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
#define ttm_prime_object_kfree(__obj, __prime) \
kfree_rcu(__obj, __prime.base.rhead)
-/*
- * Extra memory required by the base object's idr storage, which is allocated
- * separately from the base object itself. We estimate an on-average 128 bytes
- * per idr.
- */
-#define TTM_OBJ_EXTRA_SIZE 128
-
struct ttm_base_object *
ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
index 6f27d69bad0e..ae2de914eb89 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -354,6 +354,27 @@ void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
}
/**
+ * vmw_binding_cb_offset_update: Update the offset of a cb binding
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ * @shader_slot: The shader slot of the binding.
+ * @slot: The slot of the binding.
+ * @offsetInBytes: The new offset of the binding.
+ *
+ * Updates the offset of an existing cb binding in the context binding
+ * state structure @cbs.
+ */
+void vmw_binding_cb_offset_update(struct vmw_ctx_binding_state *cbs,
+ u32 shader_slot, u32 slot, u32 offsetInBytes)
+{
+ struct vmw_ctx_bindinfo *loc =
+ vmw_binding_loc(cbs, vmw_ctx_binding_cb, shader_slot, slot);
+ struct vmw_ctx_bindinfo_cb *loc_cb =
+ (struct vmw_ctx_bindinfo_cb *)((u8 *) loc);
+ loc_cb->offset = offsetInBytes;
+}
+
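
A hypothetical caller sketch for the new helper, in the style of the execbuf command parsers; cmd and the shader-slot arithmetic are assumptions, not part of this patch:

	vmw_binding_cb_offset_update(cbs,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     cmd->body.slot,
				     cmd->body.offsetInBytes);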
+/**
* vmw_binding_add_uav_index - Add UAV index for tracking.
* @cbs: Pointer to the context binding state tracker.
* @slot: UAV type to which bind this index.
@@ -1070,7 +1091,7 @@ static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs)
size_t cmd_size, view_id_size;
const struct vmw_resource *ctx = vmw_cbs_context(cbs);
- vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS);
+ vmw_collect_view_ids(cbs, loc, vmw_max_num_uavs(cbs->dev_priv));
view_id_size = cbs->bind_cmd_count*sizeof(uint32);
cmd_size = sizeof(*cmd) + view_id_size;
cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
@@ -1100,7 +1121,7 @@ static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs)
size_t cmd_size, view_id_size;
const struct vmw_resource *ctx = vmw_cbs_context(cbs);
- vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS);
+ vmw_collect_view_ids(cbs, loc, vmw_max_num_uavs(cbs->dev_priv));
view_id_size = cbs->bind_cmd_count*sizeof(uint32);
cmd_size = sizeof(*cmd) + view_id_size;
cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
@@ -1327,8 +1348,7 @@ static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
}
/**
- * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with
- * memory accounting.
+ * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state.
*
* @dev_priv: Pointer to a device private structure.
*
@@ -1338,20 +1358,9 @@ struct vmw_ctx_binding_state *
vmw_binding_state_alloc(struct vmw_private *dev_priv)
{
struct vmw_ctx_binding_state *cbs;
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
- int ret;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs),
- &ctx);
- if (ret)
- return ERR_PTR(ret);
cbs = vzalloc(sizeof(*cbs));
if (!cbs) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
return ERR_PTR(-ENOMEM);
}
@@ -1362,17 +1371,13 @@ vmw_binding_state_alloc(struct vmw_private *dev_priv)
}
/**
- * vmw_binding_state_free - Free a struct vmw_ctx_binding_state and its
- * memory accounting info.
+ * vmw_binding_state_free - Free a struct vmw_ctx_binding_state.
*
* @cbs: Pointer to the struct vmw_ctx_binding_state to be freed.
*/
void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs)
{
- struct vmw_private *dev_priv = cbs->dev_priv;
-
vfree(cbs);
- ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
index dcb71fd0bb3b..85b90f7d398d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
@@ -200,7 +200,7 @@ struct vmw_dx_shader_bindings {
* @splice_index: The device splice index set by user-space.
*/
struct vmw_ctx_bindinfo_uav {
- struct vmw_ctx_bindinfo_view views[SVGA3D_MAX_UAVIEWS];
+ struct vmw_ctx_bindinfo_view views[SVGA3D_DX11_1_MAX_UAVIEWS];
uint32 index;
};
@@ -217,6 +217,8 @@ struct vmw_ctx_bindinfo_so {
extern void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
const struct vmw_ctx_bindinfo *ci,
u32 shader_slot, u32 slot);
+extern void vmw_binding_cb_offset_update(struct vmw_ctx_binding_state *cbs,
+ u32 shader_slot, u32 slot, u32 offsetInBytes);
extern void vmw_binding_add_uav_index(struct vmw_ctx_binding_state *cbs,
uint32 slot, uint32 splice_index);
extern void
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index fd007f1c1776..15fead85450c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -33,18 +33,6 @@
/**
- * struct vmw_user_buffer_object - User-space-visible buffer object
- *
- * @prime: The prime object providing user visibility.
- * @vbo: The struct vmw_buffer_object
- */
-struct vmw_user_buffer_object {
- struct ttm_prime_object prime;
- struct vmw_buffer_object vbo;
-};
-
-
-/**
* vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
* vmw_buffer_object.
*
@@ -60,23 +48,6 @@ vmw_buffer_object(struct ttm_buffer_object *bo)
/**
- * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
- * vmw_user_buffer_object.
- *
- * @bo: Pointer to the TTM buffer object.
- * Return: Pointer to the struct vmw_buffer_object embedding the TTM buffer
- * object.
- */
-static struct vmw_user_buffer_object *
-vmw_user_buffer_object(struct ttm_buffer_object *bo)
-{
- struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
-
- return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
-}
-
-
-/**
* vmw_bo_pin_in_placement - Validate a buffer to placement.
*
* @dev_priv: Driver private.
@@ -392,39 +363,6 @@ void vmw_bo_unmap(struct vmw_buffer_object *vbo)
/**
- * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
- *
- * @dev_priv: Pointer to a struct vmw_private identifying the device.
- * @size: The requested buffer size.
- * @user: Whether this is an ordinary dma buffer or a user dma buffer.
- */
-static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
- bool user)
-{
- static size_t struct_size, user_struct_size;
- size_t num_pages = PFN_UP(size);
- size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
-
- if (unlikely(struct_size == 0)) {
- size_t backend_size = ttm_round_pot(vmw_tt_size);
-
- struct_size = backend_size +
- ttm_round_pot(sizeof(struct vmw_buffer_object));
- user_struct_size = backend_size +
- ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
- TTM_OBJ_EXTRA_SIZE;
- }
-
- if (dev_priv->map_mode == vmw_dma_alloc_coherent)
- page_array_size +=
- ttm_round_pot(num_pages * sizeof(dma_addr_t));
-
- return ((user) ? user_struct_size : struct_size) +
- page_array_size;
-}
-
-
-/**
* vmw_bo_bo_free - vmw buffer object destructor
*
* @bo: Pointer to the embedded struct ttm_buffer_object
@@ -436,27 +374,10 @@ void vmw_bo_bo_free(struct ttm_buffer_object *bo)
WARN_ON(vmw_bo->dirty);
WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
vmw_bo_unmap(vmw_bo);
- dma_resv_fini(&bo->base._resv);
+ drm_gem_object_release(&bo->base);
kfree(vmw_bo);
}
-
-/**
- * vmw_user_bo_destroy - vmw buffer object destructor
- *
- * @bo: Pointer to the embedded struct ttm_buffer_object
- */
-static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
-{
- struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
- struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;
-
- WARN_ON(vbo->dirty);
- WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
- vmw_bo_unmap(vbo);
- ttm_prime_object_kfree(vmw_user_bo, prime);
-}
-
/**
* vmw_bo_create_kernel - Create a pinned BO for internal kernel use.
*
@@ -471,33 +392,27 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
struct ttm_placement *placement,
struct ttm_buffer_object **p_bo)
{
- struct ttm_operation_ctx ctx = { false, false };
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
struct ttm_buffer_object *bo;
- size_t acc_size;
+ struct drm_device *vdev = &dev_priv->drm;
int ret;
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (unlikely(!bo))
return -ENOMEM;
- acc_size = ttm_round_pot(sizeof(*bo));
- acc_size += ttm_round_pot(PFN_UP(size) * sizeof(void *));
- acc_size += ttm_round_pot(sizeof(struct ttm_tt));
-
- ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
- if (unlikely(ret))
- goto error_free;
-
+ size = ALIGN(size, PAGE_SIZE);
- bo->base.size = size;
- dma_resv_init(&bo->base._resv);
- drm_vma_node_reset(&bo->base.vma_node);
+ drm_gem_private_object_init(vdev, &bo->base, size);
ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
- ttm_bo_type_device, placement, 0,
+ ttm_bo_type_kernel, placement, 0,
&ctx, NULL, NULL, NULL);
if (unlikely(ret))
- goto error_account;
+ goto error_free;
ttm_bo_pin(bo);
ttm_bo_unreserve(bo);
@@ -505,14 +420,38 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
return 0;
-error_account:
- ttm_mem_global_free(&ttm_mem_glob, acc_size);
-
error_free:
kfree(bo);
return ret;
}
+int vmw_bo_create(struct vmw_private *vmw,
+ size_t size, struct ttm_placement *placement,
+ bool interruptible, bool pin,
+ void (*bo_free)(struct ttm_buffer_object *bo),
+ struct vmw_buffer_object **p_bo)
+{
+ int ret;
+
+ *p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL);
+ if (unlikely(!*p_bo)) {
+ DRM_ERROR("Failed to allocate a buffer.\n");
+ return -ENOMEM;
+ }
+
+ ret = vmw_bo_init(vmw, *p_bo, size,
+ placement, interruptible, pin,
+ bo_free);
+ if (unlikely(ret != 0))
+ goto out_error;
+
+ return ret;
+out_error:
+ kfree(*p_bo);
+ *p_bo = NULL;
+ return ret;
+}
+
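
The new vmw_bo_create() wraps the kmalloc() + vmw_bo_init() sequence that the
converted call sites below share. A minimal usage sketch (hypothetical caller;
the argument values mirror the dummy query buffer conversion in vmwgfx_drv.c
further down):

	struct vmw_buffer_object *vbo;
	int ret;

	ret = vmw_bo_create(dev_priv, PAGE_SIZE, &vmw_sys_placement,
			    false /* interruptible */, true /* pin */,
			    &vmw_bo_bo_free, &vbo);
	if (ret)
		return ret;
	/* ... use vbo ... */
	vmw_bo_unreference(&vbo);
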
/**
* vmw_bo_init - Initialize a vmw buffer object
*
@@ -533,192 +472,44 @@ int vmw_bo_init(struct vmw_private *dev_priv,
bool interruptible, bool pin,
void (*bo_free)(struct ttm_buffer_object *bo))
{
- struct ttm_operation_ctx ctx = { interruptible, false };
+ struct ttm_operation_ctx ctx = {
+ .interruptible = interruptible,
+ .no_wait_gpu = false
+ };
struct ttm_device *bdev = &dev_priv->bdev;
- size_t acc_size;
+ struct drm_device *vdev = &dev_priv->drm;
int ret;
- bool user = (bo_free == &vmw_user_bo_destroy);
-
- WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));
- acc_size = vmw_bo_acc_size(dev_priv, size, user);
+ WARN_ON_ONCE(!bo_free);
memset(vmw_bo, 0, sizeof(*vmw_bo));
BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
vmw_bo->base.priority = 3;
vmw_bo->res_tree = RB_ROOT;
- ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
- if (unlikely(ret))
- return ret;
-
- vmw_bo->base.base.size = size;
- dma_resv_init(&vmw_bo->base.base._resv);
- drm_vma_node_reset(&vmw_bo->base.base.vma_node);
+ size = ALIGN(size, PAGE_SIZE);
+ drm_gem_private_object_init(vdev, &vmw_bo->base.base, size);
ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
- ttm_bo_type_device, placement,
+ ttm_bo_type_device,
+ placement,
0, &ctx, NULL, NULL, bo_free);
if (unlikely(ret)) {
- ttm_mem_global_free(&ttm_mem_glob, acc_size);
return ret;
}
if (pin)
ttm_bo_pin(&vmw_bo->base);
ttm_bo_unreserve(&vmw_bo->base);
- return 0;
-}
-
-
-/**
- * vmw_user_bo_release - TTM reference base object release callback for
- * vmw user buffer objects
- *
- * @p_base: The TTM base object pointer about to be unreferenced.
- *
- * Clears the TTM base object pointer and drops the reference the
- * base object has on the underlying struct vmw_buffer_object.
- */
-static void vmw_user_bo_release(struct ttm_base_object **p_base)
-{
- struct vmw_user_buffer_object *vmw_user_bo;
- struct ttm_base_object *base = *p_base;
-
- *p_base = NULL;
-
- if (unlikely(base == NULL))
- return;
-
- vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
- prime.base);
- ttm_bo_put(&vmw_user_bo->vbo.base);
-}
-
-
-/**
- * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
- * for vmw user buffer objects
- *
- * @base: Pointer to the TTM base object
- * @ref_type: Reference type of the reference reaching zero.
- *
- * Called when user-space drops its last synccpu reference on the buffer
- * object, Either explicitly or as part of a cleanup file close.
- */
-static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
- enum ttm_ref_type ref_type)
-{
- struct vmw_user_buffer_object *user_bo;
- user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);
-
- switch (ref_type) {
- case TTM_REF_SYNCCPU_WRITE:
- atomic_dec(&user_bo->vbo.cpu_writers);
- break;
- default:
- WARN_ONCE(true, "Undefined buffer object reference release.\n");
- }
-}
-
-
-/**
- * vmw_user_bo_alloc - Allocate a user buffer object
- *
- * @dev_priv: Pointer to a struct device private.
- * @tfile: Pointer to a struct ttm_object_file on which to register the user
- * object.
- * @size: Size of the buffer object.
- * @shareable: Boolean whether the buffer is shareable with other open files.
- * @handle: Pointer to where the handle value should be assigned.
- * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
- * should be assigned.
- * @p_base: The TTM base object pointer about to be allocated.
- * Return: Zero on success, negative error code on error.
- */
-int vmw_user_bo_alloc(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t size,
- bool shareable,
- uint32_t *handle,
- struct vmw_buffer_object **p_vbo,
- struct ttm_base_object **p_base)
-{
- struct vmw_user_buffer_object *user_bo;
- int ret;
-
- user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
- if (unlikely(!user_bo)) {
- DRM_ERROR("Failed to allocate a buffer.\n");
- return -ENOMEM;
- }
-
- ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
- (dev_priv->has_mob) ?
- &vmw_sys_placement :
- &vmw_vram_sys_placement, true, false,
- &vmw_user_bo_destroy);
- if (unlikely(ret != 0))
- return ret;
-
- ttm_bo_get(&user_bo->vbo.base);
- ret = ttm_prime_object_init(tfile,
- size,
- &user_bo->prime,
- shareable,
- ttm_buffer_type,
- &vmw_user_bo_release,
- &vmw_user_bo_ref_obj_release);
- if (unlikely(ret != 0)) {
- ttm_bo_put(&user_bo->vbo.base);
- goto out_no_base_object;
- }
-
- *p_vbo = &user_bo->vbo;
- if (p_base) {
- *p_base = &user_bo->prime.base;
- kref_get(&(*p_base)->refcount);
- }
- *handle = user_bo->prime.base.handle;
-
-out_no_base_object:
- return ret;
-}
-
-
-/**
- * vmw_user_bo_verify_access - verify access permissions on this
- * buffer object.
- *
- * @bo: Pointer to the buffer object being accessed
- * @tfile: Identifying the caller.
- */
-int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
- struct ttm_object_file *tfile)
-{
- struct vmw_user_buffer_object *vmw_user_bo;
-
- if (unlikely(bo->destroy != vmw_user_bo_destroy))
- return -EPERM;
-
- vmw_user_bo = vmw_user_buffer_object(bo);
-
- /* Check that the caller has opened the object. */
- if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
- return 0;
-
- DRM_ERROR("Could not grant buffer access.\n");
- return -EPERM;
+ return 0;
}
-
/**
- * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
+ * vmw_user_bo_synccpu_grab - Grab a struct vmw_buffer_object for cpu
* access, idling previous GPU operations on the buffer and optionally
* blocking it for further command submissions.
*
- * @user_bo: Pointer to the buffer object being grabbed for CPU access
- * @tfile: Identifying the caller.
+ * @vmw_bo: Pointer to the buffer object being grabbed for CPU access
* @flags: Flags indicating how the grab should be performed.
* Return: Zero on success, Negative error code on error. In particular,
* -EBUSY will be returned if a dontblock operation is requested and the
@@ -727,13 +518,11 @@ int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
*
 * A blocking grab is released by a matching vmw_user_bo_synccpu_release() call.
*/
-static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
- struct ttm_object_file *tfile,
+static int vmw_user_bo_synccpu_grab(struct vmw_buffer_object *vmw_bo,
uint32_t flags)
{
bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
- struct ttm_buffer_object *bo = &user_bo->vbo.base;
- bool existed;
+ struct ttm_buffer_object *bo = &vmw_bo->base;
int ret;
if (flags & drm_vmw_synccpu_allow_cs) {
@@ -755,17 +544,12 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
ret = ttm_bo_wait(bo, true, nonblock);
if (likely(ret == 0))
- atomic_inc(&user_bo->vbo.cpu_writers);
+ atomic_inc(&vmw_bo->cpu_writers);
ttm_bo_unreserve(bo);
if (unlikely(ret != 0))
return ret;
- ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
- TTM_REF_SYNCCPU_WRITE, &existed, false);
- if (ret != 0 || existed)
- atomic_dec(&user_bo->vbo.cpu_writers);
-
return ret;
}
@@ -773,19 +557,23 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
* vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
* and unblock command submission on the buffer if blocked.
*
+ * @filp: Identifying the caller.
* @handle: Handle identifying the buffer object.
- * @tfile: Identifying the caller.
* @flags: Flags indicating the type of release.
*/
-static int vmw_user_bo_synccpu_release(uint32_t handle,
- struct ttm_object_file *tfile,
- uint32_t flags)
+static int vmw_user_bo_synccpu_release(struct drm_file *filp,
+ uint32_t handle,
+ uint32_t flags)
{
- if (!(flags & drm_vmw_synccpu_allow_cs))
- return ttm_ref_object_base_unref(tfile, handle,
- TTM_REF_SYNCCPU_WRITE);
+ struct vmw_buffer_object *vmw_bo;
+ int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo);
- return 0;
+	if (ret)
+		return ret;
+
+	if (!(flags & drm_vmw_synccpu_allow_cs))
+		atomic_dec(&vmw_bo->cpu_writers);
+	ttm_bo_put(&vmw_bo->base);
+
+	return 0;
}
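
The synccpu grab/release pair now balances a bare atomic counter instead of a
per-file TTM ref object. A condensed sketch of the pairing the two paths above
implement (error handling elided; a nonzero cpu_writers count is what blocks
further command submission on the buffer):

	ret = vmw_user_bo_synccpu_grab(vbo, drm_vmw_synccpu_write);
	/* cpu_writers was incremented: CPU may write, submissions are held off */

	ret = vmw_user_bo_synccpu_release(filp, handle, drm_vmw_synccpu_write);
	/* cpu_writers was decremented: command submission is unblocked again */
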
@@ -807,9 +595,6 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_synccpu_arg *arg =
(struct drm_vmw_synccpu_arg *) data;
struct vmw_buffer_object *vbo;
- struct vmw_user_buffer_object *user_bo;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct ttm_base_object *buffer_base;
int ret;
if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
@@ -822,16 +607,12 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
switch (arg->op) {
case drm_vmw_synccpu_grab:
- ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
- &buffer_base);
+ ret = vmw_user_bo_lookup(file_priv, arg->handle, &vbo);
if (unlikely(ret != 0))
return ret;
- user_bo = container_of(vbo, struct vmw_user_buffer_object,
- vbo);
- ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
+ ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
vmw_bo_unreference(&vbo);
- ttm_base_object_unref(&buffer_base);
if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
ret != -EBUSY)) {
DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
@@ -840,7 +621,8 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
}
break;
case drm_vmw_synccpu_release:
- ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
+ ret = vmw_user_bo_synccpu_release(file_priv,
+ arg->handle,
arg->flags);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
@@ -856,50 +638,6 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
return 0;
}
-
-/**
- * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
- * allocation functionality.
- *
- * @dev: Identifies the drm device.
- * @data: Pointer to the ioctl argument.
- * @file_priv: Identifies the caller.
- * Return: Zero on success, negative error code on error.
- *
- * This function checks the ioctl arguments for validity and allocates a
- * struct vmw_user_buffer_object bo.
- */
-int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct vmw_private *dev_priv = vmw_priv(dev);
- union drm_vmw_alloc_dmabuf_arg *arg =
- (union drm_vmw_alloc_dmabuf_arg *)data;
- struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
- struct drm_vmw_dmabuf_rep *rep = &arg->rep;
- struct vmw_buffer_object *vbo;
- uint32_t handle;
- int ret;
-
- ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
- req->size, false, &handle, &vbo,
- NULL);
- if (unlikely(ret != 0))
- goto out_no_bo;
-
- rep->handle = handle;
- rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
- rep->cur_gmr_id = handle;
- rep->cur_gmr_offset = 0;
-
- vmw_bo_unreference(&vbo);
-
-out_no_bo:
-
- return ret;
-}
-
-
/**
* vmw_bo_unref_ioctl - Generic handle close ioctl.
*
@@ -917,65 +655,48 @@ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_unref_dmabuf_arg *arg =
(struct drm_vmw_unref_dmabuf_arg *)data;
- return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- arg->handle,
- TTM_REF_USAGE);
+ drm_gem_handle_delete(file_priv, arg->handle);
+ return 0;
}
/**
* vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
*
- * @tfile: The TTM object file the handle is registered with.
+ * @filp: The file the handle is registered with.
* @handle: The user buffer object handle
* @out: Pointer to a where a pointer to the embedded
* struct vmw_buffer_object should be placed.
- * @p_base: Pointer to where a pointer to the TTM base object should be
- * placed, or NULL if no such pointer is required.
* Return: Zero on success, Negative error code on error.
*
- * Both the output base object pointer and the vmw buffer object pointer
- * will be refcounted.
+ * The vmw buffer object pointer will be refcounted.
*/
-int vmw_user_bo_lookup(struct ttm_object_file *tfile,
- uint32_t handle, struct vmw_buffer_object **out,
- struct ttm_base_object **p_base)
+int vmw_user_bo_lookup(struct drm_file *filp,
+ uint32_t handle,
+ struct vmw_buffer_object **out)
{
- struct vmw_user_buffer_object *vmw_user_bo;
- struct ttm_base_object *base;
+ struct drm_gem_object *gobj;
- base = ttm_base_object_lookup(tfile, handle);
- if (unlikely(base == NULL)) {
+ gobj = drm_gem_object_lookup(filp, handle);
+ if (!gobj) {
DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
(unsigned long)handle);
return -ESRCH;
}
- if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
- ttm_base_object_unref(&base);
- DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
- (unsigned long)handle);
- return -EINVAL;
- }
-
- vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
- prime.base);
- ttm_bo_get(&vmw_user_bo->vbo.base);
- if (p_base)
- *p_base = base;
- else
- ttm_base_object_unref(&base);
- *out = &vmw_user_bo->vbo;
+ *out = gem_to_vmw_bo(gobj);
+ ttm_bo_get(&(*out)->base);
+ drm_gem_object_put(gobj);
return 0;
}
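
Callers of the GEM-based lookup follow one simple pattern, as the synccpu
ioctl above already shows; a hypothetical sketch:

	struct vmw_buffer_object *vbo;
	int ret;

	ret = vmw_user_bo_lookup(file_priv, handle, &vbo);
	if (ret)
		return ret;
	/* ... vbo carries a TTM reference from the lookup ... */
	vmw_bo_unreference(&vbo);
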
/**
* vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
- * @tfile: The TTM object file the handle is registered with.
+ * @filp: The drm file the handle is registered with.
* @handle: The user buffer object handle.
*
- * This function looks up a struct vmw_user_bo and returns a pointer to the
- * struct vmw_buffer_object it derives from without refcounting the pointer.
- * The returned pointer is only valid until vmw_user_bo_noref_release() is
- * called, and the object pointed to by the returned pointer may be doomed.
+ * This function looks up a struct vmw_buffer_object by GEM handle. Despite
+ * the legacy name, the lookup now takes a reference on the underlying TTM
+ * buffer object, which the caller is expected to drop with ttm_bo_put() once
+ * the object is no longer needed.
@@ -988,52 +709,23 @@ int vmw_user_bo_lookup(struct ttm_object_file *tfile,
* error pointer on failure.
*/
struct vmw_buffer_object *
-vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
+vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle)
{
- struct vmw_user_buffer_object *vmw_user_bo;
- struct ttm_base_object *base;
+ struct vmw_buffer_object *vmw_bo;
+ struct ttm_buffer_object *bo;
+ struct drm_gem_object *gobj = drm_gem_object_lookup(filp, handle);
- base = ttm_base_object_noref_lookup(tfile, handle);
- if (!base) {
+ if (!gobj) {
DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
(unsigned long)handle);
return ERR_PTR(-ESRCH);
}
+ vmw_bo = gem_to_vmw_bo(gobj);
+ bo = ttm_bo_get_unless_zero(&vmw_bo->base);
+ vmw_bo = vmw_buffer_object(bo);
+ drm_gem_object_put(gobj);
- if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
- ttm_base_object_noref_release();
- DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
- (unsigned long)handle);
- return ERR_PTR(-EINVAL);
- }
-
- vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
- prime.base);
- return &vmw_user_bo->vbo;
-}
-
-/**
- * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
- *
- * @tfile: The TTM object file to register the handle with.
- * @vbo: The embedded vmw buffer object.
- * @handle: Pointer to where the new handle should be placed.
- * Return: Zero on success, Negative error code on error.
- */
-int vmw_user_bo_reference(struct ttm_object_file *tfile,
- struct vmw_buffer_object *vbo,
- uint32_t *handle)
-{
- struct vmw_user_buffer_object *user_bo;
-
- if (vbo->base.destroy != vmw_user_bo_destroy)
- return -EINVAL;
-
- user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
-
- *handle = user_bo->prime.base.handle;
- return ttm_ref_object_add(tfile, &user_bo->prime.base,
- TTM_REF_USAGE, NULL, false);
+ return vmw_bo;
}
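
Because ttm_bo_get_unless_zero() can return NULL when it races with a dying
object, this lookup can now yield either an ERR_PTR or NULL, so callers should
test with IS_ERR_OR_NULL(), as the execbuf hunks later in this patch do. A
sketch with a NULL-safe error return:

	vmw_bo = vmw_user_bo_noref_lookup(filp, handle);
	if (IS_ERR_OR_NULL(vmw_bo))
		return vmw_bo ? PTR_ERR(vmw_bo) : -ESRCH;
	/* ... validate ... */
	ttm_bo_put(&vmw_bo->base);
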
@@ -1087,68 +779,15 @@ int vmw_dumb_create(struct drm_file *file_priv,
int ret;
args->pitch = args->width * ((args->bpp + 7) / 8);
- args->size = args->pitch * args->height;
+ args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);
- ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
- args->size, false, &args->handle,
- &vbo, NULL);
- if (unlikely(ret != 0))
- goto out_no_bo;
+ ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
+ args->size, &args->handle,
+ &vbo);
- vmw_bo_unreference(&vbo);
-out_no_bo:
return ret;
}
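
A worked example of the dumb-buffer arithmetic, with hypothetical request
values of 800x600 at 32 bpp and 4 KiB pages:

	pitch = 800 * ((32 + 7) / 8);		/* = 3200 bytes */
	size  = ALIGN(3200 * 600, PAGE_SIZE);	/* 1920000 rounds up to 1921024 */
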
-
-/**
- * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
- *
- * @file_priv: Pointer to a struct drm_file identifying the caller.
- * @dev: Pointer to the drm device.
- * @handle: Handle identifying the dumb buffer.
- * @offset: The address space offset returned.
- * Return: Zero on success, negative error code on failure.
- *
- * This is a driver callback for the core drm dumb_map_offset functionality.
- */
-int vmw_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle,
- uint64_t *offset)
-{
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_buffer_object *out_buf;
- int ret;
-
- ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
- if (ret != 0)
- return -EINVAL;
-
- *offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
- vmw_bo_unreference(&out_buf);
- return 0;
-}
-
-
-/**
- * vmw_dumb_destroy - Destroy a dumb boffer
- *
- * @file_priv: Pointer to a struct drm_file identifying the caller.
- * @dev: Pointer to the drm device.
- * @handle: Handle identifying the dumb buffer.
- * Return: Zero on success, negative error code on failure.
- *
- * This is a driver callback for the core drm dumb_destroy functionality.
- */
-int vmw_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle)
-{
- return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- handle, TTM_REF_USAGE);
-}
-
-
/**
* vmw_bo_swap_notify - swapout notify callback.
*
@@ -1157,8 +796,7 @@ int vmw_dumb_destroy(struct drm_file *file_priv,
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
/* Is @bo embedded in a struct vmw_buffer_object? */
- if (bo->destroy != vmw_bo_bo_free &&
- bo->destroy != vmw_user_bo_destroy)
+	if (!vmw_bo_is_vmw_bo(bo))
return;
/* Kill any cached kernel maps before swapout */
@@ -1182,8 +820,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct vmw_buffer_object *vbo;
	/* Make sure @bo is embedded in a struct vmw_buffer_object. */
- if (bo->destroy != vmw_bo_bo_free &&
- bo->destroy != vmw_user_bo_destroy)
+	if (!vmw_bo_is_vmw_bo(bo))
return;
vbo = container_of(bo, struct vmw_buffer_object, base);
@@ -1204,3 +841,22 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
vmw_resource_unbind_list(vbo);
}
+
+/**
+ * vmw_bo_is_vmw_bo - check if the buffer object is a &vmw_buffer_object
+ * @bo: buffer object to be checked
+ *
+ * Uses the destroy function associated with the object to determine if this is
+ * a &vmw_buffer_object.
+ *
+ * Returns:
+ * true if the object is of &vmw_buffer_object type, false if not.
+ */
+bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &vmw_bo_bo_free ||
+ bo->destroy == &vmw_gem_destroy)
+ return true;
+
+ return false;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index 67db472d3493..a3bfbb6c3e14 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -145,6 +145,13 @@ struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv)
(unsigned int) max,
(unsigned int) min,
(unsigned int) fifo->capabilities);
+
+ if (unlikely(min >= max)) {
+ drm_warn(&dev_priv->drm,
+ "FIFO memory is not usable. Driver failed to initialize.");
+ return ERR_PTR(-ENXIO);
+ }
+
return fifo;
}
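
A hypothetical caller-side sketch for the new early-failure path, since
vmw_fifo_create() now reports the unusable FIFO as an ERR_PTR:

	fifo = vmw_fifo_create(dev_priv);
	if (IS_ERR(fifo))
		return PTR_ERR(fifo);	/* -ENXIO when min >= max */
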
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 8381750db81b..415774fde796 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -42,7 +42,7 @@
*/
struct vmw_cmdbuf_res {
struct vmw_resource *res;
- struct drm_hash_item hash;
+ struct vmwgfx_hash_item hash;
struct list_head head;
enum vmw_cmdbuf_res_state state;
struct vmw_cmdbuf_res_manager *man;
@@ -59,7 +59,7 @@ struct vmw_cmdbuf_res {
* @resources and @list are protected by the cmdbuf mutex for now.
*/
struct vmw_cmdbuf_res_manager {
- struct drm_open_hash resources;
+ struct vmwgfx_open_hash resources;
struct list_head list;
struct vmw_private *dev_priv;
};
@@ -81,11 +81,11 @@ vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
enum vmw_cmdbuf_res_type res_type,
u32 user_key)
{
- struct drm_hash_item *hash;
+ struct vmwgfx_hash_item *hash;
int ret;
unsigned long key = user_key | (res_type << 24);
- ret = drm_ht_find_item(&man->resources, key, &hash);
+ ret = vmwgfx_ht_find_item(&man->resources, key, &hash);
if (unlikely(ret != 0))
return ERR_PTR(ret);
@@ -105,7 +105,7 @@ static void vmw_cmdbuf_res_free(struct vmw_cmdbuf_res_manager *man,
struct vmw_cmdbuf_res *entry)
{
list_del(&entry->head);
- WARN_ON(drm_ht_remove_item(&man->resources, &entry->hash));
+ WARN_ON(vmwgfx_ht_remove_item(&man->resources, &entry->hash));
vmw_resource_unreference(&entry->res);
kfree(entry);
}
@@ -167,7 +167,7 @@ void vmw_cmdbuf_res_revert(struct list_head *list)
vmw_cmdbuf_res_free(entry->man, entry);
break;
case VMW_CMDBUF_RES_DEL:
- ret = drm_ht_insert_item(&entry->man->resources, &entry->hash);
+ ret = vmwgfx_ht_insert_item(&entry->man->resources, &entry->hash);
BUG_ON(ret);
list_move_tail(&entry->head, &entry->man->list);
entry->state = VMW_CMDBUF_RES_COMMITTED;
@@ -206,7 +206,7 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
return -ENOMEM;
cres->hash.key = user_key | (res_type << 24);
- ret = drm_ht_insert_item(&man->resources, &cres->hash);
+ ret = vmwgfx_ht_insert_item(&man->resources, &cres->hash);
if (unlikely(ret != 0)) {
kfree(cres);
goto out_invalid_key;
@@ -244,10 +244,10 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
struct vmw_resource **res_p)
{
struct vmw_cmdbuf_res *entry;
- struct drm_hash_item *hash;
+ struct vmwgfx_hash_item *hash;
int ret;
- ret = drm_ht_find_item(&man->resources, user_key | (res_type << 24),
+ ret = vmwgfx_ht_find_item(&man->resources, user_key | (res_type << 24),
&hash);
if (likely(ret != 0))
return -EINVAL;
@@ -260,7 +260,7 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
*res_p = NULL;
break;
case VMW_CMDBUF_RES_COMMITTED:
- (void) drm_ht_remove_item(&man->resources, &entry->hash);
+ (void) vmwgfx_ht_remove_item(&man->resources, &entry->hash);
list_del(&entry->head);
entry->state = VMW_CMDBUF_RES_DEL;
list_add_tail(&entry->head, list);
@@ -295,7 +295,7 @@ vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
man->dev_priv = dev_priv;
INIT_LIST_HEAD(&man->list);
- ret = drm_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER);
+ ret = vmwgfx_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER);
if (ret == 0)
return man;
@@ -320,26 +320,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
list_for_each_entry_safe(entry, next, &man->list, head)
vmw_cmdbuf_res_free(man, entry);
- drm_ht_remove(&man->resources);
+ vmwgfx_ht_remove(&man->resources);
kfree(man);
}
-/**
- * vmw_cmdbuf_res_man_size - Return the size of a command buffer managed
- * resource manager
- *
- * Returns the approximate allocation size of a command buffer managed
- * resource manager.
- */
-size_t vmw_cmdbuf_res_man_size(void)
-{
- static size_t res_man_size;
-
- if (unlikely(res_man_size == 0))
- res_man_size =
- ttm_round_pot(sizeof(struct vmw_cmdbuf_res_manager)) +
- ttm_round_pot(sizeof(struct hlist_head) <<
- VMW_CMDBUF_RES_MAN_HT_ORDER);
-
- return res_man_size;
-}
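
The hash keys above pack the resource type into the bits above the user key.
A worked example with hypothetical values (user keys are assumed to fit in the
low 24 bits):

	unsigned long key = user_key | (res_type << 24);
	/* user_key 0x0000ab12, res_type 2  ->  key 0x0200ab12 */
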
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 4446758b6880..e0f48cd9529b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -60,8 +60,6 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);
-static uint64_t vmw_user_context_size;
-
static const struct vmw_user_resource_conv user_context_conv = {
.object_type = VMW_RES_CONTEXT,
.base_obj_to_res = vmw_user_context_base_to_res,
@@ -686,7 +684,6 @@ static void vmw_user_context_free(struct vmw_resource *res)
{
struct vmw_user_context *ctx =
container_of(res, struct vmw_user_context, res);
- struct vmw_private *dev_priv = res->dev_priv;
if (ctx->cbs)
vmw_binding_state_free(ctx->cbs);
@@ -694,8 +691,6 @@ static void vmw_user_context_free(struct vmw_resource *res)
(void) vmw_context_bind_dx_query(res, NULL);
ttm_base_object_kfree(ctx, base);
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_user_context_size);
}
/*
@@ -720,7 +715,7 @@ int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
+ return ttm_ref_object_base_unref(tfile, arg->cid);
}
static int vmw_context_define(struct drm_device *dev, void *data,
@@ -732,10 +727,6 @@ static int vmw_context_define(struct drm_device *dev, void *data,
struct vmw_resource *tmp;
struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct ttm_operation_ctx ttm_opt_ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
int ret;
if (!has_sm4_context(dev_priv) && dx) {
@@ -743,25 +734,8 @@ static int vmw_context_define(struct drm_device *dev, void *data,
return -EINVAL;
}
- if (unlikely(vmw_user_context_size == 0))
- vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) +
- ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) +
- + VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- vmw_user_context_size,
- &ttm_opt_ctx);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for context"
- " creation.\n");
- goto out_ret;
- }
-
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (unlikely(!ctx)) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_user_context_size);
ret = -ENOMEM;
goto out_ret;
}
@@ -780,7 +754,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
tmp = vmw_resource_reference(&ctx->res);
ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
- &vmw_user_context_base_release, NULL);
+ &vmw_user_context_base_release);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 17a98db00017..16f986b6cbea 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -407,12 +407,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
* for the new COTable. Initially pin the buffer object to make sure
* we can use tryreserve without failure.
*/
- buf = kzalloc(sizeof(*buf), GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_placement,
- true, true, vmw_bo_bo_free);
+ ret = vmw_bo_create(dev_priv, new_size, &vmw_mob_placement,
+ true, true, vmw_bo_bo_free, &buf);
if (ret) {
DRM_ERROR("Failed initializing new cotable MOB.\n");
return ret;
@@ -546,8 +542,6 @@ static void vmw_hw_cotable_destroy(struct vmw_resource *res)
(void) vmw_cotable_destroy(res);
}
-static size_t cotable_acc_size;
-
/**
* vmw_cotable_free - Cotable resource destructor
*
@@ -555,10 +549,7 @@ static size_t cotable_acc_size;
*/
static void vmw_cotable_free(struct vmw_resource *res)
{
- struct vmw_private *dev_priv = res->dev_priv;
-
kfree(res);
- ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
}
/**
@@ -574,21 +565,9 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
u32 type)
{
struct vmw_cotable *vcotbl;
- struct ttm_operation_ctx ttm_opt_ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
int ret;
u32 num_entries;
- if (unlikely(cotable_acc_size == 0))
- cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable));
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- cotable_acc_size, &ttm_opt_ctx);
- if (unlikely(ret))
- return ERR_PTR(ret);
-
vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
if (unlikely(!vcotbl)) {
ret = -ENOMEM;
@@ -622,7 +601,6 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
out_no_init:
kfree(vcotbl);
out_no_alloc:
- ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index bfd71c86faa5..2d59bdad0373 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -25,7 +25,6 @@
*
**************************************************************************/
-#include <linux/console.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -35,6 +34,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_sysfs.h>
+#include <drm/drm_gem_ttm_helper.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_placement.h>
@@ -51,9 +51,6 @@
#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600
-#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
-
-
/*
* Fully encoded drm commands. Might move to vmw_drm.h
*/
@@ -166,7 +163,7 @@
static const struct drm_ioctl_desc vmw_ioctls[] = {
DRM_IOCTL_DEF_DRV(VMW_GET_PARAM, vmw_getparam_ioctl,
DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
+ DRM_IOCTL_DEF_DRV(VMW_ALLOC_DMABUF, vmw_gem_object_create_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
DRM_RENDER_ALLOW),
@@ -258,8 +255,8 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
};
static const struct pci_device_id vmw_pci_id_list[] = {
- { PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA2) },
- { PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, VMWGFX_PCI_ID_SVGA2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, VMWGFX_PCI_ID_SVGA3) },
{ }
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
@@ -367,6 +364,7 @@ static void vmw_print_sm_type(struct vmw_private *dev_priv)
[VMW_SM_4] = "SM4",
[VMW_SM_4_1] = "SM4_1",
[VMW_SM_5] = "SM_5",
+ [VMW_SM_5_1X] = "SM_5_1X",
[VMW_SM_MAX] = "Invalid"
};
BUILD_BUG_ON(ARRAY_SIZE(names) != (VMW_SM_MAX + 1));
@@ -400,13 +398,9 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
* immediately succeed. This is because we're the only
* user of the bo currently.
*/
- vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
- if (!vbo)
- return -ENOMEM;
-
- ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
- &vmw_sys_placement, false, true,
- &vmw_bo_bo_free);
+ ret = vmw_bo_create(dev_priv, PAGE_SIZE,
+ &vmw_sys_placement, false, true,
+ &vmw_bo_bo_free, &vbo);
if (unlikely(ret != 0))
return ret;
@@ -987,8 +981,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
goto out_err0;
}
- dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
- &vmw_prime_dmabuf_ops);
+ dev_priv->tdev = ttm_object_device_init(12, &vmw_prime_dmabuf_ops);
if (unlikely(dev_priv->tdev == NULL)) {
drm_err(&dev_priv->drm,
@@ -1071,6 +1064,12 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
"3D will be disabled.\n");
dev_priv->has_mob = false;
}
+ if (vmw_sys_man_init(dev_priv) != 0) {
+ drm_info(&dev_priv->drm,
+ "No MOB page table memory available. "
+ "3D will be disabled.\n");
+ dev_priv->has_mob = false;
+ }
}
if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) {
@@ -1078,8 +1077,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
dev_priv->sm_type = VMW_SM_4;
}
- vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
-
/* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */
if (has_sm4_context(dev_priv) &&
(dev_priv->capabilities2 & SVGA_CAP2_DX2)) {
@@ -1087,8 +1084,11 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
dev_priv->sm_type = VMW_SM_4_1;
if (has_sm4_1_context(dev_priv) &&
(dev_priv->capabilities2 & SVGA_CAP2_DX3)) {
- if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM5))
+ if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM5)) {
dev_priv->sm_type = VMW_SM_5;
+ if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_GL43))
+ dev_priv->sm_type = VMW_SM_5_1X;
+ }
}
}
@@ -1121,8 +1121,10 @@ out_no_fifo:
vmw_overlay_close(dev_priv);
vmw_kms_close(dev_priv);
out_no_kms:
- if (dev_priv->has_mob)
+ if (dev_priv->has_mob) {
vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
+ vmw_sys_man_fini(dev_priv);
+ }
if (dev_priv->has_gmr)
vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
vmw_devcaps_destroy(dev_priv);
@@ -1156,7 +1158,7 @@ static void vmw_driver_unload(struct drm_device *dev)
unregister_pm_notifier(&dev_priv->pm_nb);
if (dev_priv->ctx.res_ht_initialized)
- drm_ht_remove(&dev_priv->ctx.res_ht);
+ vmwgfx_ht_remove(&dev_priv->ctx.res_ht);
vfree(dev_priv->ctx.cmd_bounce);
if (dev_priv->enable_fb) {
vmw_fb_off(dev_priv);
@@ -1172,8 +1174,10 @@ static void vmw_driver_unload(struct drm_device *dev)
vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
vmw_release_device_early(dev_priv);
- if (dev_priv->has_mob)
+ if (dev_priv->has_mob) {
vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
+ vmw_sys_man_fini(dev_priv);
+ }
vmw_devcaps_destroy(dev_priv);
vmw_vram_manager_fini(dev_priv);
ttm_device_fini(&dev_priv->bdev);
@@ -1388,7 +1392,6 @@ static void vmw_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- ttm_mem_global_release(&ttm_mem_glob);
drm_dev_unregister(dev);
vmw_driver_unload(dev);
}
@@ -1576,7 +1579,7 @@ static const struct file_operations vmwgfx_driver_fops = {
static const struct drm_driver driver = {
.driver_features =
- DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
+ DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM,
.ioctls = vmw_ioctls,
.num_ioctls = ARRAY_SIZE(vmw_ioctls),
.master_set = vmw_master_set,
@@ -1585,8 +1588,7 @@ static const struct drm_driver driver = {
.postclose = vmw_postclose,
.dumb_create = vmw_dumb_create,
- .dumb_map_offset = vmw_dumb_map_offset,
- .dumb_destroy = vmw_dumb_destroy,
+ .dumb_map_offset = drm_gem_ttm_dumb_map_offset,
.prime_fd_to_handle = vmw_prime_fd_to_handle,
.prime_handle_to_fd = vmw_prime_handle_to_fd,
@@ -1617,41 +1619,43 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
if (ret)
- return ret;
+ goto out_error;
ret = pcim_enable_device(pdev);
if (ret)
- return ret;
+ goto out_error;
vmw = devm_drm_dev_alloc(&pdev->dev, &driver,
struct vmw_private, drm);
- if (IS_ERR(vmw))
- return PTR_ERR(vmw);
+ if (IS_ERR(vmw)) {
+ ret = PTR_ERR(vmw);
+ goto out_error;
+ }
pci_set_drvdata(pdev, &vmw->drm);
- ret = ttm_mem_global_init(&ttm_mem_glob, &pdev->dev);
- if (ret)
- return ret;
-
ret = vmw_driver_load(vmw, ent->device);
if (ret)
- return ret;
+ goto out_error;
ret = drm_dev_register(&vmw->drm, 0);
- if (ret) {
- vmw_driver_unload(&vmw->drm);
- return ret;
- }
+ if (ret)
+ goto out_unload;
+
+ vmw_debugfs_gem_init(vmw);
return 0;
+out_unload:
+ vmw_driver_unload(&vmw->drm);
+out_error:
+ return ret;
}
static int __init vmwgfx_init(void)
{
int ret;
- if (vgacon_text_force())
+ if (drm_firmware_drivers_only())
return -EINVAL;
ret = pci_register_driver(&vmw_pci_driver);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 858aff99a3fe..4ec2b99351cf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -34,7 +34,6 @@
#include <drm/drm_auth.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
-#include <drm/drm_hashtab.h>
#include <drm/drm_rect.h>
#include <drm/ttm/ttm_bo_driver.h>
@@ -43,6 +42,7 @@
#include "ttm_object.h"
#include "vmwgfx_fence.h"
+#include "vmwgfx_hashtab.h"
#include "vmwgfx_reg.h"
#include "vmwgfx_validation.h"
@@ -54,9 +54,9 @@
#define VMWGFX_DRIVER_NAME "vmwgfx"
-#define VMWGFX_DRIVER_DATE "20210722"
+#define VMWGFX_DRIVER_DATE "20211206"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 19
+#define VMWGFX_DRIVER_MINOR 20
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
@@ -82,8 +82,9 @@
VMWGFX_NUM_GB_SURFACE +\
VMWGFX_NUM_GB_SCREEN_TARGET)
-#define VMW_PL_GMR (TTM_PL_PRIV + 0)
-#define VMW_PL_MOB (TTM_PL_PRIV + 1)
+#define VMW_PL_GMR (TTM_PL_PRIV + 0)
+#define VMW_PL_MOB (TTM_PL_PRIV + 1)
+#define VMW_PL_SYSTEM (TTM_PL_PRIV + 2)
#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
@@ -133,7 +134,7 @@ struct vmw_buffer_object {
*/
struct vmw_validate_buffer {
struct ttm_validate_buffer base;
- struct drm_hash_item hash;
+ struct vmwgfx_hash_item hash;
bool validate_as_mob;
};
@@ -332,7 +333,6 @@ struct vmw_sg_table {
struct page **pages;
const dma_addr_t *addrs;
struct sg_table *sgt;
- unsigned long num_regions;
unsigned long num_pages;
};
@@ -360,6 +360,19 @@ struct vmw_piter {
dma_addr_t (*dma_address)(struct vmw_piter *);
};
+
+struct vmw_ttm_tt {
+ struct ttm_tt dma_ttm;
+ struct vmw_private *dev_priv;
+ int gmr_id;
+ struct vmw_mob *mob;
+ int mem_type;
+ struct sg_table sgt;
+ struct vmw_sg_table vsgt;
+ bool mapped;
+ bool bound;
+};
+
/*
* enum vmw_display_unit_type - Describes the display unit
*/
@@ -406,10 +419,11 @@ struct vmw_ctx_validation_info;
* @ctx: The validation context
*/
struct vmw_sw_context{
- struct drm_open_hash res_ht;
+ struct vmwgfx_open_hash res_ht;
bool res_ht_initialized;
bool kernel;
struct vmw_fpriv *fp;
+ struct drm_file *filp;
uint32_t *cmd_bounce;
uint32_t cmd_bounce_size;
struct vmw_buffer_object *cur_query_bo;
@@ -473,6 +487,7 @@ enum {
 * @VMW_SM_4: Context support up to SM4.
 * @VMW_SM_4_1: Context support up to SM4_1.
* @VMW_SM_5: Context support up to SM5.
+ * @VMW_SM_5_1X: Adds support for the SM5_1 and GL43 extensions.
* @VMW_SM_MAX: Should be the last.
*/
enum vmw_sm_type {
@@ -480,6 +495,7 @@ enum vmw_sm_type {
VMW_SM_4,
VMW_SM_4_1,
VMW_SM_5,
+ VMW_SM_5_1X,
VMW_SM_MAX
};
@@ -627,9 +643,6 @@ struct vmw_private {
struct vmw_cmdbuf_man *cman;
DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
- /* Validation memory reservation */
- struct vmw_validation_mem vvm;
-
uint32 *devcaps;
/*
@@ -645,6 +658,11 @@ struct vmw_private {
#endif
};
+static inline struct vmw_buffer_object *gem_to_vmw_bo(struct drm_gem_object *gobj)
+{
+	return container_of(gobj, struct vmw_buffer_object, base.base);
+}
+
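
The new helper walks back up the embedding chain; a sketch of the layout it
assumes, with base.base being the GEM object inside the TTM buffer object:

	/*
	 *   struct vmw_buffer_object
	 *     .base	struct ttm_buffer_object
	 *       .base	struct drm_gem_object
	 */
	struct vmw_buffer_object *vbo = gem_to_vmw_bo(gobj); /* gobj: a looked-up GEM object */
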
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
return container_of(res, struct vmw_surface, res);
@@ -738,6 +756,24 @@ static inline bool has_sm5_context(const struct vmw_private *dev_priv)
return (dev_priv->sm_type >= VMW_SM_5);
}
+/**
+ * has_gl43_context - Does the device support a GL43 context.
+ * @dev_priv: Device private.
+ *
+ * Return: True if the device supports a GL43 context, false otherwise.
+ */
+static inline bool has_gl43_context(const struct vmw_private *dev_priv)
+{
+ return (dev_priv->sm_type >= VMW_SM_5_1X);
+}
+
+
+static inline u32 vmw_max_num_uavs(struct vmw_private *dev_priv)
+{
+ return (has_gl43_context(dev_priv) ?
+ SVGA3D_DX11_1_MAX_UAVIEWS : SVGA3D_MAX_UAVIEWS);
+}
+
extern void vmw_svga_enable(struct vmw_private *dev_priv);
extern void vmw_svga_disable(struct vmw_private *dev_priv);
@@ -767,7 +803,7 @@ extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
+ struct drm_file *filp,
uint32_t handle,
struct vmw_surface **out_surf,
struct vmw_buffer_object **out_buf);
@@ -833,6 +869,7 @@ static inline void vmw_user_resource_noref_release(void)
/**
* Buffer object helper functions - vmwgfx_bo.c
*/
+extern bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo);
extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
struct vmw_buffer_object *bo,
struct ttm_placement *placement,
@@ -857,32 +894,23 @@ extern int vmw_bo_create_kernel(struct vmw_private *dev_priv,
unsigned long size,
struct ttm_placement *placement,
struct ttm_buffer_object **p_bo);
+extern int vmw_bo_create(struct vmw_private *dev_priv,
+ size_t size, struct ttm_placement *placement,
+ bool interruptible, bool pin,
+ void (*bo_free)(struct ttm_buffer_object *bo),
+ struct vmw_buffer_object **p_bo);
extern int vmw_bo_init(struct vmw_private *dev_priv,
struct vmw_buffer_object *vmw_bo,
size_t size, struct ttm_placement *placement,
bool interruptible, bool pin,
void (*bo_free)(struct ttm_buffer_object *bo));
-extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
- struct ttm_object_file *tfile);
-extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t size,
- bool shareable,
- uint32_t *handle,
- struct vmw_buffer_object **p_dma_buf,
- struct ttm_base_object **p_base);
-extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
- struct vmw_buffer_object *dma_buf,
- uint32_t *handle);
-extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
- uint32_t id, struct vmw_buffer_object **out,
- struct ttm_base_object **base);
+extern int vmw_user_bo_lookup(struct drm_file *filp,
+ uint32_t handle,
+ struct vmw_buffer_object **out);
extern void vmw_bo_fence_single(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence);
extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
@@ -891,16 +919,7 @@ extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *mem);
extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
extern struct vmw_buffer_object *
-vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle);
-
-/**
- * vmw_user_bo_noref_release - release a buffer object pointer looked up
- * without reference
- */
-static inline void vmw_user_bo_noref_release(void)
-{
- ttm_base_object_noref_release();
-}
+vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle);
/**
* vmw_bo_adjust_prio - Adjust the buffer object eviction priority
@@ -952,6 +971,19 @@ static inline void vmw_bo_prio_del(struct vmw_buffer_object *vbo, int prio)
}
/**
+ * GEM related functionality - vmwgfx_gem.c
+ */
+extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
+ struct drm_file *filp,
+ uint32_t size,
+ uint32_t *handle,
+ struct vmw_buffer_object **p_vbo);
+extern int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+extern void vmw_gem_destroy(struct ttm_buffer_object *bo);
+extern void vmw_debugfs_gem_init(struct vmw_private *vdev);
+
+/**
* Misc Ioctl functionality - vmwgfx_ioctl.c
*/
@@ -1027,9 +1059,6 @@ vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv)
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
-extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
- size_t gran);
-
/**
* TTM buffer object driver - vmwgfx_ttm_buffer.c
*/
@@ -1039,7 +1068,6 @@ extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_sys_placement;
-extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_nonfixed_placement;
@@ -1218,13 +1246,6 @@ void vmw_kms_lost_device(struct drm_device *dev);
int vmw_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-
-int vmw_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle,
- uint64_t *offset);
-int vmw_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle);
extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
@@ -1252,6 +1273,12 @@ int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type);
void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type);
/**
+ * System memory manager
+ */
+int vmw_sys_man_init(struct vmw_private *dev_priv);
+void vmw_sys_man_fini(struct vmw_private *dev_priv);
+
+/**
* Prime - vmwgfx_prime.c
*/
@@ -1322,18 +1349,6 @@ extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int vmw_surface_gb_priv_define(struct drm_device *dev,
- uint32_t user_accounting_size,
- SVGA3dSurfaceAllFlags svga3d_flags,
- SVGA3dSurfaceFormat format,
- bool for_scanout,
- uint32_t num_mip_levels,
- uint32_t multisample_count,
- uint32_t array_size,
- struct drm_vmw_size size,
- SVGA3dMSPattern multisample_pattern,
- SVGA3dMSQualityLevel quality_level,
- struct vmw_surface **srf_out);
extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev,
void *data,
struct drm_file *file_priv);
@@ -1342,7 +1357,6 @@ extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
struct drm_file *file_priv);
int vmw_gb_surface_define(struct vmw_private *dev_priv,
- uint32_t user_accounting_size,
const struct vmw_surface_metadata *req,
struct vmw_surface **srf_out);
@@ -1403,7 +1417,6 @@ void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv,
extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
-extern size_t vmw_cmdbuf_res_man_size(void);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
enum vmw_cmdbuf_res_type res_type,
@@ -1600,11 +1613,6 @@ vmw_bo_reference(struct vmw_buffer_object *buf)
return buf;
}
-static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
-{
- return &ttm_mem_glob;
-}
-
static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
atomic_inc(&dev_priv->num_fifo_resources);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 5f2ffa9de5c8..44ca23b0ea4e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1171,14 +1171,13 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
int ret;
vmw_validation_preload_bo(sw_context->ctx);
- vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
- if (IS_ERR(vmw_bo)) {
+ vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle);
+	if (IS_ERR_OR_NULL(vmw_bo)) {
 		VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
-		return PTR_ERR(vmw_bo);
+		return vmw_bo ? PTR_ERR(vmw_bo) : -ESRCH;
}
-
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
- vmw_user_bo_noref_release();
+ ttm_bo_put(&vmw_bo->base);
if (unlikely(ret != 0))
return ret;
@@ -1226,14 +1225,13 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
int ret;
vmw_validation_preload_bo(sw_context->ctx);
- vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
- if (IS_ERR(vmw_bo)) {
+ vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle);
+	if (IS_ERR_OR_NULL(vmw_bo)) {
 		VMW_DEBUG_USER("Could not find or use GMR region.\n");
-		return PTR_ERR(vmw_bo);
+		return vmw_bo ? PTR_ERR(vmw_bo) : -ESRCH;
}
-
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
- vmw_user_bo_noref_release();
+ ttm_bo_put(&vmw_bo->base);
if (unlikely(ret != 0))
return ret;
@@ -2166,6 +2164,44 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
}
/**
+ * vmw_cmd_dx_set_constant_buffer_offset - Validate
+ * SVGA_3D_CMD_DX_SET_VS/PS/GS/HS/DS/CS_CONSTANT_BUFFER_OFFSET command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int
+vmw_cmd_dx_set_constant_buffer_offset(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetConstantBufferOffset);
+
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
+ u32 shader_slot;
+
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ if (!ctx_node)
+ return -EINVAL;
+
+ cmd = container_of(header, typeof(*cmd), header);
+ if (cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
+ VMW_DEBUG_USER("Illegal const buffer slot %u.\n",
+ (unsigned int) cmd->body.slot);
+ return -EINVAL;
+ }
+
+ shader_slot = cmd->header.id - SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET;
+ vmw_binding_cb_offset_update(ctx_node->staged, shader_slot,
+ cmd->body.slot, cmd->body.offsetInBytes);
+
+ return 0;
+}
+
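
The shader_slot derivation above relies on the six *_CONSTANT_BUFFER_OFFSET
command ids being consecutive in VS, PS, GS, HS, DS, CS order (matching the
dispatch-table additions below), so the shader stage falls out of plain id
arithmetic; for the pixel-shader variant, for instance:

	/* SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET -
	 * SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET == 1, the PS slot */
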
+/**
* vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
* command
*
@@ -2918,7 +2954,7 @@ static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
if (!has_sm5_context(dev_priv))
return -EINVAL;
- if (num_uav > SVGA3D_MAX_UAVIEWS) {
+ if (num_uav > vmw_max_num_uavs(dev_priv)) {
VMW_DEBUG_USER("Invalid UAV binding.\n");
return -EINVAL;
}
@@ -2950,7 +2986,7 @@ static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
if (!has_sm5_context(dev_priv))
return -EINVAL;
- if (num_uav > SVGA3D_MAX_UAVIEWS) {
+ if (num_uav > vmw_max_num_uavs(dev_priv)) {
VMW_DEBUG_USER("Invalid UAV binding.\n");
return -EINVAL;
}
@@ -3528,6 +3564,24 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
&vmw_cmd_dx_transfer_from_buffer,
true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET,
+ &vmw_cmd_dx_set_constant_buffer_offset,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET,
+ &vmw_cmd_dx_set_constant_buffer_offset,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET,
+ &vmw_cmd_dx_set_constant_buffer_offset,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET,
+ &vmw_cmd_dx_set_constant_buffer_offset,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET,
+ &vmw_cmd_dx_set_constant_buffer_offset,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET,
+ &vmw_cmd_dx_set_constant_buffer_offset,
+ true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
true, false, true),
@@ -3561,6 +3615,8 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
&vmw_cmd_dx_define_streamoutput, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
&vmw_cmd_dx_bind_streamoutput, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2,
+ &vmw_cmd_dx_so_define, true, false, true),
};
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
@@ -3869,8 +3925,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
fence_rep.fd = -1;
}
- ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
- TTM_REF_USAGE);
+ ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle);
VMW_DEBUG_USER("Fence copy error. Syncing.\n");
(void) vmw_fence_obj_wait(fence, false, false,
VMW_FENCE_WAIT_TIMEOUT);
@@ -4054,8 +4109,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
struct sync_file *sync_file = NULL;
DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
- vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
-
if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
if (out_fence_fd < 0) {
@@ -4101,6 +4154,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
sw_context->kernel = true;
}
+ sw_context->filp = file_priv;
sw_context->fp = vmw_fpriv(file_priv);
INIT_LIST_HEAD(&sw_context->ctx_list);
sw_context->cur_query_bo = dev_priv->pinned_bo;
@@ -4117,7 +4171,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
vmw_binding_state_reset(sw_context->staged_bindings);
if (!sw_context->res_ht_initialized) {
- ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
+ ret = vmwgfx_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
if (unlikely(ret != 0))
goto out_unlock;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index d18c6a56e3dc..8ee34576c7d0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -394,22 +394,15 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
struct vmw_buffer_object *vmw_bo;
int ret;
- vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
- if (!vmw_bo) {
- ret = -ENOMEM;
- goto err_unlock;
- }
-
- ret = vmw_bo_init(vmw_priv, vmw_bo, size,
+ ret = vmw_bo_create(vmw_priv, size,
&vmw_sys_placement,
false, false,
- &vmw_bo_bo_free);
+ &vmw_bo_bo_free, &vmw_bo);
if (unlikely(ret != 0))
- goto err_unlock; /* init frees the buffer on failure */
+ return ret;
*out = vmw_bo;
-err_unlock:
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 9fe12329a4d5..c60d395f9e2e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -37,9 +37,6 @@ struct vmw_fence_manager {
spinlock_t lock;
struct list_head fence_list;
struct work_struct work;
- u32 user_fence_size;
- u32 fence_size;
- u32 event_fence_action_size;
bool fifo_down;
struct list_head cleanup_list;
uint32_t pending_actions[VMW_ACTION_MAX];
@@ -304,11 +301,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
INIT_LIST_HEAD(&fman->cleanup_list);
INIT_WORK(&fman->work, &vmw_fence_work_func);
fman->fifo_down = true;
- fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
- TTM_OBJ_EXTRA_SIZE;
- fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
- fman->event_fence_action_size =
- ttm_round_pot(sizeof(struct vmw_event_fence_action));
mutex_init(&fman->goal_irq_mutex);
fman->ctx = dma_fence_context_alloc(1);
@@ -560,14 +552,8 @@ static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
struct vmw_user_fence *ufence =
container_of(fence, struct vmw_user_fence, fence);
- struct vmw_fence_manager *fman = fman_from_fence(fence);
ttm_base_object_kfree(ufence, base);
- /*
- * Free kernel space accounting.
- */
- ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
- fman->user_fence_size);
}
static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
@@ -590,23 +576,8 @@ int vmw_user_fence_create(struct drm_file *file_priv,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_user_fence *ufence;
struct vmw_fence_obj *tmp;
- struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
int ret;
- /*
- * Kernel memory space accounting, since this object may
- * be created by a user-space request.
- */
-
- ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
- &ctx);
- if (unlikely(ret != 0))
- return ret;
-
ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
if (unlikely(!ufence)) {
ret = -ENOMEM;
@@ -625,9 +596,10 @@ int vmw_user_fence_create(struct drm_file *file_priv,
* vmw_user_fence_base_release.
*/
tmp = vmw_fence_obj_reference(&ufence->fence);
+
ret = ttm_base_object_init(tfile, &ufence->base, false,
VMW_RES_FENCE,
- &vmw_user_fence_base_release, NULL);
+ &vmw_user_fence_base_release);
if (unlikely(ret != 0)) {
@@ -646,7 +618,6 @@ out_err:
tmp = &ufence->fence;
vmw_fence_obj_unreference(&tmp);
out_no_object:
- ttm_mem_global_free(mem_glob, fman->user_fence_size);
return ret;
}
@@ -831,8 +802,7 @@ out:
*/
if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
- return ttm_ref_object_base_unref(tfile, arg->handle,
- TTM_REF_USAGE);
+ return ttm_ref_object_base_unref(tfile, arg->handle);
return ret;
}
@@ -874,8 +844,7 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
(struct drm_vmw_fence_arg *) data;
return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- arg->handle,
- TTM_REF_USAGE);
+ arg->handle);
}
/**
@@ -1121,7 +1090,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
if (user_fence_rep != NULL) {
ret = ttm_ref_object_add(vmw_fp->tfile, base,
- TTM_REF_USAGE, NULL, false);
+ NULL, false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to reference a fence "
"object.\n");
@@ -1164,7 +1133,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
return 0;
out_no_create:
if (user_fence_rep != NULL)
- ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+ ttm_ref_object_base_unref(tfile, handle);
out_no_ref_obj:
vmw_fence_obj_unreference(&fence);
return ret;
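
The fence hunks also carry two TTM object API simplifications used across the whole series: the enum ttm_ref_type argument (always TTM_REF_USAGE in this driver) is dropped, and ttm_base_object_init() loses its trailing release argument. The declarations below are inferred from the call sites in this diff, not quoted from ttm_object.h:

/* Inferred from the call sites above; see the ttm_object.h changes
 * in this series for the authoritative declarations. */
int ttm_ref_object_add(struct ttm_object_file *tfile,
		       struct ttm_base_object *base,
		       bool *existed, bool require_existed);
int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
			      unsigned long key);
int ttm_base_object_init(struct ttm_object_file *tfile,
			 struct ttm_base_object *base, bool shareable,
			 enum ttm_object_type object_type,
			 void (*refcount_release)(struct ttm_base_object **));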
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
new file mode 100644
index 000000000000..c016c434b6cb
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright 2021 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include "vmwgfx_drv.h"
+
+#include "drm/drm_prime.h"
+#include "drm/drm_gem_ttm_helper.h"
+
+/**
+ * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
+ * vmw_buffer_object.
+ *
+ * @bo: Pointer to the TTM buffer object.
+ * Return: Pointer to the struct vmw_buffer_object embedding the
+ * TTM buffer object.
+ */
+static struct vmw_buffer_object *
+vmw_buffer_object(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct vmw_buffer_object, base);
+}
+
+static void vmw_gem_object_free(struct drm_gem_object *gobj)
+{
+ struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gobj);
+
+ if (bo)
+ ttm_bo_put(bo);
+}
+
+static int vmw_gem_object_open(struct drm_gem_object *obj,
+ struct drm_file *file_priv)
+{
+ return 0;
+}
+
+static void vmw_gem_object_close(struct drm_gem_object *obj,
+ struct drm_file *file_priv)
+{
+}
+
+static int vmw_gem_pin_private(struct drm_gem_object *obj, bool do_pin)
+{
+ struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
+ struct vmw_buffer_object *vbo = vmw_buffer_object(bo);
+ int ret;
+
+ ret = ttm_bo_reserve(bo, false, false, NULL);
+ if (unlikely(ret != 0))
+ goto err;
+
+ vmw_bo_pin_reserved(vbo, do_pin);
+
+ ttm_bo_unreserve(bo);
+
+err:
+ return ret;
+}
+
+
+static int vmw_gem_object_pin(struct drm_gem_object *obj)
+{
+ return vmw_gem_pin_private(obj, true);
+}
+
+static void vmw_gem_object_unpin(struct drm_gem_object *obj)
+{
+ vmw_gem_pin_private(obj, false);
+}
+
+static struct sg_table *vmw_gem_object_get_sg_table(struct drm_gem_object *obj)
+{
+ struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
+ struct vmw_ttm_tt *vmw_tt =
+ container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
+
+ if (vmw_tt->vsgt.sgt)
+ return vmw_tt->vsgt.sgt;
+
+ return drm_prime_pages_to_sg(obj->dev, vmw_tt->dma_ttm.pages, vmw_tt->dma_ttm.num_pages);
+}
+
+
+static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
+ .free = vmw_gem_object_free,
+ .open = vmw_gem_object_open,
+ .close = vmw_gem_object_close,
+ .print_info = drm_gem_ttm_print_info,
+ .pin = vmw_gem_object_pin,
+ .unpin = vmw_gem_object_unpin,
+ .get_sg_table = vmw_gem_object_get_sg_table,
+ .vmap = drm_gem_ttm_vmap,
+ .vunmap = drm_gem_ttm_vunmap,
+ .mmap = drm_gem_ttm_mmap,
+};
+
+/**
+ * vmw_gem_destroy - vmw buffer object destructor
+ *
+ * @bo: Pointer to the embedded struct ttm_buffer_object
+ */
+void vmw_gem_destroy(struct ttm_buffer_object *bo)
+{
+ struct vmw_buffer_object *vbo = vmw_buffer_object(bo);
+
+ WARN_ON(vbo->dirty);
+ WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
+ vmw_bo_unmap(vbo);
+ drm_gem_object_release(&vbo->base.base);
+ kfree(vbo);
+}
+
+int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
+ struct drm_file *filp,
+ uint32_t size,
+ uint32_t *handle,
+ struct vmw_buffer_object **p_vbo)
+{
+ int ret;
+
+ ret = vmw_bo_create(dev_priv, size,
+ (dev_priv->has_mob) ?
+ &vmw_sys_placement :
+ &vmw_vram_sys_placement,
+ true, false, &vmw_gem_destroy, p_vbo);
+
+ if (ret != 0)
+ goto out_no_bo;
+
+ (*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
+
+ ret = drm_gem_handle_create(filp, &(*p_vbo)->base.base, handle);
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_put(&(*p_vbo)->base.base);
+out_no_bo:
+ return ret;
+}
+
+
+int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ union drm_vmw_alloc_dmabuf_arg *arg =
+ (union drm_vmw_alloc_dmabuf_arg *)data;
+ struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
+ struct drm_vmw_dmabuf_rep *rep = &arg->rep;
+ struct vmw_buffer_object *vbo;
+ uint32_t handle;
+ int ret;
+
+ ret = vmw_gem_object_create_with_handle(dev_priv, filp,
+ req->size, &handle, &vbo);
+ if (ret)
+ goto out_no_bo;
+
+ rep->handle = handle;
+ rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
+ rep->cur_gmr_id = handle;
+ rep->cur_gmr_offset = 0;
+out_no_bo:
+ return ret;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static void vmw_bo_print_info(int id, struct vmw_buffer_object *bo, struct seq_file *m)
+{
+ const char *placement;
+ const char *type;
+
+ switch (bo->base.resource->mem_type) {
+ case TTM_PL_SYSTEM:
+ placement = " CPU";
+ break;
+ case VMW_PL_GMR:
+ placement = " GMR";
+ break;
+ case VMW_PL_MOB:
+ placement = " MOB";
+ break;
+ case VMW_PL_SYSTEM:
+ placement = "VCPU";
+ break;
+ case TTM_PL_VRAM:
+ placement = "VRAM";
+ break;
+ default:
+ placement = "None";
+ break;
+ }
+
+ switch (bo->base.type) {
+ case ttm_bo_type_device:
+ type = "device";
+ break;
+ case ttm_bo_type_kernel:
+ type = "kernel";
+ break;
+ case ttm_bo_type_sg:
+ type = "sg ";
+ break;
+ default:
+ type = "none ";
+ break;
+ }
+
+ seq_printf(m, "\t\t0x%08x: %12ld bytes %s, type = %s",
+ id, bo->base.base.size, placement, type);
+ seq_printf(m, ", priority = %u, pin_count = %u, GEM refs = %d, TTM refs = %d",
+ bo->base.priority,
+ bo->base.pin_count,
+ kref_read(&bo->base.base.refcount),
+ kref_read(&bo->base.kref));
+ seq_puts(m, "\n");
+}
+
+static int vmw_debugfs_gem_info_show(struct seq_file *m, void *unused)
+{
+ struct vmw_private *vdev = (struct vmw_private *)m->private;
+ struct drm_device *dev = &vdev->drm;
+ struct drm_file *file;
+ int r;
+
+ r = mutex_lock_interruptible(&dev->filelist_mutex);
+ if (r)
+ return r;
+
+ list_for_each_entry(file, &dev->filelist, lhead) {
+ struct task_struct *task;
+ struct drm_gem_object *gobj;
+ int id;
+
+ /*
+ * Although we have a valid reference on file->pid, that does
+ * not guarantee that the task_struct who called get_pid() is
+ * still alive (e.g. get_pid(current) => fork() => exit()).
+ * Therefore, we need to protect this ->comm access using RCU.
+ */
+ rcu_read_lock();
+ task = pid_task(file->pid, PIDTYPE_PID);
+ seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
+ task ? task->comm : "<unknown>");
+ rcu_read_unlock();
+
+ spin_lock(&file->table_lock);
+ idr_for_each_entry(&file->object_idr, gobj, id) {
+ struct vmw_buffer_object *bo = gem_to_vmw_bo(gobj);
+
+ vmw_bo_print_info(id, bo, m);
+ }
+ spin_unlock(&file->table_lock);
+ }
+
+ mutex_unlock(&dev->filelist_mutex);
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(vmw_debugfs_gem_info);
+
+#endif
+
+void vmw_debugfs_gem_init(struct vmw_private *vdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = vdev->drm.primary;
+ struct dentry *root = minor->debugfs_root;
+
+ debugfs_create_file("vmwgfx_gem_info", 0444, root, vdev,
+ &vmw_debugfs_gem_info_fops);
+#endif
+}
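
Note the reference discipline in vmw_gem_object_create_with_handle(): drm_gem_handle_create() takes its own reference and the creation reference is dropped immediately, so the handle is the sole owner. A caller that wants to keep the returned pointer must add a reference of its own, which is exactly what the surface code later in this diff does:

/* Usage sketch, mirroring vmwgfx_surface.c below; 'size',
 * 'file_priv' and 'dev_priv' stand in for the caller's context. */
struct vmw_buffer_object *vbo;
u32 handle;
int ret;

ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
					 size, &handle, &vbo);
if (ret == 0)
	vmw_bo_reference(vbo);	/* keep vbo alive beyond the handle */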
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index b2c4af331c9d..ebb4505a31a3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -42,6 +42,7 @@ struct vmwgfx_gmrid_man {
uint32_t max_gmr_ids;
uint32_t max_gmr_pages;
uint32_t used_gmr_pages;
+ uint8_t type;
};
static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_resource_manager *man)
@@ -132,6 +133,18 @@ static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
kfree(res);
}
+static void vmw_gmrid_man_debug(struct ttm_resource_manager *man,
+ struct drm_printer *printer)
+{
+ struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
+
+ BUG_ON(gman->type != VMW_PL_GMR && gman->type != VMW_PL_MOB);
+
+ drm_printf(printer, "%s's used: %u pages, max: %u pages, %u id's\n",
+ (gman->type == VMW_PL_MOB) ? "Mob" : "GMR",
+ gman->used_gmr_pages, gman->max_gmr_pages, gman->max_gmr_ids);
+}
+
static const struct ttm_resource_manager_func vmw_gmrid_manager_func;
int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type)
@@ -146,12 +159,12 @@ int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type)
man = &gman->manager;
man->func = &vmw_gmrid_manager_func;
- /* TODO: This is most likely not correct */
man->use_tt = true;
ttm_resource_manager_init(man, 0);
spin_lock_init(&gman->lock);
gman->used_gmr_pages = 0;
ida_init(&gman->gmr_ida);
+ gman->type = type;
switch (type) {
case VMW_PL_GMR:
@@ -190,4 +203,5 @@ void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type)
static const struct ttm_resource_manager_func vmw_gmrid_manager_func = {
.alloc = vmw_gmrid_man_get_node,
.free = vmw_gmrid_man_put_node,
+ .debug = vmw_gmrid_man_debug
};
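
The new .debug hook is what lets the GMR and MOB managers participate when TTM dumps resource-manager state. A hedged sketch of triggering it by hand (ttm_manager_type(), drm_info_printer() and ttm_resource_manager_debug() are existing TTM/DRM helpers; the dump site itself is hypothetical):

/* Hypothetical dump site; the format comes from
 * vmw_gmrid_man_debug() above, e.g.
 *   "GMR's used: 0 pages, max: 2097152 pages, 8192 id's" */
struct ttm_resource_manager *man =
	ttm_manager_type(&dev_priv->bdev, VMW_PL_GMR);
struct drm_printer p = drm_info_printer(dev_priv->drm.dev);

ttm_resource_manager_debug(man, &p);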
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c b/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c
new file mode 100644
index 000000000000..06aebc12774e
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Simple open hash tab implementation.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include <linux/export.h>
+#include <linux/hash.h>
+#include <linux/mm.h>
+#include <linux/rculist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <drm/drm_print.h>
+
+#include "vmwgfx_hashtab.h"
+
+int vmwgfx_ht_create(struct vmwgfx_open_hash *ht, unsigned int order)
+{
+ unsigned int size = 1 << order;
+
+ ht->order = order;
+ ht->table = NULL;
+ if (size <= PAGE_SIZE / sizeof(*ht->table))
+ ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL);
+ else
+ ht->table = vzalloc(array_size(size, sizeof(*ht->table)));
+ if (!ht->table) {
+ DRM_ERROR("Out of memory for hash table\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void vmwgfx_ht_verbose_list(struct vmwgfx_open_hash *ht, unsigned long key)
+{
+ struct vmwgfx_hash_item *entry;
+ struct hlist_head *h_list;
+ unsigned int hashed_key;
+ int count = 0;
+
+ hashed_key = hash_long(key, ht->order);
+ DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
+ h_list = &ht->table[hashed_key];
+ hlist_for_each_entry(entry, h_list, head)
+ DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
+}
+
+static struct hlist_node *vmwgfx_ht_find_key(struct vmwgfx_open_hash *ht, unsigned long key)
+{
+ struct vmwgfx_hash_item *entry;
+ struct hlist_head *h_list;
+ unsigned int hashed_key;
+
+ hashed_key = hash_long(key, ht->order);
+ h_list = &ht->table[hashed_key];
+ hlist_for_each_entry(entry, h_list, head) {
+ if (entry->key == key)
+ return &entry->head;
+ if (entry->key > key)
+ break;
+ }
+ return NULL;
+}
+
+static struct hlist_node *vmwgfx_ht_find_key_rcu(struct vmwgfx_open_hash *ht, unsigned long key)
+{
+ struct vmwgfx_hash_item *entry;
+ struct hlist_head *h_list;
+ unsigned int hashed_key;
+
+ hashed_key = hash_long(key, ht->order);
+ h_list = &ht->table[hashed_key];
+ hlist_for_each_entry_rcu(entry, h_list, head) {
+ if (entry->key == key)
+ return &entry->head;
+ if (entry->key > key)
+ break;
+ }
+ return NULL;
+}
+
+int vmwgfx_ht_insert_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item)
+{
+ struct vmwgfx_hash_item *entry;
+ struct hlist_head *h_list;
+ struct hlist_node *parent;
+ unsigned int hashed_key;
+ unsigned long key = item->key;
+
+ hashed_key = hash_long(key, ht->order);
+ h_list = &ht->table[hashed_key];
+ parent = NULL;
+ hlist_for_each_entry(entry, h_list, head) {
+ if (entry->key == key)
+ return -EINVAL;
+ if (entry->key > key)
+ break;
+ parent = &entry->head;
+ }
+ if (parent)
+ hlist_add_behind_rcu(&item->head, parent);
+ else
+ hlist_add_head_rcu(&item->head, h_list);
+ return 0;
+}
+
+/*
+ * Insert an item and assign it a "bits"-bit key that hasn't been
+ * used before. The chosen key is stored in item->key.
+ */
+int vmwgfx_ht_just_insert_please(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item,
+ unsigned long seed, int bits, int shift,
+ unsigned long add)
+{
+ int ret;
+ unsigned long mask = (1UL << bits) - 1;
+ unsigned long first, unshifted_key;
+
+ unshifted_key = hash_long(seed, bits);
+ first = unshifted_key;
+ do {
+ item->key = (unshifted_key << shift) + add;
+ ret = vmwgfx_ht_insert_item(ht, item);
+ if (ret)
+ unshifted_key = (unshifted_key + 1) & mask;
+ } while (ret && (unshifted_key != first));
+
+ if (ret) {
+ DRM_ERROR("Available key bit space exhausted\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int vmwgfx_ht_find_item(struct vmwgfx_open_hash *ht, unsigned long key,
+ struct vmwgfx_hash_item **item)
+{
+ struct hlist_node *list;
+
+ list = vmwgfx_ht_find_key_rcu(ht, key);
+ if (!list)
+ return -EINVAL;
+
+ *item = hlist_entry(list, struct vmwgfx_hash_item, head);
+ return 0;
+}
+
+int vmwgfx_ht_remove_key(struct vmwgfx_open_hash *ht, unsigned long key)
+{
+ struct hlist_node *list;
+
+ list = vmwgfx_ht_find_key(ht, key);
+ if (list) {
+ hlist_del_init_rcu(list);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+int vmwgfx_ht_remove_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item)
+{
+ hlist_del_init_rcu(&item->head);
+ return 0;
+}
+
+void vmwgfx_ht_remove(struct vmwgfx_open_hash *ht)
+{
+ if (ht->table) {
+ kvfree(ht->table);
+ ht->table = NULL;
+ }
+}
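
The table stores intrusive vmwgfx_hash_item nodes keyed by an unsigned long, kept sorted within each bucket so lookups can stop early. A usage sketch with a hypothetical container struct:

/* Hypothetical embedding struct; drm_hash_entry() (defined in
 * vmwgfx_hashtab.h) recovers the container from the hash item. */
struct my_node {
	struct vmwgfx_hash_item hash;
	int payload;
};

static int my_node_example(struct vmwgfx_open_hash *ht,
			   struct my_node *node)
{
	struct vmwgfx_hash_item *item;
	int ret;

	node->hash.key = 42;
	ret = vmwgfx_ht_insert_item(ht, &node->hash);	/* -EINVAL on dup */
	if (ret)
		return ret;

	if (!vmwgfx_ht_find_item(ht, 42, &item))	/* 0 == found */
		pr_debug("payload=%d\n",
			 drm_hash_entry(item, struct my_node, hash)->payload);

	return vmwgfx_ht_remove_item(ht, &node->hash);
}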
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.h b/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.h
new file mode 100644
index 000000000000..a9ce12922e21
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Simple open hash tab implementation.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+/*
+ * TODO: Replace this hashtable with Linux' generic implementation
+ * from <linux/hashtable.h>.
+ */
+
+#ifndef VMWGFX_HASHTAB_H
+#define VMWGFX_HASHTAB_H
+
+#include <linux/list.h>
+
+#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
+
+struct vmwgfx_hash_item {
+ struct hlist_node head;
+ unsigned long key;
+};
+
+struct vmwgfx_open_hash {
+ struct hlist_head *table;
+ u8 order;
+};
+
+int vmwgfx_ht_create(struct vmwgfx_open_hash *ht, unsigned int order);
+int vmwgfx_ht_insert_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item);
+int vmwgfx_ht_just_insert_please(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item,
+ unsigned long seed, int bits, int shift,
+ unsigned long add);
+int vmwgfx_ht_find_item(struct vmwgfx_open_hash *ht, unsigned long key,
+ struct vmwgfx_hash_item **item);
+
+void vmwgfx_ht_verbose_list(struct vmwgfx_open_hash *ht, unsigned long key);
+int vmwgfx_ht_remove_key(struct vmwgfx_open_hash *ht, unsigned long key);
+int vmwgfx_ht_remove_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item);
+void vmwgfx_ht_remove(struct vmwgfx_open_hash *ht);
+
+/*
+ * RCU-safe interface
+ *
+ * The user of this API needs to make sure that two or more instances of the
+ * hash table manipulation functions are never run simultaneously.
+ * The lookup function vmwgfx_ht_find_item_rcu may, however, run simultaneously
+ * with any of the manipulation functions as long as it's called from within
+ * an RCU read-locked section.
+ */
+#define vmwgfx_ht_insert_item_rcu vmwgfx_ht_insert_item
+#define vmwgfx_ht_just_insert_please_rcu vmwgfx_ht_just_insert_please
+#define vmwgfx_ht_remove_key_rcu vmwgfx_ht_remove_key
+#define vmwgfx_ht_remove_item_rcu vmwgfx_ht_remove_item
+#define vmwgfx_ht_find_item_rcu vmwgfx_ht_find_item
+
+#endif
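
For the RCU side, the writers above already use the _rcu hlist primitives, so a lookup only needs to run under rcu_read_lock(); freeing a removed node must then go through kfree_rcu() or similar in the caller. A minimal sketch:

/* Sketch: concurrent lookup against a single writer. The caller is
 * assumed to free removed nodes with kfree_rcu(), so 'item' stays
 * valid until rcu_read_unlock(). */
struct vmwgfx_hash_item *item;

rcu_read_lock();
if (!vmwgfx_ht_find_item_rcu(ht, key, &item))
	pr_debug("found key 0x%08lx\n", item->key);
rcu_read_unlock();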
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 28af34ab6ed6..471da2b4c177 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -105,6 +105,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_SM5:
param->value = has_sm5_context(dev_priv);
break;
+ case DRM_VMW_PARAM_GL43:
+ param->value = has_gl43_context(dev_priv);
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 74fa41909213..4e693e8de2c3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -843,8 +843,6 @@ static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
drm_framebuffer_cleanup(framebuffer);
vmw_surface_unreference(&vfbs->surface);
- if (vfbs->base.user_obj)
- ttm_base_object_unref(&vfbs->base.user_obj);
kfree(vfbs);
}
@@ -989,6 +987,16 @@ out_err1:
* Buffer-object framebuffer code
*/
+static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct vmw_framebuffer_bo *vfbd =
+ vmw_framebuffer_to_vfbd(fb);
+
+ return drm_gem_handle_create(file_priv, &vfbd->buffer->base.base, handle);
+}
+
static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_bo *vfbd =
@@ -996,8 +1004,6 @@ static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
drm_framebuffer_cleanup(framebuffer);
vmw_bo_unreference(&vfbd->buffer);
- if (vfbd->base.user_obj)
- ttm_base_object_unref(&vfbd->base.user_obj);
kfree(vfbd);
}
@@ -1063,6 +1069,7 @@ static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
}
static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
+ .create_handle = vmw_framebuffer_bo_create_handle,
.destroy = vmw_framebuffer_bo_destroy,
.dirty = vmw_framebuffer_bo_dirty_ext,
};
@@ -1188,7 +1195,7 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
metadata.base_size.depth = 1;
metadata.scanout = true;
- ret = vmw_gb_surface_define(vmw_priv(dev), 0, &metadata, srf_out);
+ ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
if (ret) {
DRM_ERROR("Failed to allocate proxy content buffer\n");
return ret;
@@ -1251,6 +1258,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
goto out_err1;
}
+ vfbd->base.base.obj[0] = &bo->base.base;
drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
vfbd->base.bo = true;
vfbd->buffer = vmw_bo_reference(bo);
@@ -1368,34 +1376,13 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_framebuffer *vfb = NULL;
struct vmw_surface *surface = NULL;
struct vmw_buffer_object *bo = NULL;
- struct ttm_base_object *user_obj;
int ret;
- /*
- * Take a reference on the user object of the resource
- * backing the kms fb. This ensures that user-space handle
- * lookups on that resource will always work as long as
- * it's registered with a kms framebuffer. This is important,
- * since vmw_execbuf_process identifies resources in the
- * command stream using user-space handles.
- */
-
- user_obj = ttm_base_object_lookup(tfile, mode_cmd->handles[0]);
- if (unlikely(user_obj == NULL)) {
- DRM_ERROR("Could not locate requested kms frame buffer.\n");
- return ERR_PTR(-ENOENT);
- }
-
- /**
- * End conditioned code.
- */
-
/* returns either a bo or surface */
- ret = vmw_user_lookup_handle(dev_priv, tfile,
+ ret = vmw_user_lookup_handle(dev_priv, file_priv,
mode_cmd->handles[0],
&surface, &bo);
if (ret)
@@ -1428,10 +1415,8 @@ err_out:
if (ret) {
DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
- ttm_base_object_unref(&user_obj);
return ERR_PTR(ret);
- } else
- vfb->user_obj = user_obj;
+ }
return &vfb->base;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index bbc809f7bd8a..4d36e8507380 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -219,7 +219,6 @@ struct vmw_framebuffer {
int (*pin)(struct vmw_framebuffer *fb);
int (*unpin)(struct vmw_framebuffer *fb);
bool bo;
- struct ttm_base_object *user_obj;
uint32_t user_handle;
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index f9394207dd3c..2d91a44a3b22 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -146,9 +146,6 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
if (otable->size <= PAGE_SIZE) {
mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
mob->pt_root_page = vmw_piter_dma_addr(&iter);
- } else if (vsgt->num_regions == 1) {
- mob->pt_level = SVGA3D_MOBFMT_RANGE;
- mob->pt_root_page = vmw_piter_dma_addr(&iter);
} else {
ret = vmw_mob_pt_populate(dev_priv, mob);
if (unlikely(ret != 0))
@@ -413,10 +410,9 @@ struct vmw_mob *vmw_mob_create(unsigned long data_pages)
* @mob: Pointer to the mob the pagetable of which we want to
* populate.
*
- * This function allocates memory to be used for the pagetable, and
- * adjusts TTM memory accounting accordingly. Returns ENOMEM if
- * memory resources aren't sufficient and may cause TTM buffer objects
- * to be swapped out by using the TTM memory accounting function.
+ * This function allocates memory to be used for the pagetable.
+ * Returns -ENOMEM if memory resources aren't sufficient; the
+ * allocation may cause TTM buffer objects to be swapped out.
*/
static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
struct vmw_mob *mob)
@@ -624,9 +620,6 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
if (likely(num_data_pages == 1)) {
mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
- } else if (vsgt->num_regions == 1) {
- mob->pt_level = SVGA3D_MOBFMT_RANGE;
- mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
} else if (unlikely(mob->pt_bo == NULL)) {
ret = vmw_mob_pt_populate(dev_priv, mob);
if (unlikely(ret != 0))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 54c5d16eb3b7..e9f5c89b4ca6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -451,7 +451,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
- ret = vmw_user_bo_lookup(tfile, arg->handle, &buf, NULL);
+ ret = vmw_user_bo_lookup(file_priv, arg->handle, &buf);
if (ret)
goto out_unlock;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 922317d1acc8..7bc99b1279f7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -57,7 +57,6 @@ enum vmw_bo_dirty_method {
* @ref_count: Reference count for this structure
* @bitmap_size: The size of the bitmap in bits. Typically equal to the
 * number of pages in the bo.
- * @size: The accounting size for this struct.
* @bitmap: A bitmap where each bit represents a page. A set bit means a
* dirty page.
*/
@@ -68,7 +67,6 @@ struct vmw_bo_dirty {
unsigned int change_count;
unsigned int ref_count;
unsigned long bitmap_size;
- size_t size;
unsigned long bitmap[];
};
@@ -233,12 +231,8 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
pgoff_t num_pages = vbo->base.resource->num_pages;
- size_t size, acc_size;
+ size_t size;
int ret;
- static struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
if (dirty) {
dirty->ref_count++;
@@ -246,20 +240,12 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
}
size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long);
- acc_size = ttm_round_pot(size);
- ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
- if (ret) {
- VMW_DEBUG_USER("Out of graphics memory for buffer object "
- "dirty tracker.\n");
- return ret;
- }
dirty = kvzalloc(size, GFP_KERNEL);
if (!dirty) {
ret = -ENOMEM;
goto out_no_dirty;
}
- dirty->size = acc_size;
dirty->bitmap_size = num_pages;
dirty->start = dirty->bitmap_size;
dirty->end = 0;
@@ -285,7 +271,6 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
return 0;
out_no_dirty:
- ttm_mem_global_free(&ttm_mem_glob, acc_size);
return ret;
}
@@ -304,10 +289,7 @@ void vmw_bo_dirty_release(struct vmw_buffer_object *vbo)
struct vmw_bo_dirty *dirty = vbo->dirty;
if (dirty && --dirty->ref_count == 0) {
- size_t acc_size = dirty->size;
-
kvfree(dirty);
- ttm_mem_global_free(&ttm_mem_glob, acc_size);
vbo->dirty = NULL;
}
}
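
With the accounting gone, the dirty tracker's footprint is just the kvzalloc() size computed above: the struct header plus one bit per page. For a sense of scale, a hedged worked example:

/* Worked example, assuming 4 KiB pages and 64-bit longs: a 4 MiB BO
 * spans 1024 pages, so the bitmap adds BITS_TO_LONGS(1024) = 16
 * longs = 128 bytes on top of sizeof(struct vmw_bo_dirty). */
size_t size = sizeof(struct vmw_bo_dirty) +
	      BITS_TO_LONGS(1024) * sizeof(long);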
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
index d9552a1efd13..2d72a5ee7c0c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -85,6 +85,5 @@ int vmw_prime_handle_to_fd(struct drm_device *dev,
int *prime_fd)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-
return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 8d1e869cc196..708899ba2102 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -320,11 +320,12 @@ vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
 * The pointers pointed at by out_surf and out_buf need to be NULL.
*/
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
+ struct drm_file *filp,
uint32_t handle,
struct vmw_surface **out_surf,
struct vmw_buffer_object **out_buf)
{
+ struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
struct vmw_resource *res;
int ret;
@@ -339,7 +340,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
}
*out_surf = NULL;
- ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
+ ret = vmw_user_bo_lookup(filp, handle, out_buf);
return ret;
}
@@ -362,14 +363,10 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
return 0;
}
- backup = kzalloc(sizeof(*backup), GFP_KERNEL);
- if (unlikely(!backup))
- return -ENOMEM;
-
- ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
- res->func->backup_placement,
- interruptible, false,
- &vmw_bo_bo_free);
+ ret = vmw_bo_create(res->dev_priv, res->backup_size,
+ res->func->backup_placement,
+ interruptible, false,
+ &vmw_bo_bo_free, &backup);
if (unlikely(ret != 0))
goto out_no_bo;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index bd157fb21b45..3004c7a719e9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -442,19 +442,15 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
vps->bo_size = 0;
}
- vps->bo = kzalloc(sizeof(*vps->bo), GFP_KERNEL);
- if (!vps->bo)
- return -ENOMEM;
-
vmw_svga_enable(dev_priv);
 /* After we have allocated the backing store we might not be able to
 * resume the overlays; this is preferred to failing the allocation.
*/
vmw_overlay_pause_all(dev_priv);
- ret = vmw_bo_init(dev_priv, vps->bo, size,
- &vmw_vram_placement,
- false, true, &vmw_bo_bo_free);
+ ret = vmw_bo_create(dev_priv, size,
+ &vmw_vram_placement,
+ false, true, &vmw_bo_bo_free, &vps->bo);
vmw_overlay_resume_all(dev_priv);
if (ret) {
 vps->bo = NULL; /* vmw_bo_create frees on error */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index b8dd62529104..108a496b5d18 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -53,10 +53,6 @@ struct vmw_dx_shader {
struct list_head cotable_head;
};
-static uint64_t vmw_user_shader_size;
-static uint64_t vmw_shader_size;
-static size_t vmw_shader_dx_size;
-
static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base);
@@ -79,7 +75,6 @@ static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
enum vmw_cmdbuf_res_state state);
static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type);
static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type);
-static uint64_t vmw_user_shader_size;
static const struct vmw_user_resource_conv user_shader_conv = {
.object_type = VMW_RES_SHADER,
@@ -563,16 +558,14 @@ void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
*
* @res: The shader resource
*
- * Frees the DX shader resource and updates memory accounting.
+ * Frees the DX shader resource.
*/
static void vmw_dx_shader_res_free(struct vmw_resource *res)
{
- struct vmw_private *dev_priv = res->dev_priv;
struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
vmw_resource_unreference(&shader->cotable);
kfree(shader);
- ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
}
/**
@@ -594,30 +587,13 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
struct vmw_dx_shader *shader;
struct vmw_resource *res;
struct vmw_private *dev_priv = ctx->dev_priv;
- struct ttm_operation_ctx ttm_opt_ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
int ret;
- if (!vmw_shader_dx_size)
- vmw_shader_dx_size = ttm_round_pot(sizeof(*shader));
-
if (!vmw_shader_id_ok(user_key, shader_type))
return -EINVAL;
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size,
- &ttm_opt_ctx);
- if (ret) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for shader "
- "creation.\n");
- return ret;
- }
-
shader = kmalloc(sizeof(*shader), GFP_KERNEL);
if (!shader) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
return -ENOMEM;
}
@@ -669,21 +645,15 @@ static void vmw_user_shader_free(struct vmw_resource *res)
{
struct vmw_user_shader *ushader =
container_of(res, struct vmw_user_shader, shader.res);
- struct vmw_private *dev_priv = res->dev_priv;
ttm_base_object_kfree(ushader, base);
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_user_shader_size);
}
static void vmw_shader_free(struct vmw_resource *res)
{
struct vmw_shader *shader = vmw_res_to_shader(res);
- struct vmw_private *dev_priv = res->dev_priv;
kfree(shader);
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_shader_size);
}
/*
@@ -706,8 +676,7 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- return ttm_ref_object_base_unref(tfile, arg->handle,
- TTM_REF_USAGE);
+ return ttm_ref_object_base_unref(tfile, arg->handle);
}
static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
@@ -722,31 +691,10 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
{
struct vmw_user_shader *ushader;
struct vmw_resource *res, *tmp;
- struct ttm_operation_ctx ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
int ret;
- if (unlikely(vmw_user_shader_size == 0))
- vmw_user_shader_size =
- ttm_round_pot(sizeof(struct vmw_user_shader)) +
- VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- vmw_user_shader_size,
- &ctx);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for shader "
- "creation.\n");
- goto out;
- }
-
ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
if (unlikely(!ushader)) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_user_shader_size);
ret = -ENOMEM;
goto out;
}
@@ -769,7 +717,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
tmp = vmw_resource_reference(res);
ret = ttm_base_object_init(tfile, &ushader->base, false,
VMW_RES_SHADER,
- &vmw_user_shader_base_release, NULL);
+ &vmw_user_shader_base_release);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
@@ -793,31 +741,10 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
{
struct vmw_shader *shader;
struct vmw_resource *res;
- struct ttm_operation_ctx ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
int ret;
- if (unlikely(vmw_shader_size == 0))
- vmw_shader_size =
- ttm_round_pot(sizeof(struct vmw_shader)) +
- VMW_IDA_ACC_SIZE;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- vmw_shader_size,
- &ctx);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for shader "
- "creation.\n");
- goto out_err;
- }
-
shader = kzalloc(sizeof(*shader), GFP_KERNEL);
if (unlikely(!shader)) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_shader_size);
ret = -ENOMEM;
goto out_err;
}
@@ -849,8 +776,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
int ret;
if (buffer_handle != SVGA3D_INVALID_ID) {
- ret = vmw_user_bo_lookup(tfile, buffer_handle,
- &buffer, NULL);
+ ret = vmw_user_bo_lookup(file_priv, buffer_handle, &buffer);
if (unlikely(ret != 0)) {
VMW_DEBUG_USER("Couldn't find buffer for shader creation.\n");
return ret;
@@ -966,13 +892,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
if (!vmw_shader_id_ok(user_key, shader_type))
return -EINVAL;
- /* Allocate and pin a DMA buffer */
- buf = kzalloc(sizeof(*buf), GFP_KERNEL);
- if (unlikely(!buf))
- return -ENOMEM;
-
- ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_placement,
- true, true, vmw_bo_bo_free);
+ ret = vmw_bo_create(dev_priv, size, &vmw_sys_placement,
+ true, true, vmw_bo_bo_free, &buf);
if (unlikely(ret != 0))
goto out;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
index 33b69a70cfe3..483ad544ea54 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
@@ -32,12 +32,10 @@
* struct vmw_user_simple_resource - User-space simple resource struct
*
* @base: The TTM base object implementing user-space visibility.
- * @account_size: How much memory was accounted for this object.
* @simple: The embedded struct vmw_simple_resource.
*/
struct vmw_user_simple_resource {
struct ttm_base_object base;
- size_t account_size;
struct vmw_simple_resource simple;
/*
* Nothing to be placed after @simple, since size of @simple is
@@ -91,18 +89,15 @@ static int vmw_simple_resource_init(struct vmw_private *dev_priv,
*
* @res: The struct vmw_resource member of the simple resource object.
*
- * Frees memory and memory accounting for the object.
+ * Frees memory for the object.
*/
static void vmw_simple_resource_free(struct vmw_resource *res)
{
struct vmw_user_simple_resource *usimple =
container_of(res, struct vmw_user_simple_resource,
simple.res);
- struct vmw_private *dev_priv = res->dev_priv;
- size_t size = usimple->account_size;
ttm_base_object_kfree(usimple, base);
- ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
/**
@@ -149,39 +144,19 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
struct vmw_resource *res;
struct vmw_resource *tmp;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct ttm_operation_ctx ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
size_t alloc_size;
- size_t account_size;
int ret;
alloc_size = offsetof(struct vmw_user_simple_resource, simple) +
func->size;
- account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE +
- TTM_OBJ_EXTRA_SIZE;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), account_size,
- &ctx);
- if (ret) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for %s"
- " creation.\n", func->res_func.type_name);
-
- goto out_ret;
- }
usimple = kzalloc(alloc_size, GFP_KERNEL);
if (!usimple) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- account_size);
ret = -ENOMEM;
goto out_ret;
}
usimple->simple.func = func;
- usimple->account_size = account_size;
res = &usimple->simple.res;
usimple->base.shareable = false;
usimple->base.tfile = NULL;
@@ -197,7 +172,7 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
tmp = vmw_resource_reference(res);
ret = ttm_base_object_init(tfile, &usimple->base, false,
func->ttm_res_type,
- &vmw_simple_resource_base_release, NULL);
+ &vmw_simple_resource_base_release);
if (ret) {
vmw_resource_unreference(&tmp);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
index 9efb4463ce99..4ea32b01efc0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -279,18 +279,15 @@ static bool vmw_view_id_ok(u32 user_key, enum vmw_view_type view_type)
*
* @res: Pointer to a struct vmw_resource
*
- * Frees memory and memory accounting held by a struct vmw_view.
+ * Frees memory held by the struct vmw_view.
*/
static void vmw_view_res_free(struct vmw_resource *res)
{
struct vmw_view *view = vmw_view(res);
- size_t size = offsetof(struct vmw_view, cmd) + view->cmd_size;
- struct vmw_private *dev_priv = res->dev_priv;
vmw_resource_unreference(&view->cotable);
vmw_resource_unreference(&view->srf);
kfree_rcu(view, rcu);
- ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
/**
@@ -327,10 +324,6 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
struct vmw_private *dev_priv = ctx->dev_priv;
struct vmw_resource *res;
struct vmw_view *view;
- struct ttm_operation_ctx ttm_opt_ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
size_t size;
int ret;
@@ -347,16 +340,8 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
size = offsetof(struct vmw_view, cmd) + cmd_size;
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ttm_opt_ctx);
- if (ret) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for view creation\n");
- return ret;
- }
-
view = kmalloc(size, GFP_KERNEL);
if (!view) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
return -ENOMEM;
}
@@ -582,4 +567,8 @@ static void vmw_so_build_asserts(void)
offsetof(SVGA3dCmdDXDefineRenderTargetView, sid));
BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
offsetof(SVGA3dCmdDXDefineDepthStencilView, sid));
+ BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
+ offsetof(SVGA3dCmdDXDefineUAView, sid));
+ BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
+ offsetof(SVGA3dCmdDXDefineDepthStencilView_v2, sid));
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
index f48b84bfeeac..01c701e7466e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
@@ -93,7 +93,10 @@ static inline enum vmw_view_type vmw_view_cmd_to_type(u32 id)
id == SVGA_3D_CMD_DX_DESTROY_UA_VIEW)
return vmw_view_ua;
- if (tmp > (u32)vmw_view_max)
+ if (id == SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2)
+ return vmw_view_ds;
+
+ if (tmp > (u32)vmw_view_ds)
return vmw_view_max;
return (enum vmw_view_type) tmp;
@@ -123,6 +126,7 @@ static inline enum vmw_so_type vmw_so_cmd_to_type(u32 id)
case SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE:
return vmw_so_ds;
case SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE:
+ case SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2:
case SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE:
return vmw_so_rs;
case SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index d85310b2608d..8f0d651d0144 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1123,7 +1123,7 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
}
if (!vps->surf) {
- ret = vmw_gb_surface_define(dev_priv, 0, &metadata,
+ ret = vmw_gb_surface_define(dev_priv, &metadata,
&vps->surf);
if (ret != 0) {
DRM_ERROR("Couldn't allocate STDU surface.\n");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
index c8efa4a6c995..2de97419d5c9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
@@ -60,8 +60,6 @@ static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback,
static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res,
enum vmw_cmdbuf_res_state state);
-static size_t vmw_streamoutput_size;
-
static const struct vmw_res_func vmw_dx_streamoutput_func = {
.res_type = vmw_res_streamoutput,
.needs_backup = true,
@@ -254,12 +252,10 @@ vmw_dx_streamoutput_lookup(struct vmw_cmdbuf_res_manager *man,
static void vmw_dx_streamoutput_res_free(struct vmw_resource *res)
{
- struct vmw_private *dev_priv = res->dev_priv;
struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
vmw_resource_unreference(&so->cotable);
kfree(so);
- ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_streamoutput_size);
}
static void vmw_dx_streamoutput_hw_destroy(struct vmw_resource *res)
@@ -284,27 +280,10 @@ int vmw_dx_streamoutput_add(struct vmw_cmdbuf_res_manager *man,
struct vmw_dx_streamoutput *so;
struct vmw_resource *res;
struct vmw_private *dev_priv = ctx->dev_priv;
- struct ttm_operation_ctx ttm_opt_ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
int ret;
- if (!vmw_streamoutput_size)
- vmw_streamoutput_size = ttm_round_pot(sizeof(*so));
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- vmw_streamoutput_size, &ttm_opt_ctx);
- if (ret) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for streamout.\n");
- return ret;
- }
-
so = kmalloc(sizeof(*so), GFP_KERNEL);
if (!so) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_streamoutput_size);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 5d53a5f9d123..00e8e27e4884 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -45,16 +45,12 @@
* @prime: The TTM prime object.
* @base: The TTM base object handling user-space visibility.
* @srf: The surface metadata.
- * @size: TTM accounting size for the surface.
* @master: Master of the creating client. Used for security check.
- * @backup_base: The TTM base object of the backup buffer.
*/
struct vmw_user_surface {
struct ttm_prime_object prime;
struct vmw_surface srf;
- uint32_t size;
struct drm_master *master;
- struct ttm_base_object *backup_base;
};
/**
@@ -74,13 +70,11 @@ struct vmw_surface_offset {
/**
* struct vmw_surface_dirty - Surface dirty-tracker
* @cache: Cached layout information of the surface.
- * @size: Accounting size for the struct vmw_surface_dirty.
* @num_subres: Number of subresources.
* @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource.
*/
struct vmw_surface_dirty {
struct vmw_surface_cache cache;
- size_t size;
u32 num_subres;
SVGA3dBox boxes[];
};
@@ -129,9 +123,6 @@ static const struct vmw_user_resource_conv user_surface_conv = {
const struct vmw_user_resource_conv *user_surface_converter =
&user_surface_conv;
-
-static uint64_t vmw_user_surface_size;
-
static const struct vmw_res_func vmw_legacy_surface_func = {
.res_type = vmw_res_surface,
.needs_backup = false,
@@ -359,7 +350,7 @@ static void vmw_surface_dma_encode(struct vmw_surface *srf,
* vmw_surface.
*
 * Destroys the device surface associated with a struct vmw_surface, if
- * any, and adjusts accounting and resource count accordingly.
+ * any, and adjusts resource count accordingly.
*/
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
@@ -666,8 +657,6 @@ static void vmw_user_surface_free(struct vmw_resource *res)
struct vmw_surface *srf = vmw_res_to_srf(res);
struct vmw_user_surface *user_srf =
container_of(srf, struct vmw_user_surface, srf);
- struct vmw_private *dev_priv = srf->res.dev_priv;
- uint32_t size = user_srf->size;
WARN_ON_ONCE(res->dirty);
if (user_srf->master)
@@ -676,7 +665,6 @@ static void vmw_user_surface_free(struct vmw_resource *res)
kfree(srf->metadata.sizes);
kfree(srf->snooper.image);
ttm_prime_object_kfree(user_srf, prime);
- ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
/**
@@ -696,8 +684,6 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
struct vmw_resource *res = &user_srf->srf.res;
*p_base = NULL;
- if (user_srf->backup_base)
- ttm_base_object_unref(&user_srf->backup_base);
vmw_resource_unreference(&res);
}
@@ -715,7 +701,7 @@ int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
+ return ttm_ref_object_base_unref(tfile, arg->sid);
}
/**
@@ -740,23 +726,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_surface_create_req *req = &arg->req;
struct drm_vmw_surface_arg *rep = &arg->rep;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct ttm_operation_ctx ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
int ret;
int i, j;
uint32_t cur_bo_offset;
struct drm_vmw_size *cur_size;
struct vmw_surface_offset *cur_offset;
uint32_t num_sizes;
- uint32_t size;
const SVGA3dSurfaceDesc *desc;
- if (unlikely(vmw_user_surface_size == 0))
- vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
- VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
-
num_sizes = 0;
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
@@ -768,10 +745,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
num_sizes == 0)
return -EINVAL;
- size = vmw_user_surface_size +
- ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
- ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
-
desc = vmw_surface_get_desc(req->format);
if (unlikely(desc->blockDesc == SVGA3DBLOCKDESC_NONE)) {
VMW_DEBUG_USER("Invalid format %d for surface creation.\n",
@@ -779,18 +752,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- size, &ctx);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for surface.\n");
- goto out_unlock;
- }
-
user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
if (unlikely(!user_srf)) {
ret = -ENOMEM;
- goto out_no_user_srf;
+ goto out_unlock;
}
srf = &user_srf->srf;
@@ -805,7 +770,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
memcpy(metadata->mip_levels, req->mip_levels,
sizeof(metadata->mip_levels));
metadata->num_sizes = num_sizes;
- user_srf->size = size;
metadata->sizes =
memdup_user((struct drm_vmw_size __user *)(unsigned long)
req->size_addr,
@@ -883,22 +847,22 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
if (dev_priv->has_mob && req->shareable) {
uint32_t backup_handle;
- ret = vmw_user_bo_alloc(dev_priv, tfile,
- res->backup_size,
- true,
- &backup_handle,
- &res->backup,
- &user_srf->backup_base);
+ ret = vmw_gem_object_create_with_handle(dev_priv,
+ file_priv,
+ res->backup_size,
+ &backup_handle,
+ &res->backup);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
goto out_unlock;
}
+ vmw_bo_reference(res->backup);
}
tmp = vmw_resource_reference(&srf->res);
ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
req->shareable, VMW_RES_SURFACE,
- &vmw_user_surface_base_release, NULL);
+ &vmw_user_surface_base_release);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
@@ -916,8 +880,6 @@ out_no_offsets:
kfree(metadata->sizes);
out_no_sizes:
ttm_prime_object_kfree(user_srf, prime);
-out_no_user_srf:
- ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
return ret;
}
@@ -955,7 +917,6 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
VMW_DEBUG_USER("Referenced object is not a surface.\n");
goto out_bad_resource;
}
-
if (handle_type != DRM_VMW_HANDLE_PRIME) {
bool require_exist = false;
@@ -980,8 +941,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
if (unlikely(drm_is_render_client(file_priv)))
require_exist = true;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
- require_exist);
+ ret = ttm_ref_object_add(tfile, base, NULL, require_exist);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a surface.\n");
goto out_bad_resource;
@@ -995,7 +955,7 @@ out_bad_resource:
ttm_base_object_unref(&base);
out_no_lookup:
if (handle_type == DRM_VMW_HANDLE_PRIME)
- (void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+ (void) ttm_ref_object_base_unref(tfile, handle);
return ret;
}
@@ -1045,7 +1005,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0)) {
VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes,
srf->metadata.num_sizes);
- ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE);
+ ttm_ref_object_base_unref(tfile, base->handle);
ret = -EFAULT;
}
@@ -1459,7 +1419,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
struct vmw_resource *res;
struct vmw_resource *tmp;
int ret = 0;
- uint32_t size;
uint32_t backup_handle = 0;
SVGA3dSurfaceAllFlags svga3d_flags_64 =
SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
@@ -1506,12 +1465,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
return -EINVAL;
}
- if (unlikely(vmw_user_surface_size == 0))
- vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
- VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
-
- size = vmw_user_surface_size;
-
metadata.flags = svga3d_flags_64;
metadata.format = req->base.format;
metadata.mip_levels[0] = req->base.mip_levels;
@@ -1526,7 +1479,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
drm_vmw_surface_flag_scanout;
/* Define a surface based on the parameters. */
- ret = vmw_gb_surface_define(dev_priv, size, &metadata, &srf);
+ ret = vmw_gb_surface_define(dev_priv, &metadata, &srf);
if (ret != 0) {
VMW_DEBUG_USER("Failed to define surface.\n");
return ret;
@@ -1539,9 +1492,8 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
res = &user_srf->srf.res;
if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
- ret = vmw_user_bo_lookup(tfile, req->base.buffer_handle,
- &res->backup,
- &user_srf->backup_base);
+ ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle,
+ &res->backup);
if (ret == 0) {
if (res->backup->base.base.size < res->backup_size) {
VMW_DEBUG_USER("Surface backup buffer too small.\n");
@@ -1554,14 +1506,15 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
}
} else if (req->base.drm_surface_flags &
(drm_vmw_surface_flag_create_buffer |
- drm_vmw_surface_flag_coherent))
- ret = vmw_user_bo_alloc(dev_priv, tfile,
- res->backup_size,
- req->base.drm_surface_flags &
- drm_vmw_surface_flag_shareable,
- &backup_handle,
- &res->backup,
- &user_srf->backup_base);
+ drm_vmw_surface_flag_coherent)) {
+ ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
+ res->backup_size,
+ &backup_handle,
+ &res->backup);
+ if (ret == 0)
+ vmw_bo_reference(res->backup);
+
+ }
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
@@ -1593,7 +1546,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
req->base.drm_surface_flags &
drm_vmw_surface_flag_shareable,
VMW_RES_SURFACE,
- &vmw_user_surface_base_release, NULL);
+ &vmw_user_surface_base_release);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
@@ -1613,7 +1566,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
rep->buffer_size = 0;
rep->buffer_handle = SVGA3D_INVALID_ID;
}
-
vmw_resource_unreference(&res);
out_unlock:
@@ -1636,12 +1588,11 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_surface *srf;
struct vmw_user_surface *user_srf;
struct vmw_surface_metadata *metadata;
struct ttm_base_object *base;
- uint32_t backup_handle;
+ u32 backup_handle;
int ret;
ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
@@ -1658,14 +1609,12 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
metadata = &srf->metadata;
mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
- ret = vmw_user_bo_reference(tfile, srf->res.backup, &backup_handle);
+ ret = drm_gem_handle_create(file_priv, &srf->res.backup->base.base,
+ &backup_handle);
mutex_unlock(&dev_priv->cmdbuf_mutex);
-
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could not add a reference to a GB surface "
- "backup buffer.\n");
- (void) ttm_ref_object_base_unref(tfile, base->handle,
- TTM_REF_USAGE);
+	if (ret != 0) {
+		drm_err(dev, "Could not create a backing handle for surface sid = %u.\n",
+			req->sid);
goto out_bad_resource;
}
@@ -1955,11 +1904,7 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
u32 num_mip;
u32 num_subres;
u32 num_samples;
- size_t dirty_size, acc_size;
- static struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
+ size_t dirty_size;
int ret;
if (metadata->array_size)
@@ -1973,14 +1918,6 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
num_subres = num_layers * num_mip;
dirty_size = struct_size(dirty, boxes, num_subres);
- acc_size = ttm_round_pot(dirty_size);
- ret = ttm_mem_global_alloc(vmw_mem_glob(res->dev_priv),
- acc_size, &ctx);
- if (ret) {
- VMW_DEBUG_USER("Out of graphics memory for surface "
- "dirty tracker.\n");
- return ret;
- }
dirty = kvzalloc(dirty_size, GFP_KERNEL);
if (!dirty) {
@@ -1990,13 +1927,12 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
num_samples = max_t(u32, 1, metadata->multisample_count);
ret = vmw_surface_setup_cache(&metadata->base_size, metadata->format,
- num_mip, num_layers, num_samples,
- &dirty->cache);
+ num_mip, num_layers, num_samples,
+ &dirty->cache);
if (ret)
goto out_no_cache;
dirty->num_subres = num_subres;
- dirty->size = acc_size;
res->dirty = (struct vmw_resource_dirty *) dirty;
return 0;
@@ -2004,7 +1940,6 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
out_no_cache:
kvfree(dirty);
out_no_dirty:
- ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size);
return ret;
}
@@ -2015,10 +1950,8 @@ static void vmw_surface_dirty_free(struct vmw_resource *res)
{
struct vmw_surface_dirty *dirty =
(struct vmw_surface_dirty *) res->dirty;
- size_t acc_size = dirty->size;
kvfree(dirty);
- ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size);
res->dirty = NULL;
}
@@ -2051,8 +1984,6 @@ static int vmw_surface_clean(struct vmw_resource *res)
* vmw_gb_surface_define - Define a private GB surface
*
* @dev_priv: Pointer to a device private.
- * @user_accounting_size: Used to track user-space memory usage, set
- * to 0 for kernel mode only memory
* @metadata: Metadata representing the surface to create.
* @user_srf_out: allocated user_srf. Set to NULL on failure.
*
@@ -2062,17 +1993,12 @@ static int vmw_surface_clean(struct vmw_resource *res)
* it available to user mode drivers.
*/
int vmw_gb_surface_define(struct vmw_private *dev_priv,
- uint32_t user_accounting_size,
const struct vmw_surface_metadata *req,
struct vmw_surface **srf_out)
{
struct vmw_surface_metadata *metadata;
struct vmw_user_surface *user_srf;
struct vmw_surface *srf;
- struct ttm_operation_ctx ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
u32 sample_count = 1;
u32 num_layers = 1;
int ret;
@@ -2113,22 +2039,13 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv,
if (req->sizes != NULL)
return -EINVAL;
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- user_accounting_size, &ctx);
- if (ret != 0) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for surface.\n");
- goto out_unlock;
- }
-
user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
if (unlikely(!user_srf)) {
ret = -ENOMEM;
- goto out_no_user_srf;
+ goto out_unlock;
}
*srf_out = &user_srf->srf;
- user_srf->size = user_accounting_size;
user_srf->prime.base.shareable = false;
user_srf->prime.base.tfile = NULL;
@@ -2179,9 +2096,6 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv,
return ret;
-out_no_user_srf:
- ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
-
out_unlock:
return ret;
}
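Note on the backup-buffer hunk above: vmw_gem_object_create_with_handle() now produces both the userspace handle and the kernel-side buffer pointer in one call. A minimal sketch of the reference rule this implies, assuming the newly created GEM handle owns the buffer's creation reference (names as in the patch, surrounding code illustrative only):

	/*
	 * Sketch, not part of the patch: the handle owns the creation
	 * reference, so the resource takes one of its own.  Without the
	 * extra vmw_bo_reference(), a GEM_CLOSE from userspace could
	 * drop the last reference while res->backup still points at
	 * the buffer.
	 */
	ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
						res->backup_size,
						&backup_handle,
						&res->backup);
	if (ret == 0)
		vmw_bo_reference(res->backup);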
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c
new file mode 100644
index 000000000000..b0005b03a617
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright 2021 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include "vmwgfx_drv.h"
+
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_resource.h>
+#include <linux/slab.h>
+
+
+static int vmw_sys_man_alloc(struct ttm_resource_manager *man,
+ struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource **res)
+{
+ *res = kzalloc(sizeof(**res), GFP_KERNEL);
+ if (!*res)
+ return -ENOMEM;
+
+ ttm_resource_init(bo, place, *res);
+ return 0;
+}
+
+static void vmw_sys_man_free(struct ttm_resource_manager *man,
+ struct ttm_resource *res)
+{
+ kfree(res);
+}
+
+static const struct ttm_resource_manager_func vmw_sys_manager_func = {
+ .alloc = vmw_sys_man_alloc,
+ .free = vmw_sys_man_free,
+};
+
+int vmw_sys_man_init(struct vmw_private *dev_priv)
+{
+ struct ttm_device *bdev = &dev_priv->bdev;
+ struct ttm_resource_manager *man =
+ kzalloc(sizeof(*man), GFP_KERNEL);
+
+ if (!man)
+ return -ENOMEM;
+
+ man->use_tt = true;
+ man->func = &vmw_sys_manager_func;
+
+ ttm_resource_manager_init(man, 0);
+ ttm_set_driver_manager(bdev, VMW_PL_SYSTEM, man);
+ ttm_resource_manager_set_used(man, true);
+ return 0;
+}
+
+void vmw_sys_man_fini(struct vmw_private *dev_priv)
+{
+ struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev,
+ VMW_PL_SYSTEM);
+
+ ttm_resource_manager_evict_all(&dev_priv->bdev, man);
+
+ ttm_resource_manager_set_used(man, false);
+ ttm_resource_manager_cleanup(man);
+
+ ttm_set_driver_manager(&dev_priv->bdev, VMW_PL_SYSTEM, NULL);
+ kfree(man);
+}
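The manager above has no address space to carve up: its alloc hook only materializes a struct ttm_resource, so VMW_PL_SYSTEM is TT-backed memory that stays under driver control rather than under TTM's generic TTM_PL_SYSTEM handling. A sketch of how a buffer lands here, using the vmw_pt_sys_placement defined in the next file (illustrative caller, assuming the vmw_bo_create_kernel() signature shown in the last hunk of that file):

	struct ttm_buffer_object *bo;
	int ret;

	/* A placement whose mem_type is VMW_PL_SYSTEM routes the
	 * allocation to vmw_sys_man_alloc()/vmw_sys_man_free(). */
	ret = vmw_bo_create_kernel(dev_priv, bo_size,
				   &vmw_pt_sys_placement, &bo);
	if (ret)
		return ret;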
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index e899a936a42a..b84ecc6d6611 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -92,6 +92,13 @@ static const struct ttm_place gmr_vram_placement_flags[] = {
}
};
+static const struct ttm_place vmw_sys_placement_flags = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .mem_type = VMW_PL_SYSTEM,
+ .flags = 0
+};
+
struct ttm_placement vmw_vram_gmr_placement = {
.num_placement = 2,
.placement = vram_gmr_placement_flags,
@@ -113,28 +120,11 @@ struct ttm_placement vmw_sys_placement = {
.busy_placement = &sys_placement_flags
};
-static const struct ttm_place evictable_placement_flags[] = {
- {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = TTM_PL_SYSTEM,
- .flags = 0
- }, {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = TTM_PL_VRAM,
- .flags = 0
- }, {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_GMR,
- .flags = 0
- }, {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_MOB,
- .flags = 0
- }
+struct ttm_placement vmw_pt_sys_placement = {
+ .num_placement = 1,
+ .placement = &vmw_sys_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &vmw_sys_placement_flags
};
static const struct ttm_place nonfixed_placement_flags[] = {
@@ -156,13 +146,6 @@ static const struct ttm_place nonfixed_placement_flags[] = {
}
};
-struct ttm_placement vmw_evictable_placement = {
- .num_placement = 4,
- .placement = evictable_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &sys_placement_flags
-};
-
struct ttm_placement vmw_srf_placement = {
.num_placement = 1,
.num_busy_placement = 2,
@@ -184,19 +167,6 @@ struct ttm_placement vmw_nonfixed_placement = {
.busy_placement = &sys_placement_flags
};
-struct vmw_ttm_tt {
- struct ttm_tt dma_ttm;
- struct vmw_private *dev_priv;
- int gmr_id;
- struct vmw_mob *mob;
- int mem_type;
- struct sg_table sgt;
- struct vmw_sg_table vsgt;
- uint64_t sg_alloc_size;
- bool mapped;
- bool bound;
-};
-
const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
/**
@@ -317,17 +287,8 @@ static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
struct vmw_private *dev_priv = vmw_tt->dev_priv;
- struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
- struct ttm_operation_ctx ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
- struct vmw_piter iter;
- dma_addr_t old;
int ret = 0;
- static size_t sgl_size;
- static size_t sgt_size;
if (vmw_tt->mapped)
return 0;
@@ -336,20 +297,12 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
vsgt->pages = vmw_tt->dma_ttm.pages;
vsgt->num_pages = vmw_tt->dma_ttm.num_pages;
vsgt->addrs = vmw_tt->dma_ttm.dma_address;
- vsgt->sgt = &vmw_tt->sgt;
+ vsgt->sgt = NULL;
switch (dev_priv->map_mode) {
case vmw_dma_map_bind:
case vmw_dma_map_populate:
- if (unlikely(!sgl_size)) {
- sgl_size = ttm_round_pot(sizeof(struct scatterlist));
- sgt_size = ttm_round_pot(sizeof(struct sg_table));
- }
- vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
- ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
- if (unlikely(ret != 0))
- return ret;
-
+ vsgt->sgt = &vmw_tt->sgt;
ret = sg_alloc_table_from_pages_segment(
&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
(unsigned long)vsgt->num_pages << PAGE_SHIFT,
@@ -357,15 +310,6 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
if (ret)
goto out_sg_alloc_fail;
- if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
- uint64_t over_alloc =
- sgl_size * (vsgt->num_pages -
- vmw_tt->sgt.orig_nents);
-
- ttm_mem_global_free(glob, over_alloc);
- vmw_tt->sg_alloc_size -= over_alloc;
- }
-
ret = vmw_ttm_map_for_dma(vmw_tt);
if (unlikely(ret != 0))
goto out_map_fail;
@@ -375,16 +319,6 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
break;
}
- old = ~((dma_addr_t) 0);
- vmw_tt->vsgt.num_regions = 0;
- for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
- dma_addr_t cur = vmw_piter_dma_addr(&iter);
-
- if (cur != old + PAGE_SIZE)
- vmw_tt->vsgt.num_regions++;
- old = cur;
- }
-
vmw_tt->mapped = true;
return 0;
@@ -392,7 +326,6 @@ out_map_fail:
sg_free_table(vmw_tt->vsgt.sgt);
vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
- ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
return ret;
}
@@ -418,8 +351,6 @@ static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
vmw_ttm_unmap_from_dma(vmw_tt);
sg_free_table(vmw_tt->vsgt.sgt);
vmw_tt->vsgt.sgt = NULL;
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_tt->sg_alloc_size);
break;
default:
break;
@@ -484,6 +415,9 @@ static int vmw_ttm_bind(struct ttm_device *bdev,
&vmw_be->vsgt, ttm->num_pages,
vmw_be->gmr_id);
break;
+ case VMW_PL_SYSTEM:
+ /* Nothing to be done for a system bind */
+ break;
default:
BUG();
}
@@ -507,6 +441,8 @@ static void vmw_ttm_unbind(struct ttm_device *bdev,
case VMW_PL_MOB:
vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
break;
+ case VMW_PL_SYSTEM:
+ break;
default:
BUG();
}
@@ -534,7 +470,6 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
static int vmw_ttm_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
- unsigned int i;
int ret;
/* TODO: maybe completely drop this ? */
@@ -542,22 +477,7 @@ static int vmw_ttm_populate(struct ttm_device *bdev,
return 0;
ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
- if (ret)
- return ret;
-
- for (i = 0; i < ttm->num_pages; ++i) {
- ret = ttm_mem_global_alloc_page(&ttm_mem_glob, ttm->pages[i],
- PAGE_SIZE, ctx);
- if (ret)
- goto error;
- }
- return 0;
-error:
- while (i--)
- ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
- PAGE_SIZE);
- ttm_pool_free(&bdev->pool, ttm);
return ret;
}
@@ -566,7 +486,6 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
{
struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
dma_ttm);
- unsigned int i;
vmw_ttm_unbind(bdev, ttm);
@@ -577,10 +496,6 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
vmw_ttm_unmap_dma(vmw_tt);
- for (i = 0; i < ttm->num_pages; ++i)
- ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
- PAGE_SIZE);
-
ttm_pool_free(&bdev->pool, ttm);
}
@@ -624,6 +539,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
+ case VMW_PL_SYSTEM:
case VMW_PL_GMR:
case VMW_PL_MOB:
return 0;
@@ -670,6 +586,11 @@ static void vmw_swap_notify(struct ttm_buffer_object *bo)
(void) ttm_bo_wait(bo, false, false);
}
+static bool vmw_memtype_is_system(uint32_t mem_type)
+{
+ return mem_type == TTM_PL_SYSTEM || mem_type == VMW_PL_SYSTEM;
+}
+
static int vmw_move(struct ttm_buffer_object *bo,
bool evict,
struct ttm_operation_ctx *ctx,
@@ -680,7 +601,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
int ret;
- if (new_man->use_tt && new_mem->mem_type != TTM_PL_SYSTEM) {
+ if (new_man->use_tt && !vmw_memtype_is_system(new_mem->mem_type)) {
ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem);
if (ret)
return ret;
@@ -689,7 +610,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
vmw_move_notify(bo, bo->resource, new_mem);
if (old_man->use_tt && new_man->use_tt) {
- if (bo->resource->mem_type == TTM_PL_SYSTEM) {
+ if (vmw_memtype_is_system(bo->resource->mem_type)) {
ttm_bo_move_null(bo, new_mem);
return 0;
}
@@ -736,7 +657,7 @@ int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
int ret;
ret = vmw_bo_create_kernel(dev_priv, bo_size,
- &vmw_sys_placement,
+ &vmw_pt_sys_placement,
&bo);
if (unlikely(ret != 0))
return ret;
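For reference, the two vmw_move() hunks above reduce to one rule: a move between TT-backed memory types whose source is already (driver- or TTM-) system memory needs no data copy. A hypothetical helper expressing that rule, not part of the patch:

	static bool vmw_move_is_null(struct ttm_buffer_object *bo,
				     struct ttm_resource *new_mem)
	{
		struct ttm_resource_manager *old_man =
			ttm_manager_type(bo->bdev, bo->resource->mem_type);
		struct ttm_resource_manager *new_man =
			ttm_manager_type(bo->bdev, new_mem->mem_type);

		/* Both ends TT-backed and the source is system memory:
		 * vmw_move() may use ttm_bo_move_null() here. */
		return old_man->use_tt && new_man->use_tt &&
		       vmw_memtype_is_system(bo->resource->mem_type);
	}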
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 0a4c340252ec..265f7c48d856 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -27,30 +27,44 @@
#include "vmwgfx_drv.h"
-static struct ttm_buffer_object *vmw_bo_vm_lookup(struct ttm_device *bdev,
- unsigned long offset,
- unsigned long pages)
+static int vmw_bo_vm_lookup(struct ttm_device *bdev,
+ struct drm_file *filp,
+ unsigned long offset,
+ unsigned long pages,
+ struct ttm_buffer_object **p_bo)
{
struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
struct drm_device *drm = &dev_priv->drm;
struct drm_vma_offset_node *node;
- struct ttm_buffer_object *bo = NULL;
+ int ret;
+
+ *p_bo = NULL;
drm_vma_offset_lock_lookup(bdev->vma_manager);
node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
if (likely(node)) {
- bo = container_of(node, struct ttm_buffer_object,
+ *p_bo = container_of(node, struct ttm_buffer_object,
base.vma_node);
- bo = ttm_bo_get_unless_zero(bo);
+ *p_bo = ttm_bo_get_unless_zero(*p_bo);
}
drm_vma_offset_unlock_lookup(bdev->vma_manager);
- if (!bo)
+ if (!*p_bo) {
drm_err(drm, "Could not find buffer object to map\n");
+ return -EINVAL;
+ }
+
+ if (!drm_vma_node_is_allowed(node, filp)) {
+ ret = -EACCES;
+ goto out_no_access;
+ }
- return bo;
+ return 0;
+out_no_access:
+ ttm_bo_put(*p_bo);
+ return ret;
}
int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -64,7 +78,6 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
};
struct drm_file *file_priv = filp->private_data;
struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct ttm_device *bdev = &dev_priv->bdev;
struct ttm_buffer_object *bo;
int ret;
@@ -72,13 +85,9 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
return -EINVAL;
- bo = vmw_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
- if (unlikely(!bo))
- return -EINVAL;
-
- ret = vmw_user_bo_verify_access(bo, tfile);
+ ret = vmw_bo_vm_lookup(bdev, file_priv, vma->vm_pgoff, vma_pages(vma), &bo);
if (unlikely(ret != 0))
- goto out_unref;
+ return ret;
ret = ttm_bo_mmap_obj(vma, bo);
if (unlikely(ret != 0))
@@ -99,38 +108,3 @@ out_unref:
return ret;
}
-/* struct vmw_validation_mem callback */
-static int vmw_vmt_reserve(struct vmw_validation_mem *m, size_t size)
-{
- static struct ttm_operation_ctx ctx = {.interruptible = false,
- .no_wait_gpu = false};
- struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
-
- return ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ctx);
-}
-
-/* struct vmw_validation_mem callback */
-static void vmw_vmt_unreserve(struct vmw_validation_mem *m, size_t size)
-{
- struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
-
- return ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
-}
-
-/**
- * vmw_validation_mem_init_ttm - Interface the validation memory tracker
- * to ttm.
- * @dev_priv: Pointer to struct vmw_private. The reason we choose a vmw private
- * rather than a struct vmw_validation_mem is to make sure assumption in the
- * callbacks that struct vmw_private derives from struct vmw_validation_mem
- * holds true.
- * @gran: The recommended allocation granularity
- */
-void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran)
-{
- struct vmw_validation_mem *vvm = &dev_priv->vvm;
-
- vvm->reserve_mem = vmw_vmt_reserve;
- vvm->unreserve_mem = vmw_vmt_unreserve;
- vvm->gran = gran;
-}
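With the hunks above, mmap access control moves from the TTM object file (vmw_user_bo_verify_access) to DRM's vma offset manager: only a drm_file that holds a handle to the object passes drm_vma_node_is_allowed(). A condensed sketch of the resulting vmw_mmap() flow, error paths trimmed (see the full hunk above):

	ret = vmw_bo_vm_lookup(bdev, file_priv, vma->vm_pgoff,
			       vma_pages(vma), &bo);
	if (ret)
		return ret;	/* -EINVAL: no object, -EACCES: no handle */

	ret = ttm_bo_mmap_obj(vma, bo);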
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_va.c b/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
index ebc1d83c34b4..6ad744ae07f5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
@@ -117,7 +117,7 @@ int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- arg->stream_id, TTM_REF_USAGE);
+ arg->stream_id);
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index b09094b50c5d..f46891012be3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -29,6 +29,9 @@
#include "vmwgfx_validation.h"
#include "vmwgfx_drv.h"
+
+#define VMWGFX_VALIDATION_MEM_GRAN (16 * PAGE_SIZE)
+
/**
* struct vmw_validation_bo_node - Buffer object validation metadata.
* @base: Metadata used for TTM reservation- and validation.
@@ -43,7 +46,7 @@
*/
struct vmw_validation_bo_node {
struct ttm_validate_buffer base;
- struct drm_hash_item hash;
+ struct vmwgfx_hash_item hash;
unsigned int coherent_count;
u32 as_mob : 1;
u32 cpu_blit : 1;
@@ -72,7 +75,7 @@ struct vmw_validation_bo_node {
*/
struct vmw_validation_res_node {
struct list_head head;
- struct drm_hash_item hash;
+ struct vmwgfx_hash_item hash;
struct vmw_resource *res;
struct vmw_buffer_object *new_backup;
unsigned long new_backup_offset;
@@ -113,13 +116,8 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
struct page *page;
if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
- int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);
-
- if (ret)
- return NULL;
-
- ctx->vm_size_left += ctx->vm->gran;
- ctx->total_mem += ctx->vm->gran;
+ ctx->vm_size_left += VMWGFX_VALIDATION_MEM_GRAN;
+ ctx->total_mem += VMWGFX_VALIDATION_MEM_GRAN;
}
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
@@ -159,7 +157,6 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
ctx->mem_size_left = 0;
if (ctx->vm && ctx->total_mem) {
- ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
ctx->total_mem = 0;
ctx->vm_size_left = 0;
}
@@ -184,9 +181,9 @@ vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
return NULL;
if (ctx->ht) {
- struct drm_hash_item *hash;
+ struct vmwgfx_hash_item *hash;
- if (!drm_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
+ if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
bo_node = container_of(hash, typeof(*bo_node), hash);
} else {
struct vmw_validation_bo_node *entry;
@@ -221,9 +218,9 @@ vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
return NULL;
if (ctx->ht) {
- struct drm_hash_item *hash;
+ struct vmwgfx_hash_item *hash;
- if (!drm_ht_find_item(ctx->ht, (unsigned long) res, &hash))
+ if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) res, &hash))
res_node = container_of(hash, typeof(*res_node), hash);
} else {
struct vmw_validation_res_node *entry;
@@ -280,7 +277,7 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
if (ctx->ht) {
bo_node->hash.key = (unsigned long) vbo;
- ret = drm_ht_insert_item(ctx->ht, &bo_node->hash);
+ ret = vmwgfx_ht_insert_item(ctx->ht, &bo_node->hash);
if (ret) {
DRM_ERROR("Failed to initialize a buffer "
"validation entry.\n");
@@ -335,7 +332,7 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx,
if (ctx->ht) {
node->hash.key = (unsigned long) res;
- ret = drm_ht_insert_item(ctx->ht, &node->hash);
+ ret = vmwgfx_ht_insert_item(ctx->ht, &node->hash);
if (ret) {
DRM_ERROR("Failed to initialize a resource validation "
"entry.\n");
@@ -688,13 +685,13 @@ void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
return;
list_for_each_entry(entry, &ctx->bo_list, base.head)
- (void) drm_ht_remove_item(ctx->ht, &entry->hash);
+ (void) vmwgfx_ht_remove_item(ctx->ht, &entry->hash);
list_for_each_entry(val, &ctx->resource_list, head)
- (void) drm_ht_remove_item(ctx->ht, &val->hash);
+ (void) vmwgfx_ht_remove_item(ctx->ht, &val->hash);
list_for_each_entry(val, &ctx->resource_ctx_list, head)
- (void) drm_ht_remove_item(ctx->ht, &val->hash);
+ (void) vmwgfx_ht_remove_item(ctx->ht, &val->hash);
ctx->ht = NULL;
}
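The vmwgfx_ht_* conversions above are mechanical: vmwgfx_hashtab appears to be a driver-local stand-in for drm_hashtab, keyed by the object pointer, so the find/insert/remove semantics are unchanged (0 on success). A sketch of the duplicate-finding pattern the validation code uses, with NULL and error checks trimmed:

	struct vmwgfx_hash_item *hash;

	if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) vbo, &hash)) {
		/* Duplicate: reuse the node embedding the hash item. */
		bo_node = container_of(hash, typeof(*bo_node), hash);
	} else {
		/* New entry: key it by the object pointer and insert. */
		bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
		bo_node->hash.key = (unsigned long) vbo;
		ret = vmwgfx_ht_insert_item(ctx->ht, &bo_node->hash);
	}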
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index 739906d1b3eb..f21df053882b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -31,29 +31,15 @@
#include <linux/list.h>
#include <linux/ww_mutex.h>
-#include <drm/drm_hashtab.h>
#include <drm/ttm/ttm_execbuf_util.h>
+#include "vmwgfx_hashtab.h"
+
#define VMW_RES_DIRTY_NONE 0
#define VMW_RES_DIRTY_SET BIT(0)
#define VMW_RES_DIRTY_CLEAR BIT(1)
/**
- * struct vmw_validation_mem - Custom interface to provide memory reservations
- * for the validation code.
- * @reserve_mem: Callback to reserve memory
- * @unreserve_mem: Callback to unreserve memory
- * @gran: Reservation granularity. Contains a hint how much memory should
- * be reserved in each call to @reserve_mem(). A slow implementation may want
- * reservation to be done in large batches.
- */
-struct vmw_validation_mem {
- int (*reserve_mem)(struct vmw_validation_mem *m, size_t size);
- void (*unreserve_mem)(struct vmw_validation_mem *m, size_t size);
- size_t gran;
-};
-
-/**
* struct vmw_validation_context - Per command submission validation context
* @ht: Hash table used to find resource- or buffer object duplicates
* @resource_list: List head for resource validation metadata
@@ -73,7 +59,7 @@ struct vmw_validation_mem {
* @total_mem: Amount of reserved memory.
*/
struct vmw_validation_context {
- struct drm_open_hash *ht;
+ struct vmwgfx_open_hash *ht;
struct list_head resource_list;
struct list_head resource_ctx_list;
struct list_head bo_list;
@@ -129,21 +115,6 @@ vmw_validation_has_bos(struct vmw_validation_context *ctx)
}
/**
- * vmw_validation_set_val_mem - Register a validation mem object for
- * validation memory reservation
- * @ctx: The validation context
- * @vm: Pointer to a struct vmw_validation_mem
- *
- * Must be set before the first attempt to allocate validation memory.
- */
-static inline void
-vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
- struct vmw_validation_mem *vm)
-{
- ctx->vm = vm;
-}
-
-/**
* vmw_validation_set_ht - Register a hash table for duplicate finding
* @ctx: The validation context
* @ht: Pointer to a hash table to use for duplicate finding
@@ -151,7 +122,7 @@ vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
* available at validation context declaration time
*/
static inline void vmw_validation_set_ht(struct vmw_validation_context *ctx,
- struct drm_open_hash *ht)
+ struct vmwgfx_open_hash *ht)
{
ctx->ht = ht;
}
@@ -190,22 +161,6 @@ vmw_validation_bo_fence(struct vmw_validation_context *ctx,
}
/**
- * vmw_validation_context_init - Initialize a validation context
- * @ctx: Pointer to the validation context to initialize
- *
- * This function initializes a validation context with @merge_dups set
- * to false
- */
-static inline void
-vmw_validation_context_init(struct vmw_validation_context *ctx)
-{
- memset(ctx, 0, sizeof(*ctx));
- INIT_LIST_HEAD(&ctx->resource_list);
- INIT_LIST_HEAD(&ctx->resource_ctx_list);
- INIT_LIST_HEAD(&ctx->bo_list);
-}
-
-/**
* vmw_validation_align - Align a validation memory allocation
* @val: The size to be aligned
*