author    Danilo Krummrich <dakr@redhat.com>  2023-07-20 02:14:22 +0200
committer Danilo Krummrich <dakr@redhat.com>  2023-07-20 05:15:53 +0200
commit    e6303f323b1ad9c02ae813fc3dedeaa9dadfd3b0 (patch)
tree      dab633d684a54a27858623efb0898231cf235916 /include/drm/drm_gem.h
parent    drm/bridge: anx7625: Use common macros for HDCP capabilities (diff)
drm: manager to keep track of GPUs VA mappings
Add infrastructure to keep track of GPU virtual address (VA) mappings
with a dedicated VA space manager implementation. New UAPIs, motivated
by the Vulkan sparse memory bindings that graphics drivers have started
implementing, allow userspace applications to request multiple and
arbitrary GPU VA mappings of buffer objects.

The DRM GPU VA manager is intended to serve the following purposes in
this context.

1) Provide infrastructure to track GPU VA allocations and mappings,
   using an interval tree (RB-tree).

2) Generically connect GPU VA mappings to their backing buffers, in
   particular DRM GEM objects.

3) Provide a common implementation to perform more complex mapping
   operations on the GPU VA space. In particular splitting and merging
   of GPU VA mappings, e.g. for intersecting mapping requests or partial
   unmap requests.

Acked-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Acked-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Tested-by: Matthew Brost <matthew.brost@intel.com>
Tested-by: Donald Robson <donald.robson@imgtec.com>
Suggested-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Danilo Krummrich <dakr@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230720001443.2380-2-dakr@redhat.com
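To illustrate the GEM-side interface this patch introduces, a driver could walk
the VA mappings attached to a buffer object roughly as follows. This is a
minimal sketch, not part of the patch: it assumes the drm_gpuva_mgr.h header
and the drm_gpuva va.addr/va.range fields added elsewhere in this series, and
dump_gem_mappings is a hypothetical helper name.

	#include <drm/drm_gem.h>
	#include <drm/drm_gpuva_mgr.h>	/* assumed header name from this series */

	/* Hypothetical helper: print every GPU VA mapping backing a GEM object. */
	static void dump_gem_mappings(struct drm_gem_object *obj)
	{
		struct drm_gpuva *va;

		/* By default the gpuva list is protected by the GEM's dma-resv lock. */
		dma_resv_lock(obj->resv, NULL);
		drm_gem_for_each_gpuva(va, obj)
			pr_info("VA [0x%llx, 0x%llx)\n",
				va->va.addr, va->va.addr + va->va.range);
		dma_resv_unlock(obj->resv);
	}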
Diffstat (limited to 'include/drm/drm_gem.h')
-rw-r--r--  include/drm/drm_gem.h | 79
1 file changed, 79 insertions(+), 0 deletions(-)
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index bbc721870c13..c0b13c43b459 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -36,6 +36,8 @@
#include <linux/kref.h>
#include <linux/dma-resv.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
#include <drm/drm_vma_manager.h>
@@ -380,6 +382,22 @@ struct drm_gem_object {
struct dma_resv _resv;
/**
+ * @gpuva:
+ *
+ * Provides the list of GPU VAs attached to this GEM object.
+ *
+ * Drivers should lock list accesses with the GEM's &dma_resv lock
+ * (&drm_gem_object.resv) or a custom lock if one is provided.
+ */
+ struct {
+ struct list_head list;
+
+#ifdef CONFIG_LOCKDEP
+ struct lockdep_map *lock_dep_map;
+#endif
+ } gpuva;
+
+ /**
* @funcs:
*
* Optional GEM object functions. If this is set, it will be used instead of the
@@ -526,4 +544,65 @@ unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
int drm_gem_evict(struct drm_gem_object *obj);
+#ifdef CONFIG_LOCKDEP
+/**
+ * drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list.
+ * @obj: the &drm_gem_object
+ * @lock: the lock used to protect the gpuva list. The locking primitive
+ * must contain a dep_map field.
+ *
+ * Call this if you're not protecting access to the gpuva list with the
+ * dma-resv lock; otherwise drm_gem_gpuva_init() takes care of
+ * initializing lock_dep_map for you.
+ */
+#define drm_gem_gpuva_set_lock(obj, lock) \
+ if (!(obj)->gpuva.lock_dep_map) \
+ (obj)->gpuva.lock_dep_map = &(lock)->dep_map
+#define drm_gem_gpuva_assert_lock_held(obj) \
+ lockdep_assert(lock_is_held((obj)->gpuva.lock_dep_map))
+#else
+#define drm_gem_gpuva_set_lock(obj, lock) do {} while (0)
+#define drm_gem_gpuva_assert_lock_held(obj) do {} while (0)
+#endif
+
+/**
+ * drm_gem_gpuva_init() - initialize the gpuva list of a GEM object
+ * @obj: the &drm_gem_object
+ *
+ * This initializes the &drm_gem_object's &drm_gpuva list.
+ *
+ * Calling this function is only necessary for drivers intending to support the
+ * &drm_driver_feature DRIVER_GEM_GPUVA.
+ */
+static inline void drm_gem_gpuva_init(struct drm_gem_object *obj)
+{
+ INIT_LIST_HEAD(&obj->gpuva.list);
+ drm_gem_gpuva_set_lock(obj, &obj->resv->lock.base);
+}
+
+/**
+ * drm_gem_for_each_gpuva() - iterator to walk over a list of gpuvas
+ * @entry__: &drm_gpuva structure to assign to in each iteration step
+ * @obj__: the &drm_gem_object the &drm_gpuvas to walk are associated with
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the
+ * &drm_gem_object.
+ */
+#define drm_gem_for_each_gpuva(entry__, obj__) \
+ list_for_each_entry(entry__, &(obj__)->gpuva.list, gem.entry)
+
+/**
+ * drm_gem_for_each_gpuva_safe() - iterator to safely walk over a list of
+ * gpuvas
+ * @entry__: &drm_gpuva structure to assign to in each iteration step
+ * @next__: &drm_gpuva structure to store the next iteration step
+ * @obj__: the &drm_gem_object the &drm_gpuvas to walk are associated with
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the
+ * &drm_gem_object. It is implemented with list_for_each_entry_safe(), hence
+ * it is safe against removal of elements.
+ */
+#define drm_gem_for_each_gpuva_safe(entry__, next__, obj__) \
+ list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, gem.entry)
+
#endif /* __DRM_GEM_H__ */
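For drivers that do not protect the gpuva list with the dma-resv lock,
drm_gem_gpuva_set_lock() above lets lockdep track a driver-private lock
instead. The sketch below shows one way this might be wired up; it is an
assumption-laden illustration, not part of the patch: struct my_gem,
my_gem_gpuva_setup() and my_gem_unlink_all() are hypothetical names, the
object is assumed to be zero-initialized, and drm_gpuva_mgr.h is the header
added elsewhere in this series. Note that the custom lock has to be
registered before drm_gem_gpuva_init(), since the macro only assigns
lock_dep_map while it is still NULL.

	#include <linux/mutex.h>
	#include <drm/drm_gem.h>
	#include <drm/drm_gpuva_mgr.h>	/* assumed header name from this series */

	/* Hypothetical driver BO embedding a drm_gem_object. */
	struct my_gem {
		struct drm_gem_object base;
		struct mutex gpuva_lock;	/* protects base.gpuva.list */
	};

	static void my_gem_gpuva_setup(struct my_gem *bo)
	{
		mutex_init(&bo->gpuva_lock);
		/*
		 * Register the custom lock first (the object is assumed to be
		 * zero-initialized); drm_gem_gpuva_init() would otherwise point
		 * lockdep at the dma-resv lock.
		 */
		drm_gem_gpuva_set_lock(&bo->base, &bo->gpuva_lock);
		drm_gem_gpuva_init(&bo->base);
	}

	/* Detach all mappings of a BO; the _safe variant tolerates removal. */
	static void my_gem_unlink_all(struct my_gem *bo)
	{
		struct drm_gpuva *va, *next;

		mutex_lock(&bo->gpuva_lock);
		drm_gem_gpuva_assert_lock_held(&bo->base);
		drm_gem_for_each_gpuva_safe(va, next, &bo->base)
			list_del_init(&va->gem.entry);
		mutex_unlock(&bo->gpuva_lock);
	}

A real driver would typically use the series' drm_gpuva_link()/drm_gpuva_unlink()
helpers for attaching and detaching mappings rather than open-coding the list
manipulation shown here.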