Diffstat (limited to 'drivers/gpu/drm/i915/gem/i915_gem_clflush.c')
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_clflush.c  162
1 file changed, 162 insertions, 0 deletions
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
new file mode 100644
index 000000000000..5295285d5843
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -0,0 +1,162 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#include "display/intel_frontbuffer.h"
+
+#include "i915_drv.h"
+#include "i915_gem_clflush.h"
+
+static DEFINE_SPINLOCK(clflush_lock);
+
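+/*
+ * A clflush tracks one asynchronous cache flush: the dma_fence is
+ * signalled once the flush completes, the i915_sw_fence waits for all
+ * fences already tracked in the object's reservation, and the work item
+ * performs the actual flush on the system workqueue.
+ */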
+struct clflush {
+ struct dma_fence dma; /* Must be first for dma_fence_free() */
+ struct i915_sw_fence wait;
+ struct work_struct work;
+ struct drm_i915_gem_object *obj;
+};
+
+static const char *i915_clflush_get_driver_name(struct dma_fence *fence)
+{
+ return DRIVER_NAME;
+}
+
+static const char *i915_clflush_get_timeline_name(struct dma_fence *fence)
+{
+ return "clflush";
+}
+
+static void i915_clflush_release(struct dma_fence *fence)
+{
+ struct clflush *clflush = container_of(fence, typeof(*clflush), dma);
+
+ i915_sw_fence_fini(&clflush->wait);
+
+ BUILD_BUG_ON(offsetof(typeof(*clflush), dma));
+ dma_fence_free(&clflush->dma);
+}
+
+static const struct dma_fence_ops i915_clflush_ops = {
+ .get_driver_name = i915_clflush_get_driver_name,
+ .get_timeline_name = i915_clflush_get_timeline_name,
+ .release = i915_clflush_release,
+};
+
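+/* Flush the object's pages from the CPU caches and notify frontbuffer tracking. */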
+static void __i915_do_clflush(struct drm_i915_gem_object *obj)
+{
+ GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+ drm_clflush_sg(obj->mm.pages);
+ intel_fb_obj_flush(obj, ORIGIN_CPU);
+}
+
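+/*
+ * Deferred flush worker: pin the backing pages so they cannot disappear
+ * beneath us, flush them, then signal the fence to release any waiters.
+ */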
+static void i915_clflush_work(struct work_struct *work)
+{
+ struct clflush *clflush = container_of(work, typeof(*clflush), work);
+ struct drm_i915_gem_object *obj = clflush->obj;
+
+ if (i915_gem_object_pin_pages(obj)) {
+ DRM_ERROR("Failed to acquire obj->pages for clflushing\n");
+ goto out;
+ }
+
+ __i915_do_clflush(obj);
+
+ i915_gem_object_unpin_pages(obj);
+
+out:
+ i915_gem_object_put(obj);
+
+ dma_fence_signal(&clflush->dma);
+ dma_fence_put(&clflush->dma);
+}
+
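+/*
+ * i915_sw_fence callback: FENCE_COMPLETE fires once every awaited fence
+ * has signalled, at which point the flush work is scheduled; FENCE_FREE
+ * fires when the sw_fence itself is reaped and drops our reference.
+ */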
+static int __i915_sw_fence_call
+i915_clflush_notify(struct i915_sw_fence *fence,
+ enum i915_sw_fence_notify state)
+{
+ struct clflush *clflush = container_of(fence, typeof(*clflush), wait);
+
+ switch (state) {
+ case FENCE_COMPLETE:
+ schedule_work(&clflush->work);
+ break;
+
+ case FENCE_FREE:
+ dma_fence_put(&clflush->dma);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
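+/*
+ * Flush the CPU caches for the object's backing storage, if required.
+ * Unless I915_CLFLUSH_SYNC is set, the flush is queued asynchronously
+ * behind the fences already tracked in the object's reservation.
+ * Returns true if a flush was performed or queued, false if the object
+ * is already coherent.
+ */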
+bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+ unsigned int flags)
+{
+ struct clflush *clflush;
+
+ assert_object_held(obj);
+
+ /*
+ * Stolen memory is always coherent with the GPU as it is explicitly
+ * marked as wc by the system, or the system is cache-coherent.
+ * Similarly, we only access struct pages through the CPU cache, so
+ * anything not backed by physical memory we consider to be always
+ * coherent and in no need of clflushing.
+ */
+ if (!i915_gem_object_has_struct_page(obj)) {
+ obj->cache_dirty = false;
+ return false;
+ }
+
+ /*
+ * If the GPU is snooping the contents of the CPU cache,
+ * we do not need to manually clear the CPU cache lines. However,
+ * the caches are only snooped when the render cache is
+ * flushed/invalidated. As we always have to emit invalidations
+ * and flushes when moving into and out of the RENDER domain, correct
+ * snooping behaviour occurs naturally as the result of our domain
+ * tracking.
+ */
+ if (!(flags & I915_CLFLUSH_FORCE) &&
+ obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
+ return false;
+
+ trace_i915_gem_object_clflush(obj);
+
+ clflush = NULL;
+ if (!(flags & I915_CLFLUSH_SYNC))
+ clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
+ if (clflush) {
+ GEM_BUG_ON(!obj->cache_dirty);
+
+ dma_fence_init(&clflush->dma,
+ &i915_clflush_ops,
+ &clflush_lock,
+ to_i915(obj->base.dev)->mm.unordered_timeline,
+ 0);
+ i915_sw_fence_init(&clflush->wait, i915_clflush_notify);
+
+ clflush->obj = i915_gem_object_get(obj);
+ INIT_WORK(&clflush->work, i915_clflush_work);
+
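+ /*
+ * Grab a second reference: one is dropped by the flush worker
+ * once it has signalled, the other by the FENCE_FREE
+ * notification when the sw_fence is reaped.
+ */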
+ dma_fence_get(&clflush->dma);
+
+ i915_sw_fence_await_reservation(&clflush->wait,
+ obj->base.resv, NULL,
+ true, I915_FENCE_TIMEOUT,
+ I915_FENCE_GFP);
+
+ reservation_object_add_excl_fence(obj->base.resv,
+ &clflush->dma);
+
+ i915_sw_fence_commit(&clflush->wait);
+ } else if (obj->mm.pages) {
+ __i915_do_clflush(obj);
+ } else {
+ GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
+ }
+
+ obj->cache_dirty = false;
+ return true;
+}
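
A minimal caller sketch, for illustration only (not part of this patch): the object-lock helpers and the bare flag value are assumptions based on the GEM API of this era. The usual pattern is to flush an object whose CPU caches are dirty before the GPU samples it, holding the object lock so that the assert_object_held() above is satisfied:

	/* Hypothetical caller, sketching expected usage. */
	i915_gem_object_lock(obj);
	if (obj->cache_dirty)
		/*
		 * flags == 0 queues the flush asynchronously behind the
		 * fences in obj->base.resv; I915_CLFLUSH_SYNC would flush
		 * in place instead.
		 */
		i915_gem_clflush_object(obj, 0);
	i915_gem_object_unlock(obj);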