Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 303
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_types.h | 23
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 68
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_mocs.c | 23
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/shmem_utils.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_request.h | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem.c | 4
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_request.c | 8
24 files changed, 341 insertions(+), 208 deletions(-)
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index ba26545392bc..53a00cf3fa32 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -18748,7 +18748,7 @@ int intel_modeset_init(struct drm_i915_private *i915)
*/
ret = intel_initial_commit(&i915->drm);
if (ret)
- return ret;
+ drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
intel_overlay_setup(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 3896d08c4177..2165398d2c7c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -615,7 +615,7 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
return 0;
}
/* Also take into account max slice width */
- min_slice_count = min_t(u8, min_slice_count,
+ min_slice_count = max_t(u8, min_slice_count,
DIV_ROUND_UP(mode_hdisplay,
max_slice_width));
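The min_t() -> max_t() change above matters because DIV_ROUND_UP(mode_hdisplay, max_slice_width) is a lower bound: it is the fewest slices for which no single slice exceeds the sink's maximum slice width, so the code must raise min_slice_count to it, never reduce below it. A minimal userspace sketch of the corrected clamp (the names echo the driver code, but this is an illustration, not the kernel function):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static unsigned int slice_count(unsigned int min_slice_count,
                                    unsigned int mode_hdisplay,
                                    unsigned int max_slice_width)
    {
            /* ceil(hdisplay / max_slice_width) is a *floor* on the
             * slice count, so raise min_slice_count to it: max(). */
            unsigned int lo = DIV_ROUND_UP(mode_hdisplay, max_slice_width);

            return min_slice_count > lo ? min_slice_count : lo;
    }

    int main(void)
    {
            /* 5120px-wide mode, 2560px max slice width: at least two
             * slices are needed; the old min_t() left a count of 1. */
            printf("%u\n", slice_count(1, 5120, 2560)); /* prints 2 */
            return 0;
    }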
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 0c8684634fca..27f04aed8764 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -23,6 +23,7 @@
*
*/
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
@@ -719,11 +720,13 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
}
static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
- struct drm_connector_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
+ connector);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
- struct intel_crtc *crtc = to_intel_crtc(state->crtc);
+ struct intel_crtc *crtc = to_intel_crtc(connector_state->crtc);
return &intel_dp->mst_encoders[crtc->pipe]->base.base;
}
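The hunk above tracks a tree-wide change to drm_connector_helper_funcs.atomic_best_encoder: the callback now receives the whole drm_atomic_state and fetches its own new connector state, instead of being handed a single connector state. A sketch of the resulting pattern; my_encoder_for() is a hypothetical stand-in for the driver's encoder lookup:

    #include <drm/drm_atomic.h>

    static struct drm_encoder *
    my_atomic_best_encoder(struct drm_connector *connector,
                           struct drm_atomic_state *state)
    {
            /* Look up the not-yet-committed state for this connector. */
            struct drm_connector_state *conn_state =
                    drm_atomic_get_new_connector_state(state, connector);

            /* The CRTC chosen for this connector lives in the new state. */
            return my_encoder_for(conn_state->crtc);
    }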
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 0dd477e56573..04e9c04545ad 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -114,8 +114,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
if (ret)
return ret;
- fput(vma->vm_file);
- vma->vm_file = get_file(obj->base.filp);
+ vma_set_file(vma, obj->base.filp);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 1904e6e5ea64..bcc80f428172 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -382,7 +382,7 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
return true;
if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
- (vma->node.start + vma->node.size - 1) >> 32)
+ (vma->node.start + vma->node.size + 4095) >> 32)
return true;
if (flags & __EXEC_OBJECT_NEEDS_MAP &&
@@ -3097,7 +3097,7 @@ static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
break;
}
-static void eb_request_add(struct i915_execbuffer *eb)
+static int eb_request_add(struct i915_execbuffer *eb, int err)
{
struct i915_request *rq = eb->request;
struct intel_timeline * const tl = i915_request_timeline(rq);
@@ -3118,6 +3118,7 @@ static void eb_request_add(struct i915_execbuffer *eb)
/* Serialise with context_close via the add_to_timeline */
i915_request_set_error_once(rq, -ENOENT);
__i915_request_skip(rq);
+ err = -ENOENT; /* override any transient errors */
}
__i915_request_queue(rq, &attr);
@@ -3127,6 +3128,8 @@ static void eb_request_add(struct i915_execbuffer *eb)
retire_requests(tl, prev);
mutex_unlock(&tl->mutex);
+
+ return err;
}
static const i915_user_extension_fn execbuf_extensions[] = {
@@ -3332,7 +3335,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
err = eb_submit(&eb, batch);
err_request:
i915_request_get(eb.request);
- eb_request_add(&eb);
+ err = eb_request_add(&eb, err);
if (eb.fences)
signal_fence_array(&eb);
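Two notes on the execbuffer hunks. First, the +4095 makes eb_vma_misplaced() agree with the insert path, which (as the constant suggests) treats the node's end as aligned up to a 4KiB page; without matching rounding, a node ending just below a 4GiB boundary passes one check but fails the other, forcing pointless rebinds. A standalone sketch of the disagreement:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* A node whose last byte sits 2 bytes below 4GiB. */
            uint64_t start = (1ull << 32) - 8192, size = 8191;

            /* Old test: bits 32+ of the last byte are clear -> "fine". */
            printf("old misplaced? %d\n", (start + size - 1) >> 32 != 0);
            /* New test: the page-aligned end crosses 4GiB -> misplaced. */
            printf("new misplaced? %d\n", (start + size + 4095) >> 32 != 0);
            return 0;
    }

Second, eb_request_add() now returns an error so that -ENOENT (context closed while the request was being queued) reaches the ioctl's return value instead of being silently swallowed.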
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 3d69e51f3e4d..ec28a6cde49b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -893,8 +893,9 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
* requires avoiding extraneous references to their filp, hence why
* we prefer to use an anonymous file for their mmaps.
*/
- fput(vma->vm_file);
- vma->vm_file = anon;
+ vma_set_file(vma, anon);
+ /* Drop the initial creation reference, the vma is now holding one. */
+ fput(anon);
switch (mmo->mmap_type) {
case I915_MMAP_TYPE_WC:
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index d8b206e53660..a24cc1ff08a0 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -30,18 +30,21 @@
#include "i915_trace.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
+#include "intel_engine_pm.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
-static void irq_enable(struct intel_engine_cs *engine)
+static bool irq_enable(struct intel_engine_cs *engine)
{
if (!engine->irq_enable)
- return;
+ return false;
/* Caller disables interrupts */
spin_lock(&engine->gt->irq_lock);
engine->irq_enable(engine);
spin_unlock(&engine->gt->irq_lock);
+
+ return true;
}
static void irq_disable(struct intel_engine_cs *engine)
@@ -57,12 +60,11 @@ static void irq_disable(struct intel_engine_cs *engine)
static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
{
- lockdep_assert_held(&b->irq_lock);
-
- if (!b->irq_engine || b->irq_armed)
- return;
-
- if (!intel_gt_pm_get_if_awake(b->irq_engine->gt))
+ /*
+ * Since we are waiting on a request, the GPU should be busy
+ * and should have its own rpm reference.
+ */
+ if (GEM_WARN_ON(!intel_gt_pm_get_if_awake(b->irq_engine->gt)))
return;
/*
@@ -73,25 +75,24 @@ static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
*/
WRITE_ONCE(b->irq_armed, true);
- /*
- * Since we are waiting on a request, the GPU should be busy
- * and should have its own rpm reference. This is tracked
- * by i915->gt.awake, we can forgo holding our own wakref
- * for the interrupt as before i915->gt.awake is released (when
- * the driver is idle) we disarm the breadcrumbs.
- */
-
- if (!b->irq_enabled++)
- irq_enable(b->irq_engine);
+ /* Requests may have completed before we could enable the interrupt. */
+ if (!b->irq_enabled++ && irq_enable(b->irq_engine))
+ irq_work_queue(&b->irq_work);
}
-static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
+static void intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
{
- lockdep_assert_held(&b->irq_lock);
-
- if (!b->irq_engine || !b->irq_armed)
+ if (!b->irq_engine)
return;
+ spin_lock(&b->irq_lock);
+ if (!b->irq_armed)
+ __intel_breadcrumbs_arm_irq(b);
+ spin_unlock(&b->irq_lock);
+}
+
+static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
+{
GEM_BUG_ON(!b->irq_enabled);
if (!--b->irq_enabled)
irq_disable(b->irq_engine);
@@ -100,20 +101,37 @@ static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
intel_gt_pm_put_async(b->irq_engine->gt);
}
+static void intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
+{
+ spin_lock(&b->irq_lock);
+ if (b->irq_armed)
+ __intel_breadcrumbs_disarm_irq(b);
+ spin_unlock(&b->irq_lock);
+}
+
static void add_signaling_context(struct intel_breadcrumbs *b,
struct intel_context *ce)
{
- intel_context_get(ce);
- list_add_tail(&ce->signal_link, &b->signalers);
- if (list_is_first(&ce->signal_link, &b->signalers))
- __intel_breadcrumbs_arm_irq(b);
+ lockdep_assert_held(&ce->signal_lock);
+
+ spin_lock(&b->signalers_lock);
+ list_add_rcu(&ce->signal_link, &b->signalers);
+ spin_unlock(&b->signalers_lock);
}
-static void remove_signaling_context(struct intel_breadcrumbs *b,
+static bool remove_signaling_context(struct intel_breadcrumbs *b,
struct intel_context *ce)
{
- list_del(&ce->signal_link);
- intel_context_put(ce);
+ lockdep_assert_held(&ce->signal_lock);
+
+ if (!list_empty(&ce->signals))
+ return false;
+
+ spin_lock(&b->signalers_lock);
+ list_del_rcu(&ce->signal_link);
+ spin_unlock(&b->signalers_lock);
+
+ return true;
}
static inline bool __request_completed(const struct i915_request *rq)
@@ -174,73 +192,103 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
intel_engine_add_retire(b->irq_engine, tl);
}
-static bool __signal_request(struct i915_request *rq, struct list_head *signals)
+static bool __signal_request(struct i915_request *rq)
{
- clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+ GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
if (!__dma_fence_signal(&rq->fence)) {
i915_request_put(rq);
return false;
}
- list_add_tail(&rq->signal_link, signals);
return true;
}
+static struct llist_node *
+slist_add(struct llist_node *node, struct llist_node *head)
+{
+ node->next = head;
+ return node;
+}
+
static void signal_irq_work(struct irq_work *work)
{
struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work);
const ktime_t timestamp = ktime_get();
- struct intel_context *ce, *cn;
- struct list_head *pos, *next;
- LIST_HEAD(signal);
-
- spin_lock(&b->irq_lock);
+ struct llist_node *signal, *sn;
+ struct intel_context *ce;
- if (list_empty(&b->signalers))
- __intel_breadcrumbs_disarm_irq(b);
+ signal = NULL;
+ if (unlikely(!llist_empty(&b->signaled_requests)))
+ signal = llist_del_all(&b->signaled_requests);
- list_splice_init(&b->signaled_requests, &signal);
+ /*
+ * Keep the irq armed until the interrupt after all listeners are gone.
+ *
+ * Enabling/disabling the interrupt is rather costly, roughly a couple
+ * of hundred microseconds. If we are proactive and enable/disable
+ * the interrupt around every request that wants a breadcrumb, we
+ * quickly drown in the extra orders of magnitude of latency imposed
+ * on request submission.
+ *
+ * So we try to be lazy, and keep the interrupts enabled until no
+ * more listeners appear within a breadcrumb interrupt interval (that
+ * is until a request completes that no one cares about). The
+ * observation is that listeners come in batches, and will often
+ * listen to a bunch of requests in succession. Though note on icl+,
+ * interrupts are always enabled due to concerns with rc6 being
+ * dysfunctional with per-engine interrupt masking.
+ *
+ * We also try to avoid raising too many interrupts, as they may
+ * be generated by userspace batches and it is unfortunately rather
+ * too easy to drown the CPU under a flood of GPU interrupts. Thus
+ * whenever no one appears to be listening, we turn off the interrupts.
+ * Fewer interrupts should conserve power -- at the very least, fewer
+ * interrupt draw less ire from other users of the system and tools
+ * like powertop.
+ */
+ if (!signal && READ_ONCE(b->irq_armed) && list_empty(&b->signalers))
+ intel_breadcrumbs_disarm_irq(b);
- list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
- GEM_BUG_ON(list_empty(&ce->signals));
+ rcu_read_lock();
+ list_for_each_entry_rcu(ce, &b->signalers, signal_link) {
+ struct i915_request *rq;
- list_for_each_safe(pos, next, &ce->signals) {
- struct i915_request *rq =
- list_entry(pos, typeof(*rq), signal_link);
+ list_for_each_entry_rcu(rq, &ce->signals, signal_link) {
+ bool release;
- GEM_BUG_ON(!check_signal_order(ce, rq));
if (!__request_completed(rq))
break;
+ if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL,
+ &rq->fence.flags))
+ break;
+
/*
* Queue for execution after dropping the signaling
* spinlock as the callback chain may end up adding
* more signalers to the same context or engine.
*/
- __signal_request(rq, &signal);
- }
+ spin_lock(&ce->signal_lock);
+ list_del_rcu(&rq->signal_link);
+ release = remove_signaling_context(b, ce);
+ spin_unlock(&ce->signal_lock);
- /*
- * We process the list deletion in bulk, only using a list_add
- * (not list_move) above but keeping the status of
- * rq->signal_link known with the I915_FENCE_FLAG_SIGNAL bit.
- */
- if (!list_is_first(pos, &ce->signals)) {
- /* Advance the list to the first incomplete request */
- __list_del_many(&ce->signals, pos);
- if (&ce->signals == pos) { /* now empty */
+ if (__signal_request(rq))
+ /* We own signal_node now, xfer to local list */
+ signal = slist_add(&rq->signal_node, signal);
+
+ if (release) {
add_retire(b, ce->timeline);
- remove_signaling_context(b, ce);
+ intel_context_put(ce);
}
}
}
+ rcu_read_unlock();
- spin_unlock(&b->irq_lock);
-
- list_for_each_safe(pos, next, &signal) {
+ llist_for_each_safe(signal, sn, signal) {
struct i915_request *rq =
- list_entry(pos, typeof(*rq), signal_link);
+ llist_entry(signal, typeof(*rq), signal_node);
struct list_head cb_list;
spin_lock(&rq->lock);
@@ -251,6 +299,9 @@ static void signal_irq_work(struct irq_work *work)
i915_request_put(rq);
}
+
+ if (!READ_ONCE(b->irq_armed) && !list_empty(&b->signalers))
+ intel_breadcrumbs_arm_irq(b);
}
struct intel_breadcrumbs *
@@ -262,14 +313,15 @@ intel_breadcrumbs_create(struct intel_engine_cs *irq_engine)
if (!b)
return NULL;
- spin_lock_init(&b->irq_lock);
+ b->irq_engine = irq_engine;
+
+ spin_lock_init(&b->signalers_lock);
INIT_LIST_HEAD(&b->signalers);
- INIT_LIST_HEAD(&b->signaled_requests);
+ init_llist_head(&b->signaled_requests);
+ spin_lock_init(&b->irq_lock);
init_irq_work(&b->irq_work, signal_irq_work);
- b->irq_engine = irq_engine;
-
return b;
}
@@ -292,27 +344,28 @@ void intel_breadcrumbs_reset(struct intel_breadcrumbs *b)
void intel_breadcrumbs_park(struct intel_breadcrumbs *b)
{
- unsigned long flags;
-
- if (!READ_ONCE(b->irq_armed))
- return;
-
- spin_lock_irqsave(&b->irq_lock, flags);
- __intel_breadcrumbs_disarm_irq(b);
- spin_unlock_irqrestore(&b->irq_lock, flags);
-
- if (!list_empty(&b->signalers))
- irq_work_queue(&b->irq_work);
+ /* Kick the work once more to drain the signalers */
+ irq_work_sync(&b->irq_work);
+ while (unlikely(READ_ONCE(b->irq_armed))) {
+ local_irq_disable();
+ signal_irq_work(&b->irq_work);
+ local_irq_enable();
+ cond_resched();
+ }
+ GEM_BUG_ON(!list_empty(&b->signalers));
}
void intel_breadcrumbs_free(struct intel_breadcrumbs *b)
{
+ irq_work_sync(&b->irq_work);
+ GEM_BUG_ON(!list_empty(&b->signalers));
+ GEM_BUG_ON(b->irq_armed);
kfree(b);
}
-static void insert_breadcrumb(struct i915_request *rq,
- struct intel_breadcrumbs *b)
+static void insert_breadcrumb(struct i915_request *rq)
{
+ struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs;
struct intel_context *ce = rq->context;
struct list_head *pos;
@@ -327,12 +380,14 @@ static void insert_breadcrumb(struct i915_request *rq,
* its signal completion.
*/
if (__request_completed(rq)) {
- if (__signal_request(rq, &b->signaled_requests))
+ if (__signal_request(rq) &&
+ llist_add(&rq->signal_node, &b->signaled_requests))
irq_work_queue(&b->irq_work);
return;
}
if (list_empty(&ce->signals)) {
+ intel_context_get(ce);
add_signaling_context(b, ce);
pos = &ce->signals;
} else {
@@ -358,18 +413,22 @@ static void insert_breadcrumb(struct i915_request *rq,
break;
}
}
- list_add(&rq->signal_link, pos);
+ list_add_rcu(&rq->signal_link, pos);
GEM_BUG_ON(!check_signal_order(ce, rq));
+ GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags));
set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
- /* Check after attaching to irq, interrupt may have already fired. */
- if (__request_completed(rq))
- irq_work_queue(&b->irq_work);
+ /*
+ * Defer enabling the interrupt to after HW submission and recheck
+ * the request as it may have completed and raised the interrupt as
+ * we were attaching it into the lists.
+ */
+ irq_work_queue(&b->irq_work);
}
bool i915_request_enable_breadcrumb(struct i915_request *rq)
{
- struct intel_breadcrumbs *b;
+ struct intel_context *ce = rq->context;
/* Serialises with i915_request_retire() using rq->lock */
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
@@ -384,67 +443,30 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
return true;
- /*
- * rq->engine is locked by rq->engine->active.lock. That however
- * is not known until after rq->engine has been dereferenced and
- * the lock acquired. Hence we acquire the lock and then validate
- * that rq->engine still matches the lock we hold for it.
- *
- * Here, we are using the breadcrumb lock as a proxy for the
- * rq->engine->active.lock, and we know that since the breadcrumb
- * will be serialised within i915_request_submit/i915_request_unsubmit,
- * the engine cannot change while active as long as we hold the
- * breadcrumb lock on that engine.
- *
- * From the dma_fence_enable_signaling() path, we are outside of the
- * request submit/unsubmit path, and so we must be more careful to
- * acquire the right lock.
- */
- b = READ_ONCE(rq->engine)->breadcrumbs;
- spin_lock(&b->irq_lock);
- while (unlikely(b != READ_ONCE(rq->engine)->breadcrumbs)) {
- spin_unlock(&b->irq_lock);
- b = READ_ONCE(rq->engine)->breadcrumbs;
- spin_lock(&b->irq_lock);
- }
-
- /*
- * Now that we are finally serialised with request submit/unsubmit,
- * [with b->irq_lock] and with i915_request_retire() [via checking
- * SIGNALED with rq->lock] confirm the request is indeed active. If
- * it is no longer active, the breadcrumb will be attached upon
- * i915_request_submit().
- */
+ spin_lock(&ce->signal_lock);
if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
- insert_breadcrumb(rq, b);
-
- spin_unlock(&b->irq_lock);
+ insert_breadcrumb(rq);
+ spin_unlock(&ce->signal_lock);
return true;
}
void i915_request_cancel_breadcrumb(struct i915_request *rq)
{
- struct intel_breadcrumbs *b = rq->engine->breadcrumbs;
+ struct intel_context *ce = rq->context;
+ bool release;
- /*
- * We must wait for b->irq_lock so that we know the interrupt handler
- * has released its reference to the intel_context and has completed
- * the DMA_FENCE_FLAG_SIGNALED_BIT/I915_FENCE_FLAG_SIGNAL dance (if
- * required).
- */
- spin_lock(&b->irq_lock);
- if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
- struct intel_context *ce = rq->context;
+ if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
+ return;
- list_del(&rq->signal_link);
- if (list_empty(&ce->signals))
- remove_signaling_context(b, ce);
+ spin_lock(&ce->signal_lock);
+ list_del_rcu(&rq->signal_link);
+ release = remove_signaling_context(rq->engine->breadcrumbs, ce);
+ spin_unlock(&ce->signal_lock);
+ if (release)
+ intel_context_put(ce);
- clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
- i915_request_put(rq);
- }
- spin_unlock(&b->irq_lock);
+ i915_request_put(rq);
}
static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p)
@@ -454,18 +476,17 @@ static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p)
drm_printf(p, "Signals:\n");
- spin_lock_irq(&b->irq_lock);
- list_for_each_entry(ce, &b->signalers, signal_link) {
- list_for_each_entry(rq, &ce->signals, signal_link) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(ce, &b->signalers, signal_link) {
+ list_for_each_entry_rcu(rq, &ce->signals, signal_link)
drm_printf(p, "\t[%llx:%llx%s] @ %dms\n",
rq->fence.context, rq->fence.seqno,
i915_request_completed(rq) ? "!" :
i915_request_started(rq) ? "*" :
"",
jiffies_to_msecs(jiffies - rq->emitted_jiffies));
- }
}
- spin_unlock_irq(&b->irq_lock);
+ rcu_read_unlock();
}
void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
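The structural change in this file moves b->signaled_requests from a spinlock-guarded list_head to a lock-free llist: completed requests are pushed from any context with llist_add(), whose return value reports whether the list was previously empty (which is how insert_breadcrumb() decides to kick the irq_work), and signal_irq_work() claims the whole backlog at once with llist_del_all(). A minimal sketch of the pattern, assuming a single consumer:

    #include <linux/llist.h>

    struct pending {
            struct llist_node node;
            /* payload ... */
    };

    static LLIST_HEAD(pending_list);

    /* Producer, any context, no lock: true means the list was empty
     * and the caller should wake the consumer. */
    static bool push_pending(struct pending *p)
    {
            return llist_add(&p->node, &pending_list);
    }

    /* Consumer: atomically detach everything, then walk it privately. */
    static void drain_pending(void)
    {
            struct llist_node *pos, *next;

            llist_for_each_safe(pos, next, llist_del_all(&pending_list)) {
                    struct pending *p = llist_entry(pos, typeof(*p), node);

                    /* ... process p ... */
            }
    }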
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
index 8e53b9942695..a74bb3062bd8 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
@@ -29,18 +29,16 @@
* the overhead of waking that client is much preferred.
*/
struct intel_breadcrumbs {
- spinlock_t irq_lock; /* protects the lists used in hardirq context */
-
/* Not all breadcrumbs are attached to physical HW */
struct intel_engine_cs *irq_engine;
+ spinlock_t signalers_lock; /* protects the list of signalers */
struct list_head signalers;
- struct list_head signaled_requests;
+ struct llist_head signaled_requests;
+ spinlock_t irq_lock; /* protects the interrupt from hardirq context */
struct irq_work irq_work; /* for use from inside irq_lock */
-
unsigned int irq_enabled;
-
bool irq_armed;
};
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 92a3f25c4006..349e7fa1488d 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -25,11 +25,18 @@ static struct intel_context *intel_context_alloc(void)
return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}
-void intel_context_free(struct intel_context *ce)
+static void rcu_context_free(struct rcu_head *rcu)
{
+ struct intel_context *ce = container_of(rcu, typeof(*ce), rcu);
+
kmem_cache_free(global.slab_ce, ce);
}
+void intel_context_free(struct intel_context *ce)
+{
+ call_rcu(&ce->rcu, rcu_context_free);
+}
+
struct intel_context *
intel_context_create(struct intel_engine_cs *engine)
{
@@ -356,8 +363,7 @@ static int __intel_context_active(struct i915_active *active)
}
void
-intel_context_init(struct intel_context *ce,
- struct intel_engine_cs *engine)
+intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
{
GEM_BUG_ON(!engine->cops);
GEM_BUG_ON(!engine->gt->vm);
@@ -373,7 +379,8 @@ intel_context_init(struct intel_context *ce,
ce->vm = i915_vm_get(engine->gt->vm);
- INIT_LIST_HEAD(&ce->signal_link);
+ /* NB ce->signal_link/lock is used under RCU */
+ spin_lock_init(&ce->signal_lock);
INIT_LIST_HEAD(&ce->signals);
mutex_init(&ce->pin_mutex);
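Context frees now go through call_rcu() so the new RCU readers in intel_breadcrumbs.c, which walk ce->signal_link under rcu_read_lock(), can never chase a pointer into freed memory. The rcu_head shares storage with the kref (see the intel_context_types.h hunk below), which is safe only because the refcount has already hit zero, and must never be revived, by the time the callback is queued. A generic sketch of the pattern:

    #include <linux/kref.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct obj {
            union {
                    struct kref ref;     /* valid while the object lives */
                    struct rcu_head rcu; /* valid only after the last put */
            };
            /* ... fields readers touch under rcu_read_lock() ... */
    };

    static void obj_free_rcu(struct rcu_head *rcu)
    {
            kfree(container_of(rcu, struct obj, rcu));
    }

    static void obj_release(struct kref *ref)
    {
            struct obj *o = container_of(ref, struct obj, ref);

            /* Queueing the callback overwrites the kref, hence the
             * "no kref_get_unless_zero()!" warning in the union. */
            call_rcu(&o->rcu, obj_free_rcu);
    }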
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 552cb57a2e8c..52fa9c132746 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -25,6 +25,7 @@ DECLARE_EWMA(runtime, 3, 8);
struct i915_gem_context;
struct i915_gem_ww_ctx;
struct i915_vma;
+struct intel_breadcrumbs;
struct intel_context;
struct intel_ring;
@@ -44,7 +45,16 @@ struct intel_context_ops {
};
struct intel_context {
- struct kref ref;
+ /*
+ * Note: Some fields may be accessed under RCU.
+ *
+ * Unless otherwise noted a field can safely be assumed to be protected
+ * by strong reference counting.
+ */
+ union {
+ struct kref ref; /* no kref_get_unless_zero()! */
+ struct rcu_head rcu;
+ };
struct intel_engine_cs *engine;
struct intel_engine_cs *inflight;
@@ -54,8 +64,15 @@ struct intel_context {
struct i915_address_space *vm;
struct i915_gem_context __rcu *gem_context;
- struct list_head signal_link;
- struct list_head signals;
+ /*
+ * @signal_lock protects the list of requests that need signaling,
+ * @signals. While there are any requests that need signaling,
+ * we add the context to the breadcrumbs worker, and remove it
+ * upon completion/cancellation of the last request.
+ */
+ struct list_head signal_link; /* Accessed under RCU */
+ struct list_head signals; /* Guarded by signal_lock */
+ spinlock_t signal_lock; /* protects signals, the list of requests */
struct i915_vma *state;
struct intel_ring *ring;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 8a51c1c3a091..7614a3d24fca 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -182,6 +182,7 @@
struct virtual_engine {
struct intel_engine_cs base;
struct intel_context context;
+ struct rcu_work rcu;
/*
* We allow only a single request through the virtual engine at a time
@@ -2837,6 +2838,9 @@ static void __execlists_hold(struct i915_request *rq)
static bool execlists_hold(struct intel_engine_cs *engine,
struct i915_request *rq)
{
+ if (i915_request_on_hold(rq))
+ return false;
+
spin_lock_irq(&engine->active.lock);
if (i915_request_completed(rq)) { /* too late! */
@@ -3220,8 +3224,10 @@ static void execlists_submission_tasklet(unsigned long data)
spin_unlock_irqrestore(&engine->active.lock, flags);
/* Recheck after serialising with direct-submission */
- if (unlikely(timeout && preempt_timeout(engine)))
+ if (unlikely(timeout && preempt_timeout(engine))) {
+ cancel_timer(&engine->execlists.preempt);
execlists_reset(engine, "preemption time out");
+ }
}
}
@@ -5480,44 +5486,90 @@ static struct list_head *virtual_queue(struct virtual_engine *ve)
return &ve->base.execlists.default_priolist.requests[0];
}
-static void virtual_context_destroy(struct kref *kref)
+static void rcu_virtual_context_destroy(struct work_struct *wrk)
{
struct virtual_engine *ve =
- container_of(kref, typeof(*ve), context.ref);
+ container_of(wrk, typeof(*ve), rcu.work);
unsigned int n;
- GEM_BUG_ON(!list_empty(virtual_queue(ve)));
- GEM_BUG_ON(ve->request);
GEM_BUG_ON(ve->context.inflight);
+ /* Preempt-to-busy may leave a stale request behind. */
+ if (unlikely(ve->request)) {
+ struct i915_request *old;
+
+ spin_lock_irq(&ve->base.active.lock);
+
+ old = fetch_and_zero(&ve->request);
+ if (old) {
+ GEM_BUG_ON(!i915_request_completed(old));
+ __i915_request_submit(old);
+ i915_request_put(old);
+ }
+
+ spin_unlock_irq(&ve->base.active.lock);
+ }
+
+ /*
+ * Flush the tasklet in case it is still running on another core.
+ *
+ * This needs to be done before we remove ourselves from the siblings'
+ * rbtrees as in the case it is running in parallel, it may reinsert
+ * the rb_node into a sibling.
+ */
+ tasklet_kill(&ve->base.execlists.tasklet);
+
+ /* Decouple ourselves from the siblings, no more access allowed. */
for (n = 0; n < ve->num_siblings; n++) {
struct intel_engine_cs *sibling = ve->siblings[n];
struct rb_node *node = &ve->nodes[sibling->id].rb;
- unsigned long flags;
if (RB_EMPTY_NODE(node))
continue;
- spin_lock_irqsave(&sibling->active.lock, flags);
+ spin_lock_irq(&sibling->active.lock);
/* Detachment is lazily performed in the execlists tasklet */
if (!RB_EMPTY_NODE(node))
rb_erase_cached(node, &sibling->execlists.virtual);
- spin_unlock_irqrestore(&sibling->active.lock, flags);
+ spin_unlock_irq(&sibling->active.lock);
}
GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
+ GEM_BUG_ON(!list_empty(virtual_queue(ve)));
if (ve->context.state)
__execlists_context_fini(&ve->context);
intel_context_fini(&ve->context);
+ intel_breadcrumbs_free(ve->base.breadcrumbs);
intel_engine_free_request_pool(&ve->base);
kfree(ve->bonds);
kfree(ve);
}
+static void virtual_context_destroy(struct kref *kref)
+{
+ struct virtual_engine *ve =
+ container_of(kref, typeof(*ve), context.ref);
+
+ GEM_BUG_ON(!list_empty(&ve->context.signals));
+
+ /*
+ * When destroying the virtual engine, we have to be aware that
+ * it may still be in use from an hardirq/softirq context causing
+ * the resubmission of a completed request (background completion
+ * due to preempt-to-busy). Before we can free the engine, we need
+ * to flush the submission code and tasklets that are still potentially
+ * accessing the engine. Flushing the tasklets requires process context,
+ * and since we can guard the resubmit onto the engine with an RCU read
+ * lock, we can delegate the free of the engine to an RCU worker.
+ */
+ INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy);
+ queue_rcu_work(system_wq, &ve->rcu);
+}
+
static void virtual_engine_initial_hint(struct virtual_engine *ve)
{
int swp;
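The destroy path above needs two things at once: an RCU grace period (so hardirq/softirq resubmission paths covered by rcu_read_lock() drain out) and process context (tasklet_kill() can block). rcu_work gives exactly that combination: queue_rcu_work() schedules the work item only after a grace period has elapsed, and the handler then runs on a workqueue. A condensed sketch:

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct engine {
            struct rcu_work rcu;
            /* ... */
    };

    static void engine_destroy_worker(struct work_struct *wrk)
    {
            struct engine *e = container_of(wrk, struct engine, rcu.work);

            /* Process context, after a full RCU grace period: safe to
             * kill tasklets and take sleeping locks before freeing. */
            kfree(e);
    }

    static void engine_release(struct engine *e) /* final-put path */
    {
            INIT_RCU_WORK(&e->rcu, engine_destroy_worker);
            queue_rcu_work(system_wq, &e->rcu);
    }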
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 254873e1646e..ab6870242e18 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -59,8 +59,7 @@ struct drm_i915_mocs_table {
#define _L3_CACHEABILITY(value) ((value) << 4)
/* Helper defines */
-#define GEN9_NUM_MOCS_ENTRIES 62 /* 62 out of 64 - 63 & 64 are reserved. */
-#define GEN11_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
+#define GEN9_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
/* (e)LLC caching options */
/*
@@ -131,7 +130,19 @@ static const struct drm_i915_mocs_entry skl_mocs_table[] = {
GEN9_MOCS_ENTRIES,
MOCS_ENTRY(I915_MOCS_CACHED,
LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3),
- L3_3_WB)
+ L3_3_WB),
+
+ /*
+ * mocs:63
+ * - used by the L3 for all of its evictions.
+ * Thus it is expected to allow LLC cacheability to enable coherent
+ * flows to be maintained.
+ * - used to force L3 uncacheable cycles.
+ * Thus it is expected to make the surface L3 uncacheable.
+ */
+ MOCS_ENTRY(63,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_1_UC)
};
/* NOTE: the LE_TGT_CACHE is not used on Broxton */
@@ -349,15 +360,15 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
if (IS_DG1(i915)) {
table->size = ARRAY_SIZE(dg1_mocs_table);
table->table = dg1_mocs_table;
- table->n_entries = GEN11_NUM_MOCS_ENTRIES;
+ table->n_entries = GEN9_NUM_MOCS_ENTRIES;
} else if (INTEL_GEN(i915) >= 12) {
table->size = ARRAY_SIZE(tgl_mocs_table);
table->table = tgl_mocs_table;
- table->n_entries = GEN11_NUM_MOCS_ENTRIES;
+ table->n_entries = GEN9_NUM_MOCS_ENTRIES;
} else if (IS_GEN(i915, 11)) {
table->size = ARRAY_SIZE(icl_mocs_table);
table->table = icl_mocs_table;
- table->n_entries = GEN11_NUM_MOCS_ENTRIES;
+ table->n_entries = GEN9_NUM_MOCS_ENTRIES;
} else if (IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) {
table->size = ARRAY_SIZE(skl_mocs_table);
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 0d88f17799ff..b629eeb14002 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -897,6 +897,10 @@ void intel_rps_park(struct intel_rps *rps)
adj = -2;
rps->last_adj = adj;
rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq);
+ if (rps->cur_freq < rps->efficient_freq) {
+ rps->cur_freq = rps->efficient_freq;
+ rps->last_adj = 0;
+ }
GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index fed9503a7c4e..adc9a8ea410a 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -131,8 +131,10 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
return;
}
- if (wal->list)
+ if (wal->list) {
memcpy(list, wal->list, sizeof(*wa) * wal->count);
+ kfree(wal->list);
+ }
wal->list = list;
}
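The workarounds hunk plugs a leak in _wa_add()'s open-coded array growth: the larger list was allocated and the entries copied across, but the old allocation was never freed, leaking one copy per growth step. The general shape, sketched generically:

    #include <linux/slab.h>
    #include <linux/string.h>

    /* Grow-by-copy without krealloc(): allocate, copy, free the old. */
    static void *grow_array(void *old, size_t old_n, size_t new_n,
                            size_t elem_size, gfp_t gfp)
    {
            void *list = kmalloc_array(new_n, elem_size, gfp);

            if (!list)
                    return NULL; /* caller keeps using the old array */
            if (old) {
                    memcpy(list, old, old_n * elem_size);
                    kfree(old);  /* the line the fix above adds */
            }
            return list;
    }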
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index f011ea42487e..5982b62f913d 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -73,7 +73,7 @@ void *shmem_pin_map(struct file *file)
mapping_set_unevictable(file->f_mapping);
return vaddr;
err_page:
- while (--i >= 0)
+ while (i--)
put_page(pages[i]);
kvfree(pages);
return NULL;
@@ -103,10 +103,13 @@ static int __shmem_rw(struct file *file, loff_t off,
return PTR_ERR(page);
vaddr = kmap(page);
- if (write)
+ if (write) {
memcpy(vaddr + offset_in_page(off), ptr, this);
- else
+ set_page_dirty(page);
+ } else {
memcpy(ptr, vaddr + offset_in_page(off), this);
+ }
+ mark_page_accessed(page);
kunmap(page);
put_page(page);
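Two independent fixes in shmem_utils.c. The unwind loop used "while (--i >= 0)" with an unsigned counter, so the condition is always true and a partial failure would loop forever, underflowing i; "while (i--)" visits i-1 down to 0 and then stops. A tiny demonstration:

    #include <stdio.h>

    int main(void)
    {
            unsigned long i = 3;

            /* With "--i >= 0" this would never terminate: 0 - 1 wraps
             * to ULONG_MAX. "i--" releases pages 2, 1, 0 and stops. */
            while (i--)
                    printf("put_page(pages[%lu])\n", i);
            return 0;
    }

The __shmem_rw() change marks a page dirty after writing through the kernel mapping, so reclaim and writeback do not discard the new contents, and marks it accessed so the LRU sees the use.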
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 5c1fcac260d3..a15f87539657 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -164,7 +164,7 @@ static unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = {
/* let the virtual display support DP1.2 */
static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
- 0x12, 0x014, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ 0x12, 0x014, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
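On the DPCD fix above: the receiver-capability byte at offset 2 packs MAX_LANE_COUNT into the low bits and ENHANCED_FRAME_CAP into bit 7, so 0x04 advertised a plain 4-lane sink while 0x84 additionally advertises enhanced framing. A quick decode; the mask and bit values mirror drm_dp_helper.h:

    #include <stdio.h>

    #define DP_MAX_LANE_COUNT_MASK 0x1f
    #define DP_ENHANCED_FRAME_CAP  (1 << 7)

    int main(void)
    {
            unsigned char dpcd2 = 0x84; /* new value at DPCD offset 2 */

            printf("max lanes: %d\n", dpcd2 & DP_MAX_LANE_COUNT_MASK);
            printf("enhanced framing: %s\n",
                   dpcd2 & DP_ENHANCED_FRAME_CAP ? "yes" : "no");
            return 0;
    }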
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index d830b6c65284..60f1a386dd06 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -829,8 +829,10 @@ static int intel_vgpu_open(struct mdev_device *mdev)
/* Take a module reference as mdev core doesn't take
* a reference for vendor driver.
*/
- if (!try_module_get(THIS_MODULE))
+ if (!try_module_get(THIS_MODULE)) {
+ ret = -ENODEV;
goto undo_group;
+ }
ret = kvmgt_guest_init(mdev);
if (ret)
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 1c8e63f84134..e49944fde333 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -439,7 +439,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (IS_BROADWELL(dev_priv))
ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B);
- else
+ /* FixMe: Re-enable APL/BXT once vfio_edid is enabled */
+ else if (!IS_BROXTON(dev_priv))
ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
if (ret)
goto out_clean_sched_policy;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 15be8debae54..0a3ee4f9dc0a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1579,9 +1579,9 @@ static inline const struct i915_rev_steppings *
tgl_revids_get(struct drm_i915_private *dev_priv)
{
if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv))
- return tgl_uy_revids;
+ return &tgl_uy_revids[INTEL_REVID(dev_priv)];
else
- return tgl_revids;
+ return &tgl_revids[INTEL_REVID(dev_priv)];
}
#define IS_TGL_DISP_REVID(p, since, until) \
@@ -1591,14 +1591,14 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
#define IS_TGL_UY_GT_REVID(p, since, until) \
((IS_TGL_U(p) || IS_TGL_Y(p)) && \
- tgl_uy_revids->gt_stepping >= (since) && \
- tgl_uy_revids->gt_stepping <= (until))
+ tgl_uy_revids[INTEL_REVID(p)].gt_stepping >= (since) && \
+ tgl_uy_revids[INTEL_REVID(p)].gt_stepping <= (until))
#define IS_TGL_GT_REVID(p, since, until) \
(IS_TIGERLAKE(p) && \
!(IS_TGL_U(p) || IS_TGL_Y(p)) && \
- tgl_revids->gt_stepping >= (since) && \
- tgl_revids->gt_stepping <= (until))
+ tgl_revids[INTEL_REVID(p)].gt_stepping >= (since) && \
+ tgl_revids[INTEL_REVID(p)].gt_stepping <= (until))
#define RKL_REVID_A0 0x0
#define RKL_REVID_B0 0x1
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index dc6febc63f1c..c80eeac53952 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -4242,18 +4242,21 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
*/
dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
- if (HAS_PCH_DG1(dev_priv))
- dev_priv->display.hpd_irq_setup = dg1_hpd_irq_setup;
- else if (INTEL_GEN(dev_priv) >= 11)
- dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
- else if (IS_GEN9_LP(dev_priv))
- dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
- dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
- else if (HAS_GMCH(dev_priv) && I915_HAS_HOTPLUG(dev_priv))
- dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
- else
- dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
+ if (HAS_GMCH(dev_priv)) {
+ if (I915_HAS_HOTPLUG(dev_priv))
+ dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+ } else {
+ if (HAS_PCH_DG1(dev_priv))
+ dev_priv->display.hpd_irq_setup = dg1_hpd_irq_setup;
+ else if (INTEL_GEN(dev_priv) >= 11)
+ dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
+ else if (IS_GEN9_LP(dev_priv))
+ dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
+ dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
+ else
+ dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
+ }
}
/**
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 3b12c8ff7182..649c26518d26 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -914,7 +914,7 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
intel_uncore_rmw(uncore, oastatus_reg,
GEN8_OASTATUS_COUNTER_OVERFLOW |
GEN8_OASTATUS_REPORT_LOST,
- IS_GEN_RANGE(uncore->i915, 8, 10) ?
+ IS_GEN_RANGE(uncore->i915, 8, 11) ?
(GEN8_OASTATUS_HEAD_POINTER_WRAP |
GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
}
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 16b721080195..620b6fab2c5c 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -176,7 +176,9 @@ struct i915_request {
struct intel_context *context;
struct intel_ring *ring;
struct intel_timeline __rcu *timeline;
+
struct list_head signal_link;
+ struct llist_node signal_node;
/*
* The rcu epoch of when this request was allocated. Used to judiciously
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 23a6132c5f4e..412e21604a05 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -211,8 +211,8 @@ static int igt_gem_ww_ctx(void *arg)
return PTR_ERR(obj);
obj2 = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
+ if (IS_ERR(obj2)) {
+ err = PTR_ERR(obj2);
goto put1;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 64bbb8288249..e424a6d1a68c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -2293,8 +2293,10 @@ static int perf_request_latency(void *arg)
struct intel_context *ce;
ce = intel_context_create(engine);
- if (IS_ERR(ce))
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
goto out;
+ }
err = intel_context_pin(ce);
if (err) {
@@ -2467,8 +2469,10 @@ static int perf_series_engines(void *arg)
struct intel_context *ce;
ce = intel_context_create(engine);
- if (IS_ERR(ce))
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
goto out;
+ }
err = intel_context_pin(ce);
if (err) {