Diffstat (limited to 'drivers/gpu/drm/i915/gt')
-rw-r--r--  drivers/gpu/drm/i915/gt/debugfs_gt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/gen2_engine_cs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/gen6_engine_cs.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/gen6_ppgtt.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/gen6_renderstate.c | 20
-rw-r--r--  drivers/gpu/drm/i915/gt/gen7_renderstate.c | 20
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_engine_cs.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_renderstate.c | 20
-rw-r--r--  drivers/gpu/drm/i915/gt/gen9_renderstate.c | 20
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 23
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_param.h | 11
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_types.h | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c | 73
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c | 27
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pm.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pm.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_types.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_user.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_user.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 146
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt.c | 102
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c | 23
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h | 21
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gpu_commands.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.c | 47
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c | 47
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_irq.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_irq.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_requests.c | 31
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_requests.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_types.h | 11
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.c | 52
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.h | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_llc.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_llc.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_llc_types.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 55
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc_reg.h | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_mocs.c | 21
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_mocs.h | 22
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ppgtt.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rc6.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rc6.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rc6_types.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_region_lmem.c | 105
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_region_lmem.h | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_renderstate.c | 29
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_renderstate.h | 20
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.c | 33
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset_types.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring_submission.c | 330
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring_types.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps_types.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_sseu.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_sseu.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline.c | 429
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline.h | 23
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline_types.h | 20
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 105
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds_types.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/mock_engine.c | 51
-rw-r--r--  drivers/gpu/drm/i915/gt/mock_engine.h | 21
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_context.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine_cs.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c | 17
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine_pm.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_execlists.c | 77
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_gt_pm.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 29
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_llc.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_llc.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_lrc.c | 19
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_mocs.c | 40
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_rc6.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_rc6.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_ring_submission.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_timeline.c | 180
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_workarounds.c | 101
-rw-r--r--  drivers/gpu/drm/i915/gt/shmem_utils.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_log.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 16
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 6
105 files changed, 1304 insertions, 1420 deletions
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.c b/drivers/gpu/drm/i915/gt/debugfs_gt.c
index 3a21cf63b3f0..591eb60785db 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_gt.c
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: MIT
-
/*
* Copyright © 2019 Intel Corporation
*/
@@ -37,6 +36,7 @@ void intel_gt_debugfs_register_files(struct dentry *root,
{
while (count--) {
umode_t mode = files->fops->write ? 0644 : 0444;
+
if (!files->eval || files->eval(data))
debugfs_create_file(files->name,
mode, root, data,
diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
index b491a64919c8..9646200d2792 100644
--- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
@@ -143,7 +143,7 @@ static u32 *__gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs,
int flush, int post)
{
GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+ GEM_BUG_ON(offset_in_page(rq->hwsp_seqno) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH;
diff --git a/drivers/gpu/drm/i915/gt/gen6_engine_cs.c b/drivers/gpu/drm/i915/gt/gen6_engine_cs.c
index ce38d1bcaba3..b388ceeeb1c9 100644
--- a/drivers/gpu/drm/i915/gt/gen6_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen6_engine_cs.c
@@ -161,7 +161,7 @@ u32 *gen6_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
PIPE_CONTROL_DC_FLUSH_ENABLE |
PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_CS_STALL);
- *cs++ = i915_request_active_timeline(rq)->hwsp_offset |
+ *cs++ = i915_request_active_seqno(rq) |
PIPE_CONTROL_GLOBAL_GTT;
*cs++ = rq->fence.seqno;
@@ -359,7 +359,7 @@ u32 *gen7_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL);
- *cs++ = i915_request_active_timeline(rq)->hwsp_offset;
+ *cs++ = i915_request_active_seqno(rq);
*cs++ = rq->fence.seqno;
*cs++ = MI_USER_INTERRUPT;
@@ -374,7 +374,7 @@ u32 *gen7_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
u32 *gen6_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
{
GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+ GEM_BUG_ON(offset_in_page(rq->hwsp_seqno) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
@@ -394,7 +394,7 @@ u32 *gen7_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
int i;
GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+ GEM_BUG_ON(offset_in_page(rq->hwsp_seqno) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH_DW | MI_INVALIDATE_TLB |
MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.h b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h
index 3357228f3304..6a61a5c3a85a 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.h
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h
@@ -59,9 +59,9 @@ static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base)
for (iter = gen6_pde_index(start); \
length > 0 && iter < I915_PDES && \
(pt = i915_pt_entry(pd, iter), true); \
- ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \
+ ({ u32 temp = ALIGN(start + 1, 1 << GEN6_PDE_SHIFT); \
temp = min(temp - start, length); \
- start += temp, length -= temp; }), ++iter)
+ start += temp; length -= temp; }), ++iter)
#define gen6_for_all_pdes(pt, pd, iter) \
for (iter = 0; \
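
Note: the macro fix above replaces the comma operator with a semicolon between the two updates inside the GCC statement expression; behaviour is unchanged, only the style is corrected. A minimal usage sketch follows (the caller shape and the clear_pt_range() helper are assumptions for illustration, not part of this patch):

	gen6_for_each_pde(pt, pd, start, length, iter) {
		/* visit the page table backing PDE index 'iter';
		 * the macro advances 'start' and shrinks 'length'
		 * by the span each PDE covers (1 << GEN6_PDE_SHIFT).
		 */
		clear_pt_range(pt, start, length);
	}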
diff --git a/drivers/gpu/drm/i915/gt/gen6_renderstate.c b/drivers/gpu/drm/i915/gt/gen6_renderstate.c
index 11c8e7b3dd7c..555e83f3a93a 100644
--- a/drivers/gpu/drm/i915/gt/gen6_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/gen6_renderstate.c
@@ -1,25 +1,7 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2014 Intel Corporation
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Generated by: intel-gpu-tools-1.8-220-g01153e7
*/
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderstate.c b/drivers/gpu/drm/i915/gt/gen7_renderstate.c
index 655180646152..c36e84d30d24 100644
--- a/drivers/gpu/drm/i915/gt/gen7_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/gen7_renderstate.c
@@ -1,25 +1,7 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2014 Intel Corporation
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Generated by: intel-gpu-tools-1.8-220-g01153e7
*/
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index 07ba524da90b..732c2ed1d933 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -42,7 +42,7 @@ int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
vf_flush_wa = true;
/* WaForGAMHang:kbl */
- if (IS_KBL_GT_REVID(rq->engine->i915, 0, KBL_REVID_B0))
+ if (IS_KBL_GT_STEP(rq->engine->i915, 0, STEP_B0))
dc_flush_wa = true;
}
@@ -338,15 +338,14 @@ static u32 preempt_address(struct intel_engine_cs *engine)
static u32 hwsp_offset(const struct i915_request *rq)
{
- const struct intel_timeline_cacheline *cl;
+ const struct intel_timeline *tl;
- /* Before the request is executed, the timeline/cachline is fixed */
+ /* Before the request is executed, the timeline is fixed */
+ tl = rcu_dereference_protected(rq->timeline,
+ !i915_request_signaled(rq));
- cl = rcu_dereference_protected(rq->hwsp_cacheline, 1);
- if (cl)
- return cl->ggtt_offset;
-
- return rcu_dereference_protected(rq->timeline, 1)->hwsp_offset;
+ /* See the comment in i915_request_active_seqno(). */
+ return page_mask_bits(tl->hwsp_offset) + offset_in_page(rq->hwsp_seqno);
}
int gen8_emit_init_breadcrumb(struct i915_request *rq)
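
Note: a sketch of the address math introduced in hwsp_offset() above, assuming the timeline's hwsp_offset may carry status flags in its low bits while rq->hwsp_seqno points at the request's slot within the HWSP page:

	/* page-aligned GGTT base of the timeline's HWSP */
	u32 base = page_mask_bits(tl->hwsp_offset);
	/* this request's seqno slot within that page */
	u32 slot = offset_in_page(rq->hwsp_seqno);
	/* address emitted into the breadcrumb */
	u32 ggtt_addr = base + slot;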
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 755522ced60d..176c19633412 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -5,6 +5,8 @@
#include <linux/log2.h>
+#include "gem/i915_gem_lmem.h"
+
#include "gen8_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
@@ -35,6 +37,9 @@ static u64 gen8_pte_encode(dma_addr_t addr,
if (unlikely(flags & PTE_READ_ONLY))
pte &= ~_PAGE_RW;
+ if (flags & PTE_LM)
+ pte |= GEN12_PPGTT_PTE_LM;
+
switch (level) {
case I915_CACHE_NONE:
pte |= PPAT_UNCACHED;
@@ -145,6 +150,7 @@ static unsigned int gen8_pt_count(u64 start, u64 end)
static unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
{
unsigned int shift = __gen8_pte_shift(vm->top);
+
return (vm->total + (1ull << shift) - 1) >> shift;
}
@@ -557,6 +563,7 @@ static void gen8_ppgtt_insert(struct i915_address_space *vm,
static int gen8_init_scratch(struct i915_address_space *vm)
{
+ u32 pte_flags;
int ret;
int i;
@@ -580,9 +587,13 @@ static int gen8_init_scratch(struct i915_address_space *vm)
if (ret)
return ret;
+ pte_flags = vm->has_read_only;
+ if (i915_gem_object_is_lmem(vm->scratch[0]))
+ pte_flags |= PTE_LM;
+
vm->scratch[0]->encode =
gen8_pte_encode(px_dma(vm->scratch[0]),
- I915_CACHE_LLC, vm->has_read_only);
+ I915_CACHE_LLC, pte_flags);
for (i = 1; i <= vm->top; i++) {
struct drm_i915_gem_object *obj;
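
Note: with the PTE_LM plumbing above, callers mark pages that live in device-local memory so gen8_pte_encode() can set the GEN12 local-memory PTE bit. A minimal sketch, assuming 'obj' and 'addr' are in scope:

	u32 pte_flags = 0;

	if (i915_gem_object_is_lmem(obj))
		pte_flags |= PTE_LM; /* backing store is device-local */

	pte = gen8_pte_encode(addr, I915_CACHE_NONE, pte_flags);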
diff --git a/drivers/gpu/drm/i915/gt/gen8_renderstate.c b/drivers/gpu/drm/i915/gt/gen8_renderstate.c
index 95288a34c15d..ef9d7b0dd2da 100644
--- a/drivers/gpu/drm/i915/gt/gen8_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/gen8_renderstate.c
@@ -1,25 +1,7 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2014 Intel Corporation
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Generated by: intel-gpu-tools-1.8-220-g01153e7
*/
diff --git a/drivers/gpu/drm/i915/gt/gen9_renderstate.c b/drivers/gpu/drm/i915/gt/gen9_renderstate.c
index 7d3ac02f0177..428b724ded3e 100644
--- a/drivers/gpu/drm/i915/gt/gen9_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/gen9_renderstate.c
@@ -1,25 +1,7 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2014 Intel Corporation
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Generated by: intel-gpu-tools-1.19-177-g68e2eab2
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index 34a645d6babd..38cc42783dfb 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -1,25 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * Copyright © 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2015-2021 Intel Corporation
*/
#include <linux/kthread.h>
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 349e7fa1488d..17cf2640b082 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index d24ab6fa0ee5..f83a73a2b39f 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_context_param.h b/drivers/gpu/drm/i915/gt/intel_context_param.h
index f053d8633fe2..3ecacc675f41 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_param.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_param.h
@@ -6,9 +6,18 @@
#ifndef INTEL_CONTEXT_PARAM_H
#define INTEL_CONTEXT_PARAM_H
-struct intel_context;
+#include <linux/types.h>
+
+#include "intel_context.h"
int intel_context_set_ring_size(struct intel_context *ce, long sz);
long intel_context_get_ring_size(struct intel_context *ce);
+static inline int
+intel_context_set_watchdog_us(struct intel_context *ce, u64 timeout_us)
+{
+ ce->watchdog.timeout_us = timeout_us;
+ return 0;
+}
+
#endif /* INTEL_CONTEXT_PARAM_H */
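
Note: a minimal usage sketch for the new setter, assuming 'ce' is an intel_context owned by the caller; the value is only latched into ce->watchdog here and consumed later in the series when requests are constructed:

	/* cancel this context's requests if they run for >100ms */
	intel_context_set_watchdog_us(ce, 100 * 1000);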
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index e10d78601bbd..ed8c447a7346 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
@@ -97,6 +96,10 @@ struct intel_context {
#define CONTEXT_FORCE_SINGLE_SUBMISSION 7
#define CONTEXT_NOPREEMPT 8
+ struct {
+ u64 timeout_us;
+ } watchdog;
+
u32 *lrc_reg_state;
union {
struct {
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 9cf555d6842b..efe935f80c1a 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1,25 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
*/
#include <drm/drm_print.h>
@@ -619,6 +600,7 @@ static void cleanup_status_page(struct intel_engine_cs *engine)
}
static int pin_ggtt_status_page(struct intel_engine_cs *engine,
+ struct i915_gem_ww_ctx *ww,
struct i915_vma *vma)
{
unsigned int flags;
@@ -639,12 +621,13 @@ static int pin_ggtt_status_page(struct intel_engine_cs *engine,
else
flags = PIN_HIGH;
- return i915_ggtt_pin(vma, NULL, 0, flags);
+ return i915_ggtt_pin(vma, ww, 0, flags);
}
static int init_status_page(struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *obj;
+ struct i915_gem_ww_ctx ww;
struct i915_vma *vma;
void *vaddr;
int ret;
@@ -670,30 +653,39 @@ static int init_status_page(struct intel_engine_cs *engine)
vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
- goto err;
+ goto err_put;
}
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ ret = i915_gem_object_lock(obj, &ww);
+ if (!ret && !HWS_NEEDS_PHYSICAL(engine->i915))
+ ret = pin_ggtt_status_page(engine, &ww, vma);
+ if (ret)
+ goto err;
+
vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
- goto err;
+ goto err_unpin;
}
engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
engine->status_page.vma = vma;
- if (!HWS_NEEDS_PHYSICAL(engine->i915)) {
- ret = pin_ggtt_status_page(engine, vma);
- if (ret)
- goto err_unpin;
- }
-
- return 0;
-
err_unpin:
- i915_gem_object_unpin_map(obj);
+ if (ret)
+ i915_vma_unpin(vma);
err:
- i915_gem_object_put(obj);
+ if (ret == -EDEADLK) {
+ ret = i915_gem_ww_ctx_backoff(&ww);
+ if (!ret)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+err_put:
+ if (ret)
+ i915_gem_object_put(obj);
return ret;
}
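
Note: init_status_page() above now follows the standard i915_gem_ww_ctx locking pattern. A distilled sketch of that idiom, with do_pinned_work() as a hypothetical placeholder for the work done under the lock:

	static int lock_and_work(struct drm_i915_gem_object *obj)
	{
		struct i915_gem_ww_ctx ww;
		int err;

		i915_gem_ww_ctx_init(&ww, true /* interruptible */);
	retry:
		err = i915_gem_object_lock(obj, &ww);
		if (!err)
			err = do_pinned_work(obj);
		if (err == -EDEADLK) {
			/* drop contended locks, reacquire, retry */
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);
		return err;
	}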
@@ -763,6 +755,7 @@ static int measure_breadcrumb_dw(struct intel_context *ce)
frame->rq.engine = engine;
frame->rq.context = ce;
rcu_assign_pointer(frame->rq.timeline, ce->timeline);
+ frame->rq.hwsp_seqno = ce->timeline->hwsp_seqno;
frame->ring.vaddr = frame->cs;
frame->ring.size = sizeof(frame->cs);
@@ -1239,14 +1232,14 @@ void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync)
{
struct tasklet_struct *t = &engine->execlists.tasklet;
- if (!t->func)
+ if (!t->callback)
return;
local_bh_disable();
if (tasklet_trylock(t)) {
/* Must wait for any GPU reset in progress. */
if (__tasklet_is_enabled(t))
- t->func(t->data);
+ t->callback(t);
tasklet_unlock(t);
}
local_bh_enable();
@@ -1273,14 +1266,8 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
return true;
/* Waiting to drain ELSP? */
- if (execlists_active(&engine->execlists)) {
- synchronize_hardirq(engine->i915->drm.pdev->irq);
-
- intel_engine_flush_submission(engine);
-
- if (execlists_active(&engine->execlists))
- return false;
- }
+ synchronize_hardirq(to_pci_dev(engine->i915->drm.dev)->irq);
+ intel_engine_flush_submission(engine);
/* ELSP is empty, but there are ready requests? E.g. after reset */
if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index d7be2b9339f9..b99ac41695f3 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
@@ -32,7 +31,7 @@ static bool next_heartbeat(struct intel_engine_cs *engine)
delay = msecs_to_jiffies_timeout(delay);
if (delay >= HZ)
delay = round_jiffies_up_relative(delay);
- mod_delayed_work(system_highpri_wq, &engine->heartbeat.work, delay);
+ mod_delayed_work(system_highpri_wq, &engine->heartbeat.work, delay + 1);
return true;
}
@@ -81,9 +80,7 @@ static void show_heartbeat(const struct i915_request *rq,
static void heartbeat(struct work_struct *wrk)
{
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_MIN),
- };
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_MIN };
struct intel_engine_cs *engine =
container_of(wrk, typeof(*engine), heartbeat.work.work);
struct intel_context *ce = engine->kernel_context;
@@ -106,6 +103,13 @@ static void heartbeat(struct work_struct *wrk)
goto out;
if (engine->heartbeat.systole) {
+ long delay = READ_ONCE(engine->props.heartbeat_interval_ms);
+
+ /* Safeguard against too-fast worker invocations */
+ if (!time_after(jiffies,
+ rq->emitted_jiffies + msecs_to_jiffies(delay)))
+ goto out;
+
if (!i915_sw_fence_signaled(&rq->submit)) {
/*
* Not yet submitted, system is stalled.
@@ -125,9 +129,9 @@ static void heartbeat(struct work_struct *wrk)
* low latency and no jitter] the chance to naturally
* complete before being preempted.
*/
- attr.priority = I915_PRIORITY_MASK;
+ attr.priority = 0;
if (rq->sched.attr.priority >= attr.priority)
- attr.priority |= I915_USER_PRIORITY(I915_PRIORITY_HEARTBEAT);
+ attr.priority = I915_PRIORITY_HEARTBEAT;
if (rq->sched.attr.priority >= attr.priority)
attr.priority = I915_PRIORITY_BARRIER;
@@ -143,6 +147,8 @@ static void heartbeat(struct work_struct *wrk)
"stopped heartbeat on %s",
engine->name);
}
+
+ rq->emitted_jiffies = jiffies;
goto out;
}
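
Note: with the user-priority shift removed, the heartbeat escalation reads directly in scheduler priorities. Roughly, each beat that finds the previous pulse still pending bumps it one rung up this ladder:

	attr.priority = I915_PRIORITY_MIN;        /* first beat: idle */
	attr.priority = 0;                        /* then: default */
	attr.priority = I915_PRIORITY_HEARTBEAT;  /* then: heartbeat */
	attr.priority = I915_PRIORITY_BARRIER;    /* finally: preempt all */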
@@ -279,15 +285,14 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
mutex_unlock(&ce->timeline->mutex);
}
+ intel_engine_flush_submission(engine);
intel_engine_pm_put(engine);
return err;
}
int intel_engine_flush_barriers(struct intel_engine_cs *engine)
{
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_MIN),
- };
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_MIN };
struct intel_context *ce = engine->kernel_context;
struct i915_request *rq;
int err;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
index a7b8c0f9e005..a488ea3e84a3 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index e67d09259dd0..7c9af86fdb1e 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
@@ -27,12 +26,16 @@ static void dbg_poison_ce(struct intel_context *ce)
int type = i915_coherent_map_type(ce->engine->i915);
void *map;
+ if (!i915_gem_object_trylock(obj))
+ return;
+
map = i915_gem_object_pin_map(obj, type);
if (!IS_ERR(map)) {
memset(map, CONTEXT_REDZONE, obj->base.size);
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
}
+ i915_gem_object_unlock(obj);
}
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index 418df0a13145..70ea46d6cfb0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index d2346b425547..883bafc44902 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
index da21d2a10cc9..3cca7ea2d6ea 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.h b/drivers/gpu/drm/i915/gt/intel_engine_user.h
index f845ea1cbfaa..3dc7e8ab9fbc 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index ac1be7a632d3..de124870af44 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -274,22 +274,13 @@ static int effective_prio(const struct i915_request *rq)
static int queue_prio(const struct intel_engine_execlists *execlists)
{
- struct i915_priolist *p;
struct rb_node *rb;
rb = rb_first_cached(&execlists->queue);
if (!rb)
return INT_MIN;
- /*
- * As the priolist[] are inverted, with the highest priority in [0],
- * we have to flip the index value to become priority.
- */
- p = to_priolist(rb);
- if (!I915_USER_PRIORITY_SHIFT)
- return p->priority;
-
- return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);
+ return to_priolist(rb)->priority;
}
static int virtual_prio(const struct intel_engine_execlists *el)
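
Note: a worked example of the packing removed from queue_prio() above, assuming an I915_USER_PRIORITY_SHIFT of 2 purely for illustration. A priolist at priority 1 with sub-level 1 occupied previously yielded:

	/* old: priority 1, sub-level bitmask p->used == 2 (ffs == 2) */
	effective = ((1 + 1) << 2) - 2;	/* == 6 */
	/* new: one request list per priority, key is the priority */
	effective = p->priority;	/* == 1 */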
@@ -470,6 +461,11 @@ static void reset_active(struct i915_request *rq,
ce->lrc.lrca = lrc_update_regs(ce, engine, head);
}
+static bool bad_request(const struct i915_request *rq)
+{
+ return rq->fence.error && i915_request_started(rq);
+}
+
static struct intel_engine_cs *
__execlists_schedule_in(struct i915_request *rq)
{
@@ -482,7 +478,7 @@ __execlists_schedule_in(struct i915_request *rq)
!intel_engine_has_heartbeat(engine)))
intel_context_set_banned(ce);
- if (unlikely(intel_context_is_banned(ce)))
+ if (unlikely(intel_context_is_banned(ce) || bad_request(rq)))
reset_active(rq, engine);
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
@@ -752,9 +748,8 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
{
struct intel_engine_cs *engine =
container_of(execlists, typeof(*engine), execlists);
- struct i915_request * const *port, *rq;
+ struct i915_request * const *port, *rq, *prev = NULL;
struct intel_context *ce = NULL;
- bool sentinel = false;
u32 ccid = -1;
trace_ports(execlists, msg, execlists->pending);
@@ -804,15 +799,20 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
* Sentinels are supposed to be the last request so they flush
* the current execution off the HW. Check that they are the only
* request in the pending submission.
+ *
+ * NB: Due to the async nature of preempt-to-busy and request
+ * cancellation we need to handle the case where a request

+ * becomes a sentinel in parallel to CSB processing.
*/
- if (sentinel) {
+ if (prev && i915_request_has_sentinel(prev) &&
+ !READ_ONCE(prev->fence.error)) {
GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n",
engine->name,
ce->timeline->fence_context,
port - execlists->pending);
return false;
}
- sentinel = i915_request_has_sentinel(rq);
+ prev = rq;
/*
* We want virtual requests to only be in the first slot so
@@ -948,7 +948,7 @@ static bool can_merge_rq(const struct i915_request *prev,
if (__i915_request_is_complete(next))
return true;
- if (unlikely((i915_request_flags(prev) ^ i915_request_flags(next)) &
+ if (unlikely((i915_request_flags(prev) | i915_request_flags(next)) &
(BIT(I915_FENCE_FLAG_NOPREEMPT) |
BIT(I915_FENCE_FLAG_SENTINEL))))
return false;
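
Note: the operator change above fixes the merge test: XOR only fired when the two requests disagreed on a flag, so a pair that both carried the same flag slipped through and could be coalesced into one ELSP port. Illustrative values:

	/* both requests marked as sentinels */
	prev = BIT(I915_FENCE_FLAG_SENTINEL);
	next = BIT(I915_FENCE_FLAG_SENTINEL);

	(prev ^ next) == 0;	/* old test: merge allowed (wrong) */
	(prev | next) != 0;	/* new test: merge refused */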
@@ -1072,7 +1072,6 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl)
__i915_request_has_started(w) &&
!__i915_request_is_complete(rq));
- GEM_BUG_ON(i915_request_is_active(w));
if (!i915_request_is_ready(w))
continue;
@@ -1080,6 +1079,7 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl)
continue;
GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
+ GEM_BUG_ON(i915_request_is_active(w));
list_move_tail(&w->sched.link, &list);
}
@@ -1208,7 +1208,7 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
return 0;
/* Force a fast reset for terminated contexts (ignoring sysfs!) */
- if (unlikely(intel_context_is_banned(rq->context)))
+ if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq)))
return 1;
return READ_ONCE(engine->props.preempt_timeout_ms);
@@ -1452,9 +1452,8 @@ unlock:
while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
- int i;
- priolist_for_each_request_consume(rq, rn, p, i) {
+ priolist_for_each_request_consume(rq, rn, p) {
bool merge = true;
/*
@@ -2344,9 +2343,10 @@ static bool preempt_timeout(const struct intel_engine_cs *const engine)
* Check the unread Context Status Buffers and manage the submission of new
* contexts to the ELSP accordingly.
*/
-static void execlists_submission_tasklet(unsigned long data)
+static void execlists_submission_tasklet(struct tasklet_struct *t)
{
- struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+ struct intel_engine_cs * const engine =
+ from_tasklet(engine, t, execlists.tasklet);
struct i915_request *post[2 * EXECLIST_MAX_PORTS];
struct i915_request **inactive;
@@ -2457,11 +2457,31 @@ static void execlists_submit_request(struct i915_request *request)
spin_unlock_irqrestore(&engine->active.lock, flags);
}
+static int
+__execlists_context_pre_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ struct i915_gem_ww_ctx *ww, void **vaddr)
+{
+ int err;
+
+ err = lrc_pre_pin(ce, engine, ww, vaddr);
+ if (err)
+ return err;
+
+ if (!__test_and_set_bit(CONTEXT_INIT_BIT, &ce->flags)) {
+ lrc_init_state(ce, engine, *vaddr);
+
+ __i915_gem_object_flush_map(ce->state->obj, 0, engine->context_size);
+ }
+
+ return 0;
+}
+
static int execlists_context_pre_pin(struct intel_context *ce,
struct i915_gem_ww_ctx *ww,
void **vaddr)
{
- return lrc_pre_pin(ce, ce->engine, ww, vaddr);
+ return __execlists_context_pre_pin(ce, ce->engine, ww, vaddr);
}
static int execlists_context_pin(struct intel_context *ce, void *vaddr)
@@ -2729,31 +2749,11 @@ static void enable_execlists(struct intel_engine_cs *engine)
enable_error_interrupt(engine);
}
-static bool unexpected_starting_state(struct intel_engine_cs *engine)
-{
- bool unexpected = false;
-
- if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) {
- drm_dbg(&engine->i915->drm,
- "STOP_RING still set in RING_MI_MODE\n");
- unexpected = true;
- }
-
- return unexpected;
-}
-
static int execlists_resume(struct intel_engine_cs *engine)
{
intel_mocs_init_engine(engine);
-
intel_breadcrumbs_reset(engine->breadcrumbs);
- if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) {
- struct drm_printer p = drm_debug_printer(__func__);
-
- intel_engine_dump(engine, &p, NULL);
- }
-
enable_execlists(engine);
return 0;
@@ -2924,9 +2924,10 @@ static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
rcu_read_unlock();
}
-static void nop_submission_tasklet(unsigned long data)
+static void nop_submission_tasklet(struct tasklet_struct *t)
{
- struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+ struct intel_engine_cs * const engine =
+ from_tasklet(engine, t, execlists.tasklet);
/* The driver is wedged; don't process any more events. */
WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN);
@@ -2962,17 +2963,18 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->active.requests, sched.link)
- i915_request_mark_eio(rq);
+ i915_request_put(i915_request_mark_eio(rq));
intel_engine_signal_breadcrumbs(engine);
/* Flush the queued requests to the timeline list (for retiring). */
while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
- int i;
- priolist_for_each_request_consume(rq, rn, p, i) {
- i915_request_mark_eio(rq);
- __i915_request_submit(rq);
+ priolist_for_each_request_consume(rq, rn, p) {
+ if (i915_request_mark_eio(rq)) {
+ __i915_request_submit(rq);
+ i915_request_put(rq);
+ }
}
rb_erase_cached(&p->node, &execlists->queue);
@@ -2981,7 +2983,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
/* On-hold requests will be flushed to timeline upon their release */
list_for_each_entry(rq, &engine->active.hold, sched.link)
- i915_request_mark_eio(rq);
+ i915_request_put(i915_request_mark_eio(rq));
/* Cancel all attached virtual engines */
while ((rb = rb_first_cached(&execlists->virtual))) {
@@ -2994,10 +2996,11 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
spin_lock(&ve->base.active.lock);
rq = fetch_and_zero(&ve->request);
if (rq) {
- i915_request_mark_eio(rq);
-
- rq->engine = engine;
- __i915_request_submit(rq);
+ if (i915_request_mark_eio(rq)) {
+ rq->engine = engine;
+ __i915_request_submit(rq);
+ i915_request_put(rq);
+ }
i915_request_put(rq);
ve->base.execlists.queue_priority_hint = INT_MIN;
@@ -3011,7 +3014,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
execlists->queue = RB_ROOT_CACHED;
GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
- execlists->tasklet.func = nop_submission_tasklet;
+ execlists->tasklet.callback = nop_submission_tasklet;
spin_unlock_irqrestore(&engine->active.lock, flags);
rcu_read_unlock();
@@ -3072,7 +3075,7 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = execlists_submit_request;
engine->schedule = i915_schedule;
- engine->execlists.tasklet.func = execlists_submission_tasklet;
+ engine->execlists.tasklet.callback = execlists_submission_tasklet;
engine->reset.prepare = execlists_reset_prepare;
engine->reset.rewind = execlists_reset_rewind;
@@ -3119,7 +3122,7 @@ static void execlists_release(struct intel_engine_cs *engine)
static void
logical_ring_default_vfuncs(struct intel_engine_cs *engine)
{
- /* Default vfuncs which can be overriden by each engine. */
+ /* Default vfuncs which can be overridden by each engine. */
engine->resume = execlists_resume;
@@ -3195,8 +3198,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
struct intel_uncore *uncore = engine->uncore;
u32 base = engine->mmio_base;
- tasklet_init(&engine->execlists.tasklet,
- execlists_submission_tasklet, (unsigned long)engine);
+ tasklet_setup(&engine->execlists.tasklet, execlists_submission_tasklet);
timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
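
Note: this is the tree-wide tasklet API conversion; the callback now receives the tasklet_struct itself and recovers its container with from_tasklet() instead of casting an opaque unsigned long. Distilled, with my_tasklet_fn as a placeholder name:

	static void my_tasklet_fn(struct tasklet_struct *t)
	{
		struct intel_engine_cs *engine =
			from_tasklet(engine, t, execlists.tasklet);
		/* ... process CSB, dequeue, resubmit ... */
	}

	tasklet_setup(&engine->execlists.tasklet, my_tasklet_fn);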
@@ -3244,7 +3246,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
static struct list_head *virtual_queue(struct virtual_engine *ve)
{
- return &ve->base.execlists.default_priolist.requests[0];
+ return &ve->base.execlists.default_priolist.requests;
}
static void rcu_virtual_context_destroy(struct work_struct *wrk)
@@ -3360,13 +3362,13 @@ static int virtual_context_alloc(struct intel_context *ce)
}
static int virtual_context_pre_pin(struct intel_context *ce,
- struct i915_gem_ww_ctx *ww,
- void **vaddr)
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr)
{
struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
- /* Note: we must use a real engine class for setting up reg state */
- return lrc_pre_pin(ce, ve->siblings[0], ww, vaddr);
+ /* Note: we must use a real engine class for setting up reg state */
+ return __execlists_context_pre_pin(ce, ve->siblings[0], ww, vaddr);
}
static int virtual_context_pin(struct intel_context *ce, void *vaddr)
@@ -3438,9 +3440,10 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
return mask;
}
-static void virtual_submission_tasklet(unsigned long data)
+static void virtual_submission_tasklet(struct tasklet_struct *t)
{
- struct virtual_engine * const ve = (struct virtual_engine *)data;
+ struct virtual_engine * const ve =
+ from_tasklet(ve, t, base.execlists.tasklet);
const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint);
intel_engine_mask_t mask;
unsigned int n;
@@ -3650,9 +3653,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
INIT_LIST_HEAD(virtual_queue(ve));
ve->base.execlists.queue_priority_hint = INT_MIN;
- tasklet_init(&ve->base.execlists.tasklet,
- virtual_submission_tasklet,
- (unsigned long)ve);
+ tasklet_setup(&ve->base.execlists.tasklet, virtual_submission_tasklet);
intel_context_init(&ve->context, &ve->base);
@@ -3680,7 +3681,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
* layering if we handle cloning of the requests and
* submitting a copy into each backend.
*/
- if (sibling->execlists.tasklet.func !=
+ if (sibling->execlists.tasklet.callback !=
execlists_submission_tasklet) {
err = -ENODEV;
goto err_put;
@@ -3840,9 +3841,8 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
count = 0;
for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
- int i;
- priolist_for_each_request(rq, p, i) {
+ priolist_for_each_request(rq, p) {
if (count++ < max - 1)
show_request(m, rq, "\t\t", 0);
else
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.h b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
index a8fd7adefd82..fd61dae820e9 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
@@ -6,6 +6,7 @@
#ifndef __INTEL_EXECLISTS_SUBMISSION_H__
#define __INTEL_EXECLISTS_SUBMISSION_H__
+#include <linux/llist.h>
#include <linux/types.h>
struct drm_printer;
@@ -13,6 +14,7 @@ struct drm_printer;
struct i915_request;
struct intel_context;
struct intel_engine_cs;
+struct intel_gt;
enum {
INTEL_CONTEXT_SCHEDULE_IN = 0,
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 700588bc9d57..670c1271e7d5 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -10,6 +10,8 @@
#include <drm/i915_drm.h>
+#include "gem/i915_gem_lmem.h"
+
#include "intel_gt.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
@@ -92,7 +94,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *i915)
}
/*
- * Certain Gen5 chipsets require require idling the GPU before
+ * Certain Gen5 chipsets require idling the GPU before
* unmapping anything from the GTT when VT-d is enabled.
*/
static bool needs_idle_maps(struct drm_i915_private *i915)
@@ -189,7 +191,12 @@ static u64 gen8_ggtt_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
u32 flags)
{
- return addr | _PAGE_PRESENT;
+ gen8_pte_t pte = addr | _PAGE_PRESENT;
+
+ if (flags & PTE_LM)
+ pte |= GEN12_GGTT_PTE_LM;
+
+ return pte;
}
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
@@ -201,13 +208,13 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
enum i915_cache_level level,
- u32 unused)
+ u32 flags)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen8_pte_t __iomem *pte =
(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
- gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, 0));
+ gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, flags));
ggtt->invalidate(ggtt);
}
@@ -217,7 +224,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
enum i915_cache_level level,
u32 flags)
{
- const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, 0);
+ const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, flags);
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen8_pte_t __iomem *gte;
gen8_pte_t __iomem *end;
@@ -459,6 +466,8 @@ static void ggtt_bind_vma(struct i915_address_space *vm,
pte_flags = 0;
if (i915_gem_object_is_readonly(obj))
pte_flags |= PTE_READ_ONLY;
+ if (i915_gem_object_is_lmem(obj))
+ pte_flags |= PTE_LM;
vm->insert_entries(vm, vma, cache_level, pte_flags);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
@@ -647,7 +656,9 @@ static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
if (err)
goto err_ppgtt;
+ i915_gem_object_lock(ppgtt->vm.scratch[0], NULL);
err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
+ i915_gem_object_unlock(ppgtt->vm.scratch[0]);
if (err)
goto err_stash;
@@ -734,6 +745,7 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
mutex_unlock(&ggtt->vm.mutex);
i915_address_space_fini(&ggtt->vm);
+ dma_resv_fini(&ggtt->vm.resv);
arch_phys_wc_del(ggtt->mtrr);
@@ -792,8 +804,9 @@ static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
phys_addr_t phys_addr;
+ u32 pte_flags;
int ret;
/* For Modern GENs the PTEs and register space are split in the BAR */
@@ -823,9 +836,13 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return ret;
}
+ pte_flags = 0;
+ if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
+ pte_flags |= PTE_LM;
+
ggtt->vm.scratch[0]->encode =
ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
- I915_CACHE_NONE, 0);
+ I915_CACHE_NONE, pte_flags);
return 0;
}
@@ -862,7 +879,7 @@ static struct resource pci_resource(struct pci_dev *pdev, int bar)
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
unsigned int size;
u16 snb_gmch_ctl;
@@ -1006,7 +1023,7 @@ static u64 iris_pte_encode(dma_addr_t addr,
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
unsigned int size;
u16 snb_gmch_ctl;
@@ -1069,7 +1086,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
phys_addr_t gmadr_base;
int ret;
- ret = intel_gmch_probe(i915->bridge_dev, i915->drm.pdev, NULL);
+ ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL);
if (!ret) {
drm_err(&i915->drm, "failed to set up gmch\n");
return -EIO;
@@ -1114,7 +1131,8 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
ggtt->vm.gt = gt;
ggtt->vm.i915 = i915;
- ggtt->vm.dma = &i915->drm.pdev->dev;
+ ggtt->vm.dma = i915->drm.dev;
+ dma_resv_init(&ggtt->vm.resv);
if (INTEL_GEN(i915) <= 5)
ret = i915_gmch_probe(ggtt);
@@ -1122,8 +1140,10 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
ret = gen6_gmch_probe(ggtt);
else
ret = gen8_gmch_probe(ggtt);
- if (ret)
+ if (ret) {
+ dma_resv_fini(&ggtt->vm.resv);
return ret;
+ }
if ((ggtt->vm.total - 1) >> 32) {
drm_err(&i915->drm,
@@ -1247,14 +1267,16 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt)
static struct scatterlist *
rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
unsigned int width, unsigned int height,
- unsigned int stride,
+ unsigned int src_stride, unsigned int dst_stride,
struct sg_table *st, struct scatterlist *sg)
{
unsigned int column, row;
unsigned int src_idx;
for (column = 0; column < width; column++) {
- src_idx = stride * (height - 1) + column + offset;
+ unsigned int left;
+
+ src_idx = src_stride * (height - 1) + column + offset;
for (row = 0; row < height; row++) {
st->nents++;
/*
@@ -1267,8 +1289,25 @@ rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
i915_gem_object_get_dma_address(obj, src_idx);
sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
sg = sg_next(sg);
- src_idx -= stride;
+ src_idx -= src_stride;
}
+
+ left = (dst_stride - height) * I915_GTT_PAGE_SIZE;
+
+ if (!left)
+ continue;
+
+ st->nents++;
+
+ /*
+ * The DE ignores the PTEs for the padding tiles; the sg entry
+ * here is just a convenience to indicate how many padding PTEs
+ * to insert at this spot.
+ */
+ sg_set_page(sg, NULL, left, 0);
+ sg_dma_address(sg) = 0;
+ sg_dma_len(sg) = left;
+ sg = sg_next(sg);
}
return sg;
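
Note: the padding arithmetic above, by example: for a plane 5 tiles high packed into a destination stride of 8 tiles,

	left = (8 - 5) * I915_GTT_PAGE_SIZE;	/* 3 pages of padding */

and the page-less entry (sg_set_page(sg, NULL, left, 0)) simply tells the GGTT insertion path how many padding PTEs to emit for that column.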
@@ -1297,11 +1336,12 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
st->nents = 0;
sg = st->sgl;
- for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
+ for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
sg = rotate_pages(obj, rot_info->plane[i].offset,
rot_info->plane[i].width, rot_info->plane[i].height,
- rot_info->plane[i].stride, st, sg);
- }
+ rot_info->plane[i].src_stride,
+ rot_info->plane[i].dst_stride,
+ st, sg);
return st;
@@ -1319,7 +1359,7 @@ err_st_alloc:
static struct scatterlist *
remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
unsigned int width, unsigned int height,
- unsigned int stride,
+ unsigned int src_stride, unsigned int dst_stride,
struct sg_table *st, struct scatterlist *sg)
{
unsigned int row;
@@ -1352,7 +1392,24 @@ remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
left -= length;
}
- offset += stride - width;
+ offset += src_stride - width;
+
+ left = (dst_stride - width) * I915_GTT_PAGE_SIZE;
+
+ if (!left)
+ continue;
+
+ st->nents++;
+
+ /*
+ * The DE ignores the PTEs for the padding tiles; the sg entry
+ * here is just a convenience to indicate how many padding PTEs
+ * to insert at this spot.
+ */
+ sg_set_page(sg, NULL, left, 0);
+ sg_dma_address(sg) = 0;
+ sg_dma_len(sg) = left;
+ sg = sg_next(sg);
}
return sg;
@@ -1384,7 +1441,8 @@ intel_remap_pages(struct intel_remapped_info *rem_info,
for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
sg = remap_pages(obj, rem_info->plane[i].offset,
rem_info->plane[i].width, rem_info->plane[i].height,
- rem_info->plane[i].stride, st, sg);
+ rem_info->plane[i].src_stride, rem_info->plane[i].dst_stride,
+ st, sg);
}
i915_sg_trim(st);
@@ -1420,7 +1478,7 @@ intel_partial_pages(const struct i915_ggtt_view *view,
if (ret)
goto err_sg_alloc;
- iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset);
+ iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset, true);
GEM_BUG_ON(!iter);
sg = st->sgl;
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 67de2b189598..e72b7a0dc316 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -1,24 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2008-2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
*/
#include "i915_drv.h"
@@ -609,6 +591,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
}
} else {
u32 dimm_c0, dimm_c1;
+
dimm_c0 = intel_uncore_read(uncore, MAD_DIMM_C0);
dimm_c1 = intel_uncore_read(uncore, MAD_DIMM_C1);
dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
@@ -798,10 +781,12 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
i = 0;
for_each_sgt_page(page, sgt_iter, pages) {
char new_bit_17 = page_to_phys(page) >> 17;
+
if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
swizzle_page(page);
set_page_dirty(page);
}
+
i++;
}
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h
index 9eef679e1311..25340be5ecf0 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h
@@ -1,25 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
*/
#ifndef __INTEL_GGTT_FENCING_H__
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 534e435f20bc..14e2ffb6c0e5 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
 * Copyright © 2003-2018 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index d8e1ab412634..8d77dcbad059 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -4,6 +4,8 @@
*/
#include "debugfs_gt.h"
+
+#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_gt.h"
@@ -29,6 +31,9 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
INIT_LIST_HEAD(&gt->closed_vma);
spin_lock_init(&gt->closed_lock);
+ init_llist_head(&gt->watchdog.list);
+ INIT_WORK(&gt->watchdog.work, intel_gt_watchdog_work);
+
intel_gt_init_buffer_pool(gt);
intel_gt_init_reset(gt);
intel_gt_init_requests(gt);
@@ -39,6 +44,42 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
intel_uc_init_early(&gt->uc);
}
+int intel_gt_probe_lmem(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_memory_region *mem;
+ int id;
+ int err;
+
+ mem = intel_gt_setup_lmem(gt);
+ if (mem == ERR_PTR(-ENODEV))
+ mem = intel_gt_setup_fake_lmem(gt);
+ if (IS_ERR(mem)) {
+ err = PTR_ERR(mem);
+ if (err == -ENODEV)
+ return 0;
+
+ drm_err(&i915->drm,
+ "Failed to setup region(%d) type=%d\n",
+ err, INTEL_MEMORY_LOCAL);
+ return err;
+ }
+
+ id = INTEL_REGION_LMEM;
+
+ mem->id = id;
+ mem->type = INTEL_MEMORY_LOCAL;
+ mem->instance = 0;
+
+ intel_memory_region_set_name(mem, "local%u", mem->instance);
+
+ GEM_BUG_ON(!HAS_REGION(i915, id));
+ GEM_BUG_ON(i915->mm.regions[id]);
+ i915->mm.regions[id] = mem;
+
+ return 0;
+}
+
void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
{
gt->ggtt = ggtt;
@@ -344,11 +385,13 @@ static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
struct i915_vma *vma;
int ret;
- obj = i915_gem_object_create_stolen(i915, size);
+ obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE);
+ if (IS_ERR(obj))
+ obj = i915_gem_object_create_stolen(i915, size);
if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj)) {
- DRM_ERROR("Failed to allocate scratch page\n");
+ drm_err(&i915->drm, "Failed to allocate scratch page\n");
return PTR_ERR(obj);
}
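The new intel_gt_probe_lmem() deliberately maps -ENODEV onto success so that
integrated parts (and fake-LMEM setups without the module parameter) still
probe cleanly. A minimal sketch of the caller contract, assuming hypothetical
probe-path wiring not shown in this patch:

	err = intel_gt_probe_lmem(gt);	/* e.g. from device probe */
	if (err)
		return err;	/* only real region-setup failures propagate */
	/*
	 * On hardware without local memory err is 0 and
	 * i915->mm.regions[INTEL_REGION_LMEM] simply remains NULL.
	 */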
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 9157c7411f60..7ec395cace69 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -36,6 +36,7 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt);
+int intel_gt_probe_lmem(struct intel_gt *gt);
int intel_gt_init_mmio(struct intel_gt *gt);
int __must_check intel_gt_init_hw(struct intel_gt *gt);
int intel_gt_init(struct intel_gt *gt);
@@ -77,4 +78,6 @@ static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
void intel_gt_info_print(const struct intel_gt_info *info,
struct drm_printer *p);
+void intel_gt_watchdog_work(struct work_struct *work);
+
#endif /* __INTEL_GT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
index 06d84cf09570..c59468107598 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
@@ -98,28 +98,6 @@ static void pool_free_work(struct work_struct *wrk)
round_jiffies_up_relative(HZ));
}
-static int pool_active(struct i915_active *ref)
-{
- struct intel_gt_buffer_pool_node *node =
- container_of(ref, typeof(*node), active);
- struct dma_resv *resv = node->obj->base.resv;
- int err;
-
- if (dma_resv_trylock(resv)) {
- dma_resv_add_excl_fence(resv, NULL);
- dma_resv_unlock(resv);
- }
-
- err = i915_gem_object_pin_pages(node->obj);
- if (err)
- return err;
-
- /* Hide this pinned object from the shrinker until retired */
- i915_gem_object_make_unshrinkable(node->obj);
-
- return 0;
-}
-
__i915_active_call
static void pool_retire(struct i915_active *ref)
{
@@ -129,10 +107,13 @@ static void pool_retire(struct i915_active *ref)
struct list_head *list = bucket_for_size(pool, node->obj->base.size);
unsigned long flags;
- i915_gem_object_unpin_pages(node->obj);
+ if (node->pinned) {
+ i915_gem_object_unpin_pages(node->obj);
- /* Return this object to the shrinker pool */
- i915_gem_object_make_purgeable(node->obj);
+ /* Return this object to the shrinker pool */
+ i915_gem_object_make_purgeable(node->obj);
+ node->pinned = false;
+ }
GEM_BUG_ON(node->age);
spin_lock_irqsave(&pool->lock, flags);
@@ -144,6 +125,19 @@ static void pool_retire(struct i915_active *ref)
round_jiffies_up_relative(HZ));
}
+void intel_gt_buffer_pool_mark_used(struct intel_gt_buffer_pool_node *node)
+{
+ assert_object_held(node->obj);
+
+ if (node->pinned)
+ return;
+
+ __i915_gem_object_pin_pages(node->obj);
+ /* Hide this pinned object from the shrinker until retired */
+ i915_gem_object_make_unshrinkable(node->obj);
+ node->pinned = true;
+}
+
static struct intel_gt_buffer_pool_node *
node_create(struct intel_gt_buffer_pool *pool, size_t sz,
enum i915_map_type type)
@@ -159,7 +153,8 @@ node_create(struct intel_gt_buffer_pool *pool, size_t sz,
node->age = 0;
node->pool = pool;
- i915_active_init(&node->active, pool_active, pool_retire);
+ node->pinned = false;
+ i915_active_init(&node->active, NULL, pool_retire);
obj = i915_gem_object_create_internal(gt->i915, sz);
if (IS_ERR(obj)) {
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
index 6068f8f1762e..487b8a5520f1 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
@@ -18,10 +18,15 @@ struct intel_gt_buffer_pool_node *
intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size,
enum i915_map_type type);
+void intel_gt_buffer_pool_mark_used(struct intel_gt_buffer_pool_node *node);
+
static inline int
intel_gt_buffer_pool_mark_active(struct intel_gt_buffer_pool_node *node,
struct i915_request *rq)
{
+ /* did we call mark_used? */
+ GEM_WARN_ON(!node->pinned);
+
return i915_active_add_request(&node->active, rq);
}
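The pool API is now two-step: intel_gt_buffer_pool_mark_used() pins the pages
under the object lock (hence the GEM_WARN_ON above), while pool_retire()
unpins them once the request completes. A sketch of the intended caller flow,
assuming the caller already holds a ww context "ww" and a request "rq":

	struct intel_gt_buffer_pool_node *node;
	int err;

	node = intel_gt_get_buffer_pool(gt, SZ_64K, I915_MAP_WC);
	if (IS_ERR(node))
		return PTR_ERR(node);

	err = i915_gem_object_lock(node->obj, &ww);
	if (!err) {
		/* pins the pages and hides the object from the shrinker */
		intel_gt_buffer_pool_mark_used(node);
		/* unpinning is deferred to pool_retire() via the request */
		err = intel_gt_buffer_pool_mark_active(node, rq);
	}
	intel_gt_buffer_pool_put(node);
	return err;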
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
index d8d82c890da8..df1d75d08cd2 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2018 Intel Corporation
*/
@@ -31,6 +30,7 @@ struct intel_gt_buffer_pool_node {
};
unsigned long age;
enum i915_map_type type;
+ u32 pinned;
};
#endif /* INTEL_GT_BUFFER_POOL_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
index a4242ca8dcd7..582fcaee11aa 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
@@ -165,7 +165,6 @@ void intel_gt_init_clock_frequency(struct intel_gt *gt)
gt->clock_period_ns,
div_u64(mul_u32_u32(gt->clock_period_ns, S32_MAX),
USEC_PER_SEC));
-
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 9830342aa6f4..9fc6c912a4e5 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.h b/drivers/gpu/drm/i915/gt/intel_gt_irq.h
index 886c5cf408a2..f667e976fb2b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index c94e8ac884eb..aef3084e8b16 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index 63846a856e7e..d0588d8aaa44 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
index babe866126d7..811a11ed181c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h
index b29816a04809..ff766966d6fc 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index dc06c78c9eeb..647eca9d867a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
@@ -9,6 +8,7 @@
#include "i915_drv.h" /* for_each_engine() */
#include "i915_request.h"
#include "intel_engine_heartbeat.h"
+#include "intel_execlists_submission.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
@@ -243,4 +243,31 @@ void intel_gt_fini_requests(struct intel_gt *gt)
{
/* Wait until the work is marked as finished before unloading! */
cancel_delayed_work_sync(&gt->requests.retire_work);
+
+ flush_work(&gt->watchdog.work);
+}
+
+void intel_gt_watchdog_work(struct work_struct *work)
+{
+ struct intel_gt *gt =
+ container_of(work, typeof(*gt), watchdog.work);
+ struct i915_request *rq, *rn;
+ struct llist_node *first;
+
+ first = llist_del_all(&gt->watchdog.list);
+ if (!first)
+ return;
+
+ llist_for_each_entry_safe(rq, rn, first, watchdog.link) {
+ if (!i915_request_completed(rq)) {
+ struct dma_fence *f = &rq->fence;
+
+ pr_notice("Fence expiration time out i915-%s:%s:%llx!\n",
+ f->ops->get_driver_name(f),
+ f->ops->get_timeline_name(f),
+ f->seqno);
+ i915_request_cancel(rq, -EINTR);
+ }
+ i915_request_put(rq);
+ }
}
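Only the consumer side of the watchdog lives here; the producer is expected to
hold a reference (dropped by the worker above), push the request onto the
lockless list and kick the work item. A sketch consistent with that contract,
e.g. from a request-expiry timer callback (not part of this hunk):

	i915_request_get(rq);	/* reference consumed by intel_gt_watchdog_work() */
	if (llist_add(&rq->watchdog.link, &gt->watchdog.list))
		schedule_work(&gt->watchdog.work);	/* first entry kicks the worker */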
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.h b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
index dbac53baf1cb..fcc30a6e4fe9 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index a83d3e18254d..0caf6ca0a784 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -8,10 +8,12 @@
#include <linux/ktime.h>
#include <linux/list.h>
+#include <linux/llist.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/types.h>
+#include <linux/workqueue.h>
#include "uc/intel_uc.h"
@@ -39,10 +41,6 @@ struct intel_gt {
struct intel_gt_timelines {
spinlock_t lock; /* protects active_list */
struct list_head active_list;
-
- /* Pack multiple timelines' seqnos into the same page */
- spinlock_t hwsp_lock;
- struct list_head hwsp_free_list;
} timelines;
struct intel_gt_requests {
@@ -56,6 +54,11 @@ struct intel_gt {
struct delayed_work retire_work;
} requests;
+ struct {
+ struct llist_head list;
+ struct work_struct work;
+ } watchdog;
+
struct intel_wakeref wakeref;
atomic_t user_wakeref;
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 04aa6601e984..941f8af016d6 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -13,16 +13,36 @@
struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
{
+ struct drm_i915_gem_object *obj;
+
if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
i915_gem_shrink_all(vm->i915);
- return i915_gem_object_create_internal(vm->i915, sz);
+ obj = i915_gem_object_create_internal(vm->i915, sz);
+ /* ensure all dma objects have the same reservation class */
+ if (!IS_ERR(obj))
+ obj->base.resv = &vm->resv;
+ return obj;
}
int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
int err;
+ i915_gem_object_lock(obj, NULL);
+ err = i915_gem_object_pin_pages(obj);
+ i915_gem_object_unlock(obj);
+ if (err)
+ return err;
+
+ i915_gem_object_make_unshrinkable(obj);
+ return 0;
+}
+
+int pin_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
+{
+ int err;
+
err = i915_gem_object_pin_pages(obj);
if (err)
return err;
@@ -56,6 +76,20 @@ void __i915_vm_close(struct i915_address_space *vm)
mutex_unlock(&vm->mutex);
}
+/* lock the vm into the current ww; if we lock one, we lock all */
+int i915_vm_lock_objects(struct i915_address_space *vm,
+ struct i915_gem_ww_ctx *ww)
+{
+ if (vm->scratch[0]->base.resv == &vm->resv) {
+ return i915_gem_object_lock(vm->scratch[0], ww);
+ } else {
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+
+ /* We borrowed the scratch page from ggtt, take the top level object */
+ return i915_gem_object_lock(ppgtt->pd->pt.base, ww);
+ }
+}
+
void i915_address_space_fini(struct i915_address_space *vm)
{
drm_mm_takedown(&vm->mm);
@@ -69,6 +103,7 @@ static void __i915_vm_release(struct work_struct *work)
vm->cleanup(vm);
i915_address_space_fini(vm);
+ dma_resv_fini(&vm->resv);
kfree(vm);
}
@@ -98,6 +133,7 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
mutex_init(&vm->mutex);
lockdep_set_subclass(&vm->mutex, subclass);
i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
+ dma_resv_init(&vm->resv);
GEM_BUG_ON(!vm->total);
drm_mm_init(&vm->mm, 0, vm->total);
@@ -427,7 +463,6 @@ __vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
- int err;
obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size));
if (IS_ERR(obj))
@@ -441,6 +476,19 @@ __vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
return vma;
}
+ return vma;
+}
+
+struct i915_vma *
+__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size)
+{
+ struct i915_vma *vma;
+ int err;
+
+ vma = __vm_create_scratch_for_read(vm, size);
+ if (IS_ERR(vma))
+ return vma;
+
err = i915_vma_pin(vma, 0, 0,
i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
if (err) {
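Since alloc_pt_dma() now points every page-table object at the shared
vm->resv, a single i915_vm_lock_objects() call covers the whole tree. A
sketch of driving it from a ww transaction (assumed usage, not part of this
patch; do_pt_work() is a hypothetical stand-in):

	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, false);	/* non-interruptible */
retry:
	err = i915_vm_lock_objects(vm, &ww);
	if (!err)
		err = do_pt_work(vm);	/* every pd object is locked via vm->resv */
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);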
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 29c10fde8ce3..e67e34e17913 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -85,6 +85,10 @@ typedef u64 gen8_pte_t;
#define BYT_PTE_SNOOPED_BY_CPU_CACHES REG_BIT(2)
#define BYT_PTE_WRITEABLE REG_BIT(1)
+#define GEN12_PPGTT_PTE_LM BIT_ULL(11)
+
+#define GEN12_GGTT_PTE_LM BIT_ULL(1)
+
/*
* Cacheability Control is a 4-bit value. The low three bits are stored in bits
* 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
@@ -238,6 +242,7 @@ struct i915_address_space {
atomic_t open;
struct mutex mutex; /* protects vma and our lists */
+ struct dma_resv resv; /* reservation lock for all pd objects, and buffer pool */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1
@@ -264,6 +269,7 @@ struct i915_address_space {
enum i915_cache_level level,
u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY BIT(0)
+#define PTE_LM BIT(1)
void (*allocate_va_range)(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
@@ -346,6 +352,9 @@ struct i915_ppgtt {
#define i915_is_ggtt(vm) ((vm)->is_ggtt)
+int __must_check
+i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);
+
static inline bool
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
@@ -522,6 +531,7 @@ struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
struct i915_page_directory *__alloc_pd(int npde);
int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
+int pin_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
void free_px(struct i915_address_space *vm,
struct i915_page_table *pt, int lvl);
@@ -576,6 +586,9 @@ void i915_vm_free_pt_stash(struct i915_address_space *vm,
struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size);
+struct i915_vma *
+__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size);
+
static inline struct sgt_dma {
struct scatterlist *sg;
dma_addr_t dma, max;
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
index e3f637b3650e..075d741644ae 100644
--- a/drivers/gpu/drm/i915/gt/intel_llc.c
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.h b/drivers/gpu/drm/i915/gt/intel_llc.h
index ef09a890d2b7..0e2e3871c919 100644
--- a/drivers/gpu/drm/i915/gt/intel_llc.h
+++ b/drivers/gpu/drm/i915/gt/intel_llc.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_llc_types.h b/drivers/gpu/drm/i915/gt/intel_llc_types.h
index ecad4687b930..ca5bdf166a23 100644
--- a/drivers/gpu/drm/i915/gt/intel_llc_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_llc_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 94f485b591af..e86897cde984 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -3,6 +3,8 @@
* Copyright © 2014 Intel Corporation
*/
+#include "gem/i915_gem_lmem.h"
+
#include "gen8_engine_cs.h"
#include "i915_drv.h"
#include "i915_perf.h"
@@ -808,7 +810,9 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
context_size += PAGE_SIZE;
}
- obj = i915_gem_object_create_shmem(engine->i915, context_size);
+ obj = i915_gem_object_create_lmem(engine->i915, context_size, 0);
+ if (IS_ERR(obj))
+ obj = i915_gem_object_create_shmem(engine->i915, context_size);
if (IS_ERR(obj))
return ERR_CAST(obj);
@@ -1417,7 +1421,7 @@ gen10_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
#define CTX_WA_BB_SIZE (PAGE_SIZE)
-static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
+static int lrc_create_wa_ctx(struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -1433,10 +1437,6 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
goto err;
}
- err = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
- if (err)
- goto err;
-
engine->wa_ctx.vma = vma;
return 0;
@@ -1448,9 +1448,6 @@ err:
void lrc_fini_wa_ctx(struct intel_engine_cs *engine)
{
i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
-
- /* Called on error unwind, clear all flags to prevent further use */
- memset(&engine->wa_ctx, 0, sizeof(engine->wa_ctx));
}
typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
@@ -1462,6 +1459,7 @@ void lrc_init_wa_ctx(struct intel_engine_cs *engine)
&wa_ctx->indirect_ctx, &wa_ctx->per_ctx
};
wa_bb_func_t wa_bb_fn[ARRAY_SIZE(wa_bb)];
+ struct i915_gem_ww_ctx ww;
void *batch, *batch_ptr;
unsigned int i;
int err;
@@ -1490,7 +1488,7 @@ void lrc_init_wa_ctx(struct intel_engine_cs *engine)
return;
}
- err = lrc_setup_wa_ctx(engine);
+ err = lrc_create_wa_ctx(engine);
if (err) {
/*
* We continue even if we fail to initialize WA batch
@@ -1503,7 +1501,22 @@ void lrc_init_wa_ctx(struct intel_engine_cs *engine)
return;
}
+ if (!engine->wa_ctx.vma)
+ return;
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ err = i915_gem_object_lock(wa_ctx->vma->obj, &ww);
+ if (!err)
+ err = i915_ggtt_pin(wa_ctx->vma, &ww, 0, PIN_HIGH);
+ if (err)
+ goto err;
+
batch = i915_gem_object_pin_map(wa_ctx->vma->obj, I915_MAP_WB);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto err_unpin;
+ }
/*
* Emit the two workaround batch buffers, recording the offset from the
@@ -1528,8 +1541,26 @@ void lrc_init_wa_ctx(struct intel_engine_cs *engine)
__i915_gem_object_release_map(wa_ctx->vma->obj);
/* Verify that we can handle failure to setup the wa_ctx */
- if (err || i915_inject_probe_error(engine->i915, -ENODEV))
- lrc_fini_wa_ctx(engine);
+ if (!err)
+ err = i915_inject_probe_error(engine->i915, -ENODEV);
+
+err_unpin:
+ if (err)
+ i915_vma_unpin(wa_ctx->vma);
+err:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+
+ if (err) {
+ i915_vma_put(engine->wa_ctx.vma);
+
+ /* Clear all flags to prevent further use */
+ memset(wa_ctx, 0, sizeof(*wa_ctx));
+ }
}
static void st_update_runtime_underflow(struct intel_context *ce, s32 dt)
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index 65fe76738335..41e5350a7a05 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2018 Intel Corporation
*/
@@ -40,7 +39,7 @@
#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
u32 *reg_state__ = (reg_state); \
- const u64 addr__ = px_dma(ppgtt->pd); \
+ const u64 addr__ = px_dma((ppgtt)->pd); \
(reg_state__)[CTX_PDP0_UDW] = upper_32_bits(addr__); \
(reg_state__)[CTX_PDP0_LDW] = lower_32_bits(addr__); \
} while (0)
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 8acb84960cd0..b14138fd505c 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -1,23 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright © 2015 Intel Corporation
*/
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h
index 83371f3e6ba1..d83274f5163b 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.h
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.h
@@ -1,24 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright © 2015 Intel Corporation
*/
#ifndef INTEL_MOCS_H
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index 96b85a10ef33..014ae8ac4480 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -5,6 +5,8 @@
#include <linux/slab.h>
+#include "gem/i915_gem_lmem.h"
+
#include "i915_trace.h"
#include "intel_gtt.h"
#include "gen6_ppgtt.h"
@@ -192,6 +194,8 @@ void ppgtt_bind_vma(struct i915_address_space *vm,
pte_flags = 0;
if (i915_gem_object_is_readonly(vma->obj))
pte_flags |= PTE_READ_ONLY;
+ if (i915_gem_object_is_lmem(vma->obj))
+ pte_flags |= PTE_LM;
vm->insert_entries(vm, vma, cache_level, pte_flags);
wmb();
@@ -262,7 +266,7 @@ int i915_vm_pin_pt_stash(struct i915_address_space *vm,
for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
for (pt = stash->pt[n]; pt; pt = pt->stash) {
- err = pin_pt_dma(vm, pt->base);
+ err = pin_pt_dma_locked(vm, pt->base);
if (err)
return err;
}
@@ -301,9 +305,10 @@ void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
ppgtt->vm.gt = gt;
ppgtt->vm.i915 = i915;
- ppgtt->vm.dma = &i915->drm.pdev->dev;
+ ppgtt->vm.dma = i915->drm.dev;
ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
+ dma_resv_init(&ppgtt->vm.resv);
i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 35504c97f11d..3b7e62debe7e 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
@@ -176,7 +175,6 @@ static void gen9_rc6_enable(struct intel_rc6 *rc6)
/* 3a: Enable RC6 */
set(uncore, GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
-
rc6->ctl_enable =
GEN6_RC_CTL_HW_ENABLE |
GEN6_RC_CTL_RC6_ENABLE |
@@ -485,14 +483,14 @@ static bool rc6_supported(struct intel_rc6 *rc6)
static void rpm_get(struct intel_rc6 *rc6)
{
GEM_BUG_ON(rc6->wakeref);
- pm_runtime_get_sync(&rc6_to_i915(rc6)->drm.pdev->dev);
+ pm_runtime_get_sync(rc6_to_i915(rc6)->drm.dev);
rc6->wakeref = true;
}
static void rpm_put(struct intel_rc6 *rc6)
{
GEM_BUG_ON(!rc6->wakeref);
- pm_runtime_put(&rc6_to_i915(rc6)->drm.pdev->dev);
+ pm_runtime_put(rc6_to_i915(rc6)->drm.dev);
rc6->wakeref = false;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.h b/drivers/gpu/drm/i915/gt/intel_rc6.h
index 9f0f23fca8af..e119ec4a0bcc 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.h
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6_types.h b/drivers/gpu/drm/i915/gt/intel_rc6_types.h
index bfbb623f7a4f..e747492b2f46 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_rc6_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index 60393ce5614d..be6f2c8f5184 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -26,12 +26,12 @@ static int init_fake_lmem_bar(struct intel_memory_region *mem)
if (ret)
return ret;
- mem->remap_addr = dma_map_resource(&i915->drm.pdev->dev,
+ mem->remap_addr = dma_map_resource(i915->drm.dev,
mem->region.start,
mem->fake_mappable.size,
PCI_DMA_BIDIRECTIONAL,
DMA_ATTR_FORCE_CONTIGUOUS);
- if (dma_mapping_error(&i915->drm.pdev->dev, mem->remap_addr)) {
+ if (dma_mapping_error(i915->drm.dev, mem->remap_addr)) {
drm_mm_remove_node(&mem->fake_mappable);
return -EINVAL;
}
@@ -56,7 +56,7 @@ static void release_fake_lmem_bar(struct intel_memory_region *mem)
drm_mm_remove_node(&mem->fake_mappable);
- dma_unmap_resource(&mem->i915->drm.pdev->dev,
+ dma_unmap_resource(mem->i915->drm.dev,
mem->remap_addr,
mem->fake_mappable.size,
PCI_DMA_BIDIRECTIONAL,
@@ -90,8 +90,6 @@ region_lmem_init(struct intel_memory_region *mem)
if (ret)
io_mapping_fini(&mem->iomap);
- intel_memory_region_set_name(mem, "local");
-
return ret;
}
@@ -102,20 +100,26 @@ static const struct intel_memory_region_ops intel_region_lmem_ops = {
};
struct intel_memory_region *
-intel_setup_fake_lmem(struct drm_i915_private *i915)
+intel_gt_setup_fake_lmem(struct intel_gt *gt)
{
- struct pci_dev *pdev = i915->drm.pdev;
+ struct drm_i915_private *i915 = gt->i915;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct intel_memory_region *mem;
resource_size_t mappable_end;
resource_size_t io_start;
resource_size_t start;
+ if (!HAS_LMEM(i915))
+ return ERR_PTR(-ENODEV);
+
+ if (!i915->params.fake_lmem_start)
+ return ERR_PTR(-ENODEV);
+
GEM_BUG_ON(i915_ggtt_has_aperture(&i915->ggtt));
- GEM_BUG_ON(!i915->params.fake_lmem_start);
/* Your mappable aperture belongs to me now! */
mappable_end = pci_resource_len(pdev, 2);
- io_start = pci_resource_start(pdev, 2),
+ io_start = pci_resource_start(pdev, 2);
start = i915->params.fake_lmem_start;
mem = intel_memory_region_create(i915,
@@ -136,3 +140,86 @@ intel_setup_fake_lmem(struct drm_i915_private *i915)
return mem;
}
+
+static bool get_legacy_lowmem_region(struct intel_uncore *uncore,
+ u64 *start, u32 *size)
+{
+ if (!IS_DG1_REVID(uncore->i915, DG1_REVID_A0, DG1_REVID_B0))
+ return false;
+
+ *start = 0;
+ *size = SZ_1M;
+
+ drm_dbg(&uncore->i915->drm, "LMEM: reserved legacy low-memory [0x%llx-0x%llx]\n",
+ *start, *start + *size);
+
+ return true;
+}
+
+static int reserve_lowmem_region(struct intel_uncore *uncore,
+ struct intel_memory_region *mem)
+{
+ u64 reserve_start;
+ u32 reserve_size;
+ int ret;
+
+ if (!get_legacy_lowmem_region(uncore, &reserve_start, &reserve_size))
+ return 0;
+
+ ret = intel_memory_region_reserve(mem, reserve_start, reserve_size);
+ if (ret)
+ drm_err(&uncore->i915->drm, "LMEM: reserving low memory region failed\n");
+
+ return ret;
+}
+
+static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ struct pci_dev *pdev = i915->drm.pdev;
+ struct intel_memory_region *mem;
+ resource_size_t io_start;
+ resource_size_t lmem_size;
+ int err;
+
+ if (!IS_DGFX(i915))
+ return ERR_PTR(-ENODEV);
+
+ /* Stolen starts from GSMBASE on DG1 */
+ lmem_size = intel_uncore_read64(uncore, GEN12_GSMBASE);
+
+ io_start = pci_resource_start(pdev, 2);
+ if (GEM_WARN_ON(lmem_size > pci_resource_len(pdev, 2)))
+ return ERR_PTR(-ENODEV);
+
+ mem = intel_memory_region_create(i915,
+ 0,
+ lmem_size,
+ I915_GTT_PAGE_SIZE_4K,
+ io_start,
+ &intel_region_lmem_ops);
+ if (IS_ERR(mem))
+ return mem;
+
+ err = reserve_lowmem_region(uncore, mem);
+ if (err)
+ goto err_region_put;
+
+ drm_dbg(&i915->drm, "Local memory: %pR\n", &mem->region);
+ drm_dbg(&i915->drm, "Local memory IO start: %pa\n",
+ &mem->io_start);
+ drm_info(&i915->drm, "Local memory available: %pa\n",
+ &lmem_size);
+
+ return mem;
+
+err_region_put:
+ intel_memory_region_put(mem);
+ return ERR_PTR(err);
+}
+
+struct intel_memory_region *intel_gt_setup_lmem(struct intel_gt *gt)
+{
+ return setup_lmem(gt);
+}
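For reference, the DG1 address layout assumed by setup_lmem() above (stolen
memory starts at GSMBASE, so everything below it becomes the LMEM region;
addresses are device-local):

	0                     GSMBASE              end of device memory
	|---- usable LMEM ----|------- stolen (GSM) -------|

	lmem_size = GSMBASE; on A0..B0 steppings the first [0, 1M) is
	additionally carved out by reserve_lowmem_region().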
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.h b/drivers/gpu/drm/i915/gt/intel_region_lmem.h
index 8ea43e538dab..062d0542ae34 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.h
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.h
@@ -6,9 +6,11 @@
#ifndef __INTEL_REGION_LMEM_H
#define __INTEL_REGION_LMEM_H
-struct drm_i915_private;
+struct intel_gt;
+
+struct intel_memory_region *intel_gt_setup_lmem(struct intel_gt *gt);
struct intel_memory_region *
-intel_setup_fake_lmem(struct drm_i915_private *i915);
+intel_gt_setup_fake_lmem(struct intel_gt *gt);
#endif /* !__INTEL_REGION_LMEM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index ca816ba22197..b03e197b1d99 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -1,28 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Mika Kuoppala <mika.kuoppala@intel.com>
- *
*/
#include "i915_drv.h"
@@ -65,7 +43,7 @@ render_state_get_rodata(const struct intel_engine_cs *engine)
if ((i) >= PAGE_SIZE / sizeof(u32)) \
goto out; \
(batch)[(i)++] = (val); \
- } while(0)
+ } while (0)
static int render_state_setup(struct intel_renderstate *so,
struct drm_i915_private *i915)
@@ -84,6 +62,7 @@ static int render_state_setup(struct intel_renderstate *so,
if (i * 4 == rodata->reloc[reloc_index]) {
u64 r = s + so->vma->node.start;
+
s = lower_32_bits(r);
if (HAS_64BIT_RELOC(i915)) {
if (i + 1 >= rodata->batch_items ||
@@ -197,7 +176,7 @@ retry:
if (err)
goto err_context;
- err = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ err = i915_vma_pin_ww(so->vma, &so->ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
goto err_context;
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.h b/drivers/gpu/drm/i915/gt/intel_renderstate.h
index 713aa1e86c80..48f009203917 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.h
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.h
@@ -1,24 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
*/
#ifndef _INTEL_RENDERSTATE_H_
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 61410cd62927..a377c4588aaa 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2008-2018 Intel Corporation
*/
@@ -178,7 +177,7 @@ static int i915_do_reset(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- struct pci_dev *pdev = gt->i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
int err;
/* Assert reset for at least 20 usec, and wait for acknowledgement. */
@@ -207,7 +206,7 @@ static int g33_do_reset(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- struct pci_dev *pdev = gt->i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
return wait_for_atomic(g4x_reset_complete(pdev), 50);
@@ -217,7 +216,7 @@ static int g4x_do_reset(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- struct pci_dev *pdev = gt->i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
struct intel_uncore *uncore = gt->uncore;
int ret;
@@ -787,18 +786,15 @@ static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
static void nop_submit_request(struct i915_request *request)
{
- struct intel_engine_cs *engine = request->engine;
- unsigned long flags;
-
RQ_TRACE(request, "-EIO\n");
- i915_request_set_error_once(request, -EIO);
- spin_lock_irqsave(&engine->active.lock, flags);
- __i915_request_submit(request);
- i915_request_mark_complete(request);
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ request = i915_request_mark_eio(request);
+ if (request) {
+ i915_request_submit(request);
+ intel_engine_signal_breadcrumbs(request->engine);
- intel_engine_signal_breadcrumbs(engine);
+ i915_request_put(request);
+ }
}
static void __intel_gt_set_wedged(struct intel_gt *gt)
@@ -974,8 +970,6 @@ static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
int err, i;
- gt_revoke(gt);
-
err = __intel_gt_reset(gt, ALL_ENGINES);
for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
msleep(10 * (i + 1));
@@ -1030,6 +1024,13 @@ void intel_gt_reset(struct intel_gt *gt,
might_sleep();
GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
+
+ /*
+ * FIXME: Revoking cpu mmap ptes cannot be done from a dma_fence
+ * critical section like gpu reset.
+ */
+ gt_revoke(gt);
+
mutex_lock(&gt->reset.mutex);
/* Clear any previous failed attempts at recovery. Time to try again. */
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
index 7dbf5cc8a333..adc734e67387 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2008-2018 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_reset_types.h b/drivers/gpu/drm/i915/gt/intel_reset_types.h
index add6b86d9d03..9312b29f5a97 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset_types.h
@@ -32,7 +32,7 @@ struct intel_reset {
*
* #I915_WEDGED_ON_INIT - If we fail to initialize the GPU we can no
* longer use the GPU - similar to #I915_WEDGED bit. The difference in
- * in the way we're handling "forced" unwedged (e.g. through debugfs),
+ * the way we're handling "forced" unwedged (e.g. through debugfs),
* which is not allowed in case we failed to initialize.
*
* #I915_WEDGED_ON_FINI - Similar to #I915_WEDGED_ON_INIT, except we
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 78d1360caa0f..aee0a77c77e0 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -1,9 +1,9 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
+#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"
#include "i915_drv.h"
@@ -109,8 +109,8 @@ static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
- obj = ERR_PTR(-ENODEV);
- if (i915_ggtt_has_aperture(ggtt))
+ obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE);
+ if (IS_ERR(obj) && i915_ggtt_has_aperture(ggtt))
obj = i915_gem_object_create_stolen(i915, size);
if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, size);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.h b/drivers/gpu/drm/i915/gt/intel_ring.h
index 1700579bdc93..dbf5f14a136f 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.h
+++ b/drivers/gpu/drm/i915/gt/intel_ring.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
@@ -82,6 +81,7 @@ static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
{
/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
u32 offset = addr - rq->ring->vaddr;
+
GEM_BUG_ON(offset > rq->ring->size);
return intel_ring_wrap(rq->ring, offset);
}
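A worked example of the wrap behaviour the comment above guards against,
assuming intel_ring_wrap() masks with (ring->size - 1) and a 16 KiB ring:

	addr == rq->ring->vaddr + 16384   =>  offset = 16384
	intel_ring_wrap(ring, 16384)      =>  16384 & 16383 = 0

so a one-past-the-end pointer is reported as 0 rather than ring->size, which
hangs some GPUs.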
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 4984ff565424..9585546556ee 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -1,30 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * Copyright © 2008-2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
- * Zou Nan hai <nanhai.zou@intel.com>
- * Xiang Hai hao<haihao.xiang@intel.com>
- *
+ * Copyright © 2008-2021 Intel Corporation
*/
#include "gen2_engine_cs.h"
@@ -183,15 +159,36 @@ static void set_pp_dir(struct intel_engine_cs *engine)
}
}
+static bool stop_ring(struct intel_engine_cs *engine)
+{
+ /* Empty the ring by skipping to the end */
+ ENGINE_WRITE_FW(engine, RING_HEAD, ENGINE_READ_FW(engine, RING_TAIL));
+ ENGINE_POSTING_READ(engine, RING_HEAD);
+
+ /* The ring must be empty before it is disabled */
+ ENGINE_WRITE_FW(engine, RING_CTL, 0);
+ ENGINE_POSTING_READ(engine, RING_CTL);
+
+ /* Then reset the disabled ring */
+ ENGINE_WRITE_FW(engine, RING_HEAD, 0);
+ ENGINE_WRITE_FW(engine, RING_TAIL, 0);
+
+ return (ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) == 0;
+}
+
static int xcs_resume(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
struct intel_ring *ring = engine->legacy.ring;
ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
ring->head, ring->tail);
- if (HWS_NEEDS_PHYSICAL(dev_priv))
+ /* Double check the ring is empty & disabled before we resume */
+ synchronize_hardirq(engine->i915->drm.irq);
+ if (!stop_ring(engine))
+ goto err;
+
+ if (HWS_NEEDS_PHYSICAL(engine->i915))
ring_setup_phys_status_page(engine);
else
ring_setup_status_page(engine);
@@ -228,21 +225,10 @@ static int xcs_resume(struct intel_engine_cs *engine)
if (__intel_wait_for_register_fw(engine->uncore,
RING_CTL(engine->mmio_base),
RING_VALID, RING_VALID,
- 5000, 0, NULL)) {
- drm_err(&dev_priv->drm,
- "%s initialization failed; "
- "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
- engine->name,
- ENGINE_READ(engine, RING_CTL),
- ENGINE_READ(engine, RING_CTL) & RING_VALID,
- ENGINE_READ(engine, RING_HEAD), ring->head,
- ENGINE_READ(engine, RING_TAIL), ring->tail,
- ENGINE_READ(engine, RING_START),
- i915_ggtt_offset(ring->vma));
- return -EIO;
- }
+ 5000, 0, NULL))
+ goto err;
- if (INTEL_GEN(dev_priv) > 2)
+ if (INTEL_GEN(engine->i915) > 2)
ENGINE_WRITE_FW(engine,
RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
@@ -255,6 +241,19 @@ static int xcs_resume(struct intel_engine_cs *engine)
/* Papering over lost _interrupts_ immediately following the restart */
intel_engine_signal_breadcrumbs(engine);
return 0;
+
+err:
+ drm_err(&engine->i915->drm,
+ "%s initialization failed; "
+ "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
+ engine->name,
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_CTL) & RING_VALID,
+ ENGINE_READ(engine, RING_HEAD), ring->head,
+ ENGINE_READ(engine, RING_TAIL), ring->tail,
+ ENGINE_READ(engine, RING_START),
+ i915_ggtt_offset(ring->vma));
+ return -EIO;
}
static void sanitize_hwsp(struct intel_engine_cs *engine)
@@ -290,23 +289,6 @@ static void xcs_sanitize(struct intel_engine_cs *engine)
clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
}
-static bool stop_ring(struct intel_engine_cs *engine)
-{
- /* Empty the ring by skipping to the end */
- ENGINE_WRITE_FW(engine, RING_HEAD, ENGINE_READ_FW(engine, RING_TAIL));
- ENGINE_POSTING_READ(engine, RING_HEAD);
-
- /* The ring must be empty before it is disabled */
- ENGINE_WRITE_FW(engine, RING_CTL, 0);
- ENGINE_POSTING_READ(engine, RING_CTL);
-
- /* Then reset the disabled ring */
- ENGINE_WRITE_FW(engine, RING_HEAD, 0);
- ENGINE_WRITE_FW(engine, RING_TAIL, 0);
-
- return (ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) == 0;
-}
-
static void reset_prepare(struct intel_engine_cs *engine)
{
/*
@@ -329,25 +311,23 @@ static void reset_prepare(struct intel_engine_cs *engine)
if (!stop_ring(engine)) {
/* G45 ring initialization often fails to reset head to zero */
- drm_dbg(&engine->i915->drm,
- "%s head not reset to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- engine->name,
- ENGINE_READ_FW(engine, RING_CTL),
- ENGINE_READ_FW(engine, RING_HEAD),
- ENGINE_READ_FW(engine, RING_TAIL),
- ENGINE_READ_FW(engine, RING_START));
- }
-
- if (!stop_ring(engine)) {
- drm_err(&engine->i915->drm,
- "failed to set %s head to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- engine->name,
- ENGINE_READ_FW(engine, RING_CTL),
- ENGINE_READ_FW(engine, RING_HEAD),
- ENGINE_READ_FW(engine, RING_TAIL),
- ENGINE_READ_FW(engine, RING_START));
+ ENGINE_TRACE(engine,
+ "HEAD not reset to zero, "
+ "{ CTL:%08x, HEAD:%08x, TAIL:%08x, START:%08x }\n",
+ ENGINE_READ_FW(engine, RING_CTL),
+ ENGINE_READ_FW(engine, RING_HEAD),
+ ENGINE_READ_FW(engine, RING_TAIL),
+ ENGINE_READ_FW(engine, RING_START));
+ if (!stop_ring(engine)) {
+ drm_err(&engine->i915->drm,
+ "failed to set %s head to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ engine->name,
+ ENGINE_READ_FW(engine, RING_CTL),
+ ENGINE_READ_FW(engine, RING_HEAD),
+ ENGINE_READ_FW(engine, RING_TAIL),
+ ENGINE_READ_FW(engine, RING_START));
+ }
}
}
@@ -431,7 +411,7 @@ static void reset_cancel(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->active.requests, sched.link)
- i915_request_mark_eio(request);
+ i915_request_put(i915_request_mark_eio(request));
intel_engine_signal_breadcrumbs(engine);
/* Remaining _unready_ requests will be nop'ed when submitted */
@@ -466,6 +446,26 @@ static void ring_context_destroy(struct kref *ref)
intel_context_free(ce);
}
+static int ring_context_init_default_state(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww)
+{
+ struct drm_i915_gem_object *obj = ce->state->obj;
+ void *vaddr;
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ shmem_read(ce->engine->default_state, 0,
+ vaddr, ce->engine->context_size);
+
+ i915_gem_object_flush_map(obj);
+ __i915_gem_object_release_map(obj);
+
+ __set_bit(CONTEXT_VALID_BIT, &ce->flags);
+ return 0;
+}
+
static int ring_context_pre_pin(struct intel_context *ce,
struct i915_gem_ww_ctx *ww,
void **unused)
@@ -473,6 +473,13 @@ static int ring_context_pre_pin(struct intel_context *ce,
struct i915_address_space *vm;
int err = 0;
+ if (ce->engine->default_state &&
+ !test_bit(CONTEXT_VALID_BIT, &ce->flags)) {
+ err = ring_context_init_default_state(ce, ww);
+ if (err)
+ return err;
+ }
+
vm = vm_alias(ce->vm);
if (vm)
err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm)), ww);
@@ -528,22 +535,6 @@ alloc_context_vma(struct intel_engine_cs *engine)
if (IS_IVYBRIDGE(i915))
i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);
- if (engine->default_state) {
- void *vaddr;
-
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
- goto err_obj;
- }
-
- shmem_read(engine->default_state, 0,
- vaddr, engine->context_size);
-
- i915_gem_object_flush_map(obj);
- __i915_gem_object_release_map(obj);
- }
-
vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@@ -575,8 +566,6 @@ static int ring_context_alloc(struct intel_context *ce)
return PTR_ERR(vma);
ce->state = vma;
- if (engine->default_state)
- __set_bit(CONTEXT_VALID_BIT, &ce->flags);
}
return 0;
@@ -761,13 +750,14 @@ static int mi_set_context(struct i915_request *rq,
static int remap_l3_slice(struct i915_request *rq, int slice)
{
+#define L3LOG_DW (GEN7_L3LOG_SIZE / sizeof(u32))
u32 *cs, *remap_info = rq->engine->i915->l3_parity.remap_info[slice];
int i;
if (!remap_info)
return 0;
- cs = intel_ring_begin(rq, GEN7_L3LOG_SIZE/4 * 2 + 2);
+ cs = intel_ring_begin(rq, L3LOG_DW * 2 + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -776,8 +766,8 @@ static int remap_l3_slice(struct i915_request *rq, int slice)
* here because no other code should access these registers other than
* at initialization time.
*/
- *cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
- for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
+ *cs++ = MI_LOAD_REGISTER_IMM(L3LOG_DW);
+ for (i = 0; i < L3LOG_DW; i++) {
*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
*cs++ = remap_info[i];
}
@@ -785,6 +775,7 @@ static int remap_l3_slice(struct i915_request *rq, int slice)
intel_ring_advance(rq, cs);
return 0;
+#undef L3LOG_DW
}
static int remap_l3(struct i915_request *rq)
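The ring-space arithmetic above, assuming GEN7_L3LOG_SIZE is 0x80 bytes as
defined in i915_reg.h:

	L3LOG_DW         = 0x80 / sizeof(u32)  = 32 dwords
	intel_ring_begin = L3LOG_DW * 2 + 2    = 66 dwords
	                   (1 LRI header + 32 offset/value pairs + 1 dword pad)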
@@ -1176,37 +1167,15 @@ static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine,
return gen7_setup_clear_gpr_bb(engine, vma);
}
-static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine)
+static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine,
+ struct i915_gem_ww_ctx *ww,
+ struct i915_vma *vma)
{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int size;
int err;
- size = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
- if (size <= 0)
- return size;
-
- size = ALIGN(size, PAGE_SIZE);
- obj = i915_gem_object_create_internal(engine->i915, size);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- vma = i915_vma_instance(obj, engine->gt->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
- }
-
- vma->private = intel_context_create(engine); /* dummy residuals */
- if (IS_ERR(vma->private)) {
- err = PTR_ERR(vma->private);
- goto err_obj;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
+ err = i915_vma_pin_ww(vma, ww, 0, 0, PIN_USER | PIN_HIGH);
if (err)
- goto err_private;
+ return err;
err = i915_vma_sync(vma);
if (err)
@@ -1221,17 +1190,53 @@ static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine)
err_unpin:
i915_vma_unpin(vma);
-err_private:
- intel_context_put(vma->private);
-err_obj:
- i915_gem_object_put(obj);
return err;
}
+static struct i915_vma *gen7_ctx_vma(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int size, err;
+
+ if (!IS_GEN(engine->i915, 7) || engine->class != RENDER_CLASS)
+ return NULL;
+
+ err = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
+ if (err < 0)
+ return ERR_PTR(err);
+ if (!err)
+ return NULL;
+
+ size = ALIGN(err, PAGE_SIZE);
+
+ obj = i915_gem_object_create_internal(engine->i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, engine->gt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return ERR_CAST(vma);
+ }
+
+ vma->private = intel_context_create(engine); /* dummy residuals */
+ if (IS_ERR(vma->private)) {
+ err = PTR_ERR(vma->private);
+ vma->private = NULL;
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
int intel_ring_submission_setup(struct intel_engine_cs *engine)
{
+ struct i915_gem_ww_ctx ww;
struct intel_timeline *timeline;
struct intel_ring *ring;
+ struct i915_vma *gen7_wa_vma;
int err;
setup_common(engine);
@@ -1262,43 +1267,72 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
}
GEM_BUG_ON(timeline->has_initial_breadcrumb);
- err = intel_timeline_pin(timeline, NULL);
- if (err)
- goto err_timeline;
-
ring = intel_engine_create_ring(engine, SZ_16K);
if (IS_ERR(ring)) {
err = PTR_ERR(ring);
- goto err_timeline_unpin;
+ goto err_timeline;
}
- err = intel_ring_pin(ring, NULL);
- if (err)
- goto err_ring;
-
GEM_BUG_ON(engine->legacy.ring);
engine->legacy.ring = ring;
engine->legacy.timeline = timeline;
- GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
+ gen7_wa_vma = gen7_ctx_vma(engine);
+ if (IS_ERR(gen7_wa_vma)) {
+ err = PTR_ERR(gen7_wa_vma);
+ goto err_ring;
+ }
- if (IS_GEN(engine->i915, 7) && engine->class == RENDER_CLASS) {
- err = gen7_ctx_switch_bb_init(engine);
+ i915_gem_ww_ctx_init(&ww, false);
+
+retry:
+ err = i915_gem_object_lock(timeline->hwsp_ggtt->obj, &ww);
+ if (!err && gen7_wa_vma)
+ err = i915_gem_object_lock(gen7_wa_vma->obj, &ww);
+ if (!err && engine->legacy.ring->vma->obj)
+ err = i915_gem_object_lock(engine->legacy.ring->vma->obj, &ww);
+ if (!err)
+ err = intel_timeline_pin(timeline, &ww);
+ if (!err) {
+ err = intel_ring_pin(ring, &ww);
if (err)
- goto err_ring_unpin;
+ intel_timeline_unpin(timeline);
+ }
+ if (err)
+ goto out;
+
+ GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
+
+ if (gen7_wa_vma) {
+ err = gen7_ctx_switch_bb_init(engine, &ww, gen7_wa_vma);
+ if (err) {
+ intel_ring_unpin(ring);
+ intel_timeline_unpin(timeline);
+ }
}
+out:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ if (err)
+ goto err_gen7_put;
+
/* Finally, take ownership and responsibility for cleanup! */
engine->release = ring_release;
return 0;
-err_ring_unpin:
- intel_ring_unpin(ring);
+err_gen7_put:
+ if (gen7_wa_vma) {
+ intel_context_put(gen7_wa_vma->private);
+ i915_gem_object_put(gen7_wa_vma->obj);
+ }
err_ring:
intel_ring_put(ring);
-err_timeline_unpin:
- intel_timeline_unpin(timeline);
err_timeline:
intel_timeline_put(timeline);
err:
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_types.h b/drivers/gpu/drm/i915/gt/intel_ring_types.h
index 1a189ea00fd8..49ccb76dda3b 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_ring_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index ee5835c29c03..405d814e9040 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
index 8d3c9d663662..1d2cfc98b510 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h
index 029fe13cf303..3941d8551f52 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index 8a72e0fe34ca..0d9f74aec8fe 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h
index 23ba6c2ebe70..4cd1a8a7298a 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.h
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 037b0e3ccbed..f19cf6d2fa85 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016-2018 Intel Corporation
*/
@@ -12,21 +11,9 @@
#include "intel_ring.h"
#include "intel_timeline.h"
-#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit)))
-#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit))
+#define TIMELINE_SEQNO_BYTES 8
-#define CACHELINE_BITS 6
-#define CACHELINE_FREE CACHELINE_BITS
-
-struct intel_timeline_hwsp {
- struct intel_gt *gt;
- struct intel_gt_timelines *gt_timelines;
- struct list_head free_link;
- struct i915_vma *vma;
- u64 free_bitmap;
-};
-
-static struct i915_vma *__hwsp_alloc(struct intel_gt *gt)
+static struct i915_vma *hwsp_alloc(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
struct drm_i915_gem_object *obj;
@@ -45,174 +32,42 @@ static struct i915_vma *__hwsp_alloc(struct intel_gt *gt)
return vma;
}
-static struct i915_vma *
-hwsp_alloc(struct intel_timeline *timeline, unsigned int *cacheline)
-{
- struct intel_gt_timelines *gt = &timeline->gt->timelines;
- struct intel_timeline_hwsp *hwsp;
-
- BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE);
-
- spin_lock_irq(&gt->hwsp_lock);
-
- /* hwsp_free_list only contains HWSP that have available cachelines */
- hwsp = list_first_entry_or_null(&gt->hwsp_free_list,
- typeof(*hwsp), free_link);
- if (!hwsp) {
- struct i915_vma *vma;
-
- spin_unlock_irq(&gt->hwsp_lock);
-
- hwsp = kmalloc(sizeof(*hwsp), GFP_KERNEL);
- if (!hwsp)
- return ERR_PTR(-ENOMEM);
-
- vma = __hwsp_alloc(timeline->gt);
- if (IS_ERR(vma)) {
- kfree(hwsp);
- return vma;
- }
-
- GT_TRACE(timeline->gt, "new HWSP allocated\n");
-
- vma->private = hwsp;
- hwsp->gt = timeline->gt;
- hwsp->vma = vma;
- hwsp->free_bitmap = ~0ull;
- hwsp->gt_timelines = gt;
-
- spin_lock_irq(&gt->hwsp_lock);
- list_add(&hwsp->free_link, &gt->hwsp_free_list);
- }
-
- GEM_BUG_ON(!hwsp->free_bitmap);
- *cacheline = __ffs64(hwsp->free_bitmap);
- hwsp->free_bitmap &= ~BIT_ULL(*cacheline);
- if (!hwsp->free_bitmap)
- list_del(&hwsp->free_link);
-
- spin_unlock_irq(&gt->hwsp_lock);
-
- GEM_BUG_ON(hwsp->vma->private != hwsp);
- return hwsp->vma;
-}
-
-static void __idle_hwsp_free(struct intel_timeline_hwsp *hwsp, int cacheline)
-{
- struct intel_gt_timelines *gt = hwsp->gt_timelines;
- unsigned long flags;
-
- spin_lock_irqsave(&gt->hwsp_lock, flags);
-
- /* As a cacheline becomes available, publish the HWSP on the freelist */
- if (!hwsp->free_bitmap)
- list_add_tail(&hwsp->free_link, &gt->hwsp_free_list);
-
- GEM_BUG_ON(cacheline >= BITS_PER_TYPE(hwsp->free_bitmap));
- hwsp->free_bitmap |= BIT_ULL(cacheline);
-
- /* And if no one is left using it, give the page back to the system */
- if (hwsp->free_bitmap == ~0ull) {
- i915_vma_put(hwsp->vma);
- list_del(&hwsp->free_link);
- kfree(hwsp);
- }
-
- spin_unlock_irqrestore(&gt->hwsp_lock, flags);
-}
-
-static void __rcu_cacheline_free(struct rcu_head *rcu)
-{
- struct intel_timeline_cacheline *cl =
- container_of(rcu, typeof(*cl), rcu);
-
- /* Must wait until after all *rq->hwsp are complete before removing */
- i915_gem_object_unpin_map(cl->hwsp->vma->obj);
- __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
-
- i915_active_fini(&cl->active);
- kfree(cl);
-}
-
-static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
-{
- GEM_BUG_ON(!i915_active_is_idle(&cl->active));
- call_rcu(&cl->rcu, __rcu_cacheline_free);
-}
-
__i915_active_call
-static void __cacheline_retire(struct i915_active *active)
+static void __timeline_retire(struct i915_active *active)
{
- struct intel_timeline_cacheline *cl =
- container_of(active, typeof(*cl), active);
+ struct intel_timeline *tl =
+ container_of(active, typeof(*tl), active);
- i915_vma_unpin(cl->hwsp->vma);
- if (ptr_test_bit(cl->vaddr, CACHELINE_FREE))
- __idle_cacheline_free(cl);
+ i915_vma_unpin(tl->hwsp_ggtt);
+ intel_timeline_put(tl);
}
-static int __cacheline_active(struct i915_active *active)
+static int __timeline_active(struct i915_active *active)
{
- struct intel_timeline_cacheline *cl =
- container_of(active, typeof(*cl), active);
+ struct intel_timeline *tl =
+ container_of(active, typeof(*tl), active);
- __i915_vma_pin(cl->hwsp->vma);
+ __i915_vma_pin(tl->hwsp_ggtt);
+ intel_timeline_get(tl);
return 0;
}
-static struct intel_timeline_cacheline *
-cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
+I915_SELFTEST_EXPORT int
+intel_timeline_pin_map(struct intel_timeline *timeline)
{
- struct intel_timeline_cacheline *cl;
+ struct drm_i915_gem_object *obj = timeline->hwsp_ggtt->obj;
+ u32 ofs = offset_in_page(timeline->hwsp_offset);
void *vaddr;
- GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS));
-
- cl = kmalloc(sizeof(*cl), GFP_KERNEL);
- if (!cl)
- return ERR_PTR(-ENOMEM);
-
- vaddr = i915_gem_object_pin_map(hwsp->vma->obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- kfree(cl);
- return ERR_CAST(vaddr);
- }
-
- cl->hwsp = hwsp;
- cl->vaddr = page_pack_bits(vaddr, cacheline);
-
- i915_active_init(&cl->active, __cacheline_active, __cacheline_retire);
-
- return cl;
-}
-
-static void cacheline_acquire(struct intel_timeline_cacheline *cl,
- u32 ggtt_offset)
-{
- if (!cl)
- return;
-
- cl->ggtt_offset = ggtt_offset;
- i915_active_acquire(&cl->active);
-}
-
-static void cacheline_release(struct intel_timeline_cacheline *cl)
-{
- if (cl)
- i915_active_release(&cl->active);
-}
-
-static void cacheline_free(struct intel_timeline_cacheline *cl)
-{
- if (!i915_active_acquire_if_busy(&cl->active)) {
- __idle_cacheline_free(cl);
- return;
- }
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
- GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
- cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);
+ timeline->hwsp_map = vaddr;
+ timeline->hwsp_seqno = memset(vaddr + ofs, 0, TIMELINE_SEQNO_BYTES);
+ clflush(vaddr + ofs);
- i915_active_release(&cl->active);
+ return 0;
}
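
With the shared-cacheline machinery gone, a timeline's seqno lives at a
fixed 8-byte slot inside its page-sized HWSP, and the CPU pointer falls out
of the page offset alone. Assuming 4KiB pages:

	/* offset_in_page() keeps only the bits below PAGE_SHIFT */
	u32 ofs = timeline->hwsp_offset & (PAGE_SIZE - 1); /* e.g. 0x1038 -> 0x38 */
	u32 *seqno = (u32 *)((char *)timeline->hwsp_map + ofs);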
static int intel_timeline_init(struct intel_timeline *timeline,
@@ -220,45 +75,25 @@ static int intel_timeline_init(struct intel_timeline *timeline,
struct i915_vma *hwsp,
unsigned int offset)
{
- void *vaddr;
-
kref_init(&timeline->kref);
atomic_set(&timeline->pin_count, 0);
timeline->gt = gt;
- timeline->has_initial_breadcrumb = !hwsp;
- timeline->hwsp_cacheline = NULL;
-
- if (!hwsp) {
- struct intel_timeline_cacheline *cl;
- unsigned int cacheline;
-
- hwsp = hwsp_alloc(timeline, &cacheline);
+ if (hwsp) {
+ timeline->hwsp_offset = offset;
+ timeline->hwsp_ggtt = i915_vma_get(hwsp);
+ } else {
+ timeline->has_initial_breadcrumb = true;
+ hwsp = hwsp_alloc(gt);
if (IS_ERR(hwsp))
return PTR_ERR(hwsp);
-
- cl = cacheline_alloc(hwsp->private, cacheline);
- if (IS_ERR(cl)) {
- __idle_hwsp_free(hwsp->private, cacheline);
- return PTR_ERR(cl);
- }
-
- timeline->hwsp_cacheline = cl;
- timeline->hwsp_offset = cacheline * CACHELINE_BYTES;
-
- vaddr = page_mask_bits(cl->vaddr);
- } else {
- timeline->hwsp_offset = offset;
- vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB);
- if (IS_ERR(vaddr))
- return PTR_ERR(vaddr);
+ timeline->hwsp_ggtt = hwsp;
}
- timeline->hwsp_seqno =
- memset(vaddr + timeline->hwsp_offset, 0, CACHELINE_BYTES);
+ timeline->hwsp_map = NULL;
+ timeline->hwsp_seqno = (void *)(long)timeline->hwsp_offset;
- timeline->hwsp_ggtt = i915_vma_get(hwsp);
GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size);
timeline->fence_context = dma_fence_context_alloc(1);
@@ -269,6 +104,7 @@ static int intel_timeline_init(struct intel_timeline *timeline,
INIT_LIST_HEAD(&timeline->requests);
i915_syncmap_init(&timeline->sync);
+ i915_active_init(&timeline->active, __timeline_active, __timeline_retire);
return 0;
}
@@ -279,23 +115,19 @@ void intel_gt_init_timelines(struct intel_gt *gt)
spin_lock_init(&timelines->lock);
INIT_LIST_HEAD(&timelines->active_list);
-
- spin_lock_init(&timelines->hwsp_lock);
- INIT_LIST_HEAD(&timelines->hwsp_free_list);
}
-static void intel_timeline_fini(struct intel_timeline *timeline)
+static void intel_timeline_fini(struct rcu_head *rcu)
{
- GEM_BUG_ON(atomic_read(&timeline->pin_count));
- GEM_BUG_ON(!list_empty(&timeline->requests));
- GEM_BUG_ON(timeline->retire);
+ struct intel_timeline *timeline =
+ container_of(rcu, struct intel_timeline, rcu);
- if (timeline->hwsp_cacheline)
- cacheline_free(timeline->hwsp_cacheline);
- else
+ if (timeline->hwsp_map)
i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);
i915_vma_put(timeline->hwsp_ggtt);
+ i915_active_fini(&timeline->active);
+ kfree(timeline);
}
struct intel_timeline *
@@ -351,6 +183,12 @@ int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww)
if (atomic_add_unless(&tl->pin_count, 1, 0))
return 0;
+ if (!tl->hwsp_map) {
+ err = intel_timeline_pin_map(tl);
+ if (err)
+ return err;
+ }
+
err = i915_ggtt_pin(tl->hwsp_ggtt, ww, 0, PIN_HIGH);
if (err)
return err;
@@ -361,9 +199,9 @@ int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww)
GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
tl->fence_context, tl->hwsp_offset);
- cacheline_acquire(tl->hwsp_cacheline, tl->hwsp_offset);
+ i915_active_acquire(&tl->active);
if (atomic_fetch_inc(&tl->pin_count)) {
- cacheline_release(tl->hwsp_cacheline);
+ i915_active_release(&tl->active);
__i915_vma_unpin(tl->hwsp_ggtt);
}
@@ -372,9 +210,13 @@ int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww)
void intel_timeline_reset_seqno(const struct intel_timeline *tl)
{
+ u32 *hwsp_seqno = (u32 *)tl->hwsp_seqno;
/* Must be pinned to be writable, and no requests in flight. */
GEM_BUG_ON(!atomic_read(&tl->pin_count));
- WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
+
+ memset(hwsp_seqno + 1, 0, TIMELINE_SEQNO_BYTES - sizeof(*hwsp_seqno));
+ WRITE_ONCE(*hwsp_seqno, tl->seqno);
+ clflush(hwsp_seqno);
}
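
The reset is ordered so a racing reader never pairs the new seqno with
stale slot state: the tail of the slot is zeroed first, the seqno is
published last via WRITE_ONCE(), and clflush() forces the line out of the
CPU caches for agents reading over the bus. The assumed layout of the
8-byte slot:

	/*
	 * bytes [0..3]: seqno     - written last, via WRITE_ONCE()
	 * bytes [4..7]: aux dword - cleared first by the memset()
	 */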
void intel_timeline_enter(struct intel_timeline *tl)
@@ -450,106 +292,23 @@ static u32 timeline_advance(struct intel_timeline *tl)
return tl->seqno += 1 + tl->has_initial_breadcrumb;
}
-static void timeline_rollback(struct intel_timeline *tl)
-{
- tl->seqno -= 1 + tl->has_initial_breadcrumb;
-}
-
static noinline int
__intel_timeline_get_seqno(struct intel_timeline *tl,
- struct i915_request *rq,
u32 *seqno)
{
- struct intel_timeline_cacheline *cl;
- unsigned int cacheline;
- struct i915_vma *vma;
- void *vaddr;
- int err;
-
- might_lock(&tl->gt->ggtt->vm.mutex);
- GT_TRACE(tl->gt, "timeline:%llx wrapped\n", tl->fence_context);
-
- /*
- * If there is an outstanding GPU reference to this cacheline,
- * such as it being sampled by a HW semaphore on another timeline,
- * we cannot wraparound our seqno value (the HW semaphore does
- * a strict greater-than-or-equals compare, not i915_seqno_passed).
- * So if the cacheline is still busy, we must detach ourselves
- * from it and leave it inflight alongside its users.
- *
- * However, if nobody is watching and we can guarantee that nobody
- * will, we could simply reuse the same cacheline.
- *
- * if (i915_active_request_is_signaled(&tl->last_request) &&
- * i915_active_is_signaled(&tl->hwsp_cacheline->active))
- * return 0;
- *
- * That seems unlikely for a busy timeline that needed to wrap in
- * the first place, so just replace the cacheline.
- */
-
- vma = hwsp_alloc(tl, &cacheline);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_rollback;
- }
-
- err = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
- if (err) {
- __idle_hwsp_free(vma->private, cacheline);
- goto err_rollback;
- }
-
- cl = cacheline_alloc(vma->private, cacheline);
- if (IS_ERR(cl)) {
- err = PTR_ERR(cl);
- __idle_hwsp_free(vma->private, cacheline);
- goto err_unpin;
- }
- GEM_BUG_ON(cl->hwsp->vma != vma);
-
- /*
- * Attach the old cacheline to the current request, so that we only
- * free it after the current request is retired, which ensures that
- * all writes into the cacheline from previous requests are complete.
- */
- err = i915_active_ref(&tl->hwsp_cacheline->active,
- tl->fence_context,
- &rq->fence);
- if (err)
- goto err_cacheline;
-
- cacheline_release(tl->hwsp_cacheline); /* ownership now xfered to rq */
- cacheline_free(tl->hwsp_cacheline);
-
- i915_vma_unpin(tl->hwsp_ggtt); /* binding kept alive by old cacheline */
- i915_vma_put(tl->hwsp_ggtt);
-
- tl->hwsp_ggtt = i915_vma_get(vma);
+ u32 next_ofs = offset_in_page(tl->hwsp_offset + TIMELINE_SEQNO_BYTES);
- vaddr = page_mask_bits(cl->vaddr);
- tl->hwsp_offset = cacheline * CACHELINE_BYTES;
- tl->hwsp_seqno =
- memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES);
+ /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
+ if (TIMELINE_SEQNO_BYTES <= BIT(5) && (next_ofs & BIT(5)))
+ next_ofs = offset_in_page(next_ofs + BIT(5));
- tl->hwsp_offset += i915_ggtt_offset(vma);
- GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
- tl->fence_context, tl->hwsp_offset);
-
- cacheline_acquire(cl, tl->hwsp_offset);
- tl->hwsp_cacheline = cl;
+ tl->hwsp_offset = i915_ggtt_offset(tl->hwsp_ggtt) + next_ofs;
+ tl->hwsp_seqno = tl->hwsp_map + next_ofs;
+ intel_timeline_reset_seqno(tl);
*seqno = timeline_advance(tl);
GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno));
return 0;
-
-err_cacheline:
- cacheline_free(cl);
-err_unpin:
- i915_vma_unpin(vma);
-err_rollback:
- timeline_rollback(tl);
- return err;
}
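
On wraparound the timeline simply steps to the next 8-byte slot in the same
HWSP page, bumping past any offset whose bit 5 is set so the address stays
legal for MI_FLUSH_DW. A worked example for a slot currently at page offset
0x18:

	u32 next_ofs = offset_in_page(0x18 + TIMELINE_SEQNO_BYTES);	/* 0x20 */
	/* bit 5 (0x20) is set, so skip forward by BIT(5) and re-wrap */
	if (TIMELINE_SEQNO_BYTES <= BIT(5) && (next_ofs & BIT(5)))
		next_ofs = offset_in_page(next_ofs + BIT(5));		/* 0x40 */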
int intel_timeline_get_seqno(struct intel_timeline *tl,
@@ -559,51 +318,52 @@ int intel_timeline_get_seqno(struct intel_timeline *tl,
*seqno = timeline_advance(tl);
/* Replace the HWSP on wraparound for HW semaphores */
- if (unlikely(!*seqno && tl->hwsp_cacheline))
- return __intel_timeline_get_seqno(tl, rq, seqno);
+ if (unlikely(!*seqno && tl->has_initial_breadcrumb))
+ return __intel_timeline_get_seqno(tl, seqno);
return 0;
}
-static int cacheline_ref(struct intel_timeline_cacheline *cl,
- struct i915_request *rq)
-{
- return i915_active_add_request(&cl->active, rq);
-}
-
int intel_timeline_read_hwsp(struct i915_request *from,
struct i915_request *to,
u32 *hwsp)
{
- struct intel_timeline_cacheline *cl;
+ struct intel_timeline *tl;
int err;
- GEM_BUG_ON(!rcu_access_pointer(from->hwsp_cacheline));
-
rcu_read_lock();
- cl = rcu_dereference(from->hwsp_cacheline);
- if (i915_request_signaled(from)) /* confirm cacheline is valid */
- goto unlock;
- if (unlikely(!i915_active_acquire_if_busy(&cl->active)))
- goto unlock; /* seqno wrapped and completed! */
- if (unlikely(__i915_request_is_complete(from)))
- goto release;
+ tl = rcu_dereference(from->timeline);
+ if (i915_request_signaled(from) ||
+ !i915_active_acquire_if_busy(&tl->active))
+ tl = NULL;
+
+ if (tl) {
+ /* hwsp_offset may wrap around, so use from->hwsp_seqno */
+ *hwsp = i915_ggtt_offset(tl->hwsp_ggtt) +
+ offset_in_page(from->hwsp_seqno);
+ }
+
+ /* ensure we wait on the right request; if it already completed, bail */
+ if (tl && __i915_request_is_complete(from)) {
+ i915_active_release(&tl->active);
+ tl = NULL;
+ }
rcu_read_unlock();
- err = cacheline_ref(cl, to);
- if (err)
+ if (!tl)
+ return 1;
+
+ /* Can't do semaphore waits on the kernel context */
+ if (!tl->has_initial_breadcrumb) {
+ err = -EINVAL;
goto out;
+ }
+
+ err = i915_active_add_request(&tl->active, to);
- *hwsp = cl->ggtt_offset;
out:
- i915_active_release(&cl->active);
+ i915_active_release(&tl->active);
return err;
-
-release:
- i915_active_release(&cl->active);
-unlock:
- rcu_read_unlock();
- return 1;
}
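
The rewritten lookup rests on the acquire-if-busy idiom: under
rcu_read_lock() the timeline is referenced only while its i915_active is
still in flight, so the HWSP is kept alive exactly as long as there is
something to wait on. The idiom in isolation:

	rcu_read_lock();
	tl = rcu_dereference(from->timeline);
	if (!i915_active_acquire_if_busy(&tl->active))
		tl = NULL;		/* already idle: nothing to wait on */
	rcu_read_unlock();

	if (tl) {
		/* tl and its HWSP are pinned until the matching release */
		i915_active_release(&tl->active);
	}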
void intel_timeline_unpin(struct intel_timeline *tl)
@@ -612,8 +372,7 @@ void intel_timeline_unpin(struct intel_timeline *tl)
if (!atomic_dec_and_test(&tl->pin_count))
return;
- cacheline_release(tl->hwsp_cacheline);
-
+ i915_active_release(&tl->active);
__i915_vma_unpin(tl->hwsp_ggtt);
}
@@ -622,8 +381,11 @@ void __intel_timeline_free(struct kref *kref)
struct intel_timeline *timeline =
container_of(kref, typeof(*timeline), kref);
- intel_timeline_fini(timeline);
- kfree_rcu(timeline, rcu);
+ GEM_BUG_ON(atomic_read(&timeline->pin_count));
+ GEM_BUG_ON(!list_empty(&timeline->requests));
+ GEM_BUG_ON(timeline->retire);
+
+ call_rcu(&timeline->rcu, intel_timeline_fini);
}
void intel_gt_fini_timelines(struct intel_gt *gt)
@@ -631,7 +393,6 @@ void intel_gt_fini_timelines(struct intel_gt *gt)
struct intel_gt_timelines *timelines = &gt->timelines;
GEM_BUG_ON(!list_empty(&timelines->active_list));
- GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list));
}
void intel_gt_show_timelines(struct intel_gt *gt,
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h
index dcdee692a80e..57308c4d664a 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.h
@@ -1,25 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
*/
#ifndef I915_TIMELINE_H
@@ -117,4 +98,6 @@ intel_timeline_is_last(const struct intel_timeline *tl,
return list_is_last_rcu(&rq->link, &tl->requests);
}
+I915_SELFTEST_DECLARE(int intel_timeline_pin_map(struct intel_timeline *tl));
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
index e360f50706bf..74e67dbf89c5 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016 Intel Corporation
*/
@@ -18,7 +17,6 @@
struct i915_vma;
struct i915_syncmap;
struct intel_gt;
-struct intel_timeline_hwsp;
struct intel_timeline {
u64 fence_context;
@@ -45,12 +43,11 @@ struct intel_timeline {
atomic_t pin_count;
atomic_t active_count;
+ void *hwsp_map;
const u32 *hwsp_seqno;
struct i915_vma *hwsp_ggtt;
u32 hwsp_offset;
- struct intel_timeline_cacheline *hwsp_cacheline;
-
bool has_initial_breadcrumb;
/**
@@ -67,6 +64,8 @@ struct intel_timeline {
*/
struct i915_active_fence last_request;
+ struct i915_active active;
+
/** A chain of completed timelines ready for early retirement. */
struct intel_timeline *retire;
@@ -90,15 +89,4 @@ struct intel_timeline {
struct rcu_head rcu;
};
-struct intel_timeline_cacheline {
- struct i915_active active;
-
- struct intel_timeline_hwsp *hwsp;
- void *vaddr;
-
- u32 ggtt_offset;
-
- struct rcu_head rcu;
-};
-
#endif /* __I915_TIMELINE_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index ec366cf9ef56..2c6f7217469f 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2018 Intel Corporation
*/
@@ -53,37 +52,6 @@
* - Public functions to init or apply the given workaround type.
*/
-/*
- * KBL revision ID ordering is bizarre; higher revision ID's map to lower
- * steppings in some cases. So rather than test against the revision ID
- * directly, let's map that into our own range of increasing ID's that we
- * can test against in a regular manner.
- */
-
-const struct i915_rev_steppings kbl_revids[] = {
- [0] = { .gt_stepping = KBL_REVID_A0, .disp_stepping = KBL_REVID_A0 },
- [1] = { .gt_stepping = KBL_REVID_B0, .disp_stepping = KBL_REVID_B0 },
- [2] = { .gt_stepping = KBL_REVID_C0, .disp_stepping = KBL_REVID_B0 },
- [3] = { .gt_stepping = KBL_REVID_D0, .disp_stepping = KBL_REVID_B0 },
- [4] = { .gt_stepping = KBL_REVID_F0, .disp_stepping = KBL_REVID_C0 },
- [5] = { .gt_stepping = KBL_REVID_C0, .disp_stepping = KBL_REVID_B1 },
- [6] = { .gt_stepping = KBL_REVID_D1, .disp_stepping = KBL_REVID_B1 },
- [7] = { .gt_stepping = KBL_REVID_G0, .disp_stepping = KBL_REVID_C0 },
-};
-
-const struct i915_rev_steppings tgl_uy_revids[] = {
- [0] = { .gt_stepping = TGL_REVID_A0, .disp_stepping = TGL_REVID_A0 },
- [1] = { .gt_stepping = TGL_REVID_B0, .disp_stepping = TGL_REVID_C0 },
- [2] = { .gt_stepping = TGL_REVID_B1, .disp_stepping = TGL_REVID_C0 },
- [3] = { .gt_stepping = TGL_REVID_C0, .disp_stepping = TGL_REVID_D0 },
-};
-
-/* Same GT stepping between tgl_uy_revids and tgl_revids don't mean the same HW */
-const struct i915_rev_steppings tgl_revids[] = {
- [0] = { .gt_stepping = TGL_REVID_A0, .disp_stepping = TGL_REVID_B0 },
- [1] = { .gt_stepping = TGL_REVID_B0, .disp_stepping = TGL_REVID_D0 },
-};
-
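
These tables existed because revision IDs on KBL and TGL do not increase
monotonically with hardware stepping; the driver indexed the revid into a
table of properly ordered stepping codes and compared those instead. A
hypothetical, reduced form of the lookup the tables served (the replacement
IS_KBL_GT_STEP()/IS_TGL_UY_GT_STEP() macros centralize the same idea):

	static bool gt_step_in_range(const struct i915_rev_steppings *tbl,
				     int nentries, int revid,
				     int since, int until)
	{
		int step;

		if (revid >= nentries)
			revid = nentries - 1;	/* clamp unknown future revids */
		step = tbl[revid].gt_stepping;
		return step >= since && step <= until;
	}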
static void wa_init_start(struct i915_wa_list *wal, const char *name, const char *engine_name)
{
wal->name = name;
@@ -273,7 +241,7 @@ static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
/* Use Force Non-Coherent whenever executing a 3D context. This is a
- * workaround for for a possible hang in the unlikely event a TLB
+ * workaround for a possible hang in the unlikely event a TLB
* invalidation occurs during a PSD flush.
*/
/* WaForceEnableNonCoherent:bdw,chv */
@@ -513,7 +481,7 @@ static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
gen9_ctx_workarounds_init(engine, wal);
/* WaToEnableHwFixForPushConstHWBug:kbl */
- if (IS_KBL_GT_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
+ if (IS_KBL_GT_STEP(i915, STEP_C0, STEP_FOREVER))
wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
@@ -722,7 +690,8 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
if (IS_DG1(i915))
dg1_ctx_workarounds_init(engine, wal);
- else if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915))
+ else if (IS_ALDERLAKE_S(i915) || IS_ROCKETLAKE(i915) ||
+ IS_TIGERLAKE(i915))
tgl_ctx_workarounds_init(engine, wal);
else if (IS_GEN(i915, 12))
gen12_ctx_workarounds_init(engine, wal);
@@ -749,7 +718,7 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
else if (IS_GEN(i915, 6))
gen6_ctx_workarounds_init(engine, wal);
else if (INTEL_GEN(i915) < 8)
- return;
+ ;
else
MISSING_CASE(INTEL_GEN(i915));
@@ -930,7 +899,7 @@ kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
gen9_gt_workarounds_init(i915, wal);
/* WaDisableDynamicCreditSharing:kbl */
- if (IS_KBL_GT_REVID(i915, 0, KBL_REVID_B0))
+ if (IS_KBL_GT_STEP(i915, 0, STEP_B0))
wa_write_or(wal,
GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
@@ -1103,11 +1072,10 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
/* Wa_1607087056:icl,ehl,jsl */
if (IS_ICELAKE(i915) ||
- IS_JSL_EHL_REVID(i915, EHL_REVID_A0, EHL_REVID_A0)) {
+ IS_JSL_EHL_REVID(i915, EHL_REVID_A0, EHL_REVID_A0))
wa_write_or(wal,
SLICE_UNIT_LEVEL_CLKGATE,
L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
- }
}
static void
@@ -1123,19 +1091,19 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
gen12_gt_workarounds_init(i915, wal);
/* Wa_1409420604:tgl */
- if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
+ if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_A0))
wa_write_or(wal,
SUBSLICE_UNIT_LEVEL_CLKGATE2,
CPSSUNIT_CLKGATE_DIS);
/* Wa_1607087056:tgl also known as BUG:1409180338 */
- if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
+ if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_A0))
wa_write_or(wal,
SLICE_UNIT_LEVEL_CLKGATE,
L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
/* Wa_1408615072:tgl[a0] */
- if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
+ if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_A0))
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
VSUNIT_CLKGATE_DIS_TGL);
}
@@ -1202,7 +1170,7 @@ gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
else if (IS_GEN(i915, 4))
gen4_gt_workarounds_init(i915, wal);
else if (INTEL_GEN(i915) <= 8)
- return;
+ ;
else
MISSING_CASE(INTEL_GEN(i915));
}
@@ -1577,7 +1545,7 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
else if (IS_SKYLAKE(i915))
skl_whitelist_build(engine);
else if (INTEL_GEN(i915) <= 8)
- return;
+ ;
else
MISSING_CASE(INTEL_GEN(i915));
@@ -1613,7 +1581,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
struct drm_i915_private *i915 = engine->i915;
if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
- IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
+ IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_A0)) {
/*
* Wa_1607138336:tgl[a0],dg1[a0]
* Wa_1607063988:tgl[a0],dg1[a0]
@@ -1623,7 +1591,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
}
- if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
+ if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_A0)) {
/*
* Wa_1606679103:tgl
* (see also Wa_1606682166:icl)
@@ -1633,45 +1601,45 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN7_DISABLE_SAMPLER_PREFETCH);
}
- if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
- /* Wa_1606931601:tgl,rkl,dg1 */
+ if (IS_ALDERLAKE_S(i915) || IS_DG1(i915) ||
+ IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
+ /* Wa_1606931601:tgl,rkl,dg1,adl-s */
wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);
/*
* Wa_1407928979:tgl A*
* Wa_18011464164:tgl[B0+],dg1[B0+]
* Wa_22010931296:tgl[B0+],dg1[B0+]
- * Wa_14010919138:rkl, dg1
+ * Wa_14010919138:rkl,dg1,adl-s
*/
wa_write_or(wal, GEN7_FF_THREAD_MODE,
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
/*
* Wa_1606700617:tgl,dg1
- * Wa_22010271021:tgl,rkl,dg1
+ * Wa_22010271021:tgl,rkl,dg1,adl-s
*/
wa_masked_en(wal,
GEN9_CS_DEBUG_MODE1,
FF_DOP_CLOCK_GATE_DISABLE);
-
- /* Wa_1406941453:tgl,rkl,dg1 */
- wa_masked_en(wal,
- GEN10_SAMPLER_MODE,
- ENABLE_SMALLPL);
}
- if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
+ if (IS_ALDERLAKE_S(i915) || IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
- /* Wa_1409804808:tgl,rkl,dg1[a0] */
+ /* Wa_1409804808:tgl,rkl,dg1[a0],adl-s */
wa_masked_en(wal, GEN7_ROW_CHICKEN2,
GEN12_PUSH_CONST_DEREF_HOLD_DIS);
/*
* Wa_1409085225:tgl
- * Wa_14010229206:tgl,rkl,dg1[a0]
+ * Wa_14010229206:tgl,rkl,dg1[a0],adl-s
*/
wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
+ }
+
+ if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
+ IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
/*
* Wa_1607030317:tgl
* Wa_1607186500:tgl
@@ -1688,6 +1656,13 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN8_RC_SEMA_IDLE_MSG_DISABLE);
}
+ if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
+ /* Wa_1406941453:tgl,rkl,dg1 */
+ wa_masked_en(wal,
+ GEN10_SAMPLER_MODE,
+ ENABLE_SMALLPL);
+ }
+
if (IS_GEN(i915, 11)) {
/* This is not a Wa. Enable for better image quality */
wa_masked_en(wal,
@@ -2045,7 +2020,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
struct drm_i915_private *i915 = engine->i915;
/* WaKBLVECSSemaphoreWaitPoll:kbl */
- if (IS_KBL_GT_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
+ if (IS_KBL_GT_STEP(i915, STEP_A0, STEP_E0)) {
wa_write(wal,
RING_SEMA_WAIT_POLL(engine->mmio_base),
1);
@@ -2197,10 +2172,15 @@ retry:
if (err)
goto err_pm;
+ err = i915_vma_pin_ww(vma, &ww, 0, 0,
+ i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
+ if (err)
+ goto err_unpin;
+
rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto err_unpin;
+ goto err_vma;
}
err = i915_request_await_object(rq, vma->obj, true);
@@ -2241,6 +2221,8 @@ retry:
err_rq:
i915_request_put(rq);
+err_vma:
+ i915_vma_unpin(vma);
err_unpin:
intel_context_unpin(ce);
err_pm:
@@ -2251,7 +2233,6 @@ err_pm:
}
i915_gem_ww_ctx_fini(&ww);
intel_engine_pm_put(ce->engine);
- i915_vma_unpin(vma);
i915_vma_put(vma);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.h b/drivers/gpu/drm/i915/gt/intel_workarounds.h
index 8c9c769c2204..15abb68b6c00 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2018 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
index d166a7145720..c214111ea367 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2018 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 4b4f03b70df7..e1ba03b93ffa 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -1,25 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
*/
#include "gem/i915_gem_context.h"
@@ -32,9 +13,20 @@
#include "mock_engine.h"
#include "selftests/mock_request.h"
-static void mock_timeline_pin(struct intel_timeline *tl)
+static int mock_timeline_pin(struct intel_timeline *tl)
{
+ int err;
+
+ if (WARN_ON(!i915_gem_object_trylock(tl->hwsp_ggtt->obj)))
+ return -EBUSY;
+
+ err = intel_timeline_pin_map(tl);
+ i915_gem_object_unlock(tl->hwsp_ggtt->obj);
+ if (err)
+ return err;
+
atomic_inc(&tl->pin_count);
+ return 0;
}
static void mock_timeline_unpin(struct intel_timeline *tl)
@@ -152,6 +144,8 @@ static void mock_context_destroy(struct kref *ref)
static int mock_context_alloc(struct intel_context *ce)
{
+ int err;
+
ce->ring = mock_ring(ce->engine);
if (!ce->ring)
return -ENOMEM;
@@ -162,7 +156,12 @@ static int mock_context_alloc(struct intel_context *ce)
return PTR_ERR(ce->timeline);
}
- mock_timeline_pin(ce->timeline);
+ err = mock_timeline_pin(ce->timeline);
+ if (err) {
+ intel_timeline_put(ce->timeline);
+ ce->timeline = NULL;
+ return err;
+ }
return 0;
}
@@ -258,13 +257,15 @@ static void mock_reset_cancel(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(rq, &engine->active.requests, sched.link)
- i915_request_mark_eio(rq);
+ i915_request_put(i915_request_mark_eio(rq));
intel_engine_signal_breadcrumbs(engine);
/* Cancel and submit all pending requests. */
list_for_each_entry(rq, &mock->hw_queue, mock.link) {
- i915_request_mark_eio(rq);
- __i915_request_submit(rq);
+ if (i915_request_mark_eio(rq)) {
+ __i915_request_submit(rq);
+ i915_request_put(rq);
+ }
}
INIT_LIST_HEAD(&mock->hw_queue);
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.h b/drivers/gpu/drm/i915/gt/mock_engine.h
index 3f9b698c49d2..cc5ab6e1f37e 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.h
+++ b/drivers/gpu/drm/i915/gt/mock_engine.h
@@ -1,25 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
*/
#ifndef __MOCK_ENGINE_H__
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index db738d400168..b9bdd1d23243 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * SPDX-License-Identifier: GPL-2.0
- *
* Copyright © 2019 Intel Corporation
*/
@@ -88,8 +87,8 @@ static int __live_context_size(struct intel_engine_cs *engine)
if (err)
goto err;
- vaddr = i915_gem_object_pin_map(ce->state->obj,
- i915_coherent_map_type(engine->i915));
+ vaddr = i915_gem_object_pin_map_unlocked(ce->state->obj,
+ i915_coherent_map_type(engine->i915));
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
intel_context_unpin(ce);
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine.c b/drivers/gpu/drm/i915/gt/selftest_engine.c
index f65b118e261d..262764f6d90a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * SPDX-License-Identifier: GPL-2.0
- *
* Copyright © 2018 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine.h b/drivers/gpu/drm/i915/gt/selftest_engine.h
index ab32d09ec5a1..c6feb3bd2ccc 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine.h
+++ b/drivers/gpu/drm/i915/gt/selftest_engine.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index 439c8984f5fa..b32814a1f20b 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * SPDX-License-Identifier: GPL-2.0
- *
* Copyright © 2018 Intel Corporation
*/
@@ -42,6 +41,9 @@ static int perf_end(struct intel_gt *gt)
static int write_timestamp(struct i915_request *rq, int slot)
{
+ struct intel_timeline *tl =
+ rcu_dereference_protected(rq->timeline,
+ !i915_request_signaled(rq));
u32 cmd;
u32 *cs;
@@ -54,7 +56,7 @@ static int write_timestamp(struct i915_request *rq, int slot)
cmd++;
*cs++ = cmd;
*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
- *cs++ = i915_request_timeline(rq)->hwsp_offset + slot * sizeof(u32);
+ *cs++ = tl->hwsp_offset + slot * sizeof(u32);
*cs++ = 0;
intel_ring_advance(rq, cs);
@@ -73,7 +75,7 @@ static struct i915_vma *create_empty_batch(struct intel_context *ce)
if (IS_ERR(obj))
return ERR_CAST(obj);
- cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err_put;
@@ -209,7 +211,7 @@ static struct i915_vma *create_nop_batch(struct intel_context *ce)
if (IS_ERR(obj))
return ERR_CAST(obj);
- cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err_put;
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index 223ab88f7e57..b2c369317bf1 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2018 Intel Corporation
*/
@@ -12,6 +11,12 @@
#include "i915_selftest.h"
#include "selftest_engine_heartbeat.h"
+static void reset_heartbeat(struct intel_engine_cs *engine)
+{
+ intel_engine_set_heartbeat(engine,
+ engine->defaults.heartbeat_interval_ms);
+}
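
reset_heartbeat() restores the per-engine default captured at init rather
than the compile-time CONFIG value, since the two can now differ per
engine. Hypothetical usage from a test body:

	err = intel_engine_set_heartbeat(engine, 1);	/* 1ms interval */
	if (err)
		return err;
	/* ... provoke and observe heartbeats ... */
	reset_heartbeat(engine);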
+
static int timeline_sync(struct intel_timeline *tl)
{
struct dma_fence *fence;
@@ -270,7 +275,7 @@ static int __live_heartbeat_fast(struct intel_engine_cs *engine)
err = -EINVAL;
}
- intel_engine_set_heartbeat(engine, CONFIG_DRM_I915_HEARTBEAT_INTERVAL);
+ reset_heartbeat(engine);
err_pm:
intel_engine_pm_put(engine);
intel_context_put(ce);
@@ -285,7 +290,7 @@ static int live_heartbeat_fast(void *arg)
int err = 0;
/* Check that the heartbeat ticks at the desired rate. */
- if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
+ if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL))
return 0;
for_each_engine(engine, gt, id) {
@@ -333,7 +338,7 @@ static int __live_heartbeat_off(struct intel_engine_cs *engine)
}
err_beat:
- intel_engine_set_heartbeat(engine, CONFIG_DRM_I915_HEARTBEAT_INTERVAL);
+ reset_heartbeat(engine);
err_pm:
intel_engine_pm_put(engine);
return err;
@@ -347,7 +352,7 @@ static int live_heartbeat_off(void *arg)
int err = 0;
/* Check that we can turn off heartbeat and not interrupt VIP */
- if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
+ if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL))
return 0;
for_each_engine(engine, gt, id) {
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
index c3d965279fc3..2c898622bdfb 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * SPDX-License-Identifier: GPL-2.0
- *
* Copyright © 2018 Intel Corporation
*/
@@ -111,13 +110,15 @@ static int __measure_timestamps(struct intel_context *ce,
cpu_relax();
/* Run the request for 100us, sampling timestamps before/after */
- preempt_disable();
- *dt = local_clock();
+ local_irq_disable();
write_semaphore(&sema[2], 0);
+ while (READ_ONCE(sema[1]) == 0) /* wait for the gpu to catch up */
+ cpu_relax();
+ *dt = local_clock();
udelay(100);
*dt = local_clock() - *dt;
write_semaphore(&sema[2], 1);
- preempt_enable();
+ local_irq_enable();
if (i915_request_wait(rq, 0, HZ / 2) < 0) {
i915_request_put(rq);
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 264b5ebdb021..1081cd36a2bd 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2018 Intel Corporation
*/
@@ -321,7 +320,7 @@ static int live_unlite_switch(void *arg)
static int live_unlite_preempt(void *arg)
{
- return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
+ return live_unlite_restore(arg, I915_PRIORITY_MAX);
}
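
Most of the mechanical churn in this file tracks the scheduler dropping its
internal priority sub-levels: user priorities are no longer shifted into
the high bits, so the I915_USER_PRIORITY() wrapper (and I915_PRIORITY_MASK)
disappears. Before and after, schematically:

	/* before: user value shifted to leave room for internal sub-levels */
	attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);

	/* after: raw priority values are used directly */
	attr.priority = I915_PRIORITY_MAX;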
static int live_unlite_ring(void *arg)
@@ -609,7 +608,7 @@ static int live_hold_reset(void *arg)
}
tasklet_disable(&engine->execlists.tasklet);
- engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+ engine->execlists.tasklet.callback(&engine->execlists.tasklet);
GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
i915_request_get(rq);
@@ -989,7 +988,7 @@ static int live_timeslice_preempt(void *arg)
goto err_obj;
}
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_obj;
@@ -1081,7 +1080,6 @@ create_rewinder(struct intel_context *ce,
intel_ring_advance(rq, cs);
- rq->sched.attr.priority = I915_PRIORITY_MASK;
err = 0;
err:
i915_request_get(rq);
@@ -1297,7 +1295,7 @@ static int live_timeslice_queue(void *arg)
goto err_obj;
}
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_obj;
@@ -1312,9 +1310,7 @@ static int live_timeslice_queue(void *arg)
goto err_pin;
for_each_engine(engine, gt, id) {
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
- };
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };
struct i915_request *rq, *nop;
if (!intel_engine_has_preemption(engine))
@@ -1529,14 +1525,12 @@ static int live_busywait_preempt(void *arg)
ctx_hi = kernel_context(gt->i915);
if (!ctx_hi)
return -ENOMEM;
- ctx_hi->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
+ ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
ctx_lo = kernel_context(gt->i915);
if (!ctx_lo)
goto err_ctx_hi;
- ctx_lo->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
+ ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(obj)) {
@@ -1544,7 +1538,7 @@ static int live_busywait_preempt(void *arg)
goto err_ctx_lo;
}
- map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(map)) {
err = PTR_ERR(map);
goto err_obj;
@@ -1733,14 +1727,12 @@ static int live_preempt(void *arg)
ctx_hi = kernel_context(gt->i915);
if (!ctx_hi)
goto err_spin_lo;
- ctx_hi->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
+ ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
ctx_lo = kernel_context(gt->i915);
if (!ctx_lo)
goto err_ctx_hi;
- ctx_lo->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
+ ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
for_each_engine(engine, gt, id) {
struct igt_live_test t;
@@ -1833,7 +1825,7 @@ static int live_late_preempt(void *arg)
goto err_ctx_hi;
/* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
- ctx_lo->sched.priority = I915_USER_PRIORITY(1);
+ ctx_lo->sched.priority = 1;
for_each_engine(engine, gt, id) {
struct igt_live_test t;
@@ -1874,7 +1866,7 @@ static int live_late_preempt(void *arg)
goto err_wedged;
}
- attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
+ attr.priority = I915_PRIORITY_MAX;
engine->schedule(rq, &attr);
if (!igt_wait_for_spinner(&spin_hi, rq)) {
@@ -1955,7 +1947,7 @@ static int live_nopreempt(void *arg)
return -ENOMEM;
if (preempt_client_init(gt, &b))
goto err_client_a;
- b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
+ b.ctx->sched.priority = I915_PRIORITY_MAX;
for_each_engine(engine, gt, id) {
struct i915_request *rq_a, *rq_b;
@@ -2420,11 +2412,9 @@ err_wedged:
static int live_suppress_self_preempt(void *arg)
{
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
- };
struct preempt_client a, b;
enum intel_engine_id id;
int err = -ENOMEM;
@@ -2555,9 +2545,7 @@ static int live_chain_preempt(void *arg)
goto err_client_hi;
for_each_engine(engine, gt, id) {
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
- };
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };
struct igt_live_test t;
struct i915_request *rq;
int ring_size, count, i;
@@ -2714,7 +2702,7 @@ static int create_gang(struct intel_engine_cs *engine,
if (err)
goto err_obj;
- cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err_obj;
@@ -2876,7 +2864,7 @@ static int __live_preempt_ring(struct intel_engine_cs *engine,
err = wait_for_submit(engine, rq, HZ / 2);
i915_request_put(rq);
if (err) {
- pr_err("%s: preemption request was not submited\n",
+ pr_err("%s: preemption request was not submitted\n",
engine->name);
err = -ETIME;
}
@@ -2976,9 +2964,7 @@ static int live_preempt_gang(void *arg)
return -EIO;
do {
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(prio++),
- };
+ struct i915_sched_attr attr = { .priority = prio++ };
err = create_gang(engine, &rq);
if (err)
@@ -2997,7 +2983,7 @@ static int live_preempt_gang(void *arg)
* it will terminate the next lowest spinner until there
* are no more spinners and the gang is complete.
*/
- cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC);
+ cs = i915_gem_object_pin_map_unlocked(rq->batch->obj, I915_MAP_WC);
if (!IS_ERR(cs)) {
*cs = 0;
i915_gem_object_unpin_map(rq->batch->obj);
@@ -3014,7 +3000,7 @@ static int live_preempt_gang(void *arg)
drm_info_printer(engine->i915->drm.dev);
pr_err("Failed to flush chain of %d requests, at %d\n",
- prio, rq_prio(rq) >> I915_USER_PRIORITY_SHIFT);
+ prio, rq_prio(rq));
intel_engine_dump(engine, &p,
"%s\n", engine->name);
@@ -3062,7 +3048,7 @@ create_gpr_user(struct intel_engine_cs *engine,
return ERR_PTR(err);
}
- cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(cs)) {
i915_vma_put(vma);
return ERR_CAST(cs);
@@ -3269,7 +3255,7 @@ static int live_preempt_user(void *arg)
if (IS_ERR(global))
return PTR_ERR(global);
- result = i915_gem_object_pin_map(global->obj, I915_MAP_WC);
+ result = i915_gem_object_pin_map_unlocked(global->obj, I915_MAP_WC);
if (IS_ERR(result)) {
i915_vma_unpin_and_release(&global, 0);
return PTR_ERR(result);
@@ -3384,14 +3370,12 @@ static int live_preempt_timeout(void *arg)
ctx_hi = kernel_context(gt->i915);
if (!ctx_hi)
goto err_spin_lo;
- ctx_hi->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
+ ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
ctx_lo = kernel_context(gt->i915);
if (!ctx_lo)
goto err_ctx_hi;
- ctx_lo->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
+ ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
for_each_engine(engine, gt, id) {
unsigned long saved_timeout;
@@ -3658,7 +3642,7 @@ static int live_preempt_smoke(void *arg)
goto err_free;
}
- cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
+ cs = i915_gem_object_pin_map_unlocked(smoke.batch, I915_MAP_WB);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err_batch;
@@ -4197,8 +4181,9 @@ static int preserved_virtual_engine(struct intel_gt *gt,
int err = 0;
u32 *cs;
- scratch = __vm_create_scratch_for_read(&siblings[0]->gt->ggtt->vm,
- PAGE_SIZE);
+ scratch =
+ __vm_create_scratch_for_read_pinned(&siblings[0]->gt->ggtt->vm,
+ PAGE_SIZE);
if (IS_ERR(scratch))
return PTR_ERR(scratch);
@@ -4262,7 +4247,7 @@ static int preserved_virtual_engine(struct intel_gt *gt,
goto out_end;
}
- cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ cs = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto out_end;
@@ -4610,7 +4595,7 @@ static int reset_virtual_engine(struct intel_gt *gt,
}
tasklet_disable(&engine->execlists.tasklet);
- engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+ engine->execlists.tasklet.callback(&engine->execlists.tasklet);
GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
/* Fake a preemption event; failed of course */
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index 5d911f724ebe..c0845bf72dd3 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -1,7 +1,5 @@
-
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 463bb6a700c8..746985971c3a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -1,25 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
*/
#include <linux/kthread.h>
@@ -80,15 +61,15 @@ static int hang_init(struct hang *h, struct intel_gt *gt)
}
i915_gem_object_set_cache_coherency(h->hws, I915_CACHE_LLC);
- vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB);
+ vaddr = i915_gem_object_pin_map_unlocked(h->hws, I915_MAP_WB);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_obj;
}
h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
- vaddr = i915_gem_object_pin_map(h->obj,
- i915_coherent_map_type(gt->i915));
+ vaddr = i915_gem_object_pin_map_unlocked(h->obj,
+ i915_coherent_map_type(gt->i915));
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_unpin_hws;
@@ -149,7 +130,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
return ERR_CAST(obj);
}
- vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(gt->i915));
+ vaddr = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(gt->i915));
if (IS_ERR(vaddr)) {
i915_gem_object_put(obj);
i915_vm_put(vm);
diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c
index a912159693fd..94006f117bbd 100644
--- a/drivers/gpu/drm/i915/gt/selftest_llc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_llc.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.h b/drivers/gpu/drm/i915/gt/selftest_llc.h
index 873f896e72f2..88ee9480022a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_llc.h
+++ b/drivers/gpu/drm/i915/gt/selftest_llc.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 920979a89413..85e7df6a5123 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -27,7 +27,7 @@
static struct i915_vma *create_scratch(struct intel_gt *gt)
{
- return __vm_create_scratch_for_read(&gt->ggtt->vm, PAGE_SIZE);
+ return __vm_create_scratch_for_read_pinned(&gt->ggtt->vm, PAGE_SIZE);
}
static bool is_active(struct i915_request *rq)
@@ -627,7 +627,7 @@ static int __live_lrc_gpr(struct intel_engine_cs *engine,
goto err_rq;
}
- cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ cs = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err_rq;
@@ -733,7 +733,6 @@ create_timestamp(struct intel_context *ce, void *slot, int idx)
intel_ring_advance(rq, cs);
- rq->sched.attr.priority = I915_PRIORITY_MASK;
err = 0;
err:
i915_request_get(rq);
@@ -921,7 +920,7 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
if (IS_ERR(batch))
return batch;
- cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
if (IS_ERR(cs)) {
i915_vma_put(batch);
return ERR_CAST(cs);
@@ -1085,7 +1084,7 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
if (IS_ERR(batch))
return batch;
- cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
if (IS_ERR(cs)) {
i915_vma_put(batch);
return ERR_CAST(cs);
@@ -1199,29 +1198,29 @@ static int compare_isolation(struct intel_engine_cs *engine,
u32 *defaults;
int err = 0;
- A[0] = i915_gem_object_pin_map(ref[0]->obj, I915_MAP_WC);
+ A[0] = i915_gem_object_pin_map_unlocked(ref[0]->obj, I915_MAP_WC);
if (IS_ERR(A[0]))
return PTR_ERR(A[0]);
- A[1] = i915_gem_object_pin_map(ref[1]->obj, I915_MAP_WC);
+ A[1] = i915_gem_object_pin_map_unlocked(ref[1]->obj, I915_MAP_WC);
if (IS_ERR(A[1])) {
err = PTR_ERR(A[1]);
goto err_A0;
}
- B[0] = i915_gem_object_pin_map(result[0]->obj, I915_MAP_WC);
+ B[0] = i915_gem_object_pin_map_unlocked(result[0]->obj, I915_MAP_WC);
if (IS_ERR(B[0])) {
err = PTR_ERR(B[0]);
goto err_A1;
}
- B[1] = i915_gem_object_pin_map(result[1]->obj, I915_MAP_WC);
+ B[1] = i915_gem_object_pin_map_unlocked(result[1]->obj, I915_MAP_WC);
if (IS_ERR(B[1])) {
err = PTR_ERR(B[1]);
goto err_B0;
}
- lrc = i915_gem_object_pin_map(ce->state->obj,
+ lrc = i915_gem_object_pin_map_unlocked(ce->state->obj,
i915_coherent_map_type(engine->i915));
if (IS_ERR(lrc)) {
err = PTR_ERR(lrc);
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
index cf373c72359e..e55a887d11e2 100644
--- a/drivers/gpu/drm/i915/gt/selftest_mocs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
@@ -13,8 +12,9 @@
#include "selftests/igt_spinner.h"
struct live_mocs {
- struct drm_i915_mocs_table mocs;
- struct drm_i915_mocs_table l3cc;
+ struct drm_i915_mocs_table table;
+ struct drm_i915_mocs_table *mocs;
+ struct drm_i915_mocs_table *l3cc;
struct i915_vma *scratch;
void *vaddr;
};
@@ -59,27 +59,27 @@ static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
{
- struct drm_i915_mocs_table table;
unsigned int flags;
int err;
memset(arg, 0, sizeof(*arg));
- flags = get_mocs_settings(gt->i915, &table);
+ flags = get_mocs_settings(gt->i915, &arg->table);
if (!flags)
return -EINVAL;
if (flags & HAS_RENDER_L3CC)
- arg->l3cc = table;
+ arg->l3cc = &arg->table;
if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS))
- arg->mocs = table;
+ arg->mocs = &arg->table;
- arg->scratch = __vm_create_scratch_for_read(&gt->ggtt->vm, PAGE_SIZE);
+ arg->scratch =
+ __vm_create_scratch_for_read_pinned(&gt->ggtt->vm, PAGE_SIZE);
if (IS_ERR(arg->scratch))
return PTR_ERR(arg->scratch);
- arg->vaddr = i915_gem_object_pin_map(arg->scratch->obj, I915_MAP_WB);
+ arg->vaddr = i915_gem_object_pin_map_unlocked(arg->scratch->obj, I915_MAP_WB);
if (IS_ERR(arg->vaddr)) {
err = PTR_ERR(arg->vaddr);
goto err_scratch;
@@ -131,6 +131,9 @@ static int read_mocs_table(struct i915_request *rq,
{
u32 addr;
+ if (!table)
+ return 0;
+
if (HAS_GLOBAL_MOCS_REGISTERS(rq->engine->i915))
addr = global_mocs_offset();
else
@@ -145,6 +148,9 @@ static int read_l3cc_table(struct i915_request *rq,
{
u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
+ if (!table)
+ return 0;
+
return read_regs(rq, addr, (table->n_entries + 1) / 2, offset);
}
@@ -155,6 +161,9 @@ static int check_mocs_table(struct intel_engine_cs *engine,
unsigned int i;
u32 expect;
+ if (!table)
+ return 0;
+
for_each_mocs(expect, table, i) {
if (**vaddr != expect) {
pr_err("%s: Invalid MOCS[%d] entry, found %08x, expected %08x\n",
@@ -186,6 +195,9 @@ static int check_l3cc_table(struct intel_engine_cs *engine,
unsigned int i;
u32 expect;
+ if (!table)
+ return 0;
+
for_each_l3cc(expect, table, i) {
if (!mcr_range(engine->i915, reg) && **vaddr != expect) {
pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
@@ -223,9 +235,9 @@ static int check_mocs_engine(struct live_mocs *arg,
/* Read the mocs tables back using SRM */
offset = i915_ggtt_offset(vma);
if (!err)
- err = read_mocs_table(rq, &arg->mocs, &offset);
+ err = read_mocs_table(rq, arg->mocs, &offset);
if (!err && ce->engine->class == RENDER_CLASS)
- err = read_l3cc_table(rq, &arg->l3cc, &offset);
+ err = read_l3cc_table(rq, arg->l3cc, &offset);
offset -= i915_ggtt_offset(vma);
GEM_BUG_ON(offset > PAGE_SIZE);
@@ -236,9 +248,9 @@ static int check_mocs_engine(struct live_mocs *arg,
/* Compare the results against the expected tables */
vaddr = arg->vaddr;
if (!err)
- err = check_mocs_table(ce->engine, &arg->mocs, &vaddr);
+ err = check_mocs_table(ce->engine, arg->mocs, &vaddr);
if (!err && ce->engine->class == RENDER_CLASS)
- err = check_l3cc_table(ce->engine, &arg->l3cc, &vaddr);
+ err = check_l3cc_table(ce->engine, arg->l3cc, &vaddr);
if (err)
return err;
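The live_mocs rework above replaces two by-value copies of the table with pointers into a single arg->table, so that "this platform has no such table" is representable as a NULL pointer and every helper can return early instead of consuming an uninitialised copy. A condensed sketch of the idea (the helper name is illustrative):

	struct live_mocs {
		struct drm_i915_mocs_table table;	/* single backing copy */
		struct drm_i915_mocs_table *mocs;	/* NULL if absent */
		struct drm_i915_mocs_table *l3cc;	/* NULL if absent */
	};

	static int read_table(struct i915_request *rq,
			      const struct drm_i915_mocs_table *table,
			      u32 *offset)
	{
		if (!table)	/* feature absent: trivially pass */
			return 0;
		/* ... emit register reads for table->n_entries entries ... */
		return 0;
	}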
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 61abc0556601..f097e420ac45 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.h b/drivers/gpu/drm/i915/gt/selftest_rc6.h
index 762fd442d7b2..daf0927909bc 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.h
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
index 3350e7c995bc..99609271c3a7 100644
--- a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
@@ -35,7 +35,7 @@ static struct i915_vma *create_wally(struct intel_engine_cs *engine)
return ERR_PTR(err);
}
- cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(cs)) {
i915_gem_object_put(obj);
return ERR_CAST(cs);
@@ -212,7 +212,7 @@ static int __live_ctx_switch_wa(struct intel_engine_cs *engine)
if (IS_ERR(bb))
return PTR_ERR(bb);
- result = i915_gem_object_pin_map(bb->obj, I915_MAP_WC);
+ result = i915_gem_object_pin_map_unlocked(bb->obj, I915_MAP_WC);
if (IS_ERR(result)) {
intel_context_put(bb->private);
i915_vma_unpin_and_release(&bb, 0);
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index 6f3a3687ef0f..9adbd9d147be 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2017-2018 Intel Corporation
*/
@@ -35,10 +34,31 @@ static unsigned long hwsp_cacheline(struct intel_timeline *tl)
{
unsigned long address = (unsigned long)page_address(hwsp_page(tl));
- return (address + tl->hwsp_offset) / CACHELINE_BYTES;
+ return (address + offset_in_page(tl->hwsp_offset)) / TIMELINE_SEQNO_BYTES;
}
-#define CACHELINES_PER_PAGE (PAGE_SIZE / CACHELINE_BYTES)
+static int selftest_tl_pin(struct intel_timeline *tl)
+{
+ struct i915_gem_ww_ctx ww;
+ int err;
+
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ err = i915_gem_object_lock(tl->hwsp_ggtt->obj, &ww);
+ if (!err)
+ err = intel_timeline_pin(tl, &ww);
+
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ return err;
+}
+
+/* Only half of the seqnos are usable, see __intel_timeline_get_seqno() */
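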
+#define CACHELINES_PER_PAGE (PAGE_SIZE / TIMELINE_SEQNO_BYTES / 2)
struct mock_hwsp_freelist {
struct intel_gt *gt;
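The revised CACHELINES_PER_PAGE above follows directly from the new slot size. Assuming PAGE_SIZE is 4096 and TIMELINE_SEQNO_BYTES is 8 (both are assumptions here, neither is stated in this hunk), the arithmetic works out as:

	/*
	 * 4096 / 8 = 512 seqno slots per page, and with only every
	 * other slot usable across a wrap (per the comment above and
	 * __intel_timeline_get_seqno()):
	 *  512 / 2 = 256 usable slots per page.
	 */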
@@ -59,6 +79,7 @@ static void __mock_hwsp_record(struct mock_hwsp_freelist *state,
tl = xchg(&state->history[idx], tl);
if (tl) {
radix_tree_delete(&state->cachelines, hwsp_cacheline(tl));
+ intel_timeline_unpin(tl);
intel_timeline_put(tl);
}
}
@@ -78,6 +99,12 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
if (IS_ERR(tl))
return PTR_ERR(tl);
+ err = selftest_tl_pin(tl);
+ if (err) {
+ intel_timeline_put(tl);
+ return err;
+ }
+
cacheline = hwsp_cacheline(tl);
err = radix_tree_insert(&state->cachelines, cacheline, tl);
if (err) {
@@ -85,6 +112,7 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
pr_err("HWSP cacheline %lu already used; duplicate allocation!\n",
cacheline);
}
+ intel_timeline_unpin(tl);
intel_timeline_put(tl);
return err;
}
@@ -452,17 +480,24 @@ static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
}
static struct i915_request *
-tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
+checked_tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
{
struct i915_request *rq;
int err;
- err = intel_timeline_pin(tl, NULL);
+ err = selftest_tl_pin(tl);
if (err) {
rq = ERR_PTR(err);
goto out;
}
+ if (READ_ONCE(*tl->hwsp_seqno) != tl->seqno) {
+ pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n",
+ *tl->hwsp_seqno, tl->seqno);
+ intel_timeline_unpin(tl);
+ return ERR_PTR(-EINVAL);
+ }
+
rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq))
goto out_unpin;
@@ -484,25 +519,6 @@ out:
return rq;
}
-static struct intel_timeline *
-checked_intel_timeline_create(struct intel_gt *gt)
-{
- struct intel_timeline *tl;
-
- tl = intel_timeline_create(gt);
- if (IS_ERR(tl))
- return tl;
-
- if (READ_ONCE(*tl->hwsp_seqno) != tl->seqno) {
- pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n",
- *tl->hwsp_seqno, tl->seqno);
- intel_timeline_put(tl);
- return ERR_PTR(-EINVAL);
- }
-
- return tl;
-}
-
static int live_hwsp_engine(void *arg)
{
#define NUM_TIMELINES 4096
@@ -535,13 +551,13 @@ static int live_hwsp_engine(void *arg)
struct intel_timeline *tl;
struct i915_request *rq;
- tl = checked_intel_timeline_create(gt);
+ tl = intel_timeline_create(gt);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
break;
}
- rq = tl_write(tl, engine, count);
+ rq = checked_tl_write(tl, engine, count);
if (IS_ERR(rq)) {
intel_timeline_put(tl);
err = PTR_ERR(rq);
@@ -608,14 +624,14 @@ static int live_hwsp_alternate(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
- tl = checked_intel_timeline_create(gt);
+ tl = intel_timeline_create(gt);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
goto out;
}
intel_engine_pm_get(engine);
- rq = tl_write(tl, engine, count);
+ rq = checked_tl_write(tl, engine, count);
intel_engine_pm_put(engine);
if (IS_ERR(rq)) {
intel_timeline_put(tl);
@@ -666,10 +682,10 @@ static int live_hwsp_wrap(void *arg)
if (IS_ERR(tl))
return PTR_ERR(tl);
- if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
+ if (!tl->has_initial_breadcrumb)
goto out_free;
- err = intel_timeline_pin(tl, NULL);
+ err = selftest_tl_pin(tl);
if (err)
goto out_free;
@@ -816,13 +832,13 @@ static int setup_watcher(struct hwsp_watcher *w, struct intel_gt *gt)
if (IS_ERR(obj))
return PTR_ERR(obj);
- w->map = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ w->map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
if (IS_ERR(w->map)) {
i915_gem_object_put(obj);
return PTR_ERR(w->map);
}
- vma = i915_gem_object_ggtt_pin_ww(obj, NULL, NULL, 0, 0, 0);
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
i915_gem_object_put(obj);
return PTR_ERR(vma);
@@ -833,12 +849,26 @@ static int setup_watcher(struct hwsp_watcher *w, struct intel_gt *gt)
return 0;
}
+static void switch_tl_lock(struct i915_request *from, struct i915_request *to)
+{
+ /* some light mutex juggling required; think co-routines */
+
+ if (from) {
+ lockdep_unpin_lock(&from->context->timeline->mutex, from->cookie);
+ mutex_unlock(&from->context->timeline->mutex);
+ }
+
+ if (to) {
+ mutex_lock(&to->context->timeline->mutex);
+ to->cookie = lockdep_pin_lock(&to->context->timeline->mutex);
+ }
+}
+
static int create_watcher(struct hwsp_watcher *w,
struct intel_engine_cs *engine,
int ringsz)
{
struct intel_context *ce;
- struct intel_timeline *tl;
ce = intel_context_create(engine);
if (IS_ERR(ce))
@@ -851,11 +881,8 @@ static int create_watcher(struct hwsp_watcher *w,
return PTR_ERR(w->rq);
w->addr = i915_ggtt_offset(w->vma);
- tl = w->rq->context->timeline;
- /* some light mutex juggling required; think co-routines */
- lockdep_unpin_lock(&tl->mutex, w->rq->cookie);
- mutex_unlock(&tl->mutex);
+ switch_tl_lock(w->rq, NULL);
return 0;
}
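switch_tl_lock() centralises the lock hand-off that create_watcher(), check_watcher() and cleanup_watcher() previously open-coded: a watcher request is created with its timeline mutex held, parked with the mutex dropped, then the mutex is retaken just before submission. A condensed sketch of that lifecycle (flow only, error handling omitted):

	w->rq = intel_context_create_request(ce);	/* mutex held */
	switch_tl_lock(w->rq, NULL);	/* park: drop rq's timeline mutex */
	/* ... other contexts emit reads into the watcher ring ... */
	switch_tl_lock(NULL, w->rq);	/* retake before submission */
	i915_request_add(w->rq);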
@@ -864,15 +891,13 @@ static int check_watcher(struct hwsp_watcher *w, const char *name,
bool (*op)(u32 hwsp, u32 seqno))
{
struct i915_request *rq = fetch_and_zero(&w->rq);
- struct intel_timeline *tl = rq->context->timeline;
u32 offset, end;
int err;
GEM_BUG_ON(w->addr - i915_ggtt_offset(w->vma) > w->vma->size);
i915_request_get(rq);
- mutex_lock(&tl->mutex);
- rq->cookie = lockdep_pin_lock(&tl->mutex);
+ switch_tl_lock(NULL, rq);
i915_request_add(rq);
if (i915_request_wait(rq, 0, HZ) < 0) {
@@ -901,10 +926,7 @@ out:
static void cleanup_watcher(struct hwsp_watcher *w)
{
if (w->rq) {
- struct intel_timeline *tl = w->rq->context->timeline;
-
- mutex_lock(&tl->mutex);
- w->rq->cookie = lockdep_pin_lock(&tl->mutex);
+ switch_tl_lock(NULL, w->rq);
i915_request_add(w->rq);
}
@@ -942,7 +964,7 @@ static struct i915_request *wrap_timeline(struct i915_request *rq)
}
i915_request_put(rq);
- rq = intel_context_create_request(ce);
+ rq = i915_request_create(ce);
if (IS_ERR(rq))
return rq;
@@ -977,7 +999,7 @@ static int live_hwsp_read(void *arg)
if (IS_ERR(tl))
return PTR_ERR(tl);
- if (!tl->hwsp_cacheline)
+ if (!tl->has_initial_breadcrumb)
goto out_free;
for (i = 0; i < ARRAY_SIZE(watcher); i++) {
@@ -999,7 +1021,7 @@ static int live_hwsp_read(void *arg)
do {
struct i915_sw_fence *submit;
struct i915_request *rq;
- u32 hwsp;
+ u32 hwsp, dummy;
submit = heap_fence_create(GFP_KERNEL);
if (!submit) {
@@ -1017,14 +1039,26 @@ static int live_hwsp_read(void *arg)
goto out;
}
- /* Skip to the end, saving 30 minutes of nops */
- tl->seqno = -10u + 2 * (count & 3);
- WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
ce->timeline = intel_timeline_get(tl);
- rq = intel_context_create_request(ce);
+ /* Ensure timeline is mapped, done during first pin */
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ goto out;
+ }
+
+ /*
+ * Start at a new wrap, and set seqno right before another wrap,
+ * saving 30 minutes of nops
+ */
+ tl->seqno = -12u + 2 * (count & 3);
+ __intel_timeline_get_seqno(tl, &dummy);
+
+ rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
+ intel_context_unpin(ce);
intel_context_put(ce);
goto out;
}
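The new seqno placement is worth unpacking; with u32 wrap-around arithmetic (an observation, not spelled out in the hunk):

	/*
	 * tl->seqno = -12u + 2 * (count & 3) cycles through
	 *   0xfffffff4, 0xfffffff6, 0xfffffff8, 0xfffffffa
	 * i.e. a few increments short of the 2^32 wrap, so every
	 * iteration reaches the wrap path without emitting minutes of
	 * filler nops; __intel_timeline_get_seqno(tl, &dummy) then
	 * performs the start-of-wrap bookkeeping up front.
	 */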
@@ -1034,32 +1068,35 @@ static int live_hwsp_read(void *arg)
GFP_KERNEL);
if (err < 0) {
i915_request_add(rq);
+ intel_context_unpin(ce);
intel_context_put(ce);
goto out;
}
- mutex_lock(&watcher[0].rq->context->timeline->mutex);
+ switch_tl_lock(rq, watcher[0].rq);
err = intel_timeline_read_hwsp(rq, watcher[0].rq, &hwsp);
if (err == 0)
err = emit_read_hwsp(watcher[0].rq, /* before */
rq->fence.seqno, hwsp,
&watcher[0].addr);
- mutex_unlock(&watcher[0].rq->context->timeline->mutex);
+ switch_tl_lock(watcher[0].rq, rq);
if (err) {
i915_request_add(rq);
+ intel_context_unpin(ce);
intel_context_put(ce);
goto out;
}
- mutex_lock(&watcher[1].rq->context->timeline->mutex);
+ switch_tl_lock(rq, watcher[1].rq);
err = intel_timeline_read_hwsp(rq, watcher[1].rq, &hwsp);
if (err == 0)
err = emit_read_hwsp(watcher[1].rq, /* after */
rq->fence.seqno, hwsp,
&watcher[1].addr);
- mutex_unlock(&watcher[1].rq->context->timeline->mutex);
+ switch_tl_lock(watcher[1].rq, rq);
if (err) {
i915_request_add(rq);
+ intel_context_unpin(ce);
intel_context_put(ce);
goto out;
}
@@ -1068,6 +1105,7 @@ static int live_hwsp_read(void *arg)
i915_request_add(rq);
rq = wrap_timeline(rq);
+ intel_context_unpin(ce);
intel_context_put(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
@@ -1107,8 +1145,8 @@ static int live_hwsp_read(void *arg)
3 * watcher[1].rq->ring->size)
break;
- } while (!__igt_timeout(end_time, NULL));
- WRITE_ONCE(*(u32 *)tl->hwsp_seqno, 0xdeadbeef);
+ } while (!__igt_timeout(end_time, NULL) &&
+ count < (PAGE_SIZE / TIMELINE_SEQNO_BYTES - 1) / 2);
pr_info("%s: simulated %lu wraps\n", engine->name, count);
err = check_watcher(&watcher[1], "after", cmp_gte);
@@ -1153,9 +1191,7 @@ static int live_hwsp_rollover_kernel(void *arg)
}
GEM_BUG_ON(i915_active_fence_isset(&tl->last_request));
- tl->seqno = 0;
- timeline_rollback(tl);
- timeline_rollback(tl);
+ tl->seqno = -2u;
WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
for (i = 0; i < ARRAY_SIZE(rq); i++) {
@@ -1235,11 +1271,14 @@ static int live_hwsp_rollover_user(void *arg)
goto out;
tl = ce->timeline;
- if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
+ if (!tl->has_initial_breadcrumb)
+ goto out;
+
+ err = intel_context_pin(ce);
+ if (err)
goto out;
- timeline_rollback(tl);
- timeline_rollback(tl);
+ tl->seqno = -4u;
WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
for (i = 0; i < ARRAY_SIZE(rq); i++) {
@@ -1248,7 +1287,7 @@ static int live_hwsp_rollover_user(void *arg)
this = intel_context_create_request(ce);
if (IS_ERR(this)) {
err = PTR_ERR(this);
- goto out;
+ goto out_unpin;
}
pr_debug("%s: create fence.seqno:%d\n",
@@ -1267,17 +1306,18 @@ static int live_hwsp_rollover_user(void *arg)
if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
pr_err("Wait for timeline wrap timed out!\n");
err = -EIO;
- goto out;
+ goto out_unpin;
}
for (i = 0; i < ARRAY_SIZE(rq); i++) {
if (!i915_request_completed(rq[i])) {
pr_err("Pre-wrap request not completed!\n");
err = -EINVAL;
- goto out;
+ goto out_unpin;
}
}
-
+out_unpin:
+ intel_context_unpin(ce);
out:
for (i = 0; i < ARRAY_SIZE(rq); i++)
i915_request_put(rq[i]);
@@ -1319,13 +1359,13 @@ static int live_hwsp_recycle(void *arg)
struct intel_timeline *tl;
struct i915_request *rq;
- tl = checked_intel_timeline_create(gt);
+ tl = intel_timeline_create(gt);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
break;
}
- rq = tl_write(tl, engine, count);
+ rq = checked_tl_write(tl, engine, count);
if (IS_ERR(rq)) {
intel_timeline_put(tl);
err = PTR_ERR(rq);
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 2070b91cb607..19850489a3fc 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2018 Intel Corporation
*/
@@ -112,7 +111,7 @@ read_nonprivs(struct intel_context *ce)
i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);
- cs = i915_gem_object_pin_map(result, I915_MAP_WB);
+ cs = i915_gem_object_pin_map_unlocked(result, I915_MAP_WB);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err_obj;
@@ -218,7 +217,7 @@ static int check_whitelist(struct intel_context *ce)
i915_gem_object_lock(results, NULL);
intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
err = i915_gem_object_set_to_cpu_domain(results, false);
- i915_gem_object_unlock(results);
+
if (intel_gt_is_wedged(engine->gt))
err = -EIO;
if (err)
@@ -246,6 +245,7 @@ static int check_whitelist(struct intel_context *ce)
i915_gem_object_unpin_map(results);
out_put:
+ i915_gem_object_unlock(results);
i915_gem_object_put(results);
return err;
}
@@ -490,7 +490,7 @@ static int check_dirty_whitelist(struct intel_context *ce)
u32 *cs, *results;
sz = (2 * ARRAY_SIZE(values) + 1) * sizeof(u32);
- scratch = __vm_create_scratch_for_read(ce->vm, sz);
+ scratch = __vm_create_scratch_for_read_pinned(ce->vm, sz);
if (IS_ERR(scratch))
return PTR_ERR(scratch);
@@ -502,6 +502,7 @@ static int check_dirty_whitelist(struct intel_context *ce)
for (i = 0; i < engine->whitelist.count; i++) {
u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+ struct i915_gem_ww_ctx ww;
u64 addr = scratch->node.start;
struct i915_request *rq;
u32 srm, lrm, rsvd;
@@ -517,6 +518,29 @@ static int check_dirty_whitelist(struct intel_context *ce)
ro_reg = ro_register(reg);
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ cs = NULL;
+ err = i915_gem_object_lock(scratch->obj, &ww);
+ if (!err)
+ err = i915_gem_object_lock(batch->obj, &ww);
+ if (!err)
+ err = intel_context_pin_ww(ce, &ww);
+ if (err)
+ goto out;
+
+ cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out_ctx;
+ }
+
+ results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(results)) {
+ err = PTR_ERR(results);
+ goto out_unmap_batch;
+ }
+
/* Clear non priv flags */
reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;
@@ -528,12 +552,6 @@ static int check_dirty_whitelist(struct intel_context *ce)
pr_debug("%s: Writing garbage to %x\n",
engine->name, reg);
- cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- goto out_batch;
- }
-
/* SRM original */
*cs++ = srm;
*cs++ = reg;
@@ -580,11 +598,12 @@ static int check_dirty_whitelist(struct intel_context *ce)
i915_gem_object_flush_map(batch->obj);
i915_gem_object_unpin_map(batch->obj);
intel_gt_chipset_flush(engine->gt);
+ cs = NULL;
- rq = intel_context_create_request(ce);
+ rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto out_batch;
+ goto out_unmap_scratch;
}
if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
@@ -593,20 +612,16 @@ static int check_dirty_whitelist(struct intel_context *ce)
goto err_request;
}
- i915_vma_lock(batch);
err = i915_request_await_object(rq, batch->obj, false);
if (err == 0)
err = i915_vma_move_to_active(batch, rq, 0);
- i915_vma_unlock(batch);
if (err)
goto err_request;
- i915_vma_lock(scratch);
err = i915_request_await_object(rq, scratch->obj, true);
if (err == 0)
err = i915_vma_move_to_active(scratch, rq,
EXEC_OBJECT_WRITE);
- i915_vma_unlock(scratch);
if (err)
goto err_request;
@@ -622,13 +637,7 @@ err_request:
pr_err("%s: Futzing %x timedout; cancelling test\n",
engine->name, reg);
intel_gt_set_wedged(engine->gt);
- goto out_batch;
- }
-
- results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
- if (IS_ERR(results)) {
- err = PTR_ERR(results);
- goto out_batch;
+ goto out_unmap_scratch;
}
GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
@@ -639,7 +648,7 @@ err_request:
pr_err("%s: Unable to write to whitelisted register %x\n",
engine->name, reg);
err = -EINVAL;
- goto out_unpin;
+ goto out_unmap_scratch;
}
} else {
rsvd = 0;
@@ -705,15 +714,27 @@ err_request:
err = -EINVAL;
}
-out_unpin:
+out_unmap_scratch:
i915_gem_object_unpin_map(scratch->obj);
+out_unmap_batch:
+ if (cs)
+ i915_gem_object_unpin_map(batch->obj);
+out_ctx:
+ intel_context_unpin(ce);
+out:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
if (err)
break;
}
if (igt_flush_test(engine->i915))
err = -EIO;
-out_batch:
+
i915_vma_unpin_and_release(&batch, 0);
out_scratch:
i915_vma_unpin_and_release(&scratch, 0);
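check_dirty_whitelist() now takes every lock up front inside a single i915_gem_ww_ctx transaction, so a -EDEADLK from any acquisition unwinds the whole set and retries. A minimal sketch of the multi-object form, with hypothetical objects a and b:

	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, false);
retry:
	err = i915_gem_object_lock(a, &ww);
	if (!err)
		err = i915_gem_object_lock(b, &ww);
	if (!err) {
		/* ... pin maps, build and submit the request ... */
	}
	if (err == -EDEADLK) {
		/* a lock in the set is contended in the opposite order:
		 * drop everything and reacquire in a consistent order */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

This is also why the i915_vma_lock()/i915_vma_unlock() pairs around the await/move_to_active calls could be dropped above: the objects are already locked for the duration of the transaction.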
@@ -847,7 +868,7 @@ static int scrub_whitelisted_registers(struct intel_context *ce)
if (IS_ERR(batch))
return PTR_ERR(batch);
- cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err_batch;
@@ -982,11 +1003,11 @@ check_whitelisted_registers(struct intel_engine_cs *engine,
u32 *a, *b;
int i, err;
- a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
+ a = i915_gem_object_pin_map_unlocked(A->obj, I915_MAP_WB);
if (IS_ERR(a))
return PTR_ERR(a);
- b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
+ b = i915_gem_object_pin_map_unlocked(B->obj, I915_MAP_WB);
if (IS_ERR(b)) {
err = PTR_ERR(b);
goto err_a;
@@ -1030,14 +1051,14 @@ static int live_isolated_whitelist(void *arg)
for (i = 0; i < ARRAY_SIZE(client); i++) {
client[i].scratch[0] =
- __vm_create_scratch_for_read(gt->vm, 4096);
+ __vm_create_scratch_for_read_pinned(gt->vm, 4096);
if (IS_ERR(client[i].scratch[0])) {
err = PTR_ERR(client[i].scratch[0]);
goto err;
}
client[i].scratch[1] =
- __vm_create_scratch_for_read(gt->vm, 4096);
+ __vm_create_scratch_for_read_pinned(gt->vm, 4096);
if (IS_ERR(client[i].scratch[1])) {
err = PTR_ERR(client[i].scratch[1]);
i915_vma_unpin_and_release(&client[i].scratch[0], 0);
@@ -1220,7 +1241,11 @@ live_engine_reset_workarounds(void *arg)
goto err;
}
- intel_engine_reset(engine, "live_workarounds:idle");
+ ret = intel_engine_reset(engine, "live_workarounds:idle");
+ if (ret) {
+ pr_err("%s: Reset failed while idle\n", engine->name);
+ goto err;
+ }
ok = verify_wa_lists(gt, &lists, "after idle reset");
if (!ok) {
@@ -1241,12 +1266,18 @@ live_engine_reset_workarounds(void *arg)
ret = request_add_spin(rq, &spin);
if (ret) {
- pr_err("Spinner failed to start\n");
+ pr_err("%s: Spinner failed to start\n", engine->name);
igt_spinner_fini(&spin);
goto err;
}
- intel_engine_reset(engine, "live_workarounds:active");
+ ret = intel_engine_reset(engine, "live_workarounds:active");
+ if (ret) {
+ pr_err("%s: Reset failed on an active spinner\n",
+ engine->name);
+ igt_spinner_fini(&spin);
+ goto err;
+ }
igt_spinner_end(&spin);
igt_spinner_fini(&spin);
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index a4d8fc9e2374..f8f02aab842b 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -39,7 +39,7 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
return file;
}
- ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ ptr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
if (IS_ERR(ptr))
return ERR_CAST(ptr);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 4545e90e3bf1..78305b2ec89d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -682,7 +682,7 @@ int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
if (IS_ERR(vma))
return PTR_ERR(vma);
- vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ vaddr = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
i915_vma_unpin_and_release(&vma, 0);
return PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index c92f2c056db4..c36d5eb5bbb9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -335,7 +335,7 @@ static int guc_log_map(struct intel_guc_log *log)
* buffer pages, so that we can directly get the data
* (up-to-date) from memory.
*/
- vaddr = i915_gem_object_pin_map(log->vma->obj, I915_MAP_WC);
+ vaddr = i915_gem_object_pin_map_unlocked(log->vma->obj, I915_MAP_WC);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
@@ -744,7 +744,7 @@ int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
if (!obj)
return 0;
- map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(map)) {
DRM_DEBUG("Failed to pin object\n");
drm_puts(p, "(log data inaccessible)\n");
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 23dc0aeaa0ab..92688a9b6717 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -206,9 +206,8 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
- int i;
- priolist_for_each_request_consume(rq, rn, p, i) {
+ priolist_for_each_request_consume(rq, rn, p) {
if (last && rq->context != last->context) {
if (port == last_port)
goto done;
@@ -238,9 +237,10 @@ done:
execlists->active = execlists->inflight;
}
-static void guc_submission_tasklet(unsigned long data)
+static void guc_submission_tasklet(struct tasklet_struct *t)
{
- struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+ struct intel_engine_cs * const engine =
+ from_tasklet(engine, t, execlists.tasklet);
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request **port, *rq;
unsigned long flags;
@@ -361,9 +361,8 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
/* Flush the queued requests to the timeline list (for retiring). */
while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
- int i;
- priolist_for_each_request_consume(rq, rn, p, i) {
+ priolist_for_each_request_consume(rq, rn, p) {
list_del_init(&rq->sched.link);
__i915_request_submit(rq);
dma_fence_set_error(&rq->fence, -EIO);
@@ -610,7 +609,7 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = guc_submit_request;
engine->schedule = i915_schedule;
- engine->execlists.tasklet.func = guc_submission_tasklet;
+ engine->execlists.tasklet.callback = guc_submission_tasklet;
engine->reset.prepare = guc_reset_prepare;
engine->reset.rewind = guc_reset_rewind;
@@ -702,8 +701,7 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
*/
GEM_BUG_ON(INTEL_GEN(i915) < 11);
- tasklet_init(&engine->execlists.tasklet,
- guc_submission_tasklet, (unsigned long)engine);
+ tasklet_setup(&engine->execlists.tasklet, guc_submission_tasklet);
guc_default_vfuncs(engine);
guc_default_irqs(engine);
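The tasklet changes track the kernel-wide move away from tasklet_init(), which smuggled the owner through an unsigned long, to tasklet_setup() plus from_tasklet(), a container_of() wrapper from <linux/interrupt.h>. A minimal sketch with illustrative names:

	#include <linux/interrupt.h>

	struct my_engine {
		struct tasklet_struct tasklet;
		/* ... */
	};

	static void my_tasklet_fn(struct tasklet_struct *t)
	{
		/* recover the container from the embedded tasklet */
		struct my_engine *engine = from_tasklet(engine, t, tasklet);
		/* ... process engine ... */
	}

	/* no more casting the owner to unsigned long */
	tasklet_setup(&engine->tasklet, my_tasklet_fn);

Swapping the handler afterwards goes through the ->callback member (as guc_set_default_submission() does above) rather than the old ->func.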
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index 65eeb44b397d..2126dd81ac38 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -82,7 +82,7 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc)
if (IS_ERR(vma))
return PTR_ERR(vma);
- vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ vaddr = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
i915_vma_unpin_and_release(&vma, 0);
return PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 67b06fde1225..df647c9a8d56 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -44,9 +44,11 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
* List of required GuC and HuC binaries per-platform.
* Must be ordered based on platform + revid, from newer to older.
*
- * Note that RKL uses the same firmware as TGL.
+ * Note that RKL and ADL-S have the same GuC/HuC device IDs and use the same
+ * firmware as TGL.
*/
#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
+ fw_def(ALDERLAKE_S, 0, guc_def(tgl, 49, 0, 1), huc_def(tgl, 7, 5, 0)) \
fw_def(ROCKETLAKE, 0, guc_def(tgl, 49, 0, 1), huc_def(tgl, 7, 5, 0)) \
fw_def(TIGERLAKE, 0, guc_def(tgl, 49, 0, 1), huc_def(tgl, 7, 5, 0)) \
fw_def(JASPERLAKE, 0, guc_def(ehl, 49, 0, 1), huc_def(ehl, 9, 0, 0)) \
@@ -537,7 +539,7 @@ int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
if (!intel_uc_fw_is_available(uc_fw))
return -ENOEXEC;
- err = i915_gem_object_pin_pages(uc_fw->obj);
+ err = i915_gem_object_pin_pages_unlocked(uc_fw->obj);
if (err) {
DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
intel_uc_fw_type_repr(uc_fw->type), err);