author    Zhenyu Wang <zhenyuw@linux.intel.com>    2020-10-30 11:48:17 +0800
committer Zhenyu Wang <zhenyuw@linux.intel.com>    2020-10-30 11:48:17 +0800
commit    4a95857a875e887cc958c92fe9d2cde6184d2ec0
tree      8d67b681d582ba7ee11a359be7af82ed77bd6620 /drivers/gpu/drm/i915/gvt
parent    drm/i915/gvt: Only pin/unpin intel_context along with workload
parent    drm/i915: Reject 90/270 degree rotated initial fbs
Merge tag 'drm-intel-fixes-2020-10-29' into gvt-fixes
Backmerge for 5.10-rc1 to apply one extra APL fix.

Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Diffstat (limited to 'drivers/gpu/drm/i915/gvt')
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c   |  9
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h          | 44
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c     | 32
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c         |  3
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c |  2
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c    | 17
6 files changed, 64 insertions(+), 43 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index f1940939260a..16b582cb97ed 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -936,7 +936,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
return -EFAULT;
}
- if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
+ if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) {
gvt_vgpu_err("%s access to non-render register (%x)\n",
cmd, offset);
return -EBADRQC;
@@ -976,7 +976,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
* inhibit context will restore with correct values
*/
if (IS_GEN(s->engine->i915, 9) &&
- intel_gvt_mmio_is_in_ctx(gvt, offset) &&
+ intel_gvt_mmio_is_sr_in_ctx(gvt, offset) &&
!strncmp(cmd, "lri", 3)) {
intel_gvt_hypervisor_read_gpa(s->vgpu,
s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
@@ -992,8 +992,6 @@ static int cmd_reg_handler(struct parser_exec_state *s,
}
}
- /* TODO: Update the global mask if this MMIO is a masked-MMIO */
- intel_gvt_mmio_set_cmd_accessed(gvt, offset);
return 0;
}
@@ -1923,6 +1921,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
if (ret)
goto err_unmap;
+ i915_gem_object_unlock(bb->obj);
INIT_LIST_HEAD(&bb->list);
list_add(&bb->list, &s->workload->shadow_bb);
@@ -2982,7 +2981,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
goto put_obj;
}
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
ret = i915_gem_object_set_to_cpu_domain(obj, false);
i915_gem_object_unlock(obj);
if (ret) {
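
Note on the locking change above: in this kernel generation i915_gem_object_lock()
takes a ww acquire context as its second argument. Passing NULL takes the object's
dma-resv lock directly and cannot return -EDEADLK, which is why the call site
ignores the return value; the unlock added in perform_bb_shadow() presumably
balances a lock now taken earlier in the shadow-object setup path under the same
API change. A minimal sketch of the pattern used here (assuming the 5.10-era API):

	/* With a NULL ww context the lock is unconditional and cannot fail. */
	i915_gem_object_lock(obj, NULL);
	ret = i915_gem_object_set_to_cpu_domain(obj, false);
	i915_gem_object_unlock(obj);

A caller participating in a multi-object transaction would instead pass a
struct i915_gem_ww_ctx and retry on -EDEADLK via i915_gem_ww_ctx_backoff().
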
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index ff7f2515a6fe..9831361f181e 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -256,11 +256,11 @@ struct intel_gvt_mmio {
/* This reg has been accessed by a VM */
#define F_ACCESSED (1 << 4)
-/* This reg has been accessed through GPU commands */
-#define F_CMD_ACCESSED (1 << 5)
/* This reg could be accessed by unaligned address */
#define F_UNALIGN (1 << 6)
-/* This reg is saved/restored in context */
-#define F_IN_CTX (1 << 7)
+/* This reg is in GVT's mmio save-restore list and in hardware
+ * logical context image
+ */
+#define F_SR_IN_CTX (1 << 7)
struct gvt_mmio_block *mmio_block;
unsigned int num_mmio_block;
@@ -597,39 +597,42 @@ static inline void intel_gvt_mmio_set_accessed(
}
/**
- * intel_gvt_mmio_is_cmd_accessed - mark a MMIO could be accessed by command
+ * intel_gvt_mmio_is_cmd_accessible - check if an MMIO can be accessed by command
* @gvt: a GVT device
* @offset: register offset
*
+ * Returns:
+ * True if the MMIO can be accessed by GPU commands, false otherwise
*/
-static inline bool intel_gvt_mmio_is_cmd_access(
+static inline bool intel_gvt_mmio_is_cmd_accessible(
struct intel_gvt *gvt, unsigned int offset)
{
return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
}
/**
- * intel_gvt_mmio_is_unalign - mark a MMIO could be accessed unaligned
+ * intel_gvt_mmio_set_cmd_accessible -
+ * mark an MMIO as accessible by command
* @gvt: a GVT device
* @offset: register offset
*
*/
-static inline bool intel_gvt_mmio_is_unalign(
+static inline void intel_gvt_mmio_set_cmd_accessible(
struct intel_gvt *gvt, unsigned int offset)
{
- return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
+ gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESS;
}
/**
- * intel_gvt_mmio_set_cmd_accessed - mark a MMIO has been accessed by command
+ * intel_gvt_mmio_is_unalign - check if an MMIO could be accessed unaligned
* @gvt: a GVT device
* @offset: register offset
*
*/
-static inline void intel_gvt_mmio_set_cmd_accessed(
+static inline bool intel_gvt_mmio_is_unalign(
struct intel_gvt *gvt, unsigned int offset)
{
- gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESSED;
+ return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
}
/**
@@ -648,30 +651,33 @@ static inline bool intel_gvt_mmio_has_mode_mask(
}
/**
- * intel_gvt_mmio_is_in_ctx - check if a MMIO has in-ctx mask
+ * intel_gvt_mmio_is_sr_in_ctx -
+ * check if an MMIO has the F_SR_IN_CTX mask set
* @gvt: a GVT device
* @offset: register offset
*
* Returns:
- * True if a MMIO has a in-context mask, false if it isn't.
+ * True if an MMIO has the F_SR_IN_CTX mask set, false if it doesn't.
*
*/
-static inline bool intel_gvt_mmio_is_in_ctx(
+static inline bool intel_gvt_mmio_is_sr_in_ctx(
struct intel_gvt *gvt, unsigned int offset)
{
- return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX;
+ return gvt->mmio.mmio_attribute[offset >> 2] & F_SR_IN_CTX;
}
/**
- * intel_gvt_mmio_set_in_ctx - mask a MMIO in logical context
+ * intel_gvt_mmio_set_sr_in_ctx -
+ * mark an MMIO as being in GVT's mmio save-restore list and also
+ * in hardware logical context image
* @gvt: a GVT device
* @offset: register offset
*
*/
-static inline void intel_gvt_mmio_set_in_ctx(
+static inline void intel_gvt_mmio_set_sr_in_ctx(
struct intel_gvt *gvt, unsigned int offset)
{
- gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
+ gvt->mmio.mmio_attribute[offset >> 2] |= F_SR_IN_CTX;
}
void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
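
All of the helpers touched in this file share one mechanism: gvt->mmio.mmio_attribute
is a byte array with one entry per 32-bit register, so offset >> 2 selects the slot
and each F_* flag is a single bit that gets tested or OR-ed in. A condensed sketch
(mmio_test_flag/mmio_set_flag are illustrative names, not from the source):

	/* One attribute byte per 4-byte MMIO offset. */
	static inline bool mmio_test_flag(struct intel_gvt *gvt,
					  unsigned int offset, u8 flag)
	{
		return gvt->mmio.mmio_attribute[offset >> 2] & flag;
	}

	static inline void mmio_set_flag(struct intel_gvt *gvt,
					 unsigned int offset, u8 flag)
	{
		gvt->mmio.mmio_attribute[offset >> 2] |= flag;
	}

	/* e.g. intel_gvt_mmio_is_cmd_accessible(gvt, offset) is
	 * mmio_test_flag(gvt, offset, F_CMD_ACCESS). */
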
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index beafc5e435b4..6a026539c873 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1921,7 +1921,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
- MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL,
+ MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL,
intel_vgpu_reg_imr_handler);
MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
@@ -1929,7 +1929,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
MMIO_D(SDEISR, D_ALL);
- MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_RING_DFH(RING_HWSTAM, D_ALL, 0, NULL, NULL);
+
MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL,
gamw_echo_dev_rw_ia_write);
@@ -1956,11 +1957,11 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);
MMIO_D(GEN7_CXT_SIZE, D_ALL);
- MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, mmio_read_from_hw, NULL);
- MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL);
+ MMIO_RING_DFH(RING_TAIL, D_ALL, 0, NULL, NULL);
+ MMIO_RING_DFH(RING_HEAD, D_ALL, 0, NULL, NULL);
+ MMIO_RING_DFH(RING_CTL, D_ALL, 0, NULL, NULL);
+ MMIO_RING_DFH(RING_ACTHD, D_ALL, 0, mmio_read_from_hw, NULL);
+ MMIO_RING_GM(RING_START, D_ALL, NULL, NULL);
/* RING MODE */
#define RING_REG(base) _MMIO((base) + 0x29c)
@@ -2715,7 +2716,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(_MMIO(0x4094), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_RING_GM_RDR(RING_BBADDR, D_ALL, NULL, NULL);
+ MMIO_RING_GM(RING_BBADDR, D_ALL, NULL, NULL);
MMIO_DFH(_MMIO(0x2220), D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x12220), D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x22220), D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2800,7 +2801,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
intel_vgpu_reg_master_irq_handler);
- MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS,
+ MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, 0,
mmio_read_from_hw, NULL);
#define RING_REG(base) _MMIO((base) + 0xd0)
@@ -2814,7 +2815,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
#undef RING_REG
#define RING_REG(base) _MMIO((base) + 0x234)
- MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS,
+ MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS,
NULL, NULL);
#undef RING_REG
@@ -2849,7 +2850,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
- MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
+ MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
@@ -2950,7 +2951,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DH(MMCD_MISC_CTRL, D_SKL_PLUS, NULL, NULL);
+ MMIO_DFH(MMCD_MISC_CTRL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL);
MMIO_D(DC_STATE_EN, D_SKL_PLUS);
MMIO_D(DC_STATE_DEBUG, D_SKL_PLUS);
@@ -3166,7 +3167,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
- MMIO_D(GAMT_CHKN_BIT_REG, D_KBL | D_CFL);
+ MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL);
MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS);
return 0;
@@ -3388,7 +3389,10 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
gvt->mmio.mmio_attribute = NULL;
}
-/* Special MMIO blocks. */
+/* Special MMIO blocks. Registers in MMIO block ranges should not be command
+ * accessible (should have no F_CMD_ACCESS flag).
+ * Otherwise, cmd_reg_handler() in cmd_parser.c needs to be updated as well.
+ */
static struct gvt_mmio_block mmio_blocks[] = {
{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
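
The comment added above exists because registers inside these block ranges are
served by the block's own read/write handlers and have no per-register attribute
entries, so the parser's intel_gvt_mmio_is_cmd_accessible() check cannot vouch
for them. Lookup is a linear range scan, roughly as below (modelled on
find_mmio_block() in this file, with the device-type filtering dropped):

	static struct gvt_mmio_block *
	find_mmio_block(struct intel_gvt *gvt, unsigned int offset)
	{
		struct gvt_mmio_block *block = gvt->mmio.mmio_block;
		int i;

		for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
			if (offset >= i915_mmio_reg_offset(block->offset) &&
			    offset < i915_mmio_reg_offset(block->offset) +
				     block->size)
				return block;
		}
		return NULL;
	}
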
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 291993615af9..b6811f6a230d 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -251,6 +251,9 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
/* set the bit 0:2(Core C-State ) to C0 */
vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
+ /* uc reset: the guest expects GS_MIA_IN_RESET to be set in GUC_STATUS */
+ vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
+
if (IS_BROXTON(vgpu->gvt->gt->i915)) {
vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &=
~(BIT(0) | BIT(1));
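
The GUC_STATUS change above seeds the vGPU's virtual register file so that a
guest driver resetting its (virtual) GuC sees the MIA core report the reset
state instead of polling forever. vgpu_vreg_t() resolves to the vGPU's software
copy of a register; the accessors are roughly (a sketch of the usual gvt.h
definitions):

	#define vgpu_vreg(vgpu, offset) \
		(*(u32 *)(vgpu->mmio.vreg + (offset)))
	#define vgpu_vreg_t(vgpu, reg) \
		vgpu_vreg(vgpu, i915_mmio_reg_offset(reg))

	/* So the fix is effectively: */
	*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(GUC_STATUS)) |=
		GS_MIA_IN_RESET;
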
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 86a60bdf0818..afe574d6b3b5 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -595,7 +595,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
i915_mmio_reg_valid(mmio->reg); mmio++) {
if (mmio->in_context) {
gvt->engine_mmio_list.ctx_mmio_count[mmio->id]++;
- intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg);
+ intel_gvt_mmio_set_sr_in_ctx(gvt, mmio->reg.reg);
}
}
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 68b2d10108fd..aed2ef6466a2 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -403,6 +403,14 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
wa_ctx->indirect_ctx.shadow_va = NULL;
}
+static void set_dma_address(struct i915_page_directory *pd, dma_addr_t addr)
+{
+ struct scatterlist *sg = pd->pt.base->mm.pages->sgl;
+
+ /* This is not a good idea */
+ sg->dma_address = addr;
+}
+
static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
struct intel_context *ce)
{
@@ -411,7 +419,7 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
int i = 0;
if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
- px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0];
+ set_dma_address(ppgtt->pd, mm->ppgtt_mm.shadow_pdps[0]);
} else {
for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
struct i915_page_directory * const pd =
@@ -421,7 +429,8 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
shadow ppgtt. */
if (!pd)
break;
- px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
+
+ set_dma_address(pd, mm->ppgtt_mm.shadow_pdps[i]);
}
}
}
@@ -1240,13 +1249,13 @@ i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s,
int i;
if (i915_vm_is_4lvl(&ppgtt->vm)) {
- px_dma(ppgtt->pd) = s->i915_context_pml4;
+ set_dma_address(ppgtt->pd, s->i915_context_pml4);
} else {
for (i = 0; i < GEN8_3LVL_PDPES; i++) {
struct i915_page_directory * const pd =
i915_pd_entry(ppgtt->pd, i);
- px_dma(pd) = s->i915_context_pdps[i];
+ set_dma_address(pd, s->i915_context_pdps[i]);
}
}
}
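
A note on set_dma_address(): before i915 moved its page-directory structures
onto GEM objects, px_dma(pd) expanded to an lvalue and GVT could assign the
shadow PDP address directly, as the removed lines show. After the rework the
DMA address lives in the scatterlist of the object backing the directory, so
the helper pokes pd->pt.base->mm.pages->sgl instead; the "This is not a good
idea" remark owns up to the layering violation. A sketch of the before/after
equivalence (under those assumptions):

	/* Old world: px_dma() was assignable. */
	px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0];

	/* New world: patch the first (and only) sg entry of the backing
	 * object; px_dma() reads back sg_dma_address() of that entry. */
	pd->pt.base->mm.pages->sgl->dma_address = addr;
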