Diffstat (limited to 'drivers/gpu/drm/i915/gvt')
-rw-r--r--  drivers/gpu/drm/i915/gvt/Makefile       |   2
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c   |  74
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c      |   1
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c       |   2
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c     |  28
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.h     |   2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c          |   7
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h          |   2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h          |  17
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c     | 189
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.c    |   2
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c         |   8
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c | 247
-rw-r--r--  drivers/gpu/drm/i915/gvt/reg.h          |  34
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c    |  39
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h    |   6
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c         |   6
17 files changed, 377 insertions(+), 289 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index 271fb46d4dd0..ea8324abc784 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -5,5 +5,5 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \
fb_decoder.o dmabuf.o page_track.o
-ccflags-y += -I$(src) -I$(src)/$(GVT_DIR)
+ccflags-y += -I $(srctree)/$(src) -I $(srctree)/$(src)/$(GVT_DIR)/
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 3592d04c33b2..ab002cfd3cab 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -391,12 +391,12 @@ struct cmd_info {
#define F_POST_HANDLE (1<<2)
u32 flag;
-#define R_RCS (1 << RCS)
-#define R_VCS1 (1 << VCS)
-#define R_VCS2 (1 << VCS2)
+#define R_RCS BIT(RCS0)
+#define R_VCS1 BIT(VCS0)
+#define R_VCS2 BIT(VCS1)
#define R_VCS (R_VCS1 | R_VCS2)
-#define R_BCS (1 << BCS)
-#define R_VECS (1 << VECS)
+#define R_BCS BIT(BCS0)
+#define R_VECS BIT(VECS0)
#define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
/* rings that support this cmd: BLT/RCS/VCS/VECS */
u16 rings;
@@ -558,7 +558,7 @@ static const struct decode_info decode_info_vebox = {
};
static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
- [RCS] = {
+ [RCS0] = {
&decode_info_mi,
NULL,
NULL,
@@ -569,7 +569,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
NULL,
},
- [VCS] = {
+ [VCS0] = {
&decode_info_mi,
NULL,
NULL,
@@ -580,7 +580,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
NULL,
},
- [BCS] = {
+ [BCS0] = {
&decode_info_mi,
NULL,
&decode_info_2d,
@@ -591,7 +591,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
NULL,
},
- [VECS] = {
+ [VECS0] = {
&decode_info_mi,
NULL,
NULL,
@@ -602,7 +602,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
NULL,
},
- [VCS2] = {
+ [VCS1] = {
&decode_info_mi,
NULL,
NULL,
@@ -631,8 +631,7 @@ static inline const struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
struct cmd_entry *e;
hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
- if ((opcode == e->info->opcode) &&
- (e->info->rings & (1 << ring_id)))
+ if (opcode == e->info->opcode && e->info->rings & BIT(ring_id))
return e->info;
}
return NULL;
@@ -943,15 +942,12 @@ static int cmd_handler_lri(struct parser_exec_state *s)
struct intel_gvt *gvt = s->vgpu->gvt;
for (i = 1; i < cmd_len; i += 2) {
- if (IS_BROADWELL(gvt->dev_priv) &&
- (s->ring_id != RCS)) {
- if (s->ring_id == BCS &&
- cmd_reg(s, i) ==
- i915_mmio_reg_offset(DERRMR))
+ if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) {
+ if (s->ring_id == BCS0 &&
+ cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR))
ret |= 0;
else
- ret |= (cmd_reg_inhibit(s, i)) ?
- -EBADRQC : 0;
+ ret |= cmd_reg_inhibit(s, i) ? -EBADRQC : 0;
}
if (ret)
break;
@@ -1047,27 +1043,27 @@ struct cmd_interrupt_event {
};
static struct cmd_interrupt_event cmd_interrupt_events[] = {
- [RCS] = {
+ [RCS0] = {
.pipe_control_notify = RCS_PIPE_CONTROL,
.mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
.mi_user_interrupt = RCS_MI_USER_INTERRUPT,
},
- [BCS] = {
+ [BCS0] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = BCS_MI_FLUSH_DW,
.mi_user_interrupt = BCS_MI_USER_INTERRUPT,
},
- [VCS] = {
+ [VCS0] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = VCS_MI_FLUSH_DW,
.mi_user_interrupt = VCS_MI_USER_INTERRUPT,
},
- [VCS2] = {
+ [VCS1] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = VCS2_MI_FLUSH_DW,
.mi_user_interrupt = VCS2_MI_USER_INTERRUPT,
},
- [VECS] = {
+ [VECS0] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = VECS_MI_FLUSH_DW,
.mi_user_interrupt = VECS_MI_USER_INTERRUPT,
@@ -1081,6 +1077,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
bool index_mode = false;
unsigned int post_sync;
int ret = 0;
+ u32 hws_pga, val;
post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;
@@ -1104,6 +1101,15 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
index_mode = true;
ret |= cmd_address_audit(s, gma, sizeof(u64),
index_mode);
+ if (ret)
+ return ret;
+ if (index_mode) {
+ hws_pga = s->vgpu->hws_pga[s->ring_id];
+ gma = hws_pga + gma;
+ patch_value(s, cmd_ptr(s, 2), gma);
+ val = cmd_val(s, 1) & (~(1 << 21));
+ patch_value(s, cmd_ptr(s, 1), val);
+ }
}
}
}
@@ -1321,8 +1327,14 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
info->tile_val << 10);
}
- vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
- intel_vgpu_trigger_virtual_event(vgpu, info->event);
+ if (info->plane == PLANE_PRIMARY)
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(info->pipe))++;
+
+ if (info->async_flip)
+ intel_vgpu_trigger_virtual_event(vgpu, info->event);
+ else
+ set_bit(info->event, vgpu->irq.flip_done_event[info->pipe]);
+
return 0;
}
@@ -1567,6 +1579,7 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
unsigned long gma;
bool index_mode = false;
int ret = 0;
+ u32 hws_pga, val;
/* Check post-sync and ppgtt bit */
if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
@@ -1577,6 +1590,15 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
if (cmd_val(s, 0) & (1 << 21))
index_mode = true;
ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
+ if (ret)
+ return ret;
+ if (index_mode) {
+ hws_pga = s->vgpu->hws_pga[s->ring_id];
+ gma = hws_pga + gma;
+ patch_value(s, cmd_ptr(s, 1), gma);
+ val = cmd_val(s, 0) & (~(1 << 21));
+ patch_value(s, cmd_ptr(s, 0), val);
+ }
}
/* Check notify bit */
if ((cmd_val(s, 0) & (1 << 8)))
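
The two index-mode blocks added above (in cmd_handler_pipe_control and cmd_handler_mi_flush_dw) apply the same rewrite: once the address audit passes, the guest's status-page-relative offset is turned into an absolute address inside the vGPU's own hardware status page (hws_pga), and bit 21 is cleared so the command executes in direct-address mode; patch_value() applies the rewrite to the shadowed command buffer. A minimal user-space sketch of that rewrite, assuming a two-dword command held in local variables:

#include <stdint.h>
#include <stdio.h>

#define INDEX_MODE_BIT (1u << 21)	/* bit 21 selects HWSP index mode */

/* Rewrite an index-mode post-sync write to a direct address (sketch). */
static void patch_index_mode(uint32_t *dw_flags, uint32_t *dw_addr,
			     uint32_t hws_pga)
{
	if (*dw_flags & INDEX_MODE_BIT) {
		*dw_addr += hws_pga;		/* gma = hws_pga + gma */
		*dw_flags &= ~INDEX_MODE_BIT;	/* now a plain address */
	}
}

int main(void)
{
	uint32_t flags = INDEX_MODE_BIT, addr = 0x40;

	patch_index_mode(&flags, &addr, 0x100000);
	/* prints: flags=0 addr=0x100040 */
	printf("flags=%#x addr=%#x\n", (unsigned)flags, (unsigned)addr);
	return 0;
}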
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index e3f9caa7839f..e1c313da6c00 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -407,7 +407,6 @@ static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
if (!pipe_is_enabled(vgpu, pipe))
continue;
- vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, event);
}
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 69a9a1b2ea4a..4e1e425189ba 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -153,7 +153,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
- obj = i915_gem_object_alloc(dev_priv);
+ obj = i915_gem_object_alloc();
if (obj == NULL)
return NULL;
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 70494e394d2c..f21b8fb5b37e 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -47,17 +47,16 @@
((a)->lrca == (b)->lrca))
static int context_switch_events[] = {
- [RCS] = RCS_AS_CONTEXT_SWITCH,
- [BCS] = BCS_AS_CONTEXT_SWITCH,
- [VCS] = VCS_AS_CONTEXT_SWITCH,
- [VCS2] = VCS2_AS_CONTEXT_SWITCH,
- [VECS] = VECS_AS_CONTEXT_SWITCH,
+ [RCS0] = RCS_AS_CONTEXT_SWITCH,
+ [BCS0] = BCS_AS_CONTEXT_SWITCH,
+ [VCS0] = VCS_AS_CONTEXT_SWITCH,
+ [VCS1] = VCS2_AS_CONTEXT_SWITCH,
+ [VECS0] = VECS_AS_CONTEXT_SWITCH,
};
-static int ring_id_to_context_switch_event(int ring_id)
+static int ring_id_to_context_switch_event(unsigned int ring_id)
{
- if (WARN_ON(ring_id < RCS ||
- ring_id >= ARRAY_SIZE(context_switch_events)))
+ if (WARN_ON(ring_id >= ARRAY_SIZE(context_switch_events)))
return -EINVAL;
return context_switch_events[ring_id];
@@ -411,7 +410,7 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
gvt_dbg_el("complete workload %p status %d\n", workload,
workload->status);
- if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id)))
+ if (workload->status || (vgpu->resetting_eng & BIT(ring_id)))
goto out;
if (!list_empty(workload_q_head(vgpu, ring_id))) {
@@ -527,12 +526,13 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}
-static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
+static void clean_execlist(struct intel_vgpu *vgpu,
+ intel_engine_mask_t engine_mask)
{
- unsigned int tmp;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
struct intel_vgpu_submission *s = &vgpu->submission;
+ intel_engine_mask_t tmp;
for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
kfree(s->ring_scan_buffer[engine->id]);
@@ -542,18 +542,18 @@ static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
}
static void reset_execlist(struct intel_vgpu *vgpu,
- unsigned long engine_mask)
+ intel_engine_mask_t engine_mask)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
- unsigned int tmp;
+ intel_engine_mask_t tmp;
for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
init_vgpu_execlist(vgpu, engine->id);
}
static int init_execlist(struct intel_vgpu *vgpu,
- unsigned long engine_mask)
+ intel_engine_mask_t engine_mask)
{
reset_execlist(vgpu, engine_mask);
return 0;
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index 714d709829a2..5ccc2c695848 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -180,6 +180,6 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
- unsigned long engine_mask);
+ intel_engine_mask_t engine_mask);
#endif /*_GVT_EXECLIST_H_*/
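
The renames running through this series (RCS becomes RCS0, VCS becomes VCS0, VCS2 becomes VCS1, and unsigned long becomes intel_engine_mask_t) track i915's move to instance-numbered engine ids and a dedicated engine-mask type; the second video engine is VCS1 because instances count from zero. A minimal sketch of the assumed shape of those definitions — the real ones live in i915's engine headers, so the names and widths here are illustrative only:

#include <stdint.h>

typedef uint32_t intel_engine_mask_t;	/* assumed width, per i915 */

enum engine_id {			/* instance-numbered ids (assumed) */
	RCS0 = 0,	/* render */
	BCS0,		/* blitter */
	VCS0,		/* 1st video engine: was VCS */
	VCS1,		/* 2nd video engine: was VCS2 */
	VECS0,		/* video enhance */
	I915_NUM_ENGINES
};

#define BIT(n)		(1u << (n))
#define ALL_ENGINES	((intel_engine_mask_t)~0u)

/* e.g. a reset request covering render plus both video engines: */
static const intel_engine_mask_t example_mask =
	BIT(RCS0) | BIT(VCS0) | BIT(VCS1);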
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 9814773882ec..c2f7d20f6346 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2504,6 +2504,7 @@ static void clean_spt_oos(struct intel_gvt *gvt)
list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
list_del(&oos_page->list);
+ free_page((unsigned long)oos_page->mem);
kfree(oos_page);
}
}
@@ -2524,6 +2525,12 @@ static int setup_spt_oos(struct intel_gvt *gvt)
ret = -ENOMEM;
goto fail;
}
+ oos_page->mem = (void *)__get_free_pages(GFP_KERNEL, 0);
+ if (!oos_page->mem) {
+ ret = -ENOMEM;
+ kfree(oos_page);
+ goto fail;
+ }
INIT_LIST_HEAD(&oos_page->list);
INIT_LIST_HEAD(&oos_page->vm_list);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index edb610dc5d86..32c573aea494 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -222,7 +222,7 @@ struct intel_vgpu_oos_page {
struct list_head list;
struct list_head vm_list;
int id;
- unsigned char mem[I915_GTT_PAGE_SIZE];
+ void *mem;
};
#define GTT_ENTRY_NUM_IN_ONE_PAGE 512
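
Turning mem into a pointer shrinks every intel_vgpu_oos_page by a full GTT page; the backing page is now allocated in setup_spt_oos() and released in clean_spt_oos(), as the gtt.c hunks above show. A hedged user-space analogue of the new alloc/free pairing:

#include <stdlib.h>

#define I915_GTT_PAGE_SIZE 4096

struct oos_page {
	void *mem;	/* was: unsigned char mem[I915_GTT_PAGE_SIZE]; */
};

static struct oos_page *oos_page_create(void)
{
	struct oos_page *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	/* kernel side: __get_free_pages(GFP_KERNEL, 0), i.e. one page */
	p->mem = malloc(I915_GTT_PAGE_SIZE);
	if (!p->mem) {
		free(p);	/* mirrors the kfree() in the error path */
		return NULL;
	}
	return p;
}

static void oos_page_destroy(struct oos_page *p)
{
	free(p->mem);	/* mirrors free_page((unsigned long)p->mem) */
	free(p);
}

int main(void)
{
	struct oos_page *p = oos_page_create();

	if (p)
		oos_page_destroy(p);
	return 0;
}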
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 8bce09de4b82..f5a328b5290a 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -94,7 +94,6 @@ struct intel_vgpu_fence {
struct intel_vgpu_mmio {
void *vreg;
- void *sreg;
};
#define INTEL_GVT_MAX_BAR_NUM 4
@@ -111,11 +110,9 @@ struct intel_vgpu_cfg_space {
#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
-#define INTEL_GVT_MAX_PIPE 4
-
struct intel_vgpu_irq {
bool irq_warn_once[INTEL_GVT_EVENT_MAX];
- DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
+ DECLARE_BITMAP(flip_done_event[I915_MAX_PIPES],
INTEL_GVT_EVENT_MAX);
};
@@ -144,9 +141,9 @@ enum {
struct intel_vgpu_submission_ops {
const char *name;
- int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
- void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
- void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
+ int (*init)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
+ void (*clean)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
+ void (*reset)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
};
struct intel_vgpu_submission {
@@ -449,10 +446,6 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
(*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg64(vgpu, offset) \
(*(u64 *)(vgpu->mmio.vreg + (offset)))
-#define vgpu_sreg_t(vgpu, reg) \
- (*(u32 *)(vgpu->mmio.sreg + i915_mmio_reg_offset(reg)))
-#define vgpu_sreg(vgpu, offset) \
- (*(u32 *)(vgpu->mmio.sreg + (offset)))
#define for_each_active_vgpu(gvt, vgpu, id) \
idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
@@ -488,7 +481,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
- unsigned int engine_mask);
+ intel_engine_mask_t engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index bc64b810e0d5..18f01eeb2510 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -311,7 +311,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- unsigned int engine_mask = 0;
+ intel_engine_mask_t engine_mask = 0;
u32 data;
write_vreg(vgpu, offset, p_data, bytes);
@@ -323,25 +323,25 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
} else {
if (data & GEN6_GRDOM_RENDER) {
gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
- engine_mask |= (1 << RCS);
+ engine_mask |= BIT(RCS0);
}
if (data & GEN6_GRDOM_MEDIA) {
gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
- engine_mask |= (1 << VCS);
+ engine_mask |= BIT(VCS0);
}
if (data & GEN6_GRDOM_BLT) {
gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
- engine_mask |= (1 << BCS);
+ engine_mask |= BIT(BCS0);
}
if (data & GEN6_GRDOM_VECS) {
gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
- engine_mask |= (1 << VECS);
+ engine_mask |= BIT(VECS0);
}
if (data & GEN8_GRDOM_MEDIA2) {
gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
- if (HAS_BSD2(vgpu->gvt->dev_priv))
- engine_mask |= (1 << VCS2);
+ engine_mask |= BIT(VCS1);
}
+ engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask;
}
/* vgpu_lock already held by emulate mmio r/w */
@@ -750,18 +750,19 @@ static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- unsigned int index = DSPSURF_TO_PIPE(offset);
- i915_reg_t surflive_reg = DSPSURFLIVE(index);
- int flip_event[] = {
- [PIPE_A] = PRIMARY_A_FLIP_DONE,
- [PIPE_B] = PRIMARY_B_FLIP_DONE,
- [PIPE_C] = PRIMARY_C_FLIP_DONE,
- };
+ u32 pipe = DSPSURF_TO_PIPE(offset);
+ int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);
write_vreg(vgpu, offset, p_data, bytes);
- vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+ vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
+
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
+
+ if (vgpu_vreg_t(vgpu, DSPCNTR(pipe)) & PLANE_CTL_ASYNC_FLIP)
+ intel_vgpu_trigger_virtual_event(vgpu, event);
+ else
+ set_bit(event, vgpu->irq.flip_done_event[pipe]);
- set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
return 0;
}
@@ -771,18 +772,42 @@ static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- unsigned int index = SPRSURF_TO_PIPE(offset);
- i915_reg_t surflive_reg = SPRSURFLIVE(index);
- int flip_event[] = {
- [PIPE_A] = SPRITE_A_FLIP_DONE,
- [PIPE_B] = SPRITE_B_FLIP_DONE,
- [PIPE_C] = SPRITE_C_FLIP_DONE,
- };
+ u32 pipe = SPRSURF_TO_PIPE(offset);
+ int event = SKL_FLIP_EVENT(pipe, PLANE_SPRITE0);
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
+
+ if (vgpu_vreg_t(vgpu, SPRCTL(pipe)) & PLANE_CTL_ASYNC_FLIP)
+ intel_vgpu_trigger_virtual_event(vgpu, event);
+ else
+ set_bit(event, vgpu->irq.flip_done_event[pipe]);
+
+ return 0;
+}
+
+static int reg50080_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data,
+ unsigned int bytes)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ enum pipe pipe = REG_50080_TO_PIPE(offset);
+ enum plane_id plane = REG_50080_TO_PLANE(offset);
+ int event = SKL_FLIP_EVENT(pipe, plane);
write_vreg(vgpu, offset, p_data, bytes);
- vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+ if (plane == PLANE_PRIMARY) {
+ vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
+ } else {
+ vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
+ }
+
+ if ((vgpu_vreg(vgpu, offset) & REG50080_FLIP_TYPE_MASK) == REG50080_FLIP_TYPE_ASYNC)
+ intel_vgpu_trigger_virtual_event(vgpu, event);
+ else
+ set_bit(event, vgpu->irq.flip_done_event[pipe]);
- set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
return 0;
}
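
All three surface-write handlers above now apply the same completion policy: an asynchronous flip raises the flip-done event immediately, while a synchronous flip only marks the event pending so the vblank emulation delivers it on the next vblank — which pairs with the unconditional PIPE_FLIPCOUNT_G4X bump moving out of emulate_vblank_on_pipe() in display.c above. A compact sketch of that policy, with hypothetical names standing in for the vGPU state:

#include <stdbool.h>
#include <stdio.h>

#define MAX_PIPES  3

static unsigned long long flip_pending[MAX_PIPES]; /* per-pipe event bitmap */

static void trigger_event_now(int event)
{
	printf("flip-done event %d injected immediately\n", event);
}

/* Async completes now; sync is deferred to the next emulated vblank. */
static void complete_flip(int pipe, int event, bool async)
{
	if (async)
		trigger_event_now(event);
	else
		flip_pending[pipe] |= 1ull << event;
}

int main(void)
{
	complete_flip(0, 5, true);	/* async: fires immediately */
	complete_flip(1, 5, false);	/* sync: delivered at vblank */
	return 0;
}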
@@ -1704,7 +1729,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
ret = intel_vgpu_select_submission_ops(vgpu,
- ENGINE_MASK(ring_id),
+ BIT(ring_id),
INTEL_VGPU_EXECLIST_SUBMISSION);
if (ret)
return ret;
@@ -1724,19 +1749,19 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
switch (offset) {
case 0x4260:
- id = RCS;
+ id = RCS0;
break;
case 0x4264:
- id = VCS;
+ id = VCS0;
break;
case 0x4268:
- id = VCS2;
+ id = VCS1;
break;
case 0x426c:
- id = BCS;
+ id = BCS0;
break;
case 0x4270:
- id = VECS;
+ id = VECS0;
break;
default:
return -EINVAL;
@@ -1793,7 +1818,7 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
- if (HAS_BSD2(dev_priv)) \
+ if (HAS_ENGINE(dev_priv, VCS1)) \
MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
} while (0)
@@ -1848,7 +1873,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);
MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL);
- MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL);
MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);
MMIO_D(GEN7_CXT_SIZE, D_ALL);
@@ -1969,6 +1994,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
MMIO_D(DSPOFFSET(PIPE_A), D_ALL);
MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL);
+ MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(DSPCNTR(PIPE_B), D_ALL);
MMIO_D(DSPADDR(PIPE_B), D_ALL);
@@ -1978,6 +2005,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
MMIO_D(DSPOFFSET(PIPE_B), D_ALL);
MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL);
+ MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(DSPCNTR(PIPE_C), D_ALL);
MMIO_D(DSPADDR(PIPE_C), D_ALL);
@@ -1987,6 +2016,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
MMIO_D(DSPOFFSET(PIPE_C), D_ALL);
MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL);
+ MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(SPRCTL(PIPE_A), D_ALL);
MMIO_D(SPRLINOFF(PIPE_A), D_ALL);
@@ -2000,6 +2031,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(SPROFFSET(PIPE_A), D_ALL);
MMIO_D(SPRSCALE(PIPE_A), D_ALL);
MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL);
+ MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(SPRCTL(PIPE_B), D_ALL);
MMIO_D(SPRLINOFF(PIPE_B), D_ALL);
@@ -2013,6 +2046,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(SPROFFSET(PIPE_B), D_ALL);
MMIO_D(SPRSCALE(PIPE_B), D_ALL);
MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL);
+ MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(SPRCTL(PIPE_C), D_ALL);
MMIO_D(SPRLINOFF(PIPE_C), D_ALL);
@@ -2026,6 +2061,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(SPROFFSET(PIPE_C), D_ALL);
MMIO_D(SPRSCALE(PIPE_C), D_ALL);
MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
+ MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
@@ -2827,26 +2864,26 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
- MMIO_D(_MMIO(0xa210), D_SKL_PLUS);
+ MMIO_D(GEN9_PG_ENABLE, D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DH(_MMIO(0x4ddc), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_MMIO(0x42080), D_SKL_PLUS, NULL, NULL);
- MMIO_D(_MMIO(0x45504), D_SKL_PLUS);
- MMIO_D(_MMIO(0x45520), D_SKL_PLUS);
- MMIO_D(_MMIO(0x46000), D_SKL_PLUS);
- MMIO_DH(_MMIO(0x46010), D_SKL_PLUS, NULL, skl_lcpll_write);
- MMIO_DH(_MMIO(0x46014), D_SKL_PLUS, NULL, skl_lcpll_write);
- MMIO_D(_MMIO(0x6C040), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6C048), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6C050), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6C044), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6C04C), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6C054), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6c058), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6c05c), D_SKL_PLUS);
- MMIO_DH(_MMIO(0x6c060), D_SKL_PLUS, dpll_status_read, NULL);
+ MMIO_DH(MMCD_MISC_CTRL, D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL);
+ MMIO_D(DC_STATE_EN, D_SKL_PLUS);
+ MMIO_D(DC_STATE_DEBUG, D_SKL_PLUS);
+ MMIO_D(CDCLK_CTL, D_SKL_PLUS);
+ MMIO_DH(LCPLL1_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
+ MMIO_DH(LCPLL2_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
+ MMIO_D(_MMIO(_DPLL1_CFGCR1), D_SKL_PLUS);
+ MMIO_D(_MMIO(_DPLL2_CFGCR1), D_SKL_PLUS);
+ MMIO_D(_MMIO(_DPLL3_CFGCR1), D_SKL_PLUS);
+ MMIO_D(_MMIO(_DPLL1_CFGCR2), D_SKL_PLUS);
+ MMIO_D(_MMIO(_DPLL2_CFGCR2), D_SKL_PLUS);
+ MMIO_D(_MMIO(_DPLL3_CFGCR2), D_SKL_PLUS);
+ MMIO_D(DPLL_CTRL1, D_SKL_PLUS);
+ MMIO_D(DPLL_CTRL2, D_SKL_PLUS);
+ MMIO_DH(DPLL_STATUS, D_SKL_PLUS, dpll_status_read, NULL);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
@@ -2965,40 +3002,41 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
- MMIO_D(_MMIO(0x70380), D_SKL_PLUS);
- MMIO_D(_MMIO(0x71380), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_CTL_3_A), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_CTL_3_B), D_SKL_PLUS);
MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
- MMIO_D(_MMIO(0x7039c), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS);
- MMIO_D(_MMIO(0x8f074), D_SKL_PLUS);
- MMIO_D(_MMIO(0x8f004), D_SKL_PLUS);
- MMIO_D(_MMIO(0x8f034), D_SKL_PLUS);
+ MMIO_D(CSR_SSP_BASE, D_SKL_PLUS);
+ MMIO_D(CSR_HTP_SKL, D_SKL_PLUS);
+ MMIO_D(CSR_LAST_WRITE, D_SKL_PLUS);
- MMIO_D(_MMIO(0xb11c), D_SKL_PLUS);
+ MMIO_D(BDW_SCRATCH1, D_SKL_PLUS);
- MMIO_D(_MMIO(0x51000), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6c00c), D_SKL_PLUS);
+ MMIO_D(SKL_DFSM, D_SKL_PLUS);
+ MMIO_D(DISPIO_CR_TX_BMU_CR0, D_SKL_PLUS);
- MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+ MMIO_F(GEN9_GFX_MOCS(0), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
NULL, NULL);
- MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+ MMIO_F(GEN7_L3CNTLREG2, 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
NULL, NULL);
MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
MMIO_D(RC6_LOCATION, D_SKL_PLUS);
- MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+ MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS, F_MODE_MASK,
+ NULL, NULL);
+ MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
/* TRTT */
- MMIO_DFH(_MMIO(0x4de0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4de4), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4de8), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4dec), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4df0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4df4), D_SKL_PLUS, F_CMD_ACCESS,
+ MMIO_DFH(TRVATTL3PTRDW(0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(TRVATTL3PTRDW(1), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS,
NULL, gen9_trtte_write);
MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
@@ -3011,7 +3049,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
- MMIO_D(_MMIO(0x1082c0), D_SKL_PLUS);
+ MMIO_D(GEN6_STOLEN_RESERVED, D_SKL_PLUS);
MMIO_D(_MMIO(0x4068), D_SKL_PLUS);
MMIO_D(_MMIO(0x67054), D_SKL_PLUS);
MMIO_D(_MMIO(0x6e560), D_SKL_PLUS);
@@ -3042,8 +3080,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
- MMIO_D(_MMIO(0x4ab8), D_KBL | D_CFL);
- MMIO_D(_MMIO(0x2248), D_SKL_PLUS);
+ MMIO_D(GAMT_CHKN_BIT_REG, D_KBL);
+ MMIO_D(GEN9_CTX_PREEMPT_REG, D_KBL | D_SKL);
return 0;
}
@@ -3265,7 +3303,7 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
/* Special MMIO blocks. */
static struct gvt_mmio_block mmio_blocks[] = {
{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
- {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
+ {D_ALL, MCHBAR_MIRROR_REG_BASE, 0x4000, NULL, NULL},
{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
pvinfo_mmio_read, pvinfo_mmio_write},
{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
@@ -3489,12 +3527,11 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
return mmio_info->read(vgpu, offset, pdata, bytes);
else {
u64 ro_mask = mmio_info->ro_mask;
- u32 old_vreg = 0, old_sreg = 0;
+ u32 old_vreg = 0;
u64 data = 0;
if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
old_vreg = vgpu_vreg(vgpu, offset);
- old_sreg = vgpu_sreg(vgpu, offset);
}
if (likely(!ro_mask))
@@ -3516,8 +3553,6 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
| (vgpu_vreg(vgpu, offset) & mask);
- vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
- | (vgpu_sreg(vgpu, offset) & mask);
}
}
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 67125c5eec6e..951681813230 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -536,7 +536,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);
- if (HAS_BSD2(gvt->dev_priv)) {
+ if (HAS_ENGINE(gvt->dev_priv, VCS1)) {
SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index ed4df2f6d60b..a55178884d67 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -239,7 +239,6 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
if (dmlr) {
memcpy(vgpu->mmio.vreg, mmio, info->mmio_size);
- memcpy(vgpu->mmio.sreg, mmio, info->mmio_size);
vgpu_vreg_t(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
@@ -280,7 +279,6 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
* touched
*/
memcpy(vgpu->mmio.vreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
- memcpy(vgpu->mmio.sreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
}
}
@@ -296,12 +294,10 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
{
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
- vgpu->mmio.vreg = vzalloc(array_size(info->mmio_size, 2));
+ vgpu->mmio.vreg = vzalloc(info->mmio_size);
if (!vgpu->mmio.vreg)
return -ENOMEM;
- vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
-
intel_vgpu_reset_mmio(vgpu, true);
return 0;
@@ -315,5 +311,5 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
{
vfree(vgpu->mmio.vreg);
- vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
+ vgpu->mmio.vreg = NULL;
}
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 7902fb162d09..e7e14c842be4 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -41,103 +41,102 @@
/* Raw offset is appended to each line for convenience. */
static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
- {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
- {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
- {RCS, HWSTAM, 0x0, false}, /* 0x2098 */
- {RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
- {RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
- {RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
- {RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
- {RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
- {RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
- {RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
-
- {BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
- {BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
- {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
- {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
- {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
- {RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
+ {RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
+ {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
+ {RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
+ {RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
+ {RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
+ {RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
+ {RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
+ {RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
+ {RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
+ {RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
+
+ {BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
+ {BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
+ {BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
+ {BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
+ {BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */
+ {RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */
};
static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
- {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
- {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
- {RCS, HWSTAM, 0x0, false}, /* 0x2098 */
- {RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
- {RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
- {RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
- {RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
- {RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
- {RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
- {RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
-
- {RCS, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
- {RCS, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
- {RCS, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
- {RCS, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
- {RCS, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
- {RCS, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
- {RCS, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
- {RCS, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
- {RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
- {RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
- {RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
- {RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
- {RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
- {RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
- {RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */
- {RCS, TRINVTILEDETCT, 0, false}, /* 0x4dec */
- {RCS, TRVADR, 0, false}, /* 0x4df0 */
- {RCS, TRTTE, 0, false}, /* 0x4df4 */
-
- {BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
- {BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
- {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
- {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
- {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
-
- {VCS2, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */
-
- {VECS, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */
-
- {RCS, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
- {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
- {RCS, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
- {RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */
-
- {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
- {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
- {RCS, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */
-
- {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
- {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
- {RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
- {RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
+ {RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
+ {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
+ {RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
+ {RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
+ {RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
+ {RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
+ {RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
+ {RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
+ {RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
+ {RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
+
+ {RCS0, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
+ {RCS0, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
+ {RCS0, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
+ {RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
+ {RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
+ {RCS0, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
+ {RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
+ {RCS0, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
+ {RCS0, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
+ {RCS0, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
+ {RCS0, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
+ {RCS0, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
+ {RCS0, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
+ {RCS0, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
+ {RCS0, TRNULLDETCT, 0, false}, /* 0x4de8 */
+ {RCS0, TRINVTILEDETCT, 0, false}, /* 0x4dec */
+ {RCS0, TRVADR, 0, false}, /* 0x4df0 */
+ {RCS0, TRTTE, 0, false}, /* 0x4df4 */
+
+ {BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
+ {BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
+ {BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
+ {BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
+ {BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */
+
+ {VCS1, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */
+
+ {VECS0, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */
+
+ {RCS0, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
+ {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
+ {RCS0, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
+ {RCS0, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */
+
+ {RCS0, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
+ {RCS0, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
+
+ {RCS0, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
+ {RCS0, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
+ {RCS0, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
+ {RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */
};
static struct {
@@ -150,11 +149,11 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
{
i915_reg_t offset;
u32 regs[] = {
- [RCS] = 0xc800,
- [VCS] = 0xc900,
- [VCS2] = 0xca00,
- [BCS] = 0xcc00,
- [VECS] = 0xcb00,
+ [RCS0] = 0xc800,
+ [VCS0] = 0xc900,
+ [VCS1] = 0xca00,
+ [BCS0] = 0xcc00,
+ [VECS0] = 0xcb00,
};
int ring_id, i;
@@ -302,7 +301,7 @@ int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
goto out;
/* no MOCS register in context except render engine */
- if (req->engine->id != RCS)
+ if (req->engine->id != RCS0)
goto out;
ret = restore_render_mocs_control_for_inhibit(vgpu, req);
@@ -328,15 +327,16 @@ out:
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_uncore *uncore = &dev_priv->uncore;
struct intel_vgpu_submission *s = &vgpu->submission;
enum forcewake_domains fw;
i915_reg_t reg;
u32 regs[] = {
- [RCS] = 0x4260,
- [VCS] = 0x4264,
- [VCS2] = 0x4268,
- [BCS] = 0x426c,
- [VECS] = 0x4270,
+ [RCS0] = 0x4260,
+ [VCS0] = 0x4264,
+ [VCS1] = 0x4268,
+ [BCS0] = 0x426c,
+ [VECS0] = 0x4270,
};
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
@@ -352,21 +352,21 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
* otherwise the device can enter RC6 and interrupt the
* invalidation process
*/
- fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
+ fw = intel_uncore_forcewake_for_reg(uncore, reg,
FW_REG_READ | FW_REG_WRITE);
- if (ring_id == RCS && (INTEL_GEN(dev_priv) >= 9))
+ if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9)
fw |= FORCEWAKE_RENDER;
- intel_uncore_forcewake_get(dev_priv, fw);
+ intel_uncore_forcewake_get(uncore, fw);
- I915_WRITE_FW(reg, 0x1);
+ intel_uncore_write_fw(uncore, reg, 0x1);
- if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
+ if (wait_for_atomic((intel_uncore_read_fw(uncore, reg) == 0), 50))
gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
else
vgpu_vreg_t(vgpu, reg) = 0;
- intel_uncore_forcewake_put(dev_priv, fw);
+ intel_uncore_forcewake_put(uncore, fw);
gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
}
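
The uncore conversion above keeps the bracket unchanged: take forcewake for the register's domains (plus the render domain on Gen9+ for RCS0), write 1 to kick the invalidation, poll until the hardware clears it, then drop forcewake; only the first argument moves from dev_priv to &dev_priv->uncore. A user-space analogue of that acquire/write/poll/release shape, with all names hypothetical:

#include <errno.h>
#include <stdint.h>

struct fake_uncore {
	volatile uint32_t tlb_inv_reg;
	int fw_count;
};

static void fw_get(struct fake_uncore *u) { u->fw_count++; }
static void fw_put(struct fake_uncore *u) { u->fw_count--; }

/* Returns 0 on success, -ETIMEDOUT if the HW never acked (sketch). */
static int invalidate_tlb(struct fake_uncore *u)
{
	int retries = 50;

	fw_get(u);			/* keep the device out of RC6 */
	u->tlb_inv_reg = 0x1;		/* like intel_uncore_write_fw() */
	while (u->tlb_inv_reg && --retries)
		u->tlb_inv_reg = 0;	/* stands in for the HW ack */
	fw_put(u);
	return u->tlb_inv_reg ? -ETIMEDOUT : 0;
}

int main(void)
{
	struct fake_uncore u = { .tlb_inv_reg = 0, .fw_count = 0 };

	return invalidate_tlb(&u);
}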
@@ -379,11 +379,11 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
u32 old_v, new_v;
u32 regs[] = {
- [RCS] = 0xc800,
- [VCS] = 0xc900,
- [VCS2] = 0xca00,
- [BCS] = 0xcc00,
- [VECS] = 0xcb00,
+ [RCS0] = 0xc800,
+ [VCS0] = 0xc900,
+ [VCS1] = 0xca00,
+ [BCS0] = 0xcc00,
+ [VECS0] = 0xcb00,
};
int i;
@@ -391,8 +391,10 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
- if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)
- || IS_COFFEELAKE(dev_priv)) && ring_id == RCS)
+ if (ring_id == RCS0 &&
+ (IS_KABYLAKE(dev_priv) ||
+ IS_BROXTON(dev_priv) ||
+ IS_COFFEELAKE(dev_priv)))
return;
if (!pre && !gen9_render_mocs.initialized)
@@ -415,7 +417,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
offset.reg += 4;
}
- if (ring_id == RCS) {
+ if (ring_id == RCS0) {
l3_offset.reg = 0xb020;
for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
if (pre)
@@ -493,7 +495,8 @@ static void switch_mmio(struct intel_vgpu *pre,
* itself.
*/
if (mmio->in_context &&
- !is_inhibit_context(&s->shadow_ctx->__engine[ring_id]))
+ !is_inhibit_context(intel_context_lookup(s->shadow_ctx,
+ dev_priv->engine[ring_id])))
continue;
if (mmio->mask)
@@ -550,9 +553,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
* performance for batch mmio read/write, so we need to
* handle forcewake manually.
*/
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
switch_mmio(pre, next, ring_id);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 428d252344f1..3de5b643b266 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -60,6 +60,37 @@
#define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + (plane - 1) * 0x100)
#define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + (plane - 1) * 0x100)
+#define SKL_FLIP_EVENT(pipe, plane) (PRIMARY_A_FLIP_DONE + (plane) * 3 + (pipe))
+
+#define PLANE_CTL_ASYNC_FLIP (1 << 9)
+#define REG50080_FLIP_TYPE_MASK 0x3
+#define REG50080_FLIP_TYPE_ASYNC 0x1
+
+#define REG_50080(_pipe, _plane) ({ \
+ typeof(_pipe) (p) = (_pipe); \
+ typeof(_plane) (q) = (_plane); \
+ (((p) == PIPE_A) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x50080)) : \
+ (_MMIO(0x50090))) : \
+ (((p) == PIPE_B) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x50088)) : \
+ (_MMIO(0x50098))) : \
+ (((p) == PIPE_C) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x5008C)) : \
+ (_MMIO(0x5009C))) : \
+ (_MMIO(0x50080))))); })
+
+#define REG_50080_TO_PIPE(_reg) ({ \
+ typeof(_reg) (reg) = (_reg); \
+ (((reg) == 0x50080 || (reg) == 0x50090) ? (PIPE_A) : \
+ (((reg) == 0x50088 || (reg) == 0x50098) ? (PIPE_B) : \
+ (((reg) == 0x5008C || (reg) == 0x5009C) ? (PIPE_C) : \
+ (INVALID_PIPE)))); })
+
+#define REG_50080_TO_PLANE(_reg) ({ \
+ typeof(_reg) (reg) = (_reg); \
+ (((reg) == 0x50080 || (reg) == 0x50088 || (reg) == 0x5008C) ? \
+ (PLANE_PRIMARY) : \
+ (((reg) == 0x50090 || (reg) == 0x50098 || (reg) == 0x5009C) ? \
+ (PLANE_SPRITE0) : (I915_MAX_PLANES))); })
+
#define GFX_MODE_BIT_SET_IN_MASK(val, bit) \
((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
@@ -95,4 +126,7 @@
#define RING_GFX_MODE(base) _MMIO((base) + 0x29c)
#define VF_GUARDBAND _MMIO(0x83a4)
+/* define the effective range of MCHBAR register on Sandybridge+ */
+#define MCHBAR_MIRROR_REG_BASE _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x4000)
+
#endif
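
The REG_50080 family encodes a 2x3 lookup both ways: pipe/plane to register offset, and offset back to pipe and plane, so reg50080_mmio_write() in handlers.c can recover which surface a guest write targets. A small self-contained check of the decode tables, using plain enums in place of the i915 pipe/plane types:

#include <assert.h>

enum { PIPE_A, PIPE_B, PIPE_C, INVALID_PIPE };
enum { PLANE_PRIMARY, PLANE_SPRITE0, I915_MAX_PLANES };

static int reg_to_pipe(unsigned int reg)
{
	return (reg == 0x50080 || reg == 0x50090) ? PIPE_A :
	       (reg == 0x50088 || reg == 0x50098) ? PIPE_B :
	       (reg == 0x5008C || reg == 0x5009C) ? PIPE_C : INVALID_PIPE;
}

static int reg_to_plane(unsigned int reg)
{
	return (reg == 0x50080 || reg == 0x50088 || reg == 0x5008C) ?
		PLANE_PRIMARY :
	       (reg == 0x50090 || reg == 0x50098 || reg == 0x5009C) ?
		PLANE_SPRITE0 : I915_MAX_PLANES;
}

int main(void)
{
	assert(reg_to_pipe(0x50098) == PIPE_B);
	assert(reg_to_plane(0x50098) == PLANE_SPRITE0);
	assert(reg_to_pipe(0x5008C) == PIPE_C);
	assert(reg_to_plane(0x50080) == PLANE_PRIMARY);
	return 0;
}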
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 05b953793316..8998fa5ab198 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -93,7 +93,7 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload,
i915_mmio_reg_offset(EU_PERF_CNTL6),
};
- if (workload->ring_id != RCS)
+ if (workload->ring_id != RCS0)
return;
if (save) {
@@ -149,7 +149,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
COPY_REG_MASKED(ctx_ctrl);
COPY_REG(ctx_timestamp);
- if (ring_id == RCS) {
+ if (ring_id == RCS0) {
COPY_REG(bb_per_ctx_ptr);
COPY_REG(rcs_indirect_ctx);
COPY_REG(rcs_indirect_ctx_offset);
@@ -177,7 +177,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+ if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0)
context_page_num = 19;
i = 2;
@@ -434,8 +434,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
if (ret)
goto err_unpin;
- if ((workload->ring_id == RCS) &&
- (workload->wa_ctx.indirect_ctx.size != 0)) {
+ if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) {
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret)
goto err_shadow;
@@ -803,7 +802,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
context_page_num = rq->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS)
+ if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS0)
context_page_num = 19;
i = 2;
@@ -851,13 +850,13 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
}
void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
- unsigned long engine_mask)
+ intel_engine_mask_t engine_mask)
{
struct intel_vgpu_submission *s = &vgpu->submission;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
struct intel_vgpu_workload *pos, *n;
- unsigned int tmp;
+ intel_engine_mask_t tmp;
/* free the unsubmitted workloads in the queues. */
for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
@@ -903,8 +902,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
workload->status = 0;
}
- if (!workload->status && !(vgpu->resetting_eng &
- ENGINE_MASK(ring_id))) {
+ if (!workload->status &&
+ !(vgpu->resetting_eng & BIT(ring_id))) {
update_guest_context(workload);
for_each_set_bit(event, workload->pending_events,
@@ -927,7 +926,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
list_del_init(&workload->list);
- if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
+ if (workload->status || vgpu->resetting_eng & BIT(ring_id)) {
/* if workload->status is not successful, a GPU hang has
* occurred or something is wrong with i915/GVT,
* and GVT won't inject context switch interrupt to guest.
@@ -941,7 +940,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
* cleaned up during the resetting process later, so doing
* the workload clean up here doesn't have any impact.
**/
- intel_vgpu_clean_workloads(vgpu, ENGINE_MASK(ring_id));
+ intel_vgpu_clean_workloads(vgpu, BIT(ring_id));
}
workload->complete(workload);
@@ -1001,7 +1000,7 @@ static int workload_thread(void *priv)
workload->ring_id, workload);
if (need_force_wake)
- intel_uncore_forcewake_get(gvt->dev_priv,
+ intel_uncore_forcewake_get(&gvt->dev_priv->uncore,
FORCEWAKE_ALL);
ret = dispatch_workload(workload);
@@ -1023,7 +1022,7 @@ complete:
complete_current_workload(gvt, ring_id);
if (need_force_wake)
- intel_uncore_forcewake_put(gvt->dev_priv,
+ intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
FORCEWAKE_ALL);
intel_runtime_pm_put_unchecked(gvt->dev_priv);
@@ -1114,9 +1113,9 @@ i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s)
struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
int i;
- if (i915_vm_is_48bit(&i915_ppgtt->vm))
+ if (i915_vm_is_4lvl(&i915_ppgtt->vm)) {
px_dma(&i915_ppgtt->pml4) = s->i915_context_pml4;
- else {
+ } else {
for (i = 0; i < GEN8_3LVL_PDPES; i++)
px_dma(i915_ppgtt->pdp.page_directory[i]) =
s->i915_context_pdps[i];
@@ -1150,7 +1149,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
*
*/
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
- unsigned long engine_mask)
+ intel_engine_mask_t engine_mask)
{
struct intel_vgpu_submission *s = &vgpu->submission;
@@ -1167,7 +1166,7 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s)
struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
int i;
- if (i915_vm_is_48bit(&i915_ppgtt->vm))
+ if (i915_vm_is_4lvl(&i915_ppgtt->vm))
s->i915_context_pml4 = px_dma(&i915_ppgtt->pml4);
else {
for (i = 0; i < GEN8_3LVL_PDPES; i++)
@@ -1240,7 +1239,7 @@ out_shadow_ctx:
*
*/
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
- unsigned long engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int interface)
{
struct intel_vgpu_submission *s = &vgpu->submission;
@@ -1450,7 +1449,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
workload->rb_start = start;
workload->rb_ctl = ctl;
- if (ring_id == RCS) {
+ if (ring_id == RCS0) {
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 0635b2c4bed7..90c6756f5453 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -142,12 +142,12 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
- unsigned long engine_mask);
+ intel_engine_mask_t engine_mask);
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
- unsigned long engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int interface);
extern const struct intel_vgpu_submission_ops
@@ -160,6 +160,6 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
- unsigned long engine_mask);
+ intel_engine_mask_t engine_mask);
#endif
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 720e2b10adaa..44ce3c2b9ac1 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -44,7 +44,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
- vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
+ vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_PPGTT;
vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;
@@ -526,11 +526,11 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
* GPU engines. For FLR, engine_mask is ignored.
*/
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
- unsigned int engine_mask)
+ intel_engine_mask_t engine_mask)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
- unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
+ intel_engine_mask_t resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
gvt_dbg_core("------------------------------------------\n");
gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",