Diffstat (limited to 'drivers/gpu/drm/i915/gvt/cmd_parser.c')
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c  |  86
1 file changed, 44 insertions(+), 42 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 2b92cc8a7d1a..41b2c3aaa04a 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -616,9 +616,6 @@ static inline u32 get_opcode(u32 cmd, int ring_id)
{
struct decode_info *d_info;
- if (ring_id >= I915_NUM_ENGINES)
- return INVALID_OP;
-
d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
if (d_info == NULL)
return INVALID_OP;
@@ -661,9 +658,6 @@ static inline void print_opcode(u32 cmd, int ring_id)
struct decode_info *d_info;
int i;
- if (ring_id >= I915_NUM_ENGINES)
- return;
-
d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
if (d_info == NULL)
return;
@@ -1215,7 +1209,7 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
if (!info->async_flip)
return 0;
- if (IS_SKYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0);
tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) &
GENMASK(12, 10)) >> 10;
@@ -1243,7 +1237,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
info->surf_val << 12);
- if (IS_SKYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
info->stride_val);
set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
@@ -1267,7 +1261,7 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
if (IS_BROADWELL(dev_priv))
return gen8_decode_mi_display_flip(s, info);
- if (IS_SKYLAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
return skl_decode_mi_display_flip(s, info);
return -ENODEV;
@@ -1278,7 +1272,9 @@ static int check_mi_display_flip(struct parser_exec_state *s,
{
struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
- if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+ if (IS_BROADWELL(dev_priv)
+ || IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv))
return gen8_check_mi_display_flip(s, info);
return -ENODEV;
}
@@ -1289,7 +1285,9 @@ static int update_plane_mmio_from_mi_display_flip(
{
struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
- if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+ if (IS_BROADWELL(dev_priv)
+ || IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv))
return gen8_update_plane_mmio_from_mi_display_flip(s, info);
return -ENODEV;
}
@@ -1557,7 +1555,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
len += copy_len;
gma += copy_len;
}
- return 0;
+ return len;
}
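
Note on the hunk above: copy_gma_to_hva() now reports the number of bytes it copied rather than a bare 0, so callers can advance their destination cursor by the return value and must treat only negative returns as errors (hence the "ret < 0" checks in the later hunks). A minimal userspace sketch of that caller pattern; stub_copy() and the buffer sizes are stand-ins for illustration, not the driver's API:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for copy_gma_to_hva(): copies [start, end) from src into dst
 * and returns the number of bytes copied, or a negative errno on failure. */
static int stub_copy(const uint8_t *src, unsigned long start,
		     unsigned long end, void *dst)
{
	if (end < start)
		return -EINVAL;
	memcpy(dst, src + start, end - start);
	return end - start;
}

int main(void)
{
	uint8_t guest[64], shadow[64];
	uint32_t *cs = (uint32_t *)shadow;
	int ret;

	memset(guest, 0xab, sizeof(guest));

	/* First chunk: head -> top of the (pretend) ring. */
	ret = stub_copy(guest, 32, 64, cs);
	if (ret < 0)
		return 1;
	cs += ret / sizeof(uint32_t);	/* advance by what was copied */

	/* Second chunk: start -> tail. */
	ret = stub_copy(guest, 0, 32, cs);
	if (ret < 0)
		return 1;

	printf("copied %d bytes in the second chunk\n", ret);
	return 0;
}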
@@ -1569,7 +1567,8 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
{
struct intel_gvt *gvt = s->vgpu->gvt;
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
+ || IS_KABYLAKE(gvt->dev_priv)) {
/* BDW decides privilege based on address space */
if (cmd_val(s, 0) & (1 << 8))
return 0;
@@ -1673,7 +1672,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
gma, gma + bb_size,
dst);
- if (ret) {
+ if (ret < 0) {
gvt_vgpu_err("fail to copy guest ring buffer\n");
goto unmap_src;
}
@@ -2478,7 +2477,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
t1 = get_cycles();
- memcpy(&s_before_advance_custom, s, sizeof(struct parser_exec_state));
+ s_before_advance_custom = *s;
if (info->handler) {
ret = info->handler(s);
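
Note on the hunk above: replacing the sizeof-based memcpy() with plain structure assignment copies the same bytes but lets the compiler check that source and destination really are the same type. A trivial standalone illustration; exec_state and its fields are invented for the example:

#include <stdio.h>

struct exec_state {
	int ring_id;
	unsigned long ip_gma;
};

int main(void)
{
	struct exec_state s = { .ring_id = 1, .ip_gma = 0x1000 };
	struct exec_state saved;

	/* Plain struct assignment copies every member and is type-checked,
	 * unlike memcpy(&saved, &s, sizeof(struct exec_state)). */
	saved = s;

	printf("saved ring %d at %#lx\n", saved.ring_id, saved.ip_gma);
	return 0;
}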
@@ -2604,6 +2603,9 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail;
struct parser_exec_state s;
int ret = 0;
+ struct intel_vgpu_workload *workload = container_of(wa_ctx,
+ struct intel_vgpu_workload,
+ wa_ctx);
/* ring base is page aligned */
if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
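
Note on the hunk above: instead of keeping a back-pointer from the wa_ctx to its workload, the patch recovers the containing structure with container_of(), which works because intel_shadow_wa_ctx is embedded as the wa_ctx member of struct intel_vgpu_workload. A standalone sketch of the idiom; the structures below are simplified stand-ins, not the real driver types:

#include <stddef.h>
#include <stdio.h>

/* Minimal container_of(), equivalent to the kernel's definition. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct shadow_wa_ctx {
	unsigned long guest_gma;
};

struct vgpu_workload {
	int ring_id;
	struct shadow_wa_ctx wa_ctx;	/* embedded, not a pointer */
};

static void dump_ring(struct shadow_wa_ctx *wa_ctx)
{
	/* Recover the workload that embeds this wa_ctx. */
	struct vgpu_workload *workload =
		container_of(wa_ctx, struct vgpu_workload, wa_ctx);

	printf("ring_id = %d\n", workload->ring_id);
}

int main(void)
{
	struct vgpu_workload w = { .ring_id = 2 };

	dump_ring(&w.wa_ctx);
	return 0;
}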
@@ -2618,14 +2620,14 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
s.buf_type = RING_BUFFER_INSTRUCTION;
s.buf_addr_type = GTT_BUFFER;
- s.vgpu = wa_ctx->workload->vgpu;
- s.ring_id = wa_ctx->workload->ring_id;
+ s.vgpu = workload->vgpu;
+ s.ring_id = workload->ring_id;
s.ring_start = wa_ctx->indirect_ctx.guest_gma;
s.ring_size = ring_size;
s.ring_head = gma_head;
s.ring_tail = gma_tail;
s.rb_va = wa_ctx->indirect_ctx.shadow_va;
- s.workload = wa_ctx->workload;
+ s.workload = workload;
ret = ip_gma_set(&s, gma_head);
if (ret)
@@ -2640,11 +2642,8 @@ out:
static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
- int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
- struct intel_ring *ring = shadow_ctx->engine[ring_id].ring;
unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
- unsigned int copy_len = 0;
+ u32 *cs;
int ret;
guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
@@ -2658,36 +2657,33 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
gma_top = workload->rb_start + guest_rb_size;
/* allocate shadow ring buffer */
- ret = intel_ring_begin(workload->req, workload->rb_len / 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
/* get shadow ring buffer va */
- workload->shadow_ring_buffer_va = ring->vaddr + ring->tail;
+ workload->shadow_ring_buffer_va = cs;
/* head > tail --> copy head <-> top */
if (gma_head > gma_tail) {
ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
- gma_head, gma_top,
- workload->shadow_ring_buffer_va);
- if (ret) {
+ gma_head, gma_top, cs);
+ if (ret < 0) {
gvt_vgpu_err("fail to copy guest ring buffer\n");
return ret;
}
- copy_len = gma_top - gma_head;
+ cs += ret / sizeof(u32);
gma_head = workload->rb_start;
}
/* copy head or start <-> tail */
- ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
- gma_head, gma_tail,
- workload->shadow_ring_buffer_va + copy_len);
- if (ret) {
+ ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, cs);
+ if (ret < 0) {
gvt_vgpu_err("fail to copy guest ring buffer\n");
return ret;
}
- ring->tail += workload->rb_len;
- intel_ring_advance(ring);
+ cs += ret / sizeof(u32);
+ intel_ring_advance(workload->req, cs);
return 0;
}
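
Note on the hunk above: the rewritten ring-buffer shadowing follows the i915 convention in which intel_ring_begin() returns either a u32 * cursor into the ring or an ERR_PTR()-encoded errno, and the caller emits dwords through that cursor before intel_ring_advance(). A self-contained userspace sketch of the error-pointer idiom; ring_begin_stub() and the fixed-size ring are invented here and only mimic the pattern:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-4095)
#define PTR_ERR(ptr)	((long)(ptr))

static uint32_t ring[1024];
static unsigned int ring_tail;

/* Reserve room for `dwords` and return a cursor into the ring,
 * or an encoded -ENOSPC when the (pretend) ring is full. */
static uint32_t *ring_begin_stub(unsigned int dwords)
{
	if (ring_tail + dwords > 1024)
		return ERR_PTR(-ENOSPC);
	return ring + ring_tail;
}

int main(void)
{
	uint32_t *cs = ring_begin_stub(4);

	if (IS_ERR(cs)) {
		fprintf(stderr, "begin failed: %ld\n", PTR_ERR(cs));
		return 1;
	}

	*cs++ = 0x1;		/* emit dwords through the cursor */
	*cs++ = 0x2;
	ring_tail = cs - ring;	/* what intel_ring_advance() accounts for */

	printf("tail is now %u\n", ring_tail);
	return 0;
}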
@@ -2714,12 +2710,15 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
int ctx_size = wa_ctx->indirect_ctx.size;
unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
- struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
+ struct intel_vgpu_workload *workload = container_of(wa_ctx,
+ struct intel_vgpu_workload,
+ wa_ctx);
+ struct intel_vgpu *vgpu = workload->vgpu;
struct drm_i915_gem_object *obj;
int ret = 0;
void *map;
- obj = i915_gem_object_create(wa_ctx->workload->vgpu->gvt->dev_priv,
+ obj = i915_gem_object_create(workload->vgpu->gvt->dev_priv,
roundup(ctx_size + CACHELINE_BYTES,
PAGE_SIZE));
if (IS_ERR(obj))
@@ -2739,11 +2738,11 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
goto unmap_src;
}
- ret = copy_gma_to_hva(wa_ctx->workload->vgpu,
- wa_ctx->workload->vgpu->gtt.ggtt_mm,
+ ret = copy_gma_to_hva(workload->vgpu,
+ workload->vgpu->gtt.ggtt_mm,
guest_gma, guest_gma + ctx_size,
map);
- if (ret) {
+ if (ret < 0) {
gvt_vgpu_err("fail to copy guest indirect ctx\n");
goto unmap_src;
}
@@ -2778,7 +2777,10 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
int ret;
- struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
+ struct intel_vgpu_workload *workload = container_of(wa_ctx,
+ struct intel_vgpu_workload,
+ wa_ctx);
+ struct intel_vgpu *vgpu = workload->vgpu;
if (wa_ctx->indirect_ctx.size == 0)
return 0;