author    Changbin Du <changbin.du@intel.com>    2017-06-23 15:45:32 +0800
committer Zhenyu Wang <zhenyuw@linux.intel.com>  2017-08-10 10:26:06 +0800
commit    4671ea204179dc705d4b0c31045e6acdfd6e59e8 (patch)
tree      852888a135c1fc7cf5c0bced5262cb925ae05726 /drivers
parent    drm/i915/gvt: Optimize ring switch 2x faster by removing unnecessary POSTING_READ (diff)
drm/i915/gvt: Optimize ring switch 2x faster again by lightweight mmio access wrapper
The I915_READ/WRITE macros are not just mmio reads/writes; they also contain debug checking and forcewake domain lookup. This is too heavy for the GVT ring-switch path, which accesses a batch of mmio registers on every ring switch. We can handle forcewake manually and use the raw accessors (I915_READ_FW/I915_WRITE_FW) instead. The benefit is 2x faster mmio switch performance.

	          Before     After
	cycles   ~550000   ~250000

v2: Use the existing I915_READ_FW/I915_WRITE_FW macros. (zhenyu)

Signed-off-by: Changbin Du <changbin.du@intel.com>
Reviewed-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
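For illustration, the pattern the patch applies is sketched below. The helper name batch_read_regs and its signature are hypothetical; I915_READ_FW, intel_uncore_forcewake_get/put and FORCEWAKE_ALL are the real i915 interfaces used in the diff that follows.

/*
 * Hypothetical helper showing the pattern: take forcewake once for the
 * whole batch, then use the raw _FW accessor, which skips the per-access
 * forcewake domain lookup and debug checks done by I915_READ/I915_WRITE.
 */
static void batch_read_regs(struct drm_i915_private *dev_priv,
			    const i915_reg_t *regs, u32 *values, int count)
{
	int i;

	/* One explicit forcewake grab covers the whole batch. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	for (i = 0; i < count; i++)
		values[i] = I915_READ_FW(regs[i]);	/* raw mmio read */

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

Paying for one explicit get/put pair instead of a per-register domain lookup across dozens of registers is where the ~2x improvement comes from.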
Diffstat (limited to 'drivers')
-rw-r--r--    drivers/gpu/drm/i915/gvt/render.c    39
1 file changed, 26 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 5a08bcd436a1..2ea542257f03 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -207,7 +207,7 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
offset.reg = regs[ring_id];
for (i = 0; i < 64; i++) {
- gen9_render_mocs[ring_id][i] = I915_READ(offset);
+ gen9_render_mocs[ring_id][i] = I915_READ_FW(offset);
I915_WRITE(offset, vgpu_vreg(vgpu, offset));
offset.reg += 4;
}
@@ -215,8 +215,8 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
if (ring_id == RCS) {
l3_offset.reg = 0xb020;
for (i = 0; i < 32; i++) {
- gen9_render_mocs_L3[i] = I915_READ(l3_offset);
- I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset));
+ gen9_render_mocs_L3[i] = I915_READ_FW(l3_offset);
+ I915_WRITE_FW(l3_offset, vgpu_vreg(vgpu, l3_offset));
l3_offset.reg += 4;
}
}
@@ -240,16 +240,16 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
offset.reg = regs[ring_id];
for (i = 0; i < 64; i++) {
- vgpu_vreg(vgpu, offset) = I915_READ(offset);
- I915_WRITE(offset, gen9_render_mocs[ring_id][i]);
+ vgpu_vreg(vgpu, offset) = I915_READ_FW(offset);
+ I915_WRITE_FW(offset, gen9_render_mocs[ring_id][i]);
offset.reg += 4;
}
if (ring_id == RCS) {
l3_offset.reg = 0xb020;
for (i = 0; i < 32; i++) {
- vgpu_vreg(vgpu, l3_offset) = I915_READ(l3_offset);
- I915_WRITE(l3_offset, gen9_render_mocs_L3[i]);
+ vgpu_vreg(vgpu, l3_offset) = I915_READ_FW(l3_offset);
+ I915_WRITE_FW(l3_offset, gen9_render_mocs_L3[i]);
l3_offset.reg += 4;
}
}
@@ -284,7 +284,7 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
if (mmio->ring_id != ring_id)
continue;
- mmio->value = I915_READ(mmio->reg);
+ mmio->value = I915_READ_FW(mmio->reg);
/*
* if it is an inhibit context, load in_context mmio
@@ -301,7 +301,7 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
else
v = vgpu_vreg(vgpu, mmio->reg);
- I915_WRITE(mmio->reg, v);
+ I915_WRITE_FW(mmio->reg, v);
last_reg = mmio->reg;
trace_render_mmio(vgpu->id, "load",
@@ -311,7 +311,7 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
/* Make sure the switched MMIOs have taken effect. */
if (likely(INTEL_GVT_MMIO_OFFSET(last_reg)))
- POSTING_READ(last_reg);
+ I915_READ_FW(last_reg);
handle_tlb_pending_event(vgpu, ring_id);
}
@@ -338,7 +338,7 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
if (mmio->ring_id != ring_id)
continue;
- vgpu_vreg(vgpu, mmio->reg) = I915_READ(mmio->reg);
+ vgpu_vreg(vgpu, mmio->reg) = I915_READ_FW(mmio->reg);
if (mmio->mask) {
vgpu_vreg(vgpu, mmio->reg) &= ~(mmio->mask << 16);
@@ -349,7 +349,7 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
if (mmio->in_context)
continue;
- I915_WRITE(mmio->reg, v);
+ I915_WRITE_FW(mmio->reg, v);
last_reg = mmio->reg;
trace_render_mmio(vgpu->id, "restore",
@@ -359,7 +359,7 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
/* Make sure the switched MMIOs have taken effect. */
if (likely(INTEL_GVT_MMIO_OFFSET(last_reg)))
- POSTING_READ(last_reg);
+ I915_READ_FW(last_reg);
}
/**
@@ -374,12 +374,23 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
struct intel_vgpu *next, int ring_id)
{
+ struct drm_i915_private *dev_priv;
+
if (WARN_ON(!pre && !next))
return;
gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
pre ? "vGPU" : "host", next ? "vGPU" : "HOST");
+ dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
+
+ /**
+  * We are using the raw mmio access wrapper to improve
+  * performance for batch mmio read/write, so we need to
+  * handle forcewake manually.
+  */
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
/**
* TODO: Optimize for vGPU to vGPU switch by merging
* switch_mmio_to_host() and switch_mmio_to_vgpu().
@@ -389,4 +400,6 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
if (next)
switch_mmio_to_vgpu(next, ring_id);
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
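As a closing note, the Before/After cycle counts in the commit message could be collected with a timing wrapper along the lines of the sketch below; get_cycles() is the kernel's cycle counter helper from <asm/timex.h>, while the wrapper itself is an assumption for illustration, not part of the patch.

#include <asm/timex.h>

/*
 * Hypothetical measurement wrapper, not part of the patch: one way the
 * ~550000 -> ~250000 cycle figures could be obtained.
 */
static void timed_switch_mmio(struct intel_vgpu *pre,
			      struct intel_vgpu *next, int ring_id)
{
	cycles_t t0 = get_cycles();

	intel_gvt_switch_mmio(pre, next, ring_id);

	pr_debug("gvt: ring %d mmio switch took %llu cycles\n",
		 ring_id, (unsigned long long)(get_cycles() - t0));
}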