author     Changbin Du <changbin.du@intel.com>  2017-09-22 10:00:09 +0800
committer  Zhi Wang <zhi.a.wang@intel.com>      2017-10-17 00:44:10 +0800
commit     ba3ee00683bc2dad4c14fba805c2241ae23acff9 (patch)
tree       f037488d488883e9b8195d21f426b4ac7709712c /drivers/gpu
parent     drm/i915/bios: parse DDI ports also for CHV for HDMI DDC pin and DP AUX channel (diff)
download   linux-dev-ba3ee00683bc2dad4c14fba805c2241ae23acff9.tar.xz
           linux-dev-ba3ee00683bc2dad4c14fba805c2241ae23acff9.zip
drm/i915/gvt: Fix GPU hang after reusing vGPU instance across different guest OS
We have implemented a delayed ring MMIO switch mechanism to reduce unnecessary
MMIO switches. While a vGPU is being destroyed or detached from a VM, we need
to force the ring switch back to the host context.

The latter deadline was missed. As a result, a workload from VM2 could execute
under the ring context of VM1, which had previously been attached to the same
vGPU instance. Eventually the GPU hangs.

This patch guarantees that both deadlines are met.

v2: Remove unused variable 'scheduler'.

Signed-off-by: Changbin Du <changbin.du@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
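To make the failure mode concrete, the following stand-alone C sketch models the idea (this is not GVT-g code; every *_model name is invented for illustration). The scheduler remembers which vGPU's ring context is currently loaded on each engine, and the fix forces those engines back to the host context as soon as the vGPU stops being scheduled, so a later guest reusing the same vGPU instance never runs on top of a stale ring context.

/*
 * Stand-alone sketch (not GVT-g code): all *_model names are hypothetical
 * and only illustrate the idea behind this patch.
 */
#include <stdio.h>

#define NUM_ENGINES_MODEL 3

struct vgpu_model {
	int id;
};

/* Which vGPU's MMIO context is currently loaded on each ring (NULL = host). */
static struct vgpu_model *engine_owner_model[NUM_ENGINES_MODEL];

/* Stand-in for intel_gvt_switch_mmio(): load 'next' on ring ring_id. */
static void switch_mmio_model(struct vgpu_model *next, int ring_id)
{
	if (next)
		printf("ring %d: mmio context -> vgpu%d\n", ring_id, next->id);
	else
		printf("ring %d: mmio context -> host\n", ring_id);
	engine_owner_model[ring_id] = next;
}

/*
 * Analogue of the fix: when a vGPU stops being scheduled (destroy or
 * detach), force every ring it still owns back to the host context.
 */
static void stop_schedule_model(struct vgpu_model *vgpu)
{
	int ring_id;

	for (ring_id = 0; ring_id < NUM_ENGINES_MODEL; ring_id++) {
		if (engine_owner_model[ring_id] == vgpu)
			switch_mmio_model(NULL, ring_id);
	}
}

int main(void)
{
	struct vgpu_model vm1 = { .id = 1 }, vm2 = { .id = 2 };

	/* VM1 ran a workload on ring 0; the delayed-switch optimisation
	 * intentionally leaves its ring context loaded afterwards. */
	switch_mmio_model(&vm1, 0);

	/* Without the fix, detaching VM1 skipped this step, so VM2's first
	 * workload on ring 0 ran under VM1's stale ring context. */
	stop_schedule_model(&vm1);

	/* The reused vGPU instance now starts VM2 from the host context. */
	switch_mmio_model(&vm2, 0);
	return 0;
}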
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c  |  22
1 file changed, 10 insertions, 12 deletions
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 436377da41ba..03532dfc0cd5 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -308,20 +308,8 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
 
 static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
 {
-        struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler;
-        int ring_id;
-
         kfree(vgpu->sched_data);
         vgpu->sched_data = NULL;
-
-        spin_lock_bh(&scheduler->mmio_context_lock);
-        for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
-                if (scheduler->engine_owner[ring_id] == vgpu) {
-                        intel_gvt_switch_mmio(vgpu, NULL, ring_id);
-                        scheduler->engine_owner[ring_id] = NULL;
-                }
-        }
-        spin_unlock_bh(&scheduler->mmio_context_lock);
 }
 
 static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
@@ -388,6 +376,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 {
         struct intel_gvt_workload_scheduler *scheduler =
                 &vgpu->gvt->scheduler;
+        int ring_id;
 
         gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
 
@@ -401,4 +390,13 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
                 scheduler->need_reschedule = true;
                 scheduler->current_vgpu = NULL;
         }
+
+        spin_lock_bh(&scheduler->mmio_context_lock);
+        for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
+                if (scheduler->engine_owner[ring_id] == vgpu) {
+                        intel_gvt_switch_mmio(vgpu, NULL, ring_id);
+                        scheduler->engine_owner[ring_id] = NULL;
+                }
+        }
+        spin_unlock_bh(&scheduler->mmio_context_lock);
 }
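Design note (inferred from the diff and commit message): moving the forced MMIO switch from tbs_sched_clean_vgpu() into intel_vgpu_stop_schedule() appears to be what makes both deadlines reachable, since the stop-schedule path runs whenever the vGPU ceases to be scheduled, covering both destroy and detach, while the per-policy cleanup no longer needs to know about ring ownership at all.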