From 19ab4ed329393c0674f2b78fb71365a9461ee79b Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 27 Apr 2016 17:43:22 +0300 Subject: drm/i915: Update RAWCLK_FREQ register on VLV/CHV MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I just noticed that VLV/CHV have a RAWCLK_FREQ register just like PCH platforms. It lives in the display power well, so we should update it when enabling the power well. Interestingly the BIOS seems to leave it at the reset value (125), which doesn't match the rawclk frequency on VLV/CHV (200 MHz). As always with these registers, the spec is extremely vague about what the register does. All it says is: "This is used to generate a divided down clock for miscellaneous timers in display." Based on a quick test, at least AUX and PWM appear to be unaffected by this. But since the register is there, let's configure it in accordance with the spec. Note that we have to move intel_update_rawclk() to occur before we touch the power wells, so that dev_priv->rawclk_freq is already populated when the disp2 enable hook gets called for the first time. I think this should be safe to do on other platforms as well. Cc: Rodrigo Vivi Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1461768202-17544-1-git-send-email-ville.syrjala@linux.intel.com Reviewed-by: Rodrigo Vivi --- drivers/gpu/drm/i915/i915_dma.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm/i915/i915_dma.c') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 5c7615041b31..fd1260d2b6ec 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -454,6 +454,9 @@ static int i915_load_modeset_init(struct drm_device *dev) if (ret) goto cleanup_vga_client; + /* must happen before intel_power_domains_init_hw() on VLV/CHV */ + intel_update_rawclk(dev_priv); + intel_power_domains_init_hw(dev_priv, false); intel_csr_ucode_init(dev_priv); -- cgit v1.2.3-59-g8ed1b From e7ae86bab90887e1892e6c33c4fb38d8ca6935f9 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 28 Apr 2016 09:56:59 +0100 Subject: drm/i915: Unify GPU resets upon shutdown Both execlists and legacy need to reset the context (and mode) of the GPU before we lose control of the system. By resetting the GPU, we revert back to default settings. This simplifies the life of any subsequent driver (in particular for virtualized setups) as it does not then have to try and recover from an unknown condition. As both paths need to reset for the same reason, move the reset to a common point. This unifies the resets added in a647828afc (drm/i915: Also perform gpu reset under execlist mode) and 8e96d9c4d9 (drm/i915: reset the GPU on context fini). v2: Restrict the reset to "modern" gen (where we enable HW contexts) to try and avoid leaving the machine in an unusable state with a risky reset on older GPUs. This should keep the status quo as to who performs resets (i.e. currently only GPUs with HW contexts perform a reset on shutdown).
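As a reading aid, the shutdown ordering described above condenses to the sketch below (not the literal patch; the real implementation is the i915_gem_fini() helper added in the diff that follows, and the function name here is illustrative only):

	/* Sketch of the unified shutdown path: reset first (only where
	 * HW contexts exist and a reset is therefore known to be safe),
	 * then tear down the GEM state under struct_mutex.
	 */
	static void example_gpu_shutdown(struct drm_device *dev)
	{
		if (HAS_HW_CONTEXTS(dev)) {
			/* -ENODEV just means no reset method is available */
			int ret = intel_gpu_reset(dev, ALL_ENGINES);
			WARN_ON(ret && ret != -ENODEV);
		}

		mutex_lock(&dev->struct_mutex);
		i915_gem_reset(dev);		/* flush stray state after the reset */
		i915_gem_cleanup_engines(dev);
		i915_gem_context_fini(dev);
		mutex_unlock(&dev->struct_mutex);
	}

Both the i915_load_modeset_init() error unwind and i915_driver_unload() then call this single helper instead of open-coding the engine/context teardown.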
Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin CC: Joonas Lahtinen Cc: Mika Kuoppala Cc: "Niu, Bing" Reviewed-by: Mika Kuoppala Link: http://patchwork.freedesktop.org/patch/msgid/1461833819-3991-25-git-send-email-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_dma.c | 45 +++++++++++++++++++++++++++------ drivers/gpu/drm/i915/i915_gem.c | 8 ------ drivers/gpu/drm/i915/i915_gem_context.c | 10 +------- 3 files changed, 38 insertions(+), 25 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_dma.c') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index fd1260d2b6ec..f69330cf0118 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -425,6 +425,41 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { .can_switch = i915_switcheroo_can_switch, }; +static void i915_gem_fini(struct drm_device *dev) +{ + /* + * Neither the BIOS, ourselves or any other kernel + * expects the system to be in execlists mode on startup, + * so we need to reset the GPU back to legacy mode. And the only + * known way to disable logical contexts is through a GPU reset. + * + * So in order to leave the system in a known default configuration, + * always reset the GPU upon unload. Afterwards we then clean up the + * GEM state tracking, flushing off the requests and leaving the + * system in a known idle state. + * + * Note that it is of the utmost importance that the GPU is idle and + * all stray writes are flushed *before* we dismantle the backing + * storage for the pinned objects. + * + * However, since we are uncertain that resetting the GPU on older + * machines is a good idea, we don't - just in case it leaves the + * machine in an unusable condition. + */ + if (HAS_HW_CONTEXTS(dev)) { + int reset = intel_gpu_reset(dev, ALL_ENGINES); + WARN_ON(reset && reset != -ENODEV); + } + + mutex_lock(&dev->struct_mutex); + i915_gem_reset(dev); + i915_gem_cleanup_engines(dev); + i915_gem_context_fini(dev); + mutex_unlock(&dev->struct_mutex); + + WARN_ON(!list_empty(&to_i915(dev)->context_list)); +} + static int i915_load_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -509,10 +544,7 @@ static int i915_load_modeset_init(struct drm_device *dev) return 0; cleanup_gem: - mutex_lock(&dev->struct_mutex); - i915_gem_cleanup_engines(dev); - i915_gem_context_fini(dev); - mutex_unlock(&dev->struct_mutex); + i915_gem_fini(dev); cleanup_irq: intel_guc_ucode_fini(dev); drm_irq_uninstall(dev); @@ -1459,10 +1491,7 @@ int i915_driver_unload(struct drm_device *dev) flush_workqueue(dev_priv->wq); intel_guc_ucode_fini(dev); - mutex_lock(&dev->struct_mutex); - i915_gem_cleanup_engines(dev); - i915_gem_context_fini(dev); - mutex_unlock(&dev->struct_mutex); + i915_gem_fini(dev); intel_fbc_cleanup_cfb(dev_priv); intel_power_domains_fini(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 411201f94b43..e9841eab5e35 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4969,14 +4969,6 @@ i915_gem_cleanup_engines(struct drm_device *dev) for_each_engine(engine, dev_priv) dev_priv->gt.cleanup_engine(engine); - - if (i915.enable_execlists) - /* - * Neither the BIOS, ourselves or any other kernel - * expects the system to be in execlists mode on startup, - * so we need to reset the GPU back to legacy mode. 
- */ - intel_gpu_reset(dev, ALL_ENGINES); } static void diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 25a9a1445ba9..b1b704c2c001 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -450,16 +450,8 @@ void i915_gem_context_fini(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_context *dctx = dev_priv->kernel_context; - i915_gem_context_lost(dev_priv); - - if (dctx->legacy_hw_ctx.rcs_state) { - /* The only known way to stop the gpu from accessing the hw context is - * to reset it. Do this as the very last operation to avoid confusing - * other code, leading to spurious errors. */ - intel_gpu_reset(dev, ALL_ENGINES); - + if (dctx->legacy_hw_ctx.rcs_state) i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state); - } i915_gem_context_unreference(dctx); dev_priv->kernel_context = NULL; -- cgit v1.2.3-59-g8ed1b From 0e4ca1008e101d781b430bfecae1ee6d945b7504 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 29 Apr 2016 13:18:22 +0100 Subject: drm/i915: Fix ordering of sanitize ppgtt and sanitize execlists The i915.enable_ppgtt option depends upon the state of the i915.enable_execlists option, so we need to sanitize execlists first. Signed-off-by: Chris Wilson Reviewed-by: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/1461932305-14637-2-git-send-email-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_dma.c | 13 +++++++++++++ drivers/gpu/drm/i915/i915_drv.h | 2 ++ drivers/gpu/drm/i915/i915_gem.c | 3 --- drivers/gpu/drm/i915/i915_gem_gtt.c | 16 +++++----------- drivers/gpu/drm/i915/intel_lrc.c | 2 -- 5 files changed, 20 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_dma.c') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index f69330cf0118..c91387f1aedd 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -968,6 +968,19 @@ static void intel_device_info_runtime_init(struct drm_device *dev) info->has_subslice_pg ? "y" : "n"); DRM_DEBUG_DRIVER("has EU power gating: %s\n", info->has_eu_pg ? "y" : "n"); + + i915.enable_execlists = + intel_sanitize_enable_execlists(dev, i915.enable_execlists); + + /* + * i915.enable_ppgtt is read-only, so do an early pass to validate the + * user's requested state against the hardware/driver capabilities. We + * do this now so that we can print out any log messages once rather + * than every time we check intel_enable_ppgtt(). 
+ */ + i915.enable_ppgtt = + intel_sanitize_enable_ppgtt(dev, i915.enable_ppgtt); + DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt); } static void intel_init_dpio(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7d15930b69b1..02c619e7af05 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2753,6 +2753,8 @@ extern int i915_max_ioctl; extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state); extern int i915_resume_switcheroo(struct drm_device *dev); +int intel_sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt); + /* i915_dma.c */ void __printf(3, 4) __i915_printk(struct drm_i915_private *dev_priv, const char *level, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e9841eab5e35..902eea787907 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4904,9 +4904,6 @@ int i915_gem_init(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int ret; - i915.enable_execlists = intel_sanitize_enable_execlists(dev, - i915.enable_execlists); - mutex_lock(&dev->struct_mutex); if (!i915.enable_execlists) { diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 59a78f760b6b..364cf8236021 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -110,7 +110,7 @@ const struct i915_ggtt_view i915_ggtt_view_rotated = { .type = I915_GGTT_VIEW_ROTATED, }; -static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) +int intel_sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) { bool has_aliasing_ppgtt; bool has_full_ppgtt; @@ -123,12 +123,14 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) if (intel_vgpu_active(dev)) has_full_ppgtt = false; /* emulation is too hard */ + if (!has_aliasing_ppgtt) + return 0; + /* * We don't allow disabling PPGTT for gen9+ as it's a requirement for * execlists, the sole mechanism available to submit work. */ - if (INTEL_INFO(dev)->gen < 9 && - (enable_ppgtt == 0 || !has_aliasing_ppgtt)) + if (enable_ppgtt == 0 && INTEL_INFO(dev)->gen < 9) return 0; if (enable_ppgtt == 1) @@ -3219,14 +3221,6 @@ int i915_ggtt_init_hw(struct drm_device *dev) if (intel_iommu_gfx_mapped) DRM_INFO("VT-d active for gfx access\n"); #endif - /* - * i915.enable_ppgtt is read-only, so do an early pass to validate the - * user's requested state against the hardware/driver capabilities. We - * do this now so that we can print out any log messages once rather - * than every time we check intel_enable_ppgtt(). - */ - i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt); - DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt); return 0; diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index b2a9072c5753..d8763524319d 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -248,8 +248,6 @@ static int intel_lr_context_pin(struct intel_context *ctx, */ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists) { - WARN_ON(i915.enable_ppgtt == -1); - /* On platforms with execlist available, vGPU will only * support execlist mode, no ring buffer mode. 
*/ -- cgit v1.2.3-59-g8ed1b From 1ca3712ca3429a617ed6c5f87718e4f6fe4ae0c6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 4 May 2016 14:25:36 +0100 Subject: drm/i915: Report command parser version 0 if disabled If the command parser is not active, then it is appropriate to report it as operating at version 0 as no higher mode is supported. This greatly simplifies userspace querying for the command parser as we then do not need to second-guess when it will be active (a mixture of module parameters and generational support, which may change over time). v2: s/comand/command/ misspelling in comment Signed-off-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/1462368336-21230-1-git-send-email-chris@chris-wilson.co.uk Reviewed-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_cmd_parser.c | 15 ++++++++++++++- drivers/gpu/drm/i915/i915_dma.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 2 +- 3 files changed, 16 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_dma.c') diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index a337f33bec5b..69a1ba8ebdfb 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -1275,8 +1275,21 @@ int i915_parse_cmds(struct intel_engine_cs *engine, * * Return: the current version number of the cmd parser */ -int i915_cmd_parser_get_version(void) +int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv) { + struct intel_engine_cs *engine; + bool active = false; + + /* If the command parser is not enabled, report 0 - unsupported */ + for_each_engine(engine, dev_priv) { + if (i915_needs_cmd_parser(engine)) { + active = true; + break; + } + } + if (!active) + return 0; + /* * Command parser version history * diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index c91387f1aedd..ad7abe517700 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -204,7 +204,7 @@ static int i915_getparam(struct drm_device *dev, void *data, value = 1; break; case I915_PARAM_CMD_PARSER_VERSION: - value = i915_cmd_parser_get_version(); + value = i915_cmd_parser_get_version(dev_priv); break; case I915_PARAM_HAS_COHERENT_PHYS_GTT: value = 1; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 75a1675ea6ce..d5496aba1cd5 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3442,7 +3442,7 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone); const char *i915_cache_level_str(struct drm_i915_private *i915, int type); /* i915_cmd_parser.c */ -int i915_cmd_parser_get_version(void); +int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv); int i915_cmd_parser_init_ring(struct intel_engine_cs *engine); void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine); bool i915_needs_cmd_parser(struct intel_engine_cs *engine); -- cgit v1.2.3-59-g8ed1b From c033666a94b576185c4b5055f20536e13fada960 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 6 May 2016 15:40:21 +0100 Subject: drm/i915: Store a i915 backpointer from engine, and use it

   text    data     bss      dec     hex filename
6309351 3578714  696320 10584385  a18141 vmlinux
6308391 3578714  696320 10583425  a17d81 vmlinux

Almost 1KiB of code reduction.

v2: More s/INTEL_INFO()->gen/INTEL_GEN()/ and IS_GENx() conversions

   text    data     bss      dec     hex filename
6304579 3578778  696320 10579677  a16edd vmlinux
6303427 3578778  696320 10578525  a16a5d vmlinux

Now over 1KiB!
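The conversion itself is mechanical; a minimal sketch of the before/after pattern (function names here are hypothetical, the predicates and fields are the driver's own, as used throughout the diff below):

	/* Before: every query chased engine->dev (a drm_device) and then
	 * its dev_private to reach the drm_i915_private.
	 */
	static bool example_has_semaphores_old(struct intel_engine_cs *engine)
	{
		return INTEL_INFO(engine->dev)->gen >= 6;
	}

	/* After: engine->i915 points straight at the drm_i915_private, and
	 * INTEL_GEN()/IS_GENx() take it directly - one less pointer hop and
	 * no to_i915()/dev_private detour for the compiler to emit.
	 */
	static bool example_has_semaphores_new(struct intel_engine_cs *engine)
	{
		return INTEL_GEN(engine->i915) >= 6;
	}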
Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/1462545621-30125-3-git-send-email-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_cmd_parser.c | 12 +- drivers/gpu/drm/i915/i915_debugfs.c | 8 +- drivers/gpu/drm/i915/i915_dma.c | 9 +- drivers/gpu/drm/i915/i915_drv.c | 10 +- drivers/gpu/drm/i915/i915_drv.h | 35 +-- drivers/gpu/drm/i915/i915_gem.c | 47 ++-- drivers/gpu/drm/i915/i915_gem_context.c | 48 ++-- drivers/gpu/drm/i915/i915_gem_evict.c | 4 +- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 6 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 32 +-- drivers/gpu/drm/i915/i915_gem_render_state.c | 13 +- drivers/gpu/drm/i915/i915_gem_shrinker.c | 4 +- drivers/gpu/drm/i915/i915_gpu_error.c | 79 +++---- drivers/gpu/drm/i915/i915_irq.c | 80 ++++--- drivers/gpu/drm/i915/i915_trace.h | 36 ++- drivers/gpu/drm/i915/intel_display.c | 45 ++-- drivers/gpu/drm/i915/intel_drv.h | 4 +- drivers/gpu/drm/i915/intel_fbc.c | 2 +- drivers/gpu/drm/i915/intel_lrc.c | 139 ++++++------ drivers/gpu/drm/i915/intel_lrc.h | 3 +- drivers/gpu/drm/i915/intel_mocs.c | 2 +- drivers/gpu/drm/i915/intel_overlay.c | 3 +- drivers/gpu/drm/i915/intel_pm.c | 5 +- drivers/gpu/drm/i915/intel_ringbuffer.c | 328 ++++++++++++--------------- drivers/gpu/drm/i915/intel_ringbuffer.h | 8 +- drivers/gpu/drm/i915/intel_uncore.c | 14 +- 26 files changed, 459 insertions(+), 517 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_dma.c') diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index c3a760375905..d97f28bfa9db 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -751,12 +751,12 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine) int cmd_table_count; int ret; - if (!IS_GEN7(engine->dev)) + if (!IS_GEN7(engine->i915)) return 0; switch (engine->id) { case RCS: - if (IS_HASWELL(engine->dev)) { + if (IS_HASWELL(engine->i915)) { cmd_tables = hsw_render_ring_cmds; cmd_table_count = ARRAY_SIZE(hsw_render_ring_cmds); @@ -765,7 +765,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine) cmd_table_count = ARRAY_SIZE(gen7_render_cmds); } - if (IS_HASWELL(engine->dev)) { + if (IS_HASWELL(engine->i915)) { engine->reg_tables = hsw_render_reg_tables; engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables); } else { @@ -781,7 +781,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine) engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; break; case BCS: - if (IS_HASWELL(engine->dev)) { + if (IS_HASWELL(engine->i915)) { cmd_tables = hsw_blt_ring_cmds; cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds); } else { @@ -789,7 +789,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine) cmd_table_count = ARRAY_SIZE(gen7_blt_cmds); } - if (IS_HASWELL(engine->dev)) { + if (IS_HASWELL(engine->i915)) { engine->reg_tables = hsw_blt_reg_tables; engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables); } else { @@ -1036,7 +1036,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *engine) if (!engine->needs_cmd_parser) return false; - if (!USES_PPGTT(engine->dev)) + if (!USES_PPGTT(engine->i915)) return false; return (i915.enable_cmd_parser == 1); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 2b4256afc40e..85ae5423de1b 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1380,7 +1380,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) 
seqno[id] = engine->get_seqno(engine); } - i915_get_extra_instdone(dev, instdone); + i915_get_extra_instdone(dev_priv, instdone); intel_runtime_pm_put(dev_priv); @@ -3157,7 +3157,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused) enum intel_engine_id id; int j, ret; - if (!i915_semaphore_is_enabled(dev)) { + if (!i915_semaphore_is_enabled(dev_priv)) { seq_puts(m, "Semaphores are disabled\n"); return 0; } @@ -4757,7 +4757,7 @@ i915_wedged_set(void *data, u64 val) intel_runtime_pm_get(dev_priv); - i915_handle_error(dev, val, + i915_handle_error(dev_priv, val, "Manually setting wedged to %llu", val); intel_runtime_pm_put(dev_priv); @@ -4907,7 +4907,7 @@ i915_drop_caches_set(void *data, u64 val) } if (val & (DROP_RETIRE | DROP_ACTIVE)) - i915_gem_retire_requests(dev); + i915_gem_retire_requests(dev_priv); if (val & DROP_BOUND) i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND); diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index ad7abe517700..46ac1da64a09 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -186,7 +186,7 @@ static int i915_getparam(struct drm_device *dev, void *data, value = 1; break; case I915_PARAM_HAS_SEMAPHORES: - value = i915_semaphore_is_enabled(dev); + value = i915_semaphore_is_enabled(dev_priv); break; case I915_PARAM_HAS_PRIME_VMAP_FLUSH: value = 1; @@ -970,7 +970,8 @@ static void intel_device_info_runtime_init(struct drm_device *dev) info->has_eu_pg ? "y" : "n"); i915.enable_execlists = - intel_sanitize_enable_execlists(dev, i915.enable_execlists); + intel_sanitize_enable_execlists(dev_priv, + i915.enable_execlists); /* * i915.enable_ppgtt is read-only, so do an early pass to validate the @@ -979,7 +980,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev) * than every time we check intel_enable_ppgtt(). */ i915.enable_ppgtt = - intel_sanitize_enable_ppgtt(dev, i915.enable_ppgtt); + intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt); DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt); } @@ -1345,7 +1346,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) * Notify a valid surface after modesetting, * when running inside a VM. 
*/ - if (intel_vgpu_active(dev)) + if (intel_vgpu_active(dev_priv)) I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY); i915_setup_sysfs(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index ca9bea6ce552..a116730c39aa 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -530,9 +530,9 @@ void intel_detect_pch(struct drm_device *dev) pci_dev_put(pch); } -bool i915_semaphore_is_enabled(struct drm_device *dev) +bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv) { - if (INTEL_INFO(dev)->gen < 6) + if (INTEL_GEN(dev_priv) < 6) return false; if (i915.semaphores >= 0) @@ -544,7 +544,7 @@ bool i915_semaphore_is_enabled(struct drm_device *dev) #ifdef CONFIG_INTEL_IOMMU /* Enable semaphores on SNB when IO remapping is off */ - if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) + if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) return false; #endif @@ -914,9 +914,9 @@ int i915_resume_switcheroo(struct drm_device *dev) * - re-init interrupt state * - re-init display */ -int i915_reset(struct drm_device *dev) +int i915_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_device *dev = dev_priv->dev; struct i915_gpu_error *error = &dev_priv->gpu_error; unsigned reset_counter; int ret; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4c02d18bdd1d..26e7de415966 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2754,7 +2754,8 @@ extern int i915_max_ioctl; extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state); extern int i915_resume_switcheroo(struct drm_device *dev); -int intel_sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt); +int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, + int enable_ppgtt); /* i915_dma.c */ void __printf(3, 4) @@ -2778,7 +2779,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, #endif extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask); extern bool intel_has_gpu_reset(struct drm_device *dev); -extern int i915_reset(struct drm_device *dev); +extern int i915_reset(struct drm_i915_private *dev_priv); extern int intel_guc_reset(struct drm_i915_private *dev_priv); extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); @@ -2796,9 +2797,10 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port); /* i915_irq.c */ -void i915_queue_hangcheck(struct drm_device *dev); +void i915_queue_hangcheck(struct drm_i915_private *dev_priv); __printf(3, 4) -void i915_handle_error(struct drm_device *dev, u32 engine_mask, +void i915_handle_error(struct drm_i915_private *dev_priv, + u32 engine_mask, const char *fmt, ...); extern void intel_irq_init(struct drm_i915_private *dev_priv); @@ -2828,9 +2830,9 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv); void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); -static inline bool intel_vgpu_active(struct drm_device *dev) +static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv) { - return to_i915(dev)->vgpu.active; + return dev_priv->vgpu.active; } void @@ -3098,13 +3100,13 @@ static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req, req->seqno); 
} -int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); +int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno); int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); struct drm_i915_gem_request * i915_gem_find_active_request(struct intel_engine_cs *engine); -bool i915_gem_retire_requests(struct drm_device *dev); +bool i915_gem_retire_requests(struct drm_i915_private *dev_priv); void i915_gem_retire_requests_ring(struct intel_engine_cs *engine); static inline u32 i915_reset_counter(struct i915_gpu_error *error) @@ -3351,9 +3353,9 @@ int __must_check i915_gem_evict_for_vma(struct i915_vma *target); int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); /* belongs in i915_gem_gtt.h */ -static inline void i915_gem_chipset_flush(struct drm_device *dev) +static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv) { - if (INTEL_INFO(dev)->gen < 6) + if (INTEL_GEN(dev_priv) < 6) intel_gtt_chipset_flush(); } @@ -3432,14 +3434,15 @@ static inline void i915_error_state_buf_release( { kfree(eb->buf); } -void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, +void i915_capture_error_state(struct drm_i915_private *dev_priv, + u32 engine_mask, const char *error_msg); void i915_error_state_get(struct drm_device *dev, struct i915_error_state_file_priv *error_priv); void i915_error_state_put(struct i915_error_state_file_priv *error_priv); void i915_destroy_error_state(struct drm_device *dev); -void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone); +void i915_get_extra_instdone(struct drm_i915_private *dev_priv, uint32_t *instdone); const char *i915_cache_level_str(struct drm_i915_private *i915, int type); /* i915_cmd_parser.c */ @@ -3550,18 +3553,20 @@ extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, extern void intel_detect_pch(struct drm_device *dev); extern int intel_enable_rc6(const struct drm_device *dev); -extern bool i915_semaphore_is_enabled(struct drm_device *dev); +extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv); int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data, struct drm_file *file); /* overlay */ -extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); +extern struct intel_overlay_error_state * +intel_overlay_capture_error_state(struct drm_i915_private *dev_priv); extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, struct intel_overlay_error_state *error); -extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev); +extern struct intel_display_error_state * +intel_display_capture_error_state(struct drm_i915_private *dev_priv); extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, struct drm_device *dev, struct intel_display_error_state *error); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 2dbf6f6c5b34..d986a3598827 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -177,7 +177,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) vaddr += PAGE_SIZE; } - i915_gem_chipset_flush(obj->base.dev); + i915_gem_chipset_flush(to_i915(obj->base.dev)); st = kmalloc(sizeof(*st), GFP_KERNEL); if (st == NULL) @@ -347,7 +347,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, } 
drm_clflush_virt_range(vaddr, args->size); - i915_gem_chipset_flush(dev); + i915_gem_chipset_flush(to_i915(dev)); out: intel_fb_obj_flush(obj, false, ORIGIN_CPU); @@ -1006,7 +1006,7 @@ out: } if (needs_clflush_after) - i915_gem_chipset_flush(dev); + i915_gem_chipset_flush(to_i915(dev)); else obj->cache_dirty = true; @@ -1230,8 +1230,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, struct intel_rps_client *rps) { struct intel_engine_cs *engine = i915_gem_request_get_engine(req); - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = req->i915; const bool irq_test_in_progress = ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine); int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; @@ -1429,7 +1428,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req) struct intel_engine_cs *engine = req->engine; struct drm_i915_gem_request *tmp; - lockdep_assert_held(&engine->dev->struct_mutex); + lockdep_assert_held(&engine->i915->dev->struct_mutex); if (list_empty(&req->list)) return; @@ -2505,9 +2504,8 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring) } static int -i915_gem_init_seqno(struct drm_device *dev, u32 seqno) +i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; int ret; @@ -2517,7 +2515,7 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno) if (ret) return ret; } - i915_gem_retire_requests(dev); + i915_gem_retire_requests(dev_priv); /* Finally reset hw state */ for_each_engine(engine, dev_priv) @@ -2537,7 +2535,7 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno) /* HWS page needs to be set less than what we * will inject to ring */ - ret = i915_gem_init_seqno(dev, seqno - 1); + ret = i915_gem_init_seqno(dev_priv, seqno - 1); if (ret) return ret; @@ -2553,13 +2551,11 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno) } int -i915_gem_get_seqno(struct drm_device *dev, u32 *seqno) +i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno) { - struct drm_i915_private *dev_priv = dev->dev_private; - /* reserve 0 for non-seqno */ if (dev_priv->next_seqno == 0) { - int ret = i915_gem_init_seqno(dev, 0); + int ret = i915_gem_init_seqno(dev_priv, 0); if (ret) return ret; @@ -2657,7 +2653,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, /* Not allowed to fail! 
*/ WARN(ret, "emit|add_request failed: %d!\n", ret); - i915_queue_hangcheck(engine->dev); + i915_queue_hangcheck(engine->i915); queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, @@ -2731,7 +2727,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine, struct intel_context *ctx, struct drm_i915_gem_request **req_out) { - struct drm_i915_private *dev_priv = to_i915(engine->dev); + struct drm_i915_private *dev_priv = engine->i915; unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error); struct drm_i915_gem_request *req; int ret; @@ -2753,7 +2749,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine, if (req == NULL) return -ENOMEM; - ret = i915_gem_get_seqno(engine->dev, &req->seqno); + ret = i915_gem_get_seqno(engine->i915, &req->seqno); if (ret) goto err; @@ -2810,7 +2806,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, int err; if (ctx == NULL) - ctx = to_i915(engine->dev)->kernel_context; + ctx = engine->i915->kernel_context; err = __i915_gem_request_alloc(engine, ctx, &req); return err ? ERR_PTR(err) : req; } @@ -2985,9 +2981,8 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine) } bool -i915_gem_retire_requests(struct drm_device *dev) +i915_gem_retire_requests(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; bool idle = true; @@ -3020,7 +3015,7 @@ i915_gem_retire_work_handler(struct work_struct *work) /* Come back later if the device is busy... */ idle = false; if (mutex_trylock(&dev->struct_mutex)) { - idle = i915_gem_retire_requests(dev); + idle = i915_gem_retire_requests(dev_priv); mutex_unlock(&dev->struct_mutex); } if (!idle) @@ -3189,7 +3184,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj, if (i915_gem_request_completed(from_req, true)) return 0; - if (!i915_semaphore_is_enabled(obj->base.dev)) { + if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) { struct drm_i915_private *i915 = to_i915(obj->base.dev); ret = __i915_wait_request(from_req, i915->mm.interruptible, @@ -3722,7 +3717,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) return; if (i915_gem_clflush_object(obj, obj->pin_display)) - i915_gem_chipset_flush(obj->base.dev); + i915_gem_chipset_flush(to_i915(obj->base.dev)); old_write_domain = obj->base.write_domain; obj->base.write_domain = 0; @@ -3920,7 +3915,7 @@ out: obj->base.write_domain != I915_GEM_DOMAIN_CPU && cpu_write_needs_clflush(obj)) { if (i915_gem_clflush_object(obj, true)) - i915_gem_chipset_flush(obj->base.dev); + i915_gem_chipset_flush(to_i915(obj->base.dev)); } return 0; @@ -4698,7 +4693,7 @@ i915_gem_suspend(struct drm_device *dev) if (ret) goto err; - i915_gem_retire_requests(dev); + i915_gem_retire_requests(dev_priv); i915_gem_stop_engines(dev); i915_gem_context_lost(dev_priv); @@ -4989,7 +4984,7 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv) else dev_priv->num_fence_regs = 8; - if (intel_vgpu_active(dev)) + if (intel_vgpu_active(dev_priv)) dev_priv->num_fence_regs = I915_READ(vgtif_reg(avail_rs.fence_num)); diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index b1b704c2c001..0fffebcc0ace 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -99,28 +99,27 @@ #define GEN6_CONTEXT_ALIGN (64<<10) #define GEN7_CONTEXT_ALIGN 4096 -static size_t get_context_alignment(struct drm_device *dev) +static size_t get_context_alignment(struct drm_i915_private *dev_priv) { - if 
(IS_GEN6(dev)) + if (IS_GEN6(dev_priv)) return GEN6_CONTEXT_ALIGN; return GEN7_CONTEXT_ALIGN; } -static int get_context_size(struct drm_device *dev) +static int get_context_size(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; int ret; u32 reg; - switch (INTEL_INFO(dev)->gen) { + switch (INTEL_GEN(dev_priv)) { case 6: reg = I915_READ(CXT_SIZE); ret = GEN6_CXT_TOTAL_SIZE(reg) * 64; break; case 7: reg = I915_READ(GEN7_CXT_SIZE); - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev_priv)) ret = HSW_CXT_TOTAL_SIZE; else ret = GEN7_CXT_TOTAL_SIZE(reg) * 64; @@ -224,7 +223,7 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out) * Flush any pending retires to hopefully release some * stale contexts and try again. */ - i915_gem_retire_requests(dev_priv->dev); + i915_gem_retire_requests(dev_priv); ret = ida_simple_get(&dev_priv->context_hw_ida, 0, MAX_CONTEXT_HW_ID, GFP_KERNEL); if (ret < 0) @@ -320,7 +319,7 @@ i915_gem_create_context(struct drm_device *dev, * context. */ ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state, - get_context_alignment(dev), 0); + get_context_alignment(to_i915(dev)), 0); if (ret) { DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret); goto err_destroy; @@ -389,7 +388,8 @@ int i915_gem_context_init(struct drm_device *dev) if (WARN_ON(dev_priv->kernel_context)) return 0; - if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) { + if (intel_vgpu_active(dev_priv) && + HAS_LOGICAL_RING_CONTEXTS(dev_priv)) { if (!i915.enable_execlists) { DRM_INFO("Only EXECLIST mode is supported in vgpu.\n"); return -EINVAL; @@ -404,8 +404,9 @@ int i915_gem_context_init(struct drm_device *dev) /* NB: intentionally left blank. We will allocate our own * backing objects as we need them, thank you very much */ dev_priv->hw_context_size = 0; - } else if (HAS_HW_CONTEXTS(dev)) { - dev_priv->hw_context_size = round_up(get_context_size(dev), 4096); + } else if (HAS_HW_CONTEXTS(dev_priv)) { + dev_priv->hw_context_size = + round_up(get_context_size(dev_priv), 4096); if (dev_priv->hw_context_size > (1<<20)) { DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n", dev_priv->hw_context_size); @@ -509,12 +510,13 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id) static inline int mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) { + struct drm_i915_private *dev_priv = req->i915; struct intel_engine_cs *engine = req->engine; u32 flags = hw_flags | MI_MM_SPACE_GTT; const int num_rings = /* Use an extended w/a on ivb+ if signalling from other rings */ - i915_semaphore_is_enabled(engine->dev) ? - hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 : + i915_semaphore_is_enabled(dev_priv) ? + hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1 : 0; int len, ret; @@ -523,21 +525,21 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) * explicitly, so we rely on the value at ring init, stored in * itlb_before_ctx_switch. 
*/ - if (IS_GEN6(engine->dev)) { + if (IS_GEN6(dev_priv)) { ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0); if (ret) return ret; } /* These flags are for resource streamer on HSW+ */ - if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8) + if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8) flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN); - else if (INTEL_INFO(engine->dev)->gen < 8) + else if (INTEL_GEN(dev_priv) < 8) flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN); len = 4; - if (INTEL_INFO(engine->dev)->gen >= 7) + if (INTEL_GEN(dev_priv) >= 7) len += 2 + (num_rings ? 4*num_rings + 6 : 0); ret = intel_ring_begin(req, len); @@ -545,14 +547,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) return ret; /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */ - if (INTEL_INFO(engine->dev)->gen >= 7) { + if (INTEL_GEN(dev_priv) >= 7) { intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE); if (num_rings) { struct intel_engine_cs *signaller; intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(num_rings)); - for_each_engine(signaller, to_i915(engine->dev)) { + for_each_engine(signaller, dev_priv) { if (signaller == engine) continue; @@ -575,14 +577,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) */ intel_ring_emit(engine, MI_NOOP); - if (INTEL_INFO(engine->dev)->gen >= 7) { + if (INTEL_GEN(dev_priv) >= 7) { if (num_rings) { struct intel_engine_cs *signaller; i915_reg_t last_reg = {}; /* keep gcc quiet */ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(num_rings)); - for_each_engine(signaller, to_i915(engine->dev)) { + for_each_engine(signaller, dev_priv) { if (signaller == engine) continue; @@ -673,7 +675,7 @@ needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt, if (engine->id != RCS) return true; - if (INTEL_INFO(engine->dev)->gen < 8) + if (INTEL_GEN(engine->i915) < 8) return true; return false; @@ -710,7 +712,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) /* Trying to pin first makes error handling easier. 
*/ ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state, - get_context_alignment(engine->dev), + get_context_alignment(engine->i915), 0); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index ea1f8d1bd228..b144c3f5c650 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -154,7 +154,7 @@ none: if (ret) return ret; - i915_gem_retire_requests(dev); + i915_gem_retire_requests(to_i915(dev)); goto search_again; } @@ -265,7 +265,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle) if (ret) return ret; - i915_gem_retire_requests(vm->dev); + i915_gem_retire_requests(to_i915(vm->dev)); WARN_ON(!list_empty(&vm->active_list)); } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index e0ee5d1ac372..a54a243ccaac 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -724,7 +724,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine, struct i915_address_space *vm; struct list_head ordered_vmas; struct list_head pinned_vmas; - bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4; + bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4; int retry; i915_gem_retire_requests_ring(engine); @@ -965,7 +965,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req, } if (flush_chipset) - i915_gem_chipset_flush(req->engine->dev); + i915_gem_chipset_flush(req->engine->i915); if (flush_domains & I915_GEM_DOMAIN_GTT) wmb(); @@ -1119,7 +1119,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas, if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { i915_gem_request_assign(&obj->last_fenced_req, req); if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { - struct drm_i915_private *dev_priv = to_i915(engine->dev); + struct drm_i915_private *dev_priv = engine->i915; list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list, &dev_priv->mm.fence_list); } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index d284b17af431..667f0e859671 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -110,17 +110,19 @@ const struct i915_ggtt_view i915_ggtt_view_rotated = { .type = I915_GGTT_VIEW_ROTATED, }; -int intel_sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) +int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, + int enable_ppgtt) { bool has_aliasing_ppgtt; bool has_full_ppgtt; bool has_full_48bit_ppgtt; - has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6; - has_full_ppgtt = INTEL_INFO(dev)->gen >= 7; - has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9; + has_aliasing_ppgtt = INTEL_GEN(dev_priv) >= 6; + has_full_ppgtt = INTEL_GEN(dev_priv) >= 7; + has_full_48bit_ppgtt = + IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9; - if (intel_vgpu_active(dev)) + if (intel_vgpu_active(dev_priv)) has_full_ppgtt = false; /* emulation is too hard */ if (!has_aliasing_ppgtt) @@ -130,7 +132,7 @@ int intel_sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) * We don't allow disabling PPGTT for gen9+ as it's a requirement for * execlists, the sole mechanism available to submit work. 
*/ - if (enable_ppgtt == 0 && INTEL_INFO(dev)->gen < 9) + if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9) return 0; if (enable_ppgtt == 1) @@ -144,19 +146,19 @@ int intel_sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) #ifdef CONFIG_INTEL_IOMMU /* Disable ppgtt on SNB if VT-d is on. */ - if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) { + if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) { DRM_INFO("Disabling PPGTT because VT-d is on\n"); return 0; } #endif /* Early VLV doesn't have this */ - if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) { + if (IS_VALLEYVIEW(dev_priv) && dev_priv->dev->pdev->revision < 0xb) { DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n"); return 0; } - if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists) + if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists) return has_full_48bit_ppgtt ? 3 : 2; else return has_aliasing_ppgtt ? 1 : 0; @@ -994,7 +996,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm) { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - if (intel_vgpu_active(vm->dev)) + if (intel_vgpu_active(to_i915(vm->dev))) gen8_ppgtt_notify_vgt(ppgtt, false); if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) @@ -1545,14 +1547,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt) 0, 0, GEN8_PML4E_SHIFT); - if (intel_vgpu_active(ppgtt->base.dev)) { + if (intel_vgpu_active(to_i915(ppgtt->base.dev))) { ret = gen8_preallocate_top_level_pdps(ppgtt); if (ret) goto free_scratch; } } - if (intel_vgpu_active(ppgtt->base.dev)) + if (intel_vgpu_active(to_i915(ppgtt->base.dev))) gen8_ppgtt_notify_vgt(ppgtt, true); return 0; @@ -2080,7 +2082,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) } else BUG(); - if (intel_vgpu_active(dev)) + if (intel_vgpu_active(dev_priv)) ppgtt->switch_mm = vgpu_mm_switch; ret = gen6_ppgtt_alloc(ppgtt); @@ -2729,7 +2731,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev, i915_address_space_init(&ggtt->base, dev_priv); ggtt->base.total += PAGE_SIZE; - if (intel_vgpu_active(dev)) { + if (intel_vgpu_active(dev_priv)) { ret = intel_vgt_balloon(dev); if (ret) return ret; @@ -2833,7 +2835,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev) i915_gem_cleanup_stolen(dev); if (drm_mm_initialized(&ggtt->base.mm)) { - if (intel_vgpu_active(dev)) + if (intel_vgpu_active(dev_priv)) intel_vgt_deballoon(); drm_mm_takedown(&ggtt->base.mm); diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 423cf5144bcb..7c93327b70fe 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c @@ -29,7 +29,7 @@ #include "intel_renderstate.h" static const struct intel_renderstate_rodata * -render_state_get_rodata(struct drm_device *dev, const int gen) +render_state_get_rodata(const int gen) { switch (gen) { case 6: @@ -45,19 +45,20 @@ render_state_get_rodata(struct drm_device *dev, const int gen) return NULL; } -static int render_state_init(struct render_state *so, struct drm_device *dev) +static int render_state_init(struct render_state *so, + struct drm_i915_private *dev_priv) { int ret; - so->gen = INTEL_INFO(dev)->gen; - so->rodata = render_state_get_rodata(dev, so->gen); + so->gen = INTEL_GEN(dev_priv); + so->rodata = render_state_get_rodata(so->gen); if (so->rodata == NULL) return 0; if (so->rodata->batch_items * 4 > 4096) return -EINVAL; - so->obj = i915_gem_object_create(dev, 4096); + so->obj = i915_gem_object_create(dev_priv->dev, 4096); if (IS_ERR(so->obj)) return 
PTR_ERR(so->obj); @@ -177,7 +178,7 @@ int i915_gem_render_state_prepare(struct intel_engine_cs *engine, if (WARN_ON(engine->id != RCS)) return -ENOENT; - ret = render_state_init(so, engine->dev); + ret = render_state_init(so, engine->i915); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index 79004f356174..538c30499848 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -131,7 +131,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, unsigned long count = 0; trace_i915_gem_shrink(dev_priv, target, flags); - i915_gem_retire_requests(dev_priv->dev); + i915_gem_retire_requests(dev_priv); /* * Unbinding of objects will require HW access; Let us not wake the @@ -209,7 +209,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, if (flags & I915_SHRINK_BOUND) intel_runtime_pm_put(dev_priv); - i915_gem_retire_requests(dev_priv->dev); + i915_gem_retire_requests(dev_priv); return count; } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 89725c9efc25..0f6002cb86f4 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -824,19 +824,18 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv, return error_code; } -static void i915_gem_record_fences(struct drm_device *dev, +static void i915_gem_record_fences(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error) { - struct drm_i915_private *dev_priv = dev->dev_private; int i; - if (IS_GEN3(dev) || IS_GEN2(dev)) { + if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) { for (i = 0; i < dev_priv->num_fence_regs; i++) error->fence[i] = I915_READ(FENCE_REG(i)); - } else if (IS_GEN5(dev) || IS_GEN4(dev)) { + } else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) { for (i = 0; i < dev_priv->num_fence_regs; i++) error->fence[i] = I915_READ64(FENCE_REG_965_LO(i)); - } else if (INTEL_INFO(dev)->gen >= 6) { + } else if (INTEL_GEN(dev_priv) >= 6) { for (i = 0; i < dev_priv->num_fence_regs; i++) error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i)); } @@ -851,7 +850,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv, struct intel_engine_cs *to; enum intel_engine_id id; - if (!i915_semaphore_is_enabled(dev_priv->dev)) + if (!i915_semaphore_is_enabled(dev_priv)) return; if (!error->semaphore_obj) @@ -893,31 +892,29 @@ static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv, } } -static void i915_record_ring_state(struct drm_device *dev, +static void i915_record_ring_state(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error, struct intel_engine_cs *engine, struct drm_i915_error_ring *ering) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (INTEL_INFO(dev)->gen >= 6) { + if (INTEL_GEN(dev_priv) >= 6) { ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base)); ering->fault_reg = I915_READ(RING_FAULT_REG(engine)); - if (INTEL_INFO(dev)->gen >= 8) + if (INTEL_GEN(dev_priv) >= 8) gen8_record_semaphore_state(dev_priv, error, engine, ering); else gen6_record_semaphore_state(dev_priv, engine, ering); } - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base)); ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base)); ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base)); ering->instps = 
I915_READ(RING_INSTPS(engine->mmio_base)); ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base)); - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32; ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32; } @@ -939,10 +936,10 @@ static void i915_record_ring_state(struct drm_device *dev, ering->tail = I915_READ_TAIL(engine); ering->ctl = I915_READ_CTL(engine); - if (I915_NEED_GFX_HWS(dev)) { + if (I915_NEED_GFX_HWS(dev_priv)) { i915_reg_t mmio; - if (IS_GEN7(dev)) { + if (IS_GEN7(dev_priv)) { switch (engine->id) { default: case RCS: @@ -958,7 +955,7 @@ static void i915_record_ring_state(struct drm_device *dev, mmio = VEBOX_HWS_PGA_GEN7; break; } - } else if (IS_GEN6(engine->dev)) { + } else if (IS_GEN6(engine->i915)) { mmio = RING_HWS_PGA_GEN6(engine->mmio_base); } else { /* XXX: gen8 returns to sanity */ @@ -971,18 +968,18 @@ static void i915_record_ring_state(struct drm_device *dev, ering->hangcheck_score = engine->hangcheck.score; ering->hangcheck_action = engine->hangcheck.action; - if (USES_PPGTT(dev)) { + if (USES_PPGTT(dev_priv)) { int i; ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine)); - if (IS_GEN6(dev)) + if (IS_GEN6(dev_priv)) ering->vm_info.pp_dir_base = I915_READ(RING_PP_DIR_BASE_READ(engine)); - else if (IS_GEN7(dev)) + else if (IS_GEN7(dev_priv)) ering->vm_info.pp_dir_base = I915_READ(RING_PP_DIR_BASE(engine)); - else if (INTEL_INFO(dev)->gen >= 8) + else if (INTEL_GEN(dev_priv) >= 8) for (i = 0; i < 4; i++) { ering->vm_info.pdp[i] = I915_READ(GEN8_RING_PDP_UDW(engine, i)); @@ -998,7 +995,7 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine, struct drm_i915_error_state *error, struct drm_i915_error_ring *ering) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; struct drm_i915_gem_object *obj; /* Currently render ring is the only HW context user */ @@ -1016,10 +1013,9 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine, } } -static void i915_gem_record_rings(struct drm_device *dev, +static void i915_gem_record_rings(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error) { - struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; struct drm_i915_gem_request *request; int i, count; @@ -1030,12 +1026,12 @@ static void i915_gem_record_rings(struct drm_device *dev, error->ring[i].pid = -1; - if (engine->dev == NULL) + if (!intel_engine_initialized(engine)) continue; error->ring[i].valid = true; - i915_record_ring_state(dev, error, engine, &error->ring[i]); + i915_record_ring_state(dev_priv, error, engine, &error->ring[i]); request = i915_gem_find_active_request(engine); if (request) { @@ -1301,15 +1297,14 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv, error->eir = I915_READ(EIR); error->pgtbl_er = I915_READ(PGTBL_ER); - i915_get_extra_instdone(dev, error->extra_instdone); + i915_get_extra_instdone(dev_priv, error->extra_instdone); } -static void i915_error_capture_msg(struct drm_device *dev, +static void i915_error_capture_msg(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error, u32 engine_mask, const char *error_msg) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 ecode; int ring_id = -1, len; @@ -1317,7 +1312,7 @@ static void i915_error_capture_msg(struct drm_device *dev, len = scnprintf(error->error_msg, 
sizeof(error->error_msg), "GPU HANG: ecode %d:%d:0x%08x", - INTEL_INFO(dev)->gen, ring_id, ecode); + INTEL_GEN(dev_priv), ring_id, ecode); if (ring_id != -1 && error->ring[ring_id].pid != -1) len += scnprintf(error->error_msg + len, @@ -1352,11 +1347,11 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv, * out a structure which becomes available in debugfs for user level tools * to pick up. */ -void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, +void i915_capture_error_state(struct drm_i915_private *dev_priv, + u32 engine_mask, const char *error_msg) { static bool warned; - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_error_state *error; unsigned long flags; @@ -1372,15 +1367,15 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, i915_capture_gen_state(dev_priv, error); i915_capture_reg_state(dev_priv, error); i915_gem_capture_buffers(dev_priv, error); - i915_gem_record_fences(dev, error); - i915_gem_record_rings(dev, error); + i915_gem_record_fences(dev_priv, error); + i915_gem_record_rings(dev_priv, error); do_gettimeofday(&error->time); - error->overlay = intel_overlay_capture_error_state(dev); - error->display = intel_display_capture_error_state(dev); + error->overlay = intel_overlay_capture_error_state(dev_priv); + error->display = intel_display_capture_error_state(dev_priv); - i915_error_capture_msg(dev, error, engine_mask, error_msg); + i915_error_capture_msg(dev_priv, error, engine_mask, error_msg); DRM_INFO("%s\n", error->error_msg); spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); @@ -1400,7 +1395,7 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n"); DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n"); DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n"); - DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index); + DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev_priv->dev->primary->index); warned = true; } } @@ -1450,17 +1445,17 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type) } /* NB: please notice the memset */ -void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone) +void i915_get_extra_instdone(struct drm_i915_private *dev_priv, + uint32_t *instdone) { - struct drm_i915_private *dev_priv = dev->dev_private; memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); - if (IS_GEN2(dev) || IS_GEN3(dev)) + if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv)) instdone[0] = I915_READ(GEN2_INSTDONE); - else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) { + else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) { instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE)); instdone[1] = I915_READ(GEN4_INSTDONE1); - } else if (INTEL_INFO(dev)->gen >= 7) { + } else if (INTEL_GEN(dev_priv) >= 7) { instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE)); instdone[1] = I915_READ(GEN7_SC_INSTDONE); instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 102804710886..a163037ddbd8 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2537,15 +2537,15 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv, * Fire an error uevent so userspace can see that a hang or 
error * was detected. */ -static void i915_reset_and_wakeup(struct drm_device *dev) +static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct kobject *kobj = &dev_priv->dev->primary->kdev->kobj; char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; int ret; - kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); + kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); /* * Note that there's only one work item which does gpu resets, so we @@ -2559,8 +2559,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev) */ if (i915_reset_in_progress(&dev_priv->gpu_error)) { DRM_DEBUG_DRIVER("resetting chip\n"); - kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, - reset_event); + kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); /* * In most cases it's guaranteed that we get here with an RPM @@ -2571,7 +2570,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev) */ intel_runtime_pm_get(dev_priv); - intel_prepare_reset(dev); + intel_prepare_reset(dev_priv); /* * All state reset _must_ be completed before we update the @@ -2579,14 +2578,14 @@ static void i915_reset_and_wakeup(struct drm_device *dev) * pending state and not properly drop locks, resulting in * deadlocks with the reset work. */ - ret = i915_reset(dev); + ret = i915_reset(dev_priv); - intel_finish_reset(dev); + intel_finish_reset(dev_priv); intel_runtime_pm_put(dev_priv); if (ret == 0) - kobject_uevent_env(&dev->primary->kdev->kobj, + kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); /* @@ -2597,9 +2596,8 @@ static void i915_reset_and_wakeup(struct drm_device *dev) } } -static void i915_report_and_clear_eir(struct drm_device *dev) +static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; uint32_t instdone[I915_NUM_INSTDONE_REG]; u32 eir = I915_READ(EIR); int pipe, i; @@ -2609,9 +2607,9 @@ static void i915_report_and_clear_eir(struct drm_device *dev) pr_err("render error detected, EIR: 0x%08x\n", eir); - i915_get_extra_instdone(dev, instdone); + i915_get_extra_instdone(dev_priv, instdone); - if (IS_G4X(dev)) { + if (IS_G4X(dev_priv)) { if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { u32 ipeir = I915_READ(IPEIR_I965); @@ -2633,7 +2631,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) } } - if (!IS_GEN2(dev)) { + if (!IS_GEN2(dev_priv)) { if (eir & I915_ERROR_PAGE_TABLE) { u32 pgtbl_err = I915_READ(PGTBL_ER); pr_err("page table error\n"); @@ -2655,7 +2653,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); for (i = 0; i < ARRAY_SIZE(instdone); i++) pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); - if (INTEL_INFO(dev)->gen < 4) { + if (INTEL_GEN(dev_priv) < 4) { u32 ipeir = I915_READ(IPEIR); pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); @@ -2699,10 +2697,10 @@ static void i915_report_and_clear_eir(struct drm_device *dev) * so userspace knows something bad happened (should trigger collection * of a ring dump etc.). */ -void i915_handle_error(struct drm_device *dev, u32 engine_mask, +void i915_handle_error(struct drm_i915_private *dev_priv, + u32 engine_mask, const char *fmt, ...) 
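For reference, the notification sequence that results from the hunk above, with the kobject now cached once instead of being re-derived from drm_device at every step. A minimal sketch using only names visible in the hunk (error handling and the runtime-PM bracket elided):

	struct kobject *kobj = &dev_priv->dev->primary->kdev->kobj;

	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);	/* hang detected */
	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
		if (i915_reset(dev_priv) == 0)	/* 0: reset succeeded */
			kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
	}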
{ - struct drm_i915_private *dev_priv = dev->dev_private; va_list args; char error_msg[80]; @@ -2710,8 +2708,8 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask, vscnprintf(error_msg, sizeof(error_msg), fmt, args); va_end(args); - i915_capture_error_state(dev, engine_mask, error_msg); - i915_report_and_clear_eir(dev); + i915_capture_error_state(dev_priv, engine_mask, error_msg); + i915_report_and_clear_eir(dev_priv); if (engine_mask) { atomic_or(I915_RESET_IN_PROGRESS_FLAG, @@ -2733,7 +2731,7 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask, i915_error_wake_up(dev_priv, false); } - i915_reset_and_wakeup(dev); + i915_reset_and_wakeup(dev_priv); } /* Called from drm generic code, passed 'crtc' which @@ -2851,9 +2849,9 @@ ring_idle(struct intel_engine_cs *engine, u32 seqno) } static bool -ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) +ipehr_is_semaphore_wait(struct drm_i915_private *dev_priv, u32 ipehr) { - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { return (ipehr >> 23) == 0x1c; } else { ipehr &= ~MI_SEMAPHORE_SYNC_MASK; @@ -2866,10 +2864,10 @@ static struct intel_engine_cs * semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr, u64 offset) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; struct intel_engine_cs *signaller; - if (INTEL_INFO(dev_priv)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { for_each_engine(signaller, dev_priv) { if (engine == signaller) continue; @@ -2898,7 +2896,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr, static struct intel_engine_cs * semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; u32 cmd, ipehr, head; u64 offset = 0; int i, backwards; @@ -2924,7 +2922,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) return NULL; ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); - if (!ipehr_is_semaphore_wait(engine->dev, ipehr)) + if (!ipehr_is_semaphore_wait(engine->i915, ipehr)) return NULL; /* @@ -2936,7 +2934,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) * ringbuffer itself. */ head = I915_READ_HEAD(engine) & HEAD_ADDR; - backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4; + backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4; for (i = backwards; i; --i) { /* @@ -2958,7 +2956,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) return NULL; *seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1; - if (INTEL_INFO(engine->dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { offset = ioread32(engine->buffer->virtual_start + head + 12); offset <<= 32; offset = ioread32(engine->buffer->virtual_start + head + 8); @@ -2968,7 +2966,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) static int semaphore_passed(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; struct intel_engine_cs *signaller; u32 seqno; @@ -3010,7 +3008,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine) if (engine->id != RCS) return true; - i915_get_extra_instdone(engine->dev, instdone); + i915_get_extra_instdone(engine->i915, instdone); /* There might be unstable subunit states even when * actual head is not moving. 
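The comment continues below; as a hedged sketch, the filtering it describes can be done by OR-accumulating INSTDONE samples and counting only newly set "done" bits as progress (the hangcheck.instdone snapshot array is assumed here; it is not part of this hunk):

	bool stuck = true;
	int i;

	for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
		const u32 tmp = instdone[i] | engine->hangcheck.instdone[i];

		if (tmp != engine->hangcheck.instdone[i])
			stuck = false;	/* some subunit made progress */

		engine->hangcheck.instdone[i] = tmp;	/* accumulate for the next sample */
	}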
Filter out the unstable ones by @@ -3051,8 +3049,7 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd) static enum intel_ring_hangcheck_action ring_stuck(struct intel_engine_cs *engine, u64 acthd) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; enum intel_ring_hangcheck_action ha; u32 tmp; @@ -3060,7 +3057,7 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd) if (ha != HANGCHECK_HUNG) return ha; - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) return HANGCHECK_HUNG; /* Is the chip hanging on a WAIT_FOR_EVENT? @@ -3070,19 +3067,19 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd) */ tmp = I915_READ_CTL(engine); if (tmp & RING_WAIT) { - i915_handle_error(dev, 0, + i915_handle_error(dev_priv, 0, "Kicking stuck wait on %s", engine->name); I915_WRITE_CTL(engine, tmp); return HANGCHECK_KICK; } - if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { + if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) { switch (semaphore_passed(engine)) { default: return HANGCHECK_HUNG; case 1: - i915_handle_error(dev, 0, + i915_handle_error(dev_priv, 0, "Kicking stuck semaphore on %s", engine->name); I915_WRITE_CTL(engine, tmp); @@ -3097,7 +3094,7 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd) static unsigned kick_waiters(struct intel_engine_cs *engine) { - struct drm_i915_private *i915 = to_i915(engine->dev); + struct drm_i915_private *i915 = engine->i915; unsigned user_interrupts = READ_ONCE(engine->user_interrupts); if (engine->hangcheck.user_interrupts == user_interrupts && @@ -3126,7 +3123,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work) struct drm_i915_private *dev_priv = container_of(work, typeof(*dev_priv), gpu_error.hangcheck_work.work); - struct drm_device *dev = dev_priv->dev; struct intel_engine_cs *engine; enum intel_engine_id id; int busy_count = 0, rings_hung = 0; @@ -3254,22 +3250,22 @@ static void i915_hangcheck_elapsed(struct work_struct *work) } if (rings_hung) { - i915_handle_error(dev, rings_hung, "Engine(s) hung"); + i915_handle_error(dev_priv, rings_hung, "Engine(s) hung"); goto out; } if (busy_count) /* Reset timer case chip hangs without another request * being added */ - i915_queue_hangcheck(dev); + i915_queue_hangcheck(dev_priv); out: ENABLE_RPM_WAKEREF_ASSERTS(dev_priv); } -void i915_queue_hangcheck(struct drm_device *dev) +void i915_queue_hangcheck(struct drm_i915_private *dev_priv) { - struct i915_gpu_error *e = &to_i915(dev)->gpu_error; + struct i915_gpu_error *e = &dev_priv->gpu_error; if (!i915.enable_hangcheck) return; diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index dc0def210097..20b2e4039792 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -462,7 +462,7 @@ TRACE_EVENT(i915_gem_ring_sync_to, ), TP_fast_assign( - __entry->dev = from->dev->primary->index; + __entry->dev = from->i915->dev->primary->index; __entry->sync_from = from->id; __entry->sync_to = to_req->engine->id; __entry->seqno = i915_gem_request_get_seqno(req); @@ -486,13 +486,11 @@ TRACE_EVENT(i915_gem_ring_dispatch, ), TP_fast_assign( - struct intel_engine_cs *engine = - i915_gem_request_get_engine(req); - __entry->dev = engine->dev->primary->index; - __entry->ring = engine->id; - __entry->seqno = i915_gem_request_get_seqno(req); + __entry->dev = req->i915->dev->primary->index; + __entry->ring = req->engine->id; + __entry->seqno = req->seqno; __entry->flags = flags; - 
i915_trace_irq_get(engine, req); + i915_trace_irq_get(req->engine, req); ), TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", @@ -511,7 +509,7 @@ TRACE_EVENT(i915_gem_ring_flush, ), TP_fast_assign( - __entry->dev = req->engine->dev->primary->index; + __entry->dev = req->i915->dev->primary->index; __entry->ring = req->engine->id; __entry->invalidate = invalidate; __entry->flush = flush; @@ -533,11 +531,9 @@ DECLARE_EVENT_CLASS(i915_gem_request, ), TP_fast_assign( - struct intel_engine_cs *engine = - i915_gem_request_get_engine(req); - __entry->dev = engine->dev->primary->index; - __entry->ring = engine->id; - __entry->seqno = i915_gem_request_get_seqno(req); + __entry->dev = req->i915->dev->primary->index; + __entry->ring = req->engine->id; + __entry->seqno = req->seqno; ), TP_printk("dev=%u, ring=%u, seqno=%u", @@ -560,7 +556,7 @@ TRACE_EVENT(i915_gem_request_notify, ), TP_fast_assign( - __entry->dev = engine->dev->primary->index; + __entry->dev = engine->i915->dev->primary->index; __entry->ring = engine->id; __entry->seqno = engine->get_seqno(engine); ), @@ -597,13 +593,11 @@ TRACE_EVENT(i915_gem_request_wait_begin, * less desirable. */ TP_fast_assign( - struct intel_engine_cs *engine = - i915_gem_request_get_engine(req); - __entry->dev = engine->dev->primary->index; - __entry->ring = engine->id; - __entry->seqno = i915_gem_request_get_seqno(req); + __entry->dev = req->i915->dev->primary->index; + __entry->ring = req->engine->id; + __entry->seqno = req->seqno; __entry->blocking = - mutex_is_locked(&engine->dev->struct_mutex); + mutex_is_locked(&req->i915->dev->struct_mutex); ), TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s", @@ -792,7 +786,7 @@ TRACE_EVENT(switch_mm, __entry->ring = engine->id; __entry->to = to; __entry->vm = to->ppgtt? &to->ppgtt->base : NULL; - __entry->dev = engine->dev->primary->index; + __entry->dev = engine->i915->dev->primary->index; ), TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p", diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index aa58ecc876ab..a8f5d035941b 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3144,28 +3144,26 @@ static void intel_update_primary_planes(struct drm_device *dev) } } -void intel_prepare_reset(struct drm_device *dev) +void intel_prepare_reset(struct drm_i915_private *dev_priv) { /* no reset support for gen2 */ - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) return; /* reset doesn't touch the display */ - if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) + if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) return; - drm_modeset_lock_all(dev); + drm_modeset_lock_all(dev_priv->dev); /* * Disabling the crtcs gracefully seems nicer. Also the * g33 docs say we should at least disable all the planes. 
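For orientation, the two display hooks being converted here bracket the actual GPU reset; a minimal sketch of the call order, mirroring i915_reset_and_wakeup() in the i915_irq.c hunk earlier in this patch:

	intel_prepare_reset(dev_priv);	/* gen2: no reset support; gen5+/G4X: display untouched */
	ret = i915_reset(dev_priv);
	intel_finish_reset(dev_priv);	/* complete nuked flips, restore planes or full mode state */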
*/ - intel_display_suspend(dev); + intel_display_suspend(dev_priv->dev); } -void intel_finish_reset(struct drm_device *dev) +void intel_finish_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - /* * Flips in the rings will be nuked by the reset, * so complete all pending flips so that user space @@ -3174,11 +3172,11 @@ void intel_finish_reset(struct drm_device *dev) intel_complete_page_flips(dev_priv); /* no reset support for gen2 */ - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) return; /* reset doesn't touch the display */ - if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) { + if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) { /* * Flips in the rings have been nuked by the reset, * so update the base address of all primary @@ -3188,7 +3186,7 @@ void intel_finish_reset(struct drm_device *dev) * FIXME: Atomic will make this obsolete since we won't schedule * CS-based flips (which might get lost in gpu resets) any more. */ - intel_update_primary_planes(dev); + intel_update_primary_planes(dev_priv->dev); return; } @@ -3199,18 +3197,18 @@ void intel_finish_reset(struct drm_device *dev) intel_runtime_pm_disable_interrupts(dev_priv); intel_runtime_pm_enable_interrupts(dev_priv); - intel_modeset_init_hw(dev); + intel_modeset_init_hw(dev_priv->dev); spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display.hpd_irq_setup) dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); - intel_display_resume(dev); + intel_display_resume(dev_priv->dev); intel_hpd_init(dev_priv); - drm_modeset_unlock_all(dev); + drm_modeset_unlock_all(dev_priv->dev); } static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) @@ -11255,7 +11253,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine, if (engine == NULL) return true; - if (INTEL_INFO(engine->dev)->gen < 5) + if (INTEL_GEN(engine->i915) < 5) return false; if (i915.use_mmio_flip < 0) @@ -16187,9 +16185,8 @@ struct intel_display_error_state { }; struct intel_display_error_state * -intel_display_capture_error_state(struct drm_device *dev) +intel_display_capture_error_state(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_display_error_state *error; int transcoders[] = { TRANSCODER_A, @@ -16199,14 +16196,14 @@ intel_display_capture_error_state(struct drm_device *dev) }; int i; - if (INTEL_INFO(dev)->num_pipes == 0) + if (INTEL_INFO(dev_priv)->num_pipes == 0) return NULL; error = kzalloc(sizeof(*error), GFP_ATOMIC); if (error == NULL) return NULL; - if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); for_each_pipe(dev_priv, i) { @@ -16222,25 +16219,25 @@ intel_display_capture_error_state(struct drm_device *dev) error->plane[i].control = I915_READ(DSPCNTR(i)); error->plane[i].stride = I915_READ(DSPSTRIDE(i)); - if (INTEL_INFO(dev)->gen <= 3) { + if (INTEL_GEN(dev_priv) <= 3) { error->plane[i].size = I915_READ(DSPSIZE(i)); error->plane[i].pos = I915_READ(DSPPOS(i)); } - if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) + if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv)) error->plane[i].addr = I915_READ(DSPADDR(i)); - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { error->plane[i].surface = I915_READ(DSPSURF(i)); error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); } error->pipe[i].source = I915_READ(PIPESRC(i)); - if (HAS_GMCH_DISPLAY(dev)) + if (HAS_GMCH_DISPLAY(dev_priv)) error->pipe[i].stat = 
I915_READ(PIPESTAT(i)); } /* Note: this does not include DSI transcoders. */ - error->num_transcoders = INTEL_INFO(dev)->num_pipes; + error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes; if (HAS_DDI(dev_priv)) error->num_transcoders++; /* Account for eDP. */ diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index b37a04299643..8ff711474b7d 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1231,8 +1231,8 @@ u32 intel_compute_tile_offset(int *x, int *y, const struct drm_framebuffer *fb, int plane, unsigned int pitch, unsigned int rotation); -void intel_prepare_reset(struct drm_device *dev); -void intel_finish_reset(struct drm_device *dev); +void intel_prepare_reset(struct drm_i915_private *dev_priv); +void intel_finish_reset(struct drm_i915_private *dev_priv); void hsw_enable_pc8(struct drm_i915_private *dev_priv); void hsw_disable_pc8(struct drm_i915_private *dev_priv); void broxton_init_cdclk(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index d5a7cfec589b..4a527d3cf026 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -827,7 +827,7 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc) bool enable_by_default = IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv); - if (intel_vgpu_active(dev_priv->dev)) { + if (intel_vgpu_active(dev_priv)) { fbc->no_fbc_reason = "VGPU is active"; return false; } diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index b8e9f9d8b205..db10c961e0f4 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -246,21 +246,22 @@ static int intel_lr_context_pin(struct intel_context *ctx, * * Return: 1 if Execlists is supported and has to be enabled. */ -int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists) +int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists) { /* On platforms with execlist available, vGPU will only * support execlist mode, no ring buffer mode. 
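The sanitized value resolves the i915.enable_execlists module parameter (negative means auto-detect, 0 off, 1 on) against hardware and vGPU constraints. Condensed from the function body that follows, the precedence is:

	/* sketch of the decision order in intel_sanitize_enable_execlists() */
	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
		return 1;		/* vGPU supports execlists only */
	if (INTEL_GEN(dev_priv) >= 9)
		return 1;		/* mandatory from gen9 onwards */
	if (enable_execlists == 0)
		return 0;		/* explicitly disabled by the user */
	/* otherwise: on when LRC + PPGTT are available and MMIO flips are not forced off */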
*/ - if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev)) + if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv)) return 1; - if (INTEL_INFO(dev)->gen >= 9) + if (INTEL_GEN(dev_priv) >= 9) return 1; if (enable_execlists == 0) return 0; - if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) && + if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && + USES_PPGTT(dev_priv) && i915.use_mmio_flip >= 0) return 1; @@ -270,19 +271,19 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists static void logical_ring_init_platform_invariants(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; + struct drm_i915_private *dev_priv = engine->i915; - if (IS_GEN8(dev) || IS_GEN9(dev)) + if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv)) engine->idle_lite_restore_wa = ~0; - engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) && + engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) || + IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) && (engine->id == VCS || engine->id == VCS2); engine->ctx_desc_template = GEN8_CTX_VALID; - engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) << + engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev_priv) << GEN8_CTX_ADDRESSING_MODE_SHIFT; - if (IS_GEN8(dev)) + if (IS_GEN8(dev_priv)) engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT; engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE; @@ -342,8 +343,7 @@ static void execlists_elsp_write(struct drm_i915_gem_request *rq0, { struct intel_engine_cs *engine = rq0->engine; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = rq0->i915; uint64_t desc[2]; if (rq1) { @@ -425,7 +425,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine) * If irqs are not active generate a warning as batches that finish * without the irqs may get lost and a GPU Hang may occur. 
*/ - WARN_ON(!intel_irqs_enabled(engine->dev->dev_private)); + WARN_ON(!intel_irqs_enabled(engine->i915)); /* Try to read in pairs */ list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue, @@ -497,7 +497,7 @@ static u32 get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer, u32 *context_id) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; u32 status; read_pointer %= GEN8_CSB_ENTRIES; @@ -523,7 +523,7 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer, static void intel_lrc_irq_handler(unsigned long data) { struct intel_engine_cs *engine = (struct intel_engine_cs *)data; - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; u32 status_pointer; unsigned int read_pointer, write_pointer; u32 csb[GEN8_CSB_ENTRIES][2]; @@ -884,7 +884,7 @@ void intel_execlists_cancel_requests(struct intel_engine_cs *engine) struct drm_i915_gem_request *req, *tmp; LIST_HEAD(cancel_list); - WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex)); + WARN_ON(!mutex_is_locked(&engine->i915->dev->struct_mutex)); spin_lock_bh(&engine->execlist_lock); list_replace_init(&engine->execlist_queue, &cancel_list); @@ -898,7 +898,7 @@ void intel_execlists_cancel_requests(struct intel_engine_cs *engine) void intel_logical_ring_stop(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; int ret; if (!intel_engine_initialized(engine)) @@ -964,7 +964,7 @@ static int intel_lr_context_pin(struct intel_context *ctx, lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; ringbuf = ctx->engine[engine->id].ringbuf; - ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf); + ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf); if (ret) goto unpin_map; @@ -1019,9 +1019,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) int ret, i; struct intel_engine_cs *engine = req->engine; struct intel_ringbuffer *ringbuf = req->ringbuf; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_workarounds *w = &dev_priv->workarounds; + struct i915_workarounds *w = &req->i915->workarounds; if (w->count == 0) return 0; @@ -1092,7 +1090,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, * this batch updates GEN8_L3SQCREG4 with default value we need to * set this bit here to retain the WA during flush. 
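A hedged sketch of what "retaining the WA" means here, using the wa_ctx_emit() helpers seen throughout this file: the indirect-context batch re-asserts GEN8_LQSC_RO_PERF_DIS with a register write so a context restore cannot drop it (the scratch save/restore surrounding this in the real batch is elided):

	l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
	wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
	wa_ctx_emit(batch, index, l3sqc4_flush);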
*/ - if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0)) + if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_E0)) l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS; wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | @@ -1181,7 +1179,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine, wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */ - if (IS_BROADWELL(engine->dev)) { + if (IS_BROADWELL(engine->i915)) { int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index); if (rc < 0) return rc; @@ -1253,12 +1251,11 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine, uint32_t *offset) { int ret; - struct drm_device *dev = engine->dev; uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); /* WaDisableCtxRestoreArbitration:skl,bxt */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) || + IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */ @@ -1279,12 +1276,11 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine, uint32_t *const batch, uint32_t *offset) { - struct drm_device *dev = engine->dev; uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) || + IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) { wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0); wa_ctx_emit(batch, index, @@ -1293,7 +1289,7 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine, } /* WaClearTdlStateAckDirtyBits:bxt */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) { + if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) { wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4)); wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK); @@ -1312,8 +1308,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine, } /* WaDisableCtxRestoreArbitration:skl,bxt */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) || + IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE); wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END); @@ -1325,7 +1321,7 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size) { int ret; - engine->wa_ctx.obj = i915_gem_object_create(engine->dev, + engine->wa_ctx.obj = i915_gem_object_create(engine->i915->dev, PAGE_ALIGN(size)); if (IS_ERR(engine->wa_ctx.obj)) { DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n"); @@ -1365,9 +1361,9 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) WARN_ON(engine->id != RCS); /* update this when WA for higher Gen are added */ - if (INTEL_INFO(engine->dev)->gen > 9) { + if (INTEL_GEN(engine->i915) > 9) { DRM_ERROR("WA batch buffer is not initialized for Gen%d\n", - INTEL_INFO(engine->dev)->gen); + INTEL_GEN(engine->i915)); return 0; } @@ -1387,7 +1383,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) batch = kmap_atomic(page); offset = 0; - if (INTEL_INFO(engine->dev)->gen == 8) { + if (IS_GEN8(engine->i915)) { ret = gen8_init_indirectctx_bb(engine, &wa_ctx->indirect_ctx, batch, @@ -1401,7 +1397,7 @@ static int 
intel_init_workaround_bb(struct intel_engine_cs *engine) &offset); if (ret) goto out; - } else if (INTEL_INFO(engine->dev)->gen == 9) { + } else if (IS_GEN9(engine->i915)) { ret = gen9_init_indirectctx_bb(engine, &wa_ctx->indirect_ctx, batch, @@ -1427,7 +1423,7 @@ out: static void lrc_init_hws(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; I915_WRITE(RING_HWS_PGA(engine->mmio_base), (u32)engine->status_page.gfx_addr); @@ -1436,8 +1432,7 @@ static void lrc_init_hws(struct intel_engine_cs *engine) static int gen8_init_common_ring(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned int next_context_status_buffer_hw; lrc_init_hws(engine); @@ -1484,8 +1479,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine) static int gen8_init_render_ring(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; int ret; ret = gen8_init_common_ring(engine); @@ -1562,7 +1556,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req, if (req->ctx->ppgtt && (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) { if (!USES_FULL_48BIT_PPGTT(req->i915) && - !intel_vgpu_active(req->i915->dev)) { + !intel_vgpu_active(req->i915)) { ret = intel_logical_ring_emit_pdps(req); if (ret) return ret; @@ -1590,8 +1584,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req, static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; if (WARN_ON(!intel_irqs_enabled(dev_priv))) @@ -1610,8 +1603,7 @@ static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine) static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -1628,8 +1620,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, { struct intel_ringbuffer *ringbuf = request->ringbuf; struct intel_engine_cs *engine = ringbuf->engine; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = request->i915; uint32_t cmd; int ret; @@ -1697,7 +1688,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL * pipe control. 
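On gen9 the workaround described above amounts to emitting a zeroed PIPE_CONTROL as a spacer ahead of the one that sets VF_CACHE_INVALIDATE; a sketch with this file's emit helper (six dwords, per the gen8 PIPE_CONTROL format):

	if (vf_flush_wa) {
		intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
		intel_logical_ring_emit(ringbuf, 0);	/* flags: none */
		intel_logical_ring_emit(ringbuf, 0);	/* address */
		intel_logical_ring_emit(ringbuf, 0);
		intel_logical_ring_emit(ringbuf, 0);	/* data */
		intel_logical_ring_emit(ringbuf, 0);
	}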
*/ - if (IS_GEN9(engine->dev)) + if (IS_GEN9(request->i915)) vf_flush_wa = true; } @@ -1890,7 +1881,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state))) tasklet_kill(&engine->irq_tasklet); - dev_priv = engine->dev->dev_private; + dev_priv = engine->i915; if (engine->buffer) { intel_logical_ring_stop(engine); @@ -1914,7 +1905,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) engine->ctx_desc_template = 0; lrc_destroy_wa_ctx_obj(engine); - engine->dev = NULL; + engine->i915 = NULL; } static void @@ -1929,7 +1920,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) engine->emit_bb_start = gen8_emit_bb_start; engine->get_seqno = gen8_get_seqno; engine->set_seqno = gen8_set_seqno; - if (IS_BXT_REVID(engine->dev, 0, BXT_REVID_A1)) { + if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) { engine->irq_seqno_barrier = bxt_a_seqno_barrier; engine->set_seqno = bxt_a_set_seqno; } @@ -2019,7 +2010,7 @@ logical_ring_setup(struct drm_device *dev, enum intel_engine_id id) engine->guc_id = info->guc_id; engine->mmio_base = info->mmio_base; - engine->dev = dev; + engine->i915 = dev_priv; /* Intentionally left blank. */ engine->buffer = NULL; @@ -2052,7 +2043,7 @@ logical_ring_setup(struct drm_device *dev, enum intel_engine_id id) logical_ring_default_irqs(engine, info->irq_shift); intel_engine_init_hangcheck(engine); - i915_gem_batch_pool_init(engine->dev, &engine->batch_pool); + i915_gem_batch_pool_init(dev, &engine->batch_pool); return engine; } @@ -2060,7 +2051,7 @@ logical_ring_setup(struct drm_device *dev, enum intel_engine_id id) static int logical_ring_init(struct intel_engine_cs *engine) { - struct intel_context *dctx = to_i915(engine->dev)->kernel_context; + struct intel_context *dctx = engine->i915->kernel_context; int ret; ret = i915_cmd_parser_init_ring(engine); @@ -2220,7 +2211,7 @@ cleanup_render_ring: } static u32 -make_rpcs(struct drm_device *dev) +make_rpcs(struct drm_i915_private *dev_priv) { u32 rpcs = 0; @@ -2228,7 +2219,7 @@ make_rpcs(struct drm_device *dev) * No explicit RPCS request is needed to ensure full * slice/subslice/EU enablement prior to Gen9. */ - if (INTEL_INFO(dev)->gen < 9) + if (INTEL_GEN(dev_priv) < 9) return 0; /* @@ -2237,24 +2228,24 @@ make_rpcs(struct drm_device *dev) * must make an explicit request through RPCS for full * enablement. 
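As a worked example of the packing that follows, take a hypothetical gen9 part with all three power-gating flags set and 1 slice, 3 subslices and 8 EU per subslice (numbers invented for illustration):

	/* rpcs = GEN8_RPCS_S_CNT_ENABLE  | (1 << GEN8_RPCS_S_CNT_SHIFT)
	 *      | GEN8_RPCS_SS_CNT_ENABLE | (3 << GEN8_RPCS_SS_CNT_SHIFT)
	 *      | (8 << GEN8_RPCS_EU_MIN_SHIFT) | (8 << GEN8_RPCS_EU_MAX_SHIFT)
	 *      | GEN8_RPCS_ENABLE
	 */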
*/ - if (INTEL_INFO(dev)->has_slice_pg) { + if (INTEL_INFO(dev_priv)->has_slice_pg) { rpcs |= GEN8_RPCS_S_CNT_ENABLE; - rpcs |= INTEL_INFO(dev)->slice_total << + rpcs |= INTEL_INFO(dev_priv)->slice_total << GEN8_RPCS_S_CNT_SHIFT; rpcs |= GEN8_RPCS_ENABLE; } - if (INTEL_INFO(dev)->has_subslice_pg) { + if (INTEL_INFO(dev_priv)->has_subslice_pg) { rpcs |= GEN8_RPCS_SS_CNT_ENABLE; - rpcs |= INTEL_INFO(dev)->subslice_per_slice << + rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice << GEN8_RPCS_SS_CNT_SHIFT; rpcs |= GEN8_RPCS_ENABLE; } - if (INTEL_INFO(dev)->has_eu_pg) { - rpcs |= INTEL_INFO(dev)->eu_per_subslice << + if (INTEL_INFO(dev_priv)->has_eu_pg) { + rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice << GEN8_RPCS_EU_MIN_SHIFT; - rpcs |= INTEL_INFO(dev)->eu_per_subslice << + rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice << GEN8_RPCS_EU_MAX_SHIFT; rpcs |= GEN8_RPCS_ENABLE; } @@ -2266,9 +2257,9 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine) { u32 indirect_ctx_offset; - switch (INTEL_INFO(engine->dev)->gen) { + switch (INTEL_GEN(engine->i915)) { default: - MISSING_CASE(INTEL_INFO(engine->dev)->gen); + MISSING_CASE(INTEL_GEN(engine->i915)); /* fall through */ case 9: indirect_ctx_offset = @@ -2289,8 +2280,7 @@ populate_lr_context(struct intel_context *ctx, struct intel_engine_cs *engine, struct intel_ringbuffer *ringbuf) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = ctx->i915; struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; void *vaddr; u32 *reg_state; @@ -2328,7 +2318,7 @@ populate_lr_context(struct intel_context *ctx, RING_CONTEXT_CONTROL(engine), _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH | CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | - (HAS_RESOURCE_STREAMER(dev) ? + (HAS_RESOURCE_STREAMER(dev_priv) ? 
CTX_CTRL_RS_CTX_ENABLE : 0))); ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base), 0); @@ -2417,7 +2407,7 @@ populate_lr_context(struct intel_context *ctx, if (engine->id == RCS) { reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, - make_rpcs(dev)); + make_rpcs(dev_priv)); } i915_gem_object_unpin_map(ctx_obj); @@ -2468,11 +2458,11 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine) { int ret = 0; - WARN_ON(INTEL_INFO(engine->dev)->gen < 8); + WARN_ON(INTEL_GEN(engine->i915) < 8); switch (engine->id) { case RCS: - if (INTEL_INFO(engine->dev)->gen >= 9) + if (INTEL_GEN(engine->i915) >= 9) ret = GEN9_LR_CONTEXT_RENDER_SIZE; else ret = GEN8_LR_CONTEXT_RENDER_SIZE; @@ -2504,7 +2494,6 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine) static int execlists_context_deferred_alloc(struct intel_context *ctx, struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; struct drm_i915_gem_object *ctx_obj; uint32_t context_size; struct intel_ringbuffer *ringbuf; @@ -2518,7 +2507,7 @@ static int execlists_context_deferred_alloc(struct intel_context *ctx, /* One extra page as the sharing data between driver and GuC */ context_size += PAGE_SIZE * LRC_PPHWSP_PN; - ctx_obj = i915_gem_object_create(dev, context_size); + ctx_obj = i915_gem_object_create(ctx->i915->dev, context_size); if (IS_ERR(ctx_obj)) { DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n"); return PTR_ERR(ctx_obj); diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 229b8a974262..1afba0331dc6 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -112,7 +112,8 @@ uint64_t intel_lr_context_descriptor(struct intel_context *ctx, struct intel_engine_cs *engine); /* Execlists */ -int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists); +int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, + int enable_execlists); struct i915_execbuffer_params; int intel_execlists_submission(struct i915_execbuffer_params *params, struct drm_i915_gem_execbuffer2 *args, diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c index 6ba4bf7f2a89..b765c75f3fcd 100644 --- a/drivers/gpu/drm/i915/intel_mocs.c +++ b/drivers/gpu/drm/i915/intel_mocs.c @@ -189,7 +189,7 @@ static i915_reg_t mocs_register(enum intel_engine_id ring, int index) */ int intel_mocs_init_engine(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = to_i915(engine->dev); + struct drm_i915_private *dev_priv = engine->i915; struct drm_i915_mocs_table table; unsigned int index; diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 8570c60c6fc0..4a1e774ba8cc 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -1508,9 +1508,8 @@ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay, struct intel_overlay_error_state * -intel_overlay_capture_error_state(struct drm_device *dev) +intel_overlay_capture_error_state(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_overlay *overlay = dev_priv->overlay; struct intel_overlay_error_state *error; struct overlay_registers __iomem *regs; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 999ab8e30c35..299b6cd61f69 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ 
b/drivers/gpu/drm/i915/intel_pm.c @@ -6344,7 +6344,7 @@ void intel_enable_gt_powersave(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; /* Powersaving is controlled by the host when inside a VM */ - if (intel_vgpu_active(dev)) + if (intel_vgpu_active(dev_priv)) return; if (IS_IRONLAKE_M(dev)) { @@ -7400,8 +7400,7 @@ static void __intel_rps_boost_work(struct work_struct *work) struct drm_i915_gem_request *req = boost->req; if (!i915_gem_request_completed(req, true)) - gen6_rps_boost(to_i915(req->engine->dev), NULL, - req->emitted_jiffies); + gen6_rps_boost(req->i915, NULL, req->emitted_jiffies); i915_gem_request_unreference(req); kfree(boost); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 8f3eb3033da0..e17a682dd621 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -60,7 +60,7 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf) bool intel_engine_stopped(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine); } @@ -106,7 +106,6 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 flush_domains) { struct intel_engine_cs *engine = req->engine; - struct drm_device *dev = engine->dev; u32 cmd; int ret; @@ -145,7 +144,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, cmd |= MI_EXE_FLUSH; if (invalidate_domains & I915_GEM_DOMAIN_COMMAND && - (IS_G4X(dev) || IS_GEN5(dev))) + (IS_G4X(req->i915) || IS_GEN5(req->i915))) cmd |= MI_INVALIDATE_ISP; ret = intel_ring_begin(req, 2); @@ -431,19 +430,19 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req, static void ring_write_tail(struct intel_engine_cs *engine, u32 value) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; I915_WRITE_TAIL(engine, value); } u64 intel_ring_get_active_head(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; u64 acthd; - if (INTEL_INFO(engine->dev)->gen >= 8) + if (INTEL_GEN(dev_priv) >= 8) acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base), RING_ACTHD_UDW(engine->mmio_base)); - else if (INTEL_INFO(engine->dev)->gen >= 4) + else if (INTEL_GEN(dev_priv) >= 4) acthd = I915_READ(RING_ACTHD(engine->mmio_base)); else acthd = I915_READ(ACTHD); @@ -453,25 +452,24 @@ u64 intel_ring_get_active_head(struct intel_engine_cs *engine) static void ring_setup_phys_status_page(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; u32 addr; addr = dev_priv->status_page_dmah->busaddr; - if (INTEL_INFO(engine->dev)->gen >= 4) + if (INTEL_GEN(dev_priv) >= 4) addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; I915_WRITE(HWS_PGA, addr); } static void intel_ring_setup_status_page(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; i915_reg_t mmio; /* The ring status page addresses are no longer next to the rest of * the ring registers as of gen7. 
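Later in this same function, gen6-7 follow the HWS write with a synchronous TLB flush; that body is unchanged by this patch, but roughly does the following (a sketch from memory of the surrounding code; the INSTPM bit names are not part of this hunk):

	i915_reg_t reg = RING_INSTPM(engine->mmio_base);

	/* ring should already be idle (MODE_IDLE) at this point */
	I915_WRITE(reg, _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					   INSTPM_SYNC_FLUSH));
	if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0, 1000))
		DRM_ERROR("%s: wait for SyncFlush timed out\n", engine->name);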
*/ - if (IS_GEN7(dev)) { + if (IS_GEN7(dev_priv)) { switch (engine->id) { case RCS: mmio = RENDER_HWS_PGA_GEN7; @@ -491,7 +489,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine) mmio = VEBOX_HWS_PGA_GEN7; break; } - } else if (IS_GEN6(engine->dev)) { + } else if (IS_GEN6(dev_priv)) { mmio = RING_HWS_PGA_GEN6(engine->mmio_base); } else { /* XXX: gen8 returns to sanity */ @@ -508,7 +506,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine) * arises: do we still need this and if so how should we go about * invalidating the TLB? */ - if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) { + if (INTEL_GEN(dev_priv) >= 6 && INTEL_GEN(dev_priv) < 8) { i915_reg_t reg = RING_INSTPM(engine->mmio_base); /* ring should be idle before issuing a sync flush*/ @@ -526,9 +524,9 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine) static bool stop_ring(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = to_i915(engine->dev); + struct drm_i915_private *dev_priv = engine->i915; - if (!IS_GEN2(engine->dev)) { + if (!IS_GEN2(dev_priv)) { I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING)); if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) { DRM_ERROR("%s : timed out trying to stop ring\n", @@ -546,7 +544,7 @@ static bool stop_ring(struct intel_engine_cs *engine) I915_WRITE_HEAD(engine, 0); engine->write_tail(engine, 0); - if (!IS_GEN2(engine->dev)) { + if (!IS_GEN2(dev_priv)) { (void)I915_READ_CTL(engine); I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); } @@ -561,8 +559,7 @@ void intel_engine_init_hangcheck(struct intel_engine_cs *engine) static int init_ring_common(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; struct intel_ringbuffer *ringbuf = engine->buffer; struct drm_i915_gem_object *obj = ringbuf->obj; int ret = 0; @@ -592,7 +589,7 @@ static int init_ring_common(struct intel_engine_cs *engine) } } - if (I915_NEED_GFX_HWS(dev)) + if (I915_NEED_GFX_HWS(dev_priv)) intel_ring_setup_status_page(engine); else ring_setup_phys_status_page(engine); @@ -649,12 +646,10 @@ out: void intel_fini_pipe_control(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - if (engine->scratch.obj == NULL) return; - if (INTEL_INFO(dev)->gen >= 5) { + if (INTEL_GEN(engine->i915) >= 5) { kunmap(sg_page(engine->scratch.obj->pages->sgl)); i915_gem_object_ggtt_unpin(engine->scratch.obj); } @@ -670,7 +665,7 @@ intel_init_pipe_control(struct intel_engine_cs *engine) WARN_ON(engine->scratch.obj); - engine->scratch.obj = i915_gem_object_create(engine->dev, 4096); + engine->scratch.obj = i915_gem_object_create(engine->i915->dev, 4096); if (IS_ERR(engine->scratch.obj)) { DRM_ERROR("Failed to allocate seqno page\n"); ret = PTR_ERR(engine->scratch.obj); @@ -708,11 +703,9 @@ err: static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) { - int ret, i; struct intel_engine_cs *engine = req->engine; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_workarounds *w = &dev_priv->workarounds; + struct i915_workarounds *w = &req->i915->workarounds; + int ret, i; if (w->count == 0) return 0; @@ -801,7 +794,7 @@ static int wa_add(struct drm_i915_private *dev_priv, static int wa_ring_whitelist_reg(struct intel_engine_cs *engine, i915_reg_t reg) { - struct drm_i915_private *dev_priv = 
engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; struct i915_workarounds *wa = &dev_priv->workarounds; const uint32_t index = wa->hw_whitelist_count[engine->id]; @@ -817,8 +810,7 @@ static int wa_ring_whitelist_reg(struct intel_engine_cs *engine, static int gen8_init_workarounds(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING); @@ -869,9 +861,8 @@ static int gen8_init_workarounds(struct intel_engine_cs *engine) static int bdw_init_workarounds(struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = engine->i915; int ret; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; ret = gen8_init_workarounds(engine); if (ret) @@ -891,16 +882,15 @@ static int bdw_init_workarounds(struct intel_engine_cs *engine) /* WaForceContextSaveRestoreNonCoherent:bdw */ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */ - (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); + (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); return 0; } static int chv_init_workarounds(struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = engine->i915; int ret; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; ret = gen8_init_workarounds(engine); if (ret) @@ -917,8 +907,7 @@ static int chv_init_workarounds(struct intel_engine_cs *engine) static int gen9_init_workarounds(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; uint32_t tmp; int ret; @@ -941,14 +930,14 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC); /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) || + IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, GEN9_DG_MIRROR_FIX_ENABLE); /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) || + IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1, GEN9_RHWO_OPTIMIZATION_DISABLE); /* @@ -974,20 +963,20 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) GEN9_CCS_TLB_PREFETCH_ENABLE); /* WaDisableMaskBasedCammingInRCC:skl,bxt */ - if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) || + IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0, PIXEL_MASK_CAMMING_DISABLE); /* WaForceContextSaveRestoreNonCoherent:skl,bxt */ tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT; - if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) || - IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER)) + if (IS_SKL_REVID(dev_priv, SKL_REVID_F0, REVID_FOREVER) || + IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE; WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp); /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */ - if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0)) + if 
(IS_SKYLAKE(dev_priv) || IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, GEN8_SAMPLER_POWER_BYPASS_DIS); @@ -1013,8 +1002,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) static int skl_tune_iz_hashing(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; u8 vals[3] = { 0, 0, 0 }; unsigned int i; @@ -1055,9 +1043,8 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *engine) static int skl_init_workarounds(struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = engine->i915; int ret; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; ret = gen9_init_workarounds(engine); if (ret) @@ -1068,12 +1055,12 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) * until D0 which is the default case so this is equivalent to * !WaDisablePerCtxtPreemptionGranularityControl:skl */ - if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) { + if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) { I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); } - if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) { + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0)) { /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */ I915_WRITE(FF_SLICE_CS_CHICKEN2, _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE)); @@ -1082,24 +1069,24 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes * involving this register should also be added to WA batch as required. */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_E0)) + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) /* WaDisableLSQCROPERFforOCL:skl */ I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_RO_PERF_DIS); /* WaEnableGapsTsvCreditFix:skl */ - if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) { + if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) { I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE)); } /* WaDisablePowerCompilerClockGating:skl */ - if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0)) + if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0)) WA_SET_BIT_MASKED(HIZ_CHICKEN, BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); /* This is tied to WaForceContextSaveRestoreNonCoherent */ - if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) { + if (IS_SKL_REVID(dev_priv, 0, REVID_FOREVER)) { /* *Use Force Non-Coherent whenever executing a 3D context. 
This * is a workaround for a possible hang in the unlikely event @@ -1115,13 +1102,13 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) } /* WaBarrierPerformanceFixDisable:skl */ - if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0)) + if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0)) WA_SET_BIT_MASKED(HDC_CHICKEN0, HDC_FENCE_DEST_SLM_DISABLE | HDC_BARRIER_PERFORMANCE_DISABLE); /* WaDisableSbeCacheDispatchPortSharing:skl */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0)) WA_SET_BIT_MASKED( GEN7_HALF_SLICE_CHICKEN1, GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); @@ -1136,9 +1123,8 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) static int bxt_init_workarounds(struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = engine->i915; int ret; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; ret = gen9_init_workarounds(engine); if (ret) @@ -1146,11 +1132,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) /* WaStoreMultiplePTEenable:bxt */ /* This is a requirement according to Hardware specification */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF); /* WaSetClckGatingDisableMedia:bxt */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE)); } @@ -1160,7 +1146,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) STALL_DOP_GATING_DISABLE); /* WaDisableSbeCacheDispatchPortSharing:bxt */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) { + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) { WA_SET_BIT_MASKED( GEN7_HALF_SLICE_CHICKEN1, GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); @@ -1170,7 +1156,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */ /* WaDisableObjectLevelPreemtionForInstanceId:bxt */ /* WaDisableLSQCROPERFforOCL:bxt */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1); if (ret) return ret; @@ -1181,7 +1167,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) } /* WaProgramL3SqcReg1DefaultForPerf:bxt */ - if (IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER)) + if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2)); @@ -1190,24 +1176,23 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) int init_workarounds_ring(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; WARN_ON(engine->id != RCS); dev_priv->workarounds.count = 0; dev_priv->workarounds.hw_whitelist_count[RCS] = 0; - if (IS_BROADWELL(dev)) + if (IS_BROADWELL(dev_priv)) return bdw_init_workarounds(engine); - if (IS_CHERRYVIEW(dev)) + if (IS_CHERRYVIEW(dev_priv)) return chv_init_workarounds(engine); - if (IS_SKYLAKE(dev)) + if (IS_SKYLAKE(dev_priv)) return skl_init_workarounds(engine); - if (IS_BROXTON(dev)) + if (IS_BROXTON(dev_priv)) return bxt_init_workarounds(engine); return 0; @@ -1215,14 +1200,13 @@ int init_workarounds_ring(struct intel_engine_cs *engine) static int init_render_ring(struct intel_engine_cs *engine) { - 
struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; int ret = init_ring_common(engine); if (ret) return ret; /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ - if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7) + if (INTEL_GEN(dev_priv) >= 4 && INTEL_GEN(dev_priv) < 7) I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); /* We need to disable the AsyncFlip performance optimisations in order @@ -1231,22 +1215,22 @@ static int init_render_ring(struct intel_engine_cs *engine) * * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv */ - if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) + if (INTEL_GEN(dev_priv) >= 6 && INTEL_GEN(dev_priv) < 8) I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); /* Required for the hardware to program scanline values for waiting */ /* WaEnableFlushTlbInvalidationMode:snb */ - if (INTEL_INFO(dev)->gen == 6) + if (IS_GEN6(dev_priv)) I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT)); /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */ - if (IS_GEN7(dev)) + if (IS_GEN7(dev_priv)) I915_WRITE(GFX_MODE_GEN7, _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) | _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); - if (IS_GEN6(dev)) { + if (IS_GEN6(dev_priv)) { /* From the Sandybridge PRM, volume 1 part 3, page 24: * "If this bit is set, STCunit will have LRA as replacement * policy. [...] This bit must be reset. LRA replacement @@ -1256,19 +1240,18 @@ static int init_render_ring(struct intel_engine_cs *engine) _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); } - if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) + if (INTEL_GEN(dev_priv) >= 6 && INTEL_GEN(dev_priv) < 8) I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); - if (HAS_L3_DPF(dev)) - I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev)); + if (HAS_L3_DPF(dev_priv)) + I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv)); return init_workarounds_ring(engine); } static void render_ring_cleanup(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; if (dev_priv->semaphore_obj) { i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj); @@ -1284,13 +1267,12 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req, { #define MBOX_UPDATE_DWORDS 8 struct intel_engine_cs *signaller = signaller_req->engine; - struct drm_device *dev = signaller->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = signaller_req->i915; struct intel_engine_cs *waiter; enum intel_engine_id id; int ret, num_rings; - num_rings = hweight32(INTEL_INFO(dev)->ring_mask); + num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask); num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; #undef MBOX_UPDATE_DWORDS @@ -1326,13 +1308,12 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req, { #define MBOX_UPDATE_DWORDS 6 struct intel_engine_cs *signaller = signaller_req->engine; - struct drm_device *dev = signaller->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = signaller_req->i915; struct intel_engine_cs *waiter; enum intel_engine_id id; int ret, num_rings; - num_rings = hweight32(INTEL_INFO(dev)->ring_mask); + num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask); num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; #undef MBOX_UPDATE_DWORDS @@ -1365,14 +1346,13 @@ 
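A quick worked example of the mailbox sizing in the two gen8 signal paths above: on a hypothetical part with four enabled engines, a render-ring signaller reserves

	/* num_rings = hweight32(ring_mask) = 4, MBOX_UPDATE_DWORDS = 8 (RCS)
	 * num_dwords += (4 - 1) * 8 = 24 extra dwords */

on top of the request's own dwords; the xcs path uses 6 dwords per mailbox instead.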
static int gen6_signal(struct drm_i915_gem_request *signaller_req, unsigned int num_dwords) { struct intel_engine_cs *signaller = signaller_req->engine; - struct drm_device *dev = signaller->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = signaller_req->i915; struct intel_engine_cs *useless; enum intel_engine_id id; int ret, num_rings; #define MBOX_UPDATE_DWORDS 3 - num_rings = hweight32(INTEL_INFO(dev)->ring_mask); + num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask); num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2); #undef MBOX_UPDATE_DWORDS @@ -1460,10 +1440,9 @@ gen8_render_add_request(struct drm_i915_gem_request *req) return 0; } -static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev, +static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv, u32 seqno) { - struct drm_i915_private *dev_priv = dev->dev_private; return dev_priv->last_seqno < seqno; } @@ -1481,7 +1460,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req, u32 seqno) { struct intel_engine_cs *waiter = waiter_req->engine; - struct drm_i915_private *dev_priv = waiter->dev->dev_private; + struct drm_i915_private *dev_priv = waiter_req->i915; struct i915_hw_ppgtt *ppgtt; int ret; @@ -1535,7 +1514,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req, return ret; /* If seqno wrap happened, omit the wait with no-ops */ - if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) { + if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) { intel_ring_emit(waiter, dw1 | wait_mbox); intel_ring_emit(waiter, seqno); intel_ring_emit(waiter, 0); @@ -1616,7 +1595,7 @@ pc_render_add_request(struct drm_i915_gem_request *req) static void gen6_seqno_barrier(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; /* Workaround to force correct ordering between irq and seqno writes on * ivb (and maybe also on snb) by reading from a CS register (like @@ -1665,8 +1644,7 @@ pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno) static bool gen5_ring_get_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; if (WARN_ON(!intel_irqs_enabled(dev_priv))) @@ -1683,8 +1661,7 @@ gen5_ring_get_irq(struct intel_engine_cs *engine) static void gen5_ring_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -1696,8 +1673,7 @@ gen5_ring_put_irq(struct intel_engine_cs *engine) static bool i9xx_ring_get_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; if (!intel_irqs_enabled(dev_priv)) @@ -1717,8 +1693,7 @@ i9xx_ring_get_irq(struct intel_engine_cs *engine) static void i9xx_ring_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -1733,8 +1708,7 @@ i9xx_ring_put_irq(struct intel_engine_cs *engine) static bool 
i8xx_ring_get_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; if (!intel_irqs_enabled(dev_priv)) @@ -1754,8 +1728,7 @@ i8xx_ring_get_irq(struct intel_engine_cs *engine) static void i8xx_ring_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -1808,8 +1781,7 @@ i9xx_add_request(struct drm_i915_gem_request *req) static bool gen6_ring_get_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; if (WARN_ON(!intel_irqs_enabled(dev_priv))) @@ -1817,10 +1789,10 @@ gen6_ring_get_irq(struct intel_engine_cs *engine) spin_lock_irqsave(&dev_priv->irq_lock, flags); if (engine->irq_refcount++ == 0) { - if (HAS_L3_DPF(dev) && engine->id == RCS) + if (HAS_L3_DPF(dev_priv) && engine->id == RCS) I915_WRITE_IMR(engine, ~(engine->irq_enable_mask | - GT_PARITY_ERROR(dev))); + GT_PARITY_ERROR(dev_priv))); else I915_WRITE_IMR(engine, ~engine->irq_enable_mask); gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask); @@ -1833,14 +1805,13 @@ gen6_ring_get_irq(struct intel_engine_cs *engine) static void gen6_ring_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); if (--engine->irq_refcount == 0) { - if (HAS_L3_DPF(dev) && engine->id == RCS) - I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev)); + if (HAS_L3_DPF(dev_priv) && engine->id == RCS) + I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv)); else I915_WRITE_IMR(engine, ~0); gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask); @@ -1851,8 +1822,7 @@ gen6_ring_put_irq(struct intel_engine_cs *engine) static bool hsw_vebox_get_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; if (WARN_ON(!intel_irqs_enabled(dev_priv))) @@ -1871,8 +1841,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *engine) static void hsw_vebox_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -1886,8 +1855,7 @@ hsw_vebox_put_irq(struct intel_engine_cs *engine) static bool gen8_ring_get_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; if (WARN_ON(!intel_irqs_enabled(dev_priv))) @@ -1895,7 +1863,7 @@ gen8_ring_get_irq(struct intel_engine_cs *engine) spin_lock_irqsave(&dev_priv->irq_lock, flags); if (engine->irq_refcount++ == 0) { - if (HAS_L3_DPF(dev) && engine->id == RCS) { + if (HAS_L3_DPF(dev_priv) && engine->id == RCS) { I915_WRITE_IMR(engine, ~(engine->irq_enable_mask | GT_RENDER_L3_PARITY_ERROR_INTERRUPT)); @@ -1912,13 +1880,12 @@ gen8_ring_get_irq(struct intel_engine_cs 
*engine) static void gen8_ring_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); if (--engine->irq_refcount == 0) { - if (HAS_L3_DPF(dev) && engine->id == RCS) { + if (HAS_L3_DPF(dev_priv) && engine->id == RCS) { I915_WRITE_IMR(engine, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); } else { @@ -2040,12 +2007,12 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req, static void cleanup_phys_status_page(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = to_i915(engine->dev); + struct drm_i915_private *dev_priv = engine->i915; if (!dev_priv->status_page_dmah) return; - drm_pci_free(engine->dev, dev_priv->status_page_dmah); + drm_pci_free(dev_priv->dev, dev_priv->status_page_dmah); engine->status_page.page_addr = NULL; } @@ -2071,7 +2038,7 @@ static int init_status_page(struct intel_engine_cs *engine) unsigned flags; int ret; - obj = i915_gem_object_create(engine->dev, 4096); + obj = i915_gem_object_create(engine->i915->dev, 4096); if (IS_ERR(obj)) { DRM_ERROR("Failed to allocate status page\n"); return PTR_ERR(obj); @@ -2082,7 +2049,7 @@ static int init_status_page(struct intel_engine_cs *engine) goto err_unref; flags = 0; - if (!HAS_LLC(engine->dev)) + if (!HAS_LLC(engine->i915)) /* On g33, we cannot place HWS above 256MiB, so * restrict its pinning to the low mappable arena. * Though this restriction is not documented for @@ -2116,11 +2083,11 @@ err_unref: static int init_phys_status_page(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; if (!dev_priv->status_page_dmah) { dev_priv->status_page_dmah = - drm_pci_alloc(engine->dev, PAGE_SIZE, PAGE_SIZE); + drm_pci_alloc(dev_priv->dev, PAGE_SIZE, PAGE_SIZE); if (!dev_priv->status_page_dmah) return -ENOMEM; } @@ -2146,10 +2113,9 @@ void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf) ringbuf->vma = NULL; } -int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, +int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv, struct intel_ringbuffer *ringbuf) { - struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj = ringbuf->obj; /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ unsigned flags = PIN_OFFSET_BIAS | 4096; @@ -2248,13 +2214,13 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size) * of the buffer. 
*/ ring->effective_size = size; - if (IS_I830(engine->dev) || IS_845G(engine->dev)) + if (IS_I830(engine->i915) || IS_845G(engine->i915)) ring->effective_size -= 2 * CACHELINE_BYTES; ring->last_retired_head = -1; intel_ring_update_space(ring); - ret = intel_alloc_ringbuffer_obj(engine->dev, ring); + ret = intel_alloc_ringbuffer_obj(engine->i915->dev, ring); if (ret) { DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n", engine->name, ret); @@ -2277,12 +2243,13 @@ intel_ringbuffer_free(struct intel_ringbuffer *ring) static int intel_init_ring_buffer(struct drm_device *dev, struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_ringbuffer *ringbuf; int ret; WARN_ON(engine->buffer); - engine->dev = dev; + engine->i915 = dev_priv; INIT_LIST_HEAD(&engine->active_list); INIT_LIST_HEAD(&engine->request_list); INIT_LIST_HEAD(&engine->execlist_queue); @@ -2300,7 +2267,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, } engine->buffer = ringbuf; - if (I915_NEED_GFX_HWS(dev)) { + if (I915_NEED_GFX_HWS(dev_priv)) { ret = init_status_page(engine); if (ret) goto error; @@ -2311,7 +2278,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, goto error; } - ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); + ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf); if (ret) { DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n", engine->name, ret); @@ -2337,11 +2304,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine) if (!intel_engine_initialized(engine)) return; - dev_priv = to_i915(engine->dev); + dev_priv = engine->i915; if (engine->buffer) { intel_stop_engine(engine); - WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0); + WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0); intel_unpin_ringbuffer_obj(engine->buffer); intel_ringbuffer_free(engine->buffer); @@ -2351,7 +2318,7 @@ void intel_cleanup_engine(struct intel_engine_cs *engine) if (engine->cleanup) engine->cleanup(engine); - if (I915_NEED_GFX_HWS(engine->dev)) { + if (I915_NEED_GFX_HWS(dev_priv)) { cleanup_status_page(engine); } else { WARN_ON(engine->id != RCS); @@ -2360,7 +2327,7 @@ void intel_cleanup_engine(struct intel_engine_cs *engine) i915_cmd_parser_fini_ring(engine); i915_gem_batch_pool_fini(&engine->batch_pool); - engine->dev = NULL; + engine->i915 = NULL; } int intel_engine_idle(struct intel_engine_cs *engine) @@ -2526,7 +2493,7 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req) void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno) { - struct drm_i915_private *dev_priv = to_i915(engine->dev); + struct drm_i915_private *dev_priv = engine->i915; /* Our semaphore implementation is strictly monotonic (i.e. 
we proceed * so long as the semaphore value in the register/page is greater @@ -2562,7 +2529,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno) static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine, u32 value) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; /* Every tail move must follow the sequence below */ @@ -2604,7 +2571,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, return ret; cmd = MI_FLUSH_DW; - if (INTEL_INFO(engine->dev)->gen >= 8) + if (INTEL_GEN(req->i915) >= 8) cmd += 1; /* We always require a command barrier so that subsequent @@ -2626,7 +2593,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, intel_ring_emit(engine, cmd); intel_ring_emit(engine, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); - if (INTEL_INFO(engine->dev)->gen >= 8) { + if (INTEL_GEN(req->i915) >= 8) { intel_ring_emit(engine, 0); /* upper addr */ intel_ring_emit(engine, 0); /* value */ } else { @@ -2717,7 +2684,6 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 invalidate, u32 flush) { struct intel_engine_cs *engine = req->engine; - struct drm_device *dev = engine->dev; uint32_t cmd; int ret; @@ -2726,7 +2692,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, return ret; cmd = MI_FLUSH_DW; - if (INTEL_INFO(dev)->gen >= 8) + if (INTEL_GEN(req->i915) >= 8) cmd += 1; /* We always require a command barrier so that subsequent @@ -2747,7 +2713,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, intel_ring_emit(engine, cmd); intel_ring_emit(engine, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(req->i915) >= 8) { intel_ring_emit(engine, 0); /* upper addr */ intel_ring_emit(engine, 0); /* value */ } else { @@ -2772,8 +2738,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev) engine->hw_id = 0; engine->mmio_base = RENDER_RING_BASE; - if (INTEL_INFO(dev)->gen >= 8) { - if (i915_semaphore_is_enabled(dev)) { + if (INTEL_GEN(dev_priv) >= 8) { + if (i915_semaphore_is_enabled(dev_priv)) { obj = i915_gem_object_create(dev, 4096); if (IS_ERR(obj)) { DRM_ERROR("Failed to allocate semaphore bo. 
Disabling semaphores\n"); @@ -2798,17 +2764,17 @@ int intel_init_render_ring_buffer(struct drm_device *dev) engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; engine->get_seqno = ring_get_seqno; engine->set_seqno = ring_set_seqno; - if (i915_semaphore_is_enabled(dev)) { + if (i915_semaphore_is_enabled(dev_priv)) { WARN_ON(!dev_priv->semaphore_obj); engine->semaphore.sync_to = gen8_ring_sync; engine->semaphore.signal = gen8_rcs_signal; GEN8_RING_SEMAPHORE_INIT(engine); } - } else if (INTEL_INFO(dev)->gen >= 6) { + } else if (INTEL_GEN(dev_priv) >= 6) { engine->init_context = intel_rcs_ctx_init; engine->add_request = gen6_add_request; engine->flush = gen7_render_ring_flush; - if (INTEL_INFO(dev)->gen == 6) + if (IS_GEN6(dev_priv)) engine->flush = gen6_render_ring_flush; engine->irq_get = gen6_ring_get_irq; engine->irq_put = gen6_ring_put_irq; @@ -2816,7 +2782,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) engine->irq_seqno_barrier = gen6_seqno_barrier; engine->get_seqno = ring_get_seqno; engine->set_seqno = ring_set_seqno; - if (i915_semaphore_is_enabled(dev)) { + if (i915_semaphore_is_enabled(dev_priv)) { engine->semaphore.sync_to = gen6_ring_sync; engine->semaphore.signal = gen6_signal; /* @@ -2837,7 +2803,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC; engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; } - } else if (IS_GEN5(dev)) { + } else if (IS_GEN5(dev_priv)) { engine->add_request = pc_render_add_request; engine->flush = gen4_render_ring_flush; engine->get_seqno = pc_render_get_seqno; @@ -2848,13 +2814,13 @@ int intel_init_render_ring_buffer(struct drm_device *dev) GT_RENDER_PIPECTL_NOTIFY_INTERRUPT; } else { engine->add_request = i9xx_add_request; - if (INTEL_INFO(dev)->gen < 4) + if (INTEL_GEN(dev_priv) < 4) engine->flush = gen2_render_ring_flush; else engine->flush = gen4_render_ring_flush; engine->get_seqno = ring_get_seqno; engine->set_seqno = ring_set_seqno; - if (IS_GEN2(dev)) { + if (IS_GEN2(dev_priv)) { engine->irq_get = i8xx_ring_get_irq; engine->irq_put = i8xx_ring_put_irq; } else { @@ -2865,15 +2831,15 @@ int intel_init_render_ring_buffer(struct drm_device *dev) } engine->write_tail = ring_write_tail; - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev_priv)) engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; - else if (IS_GEN8(dev)) + else if (IS_GEN8(dev_priv)) engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; - else if (INTEL_INFO(dev)->gen >= 6) + else if (INTEL_GEN(dev_priv) >= 6) engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; - else if (INTEL_INFO(dev)->gen >= 4) + else if (INTEL_GEN(dev_priv) >= 4) engine->dispatch_execbuffer = i965_dispatch_execbuffer; - else if (IS_I830(dev) || IS_845G(dev)) + else if (IS_I830(dev_priv) || IS_845G(dev_priv)) engine->dispatch_execbuffer = i830_dispatch_execbuffer; else engine->dispatch_execbuffer = i915_dispatch_execbuffer; @@ -2881,7 +2847,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) engine->cleanup = render_ring_cleanup; /* Workaround batchbuffer to combat CS tlb bug. 
*/ - if (HAS_BROKEN_CS_TLB(dev)) { + if (HAS_BROKEN_CS_TLB(dev_priv)) { obj = i915_gem_object_create(dev, I830_WA_SIZE); if (IS_ERR(obj)) { DRM_ERROR("Failed to allocate batch bo\n"); @@ -2903,7 +2869,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) if (ret) return ret; - if (INTEL_INFO(dev)->gen >= 5) { + if (INTEL_GEN(dev_priv) >= 5) { ret = intel_init_pipe_control(engine); if (ret) return ret; @@ -2923,24 +2889,24 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) engine->hw_id = 1; engine->write_tail = ring_write_tail; - if (INTEL_INFO(dev)->gen >= 6) { + if (INTEL_GEN(dev_priv) >= 6) { engine->mmio_base = GEN6_BSD_RING_BASE; /* gen6 bsd needs a special wa for tail updates */ - if (IS_GEN6(dev)) + if (IS_GEN6(dev_priv)) engine->write_tail = gen6_bsd_ring_write_tail; engine->flush = gen6_bsd_ring_flush; engine->add_request = gen6_add_request; engine->irq_seqno_barrier = gen6_seqno_barrier; engine->get_seqno = ring_get_seqno; engine->set_seqno = ring_set_seqno; - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; engine->irq_get = gen8_ring_get_irq; engine->irq_put = gen8_ring_put_irq; engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { + if (i915_semaphore_is_enabled(dev_priv)) { engine->semaphore.sync_to = gen8_ring_sync; engine->semaphore.signal = gen8_xcs_signal; GEN8_RING_SEMAPHORE_INIT(engine); @@ -2951,7 +2917,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) engine->irq_put = gen6_ring_put_irq; engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { + if (i915_semaphore_is_enabled(dev_priv)) { engine->semaphore.sync_to = gen6_ring_sync; engine->semaphore.signal = gen6_signal; engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR; @@ -2972,7 +2938,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) engine->add_request = i9xx_add_request; engine->get_seqno = ring_get_seqno; engine->set_seqno = ring_set_seqno; - if (IS_GEN5(dev)) { + if (IS_GEN5(dev_priv)) { engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT; engine->irq_get = gen5_ring_get_irq; engine->irq_put = gen5_ring_put_irq; @@ -3014,7 +2980,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev) engine->irq_put = gen8_ring_put_irq; engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { + if (i915_semaphore_is_enabled(dev_priv)) { engine->semaphore.sync_to = gen8_ring_sync; engine->semaphore.signal = gen8_xcs_signal; GEN8_RING_SEMAPHORE_INIT(engine); @@ -3041,13 +3007,13 @@ int intel_init_blt_ring_buffer(struct drm_device *dev) engine->irq_seqno_barrier = gen6_seqno_barrier; engine->get_seqno = ring_get_seqno; engine->set_seqno = ring_set_seqno; - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; engine->irq_get = gen8_ring_get_irq; engine->irq_put = gen8_ring_put_irq; engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { + if (i915_semaphore_is_enabled(dev_priv)) { engine->semaphore.sync_to = gen8_ring_sync; engine->semaphore.signal = gen8_xcs_signal; GEN8_RING_SEMAPHORE_INIT(engine); @@ -3057,7 +3023,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev) engine->irq_get = gen6_ring_get_irq; engine->irq_put = gen6_ring_put_irq; engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; - if 
(i915_semaphore_is_enabled(dev)) { + if (i915_semaphore_is_enabled(dev_priv)) { engine->semaphore.signal = gen6_signal; engine->semaphore.sync_to = gen6_ring_sync; /* @@ -3102,13 +3068,13 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev) engine->get_seqno = ring_get_seqno; engine->set_seqno = ring_set_seqno; - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT; engine->irq_get = gen8_ring_get_irq; engine->irq_put = gen8_ring_put_irq; engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { + if (i915_semaphore_is_enabled(dev_priv)) { engine->semaphore.sync_to = gen8_ring_sync; engine->semaphore.signal = gen8_xcs_signal; GEN8_RING_SEMAPHORE_INIT(engine); @@ -3118,7 +3084,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev) engine->irq_get = hsw_vebox_get_irq; engine->irq_put = hsw_vebox_put_irq; engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { + if (i915_semaphore_is_enabled(dev_priv)) { engine->semaphore.sync_to = gen6_ring_sync; engine->semaphore.signal = gen6_signal; engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 723ff6160fbb..929e7b4af2a4 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -141,7 +141,8 @@ struct i915_ctx_workarounds { struct drm_i915_gem_object *obj; }; -struct intel_engine_cs { +struct intel_engine_cs { + struct drm_i915_private *i915; const char *name; enum intel_engine_id { RCS = 0, @@ -156,7 +157,6 @@ struct intel_engine_cs { unsigned int hw_id; unsigned int guc_id; /* XXX same as hw_id? 
*/ u32 mmio_base; - struct drm_device *dev; struct intel_ringbuffer *buffer; struct list_head buffers; @@ -350,7 +350,7 @@ struct intel_engine_cs { static inline bool intel_engine_initialized(struct intel_engine_cs *engine) { - return engine->dev != NULL; + return engine->i915 != NULL; } static inline unsigned @@ -425,7 +425,7 @@ intel_write_status_page(struct intel_engine_cs *engine, struct intel_ringbuffer * intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size); -int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, +int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv, struct intel_ringbuffer *ringbuf); void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf); void intel_ringbuffer_free(struct intel_ringbuffer *ring); diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 4f1dfe616856..4ea2bf2c2a4a 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1381,7 +1381,7 @@ void intel_uncore_init(struct drm_device *dev) break; } - if (intel_vgpu_active(dev)) { + if (intel_vgpu_active(dev_priv)) { ASSIGN_WRITE_MMIO_VFUNCS(vgpu); ASSIGN_READ_MMIO_VFUNCS(vgpu); } @@ -1663,8 +1663,8 @@ static int wait_for_register_fw(struct drm_i915_private *dev_priv, static int gen8_request_engine_reset(struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = engine->i915; int ret; - struct drm_i915_private *dev_priv = engine->dev->dev_private; I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET)); @@ -1682,7 +1682,7 @@ static int gen8_request_engine_reset(struct intel_engine_cs *engine) static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)); @@ -1802,10 +1802,10 @@ intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv, { enum forcewake_domains fw_domains; - if (intel_vgpu_active(dev_priv->dev)) + if (intel_vgpu_active(dev_priv)) return 0; - switch (INTEL_INFO(dev_priv)->gen) { + switch (INTEL_GEN(dev_priv)) { case 9: fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg)); break; @@ -1842,10 +1842,10 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv, { enum forcewake_domains fw_domains; - if (intel_vgpu_active(dev_priv->dev)) + if (intel_vgpu_active(dev_priv)) return 0; - switch (INTEL_INFO(dev_priv)->gen) { + switch (INTEL_GEN(dev_priv)) { case 9: fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg)); break; -- cgit v1.2.3-59-g8ed1b From ac840ae53573d9f435c88c131f6707a79aecb466 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 6 May 2016 21:35:55 +0300 Subject: drm/i915: Re-enable GGTT earlier during resume on pre-gen6 platforms MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move the intel_enable_gtt() call to happen before we touch the GTT during resume. Right now it's done way too late. Before commit ebb7c78d358b ("agp/intel-gtt: Only register fake agp driver for gen1") it was actually done earlier on account of also getting called from the resume hook of the fake agp driver. With the fake agp driver no longer getting registered we must move the call up. 
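To make the intended ordering concrete, here is a minimal sketch of the resume path after the change; it simply mirrors the patch below (same function names), with the rest of the resume sequence elided:

	static int i915_drm_resume(struct drm_device *dev)
	{
		struct drm_i915_private *dev_priv = dev->dev_private;
		int ret;

		disable_rpm_wakeref_asserts(dev_priv);

		/* Re-enable the GGTT first, before anything touches GTT mappings. */
		ret = i915_ggtt_enable_hw(dev);
		if (ret)
			DRM_ERROR("failed to re-enable GGTT\n");

		/* ... CSR resume, GTT mapping restore, display init, etc. follow ... */
	}

	int i915_ggtt_enable_hw(struct drm_device *dev)
	{
		/* Only pre-gen6 has a GTT enable bit to poke via intel-gtt. */
		if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
			return -EIO;

		return 0;
	}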
The symptoms I've seen on my 830 machine include lowmem corruption, other kinds of memory corruption, and a straight-up hung machine during or just after resume. I'm not really sure what causes the memory corruption, but so far I've not seen any with this fix. I think we shouldn't really need to call this during init, but we have been doing that, so I've decided to keep the call. However, moving that call earlier could be prudent as well. Doing it right after the intel-gtt probe seems appropriate. Also tested this on 946gz, elk, and ilk, and all seemed quite happy with this change. v2: Reorder init_hw vs. enable_hw functions (Chris) Cc: Daniel Vetter Cc: drm-intel-fixes@lists.freedesktop.org Fixes: ebb7c78d358b ("agp/intel-gtt: Only register fake agp driver for gen1") Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1462559755-353-1-git-send-email-ville.syrjala@linux.intel.com Reviewed-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 6 ++++++ drivers/gpu/drm/i915/i915_drv.c | 5 +++++ drivers/gpu/drm/i915/i915_gem.c | 3 --- drivers/gpu/drm/i915/i915_gem_gtt.c | 8 ++++++++ drivers/gpu/drm/i915/i915_gem_gtt.h | 1 + 5 files changed, 20 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_dma.c') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 46ac1da64a09..4e7ee25b8d3e 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1235,6 +1235,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) if (ret) return ret; + ret = i915_ggtt_enable_hw(dev); + if (ret) { + DRM_ERROR("failed to enable GGTT\n"); + goto out_ggtt; + } + /* WARNING: Apparently we must kick fbdev drivers before vgacon, * otherwise the vga fbdev driver falls over. */ ret = i915_kick_out_firmware_fb(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index a116730c39aa..a4cae74d86d8 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -730,9 +730,14 @@ int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state) static int i915_drm_resume(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + int ret; disable_rpm_wakeref_asserts(dev_priv); + ret = i915_ggtt_enable_hw(dev); + if (ret) + DRM_ERROR("failed to re-enable GGTT\n"); + intel_csr_ucode_resume(dev_priv); mutex_lock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d986a3598827..d2195c7f78b5 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4821,9 +4821,6 @@ i915_gem_init_hw(struct drm_device *dev) struct intel_engine_cs *engine; int ret; - if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) - return -EIO; - /* Double layer security blanket, see i915_gem_init() */ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 667f0e859671..52f950b6adda 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -3232,6 +3232,14 @@ out_gtt_cleanup: return ret; } +int i915_ggtt_enable_hw(struct drm_device *dev) +{ + if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) + return -EIO; + + return 0; +} + void i915_gem_restore_gtt_mappings(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 18af3af18754..35ff958f9de6 100644 --- 
a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -517,6 +517,7 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n) } int i915_ggtt_init_hw(struct drm_device *dev); +int i915_ggtt_enable_hw(struct drm_device *dev); void i915_gem_init_ggtt(struct drm_device *dev); void i915_ggtt_cleanup_hw(struct drm_device *dev); -- cgit v1.2.3-59-g8ed1b From dc97997a21fe1708fc93021baa4ba90db7a3b57f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 10 May 2016 14:10:04 +0100 Subject: drm/i915: Use drm_i915_private as the native pointer for intel_uncore.c Pass drm_i915_private to the uncore init/fini routines and the routines they call, as it is their native type.

   text    data     bss      dec     hex filename
6309978 3578778  696320 10585076  a183f4 vmlinux
6309530 3578778  696320 10584628  a18234 vmlinux

A modest 400 bytes of savings, but 60 lines of code deleted! Signed-off-by: Chris Wilson Reviewed-by: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/1462885804-26750-1-git-send-email-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_debugfs.c | 4 +- drivers/gpu/drm/i915/i915_dma.c | 13 +- drivers/gpu/drm/i915/i915_drv.c | 26 +-- drivers/gpu/drm/i915/i915_drv.h | 18 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 9 +- drivers/gpu/drm/i915/i915_gem_gtt.h | 2 +- drivers/gpu/drm/i915/i915_guc_submission.c | 3 +- drivers/gpu/drm/i915/i915_irq.c | 5 +- drivers/gpu/drm/i915/i915_sysfs.c | 9 +- drivers/gpu/drm/i915/i915_vgpu.c | 7 +- drivers/gpu/drm/i915/i915_vgpu.h | 2 +- drivers/gpu/drm/i915/intel_display.c | 9 +- drivers/gpu/drm/i915/intel_drv.h | 22 +- drivers/gpu/drm/i915/intel_pm.c | 342 ++++++++++++----------------- drivers/gpu/drm/i915/intel_uncore.c | 162 +++++++------- 15 files changed, 285 insertions(+), 348 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_dma.c') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 85ae5423de1b..f01848dbebc5 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -4981,7 +4981,7 @@ i915_max_freq_set(void *data, u64 val) dev_priv->rps.max_freq_softlimit = val; - intel_set_rps(dev, val); + intel_set_rps(dev_priv, val); mutex_unlock(&dev_priv->rps.hw_lock); @@ -5048,7 +5048,7 @@ i915_min_freq_set(void *data, u64 val) dev_priv->rps.min_freq_softlimit = val; - intel_set_rps(dev, val); + intel_set_rps(dev_priv, val); mutex_unlock(&dev_priv->rps.hw_lock); diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 4e7ee25b8d3e..b55fef3679d9 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -223,8 +223,7 @@ static int i915_getparam(struct drm_device *dev, void *data, return -ENODEV; break; case I915_PARAM_HAS_GPU_RESET: - value = i915.enable_hangcheck && - intel_has_gpu_reset(dev); + value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv); break; case I915_PARAM_HAS_RESOURCE_STREAMER: value = HAS_RESOURCE_STREAMER(dev); @@ -427,6 +426,8 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { static void i915_gem_fini(struct drm_device *dev) { + struct drm_i915_private *dev_priv = to_i915(dev); + /* * Neither the BIOS, ourselves or any other kernel * expects the system to be in execlists mode on startup, @@ -447,7 +448,7 @@ static void i915_gem_fini(struct drm_device *dev) * machine in an unusable condition. 
*/ if (HAS_HW_CONTEXTS(dev)) { - int reset = intel_gpu_reset(dev, ALL_ENGINES); + int reset = intel_gpu_reset(dev_priv, ALL_ENGINES); WARN_ON(reset && reset != -ENODEV); } @@ -1189,7 +1190,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) if (ret < 0) goto put_bridge; - intel_uncore_init(dev); + intel_uncore_init(dev_priv); return 0; @@ -1207,7 +1208,7 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv) { struct drm_device *dev = dev_priv->dev; - intel_uncore_fini(dev); + intel_uncore_fini(dev_priv); i915_mmio_cleanup(dev); pci_dev_put(dev_priv->bridge_dev); } @@ -1288,7 +1289,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); - intel_uncore_sanitize(dev); + intel_uncore_sanitize(dev_priv); intel_opregion_setup(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index a4cae74d86d8..a676c6dbc0e7 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -606,7 +606,7 @@ static int i915_drm_suspend(struct drm_device *dev) intel_guc_suspend(dev); - intel_suspend_gt_powersave(dev); + intel_suspend_gt_powersave(dev_priv); intel_display_suspend(dev); @@ -626,7 +626,7 @@ static int i915_drm_suspend(struct drm_device *dev) opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; intel_opregion_notify_adapter(dev, opregion_target_state); - intel_uncore_forcewake_reset(dev, false); + intel_uncore_forcewake_reset(dev_priv, false); intel_opregion_fini(dev); intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); @@ -866,9 +866,9 @@ static int i915_drm_resume_early(struct drm_device *dev) DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", ret); - intel_uncore_early_sanitize(dev, true); + intel_uncore_early_sanitize(dev_priv, true); - if (IS_BROXTON(dev)) { + if (IS_BROXTON(dev_priv)) { if (!dev_priv->suspended_to_idle) gen9_sanitize_dc_state(dev_priv); bxt_disable_dc9(dev_priv); @@ -876,7 +876,7 @@ static int i915_drm_resume_early(struct drm_device *dev) hsw_disable_pc8(dev_priv); } - intel_uncore_sanitize(dev); + intel_uncore_sanitize(dev_priv); if (IS_BROXTON(dev_priv) || !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload)) @@ -926,7 +926,7 @@ int i915_reset(struct drm_i915_private *dev_priv) unsigned reset_counter; int ret; - intel_reset_gt_powersave(dev); + intel_reset_gt_powersave(dev_priv); mutex_lock(&dev->struct_mutex); @@ -942,7 +942,7 @@ int i915_reset(struct drm_i915_private *dev_priv) i915_gem_reset(dev); - ret = intel_gpu_reset(dev, ALL_ENGINES); + ret = intel_gpu_reset(dev_priv, ALL_ENGINES); /* Also reset the gpu hangman. */ if (error->stop_rings != 0) { @@ -997,7 +997,7 @@ int i915_reset(struct drm_i915_private *dev_priv) * of re-init after reset. 
*/ if (INTEL_INFO(dev)->gen > 5) - intel_enable_gt_powersave(dev); + intel_enable_gt_powersave(dev_priv); return 0; @@ -1474,7 +1474,7 @@ static int intel_runtime_suspend(struct device *device) struct drm_i915_private *dev_priv = dev->dev_private; int ret; - if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev)))) + if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6()))) return -ENODEV; if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) @@ -1513,7 +1513,7 @@ static int intel_runtime_suspend(struct device *device) intel_guc_suspend(dev); - intel_suspend_gt_powersave(dev); + intel_suspend_gt_powersave(dev_priv); intel_runtime_pm_disable_interrupts(dev_priv); ret = 0; @@ -1535,7 +1535,7 @@ static int intel_runtime_suspend(struct device *device) return ret; } - intel_uncore_forcewake_reset(dev, false); + intel_uncore_forcewake_reset(dev_priv, false); enable_rpm_wakeref_asserts(dev_priv); WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); @@ -1616,7 +1616,7 @@ static int intel_runtime_resume(struct device *device) * we can do is to hope that things will still work (and disable RPM). */ i915_gem_init_swizzling(dev); - gen6_update_ring_freq(dev); + gen6_update_ring_freq(dev_priv); intel_runtime_pm_enable_interrupts(dev_priv); @@ -1628,7 +1628,7 @@ static int intel_runtime_resume(struct device *device) if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) intel_hpd_init(dev_priv); - intel_enable_gt_powersave(dev); + intel_enable_gt_powersave(dev_priv); enable_rpm_wakeref_asserts(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 26e7de415966..4abd39f32c0f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2777,8 +2777,8 @@ extern void i915_driver_postclose(struct drm_device *dev, extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); #endif -extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask); -extern bool intel_has_gpu_reset(struct drm_device *dev); +extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask); +extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv); extern int i915_reset(struct drm_i915_private *dev_priv); extern int intel_guc_reset(struct drm_i915_private *dev_priv); extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); @@ -2807,14 +2807,15 @@ extern void intel_irq_init(struct drm_i915_private *dev_priv); int intel_irq_install(struct drm_i915_private *dev_priv); void intel_irq_uninstall(struct drm_i915_private *dev_priv); -extern void intel_uncore_sanitize(struct drm_device *dev); -extern void intel_uncore_early_sanitize(struct drm_device *dev, +extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv); +extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv, bool restore_forcewake); -extern void intel_uncore_init(struct drm_device *dev); +extern void intel_uncore_init(struct drm_i915_private *dev_priv); extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv); extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv); -extern void intel_uncore_fini(struct drm_device *dev); -extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore); +extern void intel_uncore_fini(struct drm_i915_private *dev_priv); +extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv, + bool restore); const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); void 
intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, enum forcewake_domains domains); @@ -3547,11 +3548,10 @@ extern void i915_redisable_vga(struct drm_device *dev); extern void i915_redisable_vga_power_on(struct drm_device *dev); extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val); extern void intel_init_pch_refclk(struct drm_device *dev); -extern void intel_set_rps(struct drm_device *dev, u8 val); +extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val); extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable); extern void intel_detect_pch(struct drm_device *dev); -extern int intel_enable_rc6(const struct drm_device *dev); extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv); int i915_reg_read_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 52f950b6adda..5fb14c835543 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2279,12 +2279,11 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) dev_priv->mm.interruptible = interruptible; } -void i915_check_and_clear_faults(struct drm_device *dev) +void i915_check_and_clear_faults(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; - if (INTEL_INFO(dev)->gen < 6) + if (INTEL_INFO(dev_priv)->gen < 6) return; for_each_engine(engine, dev_priv) { @@ -2328,7 +2327,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev) if (INTEL_INFO(dev)->gen < 6) return; - i915_check_and_clear_faults(dev); + i915_check_and_clear_faults(dev_priv); ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total, true); @@ -3248,7 +3247,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) struct i915_vma *vma; bool flush; - i915_check_and_clear_faults(dev); + i915_check_and_clear_faults(dev_priv); /* First fill our portion of the GTT with scratch pages */ ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total, diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 35ff958f9de6..62be77cac5cd 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -536,7 +536,7 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt) kref_put(&ppgtt->ref, i915_ppgtt_release); } -void i915_check_and_clear_faults(struct drm_device *dev); +void i915_check_and_clear_faults(struct drm_i915_private *dev_priv); void i915_gem_suspend_gtt_mappings(struct drm_device *dev); void i915_gem_restore_gtt_mappings(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index a304b0ead099..169242a8adff 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -158,8 +158,7 @@ static int host2guc_sample_forcewake(struct intel_guc *guc, data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE; /* WaRsDisableCoarsePowerGating:skl,bxt */ - if (!intel_enable_rc6(dev) || - NEEDS_WaRsDisableCoarsePowerGating(dev)) + if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev)) data[1] = 0; else /* bit 0 and 1 are for Render and Media domain separately */ diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index a163037ddbd8..2fa26159d6e5 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -336,9 +336,8 
@@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) __gen6_disable_pm_irq(dev_priv, mask); } -void gen6_reset_rps_interrupts(struct drm_device *dev) +void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; i915_reg_t reg = gen6_pm_iir(dev_priv); spin_lock_irq(&dev_priv->irq_lock); @@ -1168,7 +1167,7 @@ static void gen6_pm_rps_work(struct work_struct *work) new_delay += adj; new_delay = clamp_t(int, new_delay, min, max); - intel_set_rps(dev_priv->dev, new_delay); + intel_set_rps(dev_priv, new_delay); mutex_unlock(&dev_priv->rps.hw_lock); out: diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 2d576b7ff299..37b6444b8e22 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -43,7 +43,7 @@ static u32 calc_residency(struct drm_device *dev, u64 units = 128ULL, div = 100000ULL; u32 ret; - if (!intel_enable_rc6(dev)) + if (!intel_enable_rc6()) return 0; intel_runtime_pm_get(dev_priv); @@ -70,8 +70,7 @@ static u32 calc_residency(struct drm_device *dev, static ssize_t show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf) { - struct drm_minor *dminor = dev_to_drm_minor(kdev); - return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev)); + return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6()); } static ssize_t @@ -397,7 +396,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, /* We still need *_set_rps to process the new max_delay and * update the interrupt limits and PMINTRMSK even though * frequency request may be unchanged. */ - intel_set_rps(dev, val); + intel_set_rps(dev_priv, val); mutex_unlock(&dev_priv->rps.hw_lock); @@ -461,7 +460,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, /* We still need *_set_rps to process the new min_delay and * update the interrupt limits and PMINTRMSK even though * frequency request may be unchanged. */ - intel_set_rps(dev, val); + intel_set_rps(dev_priv, val); mutex_unlock(&dev_priv->rps.hw_lock); diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index d02efb8cad4d..d5a7a5e7ee7e 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -58,15 +58,14 @@ * This function is called at the initialization stage, to detect whether * running on a vGPU. 
*/ -void i915_check_vgpu(struct drm_device *dev) +void i915_check_vgpu(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); uint64_t magic; uint32_t version; BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); - if (!IS_HASWELL(dev)) + if (!IS_HASWELL(dev_priv)) return; magic = __raw_i915_read64(dev_priv, vgtif_reg(magic)); @@ -136,7 +135,7 @@ static int vgt_balloon_space(struct drm_mm *mm, /** * intel_vgt_balloon - balloon out reserved graphics address trunks - * @dev: drm device + * @dev_priv: i915 device * * This function is called at the initialization stage, to balloon out the * graphic address space allocated to other vGPUs, by marking these spaces as diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h index 3c83b47b5f69..21ffcfea5f5d 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.h +++ b/drivers/gpu/drm/i915/i915_vgpu.h @@ -110,7 +110,7 @@ struct vgt_if { #define VGT_DRV_DISPLAY_NOT_READY 0 #define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */ -extern void i915_check_vgpu(struct drm_device *dev); +extern void i915_check_vgpu(struct drm_i915_private *dev_priv); extern int intel_vgt_balloon(struct drm_device *dev); extern void intel_vgt_deballoon(void); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d3f6e58c199e..7215dadf2aa3 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -15283,7 +15283,7 @@ void intel_modeset_init_hw(struct drm_device *dev) dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq; intel_init_clock_gating(dev); - intel_enable_gt_powersave(dev); + intel_enable_gt_powersave(dev_priv); } /* @@ -16011,11 +16011,12 @@ retry: void intel_modeset_gem_init(struct drm_device *dev) { + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *c; struct drm_i915_gem_object *obj; int ret; - intel_init_gt_powersave(dev); + intel_init_gt_powersave(dev_priv); intel_modeset_init_hw(dev); @@ -16062,7 +16063,7 @@ void intel_modeset_cleanup(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_connector *connector; - intel_disable_gt_powersave(dev); + intel_disable_gt_powersave(dev_priv); intel_backlight_unregister(dev); @@ -16094,7 +16095,7 @@ void intel_modeset_cleanup(struct drm_device *dev) intel_cleanup_overlay(dev); - intel_cleanup_gt_powersave(dev); + intel_cleanup_gt_powersave(dev_priv); intel_teardown_gmbus(dev); } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 8ff711474b7d..71b30a47ee2d 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1032,7 +1032,7 @@ void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); -void gen6_reset_rps_interrupts(struct drm_device *dev); +void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv); void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv); void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv); u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask); @@ -1612,13 +1612,13 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv); void intel_pm_setup(struct drm_device *dev); void intel_gpu_ips_init(struct drm_i915_private *dev_priv); void 
intel_gpu_ips_teardown(void); -void intel_init_gt_powersave(struct drm_device *dev); -void intel_cleanup_gt_powersave(struct drm_device *dev); -void intel_enable_gt_powersave(struct drm_device *dev); -void intel_disable_gt_powersave(struct drm_device *dev); -void intel_suspend_gt_powersave(struct drm_device *dev); -void intel_reset_gt_powersave(struct drm_device *dev); -void gen6_update_ring_freq(struct drm_device *dev); +void intel_init_gt_powersave(struct drm_i915_private *dev_priv); +void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv); +void intel_enable_gt_powersave(struct drm_i915_private *dev_priv); +void intel_disable_gt_powersave(struct drm_i915_private *dev_priv); +void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv); +void intel_reset_gt_powersave(struct drm_i915_private *dev_priv); +void gen6_update_ring_freq(struct drm_i915_private *dev_priv); void gen6_rps_busy(struct drm_i915_private *dev_priv); void gen6_rps_reset_ei(struct drm_i915_private *dev_priv); void gen6_rps_idle(struct drm_i915_private *dev_priv); @@ -1633,7 +1633,11 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, struct skl_ddb_allocation *ddb /* out */); uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config); bool ilk_disable_lp_wm(struct drm_device *dev); -int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6); +int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6); +static inline int intel_enable_rc6(void) +{ + return i915.enable_rc6; +} /* intel_sdvo.c */ bool intel_sdvo_init(struct drm_device *dev, diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 2365db0e5a58..5d80a2b0ffb4 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4419,12 +4419,10 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val) /* gen6_set_rps is called to update the frequency request, but should also be * called when the range (min_delay and max_delay) is modified so that we can * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. 
*/ -static void gen6_set_rps(struct drm_device *dev, u8 val) +static void gen6_set_rps(struct drm_i915_private *dev_priv, u8 val) { - struct drm_i915_private *dev_priv = dev->dev_private; - /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) return; WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); @@ -4437,10 +4435,10 @@ static void gen6_set_rps(struct drm_device *dev, u8 val) if (val != dev_priv->rps.cur_freq) { gen6_set_rps_thresholds(dev_priv, val); - if (IS_GEN9(dev)) + if (IS_GEN9(dev_priv)) I915_WRITE(GEN6_RPNSWREQ, GEN9_FREQUENCY(val)); - else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(val)); else @@ -4462,15 +4460,13 @@ static void gen6_set_rps(struct drm_device *dev, u8 val) trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); } -static void valleyview_set_rps(struct drm_device *dev, u8 val) +static void valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val) { - struct drm_i915_private *dev_priv = dev->dev_private; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); WARN_ON(val > dev_priv->rps.max_freq); WARN_ON(val < dev_priv->rps.min_freq); - if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1), + if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1), "Odd GPU freq value\n")) val &= ~1; @@ -4503,7 +4499,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) /* Wake up the media well, as that takes a lot less * power than the Render well. */ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA); - valleyview_set_rps(dev_priv->dev, val); + valleyview_set_rps(dev_priv, val); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA); } @@ -4521,14 +4517,12 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv) void gen6_rps_idle(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; - mutex_lock(&dev_priv->rps.hw_lock); if (dev_priv->rps.enabled) { - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) vlv_set_rps_idle(dev_priv); else - gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); + gen6_set_rps(dev_priv, dev_priv->rps.idle_freq); dev_priv->rps.last_adj = 0; I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); } @@ -4576,49 +4570,39 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv, spin_unlock(&dev_priv->rps.client_lock); } -void intel_set_rps(struct drm_device *dev, u8 val) +void intel_set_rps(struct drm_i915_private *dev_priv, u8 val) { - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) - valleyview_set_rps(dev, val); + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + valleyview_set_rps(dev_priv, val); else - gen6_set_rps(dev, val); + gen6_set_rps(dev_priv, val); } -static void gen9_disable_rc6(struct drm_device *dev) +static void gen9_disable_rc6(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - I915_WRITE(GEN6_RC_CONTROL, 0); I915_WRITE(GEN9_PG_ENABLE, 0); } -static void gen9_disable_rps(struct drm_device *dev) +static void gen9_disable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - I915_WRITE(GEN6_RP_CONTROL, 0); } -static void gen6_disable_rps(struct drm_device *dev) +static void gen6_disable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - I915_WRITE(GEN6_RC_CONTROL, 0); I915_WRITE(GEN6_RPNSWREQ, 1 << 31); I915_WRITE(GEN6_RP_CONTROL, 
0); } -static void cherryview_disable_rps(struct drm_device *dev) +static void cherryview_disable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - I915_WRITE(GEN6_RC_CONTROL, 0); } -static void valleyview_disable_rps(struct drm_device *dev) +static void valleyview_disable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - /* we're doing forcewake before Disabling RC6, * This what the BIOS expects when going into suspend */ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); @@ -4628,15 +4612,15 @@ static void valleyview_disable_rps(struct drm_device *dev) intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } -static void intel_print_rc6_info(struct drm_device *dev, u32 mode) +static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode) { - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1))) mode = GEN6_RC_CTL_RC6_ENABLE; else mode = 0; } - if (HAS_RC6p(dev)) + if (HAS_RC6p(dev_priv)) DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n", onoff(mode & GEN6_RC_CTL_RC6_ENABLE), onoff(mode & GEN6_RC_CTL_RC6p_ENABLE), @@ -4647,9 +4631,8 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode) onoff(mode & GEN6_RC_CTL_RC6_ENABLE)); } -static bool bxt_check_bios_rc6_setup(const struct drm_device *dev) +static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; bool enable_rc6 = true; unsigned long rc6_ctx_base; @@ -4690,16 +4673,16 @@ static bool bxt_check_bios_rc6_setup(const struct drm_device *dev) return enable_rc6; } -int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) +int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6) { /* No RC6 before Ironlake and code is gone for ilk. 
*/ - if (INTEL_INFO(dev)->gen < 6) + if (INTEL_INFO(dev_priv)->gen < 6) return 0; if (!enable_rc6) return 0; - if (IS_BROXTON(dev) && !bxt_check_bios_rc6_setup(dev)) { + if (IS_BROXTON(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) { DRM_INFO("RC6 disabled by BIOS\n"); return 0; } @@ -4708,7 +4691,7 @@ int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) if (enable_rc6 >= 0) { int mask; - if (HAS_RC6p(dev)) + if (HAS_RC6p(dev_priv)) mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE | INTEL_RC6pp_ENABLE; else @@ -4721,20 +4704,14 @@ int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) return enable_rc6 & mask; } - if (IS_IVYBRIDGE(dev)) + if (IS_IVYBRIDGE(dev_priv)) return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); return INTEL_RC6_ENABLE; } -int intel_enable_rc6(const struct drm_device *dev) -{ - return i915.enable_rc6; -} - -static void gen6_init_rps_frequencies(struct drm_device *dev) +static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; uint32_t rp_state_cap; u32 ddcc_status = 0; int ret; @@ -4742,7 +4719,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev) /* All of these values are in units of 50MHz */ dev_priv->rps.cur_freq = 0; /* static values from HW: RP0 > RP1 > RPn (min_freq) */ - if (IS_BROXTON(dev)) { + if (IS_BROXTON(dev_priv)) { rp_state_cap = I915_READ(BXT_RP_STATE_CAP); dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff; dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; @@ -4758,8 +4735,8 @@ static void gen6_init_rps_frequencies(struct drm_device *dev) dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; - if (IS_HASWELL(dev) || IS_BROADWELL(dev) || - IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) || + IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { ret = sandybridge_pcode_read(dev_priv, HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, &ddcc_status); @@ -4771,7 +4748,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev) dev_priv->rps.max_freq); } - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { /* Store the frequency values in 16.66 MHZ units, which is the natural hardware unit for SKL */ dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER; @@ -4788,7 +4765,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev) dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; if (dev_priv->rps.min_freq_softlimit == 0) { - if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) dev_priv->rps.min_freq_softlimit = max_t(int, dev_priv->rps.efficient_freq, intel_freq_opcode(dev_priv, 450)); @@ -4799,16 +4776,14 @@ static void gen6_init_rps_frequencies(struct drm_device *dev) } /* See the Gen9_GT_PM_Programming_Guide doc for the below */ -static void gen9_enable_rps(struct drm_device *dev) +static void gen9_enable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); - gen6_init_rps_frequencies(dev); + gen6_init_rps_frequencies(dev_priv); /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { /* * BIOS could leave the Hw Turbo enabled, so need to explicitly * clear out the Control register just to avoid inconsitency @@ -4818,7 +4793,7 @@ static void gen9_enable_rps(struct 
drm_device *dev) * if the Turbo is left enabled in the Control register, as the * Up/Down interrupts would remain masked. */ - gen9_disable_rps(dev); + gen9_disable_rps(dev_priv); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); return; } @@ -4837,14 +4812,13 @@ static void gen9_enable_rps(struct drm_device *dev) * Up/Down EI & threshold registers, as well as the RP_CONTROL, * RP_INTERRUPT_LIMITS & RPNSWREQ registers */ dev_priv->rps.power = HIGH_POWER; /* force a reset */ - gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); + gen6_set_rps(dev_priv, dev_priv->rps.idle_freq); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } -static void gen9_enable_rc6(struct drm_device *dev) +static void gen9_enable_rc6(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; uint32_t rc6_mask = 0; @@ -4861,7 +4835,7 @@ static void gen9_enable_rc6(struct drm_device *dev) /* 2b: Program RC6 thresholds.*/ /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */ - if (IS_SKYLAKE(dev)) + if (IS_SKYLAKE(dev_priv)) I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16); else I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); @@ -4870,7 +4844,7 @@ static void gen9_enable_rc6(struct drm_device *dev) for_each_engine(engine, dev_priv) I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); - if (HAS_GUC_UCODE(dev)) + if (HAS_GUC_UCODE(dev_priv)) I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA); I915_WRITE(GEN6_RC_SLEEP, 0); @@ -4880,12 +4854,12 @@ static void gen9_enable_rc6(struct drm_device *dev) I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25); /* 3a: Enable RC6 */ - if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) + if (intel_enable_rc6() & INTEL_RC6_ENABLE) rc6_mask = GEN6_RC_CTL_RC6_ENABLE; DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE)); /* WaRsUseTimeoutMode */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) || + IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */ I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | GEN7_RC_CTL_TO_MODE | @@ -4901,19 +4875,17 @@ static void gen9_enable_rc6(struct drm_device *dev) * 3b: Enable Coarse Power Gating only when RC6 is enabled. * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6. */ - if (NEEDS_WaRsDisableCoarsePowerGating(dev)) + if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv)) I915_WRITE(GEN9_PG_ENABLE, 0); else I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? 
(GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - } -static void gen8_enable_rps(struct drm_device *dev) +static void gen8_enable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; uint32_t rc6_mask = 0; @@ -4928,7 +4900,7 @@ static void gen8_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC_CONTROL, 0); /* Initialize rps frequencies */ - gen6_init_rps_frequencies(dev); + gen6_init_rps_frequencies(dev_priv); /* 2b: Program RC6 thresholds.*/ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); @@ -4937,16 +4909,16 @@ static void gen8_enable_rps(struct drm_device *dev) for_each_engine(engine, dev_priv) I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); I915_WRITE(GEN6_RC_SLEEP, 0); - if (IS_BROADWELL(dev)) + if (IS_BROADWELL(dev_priv)) I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */ else I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ /* 3: Enable RC6 */ - if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) + if (intel_enable_rc6() & INTEL_RC6_ENABLE) rc6_mask = GEN6_RC_CTL_RC6_ENABLE; - intel_print_rc6_info(dev, rc6_mask); - if (IS_BROADWELL(dev)) + intel_print_rc6_info(dev_priv, rc6_mask); + if (IS_BROADWELL(dev_priv)) I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | GEN7_RC_CTL_TO_MODE | rc6_mask); @@ -4987,14 +4959,13 @@ static void gen8_enable_rps(struct drm_device *dev) /* 6: Ring frequency + overclocking (our driver does this later */ dev_priv->rps.power = HIGH_POWER; /* force a reset */ - gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); + gen6_set_rps(dev_priv, dev_priv->rps.idle_freq); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } -static void gen6_enable_rps(struct drm_device *dev) +static void gen6_enable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; u32 rc6vids, pcu_mbox = 0, rc6_mask = 0; u32 gtfifodbg; @@ -5021,7 +4992,7 @@ static void gen6_enable_rps(struct drm_device *dev) intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); /* Initialize rps frequencies */ - gen6_init_rps_frequencies(dev); + gen6_init_rps_frequencies(dev_priv); /* disable the counters and set deterministic thresholds */ I915_WRITE(GEN6_RC_CONTROL, 0); @@ -5037,7 +5008,7 @@ static void gen6_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC_SLEEP, 0); I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); - if (IS_IVYBRIDGE(dev)) + if (IS_IVYBRIDGE(dev_priv)) I915_WRITE(GEN6_RC6_THRESHOLD, 125000); else I915_WRITE(GEN6_RC6_THRESHOLD, 50000); @@ -5045,12 +5016,12 @@ static void gen6_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ /* Check if we are enabling RC6 */ - rc6_mode = intel_enable_rc6(dev_priv->dev); + rc6_mode = intel_enable_rc6(); if (rc6_mode & INTEL_RC6_ENABLE) rc6_mask |= GEN6_RC_CTL_RC6_ENABLE; /* We don't use those on Haswell */ - if (!IS_HASWELL(dev)) { + if (!IS_HASWELL(dev_priv)) { if (rc6_mode & INTEL_RC6p_ENABLE) rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; @@ -5058,7 +5029,7 @@ static void gen6_enable_rps(struct drm_device *dev) rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; } - intel_print_rc6_info(dev, rc6_mask); + intel_print_rc6_info(dev_priv, rc6_mask); I915_WRITE(GEN6_RC_CONTROL, rc6_mask | @@ -5082,13 +5053,13 @@ static void gen6_enable_rps(struct drm_device *dev) } dev_priv->rps.power = HIGH_POWER; /* force a reset */ - gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); + gen6_set_rps(dev_priv, 
dev_priv->rps.idle_freq); rc6vids = 0; ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); - if (IS_GEN6(dev) && ret) { + if (IS_GEN6(dev_priv) && ret) { DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); - } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { + } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n", GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); rc6vids &= 0xffff00; @@ -5101,9 +5072,8 @@ static void gen6_enable_rps(struct drm_device *dev) intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } -static void __gen6_update_ring_freq(struct drm_device *dev) +static void __gen6_update_ring_freq(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; int min_freq = 15; unsigned int gpu_freq; unsigned int max_ia_freq, min_ring_freq; @@ -5132,7 +5102,7 @@ static void __gen6_update_ring_freq(struct drm_device *dev) /* convert DDR frequency from units of 266.6MHz to bandwidth */ min_ring_freq = mult_frac(min_ring_freq, 8, 3); - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { /* Convert GT frequency to 50 HZ units */ min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER; max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER; @@ -5150,16 +5120,16 @@ static void __gen6_update_ring_freq(struct drm_device *dev) int diff = max_gpu_freq - gpu_freq; unsigned int ia_freq = 0, ring_freq = 0; - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { /* * ring_freq = 2 * GT. ring_freq is in 100MHz units * No floor required for ring frequency on SKL. */ ring_freq = gpu_freq; - } else if (INTEL_INFO(dev)->gen >= 8) { + } else if (INTEL_INFO(dev_priv)->gen >= 8) { /* max(2 * GT, DDR). 
NB: GT is 50MHz units */ ring_freq = max(min_ring_freq, gpu_freq); - } else if (IS_HASWELL(dev)) { + } else if (IS_HASWELL(dev_priv)) { ring_freq = mult_frac(gpu_freq, 5, 4); ring_freq = max(min_ring_freq, ring_freq); /* leave ia_freq as the default, chosen by cpufreq */ @@ -5186,26 +5156,23 @@ static void __gen6_update_ring_freq(struct drm_device *dev) } } -void gen6_update_ring_freq(struct drm_device *dev) +void gen6_update_ring_freq(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (!HAS_CORE_RING_FREQ(dev)) + if (!HAS_CORE_RING_FREQ(dev_priv)) return; mutex_lock(&dev_priv->rps.hw_lock); - __gen6_update_ring_freq(dev); + __gen6_update_ring_freq(dev_priv); mutex_unlock(&dev_priv->rps.hw_lock); } static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; u32 val, rp0; val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE); - switch (INTEL_INFO(dev)->eu_total) { + switch (INTEL_INFO(dev_priv)->eu_total) { case 8: /* (2 * 4) config */ rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT); @@ -5316,9 +5283,8 @@ static void cherryview_check_pctx(struct drm_i915_private *dev_priv) WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0); } -static void cherryview_setup_pctx(struct drm_device *dev) +static void cherryview_setup_pctx(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; unsigned long pctx_paddr, paddr; u32 pcbr; @@ -5337,15 +5303,14 @@ static void cherryview_setup_pctx(struct drm_device *dev) DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); } -static void valleyview_setup_pctx(struct drm_device *dev) +static void valleyview_setup_pctx(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *pctx; unsigned long pctx_paddr; u32 pcbr; int pctx_size = 24*1024; - mutex_lock(&dev->struct_mutex); + mutex_lock(&dev_priv->dev->struct_mutex); pcbr = I915_READ(VLV_PCBR); if (pcbr) { @@ -5370,7 +5335,7 @@ static void valleyview_setup_pctx(struct drm_device *dev) * overlap with other ranges, such as the frame buffer, protected * memory, or any other relevant ranges. 
*/ - pctx = i915_gem_object_create_stolen(dev, pctx_size); + pctx = i915_gem_object_create_stolen(dev_priv->dev, pctx_size); if (!pctx) { DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); goto out; @@ -5382,13 +5347,11 @@ static void valleyview_setup_pctx(struct drm_device *dev) out: DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); dev_priv->vlv_pctx = pctx; - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev_priv->dev->struct_mutex); } -static void valleyview_cleanup_pctx(struct drm_device *dev) +static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (WARN_ON(!dev_priv->vlv_pctx)) return; @@ -5407,12 +5370,11 @@ static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv) dev_priv->rps.gpll_ref_freq); } -static void valleyview_init_gt_powersave(struct drm_device *dev) +static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 val; - valleyview_setup_pctx(dev); + valleyview_setup_pctx(dev_priv); vlv_init_gpll_ref_freq(dev_priv); @@ -5466,12 +5428,11 @@ static void valleyview_init_gt_powersave(struct drm_device *dev) mutex_unlock(&dev_priv->rps.hw_lock); } -static void cherryview_init_gt_powersave(struct drm_device *dev) +static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 val; - cherryview_setup_pctx(dev); + cherryview_setup_pctx(dev_priv); vlv_init_gpll_ref_freq(dev_priv); @@ -5531,14 +5492,13 @@ static void cherryview_init_gt_powersave(struct drm_device *dev) mutex_unlock(&dev_priv->rps.hw_lock); } -static void valleyview_cleanup_gt_powersave(struct drm_device *dev) +static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv) { - valleyview_cleanup_pctx(dev); + valleyview_cleanup_pctx(dev_priv); } -static void cherryview_enable_rps(struct drm_device *dev) +static void cherryview_enable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; u32 gtfifodbg, val, rc6_mode = 0, pcbr; @@ -5583,8 +5543,8 @@ static void cherryview_enable_rps(struct drm_device *dev) pcbr = I915_READ(VLV_PCBR); /* 3: Enable RC6 */ - if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) && - (pcbr >> VLV_PCBR_ADDR_SHIFT)) + if ((intel_enable_rc6() & INTEL_RC6_ENABLE) && + (pcbr >> VLV_PCBR_ADDR_SHIFT)) rc6_mode = GEN7_RC_CTL_TO_MODE; I915_WRITE(GEN6_RC_CONTROL, rc6_mode); @@ -5629,14 +5589,13 @@ static void cherryview_enable_rps(struct drm_device *dev) intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq), dev_priv->rps.idle_freq); - valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); + valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } -static void valleyview_enable_rps(struct drm_device *dev) +static void valleyview_enable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; u32 gtfifodbg, val, rc6_mode = 0; @@ -5689,10 +5648,10 @@ static void valleyview_enable_rps(struct drm_device *dev) VLV_MEDIA_RC6_COUNT_EN | VLV_RENDER_RC6_COUNT_EN)); - if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) + if (intel_enable_rc6() & INTEL_RC6_ENABLE) rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL; - intel_print_rc6_info(dev, rc6_mode); + intel_print_rc6_info(dev_priv, rc6_mode); I915_WRITE(GEN6_RC_CONTROL, rc6_mode); @@ -5719,7 
+5678,7 @@ static void valleyview_enable_rps(struct drm_device *dev) intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq), dev_priv->rps.idle_freq); - valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); + valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } @@ -5809,10 +5768,9 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv) unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; unsigned long val; - if (INTEL_INFO(dev)->gen != 5) + if (INTEL_INFO(dev_priv)->gen != 5) return 0; spin_lock_irq(&mchdev_lock); @@ -5852,11 +5810,10 @@ static int _pxvid_to_vd(u8 pxvid) static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) { - struct drm_device *dev = dev_priv->dev; const int vd = _pxvid_to_vd(pxvid); const int vm = vd - 1125; - if (INTEL_INFO(dev)->is_mobile) + if (INTEL_INFO(dev_priv)->is_mobile) return vm > 0 ? vm : 0; return vd; @@ -5897,9 +5854,7 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv) void i915_update_gfx_val(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; - - if (INTEL_INFO(dev)->gen != 5) + if (INTEL_INFO(dev_priv)->gen != 5) return; spin_lock_irq(&mchdev_lock); @@ -5948,10 +5903,9 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv) unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; unsigned long val; - if (INTEL_INFO(dev)->gen != 5) + if (INTEL_INFO(dev_priv)->gen != 5) return 0; spin_lock_irq(&mchdev_lock); @@ -6140,9 +6094,8 @@ void intel_gpu_ips_teardown(void) spin_unlock_irq(&mchdev_lock); } -static void intel_init_emon(struct drm_device *dev) +static void intel_init_emon(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 lcfuse; u8 pxw[16]; int i; @@ -6211,10 +6164,8 @@ static void intel_init_emon(struct drm_device *dev) dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK); } -void intel_init_gt_powersave(struct drm_device *dev) +void intel_init_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - /* * RPM depends on RC6 to save restore the GT HW context, so make RC6 a * requirement. @@ -6224,20 +6175,18 @@ void intel_init_gt_powersave(struct drm_device *dev) intel_runtime_pm_get(dev_priv); } - if (IS_CHERRYVIEW(dev)) - cherryview_init_gt_powersave(dev); - else if (IS_VALLEYVIEW(dev)) - valleyview_init_gt_powersave(dev); + if (IS_CHERRYVIEW(dev_priv)) + cherryview_init_gt_powersave(dev_priv); + else if (IS_VALLEYVIEW(dev_priv)) + valleyview_init_gt_powersave(dev_priv); } -void intel_cleanup_gt_powersave(struct drm_device *dev) +void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (IS_CHERRYVIEW(dev)) + if (IS_CHERRYVIEW(dev_priv)) return; - else if (IS_VALLEYVIEW(dev)) - valleyview_cleanup_gt_powersave(dev); + else if (IS_VALLEYVIEW(dev_priv)) + valleyview_cleanup_gt_powersave(dev_priv); if (!i915.enable_rc6) intel_runtime_pm_put(dev_priv); @@ -6252,16 +6201,14 @@ static void gen6_suspend_rps(struct drm_i915_private *dev_priv) /** * intel_suspend_gt_powersave - suspend PM work and helper threads - * @dev: drm device + * @dev_priv: i915 device * * We don't want to disable RC6 or other features here, we just want * to make sure any work we've queued has finished and won't bother * us while we're suspended. 
*/ -void intel_suspend_gt_powersave(struct drm_device *dev) +void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (INTEL_GEN(dev_priv) < 6) return; @@ -6271,25 +6218,23 @@ void intel_suspend_gt_powersave(struct drm_device *dev) gen6_rps_idle(dev_priv); } -void intel_disable_gt_powersave(struct drm_device *dev) +void intel_disable_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (IS_IRONLAKE_M(dev)) { + if (IS_IRONLAKE_M(dev_priv)) { ironlake_disable_drps(dev_priv); - } else if (INTEL_INFO(dev)->gen >= 6) { - intel_suspend_gt_powersave(dev); + } else if (INTEL_INFO(dev_priv)->gen >= 6) { + intel_suspend_gt_powersave(dev_priv); mutex_lock(&dev_priv->rps.hw_lock); - if (INTEL_INFO(dev)->gen >= 9) { - gen9_disable_rc6(dev); - gen9_disable_rps(dev); - } else if (IS_CHERRYVIEW(dev)) - cherryview_disable_rps(dev); - else if (IS_VALLEYVIEW(dev)) - valleyview_disable_rps(dev); + if (INTEL_INFO(dev_priv)->gen >= 9) { + gen9_disable_rc6(dev_priv); + gen9_disable_rps(dev_priv); + } else if (IS_CHERRYVIEW(dev_priv)) + cherryview_disable_rps(dev_priv); + else if (IS_VALLEYVIEW(dev_priv)) + valleyview_disable_rps(dev_priv); else - gen6_disable_rps(dev); + gen6_disable_rps(dev_priv); dev_priv->rps.enabled = false; mutex_unlock(&dev_priv->rps.hw_lock); @@ -6301,27 +6246,26 @@ static void intel_gen6_powersave_work(struct work_struct *work) struct drm_i915_private *dev_priv = container_of(work, struct drm_i915_private, rps.delayed_resume_work.work); - struct drm_device *dev = dev_priv->dev; mutex_lock(&dev_priv->rps.hw_lock); - gen6_reset_rps_interrupts(dev); + gen6_reset_rps_interrupts(dev_priv); - if (IS_CHERRYVIEW(dev)) { - cherryview_enable_rps(dev); - } else if (IS_VALLEYVIEW(dev)) { - valleyview_enable_rps(dev); - } else if (INTEL_INFO(dev)->gen >= 9) { - gen9_enable_rc6(dev); - gen9_enable_rps(dev); - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) - __gen6_update_ring_freq(dev); - } else if (IS_BROADWELL(dev)) { - gen8_enable_rps(dev); - __gen6_update_ring_freq(dev); + if (IS_CHERRYVIEW(dev_priv)) { + cherryview_enable_rps(dev_priv); + } else if (IS_VALLEYVIEW(dev_priv)) { + valleyview_enable_rps(dev_priv); + } else if (INTEL_INFO(dev_priv)->gen >= 9) { + gen9_enable_rc6(dev_priv); + gen9_enable_rps(dev_priv); + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) + __gen6_update_ring_freq(dev_priv); + } else if (IS_BROADWELL(dev_priv)) { + gen8_enable_rps(dev_priv); + __gen6_update_ring_freq(dev_priv); } else { - gen6_enable_rps(dev); - __gen6_update_ring_freq(dev); + gen6_enable_rps(dev_priv); + __gen6_update_ring_freq(dev_priv); } WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq); @@ -6339,20 +6283,18 @@ static void intel_gen6_powersave_work(struct work_struct *work) intel_runtime_pm_put(dev_priv); } -void intel_enable_gt_powersave(struct drm_device *dev) +void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - /* Powersaving is controlled by the host when inside a VM */ if (intel_vgpu_active(dev_priv)) return; - if (IS_IRONLAKE_M(dev)) { + if (IS_IRONLAKE_M(dev_priv)) { ironlake_enable_drps(dev_priv); - mutex_lock(&dev->struct_mutex); - intel_init_emon(dev); - mutex_unlock(&dev->struct_mutex); - } else if (INTEL_INFO(dev)->gen >= 6) { + mutex_lock(&dev_priv->dev->struct_mutex); + intel_init_emon(dev_priv); + mutex_unlock(&dev_priv->dev->struct_mutex); + } else if (INTEL_INFO(dev_priv)->gen 
>= 6) { /* * PCU communication is slow and this doesn't need to be * done at any specific time, so do this out of our fast path @@ -6371,11 +6313,9 @@ void intel_enable_gt_powersave(struct drm_device *dev) } } -void intel_reset_gt_powersave(struct drm_device *dev) +void intel_reset_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (INTEL_INFO(dev)->gen < 6) + if (INTEL_INFO(dev_priv)->gen < 6) return; gen6_suspend_rps(dev_priv); diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 4ea2bf2c2a4a..0c48af2f1b7e 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -248,9 +248,9 @@ intel_uncore_fw_release_timer(struct hrtimer *timer) return HRTIMER_NORESTART; } -void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) +void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv, + bool restore) { - struct drm_i915_private *dev_priv = dev->dev_private; unsigned long irqflags; struct intel_uncore_forcewake_domain *domain; int retry_count = 100; @@ -304,7 +304,7 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) if (fw) dev_priv->uncore.funcs.force_wake_get(dev_priv, fw); - if (IS_GEN6(dev) || IS_GEN7(dev)) + if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv); } @@ -400,43 +400,42 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv) return false; } -static void __intel_uncore_early_sanitize(struct drm_device *dev, +static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv, bool restore_forcewake) { - struct drm_i915_private *dev_priv = dev->dev_private; - /* clear out unclaimed reg detection bit */ if (check_for_unclaimed_mmio(dev_priv)) DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n"); /* clear out old GT FIFO errors */ - if (IS_GEN6(dev) || IS_GEN7(dev)) + if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) __raw_i915_write32(dev_priv, GTFIFODBG, __raw_i915_read32(dev_priv, GTFIFODBG)); /* WaDisableShadowRegForCpd:chv */ - if (IS_CHERRYVIEW(dev)) { + if (IS_CHERRYVIEW(dev_priv)) { __raw_i915_write32(dev_priv, GTFIFOCTL, __raw_i915_read32(dev_priv, GTFIFOCTL) | GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL | GT_FIFO_CTL_RC6_POLICY_STALL); } - intel_uncore_forcewake_reset(dev, restore_forcewake); + intel_uncore_forcewake_reset(dev_priv, restore_forcewake); } -void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake) +void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv, + bool restore_forcewake) { - __intel_uncore_early_sanitize(dev, restore_forcewake); - i915_check_and_clear_faults(dev); + __intel_uncore_early_sanitize(dev_priv, restore_forcewake); + i915_check_and_clear_faults(dev_priv); } -void intel_uncore_sanitize(struct drm_device *dev) +void intel_uncore_sanitize(struct drm_i915_private *dev_priv) { - i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6); + i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6); /* BIOS often leaves RC6 enabled, but disable it for hw init */ - intel_disable_gt_powersave(dev); + intel_disable_gt_powersave(dev_priv); } static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, @@ -1233,14 +1232,12 @@ static void fw_domain_init(struct drm_i915_private *dev_priv, fw_domain_reset(d); } -static void intel_uncore_fw_domains_init(struct drm_device *dev) +static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv) { - 
struct drm_i915_private *dev_priv = dev->dev_private; - if (INTEL_INFO(dev_priv)->gen <= 5) return; - if (IS_GEN9(dev)) { + if (IS_GEN9(dev_priv)) { dev_priv->uncore.funcs.force_wake_get = fw_domains_get; dev_priv->uncore.funcs.force_wake_put = fw_domains_put; fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, @@ -1251,9 +1248,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) FORCEWAKE_ACK_BLITTER_GEN9); fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9); - } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { dev_priv->uncore.funcs.force_wake_get = fw_domains_get; - if (!IS_CHERRYVIEW(dev)) + if (!IS_CHERRYVIEW(dev_priv)) dev_priv->uncore.funcs.force_wake_put = fw_domains_put_with_fifo; else @@ -1262,17 +1259,17 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) FORCEWAKE_VLV, FORCEWAKE_ACK_VLV); fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV); - } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { + } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { dev_priv->uncore.funcs.force_wake_get = fw_domains_get_with_thread_status; - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev_priv)) dev_priv->uncore.funcs.force_wake_put = fw_domains_put_with_fifo; else dev_priv->uncore.funcs.force_wake_put = fw_domains_put; fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, FORCEWAKE_MT, FORCEWAKE_ACK_HSW); - } else if (IS_IVYBRIDGE(dev)) { + } else if (IS_IVYBRIDGE(dev_priv)) { u32 ecobus; /* IVB configs may use multi-threaded forcewake */ @@ -1302,11 +1299,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, FORCEWAKE_MT, FORCEWAKE_MT_ACK); - mutex_lock(&dev->struct_mutex); fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL); ecobus = __raw_i915_read32(dev_priv, ECOBUS); fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL); - mutex_unlock(&dev->struct_mutex); if (!(ecobus & FORCEWAKE_MT_ENABLE)) { DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); @@ -1314,7 +1309,7 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, FORCEWAKE, FORCEWAKE_ACK); } - } else if (IS_GEN6(dev)) { + } else if (IS_GEN6(dev_priv)) { dev_priv->uncore.funcs.force_wake_get = fw_domains_get_with_thread_status; dev_priv->uncore.funcs.force_wake_put = @@ -1327,26 +1322,24 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) WARN_ON(dev_priv->uncore.fw_domains == 0); } -void intel_uncore_init(struct drm_device *dev) +void intel_uncore_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - i915_check_vgpu(dev); + i915_check_vgpu(dev_priv); intel_uncore_edram_detect(dev_priv); - intel_uncore_fw_domains_init(dev); - __intel_uncore_early_sanitize(dev, false); + intel_uncore_fw_domains_init(dev_priv); + __intel_uncore_early_sanitize(dev_priv, false); dev_priv->uncore.unclaimed_mmio_check = 1; - switch (INTEL_INFO(dev)->gen) { + switch (INTEL_INFO(dev_priv)->gen) { default: case 9: ASSIGN_WRITE_MMIO_VFUNCS(gen9); ASSIGN_READ_MMIO_VFUNCS(gen9); break; case 8: - if (IS_CHERRYVIEW(dev)) { + if (IS_CHERRYVIEW(dev_priv)) { ASSIGN_WRITE_MMIO_VFUNCS(chv); ASSIGN_READ_MMIO_VFUNCS(chv); @@ -1357,13 +1350,13 @@ void intel_uncore_init(struct drm_device *dev) break; case 7: case 6: - if (IS_HASWELL(dev)) { + if (IS_HASWELL(dev_priv)) { 
ASSIGN_WRITE_MMIO_VFUNCS(hsw); } else { ASSIGN_WRITE_MMIO_VFUNCS(gen6); } - if (IS_VALLEYVIEW(dev)) { + if (IS_VALLEYVIEW(dev_priv)) { ASSIGN_READ_MMIO_VFUNCS(vlv); } else { ASSIGN_READ_MMIO_VFUNCS(gen6); @@ -1386,16 +1379,16 @@ void intel_uncore_init(struct drm_device *dev) ASSIGN_READ_MMIO_VFUNCS(vgpu); } - i915_check_and_clear_faults(dev); + i915_check_and_clear_faults(dev_priv); } #undef ASSIGN_WRITE_MMIO_VFUNCS #undef ASSIGN_READ_MMIO_VFUNCS -void intel_uncore_fini(struct drm_device *dev) +void intel_uncore_fini(struct drm_i915_private *dev_priv) { /* Paranoia: make sure we have disabled everything before we exit. */ - intel_uncore_sanitize(dev); - intel_uncore_forcewake_reset(dev, false); + intel_uncore_sanitize(dev_priv); + intel_uncore_forcewake_reset(dev_priv, false); } #define GEN_RANGE(l, h) GENMASK(h, l) @@ -1506,44 +1499,47 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev, return 0; } -static int i915_reset_complete(struct drm_device *dev) +static int i915_reset_complete(struct pci_dev *pdev) { u8 gdrst; - pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst); + pci_read_config_byte(pdev, I915_GDRST, &gdrst); return (gdrst & GRDOM_RESET_STATUS) == 0; } -static int i915_do_reset(struct drm_device *dev, unsigned engine_mask) +static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) { + struct pci_dev *pdev = dev_priv->dev->pdev; + /* assert reset for at least 20 usec */ - pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); + pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); udelay(20); - pci_write_config_byte(dev->pdev, I915_GDRST, 0); + pci_write_config_byte(pdev, I915_GDRST, 0); - return wait_for(i915_reset_complete(dev), 500); + return wait_for(i915_reset_complete(pdev), 500); } -static int g4x_reset_complete(struct drm_device *dev) +static int g4x_reset_complete(struct pci_dev *pdev) { u8 gdrst; - pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst); + pci_read_config_byte(pdev, I915_GDRST, &gdrst); return (gdrst & GRDOM_RESET_ENABLE) == 0; } -static int g33_do_reset(struct drm_device *dev, unsigned engine_mask) +static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) { - pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); - return wait_for(g4x_reset_complete(dev), 500); + struct pci_dev *pdev = dev_priv->dev->pdev; + pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); + return wait_for(g4x_reset_complete(pdev), 500); } -static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask) +static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct pci_dev *pdev = dev_priv->dev->pdev; int ret; - pci_write_config_byte(dev->pdev, I915_GDRST, + pci_write_config_byte(pdev, I915_GDRST, GRDOM_RENDER | GRDOM_RESET_ENABLE); - ret = wait_for(g4x_reset_complete(dev), 500); + ret = wait_for(g4x_reset_complete(pdev), 500); if (ret) return ret; @@ -1551,9 +1547,9 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask) I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE); POSTING_READ(VDECCLK_GATE_D); - pci_write_config_byte(dev->pdev, I915_GDRST, + pci_write_config_byte(pdev, I915_GDRST, GRDOM_MEDIA | GRDOM_RESET_ENABLE); - ret = wait_for(g4x_reset_complete(dev), 500); + ret = wait_for(g4x_reset_complete(pdev), 500); if (ret) return ret; @@ -1561,14 +1557,14 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask) 
I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE); POSTING_READ(VDECCLK_GATE_D); - pci_write_config_byte(dev->pdev, I915_GDRST, 0); + pci_write_config_byte(pdev, I915_GDRST, 0); return 0; } -static int ironlake_do_reset(struct drm_device *dev, unsigned engine_mask) +static int ironlake_do_reset(struct drm_i915_private *dev_priv, + unsigned engine_mask) { - struct drm_i915_private *dev_priv = dev->dev_private; int ret; I915_WRITE(ILK_GDSR, @@ -1612,7 +1608,7 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv, /** * gen6_reset_engines - reset individual engines - * @dev: DRM device + * @dev_priv: i915 device * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset * * This function will reset the individual engines that are set in engine_mask. @@ -1623,9 +1619,9 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv, * * Returns 0 on success, nonzero on error. */ -static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask) +static int gen6_reset_engines(struct drm_i915_private *dev_priv, + unsigned engine_mask) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; const u32 hw_engine_mask[I915_NUM_ENGINES] = { [RCS] = GEN6_GRDOM_RENDER, @@ -1647,7 +1643,7 @@ static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask) ret = gen6_hw_domain_reset(dev_priv, hw_mask); - intel_uncore_forcewake_reset(dev, true); + intel_uncore_forcewake_reset(dev_priv, true); return ret; } @@ -1688,16 +1684,16 @@ static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine) _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)); } -static int gen8_reset_engines(struct drm_device *dev, unsigned engine_mask) +static int gen8_reset_engines(struct drm_i915_private *dev_priv, + unsigned engine_mask) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; for_each_engine_masked(engine, dev_priv, engine_mask) if (gen8_request_engine_reset(engine)) goto not_ready; - return gen6_reset_engines(dev, engine_mask); + return gen6_reset_engines(dev_priv, engine_mask); not_ready: for_each_engine_masked(engine, dev_priv, engine_mask) @@ -1706,35 +1702,35 @@ not_ready: return -EIO; } -static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *, - unsigned engine_mask) +typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask); + +static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv) { if (!i915.reset) return NULL; - if (INTEL_INFO(dev)->gen >= 8) + if (INTEL_INFO(dev_priv)->gen >= 8) return gen8_reset_engines; - else if (INTEL_INFO(dev)->gen >= 6) + else if (INTEL_INFO(dev_priv)->gen >= 6) return gen6_reset_engines; - else if (IS_GEN5(dev)) + else if (IS_GEN5(dev_priv)) return ironlake_do_reset; - else if (IS_G4X(dev)) + else if (IS_G4X(dev_priv)) return g4x_do_reset; - else if (IS_G33(dev)) + else if (IS_G33(dev_priv)) return g33_do_reset; - else if (INTEL_INFO(dev)->gen >= 3) + else if (INTEL_INFO(dev_priv)->gen >= 3) return i915_do_reset; else return NULL; } -int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask) +int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) { - struct drm_i915_private *dev_priv = to_i915(dev); - int (*reset)(struct drm_device *, unsigned); + reset_func reset; int ret; - reset = intel_get_gpu_reset(dev); + reset = intel_get_gpu_reset(dev_priv); if (reset == NULL) return -ENODEV; @@ -1742,15 +1738,15 @@ int intel_gpu_reset(struct 
drm_device *dev, unsigned engine_mask) * request may be dropped and never completes (causing -EIO). */ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); - ret = reset(dev, engine_mask); + ret = reset(dev_priv, engine_mask); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); return ret; } -bool intel_has_gpu_reset(struct drm_device *dev) +bool intel_has_gpu_reset(struct drm_i915_private *dev_priv) { - return intel_get_gpu_reset(dev) != NULL; + return intel_get_gpu_reset(dev_priv) != NULL; } int intel_guc_reset(struct drm_i915_private *dev_priv) -- cgit v1.2.3-59-g8ed1b From ae5702d245c2328f1b4afa4ae7d39792a8064d48 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Tue, 10 May 2016 10:57:04 +0100 Subject: drm/i915: Make IS_GENx macros work on a mask If instead of a numerical comparison we make these test a bitmask, we enable the compiler to optimize all instances of IS_GENx || IS_GENy. v2: Make bit zero of gen mask mean gen 1. Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 3 +++ drivers/gpu/drm/i915/i915_drv.h | 17 +++++++++-------- drivers/gpu/drm/i915/intel_uncore.c | 4 ++-- 3 files changed, 14 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_dma.c') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index b55fef3679d9..81856f126622 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1073,6 +1073,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, memcpy(device_info, info, sizeof(dev_priv->info)); device_info->device_id = dev->pdev->device; + BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE); + device_info->gen_mask = BIT(device_info->gen - 1); + spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->gpu_error.lock); mutex_init(&dev_priv->backlight_lock); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4abd39f32c0f..d205b05d9a1a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -760,6 +760,7 @@ struct intel_device_info { u8 num_pipes:3; u8 num_sprites[I915_MAX_PIPES]; u8 gen; + u16 gen_mask; u8 ring_mask; /* Rings supported by the HW */ DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON); /* Register offsets for the various display pipes and transcoders */ @@ -2620,14 +2621,14 @@ struct drm_i915_cmd_table { * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular * chips, etc.). */ -#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) -#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) -#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) -#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) -#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) -#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) -#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8) -#define IS_GEN9(dev) (INTEL_INFO(dev)->gen == 9) +#define IS_GEN2(dev) (INTEL_INFO(dev)->gen_mask & BIT(1)) +#define IS_GEN3(dev) (INTEL_INFO(dev)->gen_mask & BIT(2)) +#define IS_GEN4(dev) (INTEL_INFO(dev)->gen_mask & BIT(3)) +#define IS_GEN5(dev) (INTEL_INFO(dev)->gen_mask & BIT(4)) +#define IS_GEN6(dev) (INTEL_INFO(dev)->gen_mask & BIT(5)) +#define IS_GEN7(dev) (INTEL_INFO(dev)->gen_mask & BIT(6)) +#define IS_GEN8(dev) (INTEL_INFO(dev)->gen_mask & BIT(7)) +#define IS_GEN9(dev) (INTEL_INFO(dev)->gen_mask & BIT(8)) #define RENDER_RING (1<<RCS) diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c -#define GEN_RANGE(l, h) GENMASK(h, l) +#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1) if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) && - (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) + (INTEL_INFO(dev)->gen_mask & entry->gen_bitmask)) break; } -- cgit v1.2.3-59-g8ed1b
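To see the optimization this commit message is after, here is a minimal standalone sketch; the struct, macro names and values below are simplified stand-ins for illustration, not the driver's real headers:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* pared-down stand-in for struct intel_device_info */
struct info {
	unsigned int gen;
	uint16_t gen_mask;
};

/* old style: every check is a numerical comparison */
#define IS_GEN6_OLD(i) ((i)->gen == 6)
#define IS_GEN7_OLD(i) ((i)->gen == 7)

/* new style: bit zero of gen_mask means gen 1, so gen N sets BIT(N - 1) */
#define IS_GEN6_NEW(i) ((i)->gen_mask & BIT(5))
#define IS_GEN7_NEW(i) ((i)->gen_mask & BIT(6))

int main(void)
{
	struct info i = { .gen = 7 };

	i.gen_mask = BIT(i.gen - 1);

	/* with the mask form the compiler is free to fold both tests
	 * into a single gen_mask & (BIT(5) | BIT(6)) check; the pair of
	 * equality comparisons is harder to merge */
	if (IS_GEN6_NEW(&i) || IS_GEN7_NEW(&i))
		printf("gen6 or gen7 path\n");

	return 0;
}

This is also why the next patch below converts the remaining "gen == x" comparisons to IS_GENx, so that they can take part in the same folding.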
From 7e22dbbbae81bfdf26b468aaa1c9a9570f7476ba Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Tue, 10 May 2016 10:57:06 +0100 Subject: drm/i915: Replace "INTEL_INFO->gen == x" checks with IS_GENx This way the optimization from the previous patch works even better. v2: Rebase. Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/i915_debugfs.c | 4 ++-- drivers/gpu/drm/i915/i915_dma.c | 4 ++-- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_gem.c | 2 +- drivers/gpu/drm/i915/i915_gem_stolen.c | 2 +- drivers/gpu/drm/i915/i915_gem_tiling.c | 2 +- drivers/gpu/drm/i915/i915_gpu_error.c | 2 +- drivers/gpu/drm/i915/i915_irq.c | 4 ++-- drivers/gpu/drm/i915/intel_display.c | 2 +- drivers/gpu/drm/i915/intel_lvds.c | 2 +- drivers/gpu/drm/i915/intel_pm.c | 4 ++-- drivers/gpu/drm/i915/intel_ringbuffer.c | 2 +- 12 files changed, 16 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_dma.c') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index f01848dbebc5..aace177dcde0 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2307,12 +2307,12 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; - if (INTEL_INFO(dev)->gen == 6) + if (IS_GEN6(dev_priv)) seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); for_each_engine(engine, dev_priv) { seq_printf(m, "%s\n", engine->name); - if (INTEL_INFO(dev)->gen == 7) + if (IS_GEN7(dev_priv)) seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(engine))); seq_printf(m, "PP_DIR_BASE: 0x%08x\n", diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 81856f126622..0eadeb694fec 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -889,7 +889,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev) DRM_INFO("Display disabled (module parameter)\n"); info->num_pipes = 0; } else if (info->num_pipes > 0 && - (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) && + (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) && HAS_PCH_SPLIT(dev)) { u32 fuse_strap = I915_READ(FUSE_STRAP); u32 sfuse_strap = I915_READ(SFUSE_STRAP); @@ -913,7 +913,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev) DRM_INFO("PipeC fused off\n"); info->num_pipes -= 1; } - }
else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) { + } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) { u32 dfsm = I915_READ(SKL_DFSM); u8 disabled_mask = 0; bool invalid; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6d717094b987..f33b2e6850e2 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2702,7 +2702,7 @@ struct drm_i915_cmd_table { IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \ IS_KABYLAKE(dev) || IS_BROXTON(dev)) #define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) -#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) +#define HAS_RC6p(dev) (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) #define HAS_CSR(dev) (IS_GEN9(dev)) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d2195c7f78b5..8439867dc654 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1981,7 +1981,7 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) return size; /* Previous chips need a power-of-two fence region when tiling */ - if (INTEL_INFO(dev)->gen == 3) + if (IS_GEN3(dev)) gtt_size = 1024*1024; else gtt_size = 512*1024; diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 68fde8fba803..f9253f2b7ba0 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -56,7 +56,7 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, /* See the comment at the drm_mm_init() call for more about this check. * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */ - if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096) + if (IS_GEN8(dev_priv) && start < 4096) start = 4096; mutex_lock(&dev_priv->mm.stolen_lock); diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 829dab69895f..2fcb4afade19 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -125,7 +125,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode) if (INTEL_INFO(obj->base.dev)->gen >= 4) return true; - if (INTEL_INFO(obj->base.dev)->gen == 3) { + if (IS_GEN3(obj->base.dev)) { if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) return false; } else { diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 0f6002cb86f4..34ff2459ceea 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -411,7 +411,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); } - if (INTEL_INFO(dev)->gen == 7) + if (IS_GEN7(dev)) err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); for (i = 0; i < ARRAY_SIZE(error->ring); i++) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 2fa26159d6e5..f0d941455bed 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -4643,12 +4643,12 @@ void intel_irq_init(struct drm_i915_private *dev_priv) dev->driver->disable_vblank = ironlake_disable_vblank; dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; } else { - if (INTEL_INFO(dev_priv)->gen == 2) { + if (IS_GEN2(dev_priv)) { dev->driver->irq_preinstall = i8xx_irq_preinstall; dev->driver->irq_postinstall = i8xx_irq_postinstall; dev->driver->irq_handler = i8xx_irq_handler; dev->driver->irq_uninstall = i8xx_irq_uninstall; - } else if (INTEL_INFO(dev_priv)->gen == 3) { + } else if 
(IS_GEN3(dev_priv)) { dev->driver->irq_preinstall = i915_irq_preinstall; dev->driver->irq_postinstall = i915_irq_postinstall; dev->driver->irq_uninstall = i915_irq_uninstall; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 7215dadf2aa3..9427de53215d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1204,7 +1204,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, u32 val; /* ILK FDI PLL is always enabled */ - if (INTEL_INFO(dev_priv)->gen == 5) + if (IS_GEN5(dev_priv)) return; /* On Haswell, DDI ports are responsible for the FDI PLL setup */ diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index bc53c0dd34d0..d65fd945607a 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -190,7 +190,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder) /* Set the dithering flag on LVDS as needed, note that there is no * special lvds dither control bit on pch-split platforms, dithering is * only controlled through the PIPECONF reg. */ - if (INTEL_INFO(dev)->gen == 4) { + if (IS_GEN4(dev_priv)) { /* Bspec wording suggests that LVDS port dithering only exists * for 18bpp panels. */ if (crtc->config->dither && crtc->config->pipe_bpp == 18) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 5d80a2b0ffb4..1bb0f9ac104c 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2146,14 +2146,14 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8]) static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5]) { /* ILK sprite LP0 latency is 1300 ns */ - if (INTEL_INFO(dev)->gen == 5) + if (IS_GEN5(dev)) wm[0] = 13; } static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5]) { /* ILK cursor LP0 latency is 1300 ns */ - if (INTEL_INFO(dev)->gen == 5) + if (IS_GEN5(dev)) wm[0] = 13; /* WaDoubleCursorLP3Latency:ivb */ diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index e17a682dd621..84b22a57cc1c 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -2503,7 +2503,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno) * the semaphore value, then when the seqno moves backwards all * future waits will complete instantly (causing rendering corruption). */ - if (INTEL_INFO(dev_priv)->gen == 6 || INTEL_INFO(dev_priv)->gen == 7) { + if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) { I915_WRITE(RING_SYNC_0(engine->mmio_base), 0); I915_WRITE(RING_SYNC_1(engine->mmio_base), 0); if (HAS_VEBOX(dev_priv)) -- cgit v1.2.3-59-g8ed1b From 1ee8da6df17dc481948aa2d04005e87bc7d387a4 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 May 2016 12:43:23 +0100 Subject: drm/i915: Convert intel_overlay.c to use native drm_i915_private pointers Another day, another long overdue conversion. Not much to update inside intel_overlay.c, but still

   text	   data	    bss	     dec	    hex	filename
6309547	3578778	 696320	10584645	 a18245	vmlinux
6309291	3578778	 696320	10584389	 a18145	vmlinux

a couple of hundred bytes of pointer misdirection. Whilst here, rename the ioctl entry points to include the _ioctl suffix so that the user entry points are clear (following the idiom).
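As a rough illustration of the pointer misdirection being removed, consider the following standalone sketch; the types are hypothetical, pared-down stand-ins, not the kernel's actual definitions:

#include <stdio.h>

struct drm_i915_private { const char *name; };
struct drm_device { void *dev_private; };

/* before: intel_overlay held a drm_device pointer, so reaching the
 * driver-private struct cost two dependent loads per use */
struct overlay_old { struct drm_device *dev; };

/* after: the native pointer is cached once at creation time */
struct overlay_new { struct drm_i915_private *i915; };

int main(void)
{
	struct drm_i915_private i915 = { .name = "i915" };
	struct drm_device drm = { .dev_private = &i915 };
	struct overlay_old o_old = { .dev = &drm };
	struct overlay_new o_new = { .i915 = &i915 };

	/* old: chase overlay->dev->dev_private, a void * the caller
	 * must trust to be the i915 pointer */
	struct drm_i915_private *a = o_old.dev->dev_private;

	/* new: one load, already correctly typed */
	struct drm_i915_private *b = o_new.i915;

	printf("%s %s\n", a->name, b->name);
	return 0;
}

Multiplied across every helper in intel_overlay.c, dropping the middle hop is where those couple of hundred bytes of text go.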
Signed-off-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/1463053403-25086-1-git-send-email-chris@chris-wilson.co.uk Reviewed-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/i915_dma.c | 4 +- drivers/gpu/drm/i915/intel_display.c | 4 +- drivers/gpu/drm/i915/intel_drv.h | 12 ++-- drivers/gpu/drm/i915/intel_overlay.c | 132 ++++++++++++++++------------------- 4 files changed, 70 insertions(+), 82 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_dma.c') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 0eadeb694fec..e42fa1769c10 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1623,8 +1623,8 @@ const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW), + DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), + DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9427de53215d..44b15d38f56b 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -16020,7 +16020,7 @@ void intel_modeset_gem_init(struct drm_device *dev) intel_modeset_init_hw(dev); - intel_setup_overlay(dev); + intel_setup_overlay(dev_priv); /* * Make sure any fbs we allocated at startup are properly @@ -16093,7 +16093,7 @@ void intel_modeset_cleanup(struct drm_device *dev) drm_mode_config_cleanup(dev); - intel_cleanup_overlay(dev); + intel_cleanup_overlay(dev_priv); intel_cleanup_gt_powersave(dev_priv); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 71b30a47ee2d..cf0e6a6b8b2f 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1435,13 +1435,13 @@ void intel_attach_aspect_ratio_property(struct drm_connector *connector); /* intel_overlay.c */ -void intel_setup_overlay(struct drm_device *dev); -void intel_cleanup_overlay(struct drm_device *dev); +void intel_setup_overlay(struct drm_i915_private *dev_priv); +void intel_cleanup_overlay(struct drm_i915_private *dev_priv); int intel_overlay_switch_off(struct intel_overlay *overlay); -int intel_overlay_put_image(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int intel_overlay_attrs(struct drm_device *dev, void *data, - struct drm_file *file_priv); +int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); void intel_overlay_reset(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 4a1e774ba8cc..b9b042cc619f 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ 
b/drivers/gpu/drm/i915/intel_overlay.c @@ -168,7 +168,7 @@ struct overlay_registers { }; struct intel_overlay { - struct drm_device *dev; + struct drm_i915_private *i915; struct intel_crtc *crtc; struct drm_i915_gem_object *vid_bo; struct drm_i915_gem_object *old_vid_bo; @@ -190,14 +190,13 @@ struct intel_overlay { static struct overlay_registers __iomem * intel_overlay_map_regs(struct intel_overlay *overlay) { - struct drm_i915_private *dev_priv = to_i915(overlay->dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct drm_i915_private *dev_priv = overlay->i915; struct overlay_registers __iomem *regs; - if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) + if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr; else - regs = io_mapping_map_wc(ggtt->mappable, + regs = io_mapping_map_wc(dev_priv->ggtt.mappable, overlay->flip_addr, PAGE_SIZE); @@ -207,7 +206,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay) static void intel_overlay_unmap_regs(struct intel_overlay *overlay, struct overlay_registers __iomem *regs) { - if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) + if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915)) io_mapping_unmap(regs); } @@ -233,14 +232,13 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, /* overlay needs to be disable in OCMD reg */ static int intel_overlay_on(struct intel_overlay *overlay) { - struct drm_device *dev = overlay->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = overlay->i915; struct intel_engine_cs *engine = &dev_priv->engine[RCS]; struct drm_i915_gem_request *req; int ret; WARN_ON(overlay->active); - WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE)); + WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE)); req = i915_gem_request_alloc(engine, NULL); if (IS_ERR(req)) @@ -267,8 +265,7 @@ static int intel_overlay_on(struct intel_overlay *overlay) static int intel_overlay_continue(struct intel_overlay *overlay, bool load_polyphase_filter) { - struct drm_device *dev = overlay->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = overlay->i915; struct intel_engine_cs *engine = &dev_priv->engine[RCS]; struct drm_i915_gem_request *req; u32 flip_addr = overlay->flip_addr; @@ -336,8 +333,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay) /* overlay needs to be disabled in OCMD reg */ static int intel_overlay_off(struct intel_overlay *overlay) { - struct drm_device *dev = overlay->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = overlay->i915; struct intel_engine_cs *engine = &dev_priv->engine[RCS]; struct drm_i915_gem_request *req; u32 flip_addr = overlay->flip_addr; @@ -366,7 +362,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) intel_ring_emit(engine, flip_addr); intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); /* turn overlay off */ - if (IS_I830(dev)) { + if (IS_I830(dev_priv)) { /* Workaround: Don't disable the overlay fully, since otherwise * it dies on the next OVERLAY_ON cmd. 
*/ intel_ring_emit(engine, MI_NOOP); @@ -409,12 +405,11 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay) */ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) { - struct drm_device *dev = overlay->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = overlay->i915; struct intel_engine_cs *engine = &dev_priv->engine[RCS]; int ret; - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + lockdep_assert_held(&dev_priv->dev->struct_mutex); /* Only wait if there is actually an old frame to release to * guarantee forward progress. @@ -538,10 +533,10 @@ static int uv_vsubsampling(u32 format) } } -static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width) +static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width) { u32 mask, shift, ret; - if (IS_GEN2(dev)) { + if (IS_GEN2(dev_priv)) { mask = 0x1f; shift = 5; } else { @@ -549,7 +544,7 @@ static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width) shift = 6; } ret = ((offset + width + mask) >> shift) - (offset >> shift); - if (!IS_GEN2(dev)) + if (!IS_GEN2(dev_priv)) ret <<= 1; ret -= 1; return ret << 2; @@ -742,12 +737,12 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, int ret, tmp_width; struct overlay_registers __iomem *regs; bool scale_changed = false; - struct drm_device *dev = overlay->dev; + struct drm_i915_private *dev_priv = overlay->i915; u32 swidth, swidthsw, sheight, ostride; enum pipe pipe = overlay->crtc->pipe; - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); + lockdep_assert_held(&dev_priv->dev->struct_mutex); + WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex)); ret = intel_overlay_release_old_vid(overlay); if (ret != 0) @@ -770,7 +765,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, goto out_unpin; } oconfig = OCONF_CC_OUT_8BIT; - if (IS_GEN4(overlay->dev)) + if (IS_GEN4(dev_priv)) oconfig |= OCONF_CSC_MODE_BT709; oconfig |= pipe == 0 ? 
OCONF_PIPE_A : OCONF_PIPE_B; @@ -797,7 +792,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, tmp_width = params->src_w; swidth = params->src_w; - swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width); + swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width); sheight = params->src_h; iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y); ostride = params->stride_Y; @@ -807,9 +802,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, int uv_vscale = uv_vsubsampling(params->format); u32 tmp_U, tmp_V; swidth |= (params->src_w/uv_hscale) << 16; - tmp_U = calc_swidthsw(overlay->dev, params->offset_U, + tmp_U = calc_swidthsw(dev_priv, params->offset_U, params->src_w/uv_hscale); - tmp_V = calc_swidthsw(overlay->dev, params->offset_V, + tmp_V = calc_swidthsw(dev_priv, params->offset_V, params->src_w/uv_hscale); swidthsw |= max_t(u32, tmp_U, tmp_V) << 16; sheight |= (params->src_h/uv_vscale) << 16; @@ -841,8 +836,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, overlay->old_vid_bo = overlay->vid_bo; overlay->vid_bo = new_bo; - intel_frontbuffer_flip(dev, - INTEL_FRONTBUFFER_OVERLAY(pipe)); + intel_frontbuffer_flip(dev_priv->dev, INTEL_FRONTBUFFER_OVERLAY(pipe)); return 0; @@ -853,12 +847,12 @@ out_unpin: int intel_overlay_switch_off(struct intel_overlay *overlay) { + struct drm_i915_private *dev_priv = overlay->i915; struct overlay_registers __iomem *regs; - struct drm_device *dev = overlay->dev; int ret; - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); + lockdep_assert_held(&dev_priv->dev->struct_mutex); + WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex)); ret = intel_overlay_recover_from_interrupt(overlay); if (ret != 0) @@ -898,15 +892,14 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, static void update_pfit_vscale_ratio(struct intel_overlay *overlay) { - struct drm_device *dev = overlay->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = overlay->i915; u32 pfit_control = I915_READ(PFIT_CONTROL); u32 ratio; /* XXX: This is not the same logic as in the xorg driver, but more in * line with the intel documentation for the i965 */ - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { /* on i965 use the PGM reg to read out the autoscaler values */ ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965; } else { @@ -949,7 +942,7 @@ static int check_overlay_scaling(struct put_image_params *rec) return 0; } -static int check_overlay_src(struct drm_device *dev, +static int check_overlay_src(struct drm_i915_private *dev_priv, struct drm_intel_overlay_put_image *rec, struct drm_i915_gem_object *new_bo) { @@ -960,7 +953,7 @@ static int check_overlay_src(struct drm_device *dev, u32 tmp; /* check src dimensions */ - if (IS_845G(dev) || IS_I830(dev)) { + if (IS_845G(dev_priv) || IS_I830(dev_priv)) { if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY || rec->src_width > IMAGE_MAX_WIDTH_LEGACY) return -EINVAL; @@ -1012,14 +1005,14 @@ static int check_overlay_src(struct drm_device *dev, return -EINVAL; /* stride checking */ - if (IS_I830(dev) || IS_845G(dev)) + if (IS_I830(dev_priv) || IS_845G(dev_priv)) stride_mask = 255; else stride_mask = 63; if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) return -EINVAL; - if (IS_GEN4(dev) && rec->stride_Y < 512) + if (IS_GEN4(dev_priv) && rec->stride_Y < 
512) return -EINVAL; tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ? @@ -1064,13 +1057,13 @@ static int check_overlay_src(struct drm_device *dev, * Return the pipe currently connected to the panel fitter, * or -1 if the panel fitter is not present or not in use */ -static int intel_panel_fitter_pipe(struct drm_device *dev) +static int intel_panel_fitter_pipe(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 pfit_control; /* i830 doesn't have a panel fitter */ - if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev))) + if (INTEL_GEN(dev_priv) <= 3 && + (IS_I830(dev_priv) || !IS_MOBILE(dev_priv))) return -1; pfit_control = I915_READ(PFIT_CONTROL); @@ -1080,15 +1073,15 @@ static int intel_panel_fitter_pipe(struct drm_device *dev) return -1; /* 965 can place panel fitter on either pipe */ - if (IS_GEN4(dev)) + if (IS_GEN4(dev_priv)) return (pfit_control >> 29) & 0x3; /* older chips can only use pipe 1 */ return 1; } -int intel_overlay_put_image(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { struct drm_intel_overlay_put_image *put_image_rec = data; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1163,7 +1156,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, /* line too wide, i.e. one-line-mode */ if (mode->hdisplay > 1024 && - intel_panel_fitter_pipe(dev) == crtc->pipe) { + intel_panel_fitter_pipe(dev_priv) == crtc->pipe) { overlay->pfit_active = true; update_pfit_vscale_ratio(overlay); } else @@ -1197,7 +1190,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, goto out_unlock; } - ret = check_overlay_src(dev, put_image_rec, new_bo); + ret = check_overlay_src(dev_priv, put_image_rec, new_bo); if (ret != 0) goto out_unlock; params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK; @@ -1285,8 +1278,8 @@ static int check_gamma(struct drm_intel_overlay_attrs *attrs) return 0; } -int intel_overlay_attrs(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { struct drm_intel_overlay_attrs *attrs = data; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1310,7 +1303,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, attrs->contrast = overlay->contrast; attrs->saturation = overlay->saturation; - if (!IS_GEN2(dev)) { + if (!IS_GEN2(dev_priv)) { attrs->gamma0 = I915_READ(OGAMC0); attrs->gamma1 = I915_READ(OGAMC1); attrs->gamma2 = I915_READ(OGAMC2); @@ -1342,7 +1335,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, intel_overlay_unmap_regs(overlay, regs); if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) goto out_unlock; if (overlay->active) { @@ -1372,37 +1365,36 @@ out_unlock: return ret; } -void intel_setup_overlay(struct drm_device *dev) +void intel_setup_overlay(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_overlay *overlay; struct drm_i915_gem_object *reg_bo; struct overlay_registers __iomem *regs; int ret; - if (!HAS_OVERLAY(dev)) + if (!HAS_OVERLAY(dev_priv)) return; overlay = kzalloc(sizeof(*overlay), GFP_KERNEL); if (!overlay) return; - mutex_lock(&dev->struct_mutex); + mutex_lock(&dev_priv->dev->struct_mutex); if (WARN_ON(dev_priv->overlay)) goto out_free; - overlay->dev = dev; + overlay->i915 
= dev_priv; reg_bo = NULL; - if (!OVERLAY_NEEDS_PHYSICAL(dev)) - reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE); + if (!OVERLAY_NEEDS_PHYSICAL(dev_priv)) + reg_bo = i915_gem_object_create_stolen(dev_priv->dev, PAGE_SIZE); if (reg_bo == NULL) - reg_bo = i915_gem_object_create(dev, PAGE_SIZE); + reg_bo = i915_gem_object_create(dev_priv->dev, PAGE_SIZE); if (IS_ERR(reg_bo)) goto out_free; overlay->reg_bo = reg_bo; - if (OVERLAY_NEEDS_PHYSICAL(dev)) { + if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) { ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE); if (ret) { DRM_ERROR("failed to attach phys overlay regs\n"); @@ -1442,25 +1434,23 @@ void intel_setup_overlay(struct drm_device *dev) intel_overlay_unmap_regs(overlay, regs); dev_priv->overlay = overlay; - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev_priv->dev->struct_mutex); DRM_INFO("initialized overlay support\n"); return; out_unpin_bo: - if (!OVERLAY_NEEDS_PHYSICAL(dev)) + if (!OVERLAY_NEEDS_PHYSICAL(dev_priv)) i915_gem_object_ggtt_unpin(reg_bo); out_free_bo: drm_gem_object_unreference(&reg_bo->base); out_free: - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev_priv->dev->struct_mutex); kfree(overlay); return; } -void intel_cleanup_overlay(struct drm_device *dev) +void intel_cleanup_overlay(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (!dev_priv->overlay) return; @@ -1483,17 +1473,16 @@ struct intel_overlay_error_state { static struct overlay_registers __iomem * intel_overlay_map_regs_atomic(struct intel_overlay *overlay) { - struct drm_i915_private *dev_priv = to_i915(overlay->dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct drm_i915_private *dev_priv = overlay->i915; struct overlay_registers __iomem *regs; - if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) + if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) /* Cast to make sparse happy, but it's wc memory anyway, so * equivalent to the wc io mapping on X86. */ regs = (struct overlay_registers __iomem *) overlay->reg_bo->phys_handle->vaddr; else - regs = io_mapping_map_atomic_wc(ggtt->mappable, + regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable, overlay->flip_addr); return regs; @@ -1502,11 +1491,10 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay) static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay, struct overlay_registers __iomem *regs) { - if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) + if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915)) io_mapping_unmap_atomic(regs); } - struct intel_overlay_error_state * intel_overlay_capture_error_state(struct drm_i915_private *dev_priv) { -- cgit v1.2.3-59-g8ed1b From d538704ba0afdb5fc5b91be0b91e82fd19ad1707 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 13 May 2016 11:57:19 +0100 Subject: drm/i915: Move get-reset-stats ioctl from intel_uncore.c to i915_gem_context.c The get-reset-stats ioctl reports the statistics (number of hangs, be it as a victim or the guilty party) of a particular context. 
It belongs semantically in the i915_gem_context.c user interface, as opposed to the hardware-level access of intel_uncore.c. Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: http://patchwork.freedesktop.org/patch/msgid/1463137042-9669-1-git-send-email-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_dma.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 4 ++-- drivers/gpu/drm/i915/i915_gem_context.c | 39 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/intel_uncore.c | 39 --------------------------------- 4 files changed, 42 insertions(+), 42 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_dma.c') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index e42fa1769c10..05b2b1901d60 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1631,7 +1631,7 @@ const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7a0b51301337..d9d07b70f05c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3362,6 +3362,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); /* i915_gem_evict.c */ int __must_check i915_gem_evict_something(struct drm_device *dev, @@ -3578,8 +3580,6 @@ extern void intel_detect_pch(struct drm_device *dev); extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv); int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file); -int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data, - struct drm_file *file); /* overlay */ extern struct intel_overlay_error_state * diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 0fffebcc0ace..f3c76ca6b411 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -1040,3 +1040,42 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, return ret; } + +int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, + void *data, struct drm_file *file) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_reset_stats *args = data; + struct i915_ctx_hang_stats *hs; + struct intel_context *ctx; + int ret; + + if (args->flags || args->pad) + return -EINVAL; + + if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN)) + return -EPERM; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + ctx = i915_gem_context_get(file->driver_priv, args->ctx_id); + if (IS_ERR(ctx)) { + 
mutex_unlock(&dev->struct_mutex); + return PTR_ERR(ctx); + } + hs = &ctx->hang_stats; + + if (capable(CAP_SYS_ADMIN)) + args->reset_count = i915_reset_count(&dev_priv->gpu_error); + else + args->reset_count = 0; + + args->batch_active = hs->batch_active; + args->batch_pending = hs->batch_pending; + + mutex_unlock(&dev->struct_mutex); + + return 0; +} diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 0b1bd07dd04a..385114bca924 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1460,45 +1460,6 @@ out: return ret; } -int i915_get_reset_stats_ioctl(struct drm_device *dev, - void *data, struct drm_file *file) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_reset_stats *args = data; - struct i915_ctx_hang_stats *hs; - struct intel_context *ctx; - int ret; - - if (args->flags || args->pad) - return -EINVAL; - - if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN)) - return -EPERM; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - - ctx = i915_gem_context_get(file->driver_priv, args->ctx_id); - if (IS_ERR(ctx)) { - mutex_unlock(&dev->struct_mutex); - return PTR_ERR(ctx); - } - hs = &ctx->hang_stats; - - if (capable(CAP_SYS_ADMIN)) - args->reset_count = i915_reset_count(&dev_priv->gpu_error); - else - args->reset_count = 0; - - args->batch_active = hs->batch_active; - args->batch_pending = hs->batch_pending; - - mutex_unlock(&dev->struct_mutex); - - return 0; -} - static int i915_reset_complete(struct pci_dev *pdev) { u8 gdrst; -- cgit v1.2.3-59-g8ed1b From 7d7792e5e2bb5fc176dfb3f56d3c79b9c716519e Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Thu, 12 May 2016 16:18:51 +0300 Subject: drm/i915: Handle error return from dma_set_coherent_mask() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A failure from these functions can lead to obscure bugs later, so it's better not to suppress them and just fail module loading. Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1463059132-1720-4-git-send-email-imre.deak@intel.com --- drivers/gpu/drm/i915/i915_dma.c | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_dma.c') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 05b2b1901d60..547100fa3122 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1262,8 +1262,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) pci_set_master(dev->pdev); /* overlay on gen2 is broken and can't address above 1G */ - if (IS_GEN2(dev)) - dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); + if (IS_GEN2(dev)) { + ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); + if (ret) { + DRM_ERROR("failed to set DMA mask\n"); + + goto out_ggtt; + } + } + /* 965GM sometimes incorrectly writes to hardware status page (HWS) * using 32bit addressing, overwriting memory if HWS is located @@ -1273,8 +1280,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) * behaviour if any general state is accessed within a page above 4GB, * which also needs to be handled carefully. 
*/ - if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) - dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); + if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) { + ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); + + if (ret) { + DRM_ERROR("failed to set DMA mask\n"); + + goto out_ggtt; + } + } aperture_size = ggtt->mappable_end; -- cgit v1.2.3-59-g8ed1b From e8fcdf1e65c71340972041295fb7b3e59929b761 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 13 May 2016 17:04:38 +0300 Subject: drm/i915: don't mix bitwise and logical operations for has_snoop Also make the code more readable. Reviewed-by: Tvrtko Ursulin Signed-off-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/1463148278-23193-2-git-send-email-jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_dma.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_dma.c') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 547100fa3122..a8c79f6512a4 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -954,9 +954,11 @@ static void intel_device_info_runtime_init(struct drm_device *dev) else if (INTEL_INFO(dev)->gen >= 9) gen9_sseu_info_init(dev); - /* Snooping is broken on BXT A stepping. */ info->has_snoop = !info->has_llc; - info->has_snoop &= !IS_BXT_REVID(dev, 0, BXT_REVID_A1); + + /* Snooping is broken on BXT A stepping. */ + if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + info->has_snoop = false; DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total); DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total); -- cgit v1.2.3-59-g8ed1b
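A note on the get-reset-stats ioctl moved in d538704 above: from userspace the interface is reached through DRM_IOCTL_I915_GET_RESET_STATS on the DRM device node. The following is a minimal sketch, not part of any patch; the device node path and the raw ioctl() call (rather than libdrm's drmIoctl() wrapper) are assumptions made for brevity.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	int main(void)
	{
		struct drm_i915_reset_stats stats;
		int fd = open("/dev/dri/card0", O_RDWR); /* assumed device node */

		if (fd < 0)
			return 1;

		memset(&stats, 0, sizeof(stats));
		/* ctx_id 0 is the default context; per the handler above,
		 * querying it requires CAP_SYS_ADMIN. */
		stats.ctx_id = 0;

		if (ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats) == 0)
			printf("resets=%u active=%u pending=%u\n",
			       stats.reset_count, stats.batch_active,
			       stats.batch_pending);

		close(fd);
		return 0;
	}

Note that unprivileged callers get reset_count forced to zero, matching the capable(CAP_SYS_ADMIN) branch in the handler.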
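On the dma_set_coherent_mask() change in 7d7792e: the call can genuinely fail when the platform cannot satisfy the requested mask, which is why the patch propagates the error instead of discarding it. Below is a sketch of the checked pattern in a probe path; foo_probe and the chosen mask are illustrative, not taken from the patch.

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	static int foo_probe(struct pci_dev *pdev)
	{
		int ret;

		/* e.g. gen2, where the overlay cannot address above 1G */
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
		if (ret) {
			dev_err(&pdev->dev, "failed to set DMA mask\n");
			return ret; /* fail loading rather than limp on */
		}

		return 0;
	}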
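Finally, on the has_snoop cleanup in e8fcdf1: the old code was correct only because ! in C always yields 0 or 1, so the bitwise &= happened to behave like a logical AND. A standalone illustration of the two spellings (editor's sketch with hypothetical names, not from the patch):

	#include <stdbool.h>

	/* Old spelling: bitwise AND-assign applied to a logical value. */
	static bool has_snoop_old(bool has_llc, bool is_bxt_a1)
	{
		bool has_snoop = !has_llc;

		has_snoop &= !is_bxt_a1; /* works, but mixes operator classes */
		return has_snoop;
	}

	/* New spelling: same result, stated as a plain conditional. */
	static bool has_snoop_new(bool has_llc, bool is_bxt_a1)
	{
		bool has_snoop = !has_llc;

		if (is_bxt_a1) /* snooping broken on BXT A stepping */
			has_snoop = false;
		return has_snoop;
	}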