author     2019-07-12 06:57:16 +1000
committer  2019-07-12 06:57:16 +1000
commit     b784d6bff97154af829e46271dbb16967106a493 (patch)
tree       8bd78458e36a2d6dbd033e37a19fcc5e3a6b70bf /drivers/gpu/drm/amd/amdgpu
parent     Merge tag 'drm-next-5.3-2019-06-27' of git://people.freedesktop.org/~agd5f/linux into drm-next (diff)
parent     drm/amdgpu/navi10: add uclk activity sensor (diff)
Merge tag 'drm-next-5.3-2019-07-09' of git://people.freedesktop.org/~agd5f/linux into drm-next
drm-next-5.3-2019-07-09:
amdgpu:
- GPU reset for navi10
- Powerplay fixes for navi10
- GFX fixes for navi10
- Prepare for hmm_range_register API change
- XGMI fixes
- clang warning fixes
- Fixes for various kconfig scenarios
- Misc fixes and cleanups
amdkfd:
- Add workaround for soft hangs with oversubscribed runlists
- Remove duplicated pcie atomics request
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190710035017.3407-1-alexander.deucher@amd.com
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
22 files changed, 227 insertions, 123 deletions
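One recurring theme in the diff below is the new PCIe-atomics handling: amdgpu_device_init() now requests 32-/64-bit AtomicOp completion at the root port once and caches the result in adev->have_atomics_support, and amdkfd reads that cached flag through the new amdgpu_amdkfd_have_atomics_support() helper instead of issuing a duplicate request. A minimal sketch of that probe-and-cache step, using the same kernel API the patch calls (the helper name below is illustrative and not part of the patch; error handling is trimmed):

#include <linux/pci.h>

/* Illustrative helper (not from the patch): request 32- and 64-bit AtomicOp
 * completer support at the root port and report whether routing is available,
 * which is the value the amdgpu_device.c hunk stores in
 * adev->have_atomics_support.
 */
static bool amdgpu_probe_pcie_atomics(struct pci_dev *pdev)
{
	return pci_enable_atomic_ops_to_root(pdev,
					     PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
					     PCI_EXP_DEVCAP2_ATOMIC_COMP64) == 0;
}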
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 7bbcf1b20cfd..56e084367b93 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -54,7 +54,9 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \ amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \ amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \ - amdgpu_vm_sdma.o amdgpu_pmu.o amdgpu_discovery.o + amdgpu_vm_sdma.o amdgpu_discovery.o + +amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o # add asic specific block amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 596f7e07b5a8..f88d8141447c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -775,6 +775,7 @@ struct amdgpu_device { struct mutex grbm_idx_mutex; struct dev_pm_domain vga_pm_domain; bool have_disp_power_ref; + bool have_atomics_support; /* BIOS */ bool is_atom_fw; @@ -1216,6 +1217,10 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev ); static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; } #endif + +void amdgpu_register_gpu_instance(struct amdgpu_device *adev); +void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev); + #include "amdgpu_object.h" /* used by df_v3_6.c and amdgpu_pmu.c */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index fab3eb173b05..9fa4f25a3745 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -668,6 +668,13 @@ bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid) return false; } +bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)kgd; + + return adev->have_atomics_support; +} + #ifndef CONFIG_HSA_AMD bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 93a25c799d75..b6076d19e442 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -135,6 +135,7 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine, uint32_t vmid, uint64_t gpu_addr, uint32_t *ib_cmd, uint32_t ib_len); void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle); +bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd); struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void); struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 0aa81456ec32..1d3ee9c42f7e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -504,7 +504,7 @@ static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm, goto out; } - ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, bo->tbo.ttm->pages); + ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages); if (ret) { pr_err("%s: Failed to get user pages: %d\n", __func__, ret); goto unregister_out; @@ -813,7 +813,7 @@ static int process_sync_pds_resv(struct amdkfd_process_info *process_info, ret = amdgpu_sync_resv(NULL, sync, pd->tbo.resv, - AMDGPU_FENCE_OWNER_UNDEFINED, false); + 
AMDGPU_FENCE_OWNER_KFD, false); if (ret) return ret; } @@ -1729,8 +1729,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, bo = mem->bo; /* Get updated user pages */ - ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, - bo->tbo.ttm->pages); + ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages); if (ret) { pr_debug("%s: Failed to get user pages: %d\n", __func__, ret); @@ -1740,6 +1739,12 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, } amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm); + + /* Mark the BO as valid unless it was invalidated + * again concurrently. + */ + if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid) + return -EAGAIN; } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 37adce981fa3..e069de8b54e6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -633,7 +633,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, return -ENOMEM; } - r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, e->user_pages); + r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages); if (r) { kvfree(e->user_pages); e->user_pages = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index e886be292f86..7401bc95c15b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2596,6 +2596,17 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (adev->rio_mem == NULL) DRM_INFO("PCI I/O BAR is not found.\n"); + /* enable PCIE atomic ops */ + r = pci_enable_atomic_ops_to_root(adev->pdev, + PCI_EXP_DEVCAP2_ATOMIC_COMP32 | + PCI_EXP_DEVCAP2_ATOMIC_COMP64); + if (r) { + adev->have_atomics_support = false; + DRM_INFO("PCIE atomic ops is not supported\n"); + } else { + adev->have_atomics_support = true; + } + amdgpu_device_get_pcie_info(adev); if (amdgpu_mcbp) @@ -2604,7 +2615,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10) adev->enable_mes = true; - if (amdgpu_discovery) { + if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) { r = amdgpu_discovery_init(adev); if (r) { dev_err(adev->dev, "amdgpu_discovery_init failed\n"); @@ -2798,7 +2809,8 @@ fence_driver_init: return r; } - r = amdgpu_pmu_init(adev); + if (IS_ENABLED(CONFIG_PERF_EVENTS)) + r = amdgpu_pmu_init(adev); if (r) dev_err(adev->dev, "amdgpu_pmu_init failed\n"); @@ -2870,9 +2882,10 @@ void amdgpu_device_fini(struct amdgpu_device *adev) amdgpu_debugfs_regs_cleanup(adev); device_remove_file(adev->dev, &dev_attr_pcie_replay_count); amdgpu_ucode_sysfs_fini(adev); - amdgpu_pmu_fini(adev); + if (IS_ENABLED(CONFIG_PERF_EVENTS)) + amdgpu_pmu_fini(adev); amdgpu_debugfs_preempt_cleanup(adev); - if (amdgpu_discovery) + if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) amdgpu_discovery_fini(adev); } @@ -3559,6 +3572,12 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive, if (vram_lost) amdgpu_device_fill_reset_magic(tmp_adev); + /* + * Add this ASIC as tracked as reset was already + * complete successfully. 
+ */ + amdgpu_register_gpu_instance(tmp_adev); + r = amdgpu_device_ip_late_init(tmp_adev); if (r) goto out; @@ -3693,8 +3712,19 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, device_list_handle = &device_list; } + /* + * Mark these ASICs to be reseted as untracked first + * And add them back after reset completed + */ + list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) + amdgpu_unregister_gpu_instance(tmp_adev); + /* block all schedulers and reset given job's ring */ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { + /* disable ras on ALL IPs */ + if (amdgpu_device_ip_need_full_reset(tmp_adev)) + amdgpu_ras_suspend(tmp_adev); + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = tmp_adev->rings[i]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 3913a75924c6..1b0613c7cf95 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -247,7 +247,8 @@ module_param_named(msi, amdgpu_msi, int, 0444); * By default(with no lockup_timeout settings), the timeout for all non-compute(GFX, SDMA and Video) * jobs is 10000. And there is no timeout enforced on compute jobs. */ -MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: 10000 for non-compute jobs and no timeout for compute jobs), " +MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: 10000 for non-compute jobs and infinity timeout for compute jobs." + " 0: keep default value. negative: infinity timeout), " "format is [Non-Compute] or [GFX,Compute,SDMA,Video]"); module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444); @@ -581,14 +582,27 @@ MODULE_PARM_DESC(async_gfx_ring, "Asynchronous GFX rings that could be configured with either different priorities (HP3D ring and LP3D ring), or equal priorities (0 = disabled, 1 = enabled (default))"); module_param_named(async_gfx_ring, amdgpu_async_gfx_ring, int, 0444); +/** + * DOC: mcbp (int) + * It is used to enable mid command buffer preemption. (0 = disabled (default), 1 = enabled) + */ MODULE_PARM_DESC(mcbp, "Enable Mid-command buffer preemption (0 = disabled (default), 1 = enabled)"); module_param_named(mcbp, amdgpu_mcbp, int, 0444); +/** + * DOC: discovery (int) + * Allow driver to discover hardware IP information from IP Discovery table at the top of VRAM. + */ MODULE_PARM_DESC(discovery, "Allow driver to discover hardware IPs from IP Discovery table at the top of VRAM"); module_param_named(discovery, amdgpu_discovery, int, 0444); +/** + * DOC: mes (int) + * Enable Micro Engine Scheduler. This is a new hw scheduling engine for gfx, sdma, and compute. + * (0 = disabled (default), 1 = enabled) + */ MODULE_PARM_DESC(mes, "Enable Micro Engine Scheduler (0 = disabled (default), 1 = enabled)"); module_param_named(mes, amdgpu_mes, int, 0444); @@ -1302,7 +1316,8 @@ int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) * By default timeout for non compute jobs is 10000. * And there is no timeout enforced on compute jobs. 
*/ - adev->gfx_timeout = adev->sdma_timeout = adev->video_timeout = 10000; + adev->gfx_timeout = msecs_to_jiffies(10000); + adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; adev->compute_timeout = MAX_SCHEDULE_TIMEOUT; if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENTH)) { @@ -1312,10 +1327,13 @@ int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) if (ret) return ret; - /* Invalidate 0 and negative values */ - if (timeout <= 0) { + if (timeout == 0) { index++; continue; + } else if (timeout < 0) { + timeout = MAX_SCHEDULE_TIMEOUT; + } else { + timeout = msecs_to_jiffies(timeout); } switch (index++) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h index dad2186f4ed5..df8a23554831 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h @@ -31,7 +31,8 @@ struct amdgpu_gds { uint32_t gds_size; uint32_t gws_size; uint32_t oa_size; - uint32_t gds_compute_max_wave_id; + uint32_t gds_compute_max_wave_id; + uint32_t vgt_gs_max_wave_id; }; struct amdgpu_gds_reg_offset { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 1f9f27061e2f..939f8305511b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -327,8 +327,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, } if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) { - r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, - bo->tbo.ttm->pages); + r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages); if (r) goto release_object; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 5832cd8f4ff1..0cf7e8606fd3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -44,7 +44,7 @@ #include "amdgpu_display.h" #include "amdgpu_ras.h" -static void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev) +void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev) { struct amdgpu_gpu_instance *gpu_instance; int i; @@ -105,7 +105,7 @@ done_free: dev->dev_private = NULL; } -static void amdgpu_register_gpu_instance(struct amdgpu_device *adev) +void amdgpu_register_gpu_instance(struct amdgpu_device *adev) { struct amdgpu_gpu_instance *gpu_instance; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 623f56a1485f..3971c201f320 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -45,49 +45,12 @@ #include <linux/firmware.h> #include <linux/module.h> -#include <linux/hmm.h> -#include <linux/interval_tree.h> - #include <drm/drm.h> #include "amdgpu.h" #include "amdgpu_amdkfd.h" /** - * struct amdgpu_mn - * - * @adev: amdgpu device pointer - * @mm: process address space - * @type: type of MMU notifier - * @work: destruction work item - * @node: hash table node to find structure by adev and mn - * @lock: rw semaphore protecting the notifier nodes - * @objects: interval tree containing amdgpu_mn_nodes - * @mirror: HMM mirror function support - * - * Data for each amdgpu device and process address space. 
- */ -struct amdgpu_mn { - /* constant after initialisation */ - struct amdgpu_device *adev; - struct mm_struct *mm; - enum amdgpu_mn_type type; - - /* only used on destruction */ - struct work_struct work; - - /* protected by adev->mn_lock */ - struct hlist_node node; - - /* objects protected by lock */ - struct rw_semaphore lock; - struct rb_root_cached objects; - - /* HMM mirror */ - struct hmm_mirror mirror; -}; - -/** * struct amdgpu_mn_node * * @it: interval node defining start-last of the affected address range diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h index f5b67c63ed6b..b8ed68943625 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h @@ -24,17 +24,53 @@ #ifndef __AMDGPU_MN_H__ #define __AMDGPU_MN_H__ -/* - * HMM mirror - */ -struct amdgpu_mn; -struct hmm_range; +#include <linux/types.h> +#include <linux/hmm.h> +#include <linux/rwsem.h> +#include <linux/workqueue.h> +#include <linux/interval_tree.h> enum amdgpu_mn_type { AMDGPU_MN_TYPE_GFX, AMDGPU_MN_TYPE_HSA, }; +/** + * struct amdgpu_mn + * + * @adev: amdgpu device pointer + * @mm: process address space + * @type: type of MMU notifier + * @work: destruction work item + * @node: hash table node to find structure by adev and mn + * @lock: rw semaphore protecting the notifier nodes + * @objects: interval tree containing amdgpu_mn_nodes + * @mirror: HMM mirror function support + * + * Data for each amdgpu device and process address space. + */ +struct amdgpu_mn { + /* constant after initialisation */ + struct amdgpu_device *adev; + struct mm_struct *mm; + enum amdgpu_mn_type type; + + /* only used on destruction */ + struct work_struct work; + + /* protected by adev->mn_lock */ + struct hlist_node node; + + /* objects protected by lock */ + struct rw_semaphore lock; + struct rb_root_cached objects; + +#ifdef CONFIG_HMM_MIRROR + /* HMM mirror */ + struct hmm_mirror mirror; +#endif +}; + #if defined(CONFIG_HMM_MIRROR) void amdgpu_mn_lock(struct amdgpu_mn *mn); void amdgpu_mn_unlock(struct amdgpu_mn *mn); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index c9faa69cd677..333cfbdf6dd0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -731,8 +731,10 @@ struct amdgpu_ttm_tt { #define MAX_RETRY_HMM_RANGE_FAULT 16 -int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) +int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) { + struct hmm_mirror *mirror = bo->mn ? 
&bo->mn->mirror : NULL; + struct ttm_tt *ttm = bo->tbo.ttm; struct amdgpu_ttm_tt *gtt = (void *)ttm; struct mm_struct *mm = gtt->usertask->mm; unsigned long start = gtt->userptr; @@ -746,6 +748,12 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) if (!mm) /* Happens during process shutdown */ return -ESRCH; + if (unlikely(!mirror)) { + DRM_DEBUG_DRIVER("Failed to get hmm_mirror\n"); + r = -EFAULT; + goto out; + } + vma = find_vma(mm, start); if (unlikely(!vma || start < vma->vm_start)) { r = -EFAULT; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index c2b7669004ba..caa76c693700 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -102,10 +102,11 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo); int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo); #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) -int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages); +int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages); bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm); #else -static inline int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) +static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, + struct page **pages) { return -EPERM; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 2932ade7dbd0..ee41d5592c51 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -1544,24 +1544,6 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev) gfx_v10_0_init_compute_vmid(adev); - mutex_lock(&adev->grbm_idx_mutex); - /* - * making sure that the following register writes will be broadcasted - * to all the shaders - */ - gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); - - tmp = REG_SET_FIELD(0, PA_SC_FIFO_SIZE, SC_FRONTEND_PRIM_FIFO_SIZE, - adev->gfx.config.sc_prim_fifo_size_frontend); - tmp = REG_SET_FIELD(tmp, PA_SC_FIFO_SIZE, SC_BACKEND_PRIM_FIFO_SIZE, - adev->gfx.config.sc_prim_fifo_size_backend); - tmp = REG_SET_FIELD(tmp, PA_SC_FIFO_SIZE, SC_HIZ_TILE_FIFO_SIZE, - adev->gfx.config.sc_hiz_tile_fifo_size); - tmp = REG_SET_FIELD(tmp, PA_SC_FIFO_SIZE, SC_EARLYZ_TILE_FIFO_SIZE, - adev->gfx.config.sc_earlyz_tile_fifo_size); - WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE, tmp); - - mutex_unlock(&adev->grbm_idx_mutex); } static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, @@ -4215,6 +4197,15 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, unsigned vmid = AMDGPU_JOB_GET_VMID(job); u32 header, control = 0; + /* Prevent a hw deadlock due to a wave ID mismatch between ME and GDS. + * This resets the wave ID counters. (needed by transform feedback) + * TODO: This might only be needed on a VMID switch when we change + * the GDS OA mapping, not sure. 
+ */ + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); + amdgpu_ring_write(ring, mmVGT_GS_MAX_WAVE_ID); + amdgpu_ring_write(ring, ring->adev->gds.vgt_gs_max_wave_id); + if (ib->flags & AMDGPU_IB_FLAG_CE) header = PACKET3(PACKET3_INDIRECT_BUFFER_CNST, 2); else @@ -4252,6 +4243,22 @@ static void gfx_v10_0_ring_emit_ib_compute(struct amdgpu_ring *ring, unsigned vmid = AMDGPU_JOB_GET_VMID(job); u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); + /* Currently, there is a high possibility to get wave ID mismatch + * between ME and GDS, leading to a hw deadlock, because ME generates + * different wave IDs than the GDS expects. This situation happens + * randomly when at least 5 compute pipes use GDS ordered append. + * The wave IDs generated by ME are also wrong after suspend/resume. + * Those are probably bugs somewhere else in the kernel driver. + * + * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and + * GDS to 0 for this ring (me/pipe). + */ + if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) { + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); + amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID); + amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id); + } + amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ amdgpu_ring_write(ring, @@ -4278,11 +4285,7 @@ static void gfx_v10_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6)); amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ | PACKET3_RELEASE_MEM_GCR_GL2_WB | - PACKET3_RELEASE_MEM_GCR_GL2_INV | - PACKET3_RELEASE_MEM_GCR_GL2_US | - PACKET3_RELEASE_MEM_GCR_GL1_INV | - PACKET3_RELEASE_MEM_GCR_GLV_INV | - PACKET3_RELEASE_MEM_GCR_GLM_INV | + PACKET3_RELEASE_MEM_GCR_GLM_INV | /* must be set with GLM_WB */ PACKET3_RELEASE_MEM_GCR_GLM_WB | PACKET3_RELEASE_MEM_CACHE_POLICY(3) | PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | @@ -4948,7 +4951,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = { 5 + /* HDP_INVL */ 8 + 8 + /* FENCE x2 */ 2, /* SWITCH_BUFFER */ - .emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_gfx */ + .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_gfx */ .emit_ib = gfx_v10_0_ring_emit_ib_gfx, .emit_fence = gfx_v10_0_ring_emit_fence, .emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync, @@ -4987,7 +4990,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = { SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 2 + /* gfx_v10_0_ring_emit_vm_flush */ 8 + 8 + 8, /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */ - .emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_compute */ + .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */ .emit_ib = gfx_v10_0_ring_emit_ib_compute, .emit_fence = gfx_v10_0_ring_emit_fence, .emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync, @@ -5020,7 +5023,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = { SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 2 + /* gfx_v10_0_ring_emit_vm_flush */ 8 + 8 + 8, /* gfx_v10_0_ring_emit_fence_kiq x3 for user fence, vm fence */ - .emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_compute */ + .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */ .emit_ib = gfx_v10_0_ring_emit_ib_compute, .emit_fence = gfx_v10_0_ring_emit_fence_kiq, .test_ring = gfx_v10_0_ring_test_ring, @@ -5096,10 +5099,10 @@ static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev) /* init asic gds info */ switch (adev->asic_type) { case CHIP_NAVI10: - 
adev->gds.gds_size = 0x10000; - break; default: adev->gds.gds_size = 0x10000; + adev->gds.gds_compute_max_wave_id = 0x4ff; + adev->gds.vgt_gs_max_wave_id = 0x3ff; break; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index a57b5abf96a0..032e76dbc51f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -3925,11 +3925,10 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev) int list_size; unsigned int *register_list_format = - kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL); + kmemdup(adev->gfx.rlc.register_list_format, + adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL); if (!register_list_format) return -ENOMEM; - memcpy(register_list_format, adev->gfx.rlc.register_list_format, - adev->gfx.rlc.reg_list_format_size_bytes); gfx_v8_0_parse_ind_reg_list(register_list_format, RLC_FormatDirectRegListLength, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 9b413f6fa588..5ba332376710 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1960,25 +1960,6 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev) mutex_unlock(&adev->srbm_mutex); gfx_v9_0_init_compute_vmid(adev); - - mutex_lock(&adev->grbm_idx_mutex); - /* - * making sure that the following register writes will be broadcasted - * to all the shaders - */ - gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); - - WREG32_SOC15_RLC(GC, 0, mmPA_SC_FIFO_SIZE, - (adev->gfx.config.sc_prim_fifo_size_frontend << - PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) | - (adev->gfx.config.sc_prim_fifo_size_backend << - PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) | - (adev->gfx.config.sc_hiz_tile_fifo_size << - PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) | - (adev->gfx.config.sc_earlyz_tile_fifo_size << - PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)); - mutex_unlock(&adev->grbm_idx_mutex); - } static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev) @@ -2093,11 +2074,10 @@ static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev) u32 tmp = 0; u32 *register_list_format = - kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL); + kmemdup(adev->gfx.rlc.register_list_format, + adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL); if (!register_list_format) return -ENOMEM; - memcpy(register_list_format, adev->gfx.rlc.register_list_format, - adev->gfx.rlc.reg_list_format_size_bytes); /* setup unique_indirect_regs array and indirect_start_offsets array */ unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index cec7c1fb14bf..5eeb72fcc123 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -245,8 +245,9 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, mutex_lock(&adev->mman.gtt_window_lock); gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB, 0); - if (!adev->mman.buffer_funcs_enabled || !adev->ib_pool_ready || - adev->asic_type != CHIP_NAVI10) { + if (!adev->mman.buffer_funcs_enabled || + !adev->ib_pool_ready || + adev->in_gpu_reset) { gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB, 0); mutex_unlock(&adev->mman.gtt_window_lock); return; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.h b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.h index 17b9b53fa892..9afd6ddb01e9 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.h +++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.h @@ -22,7 +22,7 @@ */ #ifndef __MES_V10_1_H__ -#define __MES_v10_1_H__ +#define __MES_V10_1_H__ extern const struct amdgpu_ip_block_version mes_v10_1_ip_block; diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index af20ffb55c54..ad430cbcd72f 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -31,6 +31,7 @@ #include "amdgpu_vce.h" #include "amdgpu_ucode.h" #include "amdgpu_psp.h" +#include "amdgpu_smu.h" #include "atom.h" #include "amd_pcie.h" @@ -255,6 +256,39 @@ static void nv_gpu_pci_config_reset(struct amdgpu_device *adev) } #endif +static int nv_asic_mode1_reset(struct amdgpu_device *adev) +{ + u32 i; + int ret = 0; + + amdgpu_atombios_scratch_regs_engine_hung(adev, true); + + dev_info(adev->dev, "GPU mode1 reset\n"); + + /* disable BM */ + pci_clear_master(adev->pdev); + + pci_save_state(adev->pdev); + + ret = psp_gpu_reset(adev); + if (ret) + dev_err(adev->dev, "GPU mode1 reset failed\n"); + + pci_restore_state(adev->pdev); + + /* wait for asic to come out of reset */ + for (i = 0; i < adev->usec_timeout; i++) { + u32 memsize = adev->nbio_funcs->get_memsize(adev); + + if (memsize != 0xffffffff) + break; + udelay(1); + } + + amdgpu_atombios_scratch_regs_engine_hung(adev, false); + + return ret; +} static int nv_asic_reset(struct amdgpu_device *adev) { @@ -266,8 +300,15 @@ static int nv_asic_reset(struct amdgpu_device *adev) amdgpu_atombios_scratch_regs_engine_hung(adev, false); #endif + int ret = 0; + struct smu_context *smu = &adev->smu; - return 0; + if (smu_baco_is_support(smu)) + ret = smu_baco_reset(smu); + else + ret = nv_asic_mode1_reset(adev); + + return ret; } static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk) @@ -348,8 +389,12 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); +#if defined(CONFIG_DRM_AMD_DC) else if (amdgpu_device_has_dc_support(adev)) amdgpu_device_ip_block_add(adev, &dm_ip_block); +#else +# warning "Enable CONFIG_DRM_AMD_DC for display support on navi." +#endif amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 759b2768c51c..61744e2d16fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -534,7 +534,7 @@ psp_v11_0_sram_map(struct amdgpu_device *adev, case AMDGPU_UCODE_ID_RLC_G: *sram_offset = 0x2000; - if (adev->asic_type != CHIP_NAVI10) { + if (adev->asic_type < CHIP_NAVI10) { *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR); *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA); } else { @@ -545,7 +545,7 @@ psp_v11_0_sram_map(struct amdgpu_device *adev, case AMDGPU_UCODE_ID_SDMA0: *sram_offset = 0x0; - if (adev->asic_type != CHIP_NAVI10) { + if (adev->asic_type < CHIP_NAVI10) { *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR); *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA); } else { |
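The amdgpu_drv.c hunk above also changes how the lockup_timeout module parameter is interpreted: each value is now converted from milliseconds to jiffies, 0 keeps the built-in default, and a negative value means no timeout at all. A simplified sketch of that mapping, assuming only standard kernel helpers (the function below is illustrative and not part of the patch):

#include <linux/jiffies.h>
#include <linux/sched.h>	/* MAX_SCHEDULE_TIMEOUT */

/* Illustrative only: map one lockup_timeout entry (in ms) to the jiffies
 * value kept in adev->*_timeout.
 *   0   -> keep the current default
 *   < 0 -> no timeout (MAX_SCHEDULE_TIMEOUT)
 *   > 0 -> convert from milliseconds to jiffies
 */
static long amdgpu_lockup_timeout_to_jiffies(long timeout_ms, long current_default)
{
	if (timeout_ms == 0)
		return current_default;
	if (timeout_ms < 0)
		return MAX_SCHEDULE_TIMEOUT;
	return msecs_to_jiffies(timeout_ms);
}

Per the documented "[GFX,Compute,SDMA,Video]" format, loading the driver with lockup_timeout=10000,-1,10000,10000 would, for example, give the GFX, SDMA and video rings a 10-second timeout while leaving compute jobs without one.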