Diffstat (limited to 'sys/dev/pci/drm/amd/amdgpu/amdgpu_device.c')

-rw-r--r--  sys/dev/pci/drm/amd/amdgpu/amdgpu_device.c | 2084
 1 file changed, 1516 insertions(+), 568 deletions(-)
diff --git a/sys/dev/pci/drm/amd/amdgpu/amdgpu_device.c b/sys/dev/pci/drm/amd/amdgpu/amdgpu_device.c index 7bb89291739..71ef4d4537e 100644 --- a/sys/dev/pci/drm/amd/amdgpu/amdgpu_device.c +++ b/sys/dev/pci/drm/amd/amdgpu/amdgpu_device.c @@ -27,11 +27,12 @@ */ #include <linux/power_supply.h> #include <linux/kthread.h> +#include <linux/module.h> #include <linux/console.h> #include <linux/slab.h> -#include <drm/drmP.h> -#include <drm/drm_crtc_helper.h> + #include <drm/drm_atomic_helper.h> +#include <drm/drm_probe_helper.h> #include <drm/amdgpu_drm.h> #include <linux/vgaarb.h> #include <linux/vga_switcheroo.h> @@ -51,6 +52,7 @@ #endif #include "vi.h" #include "soc15.h" +#include "nv.h" #include "bif/bif_4_1_d.h" #include <linux/pci.h> #include <linux/firmware.h> @@ -59,14 +61,27 @@ #include "amdgpu_amdkfd.h" #include "amdgpu_pm.h" +#include "amdgpu_xgmi.h" +#include "amdgpu_ras.h" +#include "amdgpu_pmu.h" + +#include <linux/suspend.h> +#include <drm/task_barrier.h> + MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin"); +MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin"); +MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin"); +MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin"); +MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin"); +MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin"); +MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin"); #define AMDGPU_RESUME_MS 2000 -static const char *amdgpu_asic_name[] = { +const char *amdgpu_asic_name[] = { "TAHITI", "PITCAIRN", "VERDE", @@ -90,20 +105,47 @@ static const char *amdgpu_asic_name[] = { "VEGA12", "VEGA20", "RAVEN", + "ARCTURUS", + "RENOIR", + "NAVI10", + "NAVI14", + "NAVI12", "LAST", }; +/** + * DOC: pcie_replay_count + * + * The amdgpu driver provides a sysfs API for reporting the total number + * of PCIe replays (NAKs) + * The file pcie_replay_count is used for this and returns the total + * number of replays as a sum of the NAKs generated and NAKs received + */ + +static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev); + + return snprintf(buf, PAGE_SIZE, "%llu\n", cnt); +} + +static DEVICE_ATTR(pcie_replay_count, S_IRUGO, + amdgpu_device_get_pcie_replay_count, NULL); + static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev); /** - * amdgpu_device_is_px - Is the device is a dGPU with HG/PX power control + * amdgpu_device_supports_boco - Is the device a dGPU with HG/PX power control * * @dev: drm_device pointer * * Returns true if the device is a dGPU with HG/PX power control, * otherwise return false. */ -bool amdgpu_device_is_px(struct drm_device *dev) +bool amdgpu_device_supports_boco(struct drm_device *dev) { struct amdgpu_device *adev = dev->dev_private; @@ -112,6 +154,82 @@ bool amdgpu_device_is_px(struct drm_device *dev) return false; } +/** + * amdgpu_device_supports_baco - Does the device support BACO + * + * @dev: drm_device pointer + * + * Returns true if the device supporte BACO, + * otherwise return false. + */ +bool amdgpu_device_supports_baco(struct drm_device *dev) +{ + struct amdgpu_device *adev = dev->dev_private; + + return amdgpu_asic_supports_baco(adev); +} + +/** + * VRAM access helper functions. 
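
/*
 * Hedged sketch of the read-only sysfs attribute pattern used by the
 * pcie_replay_count hunk above.  The mydrv_*/my_state names are
 * illustrative stand-ins, not part of the patch.
 */
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/types.h>

struct my_state {
	u64 counter;
};

static ssize_t mydrv_counter_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	/* dev_set_drvdata() stored this pointer at probe time */
	struct my_state *st = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%llu\n", st->counter);
}
static DEVICE_ATTR(counter, S_IRUGO, mydrv_counter_show, NULL);

/* Wired up once with device_create_file(dev, &dev_attr_counter) and torn
 * down with device_remove_file(), exactly as the patch does for
 * dev_attr_pcie_replay_count in amdgpu_device_init()/fini(). */
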
+ * + * amdgpu_device_vram_access - read/write a buffer in vram + * + * @adev: amdgpu_device pointer + * @pos: offset of the buffer in vram + * @buf: virtual address of the buffer in system memory + * @size: read/write size, sizeof(@buf) must > @size + * @write: true - write to vram, otherwise - read from vram + */ +void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, + uint32_t *buf, size_t size, bool write) +{ + unsigned long flags; + uint32_t hi = ~0; + uint64_t last; + + +#ifdef CONFIG_64BIT + last = min(pos + size, adev->gmc.visible_vram_size); + if (last > pos) { + void __iomem *addr = adev->mman.aper_base_kaddr + pos; + size_t count = last - pos; + + if (write) { + memcpy_toio(addr, buf, count); + mb(); + amdgpu_asic_flush_hdp(adev, NULL); + } else { + amdgpu_asic_invalidate_hdp(adev, NULL); + mb(); + memcpy_fromio(buf, addr, count); + } + + if (count == size) + return; + + pos += count; + buf += count / 4; + size -= count; + } +#endif + + spin_lock_irqsave(&adev->mmio_idx_lock, flags); + for (last = pos + size; pos < last; pos += 4) { + uint32_t tmp = pos >> 31; + + WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000); + if (tmp != hi) { + WREG32_NO_KIQ(mmMM_INDEX_HI, tmp); + hi = tmp; + } + if (write) + WREG32_NO_KIQ(mmMM_DATA, *buf++); + else + *buf++ = RREG32_NO_KIQ(mmMM_DATA); + } + spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); +} + /* * MMIO register access helper functions. */ @@ -129,8 +247,8 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, { uint32_t ret; - if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) - return amdgpu_virt_kiq_rreg(adev, reg); + if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))) + return amdgpu_kiq_rreg(adev, reg); if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) ret = bus_space_read_4(adev->rmmio_bst, adev->rmmio_bsh, @@ -193,6 +311,29 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) BUG(); } +void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags) +{ + trace_amdgpu_mm_wreg(adev->pdev->device, reg, v); + + if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) + bus_space_write_4(adev->rmmio_bst, adev->rmmio_bsh, + reg * 4, v); + else { + unsigned long flags; + + spin_lock_irqsave(&adev->mmio_idx_lock, flags); + bus_space_write_4(adev->rmmio_bst, adev->rmmio_bsh, + mmMM_INDEX * 4, reg * 4); + bus_space_write_4(adev->rmmio_bst, adev->rmmio_bsh, + mmMM_DATA * 4, v); + spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); + } + + if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) { + udelay(500); + } +} + /** * amdgpu_mm_wreg - write to a memory mapped IO register * @@ -206,32 +347,33 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags) { - trace_amdgpu_mm_wreg(adev->pdev->device, reg, v); - if (adev->asic_type >= CHIP_VEGA10 && reg == 0) { adev->last_mm_index = v; } - if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) - return amdgpu_virt_kiq_wreg(adev, reg, v); + if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))) + return amdgpu_kiq_wreg(adev, reg, v); - if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) - bus_space_write_4(adev->rmmio_bst, adev->rmmio_bsh, - reg * 4, v); - 
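
/*
 * Hedged sketch of the MM_INDEX/MM_DATA window used by
 * amdgpu_device_vram_access() above: VRAM beyond the CPU-visible BAR is
 * accessed one dword at a time by programming an index register pair and
 * then touching the data register.  Register names are the real mmMM_*
 * ones; locking and the access wrappers follow the patch.
 */
static uint32_t vram_read32_indirect(struct amdgpu_device *adev, uint64_t pos)
{
	unsigned long flags;
	uint32_t val;

	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
	/* bit 31 selects the MM aperture; bits above 31 go in MM_INDEX_HI */
	WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
	WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
	val = RREG32_NO_KIQ(mmMM_DATA);
	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	return val;
}
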
else { - unsigned long flags; + amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags); +} - spin_lock_irqsave(&adev->mmio_idx_lock, flags); - bus_space_write_4(adev->rmmio_bst, adev->rmmio_bsh, - mmMM_INDEX * 4, reg * 4); - bus_space_write_4(adev->rmmio_bst, adev->rmmio_bsh, - mmMM_DATA * 4, v); - spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); - } +/* + * amdgpu_mm_wreg_mmio_rlc - write register either with mmio or with RLC path if in range + * + * this function is invoked only the debugfs register access + * */ +void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v, + uint32_t acc_flags) +{ + if (amdgpu_sriov_fullaccess(adev) && + adev->gfx.rlc.funcs && + adev->gfx.rlc.funcs->is_rlcg_access_range) { - if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) { - udelay(500); + if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg)) + return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v); } + + amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags); } /** @@ -400,6 +542,40 @@ static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32 } /** + * amdgpu_invalid_rreg64 - dummy 64 bit reg read function + * + * @adev: amdgpu device pointer + * @reg: offset of register + * + * Dummy register read function. Used for register blocks + * that certain asics don't have (all asics). + * Returns the value in the register. + */ +static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg) +{ + DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg); + BUG(); + return 0; +} + +/** + * amdgpu_invalid_wreg64 - dummy reg write function + * + * @adev: amdgpu device pointer + * @reg: offset of register + * @v: value to write to the register + * + * Dummy register read function. Used for register blocks + * that certain asics don't have (all asics). + */ +static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v) +{ + DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n", + reg, v); + BUG(); +} + +/** * amdgpu_block_invalid_rreg - dummy reg read function * * @adev: amdgpu device pointer @@ -498,7 +674,10 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev, } else { tmp = RREG32(reg); tmp &= ~and_mask; - tmp |= or_mask; + if (adev->family >= AMDGPU_FAMILY_AI) + tmp |= (or_mask & and_mask); + else + tmp |= or_mask; } WREG32(reg, tmp); } @@ -530,6 +709,7 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev) */ static int amdgpu_device_doorbell_init(struct amdgpu_device *adev) { + /* No doorbell on SI hardware generation */ if (adev->asic_type < CHIP_BONAIRE) { adev->doorbell.base = 0; @@ -541,20 +721,33 @@ static int amdgpu_device_doorbell_init(struct amdgpu_device *adev) return 0; } -#ifdef __linux__ +#ifdef __linux if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET) return -EINVAL; +#endif + + amdgpu_asic_init_doorbell_index(adev); /* doorbell bar mapping */ +#ifdef __linux__ adev->doorbell.base = pci_resource_start(adev->pdev, 2); adev->doorbell.size = pci_resource_len(adev->pdev, 2); #endif adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32), - AMDGPU_DOORBELL_MAX_ASSIGNMENT+1); + adev->doorbell_index.max_assignment+1); if (adev->doorbell.num_doorbells == 0) return -EINVAL; + /* For Vega, reserve and map two pages on doorbell BAR since SDMA + * paging queue doorbell use the second page. The + * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the + * doorbells are in the first page. 
So with paging queue enabled, + * the max num_doorbells should + 1 page (0x400 in dword) + */ + if (adev->asic_type >= CHIP_VEGA10) + adev->doorbell.num_doorbells += 0x400; + #ifdef __linux__ adev->doorbell.ptr = ioremap(adev->doorbell.base, adev->doorbell.num_doorbells * @@ -683,71 +876,6 @@ void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb) } /** - * amdgpu_device_vram_location - try to find VRAM location - * - * @adev: amdgpu device structure holding all necessary informations - * @mc: memory controller structure holding memory informations - * @base: base address at which to put VRAM - * - * Function will try to place VRAM at base address provided - * as parameter. - */ -void amdgpu_device_vram_location(struct amdgpu_device *adev, - struct amdgpu_gmc *mc, u64 base) -{ - uint64_t limit = (uint64_t)amdgpu_vram_limit << 20; - - mc->vram_start = base; - mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; - if (limit && limit < mc->real_vram_size) - mc->real_vram_size = limit; - dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", - mc->mc_vram_size >> 20, mc->vram_start, - mc->vram_end, mc->real_vram_size >> 20); -} - -/** - * amdgpu_device_gart_location - try to find GART location - * - * @adev: amdgpu device structure holding all necessary informations - * @mc: memory controller structure holding memory informations - * - * Function will place try to place GART before or after VRAM. - * - * If GART size is bigger than space left then we ajust GART size. - * Thus function will never fails. - */ -void amdgpu_device_gart_location(struct amdgpu_device *adev, - struct amdgpu_gmc *mc) -{ - u64 size_af, size_bf; - - mc->gart_size += adev->pm.smu_prv_buffer_size; - - size_af = adev->gmc.mc_mask - mc->vram_end; - size_bf = mc->vram_start; - if (size_bf > size_af) { - if (mc->gart_size > size_bf) { - dev_warn(adev->dev, "limiting GART\n"); - mc->gart_size = size_bf; - } - mc->gart_start = 0; - } else { - if (mc->gart_size > size_af) { - dev_warn(adev->dev, "limiting GART\n"); - mc->gart_size = size_af; - } - /* VCE doesn't like it when BOs cross a 4GB segment, so align - * the GART base on a 4GB boundary as well. - */ - mc->gart_start = roundup2(mc->vram_end + 1, 0x100000000ULL); - } - mc->gart_end = mc->gart_start + mc->gart_size - 1; - dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n", - mc->gart_size >> 20, mc->gart_start, mc->gart_end); -} - -/** * amdgpu_device_resize_fb_bar - try to resize FB BAR * * @adev: amdgpu_device pointer @@ -972,7 +1100,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev) #ifdef __linux__ struct sysinfo si; #endif - bool is_os_64 = (sizeof(void *) == 8) ? true : false; + bool is_os_64 = (sizeof(void *) == 8); uint64_t total_memory; uint64_t dram_size_seven_GB = 0x1B8000000; uint64_t dram_size_three_GB = 0xB8000000; @@ -1021,7 +1149,7 @@ def_value: * Validates certain module parameters and updates * the associated values used by the driver (all asics). 
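
/*
 * Hedged sketch of the {reg, and_mask, or_mask} triples consumed by
 * amdgpu_device_program_register_sequence() in the hunk above: and_mask
 * selects the field being replaced, or_mask supplies the new bits, and on
 * AMDGPU_FAMILY_AI and newer the or_mask is additionally clipped to the
 * and_mask so stray bits outside the field can never be set.  The values
 * below are illustrative only.
 */
static const u32 example_golden_regs[] = {
	/*  reg         and_mask    or_mask   */
	0x0000a0d4, 0x00000300, 0x00000100,
};

static void program_one(struct amdgpu_device *adev,
			u32 reg, u32 and_mask, u32 or_mask)
{
	u32 tmp = RREG32(reg);

	tmp &= ~and_mask;		/* clear the field          */
	tmp |= (or_mask & and_mask);	/* AI+ behaviour from above */
	WREG32(reg, tmp);
}
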
*/ -static void amdgpu_device_check_arguments(struct amdgpu_device *adev) +static int amdgpu_device_check_arguments(struct amdgpu_device *adev) { if (amdgpu_sched_jobs < 4) { dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n", @@ -1060,19 +1188,9 @@ static void amdgpu_device_check_arguments(struct amdgpu_device *adev) amdgpu_device_check_block_size(adev); - if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 || - !is_power_of_2(amdgpu_vram_page_split))) { - dev_warn(adev->dev, "invalid VRAM page split (%d)\n", - amdgpu_vram_page_split); - amdgpu_vram_page_split = 1024; - } - - if (amdgpu_lockup_timeout == 0) { - dev_warn(adev->dev, "lockup_timeout msut be > 0, adjusting to 10000\n"); - amdgpu_lockup_timeout = 10000; - } - adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type); + + return 0; } #ifdef __linux__ @@ -1088,8 +1206,9 @@ static void amdgpu_device_check_arguments(struct amdgpu_device *adev) static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) { struct drm_device *dev = pci_get_drvdata(pdev); + int r; - if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF) + if (amdgpu_device_supports_boco(dev) && state == VGA_SWITCHEROO_OFF) return; if (state == VGA_SWITCHEROO_ON) { @@ -1097,7 +1216,12 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero /* don't suspend or resume card normally */ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - amdgpu_device_resume(dev, true, true); + pci_set_power_state(dev->pdev, PCI_D0); + pci_restore_state(dev->pdev); + r = pci_enable_device(dev->pdev); + if (r) + DRM_WARN("pci_enable_device failed (%d)\n", r); + amdgpu_device_resume(dev, true); dev->switch_power_state = DRM_SWITCH_POWER_ON; drm_kms_helper_poll_enable(dev); @@ -1105,7 +1229,11 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero pr_info("amdgpu: switched off\n"); drm_kms_helper_poll_disable(dev); dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - amdgpu_device_suspend(dev, true, true); + amdgpu_device_suspend(dev, true); + pci_save_state(dev->pdev); + /* Shut down the device */ + pci_disable_device(dev->pdev); + pci_set_power_state(dev->pdev, PCI_D3cold); dev->switch_power_state = DRM_SWITCH_POWER_OFF; } } @@ -1128,7 +1256,7 @@ static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev) * locking inversion with the driver load path. And the access here is * completely racy anyway. So don't bother with locking for now. 
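
/*
 * Hedged sketch of the vga_switcheroo power sequencing added above: on
 * switch-on the PCI device must be put in D0 and its config space
 * restored *before* amdgpu_device_resume(); switch-off mirrors the order.
 * Condensed from the patched amdgpu_switcheroo_set_state().
 */
static void example_switch_on(struct drm_device *dev)
{
	pci_set_power_state(dev->pdev, PCI_D0);
	pci_restore_state(dev->pdev);
	if (pci_enable_device(dev->pdev))
		DRM_WARN("pci_enable_device failed\n");
	amdgpu_device_resume(dev, true);
}

static void example_switch_off(struct drm_device *dev)
{
	amdgpu_device_suspend(dev, true);
	pci_save_state(dev->pdev);
	pci_disable_device(dev->pdev);
	pci_set_power_state(dev->pdev, PCI_D3cold);
}
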
*/ - return dev->open_count == 0; + return atomic_read(&dev->open_count) == 0; } static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = { @@ -1464,11 +1592,28 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) chip_name = "vega12"; break; case CHIP_RAVEN: - if (adev->pdev->device == 0x15d8) + if (adev->rev_id >= 8) + chip_name = "raven2"; + else if (adev->pdev->device == 0x15d8) chip_name = "picasso"; else chip_name = "raven"; break; + case CHIP_ARCTURUS: + chip_name = "arcturus"; + break; + case CHIP_RENOIR: + chip_name = "renoir"; + break; + case CHIP_NAVI10: + chip_name = "navi10"; + break; + case CHIP_NAVI14: + chip_name = "navi14"; + break; + case CHIP_NAVI12: + chip_name = "navi12"; + break; } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name); @@ -1497,6 +1642,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) + goto parse_soc_bounding_box; + adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se); adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh); adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se); @@ -1515,6 +1663,27 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu); adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size); + if (hdr->version_minor >= 1) { + const struct gpu_info_firmware_v1_1 *gpu_info_fw = + (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + adev->gfx.config.num_sc_per_sh = + le32_to_cpu(gpu_info_fw->num_sc_per_sh); + adev->gfx.config.num_packer_per_sc = + le32_to_cpu(gpu_info_fw->num_packer_per_sc); + } + +parse_soc_bounding_box: + /* + * soc bounding box info is not integrated in disocovery table, + * we always need to parse it from gpu info firmware. 
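
/*
 * Hedged sketch of the versioned gpu_info firmware parsing above: the
 * common header carries version_minor, and each minor revision appends
 * fields to the payload struct, so newer fields are only read after a
 * version check.  Struct and field names follow the patch; the field
 * copies are heavily condensed.
 */
static void parse_gpu_info_sketch(struct amdgpu_device *adev,
				  const struct firmware *fw)
{
	const struct gpu_info_firmware_header_v1_0 *hdr =
		(const void *)fw->data;
	const u8 *payload = fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes);

	/* v1_0 fields are always present */
	adev->gfx.config.max_shader_engines = le32_to_cpu(
		((const struct gpu_info_firmware_v1_0 *)payload)->gc_num_se);

	if (hdr->version_minor >= 1) {	/* v1_1 appended the sc/packer counts */
		const struct gpu_info_firmware_v1_1 *v11 = (const void *)payload;

		adev->gfx.config.num_sc_per_sh =
			le32_to_cpu(v11->num_sc_per_sh);
	}
	if (hdr->version_minor == 2) {	/* v1_2 appended the soc bounding box */
		const struct gpu_info_firmware_v1_2 *v12 = (const void *)payload;

		adev->dm.soc_bounding_box = &v12->soc_bounding_box;
	}
}
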
+ */ + if (hdr->version_minor == 2) { + const struct gpu_info_firmware_v1_2 *gpu_info_fw = + (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box; + } break; } default: @@ -1594,7 +1763,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: - if (adev->asic_type == CHIP_RAVEN) + case CHIP_ARCTURUS: + case CHIP_RENOIR: + if (adev->asic_type == CHIP_RAVEN || + adev->asic_type == CHIP_RENOIR) adev->family = AMDGPU_FAMILY_RV; else adev->family = AMDGPU_FAMILY_AI; @@ -1603,6 +1775,15 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) if (r) return r; break; + case CHIP_NAVI10: + case CHIP_NAVI14: + case CHIP_NAVI12: + adev->family = AMDGPU_FAMILY_NV; + + r = nv_set_ip_blocks(adev); + if (r) + return r; + break; default: /* FIXME: not supported yet */ return -EINVAL; @@ -1612,6 +1793,9 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) if (r) return r; + if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) + amdgpu_discovery_get_gfx_info(adev); + amdgpu_amdkfd_device_probe(adev); if (amdgpu_sriov_vf(adev)) { @@ -1620,7 +1804,9 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) return -EAGAIN; } - adev->powerplay.pp_feature = amdgpu_pp_feature_mask; + adev->pm.pp_feature = amdgpu_pp_feature_mask; + if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS) + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; for (i = 0; i < adev->num_ip_blocks; i++) { if ((amdgpu_ip_block_mask & (1 << i)) == 0) { @@ -1643,6 +1829,19 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) adev->ip_blocks[i].status.valid = true; } } + /* get the vbios after the asic_funcs are set up */ + if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { + /* Read BIOS */ + if (!amdgpu_get_bios(adev)) + return -EINVAL; + + r = amdgpu_atombios_init(adev); + if (r) { + dev_err(adev->dev, "amdgpu_atombios_init failed\n"); + amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0); + return r; + } + } } adev->cg_flags &= amdgpu_cg_mask; @@ -1651,6 +1850,94 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) return 0; } +static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev) +{ + int i, r; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_blocks[i].status.sw) + continue; + if (adev->ip_blocks[i].status.hw) + continue; + if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || + (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) || + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) { + r = adev->ip_blocks[i].version->funcs->hw_init(adev); + if (r) { + DRM_ERROR("hw_init of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); + return r; + } + adev->ip_blocks[i].status.hw = true; + } + } + + return 0; +} + +static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev) +{ + int i, r; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_blocks[i].status.sw) + continue; + if (adev->ip_blocks[i].status.hw) + continue; + r = adev->ip_blocks[i].version->funcs->hw_init(adev); + if (r) { + DRM_ERROR("hw_init of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); + return r; + } + adev->ip_blocks[i].status.hw = true; + } + + return 0; +} + +static int 
amdgpu_device_fw_loading(struct amdgpu_device *adev) +{ + int r = 0; + int i; + uint32_t smu_version; + + if (adev->asic_type >= CHIP_VEGA10) { + for (i = 0; i < adev->num_ip_blocks; i++) { + if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP) + continue; + + /* no need to do the fw loading again if already done*/ + if (adev->ip_blocks[i].status.hw == true) + break; + + if (adev->in_gpu_reset || adev->in_suspend) { + r = adev->ip_blocks[i].version->funcs->resume(adev); + if (r) { + DRM_ERROR("resume of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); + return r; + } + } else { + r = adev->ip_blocks[i].version->funcs->hw_init(adev); + if (r) { + DRM_ERROR("hw_init of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); + return r; + } + } + + adev->ip_blocks[i].status.hw = true; + break; + } + } + + if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA) + r = amdgpu_pm_load_smu_firmware(adev, &smu_version); + + return r; +} + /** * amdgpu_device_ip_init - run init for hardware IPs * @@ -1666,6 +1953,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) { int i, r; + r = amdgpu_ras_init(adev); + if (r) + return r; + for (i = 0; i < adev->num_ip_blocks; i++) { if (!adev->ip_blocks[i].status.valid) continue; @@ -1673,7 +1964,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) if (r) { DRM_ERROR("sw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].version->funcs->name, r); - return r; + goto init_failed; } adev->ip_blocks[i].status.sw = true; @@ -1682,53 +1973,81 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) r = amdgpu_device_vram_scratch_init(adev); if (r) { DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r); - return r; + goto init_failed; } r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); if (r) { DRM_ERROR("hw_init %d failed %d\n", i, r); - return r; + goto init_failed; } r = amdgpu_device_wb_init(adev); if (r) { DRM_ERROR("amdgpu_device_wb_init failed %d\n", r); - return r; + goto init_failed; } adev->ip_blocks[i].status.hw = true; /* right after GMC hw init, we create CSA */ - if (amdgpu_sriov_vf(adev)) { - r = amdgpu_allocate_static_csa(adev); + if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) { + r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj, + AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_CSA_SIZE); if (r) { DRM_ERROR("allocate CSA failed %d\n", r); - return r; + goto init_failed; } } } } - for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_blocks[i].status.sw) - continue; - if (adev->ip_blocks[i].status.hw) - continue; - r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); - if (r) { - DRM_ERROR("hw_init of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); - return r; - } - adev->ip_blocks[i].status.hw = true; + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_init_data_exchange(adev); + + r = amdgpu_ib_pool_init(adev); + if (r) { + dev_err(adev->dev, "IB initialization failed (%d).\n", r); + amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r); + goto init_failed; } + r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/ + if (r) + goto init_failed; + + r = amdgpu_device_ip_hw_init_phase1(adev); + if (r) + goto init_failed; + + r = amdgpu_device_fw_loading(adev); + if (r) + goto init_failed; + + r = amdgpu_device_ip_hw_init_phase2(adev); + if (r) + goto init_failed; + + /* + * retired pages will be loaded from eeprom and reserved here, + * it should be called after amdgpu_device_ip_hw_init_phase2 
since + * for some ASICs the RAS EEPROM code relies on SMU fully functioning + * for I2C communication which only true at this point. + * recovery_init may fail, but it can free all resources allocated by + * itself and its failure should not stop amdgpu init process. + * + * Note: theoretically, this should be called before all vram allocations + * to protect retired page from abusing + */ + amdgpu_ras_recovery_init(adev); + + if (adev->gmc.xgmi.num_physical_nodes > 1) + amdgpu_xgmi_add_device(adev); amdgpu_amdkfd_device_init(adev); - if (amdgpu_sriov_vf(adev)) { - amdgpu_virt_init_data_exchange(adev); +init_failed: + if (amdgpu_sriov_vf(adev)) amdgpu_virt_release_full_gpu(adev, true); - } - return 0; + return r; } /** @@ -1757,39 +2076,60 @@ static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev) */ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev) { - return !!memcmp(adev->gart.ptr, adev->reset_magic, - AMDGPU_RESET_MAGIC_NUM); + if (memcmp(adev->gart.ptr, adev->reset_magic, + AMDGPU_RESET_MAGIC_NUM)) + return true; + + if (!adev->in_gpu_reset) + return false; + + /* + * For all ASICs with baco/mode1 reset, the VRAM is + * always assumed to be lost. + */ + switch (amdgpu_asic_reset_method(adev)) { + case AMD_RESET_METHOD_BACO: + case AMD_RESET_METHOD_MODE1: + return true; + default: + return false; + } } /** - * amdgpu_device_ip_late_set_cg_state - late init for clockgating + * amdgpu_device_set_cg_state - set clockgating for amdgpu device * * @adev: amdgpu_device pointer + * @state: clockgating state (gate or ungate) * - * Late initialization pass enabling clockgating for hardware IPs. * The list of all the hardware IPs that make up the asic is walked and the - * set_clockgating_state callbacks are run. This stage is run late - * in the init process. + * set_clockgating_state callbacks are run. + * Late initialization pass enabling clockgating for hardware IPs. + * Fini or suspend, pass disabling clockgating for hardware IPs. * Returns 0 on success, negative error code on failure. */ -static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev) + +static int amdgpu_device_set_cg_state(struct amdgpu_device *adev, + enum amd_clockgating_state state) { - int i = 0, r; + int i, j, r; if (amdgpu_emu_mode == 1) return 0; - for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_blocks[i].status.valid) + for (j = 0; j < adev->num_ip_blocks; j++) { + i = state == AMD_CG_STATE_GATE ? 
j : adev->num_ip_blocks - j - 1; + if (!adev->ip_blocks[i].status.late_initialized) continue; /* skip CG for VCE/UVD, it's handled specially */ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && + adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG && adev->ip_blocks[i].version->funcs->set_clockgating_state) { /* enable clockgating to save power */ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, - AMD_CG_STATE_GATE); + state); if (r) { DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].version->funcs->name, r); @@ -1801,24 +2141,26 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev) return 0; } -static int amdgpu_device_ip_late_set_pg_state(struct amdgpu_device *adev) +static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state) { - int i = 0, r; + int i, j, r; if (amdgpu_emu_mode == 1) return 0; - for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_blocks[i].status.valid) + for (j = 0; j < adev->num_ip_blocks; j++) { + i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; + if (!adev->ip_blocks[i].status.late_initialized) continue; /* skip CG for VCE/UVD, it's handled specially */ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && + adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG && adev->ip_blocks[i].version->funcs->set_powergating_state) { /* enable powergating to save power */ r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev, - AMD_PG_STATE_GATE); + state); if (r) { DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].version->funcs->name, r); @@ -1829,6 +2171,43 @@ static int amdgpu_device_ip_late_set_pg_state(struct amdgpu_device *adev) return 0; } +static int amdgpu_device_enable_mgpu_fan_boost(void) +{ + struct amdgpu_gpu_instance *gpu_ins; + struct amdgpu_device *adev; + int i, ret = 0; + + mutex_lock(&mgpu_info.mutex); + + /* + * MGPU fan boost feature should be enabled + * only when there are two or more dGPUs in + * the system + */ + if (mgpu_info.num_dgpu < 2) + goto out; + + for (i = 0; i < mgpu_info.num_dgpu; i++) { + gpu_ins = &(mgpu_info.gpu_ins[i]); + adev = gpu_ins->adev; + if (!(adev->flags & AMD_IS_APU) && + !gpu_ins->mgpu_fan_enabled && + adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->enable_mgpu_fan_boost) { + ret = amdgpu_dpm_enable_mgpu_fan_boost(adev); + if (ret) + break; + + gpu_ins->mgpu_fan_enabled = 1; + } + } + +out: + mutex_unlock(&mgpu_info.mutex); + + return ret; +} + /** * amdgpu_device_ip_late_init - run late init for hardware IPs * @@ -1843,10 +2222,11 @@ static int amdgpu_device_ip_late_set_pg_state(struct amdgpu_device *adev) */ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) { + struct amdgpu_gpu_instance *gpu_instance; int i = 0, r; for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_blocks[i].status.valid) + if (!adev->ip_blocks[i].status.hw) continue; if (adev->ip_blocks[i].version->funcs->late_init) { r = adev->ip_blocks[i].version->funcs->late_init((void *)adev); @@ -1855,18 +2235,53 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) adev->ip_blocks[i].version->funcs->name, r); return r; } - 
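
/*
 * Hedged sketch of the index trick used by amdgpu_device_set_cg_state()
 * and amdgpu_device_set_pg_state() above: gate front to back, ungate back
 * to front, so dependent IP blocks are unwound in reverse order.
 */
static void visit_ip_blocks(struct amdgpu_device *adev, bool gate)
{
	int i, j;

	for (j = 0; j < adev->num_ip_blocks; j++) {
		/* forward index while gating, mirrored index while ungating */
		i = gate ? j : adev->num_ip_blocks - j - 1;
		DRM_DEBUG("visiting IP block %d (%s)\n", i,
			  adev->ip_blocks[i].version->funcs->name);
	}
}
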
adev->ip_blocks[i].status.late_initialized = true; } + adev->ip_blocks[i].status.late_initialized = true; } - amdgpu_device_ip_late_set_cg_state(adev); - amdgpu_device_ip_late_set_pg_state(adev); - - queue_delayed_work(system_wq, &adev->late_init_work, - msecs_to_jiffies(AMDGPU_RESUME_MS)); + amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE); + amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE); amdgpu_device_fill_reset_magic(adev); + r = amdgpu_device_enable_mgpu_fan_boost(); + if (r) + DRM_ERROR("enable mgpu fan boost failed (%d).\n", r); + + + if (adev->gmc.xgmi.num_physical_nodes > 1) { + mutex_lock(&mgpu_info.mutex); + + /* + * Reset device p-state to low as this was booted with high. + * + * This should be performed only after all devices from the same + * hive get initialized. + * + * However, it's unknown how many device in the hive in advance. + * As this is counted one by one during devices initializations. + * + * So, we wait for all XGMI interlinked devices initialized. + * This may bring some delays as those devices may come from + * different hives. But that should be OK. + */ + if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) { + for (i = 0; i < mgpu_info.num_gpu; i++) { + gpu_instance = &(mgpu_info.gpu_ins[i]); + if (gpu_instance->adev->flags & AMD_IS_APU) + continue; + + r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 0); + if (r) { + DRM_ERROR("pstate setting failed (%d).\n", r); + break; + } + } + } + + mutex_unlock(&mgpu_info.mutex); + } + return 0; } @@ -1885,23 +2300,21 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) { int i, r; + amdgpu_ras_pre_fini(adev); + + if (adev->gmc.xgmi.num_physical_nodes > 1) + amdgpu_xgmi_remove_device(adev); + amdgpu_amdkfd_device_fini(adev); + + amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); + amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); + /* need to disable SMC first */ for (i = 0; i < adev->num_ip_blocks; i++) { if (!adev->ip_blocks[i].status.hw) continue; - if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC && - adev->ip_blocks[i].version->funcs->set_clockgating_state) { - /* ungate blocks before hw fini so that we can shutdown the blocks safely */ - r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, - AMD_CG_STATE_UNGATE); - if (r) { - DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); - return r; - } - if (adev->powerplay.pp_funcs->set_powergating_by_smu) - amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false); + if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); /* XXX handle errors */ if (r) { @@ -1917,20 +2330,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) if (!adev->ip_blocks[i].status.hw) continue; - if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && - adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && - adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && - adev->ip_blocks[i].version->funcs->set_clockgating_state) { - /* ungate blocks before hw fini so that we can shutdown the blocks safely */ - r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, - AMD_CG_STATE_UNGATE); - if (r) { - DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); - return r; - } - } - r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); /* XXX handle errors */ if (r) { 
@@ -1947,9 +2346,11 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) continue; if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { - amdgpu_free_static_csa(adev); + amdgpu_ucode_free_bo(adev); + amdgpu_free_static_csa(&adev->virt.csa_obj); amdgpu_device_wb_fini(adev); amdgpu_device_vram_scratch_fini(adev); + amdgpu_ib_pool_fini(adev); } r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev); @@ -1970,6 +2371,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) adev->ip_blocks[i].status.late_initialized = false; } + amdgpu_ras_fini(adev); + if (amdgpu_sriov_vf(adev)) if (amdgpu_virt_release_full_gpu(adev, false)) DRM_ERROR("failed to release exclusive mode on fini\n"); @@ -1978,18 +2381,14 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) } /** - * amdgpu_device_ip_late_init_func_handler - work handler for clockgating - * - * @work: work_struct + * amdgpu_device_delayed_init_work_handler - work handler for IB tests * - * Work handler for amdgpu_device_ip_late_set_cg_state. We put the - * clockgating setup into a worker thread to speed up driver init and - * resume from suspend. + * @work: work_struct. */ -static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work) +static void amdgpu_device_delayed_init_work_handler(struct work_struct *work) { struct amdgpu_device *adev = - container_of(work, struct amdgpu_device, late_init_work.work); + container_of(work, struct amdgpu_device, delayed_init_work.work); int r; r = amdgpu_ib_ring_tests(adev); @@ -1997,6 +2396,19 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work) DRM_ERROR("ib ring test failed (%d).\n", r); } +static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work) +{ + struct amdgpu_device *adev = + container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work); + + mutex_lock(&adev->gfx.gfx_off_mutex); + if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) { + if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true)) + adev->gfx.gfx_off_state = true; + } + mutex_unlock(&adev->gfx.gfx_off_mutex); +} + /** * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1) * @@ -2012,36 +2424,26 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) { int i, r; - if (amdgpu_sriov_vf(adev)) - amdgpu_virt_request_full_gpu(adev, false); + amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); + amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_blocks[i].status.valid) continue; /* displays are handled separately */ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) { - /* ungate blocks so that suspend can properly shut them down */ - if (adev->ip_blocks[i].version->funcs->set_clockgating_state) { - r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, - AMD_CG_STATE_UNGATE); - if (r) { - DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); - } - } /* XXX handle errors */ r = adev->ip_blocks[i].version->funcs->suspend(adev); /* XXX handle errors */ if (r) { DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].version->funcs->name, r); + return r; } + adev->ip_blocks[i].status.hw = false; } } - if (amdgpu_sriov_vf(adev)) - amdgpu_virt_release_full_gpu(adev, false); - return 0; } @@ -2060,35 +2462,17 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) { int i, r; - if 
(amdgpu_sriov_vf(adev)) - amdgpu_virt_request_full_gpu(adev, false); - - /* ungate SMC block first */ - r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC, - AMD_CG_STATE_UNGATE); - if (r) { - DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r); - } - - /* call smu to disable gfx off feature first when suspend */ - if (adev->powerplay.pp_funcs->set_powergating_by_smu) - amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false); - for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_blocks[i].status.valid) continue; /* displays are handled in phase1 */ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) continue; - /* ungate blocks so that suspend can properly shut them down */ - if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC && - adev->ip_blocks[i].version->funcs->set_clockgating_state) { - r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, - AMD_CG_STATE_UNGATE); - if (r) { - DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); - } + /* PSP lost connection when err_event_athub occurs */ + if (amdgpu_ras_intr_triggered() && + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { + adev->ip_blocks[i].status.hw = false; + continue; } /* XXX handle errors */ r = adev->ip_blocks[i].version->funcs->suspend(adev); @@ -2097,11 +2481,21 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].version->funcs->name, r); } + adev->ip_blocks[i].status.hw = false; + /* handle putting the SMC in the appropriate state */ + if(!amdgpu_sriov_vf(adev)){ + if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { + r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state); + if (r) { + DRM_ERROR("SMC failed to set mp1 state %d, %d\n", + adev->mp1_state, r); + return r; + } + } + } + adev->ip_blocks[i].status.hw = false; } - if (amdgpu_sriov_vf(adev)) - amdgpu_virt_release_full_gpu(adev, false); - return 0; } @@ -2120,11 +2514,17 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev) { int r; + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_request_full_gpu(adev, false); + r = amdgpu_device_ip_suspend_phase1(adev); if (r) return r; r = amdgpu_device_ip_suspend_phase2(adev); + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_release_full_gpu(adev, false); + return r; } @@ -2146,14 +2546,16 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) for (j = 0; j < adev->num_ip_blocks; j++) { block = &adev->ip_blocks[j]; + block->status.hw = false; if (block->version->type != ip_order[i] || !block->status.valid) continue; r = block->version->funcs->hw_init(adev); - DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); + DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); if (r) return r; + block->status.hw = true; } } @@ -2170,7 +2572,8 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev) AMD_IP_BLOCK_TYPE_GFX, AMD_IP_BLOCK_TYPE_SDMA, AMD_IP_BLOCK_TYPE_UVD, - AMD_IP_BLOCK_TYPE_VCE + AMD_IP_BLOCK_TYPE_VCE, + AMD_IP_BLOCK_TYPE_VCN }; for (i = 0; i < ARRAY_SIZE(ip_order); i++) { @@ -2181,13 +2584,19 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev) block = &adev->ip_blocks[j]; if (block->version->type != ip_order[i] || - !block->status.valid) + !block->status.valid || + block->status.hw) continue; - r = 
block->version->funcs->hw_init(adev); - DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); + if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) + r = block->version->funcs->resume(adev); + else + r = block->version->funcs->hw_init(adev); + + DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); if (r) return r; + block->status.hw = true; } } @@ -2211,17 +2620,19 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev) int i, r; for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_blocks[i].status.valid) + if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) continue; if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) { + r = adev->ip_blocks[i].version->funcs->resume(adev); if (r) { DRM_ERROR("resume of IP block <%s> failed %d\n", adev->ip_blocks[i].version->funcs->name, r); return r; } + adev->ip_blocks[i].status.hw = true; } } @@ -2246,11 +2657,12 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev) int i, r; for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_blocks[i].status.valid) + if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) continue; if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || - adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) continue; r = adev->ip_blocks[i].version->funcs->resume(adev); if (r) { @@ -2258,6 +2670,7 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev) adev->ip_blocks[i].version->funcs->name, r); return r; } + adev->ip_blocks[i].status.hw = true; } return 0; @@ -2282,6 +2695,11 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev) r = amdgpu_device_ip_resume_phase1(adev); if (r) return r; + + r = amdgpu_device_fw_loading(adev); + if (r) + return r; + r = amdgpu_device_ip_resume_phase2(adev); return r; @@ -2346,12 +2764,19 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) case CHIP_VEGA10: case CHIP_VEGA12: case CHIP_VEGA20: -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case CHIP_RAVEN: + case CHIP_NAVI10: + case CHIP_NAVI14: + case CHIP_NAVI12: + case CHIP_RENOIR: #endif return amdgpu_dc != 0; #endif default: + if (amdgpu_dc > 0) + DRM_INFO("Display Core has been requested via kernel parameter " + "but isn't supported by ASIC, ignoring\n"); return false; } } @@ -2371,6 +2796,121 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev) return amdgpu_device_asic_has_dc_support(adev->asic_type); } + +static void amdgpu_device_xgmi_reset_func(struct work_struct *__work) +{ + struct amdgpu_device *adev = + container_of(__work, struct amdgpu_device, xgmi_reset_work); + struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0); + + /* It's a bug to not have a hive within this function */ + if (WARN_ON(!hive)) + return; + + /* + * Use task barrier to synchronize all xgmi reset works across the + * hive. task_barrier_enter and task_barrier_exit will block + * until all the threads running the xgmi reset works reach + * those points. task_barrier_full will do both blocks. 
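
/*
 * Hedged sketch of the hive-wide lockstep described above, condensed from
 * amdgpu_device_xgmi_reset_func() with the error paths dropped.
 */
static void xgmi_baco_reset_sketch(struct amdgpu_device *adev,
				   struct amdgpu_hive_info *hive)
{
	task_barrier_enter(&hive->tb);	/* all nodes poised to enter BACO */
	adev->asic_reset_res = amdgpu_device_baco_enter(adev->ddev);

	task_barrier_exit(&hive->tb);	/* all nodes finished entering    */
	adev->asic_reset_res = amdgpu_device_baco_exit(adev->ddev);
}

/* The full-asic-reset path needs only one rendezvous, so it calls
 * task_barrier_full(&hive->tb) once before amdgpu_asic_reset(). */
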
+ */ + if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { + + task_barrier_enter(&hive->tb); + adev->asic_reset_res = amdgpu_device_baco_enter(adev->ddev); + + if (adev->asic_reset_res) + goto fail; + + task_barrier_exit(&hive->tb); + adev->asic_reset_res = amdgpu_device_baco_exit(adev->ddev); + + if (adev->asic_reset_res) + goto fail; + + if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count) + adev->mmhub.funcs->reset_ras_error_count(adev); + } else { + + task_barrier_full(&hive->tb); + adev->asic_reset_res = amdgpu_asic_reset(adev); + } + +fail: + if (adev->asic_reset_res) + DRM_WARN("ASIC reset failed with error, %d for drm dev, %s", + adev->asic_reset_res, adev->ddev->unique); +} + +static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) +{ + char *input = amdgpu_lockup_timeout; + char *timeout_setting = NULL; + int index = 0; + long timeout; + int ret = 0; + + /* + * By default timeout for non compute jobs is 10000. + * And there is no timeout enforced on compute jobs. + * In SR-IOV or passthrough mode, timeout for compute + * jobs are 10000 by default. + */ + adev->gfx_timeout = msecs_to_jiffies(10000); + adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; + if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev)) + adev->compute_timeout = adev->gfx_timeout; + else + adev->compute_timeout = MAX_SCHEDULE_TIMEOUT; + +#ifdef notyet + if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { + while ((timeout_setting = strsep(&input, ",")) && + strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { + ret = kstrtol(timeout_setting, 0, &timeout); + if (ret) + return ret; + + if (timeout == 0) { + index++; + continue; + } else if (timeout < 0) { + timeout = MAX_SCHEDULE_TIMEOUT; + } else { + timeout = msecs_to_jiffies(timeout); + } + + switch (index++) { + case 0: + adev->gfx_timeout = timeout; + break; + case 1: + adev->compute_timeout = timeout; + break; + case 2: + adev->sdma_timeout = timeout; + break; + case 3: + adev->video_timeout = timeout; + break; + default: + break; + } + } + /* + * There is only one value specified and + * it should apply to all non-compute jobs. 
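
/*
 * Hedged illustration of the lockup_timeout syntax handled above: up to
 * four comma-separated millisecond values in the order
 * gfx,compute,sdma,video, where 0 keeps that engine's default and a
 * negative value becomes MAX_SCHEDULE_TIMEOUT (never time out).  E.g.
 * amdgpu.lockup_timeout=10000,0,-1 would end up as:
 */
adev->gfx_timeout  = msecs_to_jiffies(10000);	/* "10000"                   */
						/* "0": compute default kept */
adev->sdma_timeout = MAX_SCHEDULE_TIMEOUT;	/* "-1": never time out      */
						/* video: unset, default     */
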
+ */ + if (index == 1) { + adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; + if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev)) + adev->compute_timeout = adev->gfx_timeout; + } + } +#endif + + return ret; +} + /** * amdgpu_device_init - initialize the driver * @@ -2389,7 +2929,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, uint32_t flags) { int r, i; - bool runtime = false; + bool boco = false; u32 max_MBps; adev->shutdown = false; @@ -2399,17 +2939,22 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->ddev = ddev; adev->pdev = pdev; adev->flags = flags; - adev->asic_type = flags & AMD_ASIC_MASK; + + if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST) + adev->asic_type = amdgpu_force_asic_type; + else + adev->asic_type = flags & AMD_ASIC_MASK; + adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; if (amdgpu_emu_mode == 1) - adev->usec_timeout *= 2; + adev->usec_timeout *= 10; adev->gmc.gart_size = 512 * 1024 * 1024; adev->accel_working = false; adev->num_rings = 0; adev->mman.buffer_funcs = NULL; adev->mman.buffer_funcs_ring = NULL; adev->vm_manager.vm_pte_funcs = NULL; - adev->vm_manager.vm_pte_num_rings = 0; + adev->vm_manager.vm_pte_num_scheds = 0; adev->gmc.gmc_funcs = NULL; adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); @@ -2420,6 +2965,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->pcie_wreg = &amdgpu_invalid_wreg; adev->pciep_rreg = &amdgpu_invalid_rreg; adev->pciep_wreg = &amdgpu_invalid_wreg; + adev->pcie_rreg64 = &amdgpu_invalid_rreg64; + adev->pcie_wreg64 = &amdgpu_invalid_wreg64; adev->uvd_ctx_rreg = &amdgpu_invalid_rreg; adev->uvd_ctx_wreg = &amdgpu_invalid_wreg; adev->didt_rreg = &amdgpu_invalid_rreg; @@ -2441,13 +2988,18 @@ int amdgpu_device_init(struct amdgpu_device *adev, rw_init(&adev->gfx.gpu_clock_mutex, "gfxclk"); rw_init(&adev->srbm_mutex, "srbm"); rw_init(&adev->gfx.pipe_reserve_mutex, "pipers"); + rw_init(&adev->gfx.gfx_off_mutex, "gfxoff"); rw_init(&adev->grbm_idx_mutex, "grbmidx"); rw_init(&adev->mn_lock, "agpumn"); rw_init(&adev->virt.vf_errors.lock, "vferr"); hash_init(adev->mn_hash); rw_init(&adev->lock_reset, "aglkrst"); + rw_init(&adev->psp.mutex, "agpsp"); + rw_init(&adev->notifier_lock, "agnf"); - amdgpu_device_check_arguments(adev); + r = amdgpu_device_check_arguments(adev); + if (r) + return r; mtx_init(&adev->mmio_idx_lock, IPL_TTY); mtx_init(&adev->smc_idx_lock, IPL_TTY); @@ -2465,9 +3017,14 @@ int amdgpu_device_init(struct amdgpu_device *adev, INIT_LIST_HEAD(&adev->ring_lru_list); mtx_init(&adev->ring_lru_list_lock, IPL_TTY); - INIT_DELAYED_WORK(&adev->late_init_work, - amdgpu_device_ip_late_init_func_handler); + INIT_DELAYED_WORK(&adev->delayed_init_work, + amdgpu_device_delayed_init_work_handler); + INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work, + amdgpu_device_delay_enable_gfx_off); + + INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func); + adev->gfx.gfx_off_req_count = 1; adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? 
true : false; #ifdef __linux__ @@ -2489,9 +3046,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); - /* doorbell bar mapping */ - amdgpu_device_doorbell_init(adev); - /* io port mapping */ #ifdef __linux__ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { @@ -2505,13 +3059,51 @@ int amdgpu_device_init(struct amdgpu_device *adev, DRM_INFO("PCI I/O BAR is not found.\n"); #endif + /* enable PCIE atomic ops */ +#ifdef notyet + r = pci_enable_atomic_ops_to_root(adev->pdev, + PCI_EXP_DEVCAP2_ATOMIC_COMP32 | + PCI_EXP_DEVCAP2_ATOMIC_COMP64); + if (r) { + adev->have_atomics_support = false; + DRM_INFO("PCIE atomic ops is not supported\n"); + } else { + adev->have_atomics_support = true; + } +#else + adev->have_atomics_support = false; +#endif + amdgpu_device_get_pcie_info(adev); + if (amdgpu_mcbp) + DRM_INFO("MCBP is enabled\n"); + + if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10) + adev->enable_mes = true; + + if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) { + r = amdgpu_discovery_init(adev); + if (r) { + dev_err(adev->dev, "amdgpu_discovery_init failed\n"); + return r; + } + } + /* early init functions */ r = amdgpu_device_ip_early_init(adev); if (r) return r; + r = amdgpu_device_get_job_timeout_settings(adev); + if (r) { + dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n"); + return r; + } + + /* doorbell bar mapping and doorbell index init*/ + amdgpu_device_doorbell_init(adev); + /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */ /* this will fail for cards that aren't VGA class devices, just * ignore it */ @@ -2519,15 +3111,16 @@ int amdgpu_device_init(struct amdgpu_device *adev, vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode); #endif - if (amdgpu_device_is_px(ddev)) - runtime = true; -#ifdef notyet - if (!pci_is_thunderbolt_attached(adev->pdev)) + if (amdgpu_device_supports_boco(ddev)) + boco = true; + if (amdgpu_has_atpx() && + (amdgpu_is_atpx_hybrid() || + amdgpu_has_atpx_dgpu_power_cntl()) && + !pci_is_thunderbolt_attached(adev->pdev)) vga_switcheroo_register_client(adev->pdev, - &amdgpu_switcheroo_ops, runtime); - if (runtime) + &amdgpu_switcheroo_ops, boco); + if (boco) vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); -#endif if (amdgpu_emu_mode == 1) { /* post the asic on emulation mode */ @@ -2535,22 +3128,20 @@ int amdgpu_device_init(struct amdgpu_device *adev, goto fence_driver_init; } - /* Read BIOS */ - if (!amdgpu_get_bios(adev)) { - r = -EINVAL; - goto failed; - } - - r = amdgpu_atombios_init(adev); - if (r) { - dev_err(adev->dev, "amdgpu_atombios_init failed\n"); - amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0); - goto failed; - } - /* detect if we are with an SRIOV vbios */ amdgpu_device_detect_sriov_bios(adev); + /* check if we need to reset the asic + * E.g., driver was not cleanly unloaded previously, etc. 
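
/*
 * Hedged note on the PCIe-atomics probe above: on Linux the capability is
 * negotiated with the root port via pci_enable_atomic_ops_to_root() (0 on
 * success); this port compiles the #else branch, so the flag stays false
 * and atomics users such as KFD must take non-atomic fallbacks.
 */
adev->have_atomics_support =
	!pci_enable_atomic_ops_to_root(adev->pdev,
				       PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
				       PCI_EXP_DEVCAP2_ATOMIC_COMP64);
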
+ */ + if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) { + r = amdgpu_asic_reset(adev); + if (r) { + dev_err(adev->dev, "asic reset on init failed\n"); + goto failed; + } + } + /* Post card if necessary */ if (amdgpu_device_need_post(adev)) { if (!adev->bios) { @@ -2618,6 +3209,14 @@ fence_driver_init: goto failed; } + DRM_DEBUG("SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n", + adev->gfx.config.max_shader_engines, + adev->gfx.config.max_sh_per_se, + adev->gfx.config.max_cu_per_sh, + adev->gfx.cu_info.number); + + amdgpu_ctx_init_sched(adev); + adev->accel_working = true; amdgpu_vm_check_compute_bug(adev); @@ -2630,34 +3229,21 @@ fence_driver_init: /* Get a log2 for easy divisions. */ adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps)); - r = amdgpu_ib_pool_init(adev); - if (r) { - dev_err(adev->dev, "IB initialization failed (%d).\n", r); - amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r); - goto failed; - } - amdgpu_fbdev_init(adev); r = amdgpu_pm_sysfs_init(adev); - if (r) + if (r) { + adev->pm_sysfs_en = false; DRM_ERROR("registering pm debugfs failed (%d).\n", r); + } else + adev->pm_sysfs_en = true; - r = amdgpu_debugfs_gem_init(adev); - if (r) - DRM_ERROR("registering gem debugfs failed (%d).\n", r); - - r = amdgpu_debugfs_regs_init(adev); - if (r) - DRM_ERROR("registering register debugfs failed (%d).\n", r); - - r = amdgpu_debugfs_firmware_init(adev); - if (r) - DRM_ERROR("registering firmware debugfs failed (%d).\n", r); - - r = amdgpu_debugfs_init(adev); - if (r) - DRM_ERROR("Creating debugfs files failed (%d).\n", r); + r = amdgpu_ucode_sysfs_init(adev); + if (r) { + adev->ucode_sysfs_en = false; + DRM_ERROR("Creating firmware sysfs failed (%d).\n", r); + } else + adev->ucode_sysfs_en = true; if ((amdgpu_testing & 1)) { if (adev->accel_working) @@ -2672,6 +3258,13 @@ fence_driver_init: DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n"); } + /* + * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost. + * Otherwise the mgpu fan boost feature will be skipped due to the + * gpu instance is counted less. + */ + amdgpu_register_gpu_instance(adev); + /* enable clockgating, etc. after ib tests, etc. since some blocks require * explicit gating rather than handling it automatically. */ @@ -2682,11 +3275,28 @@ fence_driver_init: goto failed; } + /* must succeed. 
*/ + amdgpu_ras_resume(adev); + + queue_delayed_work(system_wq, &adev->delayed_init_work, + msecs_to_jiffies(AMDGPU_RESUME_MS)); + + r = device_create_file(adev->dev, &dev_attr_pcie_replay_count); + if (r) { + dev_err(adev->dev, "Could not create pcie_replay_count"); + return r; + } + + if (IS_ENABLED(CONFIG_PERF_EVENTS)) + r = amdgpu_pmu_init(adev); + if (r) + dev_err(adev->dev, "amdgpu_pmu_init failed\n"); + return 0; failed: amdgpu_vf_error_trans_all(adev); - if (runtime) + if (boco) vga_switcheroo_fini_domain_pm_ops(adev->dev); return r; @@ -2705,18 +3315,26 @@ void amdgpu_device_fini(struct amdgpu_device *adev) int r; DRM_INFO("amdgpu: finishing device.\n"); + flush_delayed_work(&adev->delayed_init_work); adev->shutdown = true; + + /* make sure IB test finished before entering exclusive mode + * to avoid preemption on IB test + * */ + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_request_full_gpu(adev, false); + /* disable all interrupts */ amdgpu_irq_disable_all(adev); if (adev->mode_info.mode_config_initialized){ if (!amdgpu_device_has_dc_support(adev)) - drm_crtc_force_disable_all(adev->ddev); + drm_helper_force_disable_all(adev->ddev); else drm_atomic_helper_shutdown(adev->ddev); } - amdgpu_ib_pool_fini(adev); amdgpu_fence_driver_fini(adev); - amdgpu_pm_sysfs_fini(adev); + if (adev->pm_sysfs_en) + amdgpu_pm_sysfs_fini(adev); amdgpu_fbdev_fini(adev); r = amdgpu_device_ip_fini(adev); if (adev->firmware.gpu_info_fw) { @@ -2724,7 +3342,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev) adev->firmware.gpu_info_fw = NULL; } adev->accel_working = false; - cancel_delayed_work_sync(&adev->late_init_work); /* free i2c buses */ if (!amdgpu_device_has_dc_support(adev)) amdgpu_i2c_fini(adev); @@ -2734,9 +3351,12 @@ void amdgpu_device_fini(struct amdgpu_device *adev) kfree(adev->bios); adev->bios = NULL; - if (!pci_is_thunderbolt_attached(adev->pdev)) + if (amdgpu_has_atpx() && + (amdgpu_is_atpx_hybrid() || + amdgpu_has_atpx_dgpu_power_cntl()) && + !pci_is_thunderbolt_attached(adev->pdev)) vga_switcheroo_unregister_client(adev->pdev); - if (adev->flags & AMD_IS_PX) + if (amdgpu_device_supports_boco(adev->ddev)) vga_switcheroo_fini_domain_pm_ops(adev->dev); vga_client_register(adev->pdev, NULL, NULL, NULL); #ifdef __linux__ @@ -2757,7 +3377,14 @@ void amdgpu_device_fini(struct amdgpu_device *adev) adev->rmmio_size = 0; #endif amdgpu_device_doorbell_fini(adev); - amdgpu_debugfs_regs_cleanup(adev); + + device_remove_file(adev->dev, &dev_attr_pcie_replay_count); + if (adev->ucode_sysfs_en) + amdgpu_ucode_sysfs_fini(adev); + if (IS_ENABLED(CONFIG_PERF_EVENTS)) + amdgpu_pmu_fini(adev); + if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) + amdgpu_discovery_fini(adev); } @@ -2775,11 +3402,12 @@ void amdgpu_device_fini(struct amdgpu_device *adev) * Returns 0 for success or an error on failure. * Called at driver suspend. 
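
/*
 * Hedged outline of the reworked suspend order above (the PCI power-state
 * dance now lives in the callers, not in amdgpu_device_suspend() itself);
 * condensed, with display teardown and error handling elided:
 */
adev->in_suspend = true;
cancel_delayed_work_sync(&adev->delayed_init_work); /* no IB tests mid-suspend */
amdgpu_ras_suspend(adev);
amdgpu_device_ip_suspend_phase1(adev);	/* displays (DCE) first             */
amdgpu_amdkfd_suspend(adev, !fbcon);
amdgpu_bo_evict_vram(adev);		/* first eviction                   */
amdgpu_fence_driver_suspend(adev);
amdgpu_device_ip_suspend_phase2(adev);	/* everything else                  */
amdgpu_bo_evict_vram(adev);		/* catch buffers pinned in between  */
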
@@ -2775,11 +3402,12 @@
  * Returns 0 for success or an error on failure.
  * Called at driver suspend.
  */
-int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
+int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 {
 	struct amdgpu_device *adev;
 	struct drm_crtc *crtc;
 	struct drm_connector *connector;
+	struct drm_connector_list_iter iter;
 	int r;
 
 	if (dev == NULL || dev->dev_private == NULL) {
@@ -2795,17 +3423,22 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 		return 0;
 #endif
 
+	adev->in_suspend = true;
 	drm_kms_helper_poll_disable(dev);
 
 	if (fbcon)
 		amdgpu_fbdev_set_suspend(adev, 1);
 
+	cancel_delayed_work_sync(&adev->delayed_init_work);
+
 	if (!amdgpu_device_has_dc_support(adev)) {
 		/* turn off display hw */
 		drm_modeset_lock_all(dev);
-		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-		}
+		drm_connector_list_iter_begin(dev, &iter);
+		drm_for_each_connector_iter(connector, &iter)
+			drm_helper_connector_dpms(connector,
+						  DRM_MODE_DPMS_OFF);
+		drm_connector_list_iter_end(&iter);
 		drm_modeset_unlock_all(dev);
 		/* unpin the front buffers and cursors */
 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -2813,7 +3446,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 			struct drm_framebuffer *fb = crtc->primary->fb;
 			struct amdgpu_bo *robj;
 
-			if (amdgpu_crtc->cursor_bo) {
+			if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
 				struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
 				r = amdgpu_bo_reserve(aobj, true);
 				if (r == 0) {
@@ -2837,10 +3470,12 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 		}
 	}
 
-	amdgpu_amdkfd_suspend(adev);
+	amdgpu_ras_suspend(adev);
 
 	r = amdgpu_device_ip_suspend_phase1(adev);
 
+	amdgpu_amdkfd_suspend(adev, !fbcon);
+
 	/* evict vram memory */
 	amdgpu_bo_evict_vram(adev);
 
@@ -2854,17 +3489,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 	 */
 	amdgpu_bo_evict_vram(adev);
 
-	pci_save_state(dev->pdev);
-	if (suspend) {
-		/* Shut down the device */
-		pci_disable_device(dev->pdev);
-		pci_set_power_state(dev->pdev, PCI_D3hot);
-	} else {
-		r = amdgpu_asic_reset(adev);
-		if (r)
-			DRM_ERROR("amdgpu asic reset failed\n");
-	}
-
 	return 0;
 }
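The display-off loop in the new suspend path uses the drm_connector_list_iter API instead of walking mode_config.connector_list by hand; the iterator takes the connector list lock internally and holds a reference on each connector it yields. An illustrative fragment of the idiom (sketch only; my_handle_connector is a hypothetical callback):

#include <drm/drm_connector.h>
#include <drm/drm_device.h>

static void for_each_connector_example(struct drm_device *dev,
				       void (*my_handle_connector)(struct drm_connector *))
{
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	/* begin/end bracket the walk; the iter does its own locking */
	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		my_handle_connector(connector);
	drm_connector_list_iter_end(&iter);
}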
@@ -2879,9 +3503,10 @@
  * Returns 0 for success or an error on failure.
  * Called at driver resume.
  */
-int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
+int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
 {
 	struct drm_connector *connector;
+	struct drm_connector_list_iter iter;
 	struct amdgpu_device *adev = dev->dev_private;
 	struct drm_crtc *crtc;
 	int r = 0;
@@ -2891,14 +3516,6 @@
 		return 0;
 #endif
 
-	if (resume) {
-		pci_set_power_state(dev->pdev, PCI_D0);
-		pci_restore_state(dev->pdev);
-		r = pci_enable_device(dev->pdev);
-		if (r)
-			return r;
-	}
-
 	/* post card */
 	if (amdgpu_device_need_post(adev)) {
 		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -2918,12 +3535,15 @@
 	if (r)
 		return r;
 
+	queue_delayed_work(system_wq, &adev->delayed_init_work,
+			   msecs_to_jiffies(AMDGPU_RESUME_MS));
+
 	if (!amdgpu_device_has_dc_support(adev)) {
 		/* pin cursors */
 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 			struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 
-			if (amdgpu_crtc->cursor_bo) {
+			if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
 				struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
 				r = amdgpu_bo_reserve(aobj, true);
 				if (r == 0) {
@@ -2936,12 +3556,12 @@
 			}
 		}
 	}
-	r = amdgpu_amdkfd_resume(adev);
+	r = amdgpu_amdkfd_resume(adev, !fbcon);
 	if (r)
 		return r;
 
 	/* Make sure IB tests flushed */
-	flush_delayed_work(&adev->late_init_work);
+	flush_delayed_work(&adev->delayed_init_work);
 
 	/* blat the mode back in */
 	if (fbcon) {
@@ -2951,9 +3571,13 @@
 		/* turn on display hw */
 		drm_modeset_lock_all(dev);
-		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
-		}
+
+		drm_connector_list_iter_begin(dev, &iter);
+		drm_for_each_connector_iter(connector, &iter)
+			drm_helper_connector_dpms(connector,
+						  DRM_MODE_DPMS_ON);
+		drm_connector_list_iter_end(&iter);
+
 		drm_modeset_unlock_all(dev);
 	}
 	amdgpu_fbdev_set_suspend(adev, 0);
@@ -2961,6 +3585,8 @@
 
 	drm_kms_helper_poll_enable(dev);
 
+	amdgpu_ras_resume(adev);
+
 	/*
 	 * Most of the connector probing functions try to acquire runtime pm
 	 * refs to ensure that the GPU is powered on when connector polling is
@@ -2980,6 +3606,8 @@
 #if defined(CONFIG_PM) && defined(__linux__)
 	dev->dev->power.disable_depth--;
 #endif
+	adev->in_suspend = false;
+
 	return 0;
 }
 
@@ -3138,71 +3766,22 @@ static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
 }
 
 /**
- * amdgpu_device_recover_vram_from_shadow - restore shadowed VRAM buffers
- *
- * @adev: amdgpu_device pointer
- * @ring: amdgpu_ring for the engine handling the buffer operations
- * @bo: amdgpu_bo buffer whose shadow is being restored
- * @fence: dma_fence associated with the operation
- *
- * Restores the VRAM buffer contents from the shadow in GTT.  Used to
- * restore things like GPUVM page tables after a GPU reset where
- * the contents of VRAM might be lost.
- * Returns 0 on success, negative error code on failure.
- */
-static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
-						  struct amdgpu_ring *ring,
-						  struct amdgpu_bo *bo,
-						  struct dma_fence **fence)
-{
-	uint32_t domain;
-	int r;
-
-	if (!bo->shadow)
-		return 0;
-
-	r = amdgpu_bo_reserve(bo, true);
-	if (r)
-		return r;
-	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
-	/* if bo has been evicted, then no need to recover */
-	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
-		r = amdgpu_bo_validate(bo->shadow);
-		if (r) {
-			DRM_ERROR("bo validate failed!\n");
-			goto err;
-		}
-
-		r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
-						  NULL, fence, true);
-		if (r) {
-			DRM_ERROR("recover page table failed!\n");
-			goto err;
-		}
-	}
-err:
-	amdgpu_bo_unreserve(bo);
-	return r;
-}
-
-/**
- * amdgpu_device_handle_vram_lost - Handle the loss of VRAM contents
+ * amdgpu_device_recover_vram - Recover some VRAM contents
  *
  * @adev: amdgpu_device pointer
  *
  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
  * restore things like GPUVM page tables after a GPU reset where
  * the contents of VRAM might be lost.
- * Returns 0 on success, 1 on failure.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
  */
-static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
+static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
-	struct amdgpu_bo *bo, *tmp;
 	struct dma_fence *fence = NULL, *next = NULL;
-	long r = 1;
-	int i = 0;
-	long tmo;
+	struct amdgpu_bo *shadow;
+	long r = 1, tmo;
 
 	if (amdgpu_sriov_runtime(adev))
 		tmo = msecs_to_jiffies(8000);
@@ -3211,123 +3790,49 @@
 	DRM_INFO("recover vram bo from shadow start\n");
 
 	mutex_lock(&adev->shadow_list_lock);
-	list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
-		next = NULL;
-		amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
+	list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
+
+		/* No need to recover an evicted BO */
+		if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
+		    shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
+		    shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
+			continue;
+
+		r = amdgpu_bo_restore_shadow(shadow, &next);
+		if (r)
+			break;
+
 		if (fence) {
-			r = dma_fence_wait_timeout(fence, false, tmo);
-			if (r == 0)
-				pr_err("wait fence %p[%d] timeout\n", fence, i);
-			else if (r < 0)
-				pr_err("wait fence %p[%d] interrupted\n", fence, i);
-			if (r < 1) {
-				dma_fence_put(fence);
-				fence = next;
+			tmo = dma_fence_wait_timeout(fence, false, tmo);
+			dma_fence_put(fence);
+			fence = next;
+			if (tmo == 0) {
+				r = -ETIMEDOUT;
+				break;
+			} else if (tmo < 0) {
+				r = tmo;
 				break;
 			}
-			i++;
+		} else {
+			fence = next;
 		}
-
-		dma_fence_put(fence);
-		fence = next;
 	}
 	mutex_unlock(&adev->shadow_list_lock);
 
-	if (fence) {
-		r = dma_fence_wait_timeout(fence, false, tmo);
-		if (r == 0)
-			pr_err("wait fence %p[%d] timeout\n", fence, i);
-		else if (r < 0)
-			pr_err("wait fence %p[%d] interrupted\n", fence, i);
-
-	}
+	if (fence)
+		tmo = dma_fence_wait_timeout(fence, false, tmo);
 	dma_fence_put(fence);
 
-	if (r > 0)
-		DRM_INFO("recover vram bo from shadow done\n");
-	else
-		DRM_ERROR("recover vram bo from shadow failed\n");
-
-	return (r > 0) ? 0 : 1;
-}
-
-/**
- * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
- *
- * @adev: amdgpu device pointer
- *
- * attempt to do soft-reset or full-reset and reinitialize Asic
- * return 0 means succeeded otherwise failed
- */
-static int amdgpu_device_reset(struct amdgpu_device *adev)
-{
-	bool need_full_reset, vram_lost = 0;
-	int r;
-
-	need_full_reset = amdgpu_device_ip_need_full_reset(adev);
-
-	if (!need_full_reset) {
-		amdgpu_device_ip_pre_soft_reset(adev);
-		r = amdgpu_device_ip_soft_reset(adev);
-		amdgpu_device_ip_post_soft_reset(adev);
-		if (r || amdgpu_device_ip_check_soft_reset(adev)) {
-			DRM_INFO("soft reset failed, will fallback to full reset!\n");
-			need_full_reset = true;
-		}
-	}
-
-	if (need_full_reset) {
-		r = amdgpu_device_ip_suspend(adev);
-
-retry:
-		r = amdgpu_asic_reset(adev);
-		/* post card */
-		amdgpu_atom_asic_init(adev->mode_info.atom_context);
-
-		if (!r) {
-			dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
-			r = amdgpu_device_ip_resume_phase1(adev);
-			if (r)
-				goto out;
-
-			vram_lost = amdgpu_device_check_vram_lost(adev);
-			if (vram_lost) {
-				DRM_ERROR("VRAM is lost!\n");
-				atomic_inc(&adev->vram_lost_counter);
-			}
-
-			r = amdgpu_gtt_mgr_recover(
-				&adev->mman.bdev.man[TTM_PL_TT]);
-			if (r)
-				goto out;
-
-			r = amdgpu_device_ip_resume_phase2(adev);
-			if (r)
-				goto out;
-
-			if (vram_lost)
-				amdgpu_device_fill_reset_magic(adev);
-		}
-	}
-
-out:
-	if (!r) {
-		amdgpu_irq_gpu_reset_resume_helper(adev);
-		r = amdgpu_ib_ring_tests(adev);
-		if (r) {
-			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
-			r = amdgpu_device_ip_suspend(adev);
-			need_full_reset = true;
-			goto retry;
-		}
+	if (r < 0 || tmo <= 0) {
+		DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
+		return -EIO;
 	}
 
-	if (!r && ((need_full_reset && !(adev->flags & AMD_IS_APU)) || vram_lost))
-		r = amdgpu_device_handle_vram_lost(adev);
-
-	return r;
+	DRM_INFO("recover vram bo from shadow done\n");
+	return 0;
 }
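The rewritten wait loop threads a single timeout budget through the whole fence chain: dma_fence_wait_timeout() returns the remaining jiffies, and that remainder becomes the budget for the next wait, so total blocking time stays bounded no matter how many shadow BOs are in flight. A small stand-alone C model of the budgeting (wait_one() is a hypothetical stand-in for the fence wait; it returns the remaining budget, 0 on timeout):

#include <stdio.h>

/* hypothetical: wait for one item, return remaining budget (0 = timed out) */
static long wait_one(int item, long budget)
{
	long cost = 30;			/* pretend each wait consumes 30 ms */
	(void)item;
	return budget > cost ? budget - cost : 0;
}

int main(void)
{
	long tmo = 100;			/* one budget for the whole chain */
	int i;

	for (i = 0; i < 5; i++) {
		tmo = wait_one(i, tmo);	/* remainder carries to the next wait */
		if (tmo == 0) {
			printf("timed out at item %d\n", i);
			return 1;
		}
	}
	printf("all done, %ld ms left\n", tmo);
	return 0;
}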
+
 /**
  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
  *
@@ -3354,9 +3859,14 @@
 	if (r)
 		goto error;
 
+	amdgpu_virt_init_data_exchange(adev);
 	/* we need recover gart prior to run SMC/CP/SDMA resume */
 	amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
 
+	r = amdgpu_device_fw_loading(adev);
+	if (r)
+		return r;
+
 	/* now we are okay to resume SMC/CP/SDMA */
 	r = amdgpu_device_ip_reinit_late_sriov(adev);
 	if (r)
@@ -3364,55 +3874,79 @@
 	amdgpu_irq_gpu_reset_resume_helper(adev);
 	r = amdgpu_ib_ring_tests(adev);
 
+	amdgpu_amdkfd_post_reset(adev);
+
 error:
-	amdgpu_virt_init_data_exchange(adev);
 	amdgpu_virt_release_full_gpu(adev, true);
 	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
-		atomic_inc(&adev->vram_lost_counter);
-		r = amdgpu_device_handle_vram_lost(adev);
+		amdgpu_inc_vram_lost(adev);
+		r = amdgpu_device_recover_vram(adev);
 	}
 
 	return r;
 }
 
 /**
- * amdgpu_device_gpu_recover - reset the asic and recover scheduler
+ * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
  *
  * @adev: amdgpu device pointer
- * @job: which job trigger hang
- * @force: forces reset regardless of amdgpu_gpu_recovery
  *
- * Attempt to reset the GPU if it has hung (all asics).
- * Returns 0 for success or an error on failure.
+ * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
+ * a hung GPU.
  */
-int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
-			      struct amdgpu_job *job, bool force)
+bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
 {
-	int i, r, resched;
-
-	if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
-		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
-		return 0;
+	if (!amdgpu_device_ip_check_soft_reset(adev)) {
+		DRM_INFO("Timeout, but no hardware hang detected.\n");
+		return false;
 	}
 
-	if (!force && (amdgpu_gpu_recovery == 0 ||
-			(amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))) {
-		DRM_INFO("GPU recovery disabled.\n");
-		return 0;
+	if (amdgpu_gpu_recovery == 0)
+		goto disabled;
+
+	if (amdgpu_sriov_vf(adev))
+		return true;
+
+	if (amdgpu_gpu_recovery == -1) {
+		switch (adev->asic_type) {
+		case CHIP_BONAIRE:
+		case CHIP_HAWAII:
+		case CHIP_TOPAZ:
+		case CHIP_TONGA:
+		case CHIP_FIJI:
+		case CHIP_POLARIS10:
+		case CHIP_POLARIS11:
+		case CHIP_POLARIS12:
+		case CHIP_VEGAM:
+		case CHIP_VEGA20:
+		case CHIP_VEGA10:
+		case CHIP_VEGA12:
+		case CHIP_RAVEN:
+		case CHIP_ARCTURUS:
+		case CHIP_RENOIR:
+		case CHIP_NAVI10:
+		case CHIP_NAVI14:
+		case CHIP_NAVI12:
+			break;
+		default:
+			goto disabled;
+		}
 	}
 
-	dev_info(adev->dev, "GPU reset begin!\n");
+	return true;
 
-	mutex_lock(&adev->lock_reset);
-	atomic_inc(&adev->gpu_reset_counter);
-	adev->in_gpu_reset = 1;
+disabled:
+	DRM_INFO("GPU recovery disabled.\n");
+	return false;
+}
 
-	/* Block kfd */
-	amdgpu_amdkfd_pre_reset(adev);
-	/* block TTM */
-	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+
+static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
+					struct amdgpu_job *job,
+					bool *need_full_reset_arg)
+{
+	int i, r = 0;
+	bool need_full_reset = *need_full_reset_arg;
 
 	/* block all schedulers and reset given job's ring */
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -3421,57 +3955,437 @@
 		if (!ring || !ring->sched.thread)
 			continue;
 
-		kthread_park(ring->sched.thread);
+		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
+		amdgpu_fence_driver_force_completion(ring);
+	}
 
-		if (job && job->base.sched == &ring->sched)
-			continue;
+	if (job)
+		drm_sched_increase_karma(&job->base);
 
-		drm_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);
+	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
+	if (!amdgpu_sriov_vf(adev)) {
 
-		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
-		amdgpu_fence_driver_force_completion(ring);
+		if (!need_full_reset)
+			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
+
+		if (!need_full_reset) {
+			amdgpu_device_ip_pre_soft_reset(adev);
+			r = amdgpu_device_ip_soft_reset(adev);
+			amdgpu_device_ip_post_soft_reset(adev);
+			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
+				DRM_INFO("soft reset failed, will fallback to full reset!\n");
+				need_full_reset = true;
+			}
+		}
+
+		if (need_full_reset)
+			r = amdgpu_device_ip_suspend(adev);
+
+		*need_full_reset_arg = need_full_reset;
 	}
 
-	if (amdgpu_sriov_vf(adev))
-		r = amdgpu_device_reset_sriov(adev, job ? false : true);
-	else
-		r = amdgpu_device_reset(adev);
+	return r;
+}
 
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		struct amdgpu_ring *ring = adev->rings[i];
+static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
+			       struct list_head *device_list_handle,
+			       bool *need_full_reset_arg)
+{
+	struct amdgpu_device *tmp_adev = NULL;
+	bool need_full_reset = *need_full_reset_arg, vram_lost = false;
+	int r = 0;
 
-		if (!ring || !ring->sched.thread)
-			continue;
+	/*
	 * ASIC reset has to be done on all XGMI hive nodes ASAP
+	 * to allow proper links negotiation in FW (within 1 sec)
+	 */
+	if (need_full_reset) {
+		list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+			/* For XGMI run all resets in parallel to speed up the process */
+			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+				if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
+					r = -EALREADY;
+			} else
+				r = amdgpu_asic_reset(tmp_adev);
+
+			if (r) {
+				DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
+					  r, tmp_adev->ddev->unique);
+				break;
+			}
+		}
+
+		/* For XGMI wait for all resets to complete before proceeding */
+		if (!r) {
+			list_for_each_entry(tmp_adev, device_list_handle,
+					    gmc.xgmi.head) {
+				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+					flush_work(&tmp_adev->xgmi_reset_work);
+					r = tmp_adev->asic_reset_res;
+					if (r)
+						break;
+				}
+			}
+		}
+	}
+
+	if (!r && amdgpu_ras_intr_triggered()) {
+		list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+			if (tmp_adev->mmhub.funcs &&
+			    tmp_adev->mmhub.funcs->reset_ras_error_count)
+				tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev);
+		}
+
+		amdgpu_ras_intr_cleared();
+	}
+
+	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+		if (need_full_reset) {
+			/* post card */
+			if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
+				DRM_WARN("asic atom init failed!");
+
+			if (!r) {
+				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
+				r = amdgpu_device_ip_resume_phase1(tmp_adev);
+				if (r)
+					goto out;
+
+				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
+				if (vram_lost) {
+					DRM_INFO("VRAM is lost due to GPU reset!\n");
+					amdgpu_inc_vram_lost(tmp_adev);
+				}
+
+				r = amdgpu_gtt_mgr_recover(
+						&tmp_adev->mman.bdev.man[TTM_PL_TT]);
+				if (r)
+					goto out;
+
+				r = amdgpu_device_fw_loading(tmp_adev);
+				if (r)
+					return r;
+
+				r = amdgpu_device_ip_resume_phase2(tmp_adev);
+				if (r)
+					goto out;
+
+				if (vram_lost)
+					amdgpu_device_fill_reset_magic(tmp_adev);
+
+				/*
+				 * Add this ASIC as tracked, as the reset already
+				 * completed successfully.
+				 */
+				amdgpu_register_gpu_instance(tmp_adev);
+
+				r = amdgpu_device_ip_late_init(tmp_adev);
+				if (r)
+					goto out;
+
+				amdgpu_fbdev_set_suspend(tmp_adev, 0);
+
+				/* must succeed. */
+				amdgpu_ras_resume(tmp_adev);
+
+				/* Update PSP FW topology after reset */
+				if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
+					r = amdgpu_xgmi_update_topology(hive, tmp_adev);
+			}
+		}
+
+
+out:
+		if (!r) {
+			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
+			r = amdgpu_ib_ring_tests(tmp_adev);
+			if (r) {
+				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
+				r = amdgpu_device_ip_suspend(tmp_adev);
+				need_full_reset = true;
+				r = -EAGAIN;
+				goto end;
+			}
+		}
+
+		if (!r)
+			r = amdgpu_device_recover_vram(tmp_adev);
+		else
+			tmp_adev->asic_reset_res = r;
+	}
+
+end:
+	*need_full_reset_arg = need_full_reset;
+	return r;
+}
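amdgpu_do_asic_reset() fans the per-node resets out to system_unbound_wq and then flush_work()s each node, collecting per-device results afterwards, so that all XGMI hive members reset inside the firmware's one-second link-negotiation window. A self-contained sketch of that dispatch/flush/collect shape (hypothetical module, not the driver code):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/workqueue.h>

struct node {
	struct work_struct work;
	int result;
};

static struct node nodes[4];

static void node_reset_fn(struct work_struct *work)
{
	struct node *n = container_of(work, struct node, work);

	n->result = 0;	/* pretend the reset succeeded */
}

static int __init demo_init(void)
{
	int i, r = 0;

	/* dispatch all resets in parallel ... */
	for (i = 0; i < ARRAY_SIZE(nodes); i++) {
		INIT_WORK(&nodes[i].work, node_reset_fn);
		if (!queue_work(system_unbound_wq, &nodes[i].work))
			r = -EALREADY;	/* was already pending */
	}

	/* ... then wait for every one and collect the results */
	for (i = 0; i < ARRAY_SIZE(nodes); i++) {
		flush_work(&nodes[i].work);
		if (!r)
			r = nodes[i].result;
	}
	return r;
}

static void __exit demo_exit(void) { }

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");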
+
+static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
+{
+	if (trylock) {
+		if (!mutex_trylock(&adev->lock_reset))
+			return false;
+	} else
+		mutex_lock(&adev->lock_reset);
+
+	atomic_inc(&adev->gpu_reset_counter);
+	adev->in_gpu_reset = true;
+	switch (amdgpu_asic_reset_method(adev)) {
+	case AMD_RESET_METHOD_MODE1:
+		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
+		break;
+	case AMD_RESET_METHOD_MODE2:
+		adev->mp1_state = PP_MP1_STATE_RESET;
+		break;
+	default:
+		adev->mp1_state = PP_MP1_STATE_NONE;
+		break;
+	}
+
+	return true;
+}
+
+static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
+{
+	amdgpu_vf_error_trans_all(adev);
+	adev->mp1_state = PP_MP1_STATE_NONE;
+	adev->in_gpu_reset = false;
+	mutex_unlock(&adev->lock_reset);
+}
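amdgpu_device_lock_adev() takes lock_reset with mutex_trylock() when the caller may be racing another timeout handler, bailing out instead of blocking and possibly chaining resets. A condensed sketch of that guard (fragment using only the standard mutex API; the demo_ names are hypothetical):

#include <linux/mutex.h>

static DEFINE_MUTEX(reset_lock);

/* returns false if another reset already owns the lock */
static bool demo_lock_for_reset(bool may_race)
{
	if (may_race)
		return mutex_trylock(&reset_lock);
	mutex_lock(&reset_lock);
	return true;
}

static void demo_unlock_after_reset(void)
{
	mutex_unlock(&reset_lock);
}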
+
+/**
+ * amdgpu_device_gpu_recover - reset the asic and recover scheduler
+ *
+ * @adev: amdgpu device pointer
+ * @job: which job triggered the hang
+ *
+ * Attempt to reset the GPU if it has hung (all asics).
+ * Attempt to do soft-reset or full-reset and reinitialize the ASIC.
+ * Returns 0 for success or an error on failure.
+ */
+
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+			      struct amdgpu_job *job)
+{
+	struct list_head device_list, *device_list_handle = NULL;
+	bool need_full_reset, job_signaled;
+	struct amdgpu_hive_info *hive = NULL;
+	struct amdgpu_device *tmp_adev = NULL;
+	int i, r = 0;
+	bool in_ras_intr = amdgpu_ras_intr_triggered();
+	bool use_baco =
+		(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) ?
+		true : false;
+
+	/*
+	 * Flush RAM to disk so that after reboot
+	 * the user can read log and see why the system rebooted.
+	 */
+	if (in_ras_intr && !use_baco && amdgpu_ras_get_context(adev)->reboot) {
+
+		DRM_WARN("Emergency reboot.");
+
+#ifdef notyet
+		ksys_sync_helper();
+		emergency_restart();
+#else
+		panic("emergency_restart");
+#endif
+	}
+
+	need_full_reset = job_signaled = false;
+	INIT_LIST_HEAD(&device_list);
+
+	dev_info(adev->dev, "GPU %s begin!\n",
+		 (in_ras_intr && !use_baco) ? "jobs stop" : "reset");
+
+	cancel_delayed_work_sync(&adev->delayed_init_work);
+
+	hive = amdgpu_get_xgmi_hive(adev, false);
+
+	/*
+	 * Here we trylock to avoid chain of resets executing, either
+	 * triggered by jobs on different adevs in an XGMI hive or jobs on
+	 * different schedulers for same device while this TO handler is running.
+	 * We always reset all schedulers for device and all devices for XGMI
+	 * hive so that should take care of them too.
+	 */
+
+	if (hive && !mutex_trylock(&hive->reset_lock)) {
+		DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
+			 job ? job->base.id : -1, hive->hive_id);
+		return 0;
+	}
+
+	/* Start with adev pre asic reset first for soft reset check. */
+	if (!amdgpu_device_lock_adev(adev, !hive)) {
+		DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
+			 job ? job->base.id : -1);
+		return 0;
+	}
+
+	/* Block kfd: SRIOV would do it separately */
+	if (!amdgpu_sriov_vf(adev))
+		amdgpu_amdkfd_pre_reset(adev);
+
+	/* Build list of devices to reset */
+	if (adev->gmc.xgmi.num_physical_nodes > 1) {
+		if (!hive) {
+			/* unlock kfd: SRIOV would do it separately */
+			if (!amdgpu_sriov_vf(adev))
+				amdgpu_amdkfd_post_reset(adev);
+			amdgpu_device_unlock_adev(adev);
+			return -ENODEV;
+		}
+
+		/*
+		 * In case we are in XGMI hive mode device reset is done for all the
+		 * nodes in the hive to retrain all XGMI links and hence the reset
+		 * sequence is executed in loop on all nodes.
+		 */
+		device_list_handle = &hive->device_list;
+	} else {
+		list_add_tail(&adev->gmc.xgmi.head, &device_list);
+		device_list_handle = &device_list;
+	}
+
+	/* block all schedulers and reset given job's ring */
+	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+		if (tmp_adev != adev) {
+			amdgpu_device_lock_adev(tmp_adev, false);
+			if (!amdgpu_sriov_vf(tmp_adev))
+				amdgpu_amdkfd_pre_reset(tmp_adev);
+		}
 
-		/* only need recovery sched of the given job's ring
-		 * or all rings (in the case @job is NULL)
-		 * after above amdgpu_reset accomplished
+		/*
+		 * Mark these ASICs to be reset as untracked first,
+		 * and add them back after reset completed
 		 */
-		if ((!job || job->base.sched == &ring->sched) && !r)
-			drm_sched_job_recovery(&ring->sched);
+		amdgpu_unregister_gpu_instance(tmp_adev);
+
+		amdgpu_fbdev_set_suspend(adev, 1);
+
+		/* disable ras on ALL IPs */
+		if (!(in_ras_intr && !use_baco) &&
+		      amdgpu_device_ip_need_full_reset(tmp_adev))
+			amdgpu_ras_suspend(tmp_adev);
+
+		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+			struct amdgpu_ring *ring = tmp_adev->rings[i];
 
-		kthread_unpark(ring->sched.thread);
+			if (!ring || !ring->sched.thread)
+				continue;
+
+			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
+
+			if (in_ras_intr && !use_baco)
+				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
+		}
 	}
 
-	if (!amdgpu_device_has_dc_support(adev)) {
-		drm_helper_resume_force_mode(adev->ddev);
+
+	if (in_ras_intr && !use_baco)
+		goto skip_sched_resume;
+
+	/*
+	 * Must check guilty signal here since after this point all old
+	 * HW fences are force signaled.
+	 *
+	 * job->base holds a reference to parent fence
+	 */
+	if (job && job->base.s_fence->parent &&
+	    dma_fence_is_signaled(job->base.s_fence->parent))
+		job_signaled = true;
+
+	if (job_signaled) {
+		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
+		goto skip_hw_reset;
 	}
 
-	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
+	/* Guilty job will be freed after this */
+	r = amdgpu_device_pre_asic_reset(adev, job, &need_full_reset);
 	if (r) {
-		/* bad news, how to tell it to userspace ? */
-		dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
-		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
+		/* TODO: should we stop? */
+		DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
+			  r, adev->ddev->unique);
+		adev->asic_reset_res = r;
+	}
+
+retry:	/* Rest of adevs pre asic reset from XGMI hive. */
+	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+
+		if (tmp_adev == adev)
+			continue;
+
+		r = amdgpu_device_pre_asic_reset(tmp_adev,
+						 NULL,
+						 &need_full_reset);
+		/* TODO: should we stop? */
+		if (r) {
+			DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
+				  r, tmp_adev->ddev->unique);
+			tmp_adev->asic_reset_res = r;
+		}
+	}
+
+	/* Actual ASIC resets if needed. */
+	/* TODO: implement XGMI hive reset logic for SRIOV */
+	if (amdgpu_sriov_vf(adev)) {
+		r = amdgpu_device_reset_sriov(adev, job ? false : true);
+		if (r)
+			adev->asic_reset_res = r;
+	} else {
+		r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
+		if (r && r == -EAGAIN)
+			goto retry;
	}
 
+skip_hw_reset:
+
+	/* Post ASIC reset for all devs. */
+	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+
+		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+			struct amdgpu_ring *ring = tmp_adev->rings[i];
+
+			if (!ring || !ring->sched.thread)
+				continue;
+
+			/* No point in resubmitting jobs if we didn't HW reset */
+			if (!tmp_adev->asic_reset_res && !job_signaled)
+				drm_sched_resubmit_jobs(&ring->sched);
+
+			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
+		}
+
+		if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
+			drm_helper_resume_force_mode(tmp_adev->ddev);
+		}
+
+		tmp_adev->asic_reset_res = 0;
+
+		if (r) {
+			/* bad news, how to tell it to userspace ? */
+			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
+			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
+		} else {
+			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
+		}
+	}
+
+skip_sched_resume:
+	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+		/* unlock kfd: SRIOV would do it separately */
+		if (!(in_ras_intr && !use_baco) && !amdgpu_sriov_vf(tmp_adev))
+			amdgpu_amdkfd_post_reset(tmp_adev);
+		amdgpu_device_unlock_adev(tmp_adev);
+	}
+
+	if (hive)
+		mutex_unlock(&hive->reset_lock);
+
+	if (r)
+		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
 
 	return r;
 }
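The recovery path above brackets the hardware reset with the DRM scheduler sequence: drm_sched_stop() parks each scheduler and pulls jobs off the hardware, drm_sched_resubmit_jobs() re-queues the survivors once the ASIC is back, and drm_sched_start() lets execution resume. A condensed single-ring sketch of that ordering (fragment; hw_reset is a hypothetical callback, not the driver's reset entry point):

#include <drm/gpu_scheduler.h>

static void one_ring_recover_sketch(struct drm_gpu_scheduler *sched,
				    struct drm_sched_job *bad_job,
				    int (*hw_reset)(void))
{
	int r;

	/* park the scheduler; pending jobs are pulled off the hardware */
	drm_sched_stop(sched, bad_job);

	if (bad_job)
		drm_sched_increase_karma(bad_job);

	r = hw_reset();

	/* only re-queue the old jobs if the hardware actually came back */
	if (!r)
		drm_sched_resubmit_jobs(sched);

	drm_sched_start(sched, !r /* full_recovery */);
}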
@@ -3487,8 +4401,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
 {
 	struct pci_dev *pdev;
-	enum pci_bus_speed speed_cap;
-	enum pcie_link_width link_width;
+	enum pci_bus_speed speed_cap, platform_speed_cap;
+	enum pcie_link_width platform_link_width;
 
 	if (amdgpu_pcie_gen_cap)
 		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
@@ -3505,6 +4419,12 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
 		return;
 	}
 
+	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
+		return;
+
+	pcie_bandwidth_available(adev->pdev, NULL,
+				 &platform_speed_cap, &platform_link_width);
+
 	if (adev->pm.pcie_gen_mask == 0) {
 		/* asic caps */
 		pdev = adev->pdev;
@@ -3530,22 +4450,20 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
 			adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
 		}
 		/* platform caps */
-		pdev = adev->ddev->pdev->bus->self;
-		speed_cap = pcie_get_speed_cap(pdev);
-		if (speed_cap == PCI_SPEED_UNKNOWN) {
+		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
 			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
 						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
 		} else {
-			if (speed_cap == PCIE_SPEED_16_0GT)
+			if (platform_speed_cap == PCIE_SPEED_16_0GT)
 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
-			else if (speed_cap == PCIE_SPEED_8_0GT)
+			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
-			else if (speed_cap == PCIE_SPEED_5_0GT)
+			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
 			else
@@ -3554,12 +4472,10 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
 		}
 	}
 	if (adev->pm.pcie_mlw_mask == 0) {
-		pdev = adev->ddev->pdev->bus->self;
-		link_width = pcie_get_width_cap(pdev);
-		if (link_width == PCIE_LNK_WIDTH_UNKNOWN) {
+		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
 			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
 		} else {
-			switch (link_width) {
+			switch (platform_link_width) {
 			case PCIE_LNK_X32:
 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
@@ -3609,3 +4525,35 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
 		}
 	}
 }
+
+int amdgpu_device_baco_enter(struct drm_device *dev)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+	if (!amdgpu_device_supports_baco(adev->ddev))
+		return -ENOTSUPP;
+
+	if (ras && ras->supported)
+		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
+
+	return amdgpu_dpm_baco_enter(adev);
+}
+
+int amdgpu_device_baco_exit(struct drm_device *dev)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+	int ret = 0;
+
+	if (!amdgpu_device_supports_baco(adev->ddev))
+		return -ENOTSUPP;
+
+	ret = amdgpu_dpm_baco_exit(adev);
+	if (ret)
+		return ret;
+
+	if (ras && ras->supported)
+		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
+
+	return 0;
+}
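The two BACO helpers added at the end pair doorbell-interrupt masking with the DPM BACO transitions when RAS is active. A hypothetical caller would bracket the powered-down window with them and propagate their errors (sketch only; the my_runtime_* names are illustrative, not driver entry points):

static int my_runtime_suspend(struct drm_device *dev)
{
	int r;

	r = amdgpu_device_baco_enter(dev);
	if (r)
		return r;	/* -ENOTSUPP when BACO isn't available */

	/* ... device is now in BACO; undo this on the resume path ... */

	return 0;
}

static int my_runtime_resume(struct drm_device *dev)
{
	return amdgpu_device_baco_exit(dev);
}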