Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c	457
1 file changed, 341 insertions, 116 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index d04d0b123212..3b7e7af09ead 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -25,18 +25,19 @@
 #include "gmc_v9_0.h"
 #include "amdgpu_atomfirmware.h"
 
-#include "vega10/soc15ip.h"
-#include "vega10/HDP/hdp_4_0_offset.h"
-#include "vega10/HDP/hdp_4_0_sh_mask.h"
-#include "vega10/GC/gc_9_0_sh_mask.h"
-#include "vega10/DC/dce_12_0_offset.h"
-#include "vega10/DC/dce_12_0_sh_mask.h"
-#include "vega10/vega10_enum.h"
-
+#include "hdp/hdp_4_0_offset.h"
+#include "hdp/hdp_4_0_sh_mask.h"
+#include "gc/gc_9_0_sh_mask.h"
+#include "dce/dce_12_0_offset.h"
+#include "dce/dce_12_0_sh_mask.h"
+#include "vega10_enum.h"
+#include "mmhub/mmhub_1_0_offset.h"
+#include "athub/athub_1_0_offset.h"
+
+#include "soc15.h"
 #include "soc15_common.h"
+#include "umc/umc_6_0_sh_mask.h"
 
-#include "nbio_v6_1.h"
-#include "nbio_v7_0.h"
 #include "gfxhub_v1_0.h"
 #include "mmhub_v1_0.h"
 
@@ -71,13 +72,140 @@ static const u32 golden_settings_vega10_hdp[] =
 	0xf6e, 0x0fffffff, 0x00000000,
 };
 
+static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
+{
+	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
+	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
+};
+
+static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
+{
+	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
+	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
+};
+
+/* Ecc related register addresses, (BASE + reg offset) */
+/* Universal Memory Controller caps (may be fused). */
+/* UMCCH:UmcLocalCap */
+#define UMCLOCALCAPS_ADDR0	(0x00014306 + 0x00000000)
+#define UMCLOCALCAPS_ADDR1	(0x00014306 + 0x00000800)
+#define UMCLOCALCAPS_ADDR2	(0x00014306 + 0x00001000)
+#define UMCLOCALCAPS_ADDR3	(0x00014306 + 0x00001800)
+#define UMCLOCALCAPS_ADDR4	(0x00054306 + 0x00000000)
+#define UMCLOCALCAPS_ADDR5	(0x00054306 + 0x00000800)
+#define UMCLOCALCAPS_ADDR6	(0x00054306 + 0x00001000)
+#define UMCLOCALCAPS_ADDR7	(0x00054306 + 0x00001800)
+#define UMCLOCALCAPS_ADDR8	(0x00094306 + 0x00000000)
+#define UMCLOCALCAPS_ADDR9	(0x00094306 + 0x00000800)
+#define UMCLOCALCAPS_ADDR10	(0x00094306 + 0x00001000)
+#define UMCLOCALCAPS_ADDR11	(0x00094306 + 0x00001800)
+#define UMCLOCALCAPS_ADDR12	(0x000d4306 + 0x00000000)
+#define UMCLOCALCAPS_ADDR13	(0x000d4306 + 0x00000800)
+#define UMCLOCALCAPS_ADDR14	(0x000d4306 + 0x00001000)
+#define UMCLOCALCAPS_ADDR15	(0x000d4306 + 0x00001800)
+
+/* Universal Memory Controller Channel config. */
+/* UMCCH:UMC_CONFIG */
+#define UMCCH_UMC_CONFIG_ADDR0	(0x00014040 + 0x00000000)
+#define UMCCH_UMC_CONFIG_ADDR1	(0x00014040 + 0x00000800)
+#define UMCCH_UMC_CONFIG_ADDR2	(0x00014040 + 0x00001000)
+#define UMCCH_UMC_CONFIG_ADDR3	(0x00014040 + 0x00001800)
+#define UMCCH_UMC_CONFIG_ADDR4	(0x00054040 + 0x00000000)
+#define UMCCH_UMC_CONFIG_ADDR5	(0x00054040 + 0x00000800)
+#define UMCCH_UMC_CONFIG_ADDR6	(0x00054040 + 0x00001000)
+#define UMCCH_UMC_CONFIG_ADDR7	(0x00054040 + 0x00001800)
+#define UMCCH_UMC_CONFIG_ADDR8	(0x00094040 + 0x00000000)
+#define UMCCH_UMC_CONFIG_ADDR9	(0x00094040 + 0x00000800)
+#define UMCCH_UMC_CONFIG_ADDR10	(0x00094040 + 0x00001000)
+#define UMCCH_UMC_CONFIG_ADDR11	(0x00094040 + 0x00001800)
+#define UMCCH_UMC_CONFIG_ADDR12	(0x000d4040 + 0x00000000)
+#define UMCCH_UMC_CONFIG_ADDR13	(0x000d4040 + 0x00000800)
+#define UMCCH_UMC_CONFIG_ADDR14	(0x000d4040 + 0x00001000)
+#define UMCCH_UMC_CONFIG_ADDR15	(0x000d4040 + 0x00001800)
+
+/* Universal Memory Controller Channel Ecc config. */
+/* UMCCH:EccCtrl */
+#define UMCCH_ECCCTRL_ADDR0	(0x00014053 + 0x00000000)
+#define UMCCH_ECCCTRL_ADDR1	(0x00014053 + 0x00000800)
+#define UMCCH_ECCCTRL_ADDR2	(0x00014053 + 0x00001000)
+#define UMCCH_ECCCTRL_ADDR3	(0x00014053 + 0x00001800)
+#define UMCCH_ECCCTRL_ADDR4	(0x00054053 + 0x00000000)
+#define UMCCH_ECCCTRL_ADDR5	(0x00054053 + 0x00000800)
+#define UMCCH_ECCCTRL_ADDR6	(0x00054053 + 0x00001000)
+#define UMCCH_ECCCTRL_ADDR7	(0x00054053 + 0x00001800)
+#define UMCCH_ECCCTRL_ADDR8	(0x00094053 + 0x00000000)
+#define UMCCH_ECCCTRL_ADDR9	(0x00094053 + 0x00000800)
+#define UMCCH_ECCCTRL_ADDR10	(0x00094053 + 0x00001000)
+#define UMCCH_ECCCTRL_ADDR11	(0x00094053 + 0x00001800)
+#define UMCCH_ECCCTRL_ADDR12	(0x000d4053 + 0x00000000)
+#define UMCCH_ECCCTRL_ADDR13	(0x000d4053 + 0x00000800)
+#define UMCCH_ECCCTRL_ADDR14	(0x000d4053 + 0x00001000)
+#define UMCCH_ECCCTRL_ADDR15	(0x000d4053 + 0x00001800)
+
+static const uint32_t ecc_umclocalcap_addrs[] = {
+	UMCLOCALCAPS_ADDR0,
+	UMCLOCALCAPS_ADDR1,
+	UMCLOCALCAPS_ADDR2,
+	UMCLOCALCAPS_ADDR3,
+	UMCLOCALCAPS_ADDR4,
+	UMCLOCALCAPS_ADDR5,
+	UMCLOCALCAPS_ADDR6,
+	UMCLOCALCAPS_ADDR7,
+	UMCLOCALCAPS_ADDR8,
+	UMCLOCALCAPS_ADDR9,
+	UMCLOCALCAPS_ADDR10,
+	UMCLOCALCAPS_ADDR11,
+	UMCLOCALCAPS_ADDR12,
+	UMCLOCALCAPS_ADDR13,
+	UMCLOCALCAPS_ADDR14,
+	UMCLOCALCAPS_ADDR15,
+};
+
+static const uint32_t ecc_umcch_umc_config_addrs[] = {
+	UMCCH_UMC_CONFIG_ADDR0,
+	UMCCH_UMC_CONFIG_ADDR1,
+	UMCCH_UMC_CONFIG_ADDR2,
+	UMCCH_UMC_CONFIG_ADDR3,
+	UMCCH_UMC_CONFIG_ADDR4,
+	UMCCH_UMC_CONFIG_ADDR5,
+	UMCCH_UMC_CONFIG_ADDR6,
+	UMCCH_UMC_CONFIG_ADDR7,
+	UMCCH_UMC_CONFIG_ADDR8,
+	UMCCH_UMC_CONFIG_ADDR9,
+	UMCCH_UMC_CONFIG_ADDR10,
+	UMCCH_UMC_CONFIG_ADDR11,
+	UMCCH_UMC_CONFIG_ADDR12,
+	UMCCH_UMC_CONFIG_ADDR13,
+	UMCCH_UMC_CONFIG_ADDR14,
+	UMCCH_UMC_CONFIG_ADDR15,
+};
+
+static const uint32_t ecc_umcch_eccctrl_addrs[] = {
+	UMCCH_ECCCTRL_ADDR0,
+	UMCCH_ECCCTRL_ADDR1,
+	UMCCH_ECCCTRL_ADDR2,
+	UMCCH_ECCCTRL_ADDR3,
+	UMCCH_ECCCTRL_ADDR4,
+	UMCCH_ECCCTRL_ADDR5,
+	UMCCH_ECCCTRL_ADDR6,
+	UMCCH_ECCCTRL_ADDR7,
+	UMCCH_ECCCTRL_ADDR8,
+	UMCCH_ECCCTRL_ADDR9,
+	UMCCH_ECCCTRL_ADDR10,
+	UMCCH_ECCCTRL_ADDR11,
+	UMCCH_ECCCTRL_ADDR12,
+	UMCCH_ECCCTRL_ADDR13,
+	UMCCH_ECCCTRL_ADDR14,
+	UMCCH_ECCCTRL_ADDR15,
+};
+
 static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
 					struct amdgpu_irq_src *src,
 					unsigned type,
 					enum amdgpu_interrupt_state state)
 {
 	struct amdgpu_vmhub *hub;
-	u32 tmp, reg, bits, i;
+	u32 tmp, reg, bits, i, j;
 
 	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
@@ -89,43 +217,26 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
 	switch (state) {
 	case AMDGPU_IRQ_STATE_DISABLE:
-		/* MM HUB */
-		hub = &adev->vmhub[AMDGPU_MMHUB];
-		for (i = 0; i< 16; i++) {
-			reg = hub->vm_context0_cntl + i;
-			tmp = RREG32(reg);
-			tmp &= ~bits;
-			WREG32(reg, tmp);
-		}
-
-		/* GFX HUB */
-		hub = &adev->vmhub[AMDGPU_GFXHUB];
-		for (i = 0; i < 16; i++) {
-			reg = hub->vm_context0_cntl + i;
-			tmp = RREG32(reg);
-			tmp &= ~bits;
-			WREG32(reg, tmp);
+		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
+			hub = &adev->vmhub[j];
+			for (i = 0; i < 16; i++) {
+				reg = hub->vm_context0_cntl + i;
+				tmp = RREG32(reg);
+				tmp &= ~bits;
+				WREG32(reg, tmp);
+			}
 		}
 		break;
 	case AMDGPU_IRQ_STATE_ENABLE:
-		/* MM HUB */
-		hub = &adev->vmhub[AMDGPU_MMHUB];
-		for (i = 0; i< 16; i++) {
-			reg = hub->vm_context0_cntl + i;
-			tmp = RREG32(reg);
-			tmp |= bits;
-			WREG32(reg, tmp);
-		}
-
-		/* GFX HUB */
-		hub = &adev->vmhub[AMDGPU_GFXHUB];
-		for (i = 0; i < 16; i++) {
-			reg = hub->vm_context0_cntl + i;
-			tmp = RREG32(reg);
-			tmp |= bits;
-			WREG32(reg, tmp);
+		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
+			hub = &adev->vmhub[j];
+			for (i = 0; i < 16; i++) {
+				reg = hub->vm_context0_cntl + i;
+				tmp = RREG32(reg);
+				tmp |= bits;
+				WREG32(reg, tmp);
+			}
 		}
-
 		break;
 	default:
 		break;
 	}
@@ -137,7 +248,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
 				struct amdgpu_irq_src *source,
 				struct amdgpu_iv_entry *entry)
 {
-	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vm_id_src];
+	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
 	uint32_t status = 0;
 	u64 addr;
 
@@ -151,9 +262,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
 
 	if (printk_ratelimit()) {
 		dev_err(adev->dev,
-			"[%s] VMC page fault (src_id:%u ring:%u vm_id:%u pas_id:%u)\n",
-			entry->vm_id_src ? "mmhub" : "gfxhub",
-			entry->src_id, entry->ring_id, entry->vm_id,
+			"[%s] VMC page fault (src_id:%u ring:%u vmid:%u pas_id:%u)\n",
+			entry->vmid_src ? "mmhub" : "gfxhub",
+			entry->src_id, entry->ring_id, entry->vmid,
 			entry->pas_id);
 		dev_err(adev->dev, " at page 0x%016llx from %d\n",
 			addr, entry->client_id);
@@ -177,13 +288,13 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
 }
 
-static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id)
+static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
 {
 	u32 req = 0;
 
-	/* invalidate using legacy mode on vm_id*/
+	/* invalidate using legacy mode on vmid*/
 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
-			    PER_VMID_INVALIDATE_REQ, 1 << vm_id);
+			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
@@ -219,10 +330,7 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
 	unsigned i, j;
 
 	/* flush hdp cache */
-	if (adev->flags & AMD_IS_APU)
-		nbio_v7_0_hdp_flush(adev);
-	else
-		nbio_v6_1_hdp_flush(adev);
+	adev->nbio_funcs->hdp_flush(adev);
 
 	spin_lock(&adev->mc.invalidate_lock);
 
@@ -361,11 +469,28 @@ static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
 	return pte_flag;
 }
 
-static u64 gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, u64 addr)
+static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
+				uint64_t *addr, uint64_t *flags)
 {
-	addr = adev->vm_manager.vram_base_offset + addr - adev->mc.vram_start;
-	BUG_ON(addr & 0xFFFF00000000003FULL);
-	return addr;
+	if (!(*flags & AMDGPU_PDE_PTE))
+		*addr = adev->vm_manager.vram_base_offset + *addr -
+			adev->mc.vram_start;
+	BUG_ON(*addr & 0xFFFF00000000003FULL);
+
+	if (!adev->mc.translate_further)
+		return;
+
+	if (level == AMDGPU_VM_PDB1) {
+		/* Set the block fragment size */
+		if (!(*flags & AMDGPU_PDE_PTE))
+			*flags |= AMDGPU_PDE_BFS(0x9);
+
+	} else if (level == AMDGPU_VM_PDB0) {
+		if (*flags & AMDGPU_PDE_PTE)
+			*flags &= ~AMDGPU_PDE_PTE;
+		else
+			*flags |= AMDGPU_PTE_TF;
+	}
 }
 
 static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
@@ -389,14 +514,111 @@ static int gmc_v9_0_early_init(void *handle)
 	gmc_v9_0_set_gart_funcs(adev);
 	gmc_v9_0_set_irq_funcs(adev);
 
+	adev->mc.shared_aperture_start = 0x2000000000000000ULL;
+	adev->mc.shared_aperture_end =
+		adev->mc.shared_aperture_start + (4ULL << 30) - 1;
+	adev->mc.private_aperture_start =
+		adev->mc.shared_aperture_end + 1;
+	adev->mc.private_aperture_end =
+		adev->mc.private_aperture_start + (4ULL << 30) - 1;
+
 	return 0;
 }
 
+static int gmc_v9_0_ecc_available(struct amdgpu_device *adev)
+{
+	uint32_t reg_val;
+	uint32_t reg_addr;
+	uint32_t field_val;
+	size_t i;
+	uint32_t fv2;
+	size_t lost_sheep;
+
+	DRM_DEBUG("ecc: gmc_v9_0_ecc_available()\n");
+
+	lost_sheep = 0;
+	for (i = 0; i < ARRAY_SIZE(ecc_umclocalcap_addrs); ++i) {
+		reg_addr = ecc_umclocalcap_addrs[i];
+		DRM_DEBUG("ecc: "
+			  "UMCCH_UmcLocalCap[%zu]: reg_addr: 0x%08x\n",
+			  i, reg_addr);
+		reg_val = RREG32(reg_addr);
+		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UmcLocalCap,
+					  EccDis);
+		DRM_DEBUG("ecc: "
+			  "reg_val: 0x%08x, "
+			  "EccDis: 0x%08x, ",
+			  reg_val, field_val);
+		if (field_val) {
+			DRM_ERROR("ecc: UmcLocalCap:EccDis is set.\n");
+			++lost_sheep;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(ecc_umcch_umc_config_addrs); ++i) {
+		reg_addr = ecc_umcch_umc_config_addrs[i];
+		DRM_DEBUG("ecc: "
+			  "UMCCH0_0_UMC_CONFIG[%zu]: reg_addr: 0x%08x",
+			  i, reg_addr);
+		reg_val = RREG32(reg_addr);
+		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UMC_CONFIG,
+					  DramReady);
+		DRM_DEBUG("ecc: "
+			  "reg_val: 0x%08x, "
+			  "DramReady: 0x%08x\n",
+			  reg_val, field_val);
+
+		if (!field_val) {
+			DRM_ERROR("ecc: UMC_CONFIG:DramReady is not set.\n");
+			++lost_sheep;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(ecc_umcch_eccctrl_addrs); ++i) {
+		reg_addr = ecc_umcch_eccctrl_addrs[i];
+		DRM_DEBUG("ecc: "
+			  "UMCCH_EccCtrl[%zu]: reg_addr: 0x%08x, ",
+			  i, reg_addr);
+		reg_val = RREG32(reg_addr);
+		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
+					  WrEccEn);
+		fv2 = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
+				    RdEccEn);
+		DRM_DEBUG("ecc: "
+			  "reg_val: 0x%08x, "
+			  "WrEccEn: 0x%08x, "
+			  "RdEccEn: 0x%08x\n",
+			  reg_val, field_val, fv2);
+
+		if (!field_val) {
+			DRM_DEBUG("ecc: WrEccEn is not set\n");
+			++lost_sheep;
+		}
+		if (!fv2) {
+			DRM_DEBUG("ecc: RdEccEn is not set\n");
+			++lost_sheep;
+		}
+	}
+
+	DRM_DEBUG("ecc: lost_sheep: %zu\n", lost_sheep);
+	return lost_sheep == 0;
+}
+
 static int gmc_v9_0_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 3, 3 };
+	/*
+	 * The latest engine allocation on gfx9 is:
+	 * Engine 0, 1: idle
+	 * Engine 2, 3: firmware
+	 * Engine 4~13: amdgpu ring, subject to change when ring number changes
+	 * Engine 14~15: idle
+	 * Engine 16: kfd tlb invalidation
+	 * Engine 17: Gart flushes
+	 */
+	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
 	unsigned i;
+	int r;
 
 	for(i = 0; i < adev->num_rings; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];
@@ -408,9 +630,21 @@ static int gmc_v9_0_late_init(void *handle)
 			 ring->funcs->vmhub);
 	}
 
-	/* Engine 17 is used for GART flushes */
+	/* Engine 16 is used for KFD and 17 for GART flushes */
 	for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
-		BUG_ON(vm_inv_eng[i] > 17);
+		BUG_ON(vm_inv_eng[i] > 16);
+
+	if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
+		r = gmc_v9_0_ecc_available(adev);
+		if (r == 1) {
+			DRM_INFO("ECC is active.\n");
+		} else if (r == 0) {
+			DRM_INFO("ECC is not present.\n");
+		} else {
+			DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
+			return r;
+		}
+	}
 
 	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
 }
@@ -421,8 +655,8 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
 	u64 base = 0;
 	if (!amdgpu_sriov_vf(adev))
 		base = mmhub_v1_0_get_fb_location(adev);
-	amdgpu_vram_location(adev, &adev->mc, base);
-	amdgpu_gart_location(adev, mc);
+	amdgpu_device_vram_location(adev, &adev->mc, base);
+	amdgpu_device_gart_location(adev, mc);
 	/* base offset of vram pages */
 	if (adev->flags & AMD_IS_APU)
 		adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
@@ -443,11 +677,15 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
 {
 	u32 tmp;
 	int chansize, numchan;
+	int r;
 
 	adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
 	if (!adev->mc.vram_width) {
 		/* hbm memory channel size */
-		chansize = 128;
+		if (adev->flags & AMD_IS_APU)
+			chansize = 64;
+		else
+			chansize = 128;
 
 		tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
 		tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
@@ -485,17 +723,21 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
 		adev->mc.vram_width = numchan * chansize;
 	}
 
-	/* Could aper size report 0 ? */
-	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
-	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
 	/* size in MB on si */
 	adev->mc.mc_vram_size =
-		((adev->flags & AMD_IS_APU) ? nbio_v7_0_get_memsize(adev) :
-		 nbio_v6_1_get_memsize(adev)) * 1024ULL * 1024ULL;
+		adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
 	adev->mc.real_vram_size = adev->mc.mc_vram_size;
-	adev->mc.visible_vram_size = adev->mc.aper_size;
+
+	if (!(adev->flags & AMD_IS_APU)) {
+		r = amdgpu_device_resize_fb_bar(adev);
+		if (r)
+			return r;
+	}
+	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
+	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
 
 	/* In case the PCI BAR is larger than the actual amount of vram */
+	adev->mc.visible_vram_size = adev->mc.aper_size;
 	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
 		adev->mc.visible_vram_size = adev->mc.real_vram_size;
 
@@ -552,14 +794,12 @@ static int gmc_v9_0_sw_init(void *handle)
 	case CHIP_RAVEN:
 		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
 		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
-			adev->vm_manager.vm_size = 1U << 18;
-			adev->vm_manager.block_size = 9;
-			adev->vm_manager.num_level = 3;
-			amdgpu_vm_set_fragment_size(adev, 9);
+			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
 		} else {
-			/* vm_size is 64GB for legacy 2-level page support */
-			amdgpu_vm_adjust_size(adev, 64, 9);
-			adev->vm_manager.num_level = 1;
+			/* vm_size is 128TB + 512GB for legacy 3-level page support */
+			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
+			adev->mc.translate_further =
+				adev->vm_manager.num_level > 1;
 		}
 		break;
 	case CHIP_VEGA10:
@@ -570,20 +810,12 @@ static int gmc_v9_0_sw_init(void *handle)
 		 * vm size is 256TB (48bit), maximum size of Vega10,
 		 * block size 512 (9bit)
 		 */
-		adev->vm_manager.vm_size = 1U << 18;
-		adev->vm_manager.block_size = 9;
-		adev->vm_manager.num_level = 3;
-		amdgpu_vm_set_fragment_size(adev, 9);
+		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
 		break;
 	default:
 		break;
 	}
 
-	DRM_INFO("vm size is %llu GB, block size is %u-bit,fragment size is %u-bit\n",
-			adev->vm_manager.vm_size,
-			adev->vm_manager.block_size,
-			adev->vm_manager.fragment_size);
-
 	/* This interrupt is VMC page fault.*/
 	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
 				&adev->mc.vm_fault);
@@ -593,8 +825,6 @@ static int gmc_v9_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
-
 	/* Set the internal MC address mask
 	 * This is the max address of the GPU's
 	 * internal address space.
@@ -654,7 +884,7 @@ static int gmc_v9_0_sw_init(void *handle)
 }
 
 /**
- * gmc_v8_0_gart_fini - vm fini callback
+ * gmc_v9_0_gart_fini - vm fini callback
  *
  * @adev: amdgpu_device pointer
 *
@@ -670,9 +900,9 @@ static int gmc_v9_0_sw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	amdgpu_gem_force_release(adev);
 	amdgpu_vm_manager_fini(adev);
 	gmc_v9_0_gart_fini(adev);
-	amdgpu_gem_force_release(adev);
 	amdgpu_bo_fini(adev);
 
 	return 0;
@@ -680,10 +910,20 @@ static int gmc_v9_0_sw_fini(void *handle)
 
 static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
 {
+
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
+		soc15_program_register_sequence(adev,
+						golden_settings_mmhub_1_0_0,
+						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
+		soc15_program_register_sequence(adev,
+						golden_settings_athub_1_0_0,
+						ARRAY_SIZE(golden_settings_athub_1_0_0));
 		break;
 	case CHIP_RAVEN:
+		soc15_program_register_sequence(adev,
+						golden_settings_athub_1_0_0,
+						ARRAY_SIZE(golden_settings_athub_1_0_0));
 		break;
 	default:
 		break;
@@ -701,9 +941,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 	bool value;
 	u32 tmp;
 
-	amdgpu_program_register_sequence(adev,
-					golden_settings_vega10_hdp,
-					(const u32)ARRAY_SIZE(golden_settings_vega10_hdp));
+	amdgpu_device_program_register_sequence(adev,
+						golden_settings_vega10_hdp,
+						ARRAY_SIZE(golden_settings_vega10_hdp));
 
 	if (adev->gart.robj == NULL) {
 		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
@@ -713,12 +953,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 	if (r)
 		return r;
 
-	/* After HDP is initialized, flush HDP.*/
-	if (adev->flags & AMD_IS_APU)
-		nbio_v7_0_hdp_flush(adev);
-	else
-		nbio_v6_1_hdp_flush(adev);
-
 	switch (adev->asic_type) {
 	case CHIP_RAVEN:
 		mmhub_v1_0_initialize_power_gating(adev);
@@ -736,13 +970,13 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 	if (r)
 		return r;
 
-	tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
-	tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
-	WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);
+	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
 
 	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
 	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
 
+	/* After HDP is initialized, flush HDP.*/
+	adev->nbio_funcs->hdp_flush(adev);
+
 	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
 		value = false;
@@ -751,7 +985,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 
 	gfxhub_v1_0_set_fault_enable_default(adev, value);
 	mmhub_v1_0_set_fault_enable_default(adev, value);
-	gmc_v9_0_gart_flush_gpu_tlb(adev, 0);
 
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -770,17 +1003,11 @@ static int gmc_v9_0_hw_init(void *handle)
 	gmc_v9_0_init_golden_registers(adev);
 
 	if (adev->mode_info.num_crtc) {
-		u32 tmp;
-
 		/* Lockout access through VGA aperture*/
-		tmp = RREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL);
-		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
-		WREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL, tmp);
+		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
 
 		/* disable VGA render */
-		tmp = RREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL);
-		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
-		WREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL, tmp);
+		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
 	}
 
 	r = gmc_v9_0_gart_enable(adev);
@@ -822,9 +1049,7 @@ static int gmc_v9_0_suspend(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	gmc_v9_0_hw_fini(adev);
-
-	return 0;
+	return gmc_v9_0_hw_fini(adev);
 }
 
 static int gmc_v9_0_resume(void *handle)
@@ -836,7 +1061,7 @@ static int gmc_v9_0_resume(void *handle)
 	r = gmc_v9_0_hw_init(adev);
 	if (r)
 		return r;
 
-	amdgpu_vm_reset_all_ids(adev);
+	amdgpu_vmid_reset_all(adev);
 
 	return 0;
 }