Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/soc15.c'):

 drivers/gpu/drm/amd/amdgpu/soc15.c | 252 +++++++++++++++++++++++++++++++-----
 1 file changed, 227 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 04fbf05d7176..f70658a536a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -63,6 +63,8 @@
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
+#include "vcn_v2_0.h"
+#include "vcn_v2_5.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"
#include "amdgpu_smu.h"
@@ -115,6 +117,49 @@ static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
+static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags, address, data;
+ u64 r;
+ address = adev->nbio_funcs->get_pcie_index_offset(adev);
+ data = adev->nbio_funcs->get_pcie_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ /* read low 32 bits */
+ WREG32(address, reg);
+ (void)RREG32(address);
+ r = RREG32(data);
+
+ /* read high 32 bits */
+ WREG32(address, reg + 4);
+ (void)RREG32(address);
+ r |= ((u64)RREG32(data) << 32);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+ return r;
+}
+
+static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
+{
+ unsigned long flags, address, data;
+
+ address = adev->nbio_funcs->get_pcie_index_offset(adev);
+ data = adev->nbio_funcs->get_pcie_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ /* write low 32 bits */
+ WREG32(address, reg);
+ (void)RREG32(address);
+ WREG32(data, (u32)(v & 0xffffffffULL));
+ (void)RREG32(data);
+
+ /* write high 32 bits */
+ WREG32(address, reg + 4);
+ (void)RREG32(address);
+ WREG32(data, (u32)(v >> 32));
+ (void)RREG32(data);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
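The new 64-bit accessors reuse the existing PCIE index/data window: the register offset goes into the index register, and the value moves through the 32-bit data register in two halves, low word at reg and high word at reg + 4, with the whole sequence serialized under pcie_idx_lock so the two halves stay consistent. Below is a minimal sketch of the same low/high split over a mocked 32-bit window; the mock_* names are hypothetical stand-ins, not amdgpu API:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical 32-bit register file, indexed like the PCIE window. */
    static uint32_t mock_regs[16];

    static uint32_t mock_read32(uint32_t reg) { return mock_regs[reg / 4]; }
    static void mock_write32(uint32_t reg, uint32_t v) { mock_regs[reg / 4] = v; }

    /* 64-bit read: low word at reg, high word at reg + 4. */
    static uint64_t mock_read64(uint32_t reg)
    {
        uint64_t r = mock_read32(reg);                 /* low 32 bits */
        r |= (uint64_t)mock_read32(reg + 4) << 32;     /* high 32 bits */
        return r;
    }

    /* 64-bit write: same split, low word first. */
    static void mock_write64(uint32_t reg, uint64_t v)
    {
        mock_write32(reg, (uint32_t)(v & 0xffffffffULL));
        mock_write32(reg + 4, (uint32_t)(v >> 32));
    }

    int main(void)
    {
        mock_write64(8, 0x1122334455667788ULL);
        /* prints 0x1122334455667788 */
        printf("0x%llx\n", (unsigned long long)mock_read64(8));
        return 0;
    }

The dummy read-backs of the index register in the kernel version, (void)RREG32(address), serve to post the index write before the data register is touched; the mock above has no such ordering requirement, so it omits them.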
static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
@@ -464,12 +509,23 @@ static int soc15_asic_baco_reset(struct amdgpu_device *adev)
return 0;
}
-static int soc15_asic_reset(struct amdgpu_device *adev)
+static int soc15_mode2_reset(struct amdgpu_device *adev)
+{
+ if (!adev->powerplay.pp_funcs ||
+ !adev->powerplay.pp_funcs->asic_reset_mode_2)
+ return -ENOENT;
+
+ return adev->powerplay.pp_funcs->asic_reset_mode_2(adev->powerplay.pp_handle);
+}
+
+static enum amd_reset_method
+soc15_asic_reset_method(struct amdgpu_device *adev)
{
- int ret;
bool baco_reset;
switch (adev->asic_type) {
+ case CHIP_RAVEN:
+ return AMD_RESET_METHOD_MODE2;
case CHIP_VEGA10:
case CHIP_VEGA12:
soc15_asic_get_baco_capability(adev, &baco_reset);
@@ -493,11 +549,23 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
}
if (baco_reset)
- ret = soc15_asic_baco_reset(adev);
+ return AMD_RESET_METHOD_BACO;
else
- ret = soc15_asic_mode1_reset(adev);
+ return AMD_RESET_METHOD_MODE1;
+}
- return ret;
+static int soc15_asic_reset(struct amdgpu_device *adev)
+{
+ switch (soc15_asic_reset_method(adev)) {
+ case AMD_RESET_METHOD_BACO:
+ amdgpu_inc_vram_lost(adev);
+ return soc15_asic_baco_reset(adev);
+ case AMD_RESET_METHOD_MODE2:
+ return soc15_mode2_reset(adev);
+ default:
+ amdgpu_inc_vram_lost(adev);
+ return soc15_asic_mode1_reset(adev);
+ }
}
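Splitting method selection (soc15_asic_reset_method()) from execution (soc15_asic_reset()) lets callers ask which reset would be used without performing one. Note that only the BACO and mode-1 paths bump the VRAM-lost counter; the mode-2 path used on Raven is not expected to lose VRAM contents. A compact sketch of the same select-then-dispatch shape, with hypothetical names standing in for the amdgpu callbacks (the real selection keys on asic_type, not an is_apu flag):

    #include <stdio.h>

    enum reset_method { RESET_MODE1, RESET_MODE2, RESET_BACO };

    /* Hypothetical ASIC description; stands in for struct amdgpu_device. */
    struct asic {
        int is_apu;
        int baco_capable;
        unsigned long vram_lost_counter;
    };

    /* Selection is a pure function of the ASIC's capabilities... */
    static enum reset_method pick_reset_method(const struct asic *a)
    {
        if (a->is_apu)
            return RESET_MODE2;
        return a->baco_capable ? RESET_BACO : RESET_MODE1;
    }

    /* ...and execution dispatches on the selected method. */
    static int do_reset(struct asic *a)
    {
        switch (pick_reset_method(a)) {
        case RESET_BACO:
            a->vram_lost_counter++;    /* BACO loses VRAM contents */
            return 0;                  /* baco_reset(a) would go here */
        case RESET_MODE2:
            return 0;                  /* mode2_reset(a): VRAM preserved */
        default:
            a->vram_lost_counter++;    /* mode-1 also loses VRAM */
            return 0;                  /* mode1_reset(a) would go here */
        }
    }

    int main(void)
    {
        struct asic raven = { .is_apu = 1 };
        do_reset(&raven);
        printf("vram_lost=%lu\n", raven.vram_lost_counter); /* 0: mode-2 */
        return 0;
    }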
/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
@@ -581,26 +649,31 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
vega10_reg_base_init(adev);
break;
case CHIP_VEGA20:
vega20_reg_base_init(adev);
break;
+ case CHIP_ARCTURUS:
+ arct_reg_base_init(adev);
+ break;
default:
return -EINVAL;
}
- if (adev->asic_type == CHIP_VEGA20)
+ if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
adev->gmc.xgmi.supported = true;
if (adev->flags & AMD_IS_APU)
adev->nbio_funcs = &nbio_v7_0_funcs;
- else if (adev->asic_type == CHIP_VEGA20)
+ else if (adev->asic_type == CHIP_VEGA20 ||
+ adev->asic_type == CHIP_ARCTURUS)
adev->nbio_funcs = &nbio_v7_4_funcs;
else
adev->nbio_funcs = &nbio_v6_1_funcs;
- if (adev->asic_type == CHIP_VEGA20)
+ if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
adev->df_funcs = &df_v3_6_funcs;
else
adev->df_funcs = &df_v1_7_funcs;
@@ -672,6 +745,37 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
#endif
amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
break;
+ case CHIP_ARCTURUS:
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
+ break;
+ case CHIP_RENOIR:
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
+ amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
+ if (is_support_sw_smu(adev))
+ amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+ if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
+#else
+# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
+#endif
+ amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+ break;
default:
return -EINVAL;
}
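Both new ASICs follow the usual soc15 bring-up order: common, GMC, and IH first, then PSP/SMU, GFX and SDMA, with display and the VCN media block last (VCN 2.5 for Arcturus, VCN 2.0 for Renoir). The order matters because blocks are brought up in the order they are added. A toy sketch of that pattern, using a hypothetical ip_block type rather than the real struct amdgpu_ip_block_version:

    #include <stdio.h>

    /* Hypothetical, heavily simplified IP block descriptor. */
    struct ip_block {
        const char *name;
        int (*hw_init)(void);
    };

    #define MAX_BLOCKS 16

    static struct ip_block blocks[MAX_BLOCKS];
    static int num_blocks;

    /* Plays the role of amdgpu_device_ip_block_add(): append in init order. */
    static void ip_block_add(struct ip_block b)
    {
        if (num_blocks < MAX_BLOCKS)
            blocks[num_blocks++] = b;
    }

    static int common_init(void) { return 0; }
    static int gmc_init(void)    { return 0; }
    static int ih_init(void)     { return 0; }
    static int vcn_init(void)    { return 0; }

    int main(void)
    {
        /* Same shape as the CHIP_RENOIR case above, heavily abridged. */
        ip_block_add((struct ip_block){ "common", common_init });
        ip_block_add((struct ip_block){ "gmc",    gmc_init });
        ip_block_add((struct ip_block){ "ih",     ih_init });
        ip_block_add((struct ip_block){ "vcn",    vcn_init });

        for (int i = 0; i < num_blocks; i++)   /* init strictly in add order */
            if (blocks[i].hw_init())
                return 1;
        printf("%d blocks initialized\n", num_blocks);
        return 0;
    }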
@@ -688,7 +792,7 @@ static void soc15_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
- WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
+ WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
else
amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
@@ -714,14 +818,9 @@ static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
/* Set the 2 events that we wish to watch, defined above */
/* Reg 40 is # received msgs */
+ /* Reg 104 is # of posted requests sent */
perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
- /* Pre-VG20, Reg 104 is # of posted requests sent. On VG20 it's 108 */
- if (adev->asic_type == CHIP_VEGA20)
- perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK,
- EVENT1_SEL, 108);
- else
- perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK,
- EVENT1_SEL, 104);
+ perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
/* Write to enable desired perf counters */
WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
@@ -751,6 +850,55 @@ static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}
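With Vega20 split out into its own routine below, the generic path can unconditionally select event 104. The counters themselves are exposed as a 32-bit count register plus an upper/overflow field read back from the control register, and software reassembles the full value as low | (upper << 32). A self-contained sketch of that reassembly:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Reassemble a wide counter from its 32-bit count register and the
     * overflow bits captured in another register, as done above with
     * COUNTER0_UPPER / COUNTER1_UPPER. The field width is hypothetical.
     */
    static uint64_t assemble_counter(uint32_t low32, uint32_t upper_field)
    {
        return (uint64_t)low32 | ((uint64_t)upper_field << 32);
    }

    int main(void)
    {
        /* e.g. the low word wrapped once: upper = 1, low = 5 */
        uint64_t count = assemble_counter(5, 1);
        printf("count = %llu\n", (unsigned long long)count); /* 4294967301 */
        return 0;
    }

Note that the kernel code declares cnt0_of/cnt1_of as uint64_t before shifting; shifting a 32-bit value left by 32 would be undefined behavior in C.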
+static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
+ uint64_t *count1)
+{
+ uint32_t perfctr = 0;
+ uint64_t cnt0_of, cnt1_of;
+ int tmp;
+
+ /* This reports 0 on APUs, so return to avoid writing/reading registers
+ * that may or may not be different from their GPU counterparts
+ */
+ if (adev->flags & AMD_IS_APU)
+ return;
+
+ /* Set the 2 events that we wish to watch, defined above */
+ /* Reg 40 is # received msgs */
+ /* Reg 108 is # of posted requests sent on VG20 */
+ perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
+ EVENT0_SEL, 40);
+ perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
+ EVENT1_SEL, 108);
+
+ /* Write to enable desired perf counters */
+ WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr);
+ /* Zero out and enable the perf counters
+ * Write 0x5:
+ * Bit 0 = Start all counters(1)
+ * Bit 2 = Global counter reset enable(1)
+ */
+ WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
+
+ msleep(1000);
+
+ /* Load the shadow and disable the perf counters
+ * Write 0x2:
+ * Bit 0 = Stop counters(0)
+ * Bit 1 = Load the shadow counters(1)
+ */
+ WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);
+
+ /* Read register values to get any >32bit overflow */
+ tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3);
+ cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER);
+ cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER);
+
+ /* Get the values and add the overflow */
+ *count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32);
+ *count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32);
+}
+
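vega20_get_pcie_usage() is a near copy of the generic routine, switched to the TXCLK3 counter bank where event 108 (posted requests sent) lives on Vega20. The REG_SET_FIELD()/REG_GET_FIELD() helpers it uses are plain mask-and-shift operations; a sketch with hypothetical mask/shift constants (the real ones come from the generated register headers):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical layout: EVENT0_SEL in bits 0-7, EVENT1_SEL in bits 8-15. */
    #define EVENT0_SEL_MASK  0x000000ffu
    #define EVENT0_SEL_SHIFT 0
    #define EVENT1_SEL_MASK  0x0000ff00u
    #define EVENT1_SEL_SHIFT 8

    /* Same idea as REG_SET_FIELD(): clear the field, then or-in the value. */
    static uint32_t set_field(uint32_t reg, uint32_t mask, unsigned shift,
                              uint32_t val)
    {
        return (reg & ~mask) | ((val << shift) & mask);
    }

    /* Same idea as REG_GET_FIELD(): mask the field, then shift it down. */
    static uint32_t get_field(uint32_t reg, uint32_t mask, unsigned shift)
    {
        return (reg & mask) >> shift;
    }

    int main(void)
    {
        uint32_t perfctr = 0;

        perfctr = set_field(perfctr, EVENT0_SEL_MASK, EVENT0_SEL_SHIFT, 40);
        perfctr = set_field(perfctr, EVENT1_SEL_MASK, EVENT1_SEL_SHIFT, 108);
        /* prints perfctr = 0x00006c28, event1 = 108 */
        printf("perfctr = 0x%08x, event1 = %u\n", perfctr,
               get_field(perfctr, EVENT1_SEL_MASK, EVENT1_SEL_SHIFT));
        return 0;
    }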
static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
{
u32 sol_reg;
@@ -792,6 +940,7 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
.read_bios_from_rom = &soc15_read_bios_from_rom,
.read_register = &soc15_read_register,
.reset = &soc15_asic_reset,
+ .reset_method = &soc15_asic_reset_method,
.set_vga_state = &soc15_vga_set_state,
.get_xclk = &soc15_get_xclk,
.set_uvd_clocks = &soc15_set_uvd_clocks,
@@ -821,9 +970,10 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
.invalidate_hdp = &soc15_invalidate_hdp,
.need_full_reset = &soc15_need_full_reset,
.init_doorbell_index = &vega20_doorbell_index_init,
- .get_pcie_usage = &soc15_get_pcie_usage,
+ .get_pcie_usage = &vega20_get_pcie_usage,
.need_reset_on_init = &soc15_need_reset_on_init,
.get_pcie_replay_count = &soc15_get_pcie_replay_count,
+ .reset_method = &soc15_asic_reset_method
};
static int soc15_common_early_init(void *handle)
@@ -837,6 +987,8 @@ static int soc15_common_early_init(void *handle)
adev->smc_wreg = NULL;
adev->pcie_rreg = &soc15_pcie_rreg;
adev->pcie_wreg = &soc15_pcie_wreg;
+ adev->pcie_rreg64 = &soc15_pcie_rreg64;
+ adev->pcie_wreg64 = &soc15_pcie_wreg64;
adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
adev->didt_rreg = &soc15_didt_rreg;
@@ -993,6 +1145,53 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
}
break;
+ case CHIP_ARCTURUS:
+ adev->asic_funcs = &vega20_asic_funcs;
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_HDP_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS;
+ adev->pg_flags = 0;
+ adev->external_rev_id = adev->rev_id + 0x32;
+ break;
+ case CHIP_RENOIR:
+ adev->asic_funcs = &soc15_asic_funcs;
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_3D_CGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGLS |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_ROM_MGCG |
+ AMD_CG_SUPPORT_VCN_MGCG |
+ AMD_CG_SUPPORT_IH_CG |
+ AMD_CG_SUPPORT_ATHUB_LS |
+ AMD_CG_SUPPORT_ATHUB_MGCG |
+ AMD_CG_SUPPORT_DF_MGCG;
+ adev->pg_flags = AMD_PG_SUPPORT_SDMA |
+ AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_VCN_DPG;
+ adev->external_rev_id = adev->rev_id + 0x91;
+
+ if (adev->pm.pp_feature & PP_GFXOFF_MASK)
+ adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_RLC_SMU_HS;
+ break;
default:
/* FIXME: not supported yet */
return -EINVAL;
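The per-ASIC cg_flags/pg_flags words are plain bitmasks: each AMD_CG_SUPPORT_*/AMD_PG_SUPPORT_* constant is a single bit, features are enabled by or-ing them together (as in the Renoir block above, where the GFXOFF-dependent bits are added conditionally), and consumers test them with a bitwise and. A minimal sketch with hypothetical flag values:

    #include <stdio.h>

    /* Hypothetical single-bit feature flags, like AMD_CG_SUPPORT_*. */
    #define CG_SUPPORT_HDP_LS   (1u << 0)
    #define CG_SUPPORT_MC_MGCG  (1u << 1)
    #define PG_SUPPORT_GFX_PG   (1u << 2)

    int main(void)
    {
        unsigned cg_flags = CG_SUPPORT_HDP_LS | CG_SUPPORT_MC_MGCG;
        unsigned pg_flags = 0;
        int gfxoff_enabled = 1; /* stands in for pm.pp_feature & PP_GFXOFF_MASK */

        /* Conditional features are or-ed in afterwards, as for Renoir. */
        if (gfxoff_enabled)
            pg_flags |= PG_SUPPORT_GFX_PG;

        /* Consumers gate behaviour on single bits. */
        if (cg_flags & CG_SUPPORT_HDP_LS)
            printf("HDP light sleep enabled\n");
        if (pg_flags & PG_SUPPORT_GFX_PG)
            printf("GFX powergating enabled\n");
        return 0;
    }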
@@ -1038,21 +1237,18 @@ static void soc15_doorbell_range_init(struct amdgpu_device *adev)
int i;
struct amdgpu_ring *ring;
- /* Two reasons to skip
- * 1, Host driver already programmed them
- * 2, To avoid registers program violations in SR-IOV
- */
- if (!amdgpu_virt_support_skip_setting(adev)) {
+ /* sdma/ih doorbell ranges are programmed by the hypervisor */
+ if (!amdgpu_sriov_vf(adev)) {
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
adev->nbio_funcs->sdma_doorbell_range(adev, i,
ring->use_doorbell, ring->doorbell_index,
adev->doorbell_index.sdma_doorbell_range);
}
- }
- adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+ adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
adev->irq.ih.doorbell_index);
+ }
}
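The doorbell change replaces the old amdgpu_virt_support_skip_setting() test with a plain amdgpu_sriov_vf() check and pulls the IH doorbell range under the same guard: when running as an SR-IOV virtual function, the host has already programmed both the SDMA and IH ranges, and touching them from the guest would violate register-access restrictions. The shape is a simple capability guard; a hedged sketch with hypothetical names:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical device state; stands in for amdgpu_sriov_vf(adev). */
    struct dev_state {
        bool is_sriov_vf;
    };

    static void program_sdma_doorbells(void) { printf("sdma ranges set\n"); }
    static void program_ih_doorbells(void)   { printf("ih range set\n"); }

    /* Skip all doorbell-range programming when the hypervisor owns it. */
    static void doorbell_range_init(const struct dev_state *d)
    {
        if (d->is_sriov_vf)
            return; /* host driver already programmed the ranges */
        program_sdma_doorbells();
        program_ih_doorbells();
    }

    int main(void)
    {
        struct dev_state bare_metal = { .is_sriov_vf = false };
        struct dev_state vf = { .is_sriov_vf = true };

        doorbell_range_init(&bare_metal); /* programs both ranges */
        doorbell_range_init(&vf);         /* no-op under SR-IOV */
        return 0;
    }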
static int soc15_common_hw_init(void *handle)
@@ -1129,7 +1325,8 @@ static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable
{
uint32_t def, data;
- if (adev->asic_type == CHIP_VEGA20) {
+ if (adev->asic_type == CHIP_VEGA20 ||
+ adev->asic_type == CHIP_ARCTURUS) {
def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
@@ -1248,6 +1445,7 @@ static int soc15_common_set_clockgating_state(void *handle,
state == AMD_CG_STATE_GATE ? true : false);
break;
case CHIP_RAVEN:
+ case CHIP_RENOIR:
adev->nbio_funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
adev->nbio_funcs->update_medium_grain_light_sleep(adev,
@@ -1261,6 +1459,10 @@ static int soc15_common_set_clockgating_state(void *handle,
soc15_update_rom_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
break;
+ case CHIP_ARCTURUS:
+ soc15_update_hdp_light_sleep(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
+ break;
default:
break;
}