Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c  158
1 file changed, 107 insertions(+), 51 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index d0d966d6080a..28105e4af507 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -21,13 +21,13 @@
*
*/
#include "amdgpu.h"
+#include "amdgpu_ras.h"
#include "mmhub_v1_0.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "mmhub/mmhub_1_0_default.h"
-#include "athub/athub_1_0_offset.h"
-#include "athub/athub_1_0_sh_mask.h"
+#include "mmhub/mmhub_9_4_0_offset.h"
#include "vega10_enum.h"
#include "soc15_common.h"
@@ -35,6 +35,9 @@
#define mmDAGB0_CNTL_MISC2_RV 0x008f
#define mmDAGB0_CNTL_MISC2_RV_BASE_IDX 0
+#define EA_EDC_CNT_MASK 0x3
+#define EA_EDC_CNT_SHIFT 0x2
+
u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
{
u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE);
@@ -111,6 +114,9 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* Set default page address. */
value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
adev->vm_manager.vram_base_offset;
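Several hunks in this patch add the same early-return guard: under SR-IOV, the host driver (PF) owns this set of MMHUB registers, so a guest (VF) skips programming them. A minimal, compilable sketch of the idiom, with a toy device struct standing in for amdgpu_device and amdgpu_sriov_vf() (illustration only, not the amdgpu API):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for illustration only. */
    struct dev { bool sriov_vf; };

    static void write_reg(const char *name, unsigned int val)
    {
            printf("WREG %s = 0x%08x\n", name, val);
    }

    /* The guard idiom added throughout this patch: when running as a
     * VF, registers the host already programmed are left alone. */
    static void init_system_aperture(struct dev *d)
    {
            if (d->sriov_vf)
                    return;
            write_reg("MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR", 0x100000);
    }

    int main(void)
    {
            struct dev bare_metal = { .sriov_vf = false };
            struct dev vf = { .sriov_vf = true };

            init_system_aperture(&bare_metal); /* programs the register */
            init_system_aperture(&vf);         /* silently skipped */
            return 0;
    }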
@@ -156,6 +162,9 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* Setup L2 cache */
tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
@@ -163,7 +172,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
/* XXX for emulation, Refer to closed source code.*/
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
0);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
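The REG_SET_FIELD() calls here are read-modify-write updates on named bitfields. The macro below has the same shape as amdgpu's; the shift/mask constants are invented for illustration (the real ones come from the generated mmhub sh_mask headers):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical layout: a 1-bit field at bit 3 (illustration only). */
    #define VM_L2_CNTL__PDE_FAULT_CLASSIFICATION__SHIFT 0x3
    #define VM_L2_CNTL__PDE_FAULT_CLASSIFICATION_MASK   0x00000008U

    /* Clear the field's bits, then OR in the new value, masked and
     * shifted into position. */
    #define REG_SET_FIELD(orig, reg, field, val)                      \
            (((orig) & ~reg##__##field##_MASK) |                      \
             (reg##__##field##_MASK & ((val) << reg##__##field##__SHIFT)))

    int main(void)
    {
            uint32_t tmp = 0xffffffffu;

            /* The hunk above flips this field from 1 to 0; on an
             * all-ones register only bit 3 is cleared. */
            tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
            printf("0x%08x\n", tmp); /* prints 0xfffffff7 */
            return 0;
    }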
@@ -182,6 +191,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
}
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);
tmp = mmVM_L2_CNTL4_DEFAULT;
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
@@ -196,11 +206,16 @@ static void mmhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
tmp = RREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL, tmp);
}
static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
{
+ if (amdgpu_sriov_vf(adev))
+ return;
+
WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
0XFFFFFFFF);
WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
@@ -255,7 +270,8 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
block_size);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
+ !amdgpu_noretry);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i, tmp);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
@@ -337,11 +353,13 @@ void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
0);
WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
- /* Setup L2 cache */
- tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
- WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
- WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, 0);
+ if (!amdgpu_sriov_vf(adev)) {
+ /* Setup L2 cache */
+ tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, 0);
+ }
}
/**
@@ -353,6 +371,10 @@ void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
u32 tmp;
+
+ if (amdgpu_sriov_vf(adev))
+ return;
+
tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
@@ -390,7 +412,7 @@ void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
void mmhub_v1_0_init(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
hub->ctx0_ptb_addr_lo32 =
SOC15_REG_OFFSET(MMHUB, 0,
@@ -398,6 +420,8 @@ void mmhub_v1_0_init(struct amdgpu_device *adev)
hub->ctx0_ptb_addr_hi32 =
SOC15_REG_OFFSET(MMHUB, 0,
mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_SEM);
hub->vm_inv_eng0_req =
SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ);
hub->vm_inv_eng0_ack =
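The new vm_inv_eng0_sem offset exposes a per-engine semaphore register next to the existing request/ack pair, so the GMC code that consumes these offsets can serialize TLB invalidations with other clients of the engine. Below is a rough sketch of such a handshake against a toy register file; the exact protocol is an assumption for illustration, not the literal amdgpu sequence:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy registers standing in for vm_inv_eng0_{sem,req,ack}; the
     * driver reaches them through RREG32/WREG32 on the offsets above. */
    static uint32_t sem = 1, req, ack;

    static void flush_tlb(uint32_t inv_req)
    {
            /* 1. Acquire: poll the semaphore until the engine is free. */
            while (!(sem & 0x1))
                    ;
            /* 2. Kick the invalidation and wait for the ack. */
            req = inv_req;
            ack = inv_req; /* toy hardware acknowledges immediately */
            while (ack != inv_req)
                    ;
            /* 3. Release the semaphore for the next client. */
            sem = 0;
            printf("flushed, req=0x%x\n", inv_req);
    }

    int main(void)
    {
            flush_tlb(0x1f);
            return 0;
    }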
@@ -474,22 +498,6 @@ static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
WREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2, data2);
}
-static void athub_update_medium_grain_clock_gating(struct amdgpu_device *adev,
- bool enable)
-{
- uint32_t def, data;
-
- def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
-
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
- data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
- else
- data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
-
- if (def != data)
- WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
-}
-
static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
@@ -506,23 +514,6 @@ static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *ade
WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);
}
-static void athub_update_medium_grain_light_sleep(struct amdgpu_device *adev,
- bool enable)
-{
- uint32_t def, data;
-
- def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
-
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) &&
- (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
- data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
- else
- data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
-
- if(def != data)
- WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
-}
-
int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
@@ -534,14 +525,11 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
mmhub_v1_0_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
- athub_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
mmhub_v1_0_update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
- athub_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
break;
default:
break;
@@ -552,18 +540,86 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
{
- int data;
+ int data, data1;
if (amdgpu_sriov_vf(adev))
*flags = 0;
+ data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);
+
+ data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
+
/* AMD_CG_SUPPORT_MC_MGCG */
- data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
- if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
+ if ((data & ATC_L2_MISC_CG__ENABLE_MASK) &&
+ !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
*flags |= AMD_CG_SUPPORT_MC_MGCG;
/* AMD_CG_SUPPORT_MC_LS */
- data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);
if (data & ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
*flags |= AMD_CG_SUPPORT_MC_LS;
}
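With the ATHUB helpers gone, MGCG status is decoded purely from MMHUB's own registers: it is reported enabled only if the ATC L2 enable bit is set and none of the six DAGB0 per-path disable bits are. A standalone sketch of that decode (mask values invented for illustration; the real ones live in the sh_mask header):

    #include <stdint.h>
    #include <stdio.h>

    #define CG_ENABLE_MASK     0x01u /* stands in for ATC_L2_MISC_CG__ENABLE */
    #define DAGB_DISABLE_MASKS 0x3fu /* OR of the six DISABLE_*_CG bits */

    static int mgcg_enabled(uint32_t atc_l2_cg, uint32_t dagb_misc2)
    {
            /* Enabled only if the master enable is set and no per-path
             * disable bit is. */
            return (atc_l2_cg & CG_ENABLE_MASK) &&
                   !(dagb_misc2 & DAGB_DISABLE_MASKS);
    }

    int main(void)
    {
            printf("%d\n", mgcg_enabled(0x1, 0x00)); /* 1: fully gated */
            printf("%d\n", mgcg_enabled(0x1, 0x04)); /* 0: one path opted out */
            return 0;
    }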
+
+static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ int i;
+ uint32_t ea0_edc_cnt, ea0_edc_cnt2;
+ uint32_t ea1_edc_cnt, ea1_edc_cnt2;
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ /* EDC CNT will be cleared automatically after read */
+ ea0_edc_cnt = RREG32_SOC15(MMHUB, 0, mmMMEA0_EDC_CNT_VG20);
+ ea0_edc_cnt2 = RREG32_SOC15(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20);
+ ea1_edc_cnt = RREG32_SOC15(MMHUB, 0, mmMMEA1_EDC_CNT_VG20);
+ ea1_edc_cnt2 = RREG32_SOC15(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20);
+
+ /* error count of each error type is recorded by 2 bits,
+ * ce and ue count in EDC_CNT
+ */
+ for (i = 0; i < 5; i++) {
+ err_data->ce_count += (ea0_edc_cnt & EA_EDC_CNT_MASK);
+ err_data->ce_count += (ea1_edc_cnt & EA_EDC_CNT_MASK);
+ ea0_edc_cnt >>= EA_EDC_CNT_SHIFT;
+ ea1_edc_cnt >>= EA_EDC_CNT_SHIFT;
+ err_data->ue_count += (ea0_edc_cnt & EA_EDC_CNT_MASK);
+ err_data->ue_count += (ea1_edc_cnt & EA_EDC_CNT_MASK);
+ ea0_edc_cnt >>= EA_EDC_CNT_SHIFT;
+ ea1_edc_cnt >>= EA_EDC_CNT_SHIFT;
+ }
+ /* successive ue count in EDC_CNT */
+ for (i = 0; i < 5; i++) {
+ err_data->ue_count += (ea0_edc_cnt & EA_EDC_CNT_MASK);
+ err_data->ue_count += (ea1_edc_cnt & EA_EDC_CNT_MASK);
+ ea0_edc_cnt >>= EA_EDC_CNT_SHIFT;
+ ea1_edc_cnt >>= EA_EDC_CNT_SHIFT;
+ }
+
+ /* ce and ue count in EDC_CNT2 */
+ for (i = 0; i < 3; i++) {
+ err_data->ce_count += (ea0_edc_cnt2 & EA_EDC_CNT_MASK);
+ err_data->ce_count += (ea1_edc_cnt2 & EA_EDC_CNT_MASK);
+ ea0_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
+ ea1_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
+ err_data->ue_count += (ea0_edc_cnt2 & EA_EDC_CNT_MASK);
+ err_data->ue_count += (ea1_edc_cnt2 & EA_EDC_CNT_MASK);
+ ea0_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
+ ea1_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
+ }
+ /* successive ue count in EDC_CNT2 */
+ for (i = 0; i < 6; i++) {
+ err_data->ue_count += (ea0_edc_cnt2 & EA_EDC_CNT_MASK);
+ err_data->ue_count += (ea1_edc_cnt2 & EA_EDC_CNT_MASK);
+ ea0_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
+ ea1_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
+ }
+}
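Each EDC register packs a run of 2-bit saturating counters, which the loops above walk with EA_EDC_CNT_MASK and EA_EDC_CNT_SHIFT, attributing alternating fields to correctable (ce) and uncorrectable (ue) errors. A self-contained sketch of the same decode on a made-up register value:

    #include <stdint.h>
    #include <stdio.h>

    #define EA_EDC_CNT_MASK  0x3
    #define EA_EDC_CNT_SHIFT 0x2

    int main(void)
    {
            /* Sample EDC_CNT: fields ce0=3, ue0=2, ce1=1, rest zero. */
            uint32_t edc_cnt = 0x0000001b;
            unsigned int ce = 0, ue = 0;
            int i;

            /* Same shape as the first loop in the patch: ce and ue
             * counts interleave for the first five error types. */
            for (i = 0; i < 5; i++) {
                    ce += edc_cnt & EA_EDC_CNT_MASK;
                    edc_cnt >>= EA_EDC_CNT_SHIFT;
                    ue += edc_cnt & EA_EDC_CNT_MASK;
                    edc_cnt >>= EA_EDC_CNT_SHIFT;
            }
            printf("ce=%u ue=%u\n", ce, ue); /* prints ce=4 ue=2 */
            return 0;
    }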
+
+const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
+ .ras_late_init = amdgpu_mmhub_ras_late_init,
+ .query_ras_error_count = mmhub_v1_0_query_ras_error_count,
+};
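The exported mmhub_v1_0_funcs table is how this IP block plugs into the common RAS framework: core code dispatches through the function pointers instead of calling mmhub_v1_0 directly. A compilable sketch of that pattern with minimal stand-in types (not the real amdgpu structures):

    #include <stdio.h>

    struct ras_err_data { unsigned long ce_count, ue_count; };
    struct adev; /* opaque device, stands in for amdgpu_device */

    struct mmhub_funcs {
            void (*query_ras_error_count)(struct adev *a, void *err);
    };

    struct adev { const struct mmhub_funcs *mmhub_funcs; };

    static void fake_query(struct adev *a, void *err)
    {
            (void)a;
            ((struct ras_err_data *)err)->ce_count = 1; /* pretend one ce */
    }

    static const struct mmhub_funcs funcs = {
            .query_ras_error_count = fake_query,
    };

    int main(void)
    {
            struct adev a = { .mmhub_funcs = &funcs };
            struct ras_err_data e = { 0, 0 };

            /* Core code only sees the table, which is exactly why the
             * patch exports mmhub_v1_0_funcs. */
            if (a.mmhub_funcs && a.mmhub_funcs->query_ras_error_count)
                    a.mmhub_funcs->query_ras_error_count(&a, &e);
            printf("ce=%lu ue=%lu\n", e.ce_count, e.ue_count);
            return 0;
    }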