Diffstat (limited to 'drivers/gpu/drm/amd')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 111
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_module.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 31
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/log_helpers.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 102
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 106
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 54
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_audio.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 32
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_status.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/transform.h | 7
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 29
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h | 1
45 files changed, 569 insertions(+), 268 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 5afaf6016b4a..0b14b5373783 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -717,7 +717,7 @@ int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
struct amdgpu_queue_mgr *mgr);
int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
struct amdgpu_queue_mgr *mgr,
- int hw_ip, int instance, int ring,
+ u32 hw_ip, u32 instance, u32 ring,
struct amdgpu_ring **out_ring);
/*
@@ -1572,18 +1572,14 @@ struct amdgpu_device {
/* sdma */
struct amdgpu_sdma sdma;
- union {
- struct {
- /* uvd */
- struct amdgpu_uvd uvd;
+ /* uvd */
+ struct amdgpu_uvd uvd;
- /* vce */
- struct amdgpu_vce vce;
- };
+ /* vce */
+ struct amdgpu_vce vce;
- /* vcn */
- struct amdgpu_vcn vcn;
- };
+ /* vcn */
+ struct amdgpu_vcn vcn;
/* firmwares */
struct amdgpu_firmware firmware;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 47d1c132ac40..1e3e9be7d77e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -379,29 +379,50 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
+ unsigned long end_jiffies;
uint32_t sdma_base_addr;
+ uint32_t data;
m = get_sdma_mqd(mqd);
sdma_base_addr = get_sdma_base_addr(m);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
- m->sdma_rlc_virtual_addr);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE,
- m->sdma_rlc_rb_base);
+ end_jiffies = msecs_to_jiffies(2000) + jiffies;
+ while (true) {
+ data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+ break;
+ if (time_after(jiffies, end_jiffies))
+ return -ETIME;
+ usleep_range(500, 1000);
+ }
+ if (m->sdma_engine_id) {
+ data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
+ data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
+ RESUME_CTX, 0);
+ WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
+ } else {
+ data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
+ data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
+ RESUME_CTX, 0);
+ WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
+ }
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
+ m->sdma_rlc_doorbell);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+ m->sdma_rlc_virtual_addr);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
m->sdma_rlc_rb_base_hi);
-
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdma_rlc_rb_rptr_addr_lo);
-
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdma_rlc_rb_rptr_addr_hi);
-
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
- m->sdma_rlc_doorbell);
-
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
m->sdma_rlc_rb_cntl);
@@ -574,9 +595,9 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
}
WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index c21adf60a7f2..057e1ecd83ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -59,12 +59,6 @@ static bool check_atom_bios(uint8_t *bios, size_t size)
return false;
}
- tmp = bios[0x18] | (bios[0x19] << 8);
- if (bios[tmp + 0x14] != 0x0) {
- DRM_INFO("Not an x86 BIOS ROM\n");
- return false;
- }
-
bios_header_start = bios[0x48] | (bios[0x49] << 8);
if (!bios_header_start) {
DRM_INFO("Can't locate bios header\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 6c78623e1386..57abf7abd7a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -409,6 +409,10 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
if (candidate->robj == validated)
break;
+ /* We can't move pinned BOs here */
+ if (bo->pin_count)
+ continue;
+
other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
/* Check if this BO is in one of the domains we need space for */
@@ -1495,8 +1499,11 @@ out:
memset(wait, 0, sizeof(*wait));
wait->out.status = (r > 0);
wait->out.first_signaled = first;
- /* set return value 0 to indicate success */
- r = array[first]->error;
+
+ if (first < fence_count && array[first])
+ r = array[first]->error;
+ else
+ r = 0;
err_free_fence_array:
for (i = 0; i < fence_count; i++)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 2d792cdc094c..3573ecdb06ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3261,9 +3261,9 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
pm_pg_lock = (*pos >> 23) & 1;
if (*pos & (1ULL << 62)) {
- se_bank = (*pos >> 24) & 0x3FF;
- sh_bank = (*pos >> 34) & 0x3FF;
- instance_bank = (*pos >> 44) & 0x3FF;
+ se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
+ sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
+ instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
if (se_bank == 0x3FF)
se_bank = 0xFFFFFFFF;
@@ -3337,9 +3337,9 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
pm_pg_lock = (*pos >> 23) & 1;
if (*pos & (1ULL << 62)) {
- se_bank = (*pos >> 24) & 0x3FF;
- sh_bank = (*pos >> 34) & 0x3FF;
- instance_bank = (*pos >> 44) & 0x3FF;
+ se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
+ sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
+ instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
if (se_bank == 0x3FF)
se_bank = 0xFFFFFFFF;
@@ -3687,12 +3687,12 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
return -EINVAL;
/* decode offset */
- offset = (*pos & 0x7F);
- se = ((*pos >> 7) & 0xFF);
- sh = ((*pos >> 15) & 0xFF);
- cu = ((*pos >> 23) & 0xFF);
- wave = ((*pos >> 31) & 0xFF);
- simd = ((*pos >> 37) & 0xFF);
+ offset = (*pos & GENMASK_ULL(6, 0));
+ se = (*pos & GENMASK_ULL(14, 7)) >> 7;
+ sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
+ cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
+ wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
+ simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
/* switch to the specific se/sh/cu */
mutex_lock(&adev->grbm_idx_mutex);
@@ -3737,14 +3737,14 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
return -EINVAL;
/* decode offset */
- offset = (*pos & 0xFFF); /* in dwords */
- se = ((*pos >> 12) & 0xFF);
- sh = ((*pos >> 20) & 0xFF);
- cu = ((*pos >> 28) & 0xFF);
- wave = ((*pos >> 36) & 0xFF);
- simd = ((*pos >> 44) & 0xFF);
- thread = ((*pos >> 52) & 0xFF);
- bank = ((*pos >> 60) & 1);
+ offset = *pos & GENMASK_ULL(11, 0);
+ se = (*pos & GENMASK_ULL(19, 12)) >> 12;
+ sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
+ cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
+ wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
+ simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
+ thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
+ bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
if (!data)
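
The debugfs hunks above trade open-coded shift-and-mask decoding for GENMASK_ULL(), which makes each field's width explicit at the call site. A minimal illustration of the equivalence (not part of the patch), using the 10-bit se_bank field:

    /* old form: the 10-bit width is implied by the 0x3FF mask */
    se_bank = (*pos >> 24) & 0x3FF;

    /* new form: GENMASK_ULL(33, 24) sets bits 24..33, so masking first and
     * then shifting down yields the same 10-bit value, with the width
     * visible in the mask itself
     */
    se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;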
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index ec96bb1f9eaf..c2f414ffb2cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -536,7 +536,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
/* Raven */
- {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
{0, 0, 0}
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index bd5b8065c32e..2fa95aef74d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -268,9 +268,10 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
*
* Checks for fence activity.
*/
-static void amdgpu_fence_fallback(unsigned long arg)
+static void amdgpu_fence_fallback(struct timer_list *t)
{
- struct amdgpu_ring *ring = (void *)arg;
+ struct amdgpu_ring *ring = from_timer(ring, t,
+ fence_drv.fallback_timer);
amdgpu_fence_process(ring);
}
@@ -422,8 +423,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
atomic_set(&ring->fence_drv.last_seq, 0);
ring->fence_drv.initialized = false;
- setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
- (unsigned long)ring);
+ timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);
ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
spin_lock_init(&ring->fence_drv.lock);
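
The fence hunk above follows the kernel-wide timer API conversion: the callback now receives the struct timer_list itself and derives its container with from_timer(), instead of casting an unsigned long cookie registered through setup_timer(). A condensed sketch of the pattern as used here:

    /* register the handler; no opaque data argument is passed any more */
    timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

    /* inside the handler, recover the ring from the embedded timer member */
    struct amdgpu_ring *ring = from_timer(ring, t, fence_drv.fallback_timer);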
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index a418df1b9422..e87eedcc0da9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -63,6 +63,11 @@ retry:
flags, NULL, resv, 0, &bo);
if (r) {
if (r != -ERESTARTSYS) {
+ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+ flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ goto retry;
+ }
+
if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
goto retry;
@@ -323,7 +328,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
bo->tbo.ttm->pages);
if (r)
- goto unlock_mmap_sem;
+ goto release_object;
r = amdgpu_bo_reserve(bo, true);
if (r)
@@ -348,9 +353,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
free_pages:
release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages);
-unlock_mmap_sem:
- up_read(&current->mm->mmap_sem);
-
release_object:
drm_gem_object_put_unlocked(gobj);
@@ -556,9 +558,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
dev_err(&dev->pdev->dev,
- "va_address 0x%lX is in reserved area 0x%X\n",
- (unsigned long)args->va_address,
- AMDGPU_VA_RESERVED_SIZE);
+ "va_address 0x%LX is in reserved area 0x%LX\n",
+ args->va_address, AMDGPU_VA_RESERVED_SIZE);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 33535d347734..00e0ce10862f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -71,12 +71,6 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
{
struct amdgpu_gtt_mgr *mgr = man->priv;
- spin_lock(&mgr->lock);
- if (!drm_mm_clean(&mgr->mm)) {
- spin_unlock(&mgr->lock);
- return -EBUSY;
- }
-
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
kfree(mgr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index d6df5728df7f..6c570d4e4516 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -946,6 +946,10 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
struct amdgpu_device *adev = dev_get_drvdata(dev);
umode_t effective_mode = attr->mode;
+ /* no skipping for powerplay */
+ if (adev->powerplay.cgs_device)
+ return effective_mode;
+
/* Skip limit attributes if DPM is not enabled */
if (!adev->pm.dpm_enabled &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 90af8e82b16a..ae9c106979d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -169,10 +169,14 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
int flags)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
+ struct dma_buf *buf;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
return ERR_PTR(-EPERM);
- return drm_gem_prime_export(dev, gobj, flags);
+ buf = drm_gem_prime_export(dev, gobj, flags);
+ if (!IS_ERR(buf))
+ buf->file->f_mapping = dev->anon_inode->i_mapping;
+ return buf;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
index 190e28cb827e..93d86619e802 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
@@ -63,7 +63,7 @@ static int amdgpu_update_cached_map(struct amdgpu_queue_mapper *mapper,
static int amdgpu_identity_map(struct amdgpu_device *adev,
struct amdgpu_queue_mapper *mapper,
- int ring,
+ u32 ring,
struct amdgpu_ring **out_ring)
{
switch (mapper->hw_ip) {
@@ -121,7 +121,7 @@ static enum amdgpu_ring_type amdgpu_hw_ip_to_ring_type(int hw_ip)
static int amdgpu_lru_map(struct amdgpu_device *adev,
struct amdgpu_queue_mapper *mapper,
- int user_ring, bool lru_pipe_order,
+ u32 user_ring, bool lru_pipe_order,
struct amdgpu_ring **out_ring)
{
int r, i, j;
@@ -208,7 +208,7 @@ int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
*/
int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
struct amdgpu_queue_mgr *mgr,
- int hw_ip, int instance, int ring,
+ u32 hw_ip, u32 instance, u32 ring,
struct amdgpu_ring **out_ring)
{
int r, ip_num_rings;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index aa914256b4bc..bae77353447b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -94,7 +94,8 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_MMHUB 1
/* hardcode that limit for now */
-#define AMDGPU_VA_RESERVED_SIZE (8 << 20)
+#define AMDGPU_VA_RESERVED_SIZE (8ULL << 20)
+
/* max vmids dedicated for process */
#define AMDGPU_VM_MAX_RESERVED_VMID 1
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 26e900627971..4acca92f6a52 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -68,11 +68,6 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
struct amdgpu_vram_mgr *mgr = man->priv;
spin_lock(&mgr->lock);
- if (!drm_mm_clean(&mgr->mm)) {
- spin_unlock(&mgr->lock);
- return -EBUSY;
- }
-
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
kfree(mgr);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 793b1470284d..a296f7bbe57c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1023,22 +1023,101 @@ static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] =
{mmPA_SC_RASTER_CONFIG_1, true},
};
-static uint32_t cik_read_indexed_register(struct amdgpu_device *adev,
- u32 se_num, u32 sh_num,
- u32 reg_offset)
+
+static uint32_t cik_get_register_value(struct amdgpu_device *adev,
+ bool indexed, u32 se_num,
+ u32 sh_num, u32 reg_offset)
{
- uint32_t val;
+ if (indexed) {
+ uint32_t val;
+ unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
+ unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
+
+ switch (reg_offset) {
+ case mmCC_RB_BACKEND_DISABLE:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
+ case mmGC_USER_RB_BACKEND_DISABLE:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
+ case mmPA_SC_RASTER_CONFIG:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
+ case mmPA_SC_RASTER_CONFIG_1:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
+ }
- mutex_lock(&adev->grbm_idx_mutex);
- if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+ mutex_lock(&adev->grbm_idx_mutex);
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
- val = RREG32(reg_offset);
+ val = RREG32(reg_offset);
- if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- return val;
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ return val;
+ } else {
+ unsigned idx;
+
+ switch (reg_offset) {
+ case mmGB_ADDR_CONFIG:
+ return adev->gfx.config.gb_addr_config;
+ case mmMC_ARB_RAMCFG:
+ return adev->gfx.config.mc_arb_ramcfg;
+ case mmGB_TILE_MODE0:
+ case mmGB_TILE_MODE1:
+ case mmGB_TILE_MODE2:
+ case mmGB_TILE_MODE3:
+ case mmGB_TILE_MODE4:
+ case mmGB_TILE_MODE5:
+ case mmGB_TILE_MODE6:
+ case mmGB_TILE_MODE7:
+ case mmGB_TILE_MODE8:
+ case mmGB_TILE_MODE9:
+ case mmGB_TILE_MODE10:
+ case mmGB_TILE_MODE11:
+ case mmGB_TILE_MODE12:
+ case mmGB_TILE_MODE13:
+ case mmGB_TILE_MODE14:
+ case mmGB_TILE_MODE15:
+ case mmGB_TILE_MODE16:
+ case mmGB_TILE_MODE17:
+ case mmGB_TILE_MODE18:
+ case mmGB_TILE_MODE19:
+ case mmGB_TILE_MODE20:
+ case mmGB_TILE_MODE21:
+ case mmGB_TILE_MODE22:
+ case mmGB_TILE_MODE23:
+ case mmGB_TILE_MODE24:
+ case mmGB_TILE_MODE25:
+ case mmGB_TILE_MODE26:
+ case mmGB_TILE_MODE27:
+ case mmGB_TILE_MODE28:
+ case mmGB_TILE_MODE29:
+ case mmGB_TILE_MODE30:
+ case mmGB_TILE_MODE31:
+ idx = (reg_offset - mmGB_TILE_MODE0);
+ return adev->gfx.config.tile_mode_array[idx];
+ case mmGB_MACROTILE_MODE0:
+ case mmGB_MACROTILE_MODE1:
+ case mmGB_MACROTILE_MODE2:
+ case mmGB_MACROTILE_MODE3:
+ case mmGB_MACROTILE_MODE4:
+ case mmGB_MACROTILE_MODE5:
+ case mmGB_MACROTILE_MODE6:
+ case mmGB_MACROTILE_MODE7:
+ case mmGB_MACROTILE_MODE8:
+ case mmGB_MACROTILE_MODE9:
+ case mmGB_MACROTILE_MODE10:
+ case mmGB_MACROTILE_MODE11:
+ case mmGB_MACROTILE_MODE12:
+ case mmGB_MACROTILE_MODE13:
+ case mmGB_MACROTILE_MODE14:
+ case mmGB_MACROTILE_MODE15:
+ idx = (reg_offset - mmGB_MACROTILE_MODE0);
+ return adev->gfx.config.macrotile_mode_array[idx];
+ default:
+ return RREG32(reg_offset);
+ }
+ }
}
static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
@@ -1048,13 +1127,13 @@ static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
*value = 0;
for (i = 0; i < ARRAY_SIZE(cik_allowed_read_registers); i++) {
+ bool indexed = cik_allowed_read_registers[i].grbm_indexed;
+
if (reg_offset != cik_allowed_read_registers[i].reg_offset)
continue;
- *value = cik_allowed_read_registers[i].grbm_indexed ?
- cik_read_indexed_register(adev, se_num,
- sh_num, reg_offset) :
- RREG32(reg_offset);
+ *value = cik_get_register_value(adev, indexed, se_num, sh_num,
+ reg_offset);
return 0;
}
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 00868764a0dd..419ba0ce7ee5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1819,6 +1819,22 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
adev->gfx.config.backend_enable_mask,
num_rb_pipes);
}
+
+ /* cache the values for userspace */
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+ adev->gfx.config.rb_config[i][j].rb_backend_disable =
+ RREG32(mmCC_RB_BACKEND_DISABLE);
+ adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
+ RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+ adev->gfx.config.rb_config[i][j].raster_config =
+ RREG32(mmPA_SC_RASTER_CONFIG);
+ adev->gfx.config.rb_config[i][j].raster_config_1 =
+ RREG32(mmPA_SC_RASTER_CONFIG_1);
+ }
+ }
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
}
@@ -4670,6 +4686,14 @@ static int gfx_v7_0_sw_fini(void *handle)
gfx_v7_0_cp_compute_fini(adev);
gfx_v7_0_rlc_fini(adev);
gfx_v7_0_mec_fini(adev);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (adev->gfx.rlc.cp_table_size) {
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+ }
gfx_v7_0_free_microcode(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index b8002ac3e536..9ecdf621a74a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -2118,6 +2118,15 @@ static int gfx_v8_0_sw_fini(void *handle)
gfx_v8_0_mec_fini(adev);
gfx_v8_0_rlc_fini(adev);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if ((adev->asic_type == CHIP_CARRIZO) ||
+ (adev->asic_type == CHIP_STONEY)) {
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+ }
gfx_v8_0_free_microcode(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 7f15bb2c5233..da43813d67a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -207,6 +207,12 @@ static const u32 golden_settings_gc_9_1_rv1[] =
SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x01bd9f33, 0x00000800
};
+static const u32 golden_settings_gc_9_x_common[] =
+{
+ SOC15_REG_OFFSET(GC, 0, mmGRBM_CAM_INDEX), 0xffffffff, 0x00000000,
+ SOC15_REG_OFFSET(GC, 0, mmGRBM_CAM_DATA), 0xffffffff, 0x2544c382
+};
+
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
@@ -242,6 +248,9 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
default:
break;
}
+
+ amdgpu_program_register_sequence(adev, golden_settings_gc_9_x_common,
+ (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}
static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
@@ -988,12 +997,22 @@ static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
+static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
+ uint32_t wave, uint32_t thread,
+ uint32_t start, uint32_t size,
+ uint32_t *dst)
+{
+ wave_read_regs(
+ adev, simd, wave, thread,
+ start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
+}
static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v9_0_select_se_sh,
.read_wave_data = &gfx_v9_0_read_wave_data,
.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
+ .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
};
static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
@@ -1449,6 +1468,14 @@ static int gfx_v9_0_sw_fini(void *handle)
gfx_v9_0_mec_fini(adev);
gfx_v9_0_ngg_fini(adev);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (adev->asic_type == CHIP_RAVEN) {
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+ }
gfx_v9_0_free_microcode(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 621699331e09..c8f1aebeac7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -392,7 +392,16 @@ static int gmc_v9_0_early_init(void *handle)
static int gmc_v9_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 3, 3 };
+ /*
+ * The latest engine allocation on gfx9 is:
+ * Engine 0, 1: idle
+ * Engine 2, 3: firmware
+ * Engine 4~13: amdgpu ring, subject to change when ring number changes
+ * Engine 14~15: idle
+ * Engine 16: kfd tlb invalidation
+ * Engine 17: Gart flushes
+ */
+ unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
unsigned i;
for(i = 0; i < adev->num_rings; ++i) {
@@ -405,9 +414,9 @@ static int gmc_v9_0_late_init(void *handle)
ring->funcs->vmhub);
}
- /* Engine 17 is used for GART flushes */
+ /* Engine 16 is used for KFD and 17 for GART flushes */
for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
- BUG_ON(vm_inv_eng[i] > 17);
+ BUG_ON(vm_inv_eng[i] > 16);
return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 1eb4d79d6e30..0450ac5ba6b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -1175,7 +1175,7 @@ static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->uvd.irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 1;
adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 6c5a9cab55de..f744caeaee04 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -24,6 +24,7 @@
#include <linux/sched.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
+#include <linux/printk.h>
#include "kfd_priv.h"
#define KFD_DRIVER_AUTHOR "AMD Inc. and others"
@@ -132,7 +133,7 @@ static void __exit kfd_module_exit(void)
kfd_process_destroy_wq();
kfd_topology_shutdown();
kfd_chardev_exit();
- dev_info(kfd_device, "Removed module\n");
+ pr_info("amdkfd: Removed module\n");
}
module_init(kfd_module_init);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 4859d263fa2a..4728fad3fd74 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -202,8 +202,8 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
struct cik_sdma_rlc_registers *m;
m = get_sdma_mqd(mqd);
- m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) <<
- SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
+ m->sdma_rlc_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
+ << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
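
The RB_SIZE fix above accounts for ffs() being 1-based: for a power-of-two ring, ffs(n) returns log2(n) + 1, while the SDMA RB_SIZE field expects log2 of the queue size in dwords. A worked example with an illustrative local name (rb_size_field is not in the patch):

    /* a 1 MiB queue is 262144 dwords; ffs(262144) == 19, but the field
     * must hold log2(262144) == 18, hence the "- 1" added by the patch
     */
    rb_size_field = ffs(q->queue_size / sizeof(unsigned int)) - 1;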
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 2bec902fc939..a3f1e62c60ba 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -191,6 +191,24 @@ int pqm_create_queue(struct process_queue_manager *pqm,
switch (type) {
case KFD_QUEUE_TYPE_SDMA:
+ if (dev->dqm->queue_count >=
+ CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) {
+ pr_err("Over-subscription is not allowed for SDMA.\n");
+ retval = -EPERM;
+ goto err_create_queue;
+ }
+
+ retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
+ if (retval != 0)
+ goto err_create_queue;
+ pqn->q = q;
+ pqn->kq = NULL;
+ retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
+ &q->properties.vmid);
+ pr_debug("DQM returned %d for create_queue\n", retval);
+ print_queue(q);
+ break;
+
case KFD_QUEUE_TYPE_COMPUTE:
/* check if there is over subscription */
if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 889ed24084e8..f71fe6d2ddda 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -520,7 +520,8 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
aconnector = to_amdgpu_dm_connector(connector);
- if (aconnector->dc_link->type == dc_connection_mst_branch) {
+ if (aconnector->dc_link->type == dc_connection_mst_branch &&
+ aconnector->mst_mgr.aux) {
DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
aconnector, aconnector->base.base.id);
@@ -677,6 +678,10 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
mutex_lock(&aconnector->hpd_lock);
dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+
+ if (aconnector->fake_enable && aconnector->dc_link->local_sink)
+ aconnector->fake_enable = false;
+
aconnector->dc_sink = NULL;
amdgpu_dm_update_connector_after_detect(aconnector);
mutex_unlock(&aconnector->hpd_lock);
@@ -711,7 +716,6 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);
- drm_atomic_state_put(adev->dm.cached_state);
adev->dm.cached_state = NULL;
amdgpu_dm_irq_resume_late(adev);
@@ -2704,7 +2708,7 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
.link = aconnector->dc_link,
.sink_signal = SIGNAL_TYPE_VIRTUAL
};
- struct edid *edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
+ struct edid *edid;
if (!aconnector->base.edid_blob_ptr ||
!aconnector->base.edid_blob_ptr->data) {
@@ -2716,6 +2720,8 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
return;
}
+ edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
+
aconnector->edid = edid;
aconnector->dc_em_sink = dc_link_add_remote_sink(
@@ -4193,13 +4199,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
+ if (!dm_new_crtc_state->stream)
+ continue;
+
status = dc_stream_get_status(dm_new_crtc_state->stream);
WARN_ON(!status);
WARN_ON(!status->plane_count);
- if (!dm_new_crtc_state->stream)
- continue;
-
/*TODO How it works with MPO ?*/
if (!dc_commit_planes_to_stream(
dm->dc,
@@ -4253,7 +4259,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_hw_done(state);
if (wait_for_vblank)
- drm_atomic_helper_wait_for_vblanks(dev, state);
+ drm_atomic_helper_wait_for_flip_done(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
}
@@ -4332,9 +4338,11 @@ void dm_restore_drm_connector_state(struct drm_device *dev,
return;
disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
- acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
+ if (!disconnected_acrtc)
+ return;
- if (!disconnected_acrtc || !acrtc_state->stream)
+ acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
+ if (!acrtc_state->stream)
return;
/*
@@ -4455,7 +4463,7 @@ static int dm_update_crtcs_state(struct dc *dc,
}
}
- if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ if (enable && dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
new_crtc_state->mode_changed = false;
@@ -4709,7 +4717,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
} else {
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
+ !new_crtc_state->color_mgmt_changed)
continue;
if (!new_crtc_state->enable)
diff --git a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
index 785b943b60ed..6e43168fbdd6 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
@@ -75,6 +75,9 @@ void dc_conn_log(struct dc_context *ctx,
if (signal == signal_type_info_tbl[i].type)
break;
+ if (i == NUM_ELEMENTS(signal_type_info_tbl))
+ goto fail;
+
dm_logger_append(&entry, "[%s][ConnIdx:%d] ",
signal_type_info_tbl[i].name,
link->link_index);
@@ -96,6 +99,8 @@ void dc_conn_log(struct dc_context *ctx,
dm_logger_append(&entry, "^\n");
dm_helpers_dc_conn_log(ctx, &entry, event);
+
+fail:
dm_logger_close(&entry);
va_end(args);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index aaaebd06d7ee..86e6438c5cf3 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -249,7 +249,7 @@ static enum bp_result bios_parser_get_dst_obj(struct dc_bios *dcb,
struct graphics_object_id *dest_object_id)
{
uint32_t number;
- uint16_t *id;
+ uint16_t *id = NULL;
ATOM_OBJECT *object;
struct bios_parser *bp = BP_FROM_DCB(dcb);
@@ -260,7 +260,7 @@ static enum bp_result bios_parser_get_dst_obj(struct dc_bios *dcb,
number = get_dest_obj_list(bp, object, &id);
- if (number <= index)
+ if (number <= index || !id)
return BP_RESULT_BADINPUT;
*dest_object_id = object_id_from_bios_object_id(id[index]);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index fe63f5894d43..7240db2e6f09 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -121,6 +121,10 @@ static bool create_links(
goto failed_alloc;
}
+ link->link_index = dc->link_count;
+ dc->links[dc->link_count] = link;
+ dc->link_count++;
+
link->ctx = dc->ctx;
link->dc = dc;
link->connector_signal = SIGNAL_TYPE_VIRTUAL;
@@ -129,6 +133,13 @@ static bool create_links(
link->link_id.enum_id = ENUM_ID_1;
link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
+ if (!link->link_enc) {
+ BREAK_TO_DEBUGGER();
+ goto failed_alloc;
+ }
+
+ link->link_status.dpcd_caps = &link->dpcd_caps;
+
enc_init.ctx = dc->ctx;
enc_init.channel = CHANNEL_ID_UNKNOWN;
enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
@@ -138,10 +149,6 @@ static bool create_links(
enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
enc_init.encoder.enum_id = ENUM_ID_1;
virtual_link_encoder_construct(link->link_enc, &enc_init);
-
- link->link_index = dc->link_count;
- dc->links[dc->link_count] = link;
- dc->link_count++;
}
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 0602610489d7..e27ed4a45265 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -480,22 +480,6 @@ static void detect_dp(
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
detect_dp_sink_caps(link);
- /* DP active dongles */
- if (is_dp_active_dongle(link)) {
- link->type = dc_connection_active_dongle;
- if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
- /*
- * active dongle unplug processing for short irq
- */
- link_disconnect_sink(link);
- return;
- }
-
- if (link->dpcd_caps.dongle_type !=
- DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
- *converter_disable_audio = true;
- }
- }
if (is_mst_supported(link)) {
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
link->type = dc_connection_mst_branch;
@@ -535,6 +519,22 @@ static void detect_dp(
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
}
}
+
+ if (link->type != dc_connection_mst_branch &&
+ is_dp_active_dongle(link)) {
+ /* DP active dongles */
+ link->type = dc_connection_active_dongle;
+ if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
+ /*
+ * active dongle unplug processing for short irq
+ */
+ link_disconnect_sink(link);
+ return;
+ }
+
+ if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+ *converter_disable_audio = true;
+ }
} else {
/* DP passive dongles */
sink_caps->signal = dp_passive_dongle_detection(link->ddc,
@@ -1801,12 +1801,75 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
link->link_enc->funcs->disable_output(link->link_enc, signal, link);
}
+bool dp_active_dongle_validate_timing(
+ const struct dc_crtc_timing *timing,
+ const struct dc_dongle_caps *dongle_caps)
+{
+ unsigned int required_pix_clk = timing->pix_clk_khz;
+
+ if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
+ dongle_caps->extendedCapValid == false)
+ return true;
+
+ /* Check Pixel Encoding */
+ switch (timing->pixel_encoding) {
+ case PIXEL_ENCODING_RGB:
+ case PIXEL_ENCODING_YCBCR444:
+ break;
+ case PIXEL_ENCODING_YCBCR422:
+ if (!dongle_caps->is_dp_hdmi_ycbcr422_pass_through)
+ return false;
+ break;
+ case PIXEL_ENCODING_YCBCR420:
+ if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through)
+ return false;
+ break;
+ default:
+ /* Invalid Pixel Encoding*/
+ return false;
+ }
+
+
+ /* Check Color Depth and Pixel Clock */
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ required_pix_clk /= 2;
+
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ case COLOR_DEPTH_888:
+ /*888 and 666 should always be supported*/
+ break;
+ case COLOR_DEPTH_101010:
+ if (dongle_caps->dp_hdmi_max_bpc < 10)
+ return false;
+ required_pix_clk = required_pix_clk * 10 / 8;
+ break;
+ case COLOR_DEPTH_121212:
+ if (dongle_caps->dp_hdmi_max_bpc < 12)
+ return false;
+ required_pix_clk = required_pix_clk * 12 / 8;
+ break;
+
+ case COLOR_DEPTH_141414:
+ case COLOR_DEPTH_161616:
+ default:
+ /* These color depths are currently not supported */
+ return false;
+ }
+
+ if (required_pix_clk > dongle_caps->dp_hdmi_max_pixel_clk)
+ return false;
+
+ return true;
+}
+
enum dc_status dc_link_validate_mode_timing(
const struct dc_stream_state *stream,
struct dc_link *link,
const struct dc_crtc_timing *timing)
{
uint32_t max_pix_clk = stream->sink->dongle_max_pix_clk;
+ struct dc_dongle_caps *dongle_caps = &link->link_status.dpcd_caps->dongle_caps;
/* A hack to avoid failing any modes for EDID override feature on
* topology change such as lower quality cable for DP or different dongle
@@ -1814,8 +1877,13 @@ enum dc_status dc_link_validate_mode_timing(
if (link->remote_sinks[0])
return DC_OK;
+ /* Passive Dongle */
if (0 != max_pix_clk && timing->pix_clk_khz > max_pix_clk)
- return DC_EXCEED_DONGLE_MAX_CLK;
+ return DC_EXCEED_DONGLE_CAP;
+
+ /* Active Dongle*/
+ if (!dp_active_dongle_validate_timing(timing, dongle_caps))
+ return DC_EXCEED_DONGLE_CAP;
switch (stream->signal) {
case SIGNAL_TYPE_EDP:
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index ced42484dcfc..e6bf05d76a94 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1512,7 +1512,7 @@ static bool hpd_rx_irq_check_link_loss_status(
struct dc_link *link,
union hpd_irq_data *hpd_irq_dpcd_data)
{
- uint8_t irq_reg_rx_power_state;
+ uint8_t irq_reg_rx_power_state = 0;
enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
union lane_status lane_status;
uint32_t lane;
@@ -1524,60 +1524,55 @@ static bool hpd_rx_irq_check_link_loss_status(
if (link->cur_link_settings.lane_count == 0)
return return_code;
- /*1. Check that we can handle interrupt: Not in FS DOS,
- * Not in "Display Timeout" state, Link is trained.
- */
- dpcd_result = core_link_read_dpcd(link,
- DP_SET_POWER,
- &irq_reg_rx_power_state,
- sizeof(irq_reg_rx_power_state));
+ /*1. Check that Link Status changed, before re-training.*/
- if (dpcd_result != DC_OK) {
- irq_reg_rx_power_state = DP_SET_POWER_D0;
- dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
- "%s: DPCD read failed to obtain power state.\n",
- __func__);
+ /*parse lane status*/
+ for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
+ /* check status of lanes 0,1
+ * changed DpcdAddress_Lane01Status (0x202)
+ */
+ lane_status.raw = get_nibble_at_index(
+ &hpd_irq_dpcd_data->bytes.lane01_status.raw,
+ lane);
+
+ if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
+ !lane_status.bits.CR_DONE_0 ||
+ !lane_status.bits.SYMBOL_LOCKED_0) {
+ /* if one of the channel equalization, clock
+ * recovery or symbol lock is dropped
+ * consider it as (link has been
+ * dropped) dp sink status has changed
+ */
+ sink_status_changed = true;
+ break;
+ }
}
- if (irq_reg_rx_power_state == DP_SET_POWER_D0) {
-
- /*2. Check that Link Status changed, before re-training.*/
-
- /*parse lane status*/
- for (lane = 0;
- lane < link->cur_link_settings.lane_count;
- lane++) {
+ /* Check interlane align.*/
+ if (sink_status_changed ||
+ !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
- /* check status of lanes 0,1
- * changed DpcdAddress_Lane01Status (0x202)*/
- lane_status.raw = get_nibble_at_index(
- &hpd_irq_dpcd_data->bytes.lane01_status.raw,
- lane);
-
- if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
- !lane_status.bits.CR_DONE_0 ||
- !lane_status.bits.SYMBOL_LOCKED_0) {
- /* if one of the channel equalization, clock
- * recovery or symbol lock is dropped
- * consider it as (link has been
- * dropped) dp sink status has changed*/
- sink_status_changed = true;
- break;
- }
+ dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+ "%s: Link Status changed.\n", __func__);
- }
+ return_code = true;
- /* Check interlane align.*/
- if (sink_status_changed ||
- !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.
- INTERLANE_ALIGN_DONE) {
+ /*2. Check that we can handle interrupt: Not in FS DOS,
+ * Not in "Display Timeout" state, Link is trained.
+ */
+ dpcd_result = core_link_read_dpcd(link,
+ DP_SET_POWER,
+ &irq_reg_rx_power_state,
+ sizeof(irq_reg_rx_power_state));
+ if (dpcd_result != DC_OK) {
dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
- "%s: Link Status changed.\n",
+ "%s: DPCD read failed to obtain power state.\n",
__func__);
-
- return_code = true;
+ } else {
+ if (irq_reg_rx_power_state != DP_SET_POWER_D0)
+ return_code = false;
}
}
@@ -2062,6 +2057,24 @@ bool is_dp_active_dongle(const struct dc_link *link)
(dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER);
}
+static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc)
+{
+ switch (bpc) {
+ case DOWN_STREAM_MAX_8BPC:
+ return 8;
+ case DOWN_STREAM_MAX_10BPC:
+ return 10;
+ case DOWN_STREAM_MAX_12BPC:
+ return 12;
+ case DOWN_STREAM_MAX_16BPC:
+ return 16;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
static void get_active_converter_info(
uint8_t data, struct dc_link *link)
{
@@ -2131,7 +2144,8 @@ static void get_active_converter_info(
hdmi_caps.bits.YCrCr420_CONVERSION;
link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc =
- hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT;
+ translate_dpcd_max_bpc(
+ hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
link->dpcd_caps.dongle_caps.extendedCapValid = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index d1cdf9f8853d..b7422d3b71ef 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -516,13 +516,11 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
right_view = (plane_state->rotation == ROTATION_ANGLE_270) != sec_split;
if (right_view) {
- data->viewport.width /= 2;
- data->viewport_c.width /= 2;
- data->viewport.x += data->viewport.width;
- data->viewport_c.x += data->viewport_c.width;
+ data->viewport.x += data->viewport.width / 2;
+ data->viewport_c.x += data->viewport_c.width / 2;
/* Ceil offset pipe */
- data->viewport.width += data->viewport.width % 2;
- data->viewport_c.width += data->viewport_c.width % 2;
+ data->viewport.width = (data->viewport.width + 1) / 2;
+ data->viewport_c.width = (data->viewport_c.width + 1) / 2;
} else {
data->viewport.width /= 2;
data->viewport_c.width /= 2;
@@ -580,14 +578,12 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct view *recout_skip
if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->plane_state ==
pipe_ctx->plane_state) {
if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
- pipe_ctx->plane_res.scl_data.recout.height /= 2;
- pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height;
+ pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height / 2;
/* Floor primary pipe, ceil 2ndary pipe */
- pipe_ctx->plane_res.scl_data.recout.height += pipe_ctx->plane_res.scl_data.recout.height % 2;
+ pipe_ctx->plane_res.scl_data.recout.height = (pipe_ctx->plane_res.scl_data.recout.height + 1) / 2;
} else {
- pipe_ctx->plane_res.scl_data.recout.width /= 2;
- pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width;
- pipe_ctx->plane_res.scl_data.recout.width += pipe_ctx->plane_res.scl_data.recout.width % 2;
+ pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width / 2;
+ pipe_ctx->plane_res.scl_data.recout.width = (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
}
} else if (pipe_ctx->bottom_pipe &&
pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state) {
@@ -856,6 +852,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
+
/* Taps calculations */
if (pipe_ctx->plane_res.xfm != NULL)
res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
@@ -864,16 +861,21 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->plane_res.dpp != NULL)
res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
-
if (!res) {
/* Try 24 bpp linebuffer */
pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_24BPP;
- res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
- pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+ if (pipe_ctx->plane_res.xfm != NULL)
+ res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
+ pipe_ctx->plane_res.xfm,
+ &pipe_ctx->plane_res.scl_data,
+ &plane_state->scaling_quality);
- res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
- pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+ if (pipe_ctx->plane_res.dpp != NULL)
+ res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
+ pipe_ctx->plane_res.dpp,
+ &pipe_ctx->plane_res.scl_data,
+ &plane_state->scaling_quality);
}
if (res)
@@ -991,8 +993,10 @@ static struct pipe_ctx *acquire_free_pipe_for_stream(
head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
- if (!head_pipe)
+ if (!head_pipe) {
ASSERT(0);
+ return NULL;
+ }
if (!head_pipe->plane_state)
return head_pipe;
@@ -1447,11 +1451,16 @@ static struct stream_encoder *find_first_free_match_stream_enc_for_link(
static struct audio *find_first_free_audio(
struct resource_context *res_ctx,
- const struct resource_pool *pool)
+ const struct resource_pool *pool,
+ enum engine_id id)
{
int i;
for (i = 0; i < pool->audio_count; i++) {
if ((res_ctx->is_audio_acquired[i] == false) && (res_ctx->is_stream_enc_acquired[i] == true)) {
+ /*we have enough audio endpoint, find the matching inst*/
+ if (id != i)
+ continue;
+
return pool->audios[i];
}
}
@@ -1700,7 +1709,7 @@ enum dc_status resource_map_pool_resources(
dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
stream->audio_info.mode_count) {
pipe_ctx->stream_res.audio = find_first_free_audio(
- &context->res_ctx, pool);
+ &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id);
/*
* Audio assigned in order first come first get.
@@ -1765,13 +1774,16 @@ enum dc_status dc_validate_global_state(
enum dc_status result = DC_ERROR_UNEXPECTED;
int i, j;
+ if (!new_ctx)
+ return DC_ERROR_UNEXPECTED;
+
if (dc->res_pool->funcs->validate_global) {
result = dc->res_pool->funcs->validate_global(dc, new_ctx);
if (result != DC_OK)
return result;
}
- for (i = 0; new_ctx && i < new_ctx->stream_count; i++) {
+ for (i = 0; i < new_ctx->stream_count; i++) {
struct dc_stream_state *stream = new_ctx->streams[i];
for (j = 0; j < dc->res_pool->pipe_count; j++) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index b00a6040a697..e230cc44a0a7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -263,7 +263,6 @@ bool dc_stream_set_cursor_position(
struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
struct mem_input *mi = pipe_ctx->plane_res.mi;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
- struct transform *xfm = pipe_ctx->plane_res.xfm;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
struct dc_cursor_position pos_cpy = *position;
struct dc_cursor_mi_param param = {
@@ -294,11 +293,11 @@ bool dc_stream_set_cursor_position(
if (mi != NULL && mi->funcs->set_cursor_position != NULL)
mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
- if (hubp != NULL && hubp->funcs->set_cursor_position != NULL)
- hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
+ if (!hubp)
+ continue;
- if (xfm != NULL && xfm->funcs->set_cursor_position != NULL)
- xfm->funcs->set_cursor_position(xfm, &pos_cpy, &param, hubp->curs_attr.width);
+ if (hubp->funcs->set_cursor_position != NULL)
+ hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
if (dpp != NULL && dpp->funcs->set_cursor_position != NULL)
dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index 81c40f8864db..0df9ecb2710c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -352,11 +352,11 @@ void dce_aud_az_enable(struct audio *audio)
uint32_t value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
set_reg_field_value(value, 1,
- AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
- CLOCK_GATING_DISABLE);
- set_reg_field_value(value, 1,
- AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
- AUDIO_ENABLED);
+ AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ CLOCK_GATING_DISABLE);
+ set_reg_field_value(value, 1,
+ AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ AUDIO_ENABLED);
AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 4fd49a16c3b6..e42b6eb1c1f0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -87,6 +87,9 @@ static void dce110_update_generic_info_packet(
*/
uint32_t max_retries = 50;
+ /*we need turn on clock before programming AFMT block*/
+ REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
+
if (REG(AFMT_VBI_PACKET_CONTROL1)) {
if (packet_index >= 8)
ASSERT(0);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 1229a3315018..07ff8d2faf3f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -991,6 +991,16 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
struct dc_link *link = stream->sink->link;
struct dc *dc = pipe_ctx->stream->ctx->dc;
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
+ pipe_ctx->stream_res.stream_enc);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
+ pipe_ctx->stream_res.stream_enc);
+
+ pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
+ pipe_ctx->stream_res.stream_enc, true);
if (pipe_ctx->stream_res.audio) {
pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
@@ -1015,18 +1025,6 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
*/
}
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
- pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
- pipe_ctx->stream_res.stream_enc);
-
- if (dc_is_dp_signal(pipe_ctx->stream->signal))
- pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
- pipe_ctx->stream_res.stream_enc);
-
- pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
- pipe_ctx->stream_res.stream_enc, true);
-
-
/* blank at encoder level */
if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
if (pipe_ctx->stream->sink->link->connector_signal == SIGNAL_TYPE_EDP)
@@ -1774,6 +1772,10 @@ static enum dc_status validate_fbc(struct dc *dc,
if (pipe_ctx->stream->sink->link->psr_enabled)
return DC_ERROR_UNEXPECTED;
+ /* Nothing to compress */
+ if (!pipe_ctx->plane_state)
+ return DC_ERROR_UNEXPECTED;
+
/* Only for non-linear tiling */
if (pipe_ctx->plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
return DC_ERROR_UNEXPECTED;
@@ -1868,8 +1870,10 @@ static void dce110_reset_hw_ctx_wrap(
pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
struct clock_source *old_clk = pipe_ctx_old->clock_source;
- /* disable already, no need to disable again */
- if (pipe_ctx->stream && !pipe_ctx->stream->dpms_off)
+ /* Disable if new stream is null. O/w, if stream is
+ * disabled already, no need to disable again.
+ */
+ if (!pipe_ctx->stream || !pipe_ctx->stream->dpms_off)
core_link_disable_stream(pipe_ctx_old, FREE_ACQUIRED_RESOURCE);
pipe_ctx_old->stream_res.tg->funcs->set_blank(pipe_ctx_old->stream_res.tg, true);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index db96d2b47ff1..61adb8174ce0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -1037,11 +1037,13 @@ static bool underlay_create(struct dc_context *ctx, struct resource_pool *pool)
struct dce110_opp *dce110_oppv = kzalloc(sizeof(*dce110_oppv),
GFP_KERNEL);
- if ((dce110_tgv == NULL) ||
- (dce110_xfmv == NULL) ||
- (dce110_miv == NULL) ||
- (dce110_oppv == NULL))
- return false;
+ if (!dce110_tgv || !dce110_xfmv || !dce110_miv || !dce110_oppv) {
+ kfree(dce110_tgv);
+ kfree(dce110_xfmv);
+ kfree(dce110_miv);
+ kfree(dce110_oppv);
+ return false;
+ }
dce110_opp_v_construct(dce110_oppv, ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
index 67ac737eaa7e..4befce6cd87a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -1112,10 +1112,7 @@ bool dce110_timing_generator_validate_timing(
enum signal_type signal)
{
uint32_t h_blank;
- uint32_t h_back_porch;
- uint32_t hsync_offset = timing->h_border_right +
- timing->h_front_porch;
- uint32_t h_sync_start = timing->h_addressable + hsync_offset;
+ uint32_t h_back_porch, hsync_offset, h_sync_start;
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -1124,6 +1121,9 @@ bool dce110_timing_generator_validate_timing(
if (!timing)
return false;
+ hsync_offset = timing->h_border_right + timing->h_front_porch;
+ h_sync_start = timing->h_addressable + hsync_offset;
+
/* Currently we don't support 3D, so block all 3D timings */
if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE)
return false;
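The hunk above removes a dereference of `timing` that happened in the variable initializers, before the `if (!timing)` check could run; the derived values are now computed only after the pointer has been validated. A trimmed sketch of the pattern with a hypothetical struct:

#include <stdbool.h>
#include <stdint.h>

struct timing_info {		/* hypothetical, mirroring the fields used above */
	uint32_t h_border_right;
	uint32_t h_front_porch;
	uint32_t h_addressable;
};

static bool validate_timing(const struct timing_info *timing)
{
	uint32_t hsync_offset, h_sync_start;

	if (!timing)		/* check the pointer first... */
		return false;

	/* ...and only then derive values from it. */
	hsync_offset = timing->h_border_right + timing->h_front_porch;
	h_sync_start = timing->h_addressable + hsync_offset;

	return h_sync_start != 0;
}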
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 4c4bd72d4e40..9fc8f827f2a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -912,11 +912,13 @@ static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool);
- if (!head_pipe)
+ if (!head_pipe) {
ASSERT(0);
+ return NULL;
+ }
if (!idle_pipe)
- return false;
+ return NULL;
idle_pipe->stream = head_pipe->stream;
idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
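The hunk above makes dcn10_acquire_idle_pipe_for_layer() bail out after the assert instead of dereferencing a null head_pipe, and returns NULL rather than false from a pointer-returning function. A minimal sketch of a pointer-returning lookup that reports failure with NULL (names are hypothetical):

#include <stddef.h>

struct pipe_slot { int in_use; };

static struct pipe_slot *find_idle_slot(struct pipe_slot *slots, size_t count)
{
	for (size_t i = 0; i < count; i++) {
		if (!slots[i].in_use)
			return &slots[i];
	}
	return NULL;	/* no idle slot; never 'false' from a pointer-returning function */
}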
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c
index c7333cdf1802..fced178c8c79 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c
@@ -496,9 +496,6 @@ static bool tgn10_validate_timing(
timing->timing_3d_format != TIMING_3D_FORMAT_INBAND_FA)
return false;
- if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE &&
- tg->ctx->dc->debug.disable_stereo_support)
- return false;
/* Temporarily blocking interlacing mode until it's supported */
if (timing->flags.INTERLACE == 1)
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
index 01df85641684..94fc31080fda 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
@@ -38,7 +38,7 @@ enum dc_status {
DC_FAIL_DETACH_SURFACES = 8,
DC_FAIL_SURFACE_VALIDATE = 9,
DC_NO_DP_LINK_BANDWIDTH = 10,
- DC_EXCEED_DONGLE_MAX_CLK = 11,
+ DC_EXCEED_DONGLE_CAP = 11,
DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED = 12,
DC_FAIL_BANDWIDTH_VALIDATE = 13, /* BW and Watermark validation */
DC_FAIL_SCALING = 14,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
index 7c08bc62c1f5..ea88997e1bbd 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
@@ -259,13 +259,6 @@ struct transform_funcs {
struct transform *xfm_base,
const struct dc_cursor_attributes *attr);
- void (*set_cursor_position)(
- struct transform *xfm_base,
- const struct dc_cursor_position *pos,
- const struct dc_cursor_mi_param *param,
- uint32_t width
- );
-
};
const uint16_t *get_filter_2tap_16p(void);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index a129bc5b1844..c6febbf0bf69 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -1486,7 +1486,7 @@ int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr,
if (vddci_id_buf[i] == virtual_voltage_id) {
for (j = 0; j < profile->ucLeakageBinNum; j++) {
if (efuse_voltage_id <= leakage_bin[j]) {
- *vddci = vddci_buf[j * profile->ucElbVDDC_Num + i];
+ *vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
break;
}
}
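The hunk above corrects the row stride used to index the flattened VDDCI leakage table: each leakage bin holds ucElbVDDCI_Num entries, so stepping by ucElbVDDC_Num reads the wrong cell whenever the two counts differ. A generic row-major indexing sketch (names hypothetical):

#include <stddef.h>
#include <stdint.h>

/* table[bin][idx] of a flattened 2D array lives at
 * table[bin * entries_per_bin + idx]; the stride must be the number of
 * entries per bin for this particular table. */
static uint16_t leakage_lookup(const uint16_t *table, size_t entries_per_bin,
			       size_t bin, size_t idx)
{
	return table[bin * entries_per_bin + idx];
}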
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index d1af1483c69b..a651ebcf44fd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -830,9 +830,9 @@ static int init_over_drive_limits(
const ATOM_Tonga_POWERPLAYTABLE *powerplay_table)
{
hwmgr->platform_descriptor.overdriveLimit.engineClock =
- le16_to_cpu(powerplay_table->ulMaxODEngineClock);
+ le32_to_cpu(powerplay_table->ulMaxODEngineClock);
hwmgr->platform_descriptor.overdriveLimit.memoryClock =
- le16_to_cpu(powerplay_table->ulMaxODMemoryClock);
+ le32_to_cpu(powerplay_table->ulMaxODMemoryClock);
hwmgr->platform_descriptor.minOverdriveVDDC = 0;
hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
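The hunk above matches the byte-swap helper to the field width: ulMaxODEngineClock and ulMaxODMemoryClock are 32-bit little-endian fields, and le16_to_cpu() would truncate them to their low 16 bits. A sketch assuming a firmware table struct in the usual kernel __le32/__le16 style (field names are illustrative):

#include <linux/types.h>
#include <asm/byteorder.h>

struct od_limits {
	__le32 max_od_engine_clock;	/* 32-bit field: convert with le32_to_cpu() */
	__le16 table_revision;		/* 16-bit field: convert with le16_to_cpu() */
};

static u32 read_max_engine_clock(const struct od_limits *t)
{
	/* The conversion helper must match the field width exactly. */
	return le32_to_cpu(t->max_od_engine_clock);
}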
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 4466469cf8ab..e33ec7fc5d09 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -3778,7 +3778,7 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
"Trying to Unfreeze MCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_SCLKDPM_UnfreezeLevel),
+ PPSMC_MSG_MCLKDPM_UnfreezeLevel),
"Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
return -EINVAL);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 4f79c21f27ed..f8d838c2c8ee 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -753,6 +753,7 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
uint32_t config_telemetry = 0;
struct pp_atomfwctrl_voltage_table vol_table;
struct cgs_system_info sys_info = {0};
+ uint32_t reg;
data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
if (data == NULL)
@@ -859,6 +860,16 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
advanceFanControlParameters.usFanPWMMinLimit *
hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
+ reg = soc15_get_register_offset(DF_HWID, 0,
+ mmDF_CS_AON0_DramBaseAddress0_BASE_IDX,
+ mmDF_CS_AON0_DramBaseAddress0);
+ data->mem_channels = (cgs_read_register(hwmgr->device, reg) &
+ DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
+ DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
+ PP_ASSERT_WITH_CODE(data->mem_channels < ARRAY_SIZE(channel_number),
+ "Mem Channel Index Exceeded maximum!",
+ return -EINVAL);
+
return result;
}
@@ -1777,7 +1788,7 @@ static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
struct vega10_single_dpm_table *dpm_table =
&(data->dpm_table.mem_table);
int result = 0;
- uint32_t i, j, reg, mem_channels;
+ uint32_t i, j;
for (i = 0; i < dpm_table->count; i++) {
result = vega10_populate_single_memory_level(hwmgr,
@@ -1801,20 +1812,10 @@ static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
i++;
}
- reg = soc15_get_register_offset(DF_HWID, 0,
- mmDF_CS_AON0_DramBaseAddress0_BASE_IDX,
- mmDF_CS_AON0_DramBaseAddress0);
- mem_channels = (cgs_read_register(hwmgr->device, reg) &
- DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
- DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
- PP_ASSERT_WITH_CODE(mem_channels < ARRAY_SIZE(channel_number),
- "Mem Channel Index Exceeded maximum!",
- return -1);
-
- pp_table->NumMemoryChannels = cpu_to_le16(mem_channels);
+ pp_table->NumMemoryChannels = (uint16_t)(data->mem_channels);
pp_table->MemoryChannelWidth =
- cpu_to_le16(HBM_MEMORY_CHANNEL_WIDTH *
- channel_number[mem_channels]);
+ (uint16_t)(HBM_MEMORY_CHANNEL_WIDTH *
+ channel_number[data->mem_channels]);
pp_table->LowestUclkReservedForUlv =
(uint8_t)(data->lowest_uclk_reserved_for_ulv);
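Together with the mem_channels field added to vega10_hwmgr.h below, the hunks above move the DF_CS_AON0_DramBaseAddress0 read to backend init: the interleaved channel count is extracted once with a mask-and-shift, range-checked against channel_number[], and cached in data->mem_channels for vega10_populate_all_memory_levels() to reuse. A generic mask/shift extraction sketch (the field layout shown is illustrative, not the real DF register):

#include <stdint.h>

/* Illustrative layout: pretend IntLvNumChan occupies bits [5:2]. */
#define INTLV_NUM_CHAN_MASK	0x0000003Cu
#define INTLV_NUM_CHAN_SHIFT	2u

static uint32_t decode_mem_channels(uint32_t dram_base_address0)
{
	return (dram_base_address0 & INTLV_NUM_CHAN_MASK) >> INTLV_NUM_CHAN_SHIFT;
}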
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
index b4b461c3b8ee..8f7358cc3327 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
@@ -389,6 +389,7 @@ struct vega10_hwmgr {
uint32_t config_telemetry;
uint32_t smu_version;
uint32_t acg_loop_state;
+ uint32_t mem_channels;
};
#define VEGA10_DPM2_NEAR_TDP_DEC 10