Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_display.c')
 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 143
 1 file changed, 54 insertions(+), 89 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 76f96028313d..083e2b429872 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -41,7 +41,7 @@ static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb)
container_of(cb, struct amdgpu_flip_work, cb);
fence_put(f);
- schedule_work(&work->flip_work);
+ schedule_work(&work->flip_work.work);
}
static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
@@ -63,16 +63,17 @@ static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
static void amdgpu_flip_work_func(struct work_struct *__work)
{
+ struct delayed_work *delayed_work =
+ container_of(__work, struct delayed_work, work);
struct amdgpu_flip_work *work =
- container_of(__work, struct amdgpu_flip_work, flip_work);
+ container_of(delayed_work, struct amdgpu_flip_work, flip_work);
struct amdgpu_device *adev = work->adev;
struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
struct drm_crtc *crtc = &amdgpuCrtc->base;
unsigned long flags;
- unsigned i, repcnt = 4;
- int vpos, hpos, stat, min_udelay = 0;
- struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
+ unsigned i;
+ int vpos, hpos;
if (amdgpu_flip_handle_fence(work, &work->excl))
return;
@@ -81,55 +82,23 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
if (amdgpu_flip_handle_fence(work, &work->shared[i]))
return;
- /* We borrow the event spin lock for protecting flip_status */
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
-
- /* If this happens to execute within the "virtually extended" vblank
- * interval before the start of the real vblank interval then it needs
- * to delay programming the mmio flip until the real vblank is entered.
- * This prevents completing a flip too early due to the way we fudge
- * our vblank counter and vblank timestamps in order to work around the
- * problem that the hw fires vblank interrupts before actual start of
- * vblank (when line buffer refilling is done for a frame). It
- * complements the fudging logic in amdgpu_get_crtc_scanoutpos() for
- * timestamping and amdgpu_get_vblank_counter_kms() for vblank counts.
- *
- * In practice this won't execute very often unless on very fast
- * machines because the time window for this to happen is very small.
+ /* Wait until we're out of the vertical blank period before the one
+ * targeted by the flip
*/
- while (amdgpuCrtc->enabled && --repcnt) {
- /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
- * start in hpos, and to the "fudged earlier" vblank start in
- * vpos.
- */
- stat = amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id,
- GET_DISTANCE_TO_VBLANKSTART,
- &vpos, &hpos, NULL, NULL,
- &crtc->hwmode);
-
- if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
- (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
- !(vpos >= 0 && hpos <= 0))
- break;
-
- /* Sleep at least until estimated real start of hw vblank */
- min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
- if (min_udelay > vblank->framedur_ns / 2000) {
- /* Don't wait ridiculously long - something is wrong */
- repcnt = 0;
- break;
- }
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- usleep_range(min_udelay, 2 * min_udelay);
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (amdgpuCrtc->enabled &&
+ (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
+ &vpos, &hpos, NULL, NULL,
+ &crtc->hwmode)
+ & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
+ (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
+ (int)(work->target_vblank -
+ amdgpu_get_vblank_counter_kms(adev->ddev, amdgpuCrtc->crtc_id)) > 0) {
+ schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
+ return;
}
- if (!repcnt)
- DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
- "framedur %d, linedur %d, stat %d, vpos %d, "
- "hpos %d\n", work->crtc_id, min_udelay,
- vblank->framedur_ns / 1000,
- vblank->linedur_ns / 1000, stat, vpos, hpos);
+ /* We borrow the event spin lock for protecting flip_status */
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
/* Do the flip (mmio) */
adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);
@@ -154,25 +123,25 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
int r;
/* unpin of the old buffer */
- r = amdgpu_bo_reserve(work->old_rbo, false);
+ r = amdgpu_bo_reserve(work->old_abo, false);
if (likely(r == 0)) {
- r = amdgpu_bo_unpin(work->old_rbo);
+ r = amdgpu_bo_unpin(work->old_abo);
if (unlikely(r != 0)) {
DRM_ERROR("failed to unpin buffer after flip\n");
}
- amdgpu_bo_unreserve(work->old_rbo);
+ amdgpu_bo_unreserve(work->old_abo);
} else
DRM_ERROR("failed to reserve buffer after flip\n");
- amdgpu_bo_unref(&work->old_rbo);
+ amdgpu_bo_unref(&work->old_abo);
kfree(work->shared);
kfree(work);
}
-int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
+int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags, uint32_t target)
{
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
@@ -181,7 +150,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
struct amdgpu_framebuffer *new_amdgpu_fb;
struct drm_gem_object *obj;
struct amdgpu_flip_work *work;
- struct amdgpu_bo *new_rbo;
+ struct amdgpu_bo *new_abo;
unsigned long flags;
u64 tiling_flags;
u64 base;
@@ -191,7 +160,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
if (work == NULL)
return -ENOMEM;
- INIT_WORK(&work->flip_work, amdgpu_flip_work_func);
+ INIT_DELAYED_WORK(&work->flip_work, amdgpu_flip_work_func);
INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func);
work->event = event;
@@ -204,28 +173,28 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
obj = old_amdgpu_fb->obj;
/* take a reference to the old object */
- work->old_rbo = gem_to_amdgpu_bo(obj);
- amdgpu_bo_ref(work->old_rbo);
+ work->old_abo = gem_to_amdgpu_bo(obj);
+ amdgpu_bo_ref(work->old_abo);
new_amdgpu_fb = to_amdgpu_framebuffer(fb);
obj = new_amdgpu_fb->obj;
- new_rbo = gem_to_amdgpu_bo(obj);
+ new_abo = gem_to_amdgpu_bo(obj);
/* pin the new buffer */
- r = amdgpu_bo_reserve(new_rbo, false);
+ r = amdgpu_bo_reserve(new_abo, false);
if (unlikely(r != 0)) {
- DRM_ERROR("failed to reserve new rbo buffer before flip\n");
+ DRM_ERROR("failed to reserve new abo buffer before flip\n");
goto cleanup;
}
- r = amdgpu_bo_pin_restricted(new_rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
+ r = amdgpu_bo_pin_restricted(new_abo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
if (unlikely(r != 0)) {
r = -EINVAL;
- DRM_ERROR("failed to pin new rbo buffer before flip\n");
+ DRM_ERROR("failed to pin new abo buffer before flip\n");
goto unreserve;
}
- r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl,
+ r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
&work->shared_count,
&work->shared);
if (unlikely(r != 0)) {
@@ -233,16 +202,12 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
goto unpin;
}
- amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
- amdgpu_bo_unreserve(new_rbo);
+ amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
+ amdgpu_bo_unreserve(new_abo);
work->base = base;
-
- r = drm_crtc_vblank_get(crtc);
- if (r) {
- DRM_ERROR("failed to get vblank before flip\n");
- goto pflip_cleanup;
- }
+ work->target_vblank = target - drm_crtc_vblank_count(crtc) +
+ amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
/* we borrow the event spin lock for protecting flip_wrok */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -250,7 +215,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
r = -EBUSY;
- goto vblank_cleanup;
+ goto pflip_cleanup;
}
amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
@@ -262,26 +227,23 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
/* update crtc fb */
crtc->primary->fb = fb;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- amdgpu_flip_work_func(&work->flip_work);
+ amdgpu_flip_work_func(&work->flip_work.work);
return 0;
-vblank_cleanup:
- drm_crtc_vblank_put(crtc);
-
pflip_cleanup:
- if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) {
- DRM_ERROR("failed to reserve new rbo in error path\n");
+ if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
+ DRM_ERROR("failed to reserve new abo in error path\n");
goto cleanup;
}
unpin:
- if (unlikely(amdgpu_bo_unpin(new_rbo) != 0)) {
- DRM_ERROR("failed to unpin new rbo in error path\n");
+ if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) {
+ DRM_ERROR("failed to unpin new abo in error path\n");
}
unreserve:
- amdgpu_bo_unreserve(new_rbo);
+ amdgpu_bo_unreserve(new_abo);
cleanup:
- amdgpu_bo_unref(&work->old_rbo);
+ amdgpu_bo_unref(&work->old_abo);
fence_put(work->excl);
for (i = 0; i < work->shared_count; ++i)
fence_put(work->shared[i]);
@@ -335,7 +297,7 @@ int amdgpu_crtc_set_config(struct drm_mode_set *set)
return ret;
}
-static const char *encoder_names[38] = {
+static const char *encoder_names[41] = {
"NONE",
"INTERNAL_LVDS",
"INTERNAL_TMDS1",
@@ -374,6 +336,9 @@ static const char *encoder_names[38] = {
"TRAVIS",
"INTERNAL_VCE",
"INTERNAL_UNIPHY3",
+ "HDMI_ANX9805",
+ "INTERNAL_AMCLK",
+ "VIRTUAL",
};
static const char *hpd_names[6] = {
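
For context on the conversion above, the core pattern this commit introduces - embedding a struct delayed_work in the flip work item, recovering the containing structure inside the work function, and rescheduling until the target vblank count is reached - is sketched below. This is a minimal illustration, not amdgpu code: my_flip_work, my_flip_work_func and current_vblank_count() are hypothetical placeholders for amdgpu_flip_work, amdgpu_flip_work_func and amdgpu_get_vblank_counter_kms(); only the workqueue APIs (INIT_DELAYED_WORK, schedule_delayed_work, container_of, usecs_to_jiffies) are real kernel interfaces.

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/* Placeholder for struct amdgpu_flip_work: the flip_work member is now a
 * struct delayed_work instead of a plain struct work_struct. */
struct my_flip_work {
	struct delayed_work flip_work;
	u32 target_vblank;
};

/* Stub standing in for amdgpu_get_vblank_counter_kms(). */
static u32 current_vblank_count(void)
{
	return 0;
}

static void my_flip_work_func(struct work_struct *__work)
{
	/* The workqueue hands us the embedded work_struct, so climb two
	 * levels: work_struct -> delayed_work -> containing flip work. */
	struct delayed_work *delayed_work =
		container_of(__work, struct delayed_work, work);
	struct my_flip_work *work =
		container_of(delayed_work, struct my_flip_work, flip_work);

	/* The signed difference keeps the test correct across counter
	 * wrap-around, as in the (int)(target_vblank - counter) > 0
	 * check added by the patch. */
	if ((int)(work->target_vblank - current_vblank_count()) > 0) {
		/* Not at the target vblank yet: check again in ~1 ms
		 * instead of sleeping inside the worker. */
		schedule_delayed_work(&work->flip_work,
				      usecs_to_jiffies(1000));
		return;
	}

	/* ... program the flip (mmio) here ... */
}

/* Setup mirrors amdgpu_crtc_page_flip_target():
 *	INIT_DELAYED_WORK(&work->flip_work, my_flip_work_func);
 * followed by a direct first call, my_flip_work_func(&work->flip_work.work),
 * or schedule_delayed_work(&work->flip_work, 0).
 */

Compared with the removed code, which busy-polled the scanout position and slept with usleep_range() around the event lock, rescheduling the delayed work lets the handler return immediately and retry later, so no worker thread is blocked while waiting for the target vblank.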