author     Hai Li <hali@codeaurora.org>  2015-06-26 16:03:25 -0400
committer  Rob Clark <robdclark@gmail.com>  2015-08-15 18:27:16 -0400
commit     c71716b17bc772e9c38f85a4b496bbfac0dd32f0 (patch)
tree       06b551c9d06a7edad0d12841b094e99a20855198 /drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
parent     drm/msm/mdp5: Add plane blending operation support for MDP5 (v2) (diff)
drm/msm/mdp5: Allocate CTL for each display interface
In MDP5, a CTL contains the information for the whole pipeline whose output goes down to a display interface. In various cases, one interface may require 2 CRTCs but only one CTL, and some interfaces also require the use of certain CTLs. Instead of allocating a CTL for each active CRTC, this change associates a CTL with each display interface.

Signed-off-by: Hai Li <hali@codeaurora.org>
Signed-off-by: Rob Clark <robdclark@gmail.com>
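A minimal sketch of how a caller might use the reworked API, based only on the signatures visible in the diff below: the CTL is requested per interface number, and the layer mixer is bound later via mdp5_ctl_set_pipeline() once the CRTC is known. The helper name and the assumption that the CTL manager is reachable as mdp5_kms->ctlm are illustrative, not part of this patch.

/* Hypothetical helper (not in this patch): illustrates the new call order. */
static int example_setup_ctl_for_intf(struct mdp5_kms *mdp5_kms,
				      struct mdp5_interface *intf,
				      struct drm_crtc *crtc)
{
	struct mdp5_ctl *ctl;

	/* CTLs are now allocated per display interface, not per CRTC */
	ctl = mdp5_ctlm_request(mdp5_kms->ctlm, intf->num); /* ctlm field assumed */
	if (!ctl)
		return -ENODEV;

	/* Bind the layer mixer to the CTL once the CRTC is known */
	return mdp5_ctl_set_pipeline(ctl, intf, mdp5_crtc_get_lm(crtc));
}

mdp5_ctl_blend() likewise drops its explicit lm argument, since the CTL now remembers the mixer set by mdp5_ctl_set_pipeline().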
Diffstat (limited to 'drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c')
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c | 53
1 file changed, 20 insertions(+), 33 deletions(-)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
index 622849bbc346..9cf987bb0b09 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -60,8 +60,6 @@ struct mdp5_ctl {
u32 pending_ctl_trigger;
bool cursor_on;
-
- struct drm_crtc *crtc;
};
struct mdp5_ctl_manager {
@@ -168,11 +166,21 @@ static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
-int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl,
+ struct mdp5_interface *intf, int lm)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
+ if (unlikely(WARN_ON(intf->num != ctl->pipeline.intf.num))) {
+ dev_err(mdp5_kms->dev->dev,
+ "CTL %d is allocated by INTF %d, but used by INTF %d\n",
+ ctl->id, ctl->pipeline.intf.num, intf->num);
+ return -EINVAL;
+ }
+
+ ctl->lm = lm;
+
memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));
ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
@@ -335,7 +343,7 @@ static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
}
}
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u8 *stage, u32 stage_cnt,
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt,
u32 ctl_blend_op_flags)
{
unsigned long flags;
@@ -358,13 +366,13 @@ int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u8 *stage, u32 stage_cnt,
if (ctl->cursor_on)
blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
- ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
- ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, lm), blend_ext_cfg);
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, ctl->lm), blend_cfg);
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, ctl->lm), blend_ext_cfg);
spin_unlock_irqrestore(&ctl->hw_lock, flags);
- ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(lm);
+ ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(ctl->lm);
- DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", lm,
+ DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", ctl->lm,
blend_cfg, blend_ext_cfg);
return 0;
@@ -490,38 +498,18 @@ u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id));
}
-void mdp5_ctl_release(struct mdp5_ctl *ctl)
-{
- struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
- unsigned long flags;
-
- if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) {
- dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)",
- ctl->id, ctl->busy);
- return;
- }
-
- spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
- ctl->busy = false;
- spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
-
- DBG("CTL %d released", ctl->id);
-}
-
int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
{
return WARN_ON(!ctl) ? -EINVAL : ctl->id;
}
/*
- * mdp5_ctl_request() - CTL dynamic allocation
- *
- * Note: Current implementation considers that we can only have one CRTC per CTL
+ * mdp5_ctl_request() - CTL allocation
*
* @return first free CTL
*/
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
- struct drm_crtc *crtc)
+ int intf_num)
{
struct mdp5_ctl *ctl = NULL;
unsigned long flags;
@@ -539,9 +527,8 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
}
ctl = &ctl_mgr->ctls[c];
-
- ctl->lm = mdp5_crtc_get_lm(crtc);
- ctl->crtc = crtc;
+ ctl->pipeline.intf.num = intf_num;
+ ctl->lm = -1;
ctl->busy = true;
ctl->pending_ctl_trigger = 0;
DBG("CTL %d allocated", ctl->id);