aboutsummaryrefslogtreecommitdiffstatshomepage
path: root/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2022-05-11 12:40:47 +1000
committerDave Airlie <airlied@redhat.com>2022-05-11 12:40:47 +1000
commitf83493f7d34da258310ecd3d07f0cc78f884c954 (patch)
tree5b0d034a505dc8a0f42a16fe17407e443afae32d /drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
parentMerge tag 'drm-intel-next-2022-05-06' of git://anongit.freedesktop.org/drm/drm-intel into drm-next (diff)
parentMAINTAINERS: Add Dmitry as MSM DRM driver co-maintainer (diff)
downloadwireguard-linux-f83493f7d34da258310ecd3d07f0cc78f884c954.tar.xz
wireguard-linux-f83493f7d34da258310ecd3d07f0cc78f884c954.zip
Merge tag 'drm-msm-next-2022-05-09' of https://gitlab.freedesktop.org/drm/msm into drm-next
- Fourcc modifier for tiled but not compressed layouts - Support for userspace allocated IOVA (GPU virtual address) - Devfreq clamp_to_idle fix - DPU: DSC (Display Stream Compression) support - DPU: inline rotation support on SC7280 - DPU: update DP timings to follow vendor recommendations - DP, DPU: add support for wide bus (on newer chipsets) - DP: eDP support - Merge DPU1 and MDP5 MDSS driver, make dpu/mdp device the master component - MDSS: optionally reset the IP block at the bootup to drop bootloader state - Properly register and unregister internal bridges in the DRM framework - Complete DPU IRQ cleanup - DP: conversion to use drm_bridge and drm_bridge_connector - eDP: drop old eDP parts again - DPU: writeback support - Misc small fixes Signed-off-by: Dave Airlie <airlied@redhat.com> From: Rob Clark <robdclark@gmail.com> Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGvJCr_1D8d0dgmyQC5HD4gmXeZw=bFV_CNCfceZbpMxRw@mail.gmail.com
Diffstat (limited to 'drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c')
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c161
1 file changed, 62 insertions, 99 deletions
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index c61b5b283f08..61284e6c313d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -151,25 +151,22 @@ static const struct dpu_intr_reg dpu_intr_set[] = {
*/
static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
{
- struct dpu_irq_callback *cb;
-
VERB("irq_idx=%d\n", irq_idx);
- if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx]))
+ if (!dpu_kms->hw_intr->irq_tbl[irq_idx].cb)
DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
- atomic_inc(&dpu_kms->hw_intr->irq_counts[irq_idx]);
+ atomic_inc(&dpu_kms->hw_intr->irq_tbl[irq_idx].count);
/*
* Perform registered function callback
*/
- list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[irq_idx], list)
- if (cb->func)
- cb->func(cb->arg, irq_idx);
+ dpu_kms->hw_intr->irq_tbl[irq_idx].cb(dpu_kms->hw_intr->irq_tbl[irq_idx].arg, irq_idx);
}
-irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms)
+irqreturn_t dpu_core_irq(struct msm_kms *kms)
{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
struct dpu_hw_intr *intr = dpu_kms->hw_intr;
int reg_idx;
int irq_idx;
@@ -362,7 +359,7 @@ static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
wmb();
}
-u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
+u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx)
{
struct dpu_hw_intr *intr = dpu_kms->hw_intr;
int reg_idx;
@@ -389,7 +386,7 @@ u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
intr_status = DPU_REG_READ(&intr->hw,
dpu_intr_set[reg_idx].status_off) &
DPU_IRQ_MASK(irq_idx);
- if (intr_status && clear)
+ if (intr_status)
DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
intr_status);
@@ -413,24 +410,18 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
struct dpu_mdss_cfg *m)
{
struct dpu_hw_intr *intr;
+ int nirq = MDP_INTR_MAX * 32;
if (!addr || !m)
return ERR_PTR(-EINVAL);
- intr = kzalloc(sizeof(*intr), GFP_KERNEL);
+ intr = kzalloc(struct_size(intr, irq_tbl, nirq), GFP_KERNEL);
if (!intr)
return ERR_PTR(-ENOMEM);
__intr_offset(m, addr, &intr->hw);
- intr->total_irqs = ARRAY_SIZE(dpu_intr_set) * 32;
-
- intr->cache_irq_mask = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
- GFP_KERNEL);
- if (intr->cache_irq_mask == NULL) {
- kfree(intr);
- return ERR_PTR(-ENOMEM);
- }
+ intr->total_irqs = nirq;
intr->irq_mask = m->mdss_irqs;
@@ -441,31 +432,18 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
{
- if (intr) {
- kfree(intr->cache_irq_mask);
-
- kfree(intr->irq_cb_tbl);
- kfree(intr->irq_counts);
-
- kfree(intr);
- }
+ kfree(intr);
}
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
- struct dpu_irq_callback *register_irq_cb)
+ void (*irq_cb)(void *arg, int irq_idx),
+ void *irq_arg)
{
unsigned long irq_flags;
+ int ret;
- if (!dpu_kms->hw_intr->irq_cb_tbl) {
- DPU_ERROR("invalid params\n");
- return -EINVAL;
- }
-
- if (!register_irq_cb || !register_irq_cb->func) {
- DPU_ERROR("invalid irq_cb:%d func:%d\n",
- register_irq_cb != NULL,
- register_irq_cb ?
- register_irq_cb->func != NULL : -1);
+ if (!irq_cb) {
+ DPU_ERROR("invalid ird_idx:%d irq_cb:%ps\n", irq_idx, irq_cb);
return -EINVAL;
}
@@ -477,41 +455,34 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
- trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
- list_del_init(&register_irq_cb->list);
- list_add_tail(&register_irq_cb->list,
- &dpu_kms->hw_intr->irq_cb_tbl[irq_idx]);
- if (list_is_first(&register_irq_cb->list,
- &dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) {
- int ret = dpu_hw_intr_enable_irq_locked(
+
+ if (unlikely(WARN_ON(dpu_kms->hw_intr->irq_tbl[irq_idx].cb))) {
+ spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+
+ return -EBUSY;
+ }
+
+ trace_dpu_core_irq_register_callback(irq_idx, irq_cb);
+ dpu_kms->hw_intr->irq_tbl[irq_idx].arg = irq_arg;
+ dpu_kms->hw_intr->irq_tbl[irq_idx].cb = irq_cb;
+
+ ret = dpu_hw_intr_enable_irq_locked(
dpu_kms->hw_intr,
irq_idx);
- if (ret)
- DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
+ if (ret)
+ DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
irq_idx);
- }
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+ trace_dpu_irq_register_success(irq_idx);
+
return 0;
}
-int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
- struct dpu_irq_callback *register_irq_cb)
+int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
{
unsigned long irq_flags;
-
- if (!dpu_kms->hw_intr->irq_cb_tbl) {
- DPU_ERROR("invalid params\n");
- return -EINVAL;
- }
-
- if (!register_irq_cb || !register_irq_cb->func) {
- DPU_ERROR("invalid irq_cb:%d func:%d\n",
- register_irq_cb != NULL,
- register_irq_cb ?
- register_irq_cb->func != NULL : -1);
- return -EINVAL;
- }
+ int ret;
if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
@@ -521,20 +492,20 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
- trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
- list_del_init(&register_irq_cb->list);
- /* empty callback list but interrupt is still enabled */
- if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) {
- int ret = dpu_hw_intr_disable_irq_locked(
- dpu_kms->hw_intr,
- irq_idx);
- if (ret)
- DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
- irq_idx);
- VERB("irq_idx=%d ret=%d\n", irq_idx, ret);
- }
+ trace_dpu_core_irq_unregister_callback(irq_idx);
+
+ ret = dpu_hw_intr_disable_irq_locked(dpu_kms->hw_intr, irq_idx);
+ if (ret)
+ DPU_ERROR("Fail to disable IRQ for irq_idx:%d: %d\n",
+ irq_idx, ret);
+
+ dpu_kms->hw_intr->irq_tbl[irq_idx].cb = NULL;
+ dpu_kms->hw_intr->irq_tbl[irq_idx].arg = NULL;
+
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+ trace_dpu_irq_unregister_success(irq_idx);
+
return 0;
}
@@ -542,24 +513,18 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
struct dpu_kms *dpu_kms = s->private;
- struct dpu_irq_callback *cb;
unsigned long irq_flags;
- int i, irq_count, cb_count;
-
- if (WARN_ON(!dpu_kms->hw_intr->irq_cb_tbl))
- return 0;
+ int i, irq_count;
+ void *cb;
for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
- cb_count = 0;
- irq_count = atomic_read(&dpu_kms->hw_intr->irq_counts[i]);
- list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[i], list)
- cb_count++;
+ irq_count = atomic_read(&dpu_kms->hw_intr->irq_tbl[i].count);
+ cb = dpu_kms->hw_intr->irq_tbl[i].cb;
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
- if (irq_count || cb_count)
- seq_printf(s, "idx:%d irq:%d cb:%d\n",
- i, irq_count, cb_count);
+ if (irq_count || cb)
+ seq_printf(s, "idx:%d irq:%d cb:%ps\n", i, irq_count, cb);
}
return 0;
@@ -575,8 +540,9 @@ void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
}
#endif
-void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
+void dpu_core_irq_preinstall(struct msm_kms *kms)
{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
int i;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
@@ -584,24 +550,21 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
dpu_disable_all_irqs(dpu_kms);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
- /* Create irq callbacks for all possible irq_idx */
- dpu_kms->hw_intr->irq_cb_tbl = kcalloc(dpu_kms->hw_intr->total_irqs,
- sizeof(struct list_head), GFP_KERNEL);
- dpu_kms->hw_intr->irq_counts = kcalloc(dpu_kms->hw_intr->total_irqs,
- sizeof(atomic_t), GFP_KERNEL);
- for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
- INIT_LIST_HEAD(&dpu_kms->hw_intr->irq_cb_tbl[i]);
- atomic_set(&dpu_kms->hw_intr->irq_counts[i], 0);
- }
+ for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
+ atomic_set(&dpu_kms->hw_intr->irq_tbl[i].count, 0);
}
-void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
+void dpu_core_irq_uninstall(struct msm_kms *kms)
{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
int i;
+ if (!dpu_kms->hw_intr)
+ return;
+
pm_runtime_get_sync(&dpu_kms->pdev->dev);
for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
- if (!list_empty(&dpu_kms->hw_intr->irq_cb_tbl[i]))
+ if (dpu_kms->hw_intr->irq_tbl[i].cb)
DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
dpu_clear_irqs(dpu_kms);