Diffstat (limited to 'drivers/gpu/drm/vc4')
-rw-r--r--  drivers/gpu/drm/vc4/Kconfig                  |    2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_bo.c                 |   79
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c               |  149
-rw-r--r--  drivers/gpu/drm/vc4/vc4_debugfs.c            |   72
-rw-r--r--  drivers/gpu/drm/vc4/vc4_dpi.c                |  131
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.c                |   29
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.h                |   65
-rw-r--r--  drivers/gpu/drm/vc4/vc4_dsi.c                |  131
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c                |   22
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c               | 1000
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.h               |   15
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hvs.c                |  145
-rw-r--r--  drivers/gpu/drm/vc4/vc4_irq.c                |    4
-rw-r--r--  drivers/gpu/drm/vc4/vc4_kms.c                |    1
-rw-r--r--  drivers/gpu/drm/vc4/vc4_perfmon.c            |    1
-rw-r--r--  drivers/gpu/drm/vc4/vc4_plane.c              |   71
-rw-r--r--  drivers/gpu/drm/vc4/vc4_render_cl.c          |   40
-rw-r--r--  drivers/gpu/drm/vc4/vc4_txp.c                |   63
-rw-r--r--  drivers/gpu/drm/vc4/vc4_v3d.c                |   73
-rw-r--r--  drivers/gpu/drm/vc4/vc4_validate.c           |   28
-rw-r--r--  drivers/gpu/drm/vc4/vc4_validate_shaders.c   |    2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_vec.c                |  273
22 files changed, 1543 insertions(+), 853 deletions(-)
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index b0f3117102ca..246305d17a52 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -12,7 +12,7 @@ config DRM_VC4
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_PANEL_BRIDGE
select SND_PCM
select SND_PCM_ELD
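
The Kconfig change above tracks the tree-wide rename of the GEM CMA helpers to the GEM DMA helpers; the rest of this series renames the corresponding calls and the paddr field accesses. As a minimal sketch (not code from this patch), allocating and releasing a contiguous buffer with the renamed helper looks like this, with example_dma_alloc() being an invented name:

#include <linux/err.h>
#include <drm/drm_gem_dma_helper.h>

/* Allocate a physically contiguous GEM object through the renamed DMA
 * helper and drop the reference again. drm_gem_dma_create() is the new
 * name of drm_gem_cma_create(), and the backing address is exposed as
 * dma_obj->dma_addr instead of the old paddr field.
 */
static int example_dma_alloc(struct drm_device *drm, size_t size)
{
	struct drm_gem_dma_object *dma_obj;

	dma_obj = drm_gem_dma_create(drm, size);
	if (IS_ERR(dma_obj))
		return PTR_ERR(dma_obj);

	/* dma_obj->vaddr is the kernel mapping, dma_obj->dma_addr the device address. */
	drm_gem_object_put(&dma_obj->base);
	return 0;
}
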
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 0846d56f74f2..231add8b8e12 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -8,10 +8,10 @@
*
* The VC4 GPU architecture (both scanout and rendering) has direct
* access to system memory with no MMU in between. To support it, we
- * use the GEM CMA helper functions to allocate contiguous ranges of
+ * use the GEM DMA helper functions to allocate contiguous ranges of
* physical memory for our BOs.
*
- * Since the CMA allocator is very slow, we keep a cache of recently
+ * Since the DMA allocator is very slow, we keep a cache of recently
* freed BOs around so that the kernel's allocation of objects for 3D
* rendering can return quickly.
*/
@@ -179,7 +179,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
bo->validated_shader = NULL;
}
- drm_gem_cma_free(&bo->base);
+ drm_gem_dma_free(&bo->base);
}
static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
@@ -303,7 +303,7 @@ static void vc4_bo_purge(struct drm_gem_object *obj)
drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
- dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.paddr);
+ dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.dma_addr);
bo->base.vaddr = NULL;
bo->madv = __VC4_MADV_PURGED;
}
@@ -387,13 +387,14 @@ out:
* @dev: DRM device
* @size: Size in bytes of the memory the object will reference
*
- * This lets the CMA helpers allocate object structs for us, and keep
+ * This lets the DMA helpers allocate object structs for us, and keep
* our BO stats correct.
*/
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_bo *bo;
+ int ret;
if (WARN_ON_ONCE(vc4->is_vc5))
return ERR_PTR(-ENODEV);
@@ -404,7 +405,11 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
bo->madv = VC4_MADV_WILLNEED;
refcount_set(&bo->usecnt, 0);
- mutex_init(&bo->madv_lock);
+
+ ret = drmm_mutex_init(dev, &bo->madv_lock);
+ if (ret)
+ return ERR_PTR(ret);
+
mutex_lock(&vc4->bo_lock);
bo->label = VC4_BO_TYPE_KERNEL;
vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
@@ -421,7 +426,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
{
size_t size = roundup(unaligned_size, PAGE_SIZE);
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct vc4_bo *bo;
if (WARN_ON_ONCE(vc4->is_vc5))
@@ -438,39 +443,39 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
return bo;
}
- cma_obj = drm_gem_cma_create(dev, size);
- if (IS_ERR(cma_obj)) {
+ dma_obj = drm_gem_dma_create(dev, size);
+ if (IS_ERR(dma_obj)) {
/*
- * If we've run out of CMA memory, kill the cache of
- * CMA allocations we've got laying around and try again.
+ * If we've run out of DMA memory, kill the cache of
+ * DMA allocations we've got laying around and try again.
*/
vc4_bo_cache_purge(dev);
- cma_obj = drm_gem_cma_create(dev, size);
+ dma_obj = drm_gem_dma_create(dev, size);
}
- if (IS_ERR(cma_obj)) {
+ if (IS_ERR(dma_obj)) {
/*
- * Still not enough CMA memory, purge the userspace BO
+ * Still not enough DMA memory, purge the userspace BO
* cache and retry.
* This is sub-optimal since we purge the whole userspace
* BO cache which forces users that want to re-use the BO to
* restore its initial content.
* Ideally, we should purge entries one by one and retry
- * after each to see if CMA allocation succeeds. Or even
+ * after each to see if DMA allocation succeeds. Or even
* better, try to find an entry with at least the same
* size.
*/
vc4_bo_userspace_cache_purge(dev);
- cma_obj = drm_gem_cma_create(dev, size);
+ dma_obj = drm_gem_dma_create(dev, size);
}
- if (IS_ERR(cma_obj)) {
+ if (IS_ERR(dma_obj)) {
struct drm_printer p = drm_info_printer(vc4->base.dev);
- DRM_ERROR("Failed to allocate from CMA:\n");
+ DRM_ERROR("Failed to allocate from GEM DMA helper:\n");
vc4_bo_stats_print(&p, vc4);
return ERR_PTR(-ENOMEM);
}
- bo = to_vc4_bo(&cma_obj->base);
+ bo = to_vc4_bo(&dma_obj->base);
/* By default, BOs do not support the MADV ioctl. This will be enabled
* only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB
@@ -479,7 +484,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
bo->madv = __VC4_MADV_NOTSUPP;
mutex_lock(&vc4->bo_lock);
- vc4_bo_set_label(&cma_obj->base, type);
+ vc4_bo_set_label(&dma_obj->base, type);
mutex_unlock(&vc4->bo_lock);
return bo;
@@ -564,7 +569,7 @@ static void vc4_free_object(struct drm_gem_object *gem_bo)
goto out;
}
- /* If this object was partially constructed but CMA allocation
+ /* If this object was partially constructed but DMA allocation
* had failed, just free it. Can also happen when the BO has been
* purged.
*/
@@ -742,7 +747,7 @@ static int vc4_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
return -EINVAL;
}
- return drm_gem_cma_mmap(&bo->base, vma);
+ return drm_gem_dma_mmap(&bo->base, vma);
}
static const struct vm_operations_struct vc4_vm_ops = {
@@ -754,8 +759,8 @@ static const struct vm_operations_struct vc4_vm_ops = {
static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
.free = vc4_free_object,
.export = vc4_prime_export,
- .get_sg_table = drm_gem_cma_object_get_sg_table,
- .vmap = drm_gem_cma_object_vmap,
+ .get_sg_table = drm_gem_dma_object_get_sg_table,
+ .vmap = drm_gem_dma_object_vmap,
.mmap = vc4_gem_object_mmap,
.vm_ops = &vc4_vm_ops,
};
@@ -984,10 +989,28 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
return 0;
}
+int vc4_bo_debugfs_init(struct drm_minor *minor)
+{
+ struct drm_device *drm = minor->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ int ret;
+
+ if (!vc4->v3d)
+ return -ENODEV;
+
+ ret = vc4_debugfs_add_file(minor, "bo_stats",
+ vc4_bo_stats_debugfs, NULL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused);
int vc4_bo_cache_init(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ int ret;
int i;
if (WARN_ON_ONCE(vc4->is_vc5))
@@ -1007,9 +1030,11 @@ int vc4_bo_cache_init(struct drm_device *dev)
for (i = 0; i < VC4_BO_TYPE_COUNT; i++)
vc4->bo_labels[i].name = bo_type_names[i];
- mutex_init(&vc4->bo_lock);
-
- vc4_debugfs_add_file(dev, "bo_stats", vc4_bo_stats_debugfs, NULL);
+ ret = drmm_mutex_init(dev, &vc4->bo_lock);
+ if (ret) {
+ kfree(vc4->bo_labels);
+ return ret;
+ }
INIT_LIST_HEAD(&vc4->bo_cache.time_list);
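
The vc4_bo.c hunks above also convert mutex_init() to drmm_mutex_init(), so the locks are released together with the drm_device instead of needing an explicit destroy path. A minimal sketch of that managed-lock pattern, using an invented example_device container rather than struct vc4_dev:

#include <linux/mutex.h>
#include <drm/drm_device.h>
#include <drm/drm_managed.h>

struct example_device {			/* hypothetical container */
	struct drm_device base;
	struct mutex lock;
};

static int example_init(struct example_device *edev)
{
	/* DRM-managed init: the mutex is torn down automatically when the
	 * drm_device is released, so no unbind-time mutex_destroy() is needed.
	 */
	return drmm_mutex_init(&edev->base, &edev->lock);
}
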
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 029be98660b3..0108613e79d5 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -37,8 +37,9 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -206,11 +207,6 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc,
return ret;
}
-void vc4_crtc_destroy(struct drm_crtc *crtc)
-{
- drm_crtc_cleanup(crtc);
-}
-
static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
{
const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc);
@@ -300,10 +296,17 @@ struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc,
static void vc4_crtc_pixelvalve_reset(struct drm_crtc *crtc)
{
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
/* The PV needs to be disabled before it can be flushed */
CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) & ~PV_CONTROL_EN);
CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_FIFO_CLR);
+
+ drm_dev_exit(idx);
}
static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encoder,
@@ -326,6 +329,10 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode
u32 format = is_dsi1 ? PV_CONTROL_FORMAT_DSIV_24 : PV_CONTROL_FORMAT_24;
u8 ppc = pv_data->pixels_per_clock;
bool debug_dump_regs = false;
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
if (debug_dump_regs) {
struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev);
@@ -415,6 +422,8 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode
drm_crtc_index(crtc));
drm_print_regset32(&p, &vc4_crtc->regset);
}
+
+ drm_dev_exit(idx);
}
static void require_hvs_enabled(struct drm_device *dev)
@@ -435,7 +444,10 @@ static int vc4_crtc_disable(struct drm_crtc *crtc,
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- int ret;
+ int idx, ret;
+
+ if (!drm_dev_enter(dev, &idx))
+ return -ENODEV;
CRTC_WRITE(PV_V_CONTROL,
CRTC_READ(PV_V_CONTROL) & ~PV_VCONTROL_VIDEN);
@@ -469,6 +481,8 @@ static int vc4_crtc_disable(struct drm_crtc *crtc,
if (vc4_encoder && vc4_encoder->post_crtc_powerdown)
vc4_encoder->post_crtc_powerdown(encoder, state);
+ drm_dev_exit(idx);
+
return 0;
}
@@ -544,6 +558,20 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
return 0;
}
+void vc4_crtc_send_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ if (!crtc->state || !crtc->state->event)
+ return;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -567,14 +595,7 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
* Make sure we issue a vblank event after disabling the CRTC if
* someone was waiting for it.
*/
- if (crtc->state->event) {
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- crtc->state->event = NULL;
- spin_unlock_irqrestore(&dev->event_lock, flags);
- }
+ vc4_crtc_send_vblank(crtc);
}
static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
@@ -586,10 +607,14 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, new_state);
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
+ int idx;
drm_dbg(dev, "Enabling CRTC %s (%u) connected to Encoder %s (%u)",
crtc->name, crtc->base.id, encoder->name, encoder->base.id);
+ if (!drm_dev_enter(dev, &idx))
+ return;
+
require_hvs_enabled(dev);
/* Enable vblank irq handling before crtc is started otherwise
@@ -617,6 +642,8 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
if (vc4_encoder->post_crtc_enable)
vc4_encoder->post_crtc_enable(encoder, state);
+
+ drm_dev_exit(idx);
}
static enum drm_mode_status vc4_crtc_mode_valid(struct drm_crtc *crtc,
@@ -709,17 +736,31 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
static int vc4_enable_vblank(struct drm_crtc *crtc)
{
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return -ENODEV;
CRTC_WRITE(PV_INTEN, PV_INT_VFP_START);
+ drm_dev_exit(idx);
+
return 0;
}
static void vc4_disable_vblank(struct drm_crtc *crtc)
{
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
CRTC_WRITE(PV_INTEN, 0);
+
+ drm_dev_exit(idx);
}
static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
@@ -821,9 +862,9 @@ static void vc4_async_page_flip_seqno_complete(struct vc4_seqno_cb *cb)
struct vc4_bo *bo = NULL;
if (flip_state->old_fb) {
- struct drm_gem_cma_object *cma_bo =
- drm_fb_cma_get_gem_obj(flip_state->old_fb, 0);
- bo = to_vc4_bo(&cma_bo->base);
+ struct drm_gem_dma_object *dma_bo =
+ drm_fb_dma_get_gem_obj(flip_state->old_fb, 0);
+ bo = to_vc4_bo(&dma_bo->base);
}
vc4_async_page_flip_complete(flip_state);
@@ -855,19 +896,19 @@ static int vc4_async_set_fence_cb(struct drm_device *dev,
struct vc4_async_flip_state *flip_state)
{
struct drm_framebuffer *fb = flip_state->fb;
- struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *dma_bo = drm_fb_dma_get_gem_obj(fb, 0);
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct dma_fence *fence;
int ret;
if (!vc4->is_vc5) {
- struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
+ struct vc4_bo *bo = to_vc4_bo(&dma_bo->base);
return vc4_queue_seqno_cb(dev, &flip_state->cb.seqno, bo->seqno,
vc4_async_page_flip_seqno_complete);
}
- ret = dma_resv_get_singleton(cma_bo->base.resv, DMA_RESV_USAGE_READ, &fence);
+ ret = dma_resv_get_singleton(dma_bo->base.resv, DMA_RESV_USAGE_READ, &fence);
if (ret)
return ret;
@@ -943,8 +984,8 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
- struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
+ struct drm_gem_dma_object *dma_bo = drm_fb_dma_get_gem_obj(fb, 0);
+ struct vc4_bo *bo = to_vc4_bo(&dma_bo->base);
int ret;
if (WARN_ON_ONCE(vc4->is_vc5))
@@ -1050,9 +1091,23 @@ void vc4_crtc_reset(struct drm_crtc *crtc)
__drm_atomic_helper_crtc_reset(crtc, &vc4_crtc_state->base);
}
+int vc4_crtc_late_register(struct drm_crtc *crtc)
+{
+ struct drm_device *drm = crtc->dev;
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc);
+ int ret;
+
+ ret = vc4_debugfs_add_regset32(drm->primary, crtc_data->debugfs_name,
+ &vc4_crtc->regset);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static const struct drm_crtc_funcs vc4_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
- .destroy = vc4_crtc_destroy,
.page_flip = vc4_page_flip,
.set_property = NULL,
.cursor_set = NULL, /* handled by drm_mode_cursor_universal */
@@ -1063,6 +1118,7 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
.enable_vblank = vc4_enable_vblank,
.disable_vblank = vc4_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+ .late_register = vc4_crtc_late_register,
};
static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
@@ -1077,10 +1133,10 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
static const struct vc4_pv_data bcm2835_pv0_data = {
.base = {
+ .debugfs_name = "crtc0_regs",
.hvs_available_channels = BIT(0),
.hvs_output = 0,
},
- .debugfs_name = "crtc0_regs",
.fifo_depth = 64,
.pixels_per_clock = 1,
.encoder_types = {
@@ -1091,10 +1147,10 @@ static const struct vc4_pv_data bcm2835_pv0_data = {
static const struct vc4_pv_data bcm2835_pv1_data = {
.base = {
+ .debugfs_name = "crtc1_regs",
.hvs_available_channels = BIT(2),
.hvs_output = 2,
},
- .debugfs_name = "crtc1_regs",
.fifo_depth = 64,
.pixels_per_clock = 1,
.encoder_types = {
@@ -1105,10 +1161,10 @@ static const struct vc4_pv_data bcm2835_pv1_data = {
static const struct vc4_pv_data bcm2835_pv2_data = {
.base = {
+ .debugfs_name = "crtc2_regs",
.hvs_available_channels = BIT(1),
.hvs_output = 1,
},
- .debugfs_name = "crtc2_regs",
.fifo_depth = 64,
.pixels_per_clock = 1,
.encoder_types = {
@@ -1119,10 +1175,10 @@ static const struct vc4_pv_data bcm2835_pv2_data = {
static const struct vc4_pv_data bcm2711_pv0_data = {
.base = {
+ .debugfs_name = "crtc0_regs",
.hvs_available_channels = BIT(0),
.hvs_output = 0,
},
- .debugfs_name = "crtc0_regs",
.fifo_depth = 64,
.pixels_per_clock = 1,
.encoder_types = {
@@ -1133,10 +1189,10 @@ static const struct vc4_pv_data bcm2711_pv0_data = {
static const struct vc4_pv_data bcm2711_pv1_data = {
.base = {
+ .debugfs_name = "crtc1_regs",
.hvs_available_channels = BIT(0) | BIT(1) | BIT(2),
.hvs_output = 3,
},
- .debugfs_name = "crtc1_regs",
.fifo_depth = 64,
.pixels_per_clock = 1,
.encoder_types = {
@@ -1147,10 +1203,10 @@ static const struct vc4_pv_data bcm2711_pv1_data = {
static const struct vc4_pv_data bcm2711_pv2_data = {
.base = {
+ .debugfs_name = "crtc2_regs",
.hvs_available_channels = BIT(0) | BIT(1) | BIT(2),
.hvs_output = 4,
},
- .debugfs_name = "crtc2_regs",
.fifo_depth = 256,
.pixels_per_clock = 2,
.encoder_types = {
@@ -1160,10 +1216,10 @@ static const struct vc4_pv_data bcm2711_pv2_data = {
static const struct vc4_pv_data bcm2711_pv3_data = {
.base = {
+ .debugfs_name = "crtc3_regs",
.hvs_available_channels = BIT(1),
.hvs_output = 1,
},
- .debugfs_name = "crtc3_regs",
.fifo_depth = 64,
.pixels_per_clock = 1,
.encoder_types = {
@@ -1173,10 +1229,10 @@ static const struct vc4_pv_data bcm2711_pv3_data = {
static const struct vc4_pv_data bcm2711_pv4_data = {
.base = {
+ .debugfs_name = "crtc4_regs",
.hvs_available_channels = BIT(0) | BIT(1) | BIT(2),
.hvs_output = 5,
},
- .debugfs_name = "crtc4_regs",
.fifo_depth = 64,
.pixels_per_clock = 2,
.encoder_types = {
@@ -1230,6 +1286,7 @@ int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
struct drm_crtc *crtc = &vc4_crtc->base;
struct drm_plane *primary_plane;
unsigned int i;
+ int ret;
/* For now, we create just the primary and the legacy cursor
* planes. We should be able to stack more planes on easily,
@@ -1237,15 +1294,18 @@ int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
* requirement of the plane configuration, and reject ones
* that will take too much.
*/
- primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY);
+ primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY, 0);
if (IS_ERR(primary_plane)) {
dev_err(drm->dev, "failed to construct primary plane\n");
return PTR_ERR(primary_plane);
}
spin_lock_init(&vc4_crtc->irq_lock);
- drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
- crtc_funcs, NULL);
+ ret = drmm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
+ crtc_funcs, NULL);
+ if (ret)
+ return ret;
+
drm_crtc_helper_add(crtc, crtc_helper_funcs);
if (!vc4->is_vc5) {
@@ -1275,10 +1335,9 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
const struct vc4_pv_data *pv_data;
struct vc4_crtc *vc4_crtc;
struct drm_crtc *crtc;
- struct drm_plane *destroy_plane, *temp;
int ret;
- vc4_crtc = devm_kzalloc(dev, sizeof(*vc4_crtc), GFP_KERNEL);
+ vc4_crtc = drmm_kzalloc(drm, sizeof(*vc4_crtc), GFP_KERNEL);
if (!vc4_crtc)
return -ENOMEM;
crtc = &vc4_crtc->base;
@@ -1310,23 +1369,11 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
IRQF_SHARED,
"vc4 crtc", vc4_crtc);
if (ret)
- goto err_destroy_planes;
+ return ret;
platform_set_drvdata(pdev, vc4_crtc);
- vc4_debugfs_add_regset32(drm, pv_data->debugfs_name,
- &vc4_crtc->regset);
-
return 0;
-
-err_destroy_planes:
- list_for_each_entry_safe(destroy_plane, temp,
- &drm->mode_config.plane_list, head) {
- if (destroy_plane->possible_crtcs == drm_crtc_mask(crtc))
- destroy_plane->funcs->destroy(destroy_plane);
- }
-
- return ret;
}
static void vc4_crtc_unbind(struct device *dev, struct device *master,
@@ -1335,8 +1382,6 @@ static void vc4_crtc_unbind(struct device *dev, struct device *master,
struct platform_device *pdev = to_platform_device(dev);
struct vc4_crtc *vc4_crtc = dev_get_drvdata(dev);
- vc4_crtc_destroy(&vc4_crtc->base);
-
CRTC_WRITE(PV_INTEN, 0);
platform_set_drvdata(pdev, NULL);
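
The recurring pattern in the CRTC hunks above is wrapping every register access in drm_dev_enter()/drm_dev_exit(), so the MMIO paths turn into no-ops once the device has been unplugged (see the drm_dev_unplug() change in vc4_drv.c further down). A reduced sketch of that guard, with example_commit() and its regs parameter invented for illustration:

#include <linux/io.h>
#include <linux/types.h>
#include <drm/drm_drv.h>

static void example_commit(struct drm_device *drm, void __iomem *regs, u32 val)
{
	int idx;

	/* drm_dev_enter() fails after drm_dev_unplug(), so we never touch
	 * registers of hardware that is going (or has gone) away.
	 */
	if (!drm_dev_enter(drm, &idx))
		return;

	writel(val, regs);

	drm_dev_exit(idx);
}
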
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index ba2d8ea562af..19cda4f91a82 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -3,6 +3,8 @@
* Copyright © 2014 Broadcom
*/
+#include <drm/drm_drv.h>
+
#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
@@ -12,11 +14,6 @@
#include "vc4_drv.h"
#include "vc4_regs.h"
-struct vc4_debugfs_info_entry {
- struct list_head link;
- struct drm_info_list info;
-};
-
/*
* Called at drm_dev_register() time on each of the minors registered
* by the DRM device, to attach the debugfs files.
@@ -25,62 +22,59 @@ void
vc4_debugfs_init(struct drm_minor *minor)
{
struct vc4_dev *vc4 = to_vc4_dev(minor->dev);
- struct vc4_debugfs_info_entry *entry;
+ struct drm_device *drm = &vc4->base;
- if (!of_device_is_compatible(vc4->hvs->pdev->dev.of_node,
- "brcm,bcm2711-vc5"))
- debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR,
- minor->debugfs_root, &vc4->load_tracker_enabled);
+ drm_WARN_ON(drm, vc4_hvs_debugfs_init(minor));
- list_for_each_entry(entry, &vc4->debugfs_list, link) {
- drm_debugfs_create_files(&entry->info, 1,
- minor->debugfs_root, minor);
+ if (vc4->v3d) {
+ drm_WARN_ON(drm, vc4_bo_debugfs_init(minor));
+ drm_WARN_ON(drm, vc4_v3d_debugfs_init(minor));
}
}
static int vc4_debugfs_regset32(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *drm = node->minor->dev;
struct debugfs_regset32 *regset = node->info_ent->data;
struct drm_printer p = drm_seq_file_printer(m);
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return -ENODEV;
drm_print_regset32(&p, regset);
+ drm_dev_exit(idx);
+
return 0;
}
-/*
- * Registers a debugfs file with a callback function for a vc4 component.
- *
- * This is like drm_debugfs_create_files(), but that can only be
- * called a given DRM minor, while the various VC4 components want to
- * register their debugfs files during the component bind process. We
- * track the request and delay it to be called on each minor during
- * vc4_debugfs_init().
- */
-void vc4_debugfs_add_file(struct drm_device *dev,
- const char *name,
- int (*show)(struct seq_file*, void*),
- void *data)
+int vc4_debugfs_add_file(struct drm_minor *minor,
+ const char *name,
+ int (*show)(struct seq_file*, void*),
+ void *data)
{
- struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct drm_device *dev = minor->dev;
+ struct dentry *root = minor->debugfs_root;
+ struct drm_info_list *file;
- struct vc4_debugfs_info_entry *entry =
- devm_kzalloc(dev->dev, sizeof(*entry), GFP_KERNEL);
+ file = drmm_kzalloc(dev, sizeof(*file), GFP_KERNEL);
+ if (!file)
+ return -ENOMEM;
- if (!entry)
- return;
+ file->name = name;
+ file->show = show;
+ file->data = data;
- entry->info.name = name;
- entry->info.show = show;
- entry->info.data = data;
+ drm_debugfs_create_files(file, 1, root, minor);
- list_add(&entry->link, &vc4->debugfs_list);
+ return 0;
}
-void vc4_debugfs_add_regset32(struct drm_device *drm,
- const char *name,
- struct debugfs_regset32 *regset)
+int vc4_debugfs_add_regset32(struct drm_minor *minor,
+ const char *name,
+ struct debugfs_regset32 *regset)
{
- vc4_debugfs_add_file(drm, name, vc4_debugfs_regset32, regset);
+ return vc4_debugfs_add_file(minor, name, vc4_debugfs_regset32, regset);
}
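
With the deferred debugfs_list mechanism removed, vc4_debugfs_add_file() and vc4_debugfs_add_regset32() now take the drm_minor directly and return an error, and callers invoke them from vc4_debugfs_init() or a .late_register hook. A small sketch of registering one extra file through the new interface; example_show() and the "example" file name are invented:

#include <linux/seq_file.h>

#include "vc4_drv.h"

/* Hypothetical show callback following the drm_info_list convention. */
static int example_show(struct seq_file *m, void *unused)
{
	seq_puts(m, "example state\n");
	return 0;
}

static int example_debugfs_init(struct drm_minor *minor)
{
	/* vc4_debugfs_add_file() allocates the drm_info_list with
	 * drmm_kzalloc() and registers it immediately on this minor.
	 */
	return vc4_debugfs_add_file(minor, "example", example_show, NULL);
}
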
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index ef5e3921062c..1f8f44b7b5a5 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -13,6 +13,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -84,9 +85,9 @@
/* General DPI hardware state. */
struct vc4_dpi {
- struct platform_device *pdev;
+ struct vc4_encoder encoder;
- struct drm_encoder *encoder;
+ struct platform_device *pdev;
void __iomem *regs;
@@ -96,21 +97,15 @@ struct vc4_dpi {
struct debugfs_regset32 regset;
};
-#define DPI_READ(offset) readl(dpi->regs + (offset))
-#define DPI_WRITE(offset, val) writel(val, dpi->regs + (offset))
-
-/* VC4 DPI encoder KMS struct */
-struct vc4_dpi_encoder {
- struct vc4_encoder base;
- struct vc4_dpi *dpi;
-};
-
-static inline struct vc4_dpi_encoder *
-to_vc4_dpi_encoder(struct drm_encoder *encoder)
+static inline struct vc4_dpi *
+to_vc4_dpi(struct drm_encoder *encoder)
{
- return container_of(encoder, struct vc4_dpi_encoder, base.base);
+ return container_of(encoder, struct vc4_dpi, encoder.base);
}
+#define DPI_READ(offset) readl(dpi->regs + (offset))
+#define DPI_WRITE(offset, val) writel(val, dpi->regs + (offset))
+
static const struct debugfs_reg32 dpi_regs[] = {
VC4_REG32(DPI_C),
VC4_REG32(DPI_ID),
@@ -118,21 +113,27 @@ static const struct debugfs_reg32 dpi_regs[] = {
static void vc4_dpi_encoder_disable(struct drm_encoder *encoder)
{
- struct vc4_dpi_encoder *vc4_encoder = to_vc4_dpi_encoder(encoder);
- struct vc4_dpi *dpi = vc4_encoder->dpi;
+ struct drm_device *dev = encoder->dev;
+ struct vc4_dpi *dpi = to_vc4_dpi(encoder);
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
clk_disable_unprepare(dpi->pixel_clock);
+
+ drm_dev_exit(idx);
}
static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_display_mode *mode = &encoder->crtc->mode;
- struct vc4_dpi_encoder *vc4_encoder = to_vc4_dpi_encoder(encoder);
- struct vc4_dpi *dpi = vc4_encoder->dpi;
+ struct vc4_dpi *dpi = to_vc4_dpi(encoder);
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector = NULL, *connector_scan;
u32 dpi_c = DPI_ENABLE;
+ int idx;
int ret;
/* Look up the connector attached to DPI so we can get the
@@ -212,6 +213,9 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
dpi_c |= DPI_VSYNC_DISABLE;
}
+ if (!drm_dev_enter(dev, &idx))
+ return;
+
DPI_WRITE(DPI_C, dpi_c);
ret = clk_set_rate(dpi->pixel_clock, mode->clock * 1000);
@@ -221,6 +225,8 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
ret = clk_prepare_enable(dpi->pixel_clock);
if (ret)
DRM_ERROR("Failed to set clock rate: %d\n", ret);
+
+ drm_dev_exit(idx);
}
static enum drm_mode_status vc4_dpi_encoder_mode_valid(struct drm_encoder *encoder,
@@ -238,6 +244,23 @@ static const struct drm_encoder_helper_funcs vc4_dpi_encoder_helper_funcs = {
.mode_valid = vc4_dpi_encoder_mode_valid,
};
+static int vc4_dpi_late_register(struct drm_encoder *encoder)
+{
+ struct drm_device *drm = encoder->dev;
+ struct vc4_dpi *dpi = to_vc4_dpi(encoder);
+ int ret;
+
+ ret = vc4_debugfs_add_regset32(drm->primary, "dpi_regs", &dpi->regset);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct drm_encoder_funcs vc4_dpi_encoder_funcs = {
+ .late_register = vc4_dpi_late_register,
+};
+
static const struct of_device_id vc4_dpi_dt_match[] = {
{ .compatible = "brcm,bcm2835-dpi", .data = NULL },
{}
@@ -248,10 +271,11 @@ static const struct of_device_id vc4_dpi_dt_match[] = {
*/
static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
{
+ struct drm_device *drm = dpi->encoder.base.dev;
struct device *dev = &dpi->pdev->dev;
struct drm_bridge *bridge;
- bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
+ bridge = drmm_of_get_bridge(drm, dev->of_node, 0, 0);
if (IS_ERR(bridge)) {
/* If nothing was connected in the DT, that's not an
* error.
@@ -262,30 +286,28 @@ static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
return PTR_ERR(bridge);
}
- return drm_bridge_attach(dpi->encoder, bridge, NULL, 0);
+ return drm_bridge_attach(&dpi->encoder.base, bridge, NULL, 0);
+}
+
+static void vc4_dpi_disable_clock(void *ptr)
+{
+ struct vc4_dpi *dpi = ptr;
+
+ clk_disable_unprepare(dpi->core_clock);
}
static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_dpi *dpi;
- struct vc4_dpi_encoder *vc4_dpi_encoder;
int ret;
- dpi = devm_kzalloc(dev, sizeof(*dpi), GFP_KERNEL);
+ dpi = drmm_kzalloc(drm, sizeof(*dpi), GFP_KERNEL);
if (!dpi)
return -ENOMEM;
- vc4_dpi_encoder = devm_kzalloc(dev, sizeof(*vc4_dpi_encoder),
- GFP_KERNEL);
- if (!vc4_dpi_encoder)
- return -ENOMEM;
- vc4_dpi_encoder->base.type = VC4_ENCODER_TYPE_DPI;
- vc4_dpi_encoder->dpi = dpi;
- dpi->encoder = &vc4_dpi_encoder->base.base;
-
+ dpi->encoder.type = VC4_ENCODER_TYPE_DPI;
dpi->pdev = pdev;
dpi->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(dpi->regs))
@@ -307,6 +329,7 @@ static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
DRM_ERROR("Failed to get core clock: %d\n", ret);
return ret;
}
+
dpi->pixel_clock = devm_clk_get(dev, "pixel");
if (IS_ERR(dpi->pixel_clock)) {
ret = PTR_ERR(dpi->pixel_clock);
@@ -316,49 +339,35 @@ static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
}
ret = clk_prepare_enable(dpi->core_clock);
- if (ret)
+ if (ret) {
DRM_ERROR("Failed to turn on core clock: %d\n", ret);
+ return ret;
+ }
- drm_simple_encoder_init(drm, dpi->encoder, DRM_MODE_ENCODER_DPI);
- drm_encoder_helper_add(dpi->encoder, &vc4_dpi_encoder_helper_funcs);
+ ret = devm_add_action_or_reset(dev, vc4_dpi_disable_clock, dpi);
+ if (ret)
+ return ret;
- ret = vc4_dpi_init_bridge(dpi);
+ ret = drmm_encoder_init(drm, &dpi->encoder.base,
+ &vc4_dpi_encoder_funcs,
+ DRM_MODE_ENCODER_DPI,
+ NULL);
if (ret)
- goto err_destroy_encoder;
+ return ret;
- dev_set_drvdata(dev, dpi);
+ drm_encoder_helper_add(&dpi->encoder.base, &vc4_dpi_encoder_helper_funcs);
- vc4->dpi = dpi;
+ ret = vc4_dpi_init_bridge(dpi);
+ if (ret)
+ return ret;
- vc4_debugfs_add_regset32(drm, "dpi_regs", &dpi->regset);
+ dev_set_drvdata(dev, dpi);
return 0;
-
-err_destroy_encoder:
- drm_encoder_cleanup(dpi->encoder);
- clk_disable_unprepare(dpi->core_clock);
- return ret;
-}
-
-static void vc4_dpi_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
- struct vc4_dpi *dpi = dev_get_drvdata(dev);
-
- drm_of_panel_bridge_remove(dev->of_node, 0, 0);
-
- drm_encoder_cleanup(dpi->encoder);
-
- clk_disable_unprepare(dpi->core_clock);
-
- vc4->dpi = NULL;
}
static const struct component_ops vc4_dpi_ops = {
.bind = vc4_dpi_bind,
- .unbind = vc4_dpi_unbind,
};
static int vc4_dpi_dev_probe(struct platform_device *pdev)
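
The DPI conversion above drops the separate vc4_dpi_encoder wrapper, switches to drmm_encoder_init(), and replaces the hand-rolled error and unbind paths with a devm action that disables the core clock. A condensed sketch of that devm pattern, with the example_* names invented for illustration:

#include <linux/clk.h>
#include <linux/device.h>

struct example_output {			/* hypothetical, mirrors vc4_dpi */
	struct clk *core_clock;
};

static void example_disable_clock(void *ptr)
{
	struct example_output *out = ptr;

	clk_disable_unprepare(out->core_clock);
}

static int example_enable_clock(struct device *dev, struct example_output *out)
{
	int ret;

	ret = clk_prepare_enable(out->core_clock);
	if (ret)
		return ret;

	/* Undo the enable automatically when the device is unbound, or right
	 * away if registering the action fails, so no .unbind handler is needed.
	 */
	return devm_add_action_or_reset(dev, example_disable_clock, out);
}
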
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 292d1b6a01b6..2027063fdc30 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -33,7 +33,6 @@
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_vblank.h>
@@ -86,7 +85,7 @@ static int vc5_dumb_create(struct drm_file *file_priv,
if (ret)
return ret;
- return drm_gem_cma_dumb_create_internal(file_priv, dev, args);
+ return drm_gem_dma_dumb_create_internal(file_priv, dev, args);
}
static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
@@ -212,7 +211,7 @@ static const struct drm_driver vc4_drm_driver = {
.gem_create_object = vc4_create_object,
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_bo_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_bo_dumb_create),
.ioctls = vc4_drm_ioctls,
.num_ioctls = ARRAY_SIZE(vc4_drm_ioctls),
@@ -235,7 +234,7 @@ static const struct drm_driver vc5_drm_driver = {
.debugfs_init = vc4_debugfs_init,
#endif
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc5_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(vc5_dumb_create),
.fops = &vc4_drm_fops,
@@ -267,6 +266,13 @@ static void vc4_match_add_drivers(struct device *dev,
}
}
+static void vc4_component_unbind_all(void *ptr)
+{
+ struct vc4_dev *vc4 = ptr;
+
+ component_unbind_all(vc4->dev, &vc4->base);
+}
+
static const struct of_device_id vc4_dma_range_matches[] = {
{ .compatible = "brcm,bcm2711-hvs" },
{ .compatible = "brcm,bcm2835-hvs" },
@@ -310,13 +316,16 @@ static int vc4_drm_bind(struct device *dev)
if (IS_ERR(vc4))
return PTR_ERR(vc4);
vc4->is_vc5 = is_vc5;
+ vc4->dev = dev;
drm = &vc4->base;
platform_set_drvdata(pdev, drm);
INIT_LIST_HEAD(&vc4->debugfs_list);
if (!is_vc5) {
- mutex_init(&vc4->bin_bo_lock);
+ ret = drmm_mutex_init(drm, &vc4->bin_bo_lock);
+ if (ret)
+ return ret;
ret = vc4_bo_cache_init(drm);
if (ret)
@@ -360,6 +369,10 @@ static int vc4_drm_bind(struct device *dev)
if (ret)
return ret;
+ ret = devm_add_action_or_reset(dev, vc4_component_unbind_all, vc4);
+ if (ret)
+ return ret;
+
ret = vc4_plane_create_additional_planes(drm);
if (ret)
goto unbind_all;
@@ -380,8 +393,6 @@ static int vc4_drm_bind(struct device *dev)
return 0;
unbind_all:
- component_unbind_all(dev, drm);
-
return ret;
}
@@ -389,8 +400,7 @@ static void vc4_drm_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- drm_dev_unregister(drm);
-
+ drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
}
@@ -480,6 +490,7 @@ module_init(vc4_drm_register);
module_exit(vc4_drm_unregister);
MODULE_ALIAS("platform:vc4-drm");
+MODULE_SOFTDEP("pre: snd-soc-hdmi-codec");
MODULE_DESCRIPTION("Broadcom VC4 DRM Driver");
MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
MODULE_LICENSE("GPL v2");
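
vc4_drm_unbind() now calls drm_dev_unplug() rather than drm_dev_unregister(), which is what makes the drm_dev_enter()/drm_dev_exit() guards added throughout this series effective: open file descriptors stay valid, but every guarded hardware path bails out. Code can also check the state explicitly, as in this sketch with the invented example_ioctl_body():

#include <linux/errno.h>
#include <drm/drm_drv.h>

static int example_ioctl_body(struct drm_device *drm)
{
	/* After drm_dev_unplug() the hardware is gone even though userspace
	 * may still hold the device node open; bail out instead of touching it.
	 */
	if (drm_dev_is_unplugged(drm))
		return -ENODEV;

	/* ... hardware access, guarded elsewhere by drm_dev_enter()/exit() ... */
	return 0;
}
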
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 1beb96b77b8c..418a8242691f 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -14,7 +14,7 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mm.h>
#include <drm/drm_modeset_lock.h>
@@ -76,6 +76,7 @@ struct vc4_perfmon {
struct vc4_dev {
struct drm_device base;
+ struct device *dev;
bool is_vc5;
@@ -83,9 +84,6 @@ struct vc4_dev {
struct vc4_hvs *hvs;
struct vc4_v3d *v3d;
- struct vc4_dpi *dpi;
- struct vc4_vec *vec;
- struct vc4_txp *txp;
struct vc4_hang_state *hang_state;
@@ -241,7 +239,7 @@ to_vc4_dev(struct drm_device *dev)
}
struct vc4_bo {
- struct drm_gem_cma_object base;
+ struct drm_gem_dma_object base;
/* seqno of the last job to render using this BO. */
uint64_t seqno;
@@ -290,7 +288,7 @@ struct vc4_bo {
static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
- return container_of(to_drm_gem_cma_obj(bo), struct vc4_bo, base);
+ return container_of(to_drm_gem_dma_obj(bo), struct vc4_bo, base);
}
struct vc4_fence {
@@ -458,6 +456,8 @@ to_vc4_encoder(struct drm_encoder *encoder)
}
struct vc4_crtc_data {
+ const char *debugfs_name;
+
/* Bitmask of channels (FIFOs) of the HVS that the output can source from */
unsigned int hvs_available_channels;
@@ -475,8 +475,6 @@ struct vc4_pv_data {
u8 pixels_per_clock;
enum vc4_encoder_type encoder_types[4];
- const char *debugfs_name;
-
};
struct vc4_crtc {
@@ -604,14 +602,14 @@ struct vc4_exec_info {
/* This is the array of BOs that were looked up at the start of exec.
* Command validation will use indices into this array.
*/
- struct drm_gem_cma_object **bo;
+ struct drm_gem_dma_object **bo;
uint32_t bo_count;
/* List of BOs that are being written by the RCL. Other than
* the binner temporary storage, this is all the BOs written
* by the job.
*/
- struct drm_gem_cma_object *rcl_write_bo[4];
+ struct drm_gem_dma_object *rcl_write_bo[4];
uint32_t rcl_write_bo_count;
/* Pointers for our position in vc4->job_list */
@@ -630,7 +628,7 @@ struct vc4_exec_info {
/* This is the BO where we store the validated command lists, shader
* records, and uniforms.
*/
- struct drm_gem_cma_object *exec_bo;
+ struct drm_gem_dma_object *exec_bo;
/**
* This tracks the per-shader-record state (packet 64) that
@@ -843,6 +841,7 @@ int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);
+int vc4_bo_debugfs_init(struct drm_minor *minor);
/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
@@ -850,7 +849,6 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc);
int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
const struct drm_crtc_funcs *crtc_funcs,
const struct drm_crtc_helper_funcs *crtc_helper_funcs);
-void vc4_crtc_destroy(struct drm_crtc *crtc);
int vc4_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
@@ -861,6 +859,8 @@ void vc4_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
void vc4_crtc_reset(struct drm_crtc *crtc);
void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
+void vc4_crtc_send_vblank(struct drm_crtc *crtc);
+int vc4_crtc_late_register(struct drm_crtc *crtc);
void vc4_crtc_get_margins(struct drm_crtc_state *state,
unsigned int *left, unsigned int *right,
unsigned int *top, unsigned int *bottom);
@@ -868,25 +868,27 @@ void vc4_crtc_get_margins(struct drm_crtc_state *state,
/* vc4_debugfs.c */
void vc4_debugfs_init(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
-void vc4_debugfs_add_file(struct drm_device *drm,
- const char *filename,
- int (*show)(struct seq_file*, void*),
- void *data);
-void vc4_debugfs_add_regset32(struct drm_device *drm,
- const char *filename,
- struct debugfs_regset32 *regset);
+int vc4_debugfs_add_file(struct drm_minor *minor,
+ const char *filename,
+ int (*show)(struct seq_file*, void*),
+ void *data);
+int vc4_debugfs_add_regset32(struct drm_minor *minor,
+ const char *filename,
+ struct debugfs_regset32 *regset);
#else
-static inline void vc4_debugfs_add_file(struct drm_device *drm,
- const char *filename,
- int (*show)(struct seq_file*, void*),
- void *data)
+static inline int vc4_debugfs_add_file(struct drm_minor *minor,
+ const char *filename,
+ int (*show)(struct seq_file*, void*),
+ void *data)
{
+ return 0;
}
-static inline void vc4_debugfs_add_regset32(struct drm_device *drm,
- const char *filename,
- struct debugfs_regset32 *regset)
+static inline int vc4_debugfs_add_regset32(struct drm_minor *minor,
+ const char *filename,
+ struct debugfs_regset32 *regset)
{
+ return 0;
}
#endif
@@ -952,13 +954,15 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state)
void vc4_hvs_dump_state(struct vc4_hvs *hvs);
void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel);
void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel);
+int vc4_hvs_debugfs_init(struct drm_minor *minor);
/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);
/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
- enum drm_plane_type type);
+ enum drm_plane_type type,
+ uint32_t possible_crtcs);
int vc4_plane_create_additional_planes(struct drm_device *dev);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
@@ -973,6 +977,7 @@ int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used);
void vc4_v3d_bin_bo_put(struct vc4_dev *vc4);
int vc4_v3d_pm_get(struct vc4_dev *vc4);
void vc4_v3d_pm_put(struct vc4_dev *vc4);
+int vc4_v3d_debugfs_init(struct drm_minor *minor);
/* vc4_validate.c */
int
@@ -984,19 +989,19 @@ vc4_validate_bin_cl(struct drm_device *dev,
int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);
-struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
+struct drm_gem_dma_object *vc4_use_bo(struct vc4_exec_info *exec,
uint32_t hindex);
int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);
bool vc4_check_tex_size(struct vc4_exec_info *exec,
- struct drm_gem_cma_object *fbo,
+ struct drm_gem_dma_object *fbo,
uint32_t offset, uint8_t tiling_format,
uint32_t width, uint32_t height, uint8_t cpp);
/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
-vc4_validate_shader(struct drm_gem_cma_object *shader_obj);
+vc4_validate_shader(struct drm_gem_dma_object *shader_obj);
/* vc4_perfmon.c */
void vc4_perfmon_get(struct vc4_perfmon *perfmon);
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index b7b2c76770dc..878e05d79e81 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -549,10 +549,13 @@ struct vc4_dsi_variant {
/* General DSI hardware state. */
struct vc4_dsi {
+ struct vc4_encoder encoder;
+ struct mipi_dsi_host dsi_host;
+
+ struct kref kref;
+
struct platform_device *pdev;
- struct mipi_dsi_host dsi_host;
- struct drm_encoder *encoder;
struct drm_bridge *bridge;
struct list_head bridge_chain;
@@ -600,6 +603,12 @@ struct vc4_dsi {
#define host_to_dsi(host) container_of(host, struct vc4_dsi, dsi_host)
+static inline struct vc4_dsi *
+to_vc4_dsi(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct vc4_dsi, encoder.base);
+}
+
static inline void
dsi_dma_workaround_write(struct vc4_dsi *dsi, u32 offset, u32 val)
{
@@ -644,18 +653,6 @@ dsi_dma_workaround_write(struct vc4_dsi *dsi, u32 offset, u32 val)
DSI_WRITE(dsi->variant->port ? DSI1_##offset : DSI0_##offset, val)
#define DSI_PORT_BIT(bit) (dsi->variant->port ? DSI1_##bit : DSI0_##bit)
-/* VC4 DSI encoder KMS struct */
-struct vc4_dsi_encoder {
- struct vc4_encoder base;
- struct vc4_dsi *dsi;
-};
-
-static inline struct vc4_dsi_encoder *
-to_vc4_dsi_encoder(struct drm_encoder *encoder)
-{
- return container_of(encoder, struct vc4_dsi_encoder, base.base);
-}
-
static const struct debugfs_reg32 dsi0_regs[] = {
VC4_REG32(DSI0_CTRL),
VC4_REG32(DSI0_STAT),
@@ -795,8 +792,7 @@ dsi_esc_timing(u32 ns)
static void vc4_dsi_encoder_disable(struct drm_encoder *encoder)
{
- struct vc4_dsi_encoder *vc4_encoder = to_vc4_dsi_encoder(encoder);
- struct vc4_dsi *dsi = vc4_encoder->dsi;
+ struct vc4_dsi *dsi = to_vc4_dsi(encoder);
struct device *dev = &dsi->pdev->dev;
struct drm_bridge *iter;
@@ -839,8 +835,7 @@ static bool vc4_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct vc4_dsi_encoder *vc4_encoder = to_vc4_dsi_encoder(encoder);
- struct vc4_dsi *dsi = vc4_encoder->dsi;
+ struct vc4_dsi *dsi = to_vc4_dsi(encoder);
struct clk *phy_parent = clk_get_parent(dsi->pll_phy_clock);
unsigned long parent_rate = clk_get_rate(phy_parent);
unsigned long pixel_clock_hz = mode->clock * 1000;
@@ -875,8 +870,7 @@ static bool vc4_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
{
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
- struct vc4_dsi_encoder *vc4_encoder = to_vc4_dsi_encoder(encoder);
- struct vc4_dsi *dsi = vc4_encoder->dsi;
+ struct vc4_dsi *dsi = to_vc4_dsi(encoder);
struct device *dev = &dsi->pdev->dev;
bool debug_dump_regs = false;
struct drm_bridge *iter;
@@ -1378,6 +1372,24 @@ static const struct drm_encoder_helper_funcs vc4_dsi_encoder_helper_funcs = {
.mode_fixup = vc4_dsi_encoder_mode_fixup,
};
+static int vc4_dsi_late_register(struct drm_encoder *encoder)
+{
+ struct drm_device *drm = encoder->dev;
+ struct vc4_dsi *dsi = to_vc4_dsi(encoder);
+ int ret;
+
+ ret = vc4_debugfs_add_regset32(drm->primary, dsi->variant->debugfs_name,
+ &dsi->regset);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct drm_encoder_funcs vc4_dsi_encoder_funcs = {
+ .late_register = vc4_dsi_late_register,
+};
+
static const struct vc4_dsi_variant bcm2711_dsi1_variant = {
.port = 1,
.debugfs_name = "dsi1_regs",
@@ -1564,26 +1576,50 @@ static void vc4_dsi_dma_chan_release(void *ptr)
dsi->reg_dma_chan = NULL;
}
+static void vc4_dsi_release(struct kref *kref)
+{
+ struct vc4_dsi *dsi =
+ container_of(kref, struct vc4_dsi, kref);
+
+ kfree(dsi);
+}
+
+static void vc4_dsi_get(struct vc4_dsi *dsi)
+{
+ kref_get(&dsi->kref);
+}
+
+static void vc4_dsi_put(struct vc4_dsi *dsi)
+{
+ kref_put(&dsi->kref, &vc4_dsi_release);
+}
+
+static void vc4_dsi_release_action(struct drm_device *drm, void *ptr)
+{
+ struct vc4_dsi *dsi = ptr;
+
+ vc4_dsi_put(dsi);
+}
+
static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dsi *dsi = dev_get_drvdata(dev);
- struct vc4_dsi_encoder *vc4_dsi_encoder;
+ struct drm_encoder *encoder = &dsi->encoder.base;
int ret;
- dsi->variant = of_device_get_match_data(dev);
+ vc4_dsi_get(dsi);
- vc4_dsi_encoder = devm_kzalloc(dev, sizeof(*vc4_dsi_encoder),
- GFP_KERNEL);
- if (!vc4_dsi_encoder)
- return -ENOMEM;
+ ret = drmm_add_action_or_reset(drm, vc4_dsi_release_action, dsi);
+ if (ret)
+ return ret;
+
+ dsi->variant = of_device_get_match_data(dev);
INIT_LIST_HEAD(&dsi->bridge_chain);
- vc4_dsi_encoder->base.type = dsi->variant->port ?
- VC4_ENCODER_TYPE_DSI1 : VC4_ENCODER_TYPE_DSI0;
- vc4_dsi_encoder->dsi = dsi;
- dsi->encoder = &vc4_dsi_encoder->base.base;
+ dsi->encoder.type = dsi->variant->port ?
+ VC4_ENCODER_TYPE_DSI1 : VC4_ENCODER_TYPE_DSI0;
dsi->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(dsi->regs))
@@ -1687,7 +1723,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- dsi->bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
+ dsi->bridge = drmm_of_get_bridge(drm, dev->of_node, 0, 0);
if (IS_ERR(dsi->bridge))
return PTR_ERR(dsi->bridge);
@@ -1702,10 +1738,20 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- drm_simple_encoder_init(drm, dsi->encoder, DRM_MODE_ENCODER_DSI);
- drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs);
+ ret = drmm_encoder_init(drm, encoder,
+ &vc4_dsi_encoder_funcs,
+ DRM_MODE_ENCODER_DSI,
+ NULL);
+ if (ret)
+ return ret;
+
+ drm_encoder_helper_add(encoder, &vc4_dsi_encoder_helper_funcs);
- ret = drm_bridge_attach(dsi->encoder, dsi->bridge, NULL, 0);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ ret = drm_bridge_attach(encoder, dsi->bridge, NULL, 0);
if (ret)
return ret;
/* Disable the atomic helper calls into the bridge. We
@@ -1713,11 +1759,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
* from our driver, since we need to sequence them within the
* encoder's enable/disable paths.
*/
- list_splice_init(&dsi->encoder->bridge_chain, &dsi->bridge_chain);
-
- vc4_debugfs_add_regset32(drm, dsi->variant->debugfs_name, &dsi->regset);
-
- pm_runtime_enable(dev);
+ list_splice_init(&encoder->bridge_chain, &dsi->bridge_chain);
return 0;
}
@@ -1726,15 +1768,13 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
void *data)
{
struct vc4_dsi *dsi = dev_get_drvdata(dev);
-
- pm_runtime_disable(dev);
+ struct drm_encoder *encoder = &dsi->encoder.base;
/*
* Restore the bridge_chain so the bridge detach procedure can happen
* normally.
*/
- list_splice_init(&dsi->bridge_chain, &dsi->encoder->bridge_chain);
- drm_encoder_cleanup(dsi->encoder);
+ list_splice_init(&dsi->bridge_chain, &encoder->bridge_chain);
}
static const struct component_ops vc4_dsi_ops = {
@@ -1747,11 +1787,12 @@ static int vc4_dsi_dev_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct vc4_dsi *dsi;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
if (!dsi)
return -ENOMEM;
dev_set_drvdata(dev, dsi);
+ kref_init(&dsi->kref);
dsi->pdev = pdev;
dsi->dsi_host.ops = &vc4_dsi_host_ops;
dsi->dsi_host.dev = dev;
@@ -1766,6 +1807,8 @@ static int vc4_dsi_dev_remove(struct platform_device *pdev)
struct vc4_dsi *dsi = dev_get_drvdata(dev);
mipi_dsi_host_unregister(&dsi->dsi_host);
+ vc4_dsi_put(dsi);
+
return 0;
}
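
The DSI hunks above move struct vc4_dsi from a devm allocation to a kref-counted one: the structure is shared between the mipi_dsi_host registered at probe time and the encoder created at bind time, and whichever side goes away last frees it. A condensed sketch of that split lifetime, with the example_* names invented for illustration (kref_init() would be called at probe time):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <drm/drm_managed.h>

struct example_ctx {			/* hypothetical, mirrors struct vc4_dsi */
	struct kref kref;
};

static void example_release(struct kref *kref)
{
	kfree(container_of(kref, struct example_ctx, kref));
}

/* DRM-managed action: drops the bind-time reference when the drm_device is
 * released; the probe-time reference is dropped from the .remove() callback.
 */
static void example_release_action(struct drm_device *drm, void *ptr)
{
	struct example_ctx *ctx = ptr;

	kref_put(&ctx->kref, example_release);
}

static int example_bind(struct drm_device *drm, struct example_ctx *ctx)
{
	kref_get(&ctx->kref);
	return drmm_add_action_or_reset(drm, example_release_action, ctx);
}
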
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index fe10d9c3fff8..628d40ff3aa1 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -126,7 +126,7 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
goto err_delete_handle;
}
bo_state[i].handle = handle;
- bo_state[i].paddr = vc4_bo->base.paddr;
+ bo_state[i].paddr = vc4_bo->base.dma_addr;
bo_state[i].size = vc4_bo->base.base.size;
}
@@ -764,7 +764,7 @@ vc4_cl_lookup_bos(struct drm_device *dev,
}
exec->bo = kvmalloc_array(exec->bo_count,
- sizeof(struct drm_gem_cma_object *),
+ sizeof(struct drm_gem_dma_object *),
GFP_KERNEL | __GFP_ZERO);
if (!exec->bo) {
DRM_ERROR("Failed to allocate validated BO pointers\n");
@@ -797,7 +797,7 @@ vc4_cl_lookup_bos(struct drm_device *dev,
}
drm_gem_object_get(bo);
- exec->bo[i] = (struct drm_gem_cma_object *)bo;
+ exec->bo[i] = (struct drm_gem_dma_object *)bo;
}
spin_unlock(&file_priv->table_lock);
@@ -917,16 +917,16 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
&exec->unref_list);
- exec->ct0ca = exec->exec_bo->paddr + bin_offset;
+ exec->ct0ca = exec->exec_bo->dma_addr + bin_offset;
exec->bin_u = bin;
exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
- exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
+ exec->shader_rec_p = exec->exec_bo->dma_addr + shader_rec_offset;
exec->shader_rec_size = args->shader_rec_size;
exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
- exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
+ exec->uniforms_p = exec->exec_bo->dma_addr + uniforms_offset;
exec->uniforms_size = args->uniforms_size;
ret = vc4_validate_bin_cl(dev,
@@ -1308,6 +1308,7 @@ static void vc4_gem_destroy(struct drm_device *dev, void *unused);
int vc4_gem_init(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ int ret;
if (WARN_ON_ONCE(vc4->is_vc5))
return -ENODEV;
@@ -1325,10 +1326,15 @@ int vc4_gem_init(struct drm_device *dev)
INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
- mutex_init(&vc4->power_lock);
+ ret = drmm_mutex_init(dev, &vc4->power_lock);
+ if (ret)
+ return ret;
INIT_LIST_HEAD(&vc4->purgeable.list);
- mutex_init(&vc4->purgeable.lock);
+
+ ret = drmm_mutex_init(dev, &vc4->purgeable.lock);
+ if (ret)
+ return ret;
return drmm_add_action_or_reset(dev, vc4_gem_destroy, NULL);
}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 1e5f68704d7d..596e311d6e58 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -34,6 +34,7 @@
#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_scdc_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <linux/clk.h>
@@ -41,7 +42,6 @@
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/of_address.h>
-#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/rational.h>
@@ -124,6 +124,23 @@ static unsigned long long
vc4_hdmi_encoder_compute_mode_clock(const struct drm_display_mode *mode,
unsigned int bpc, enum vc4_hdmi_output_format fmt);
+static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder)
+{
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_display_info *display = &vc4_hdmi->connector.display_info;
+
+ lockdep_assert_held(&vc4_hdmi->mutex);
+
+ if (!display->is_hdmi)
+ return false;
+
+ if (!display->hdmi.scdc.supported ||
+ !display->hdmi.scdc.scrambling.supported)
+ return false;
+
+ return true;
+}
+
static bool vc4_hdmi_mode_needs_scrambling(const struct drm_display_mode *mode,
unsigned int bpc,
enum vc4_hdmi_output_format fmt)
@@ -146,7 +163,12 @@ static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct vc4_hdmi *vc4_hdmi = node->info_ent->data;
+ struct drm_device *drm = vc4_hdmi->connector.dev;
struct drm_printer p = drm_seq_file_printer(m);
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return -ENODEV;
drm_print_regset32(&p, &vc4_hdmi->hdmi_regset);
drm_print_regset32(&p, &vc4_hdmi->hd_regset);
@@ -157,12 +179,23 @@ static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
drm_print_regset32(&p, &vc4_hdmi->ram_regset);
drm_print_regset32(&p, &vc4_hdmi->rm_regset);
+ drm_dev_exit(idx);
+
return 0;
}
static void vc4_hdmi_reset(struct vc4_hdmi *vc4_hdmi)
{
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
+ int idx;
+
+ /*
+ * We can be called by our bind callback, when the
+ * connector->dev pointer might not be initialised yet.
+ */
+ if (drm && !drm_dev_enter(drm, &idx))
+ return;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -179,11 +212,23 @@ static void vc4_hdmi_reset(struct vc4_hdmi *vc4_hdmi)
HDMI_WRITE(HDMI_SW_RESET_CONTROL, 0);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ if (drm)
+ drm_dev_exit(idx);
}
static void vc5_hdmi_reset(struct vc4_hdmi *vc4_hdmi)
{
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
+ int idx;
+
+ /*
+ * We can be called by our bind callback, when the
+ * connector->dev pointer might not be initialised yet.
+ */
+ if (drm && !drm_dev_enter(drm, &idx))
+ return;
reset_control_reset(vc4_hdmi->reset);
@@ -195,15 +240,31 @@ static void vc5_hdmi_reset(struct vc4_hdmi *vc4_hdmi)
HDMI_READ(HDMI_CLOCK_STOP) | VC4_DVP_HT_CLOCK_STOP_PIXEL);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ if (drm)
+ drm_dev_exit(idx);
}
#ifdef CONFIG_DRM_VC4_HDMI_CEC
static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi)
{
- unsigned long cec_rate = clk_get_rate(vc4_hdmi->cec_clock);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
+ unsigned long cec_rate;
unsigned long flags;
u16 clk_cnt;
u32 value;
+ int idx;
+
+ /*
+ * This function is called by our runtime_resume implementation
+ * and thus at bind time, when we haven't registered our
+ * connector yet and thus don't have a pointer to the DRM
+ * device.
+ */
+ if (drm && !drm_dev_enter(drm, &idx))
+ return;
+
+ cec_rate = clk_get_rate(vc4_hdmi->cec_clock);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -219,58 +280,180 @@ static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi)
HDMI_WRITE(HDMI_CEC_CNTRL_1, value);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ if (drm)
+ drm_dev_exit(idx);
}
#else
static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi) {}
#endif
-static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder);
+static int reset_pipe(struct drm_crtc *crtc,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ state = drm_atomic_state_alloc(crtc->dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ctx;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto out;
+ }
+
+ crtc_state->connectors_changed = true;
+
+ ret = drm_atomic_commit(state);
+out:
+ drm_atomic_state_put(state);
-static enum drm_connector_status
-vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
+ return ret;
+}
+
+static int vc4_hdmi_reset_link(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx)
{
+ struct drm_device *drm = connector->dev;
struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
- bool connected = false;
+ struct drm_encoder *encoder = &vc4_hdmi->encoder.base;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ bool scrambling_needed;
+ u8 config;
+ int ret;
- mutex_lock(&vc4_hdmi->mutex);
+ if (!connector)
+ return 0;
+
+ ret = drm_modeset_lock(&drm->mode_config.connection_mutex, ctx);
+ if (ret)
+ return ret;
+
+ conn_state = connector->state;
+ crtc = conn_state->crtc;
+ if (!crtc)
+ return 0;
+
+ ret = drm_modeset_lock(&crtc->mutex, ctx);
+ if (ret)
+ return ret;
+
+ crtc_state = crtc->state;
+ if (!crtc_state->active)
+ return 0;
+
+ if (!vc4_hdmi_supports_scrambling(encoder))
+ return 0;
+
+ scrambling_needed = vc4_hdmi_mode_needs_scrambling(&vc4_hdmi->saved_adjusted_mode,
+ vc4_hdmi->output_bpc,
+ vc4_hdmi->output_format);
+ if (!scrambling_needed)
+ return 0;
+
+ if (conn_state->commit &&
+ !try_wait_for_completion(&conn_state->commit->hw_done))
+ return 0;
+
+ ret = drm_scdc_readb(connector->ddc, SCDC_TMDS_CONFIG, &config);
+ if (ret < 0) {
+ drm_err(drm, "Failed to read TMDS config: %d\n", ret);
+ return 0;
+ }
+
+ if (!!(config & SCDC_SCRAMBLING_ENABLE) == scrambling_needed)
+ return 0;
+
+ /*
+ * HDMI 2.0 says that one should not send scrambled data
+ * prior to configuring the sink scrambling, and that
+ * TMDS clock/data transmission should be suspended when
+ * changing the TMDS clock rate in the sink. So let's
+ * just do a full modeset here, even though some sinks
+	 * would be perfectly happy if we were to just reconfigure
+ * the SCDC settings on the fly.
+ */
+ return reset_pipe(crtc, ctx);
+}
+
+static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
+ struct drm_modeset_acquire_ctx *ctx,
+ enum drm_connector_status status)
+{
+ struct drm_connector *connector = &vc4_hdmi->connector;
+ struct edid *edid;
+
+ /*
+ * NOTE: This function should really be called with
+ * vc4_hdmi->mutex held, but doing so results in reentrancy
+ * issues since cec_s_phys_addr_from_edid might call
+	 * .adap_enable, which leads to that function being called with
+	 * our mutex held.
+	 *
+	 * A similar situation occurs with vc4_hdmi_reset_link(), which
+	 * will call into our KMS hooks if scrambling was enabled.
+ *
+ * Concurrency isn't an issue at the moment since we don't share
+ * any state with any of the other frameworks so we can ignore
+ * the lock for now.
+ */
+
+ if (status == connector_status_disconnected) {
+ cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
+ return;
+ }
+
+ edid = drm_get_edid(connector, vc4_hdmi->ddc);
+ if (!edid)
+ return;
+
+ cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
+ kfree(edid);
+
+ vc4_hdmi_reset_link(connector, ctx);
+}
+
+static int vc4_hdmi_connector_detect_ctx(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
+ enum drm_connector_status status = connector_status_disconnected;
+
+ /*
+ * NOTE: This function should really take vc4_hdmi->mutex, but
+ * doing so results in reentrancy issues since
+ * vc4_hdmi_handle_hotplug() can call into other functions that
+ * would take the mutex while it's held here.
+ *
+ * Concurrency isn't an issue at the moment since we don't share
+ * any state with any of the other frameworks so we can ignore
+ * the lock for now.
+ */
WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
if (vc4_hdmi->hpd_gpio) {
if (gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio))
- connected = true;
+ status = connector_status_connected;
} else {
if (vc4_hdmi->variant->hp_detect &&
vc4_hdmi->variant->hp_detect(vc4_hdmi))
- connected = true;
+ status = connector_status_connected;
}
- if (connected) {
- if (connector->status != connector_status_connected) {
- struct edid *edid = drm_get_edid(connector, vc4_hdmi->ddc);
-
- if (edid) {
- cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
- kfree(edid);
- }
- }
-
- vc4_hdmi_enable_scrambling(&vc4_hdmi->encoder.base);
- pm_runtime_put(&vc4_hdmi->pdev->dev);
- mutex_unlock(&vc4_hdmi->mutex);
- return connector_status_connected;
- }
-
- cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
+ vc4_hdmi_handle_hotplug(vc4_hdmi, ctx, status);
pm_runtime_put(&vc4_hdmi->pdev->dev);
- mutex_unlock(&vc4_hdmi->mutex);
- return connector_status_disconnected;
-}
-static void vc4_hdmi_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
+ return status;
}
static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
@@ -279,14 +462,21 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
int ret = 0;
struct edid *edid;
- mutex_lock(&vc4_hdmi->mutex);
+ /*
+ * NOTE: This function should really take vc4_hdmi->mutex, but
+ * doing so results in reentrancy issues since
+ * cec_s_phys_addr_from_edid might call .adap_enable, which
+	 * leads to that function being called with our mutex held.
+ *
+ * Concurrency isn't an issue at the moment since we don't share
+ * any state with any of the other frameworks so we can ignore
+ * the lock for now.
+ */
edid = drm_get_edid(connector, vc4_hdmi->ddc);
cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
- if (!edid) {
- ret = -ENODEV;
- goto out;
- }
+ if (!edid)
+ return -ENODEV;
drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
@@ -294,7 +484,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
if (vc4_hdmi->disable_4kp60) {
struct drm_device *drm = connector->dev;
- struct drm_display_mode *mode;
+ const struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->probed_modes, head) {
if (vc4_hdmi_mode_needs_scrambling(mode, 8, VC4_HDMI_OUTPUT_RGB)) {
@@ -304,9 +494,6 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
}
}
-out:
- mutex_unlock(&vc4_hdmi->mutex);
-
return ret;
}
@@ -378,15 +565,14 @@ vc4_hdmi_connector_duplicate_state(struct drm_connector *connector)
}
static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
- .detect = vc4_hdmi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = vc4_hdmi_connector_destroy,
.reset = vc4_hdmi_connector_reset,
.atomic_duplicate_state = vc4_hdmi_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs = {
+ .detect_ctx = vc4_hdmi_connector_detect_ctx,
.get_modes = vc4_hdmi_connector_get_modes,
.atomic_check = vc4_hdmi_connector_atomic_check,
};
@@ -398,10 +584,13 @@ static int vc4_hdmi_connector_init(struct drm_device *dev,
struct drm_encoder *encoder = &vc4_hdmi->encoder.base;
int ret;
- drm_connector_init_with_ddc(dev, connector,
- &vc4_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA,
- vc4_hdmi->ddc);
+ ret = drmm_connector_init(dev, connector,
+ &vc4_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ vc4_hdmi->ddc);
+ if (ret)
+ return ret;
+
drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs);
/*
@@ -444,25 +633,34 @@ static int vc4_hdmi_stop_packet(struct drm_encoder *encoder,
bool poll)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
u32 packet_id = type - 0x80;
unsigned long flags;
+ int ret = 0;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return -ENODEV;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_RAM_PACKET_CONFIG,
HDMI_READ(HDMI_RAM_PACKET_CONFIG) & ~BIT(packet_id));
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
- if (!poll)
- return 0;
+ if (poll) {
+ ret = wait_for(!(HDMI_READ(HDMI_RAM_PACKET_STATUS) &
+ BIT(packet_id)), 100);
+ }
- return wait_for(!(HDMI_READ(HDMI_RAM_PACKET_STATUS) &
- BIT(packet_id)), 100);
+ drm_dev_exit(idx);
+ return ret;
}
static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
union hdmi_infoframe *frame)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
u32 packet_id = frame->any.type - 0x80;
const struct vc4_hdmi_register *ram_packet_start =
&vc4_hdmi->variant->registers[HDMI_RAM_PACKET_START];
@@ -475,6 +673,10 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
unsigned long flags;
ssize_t len, i;
int ret;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
WARN_ONCE(!(HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
VC4_HDMI_RAM_PACKET_ENABLE),
@@ -482,12 +684,12 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
len = hdmi_infoframe_pack(frame, buffer, sizeof(buffer));
if (len < 0)
- return;
+ goto out;
ret = vc4_hdmi_stop_packet(encoder, frame->any.type, true);
if (ret) {
DRM_ERROR("Failed to wait for infoframe to go idle: %d\n", ret);
- return;
+ goto out;
}
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -523,6 +725,9 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
BIT(packet_id)), 100);
if (ret)
DRM_ERROR("Failed to wait for infoframe to start: %d\n", ret);
+
+out:
+ drm_dev_exit(idx);
}
static void vc4_hdmi_avi_infoframe_colorspace(struct hdmi_avi_infoframe *frame,
@@ -649,35 +854,19 @@ static void vc4_hdmi_set_infoframes(struct drm_encoder *encoder)
vc4_hdmi_set_hdr_infoframe(encoder);
}
-static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
- struct drm_display_info *display = &vc4_hdmi->connector.display_info;
-
- lockdep_assert_held(&vc4_hdmi->mutex);
-
- if (!display->is_hdmi)
- return false;
-
- if (!display->hdmi.scdc.supported ||
- !display->hdmi.scdc.scrambling.supported)
- return false;
-
- return true;
-}
-
#define SCRAMBLING_POLLING_DELAY_MS 1000
static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
- struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
+ struct drm_device *drm = vc4_hdmi->connector.dev;
+ const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
unsigned long flags;
+ int idx;
lockdep_assert_held(&vc4_hdmi->mutex);
- if (!vc4_hdmi_supports_scrambling(encoder, mode))
+ if (!vc4_hdmi_supports_scrambling(encoder))
return;
if (!vc4_hdmi_mode_needs_scrambling(mode,
@@ -685,6 +874,9 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
vc4_hdmi->output_format))
return;
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
drm_scdc_set_high_tmds_clock_ratio(vc4_hdmi->ddc, true);
drm_scdc_set_scrambling(vc4_hdmi->ddc, true);
@@ -693,6 +885,8 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
VC5_HDMI_SCRAMBLER_CTL_ENABLE);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ drm_dev_exit(idx);
+
vc4_hdmi->scdc_enabled = true;
queue_delayed_work(system_wq, &vc4_hdmi->scrambling_work,
@@ -702,7 +896,9 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
+ int idx;
lockdep_assert_held(&vc4_hdmi->mutex);
@@ -714,6 +910,9 @@ static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder)
if (delayed_work_pending(&vc4_hdmi->scrambling_work))
cancel_delayed_work_sync(&vc4_hdmi->scrambling_work);
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_SCRAMBLER_CTL, HDMI_READ(HDMI_SCRAMBLER_CTL) &
~VC5_HDMI_SCRAMBLER_CTL_ENABLE);
@@ -721,6 +920,8 @@ static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder)
drm_scdc_set_scrambling(vc4_hdmi->ddc, false);
drm_scdc_set_high_tmds_clock_ratio(vc4_hdmi->ddc, false);
+
+ drm_dev_exit(idx);
}
static void vc4_hdmi_scrambling_wq(struct work_struct *work)
@@ -743,12 +944,17 @@ static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
+ int idx;
mutex_lock(&vc4_hdmi->mutex);
vc4_hdmi->packet_ram_enabled = false;
+ if (!drm_dev_enter(drm, &idx))
+ goto out;
+
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, 0);
@@ -766,6 +972,9 @@ static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder,
vc4_hdmi_disable_scrambling(encoder);
+ drm_dev_exit(idx);
+
+out:
mutex_unlock(&vc4_hdmi->mutex);
}
@@ -773,11 +982,16 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
int ret;
+ int idx;
mutex_lock(&vc4_hdmi->mutex);
+ if (!drm_dev_enter(drm, &idx))
+ goto out;
+
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_VID_CTL,
HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_BLANKPIX);
@@ -793,6 +1007,9 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
if (ret < 0)
DRM_ERROR("Failed to release power domain: %d\n", ret);
+ drm_dev_exit(idx);
+
+out:
mutex_unlock(&vc4_hdmi->mutex);
}
@@ -800,8 +1017,13 @@ static void vc4_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi,
struct drm_connector_state *state,
const struct drm_display_mode *mode)
{
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
u32 csc_ctl;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -836,6 +1058,8 @@ static void vc4_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi,
HDMI_WRITE(HDMI_CSC_CTL, csc_ctl);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ drm_dev_exit(idx);
}
/*
@@ -920,6 +1144,7 @@ static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi,
struct drm_connector_state *state,
const struct drm_display_mode *mode)
{
+ struct drm_device *drm = vc4_hdmi->connector.dev;
struct vc4_hdmi_connector_state *vc4_state =
conn_state_to_vc4_hdmi_conn_state(state);
unsigned long flags;
@@ -928,6 +1153,10 @@ static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi,
u32 csc_chan_ctl = 0;
u32 csc_ctl = VC5_MT_CP_CSC_CTL_ENABLE | VC4_SET_FIELD(VC4_HD_CSC_CTL_MODE_CUSTOM,
VC5_MT_CP_CSC_CTL_MODE);
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -970,12 +1199,15 @@ static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi,
HDMI_WRITE(HDMI_CSC_CTL, csc_ctl);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ drm_dev_exit(idx);
}
static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
struct drm_connector_state *state,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
+ struct drm_device *drm = vc4_hdmi->connector.dev;
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
bool vsync_pos = mode->flags & DRM_MODE_FLAG_PVSYNC;
bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
@@ -995,6 +1227,10 @@ static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
VC4_HDMI_VERTB_VBP));
unsigned long flags;
u32 reg;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -1027,12 +1263,15 @@ static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
HDMI_WRITE(HDMI_MISC_CONTROL, reg);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ drm_dev_exit(idx);
}
static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
struct drm_connector_state *state,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
+ struct drm_device *drm = vc4_hdmi->connector.dev;
const struct vc4_hdmi_connector_state *vc4_state =
conn_state_to_vc4_hdmi_conn_state(state);
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
@@ -1056,6 +1295,10 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
unsigned char gcp;
bool gcp_en;
u32 reg;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -1132,13 +1375,20 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
HDMI_WRITE(HDMI_CLOCK_STOP, 0);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ drm_dev_exit(idx);
}
static void vc4_hdmi_recenter_fifo(struct vc4_hdmi *vc4_hdmi)
{
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
u32 drift;
int ret;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -1167,25 +1417,32 @@ static void vc4_hdmi_recenter_fifo(struct vc4_hdmi *vc4_hdmi)
VC4_HDMI_FIFO_CTL_RECENTER_DONE, 1);
WARN_ONCE(ret, "Timeout waiting for "
"VC4_HDMI_FIFO_CTL_RECENTER_DONE");
+
+ drm_dev_exit(idx);
}
static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
struct drm_connector *connector = &vc4_hdmi->connector;
struct drm_connector_state *conn_state =
drm_atomic_get_new_connector_state(state, connector);
struct vc4_hdmi_connector_state *vc4_conn_state =
conn_state_to_vc4_hdmi_conn_state(conn_state);
- struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
+ const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
unsigned long tmds_char_rate = vc4_conn_state->tmds_char_rate;
unsigned long bvb_rate, hsm_rate;
unsigned long flags;
int ret;
+ int idx;
mutex_lock(&vc4_hdmi->mutex);
+ if (!drm_dev_enter(drm, &idx))
+ goto out;
+
/*
* As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must
* be faster than pixel clock, infinitesimally faster, tested in
@@ -1206,13 +1463,13 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
ret = clk_set_min_rate(vc4_hdmi->hsm_clock, hsm_rate);
if (ret) {
DRM_ERROR("Failed to set HSM clock rate: %d\n", ret);
- goto out;
+ goto err_dev_exit;
}
ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
if (ret < 0) {
DRM_ERROR("Failed to retain power domain: %d\n", ret);
- goto out;
+ goto err_dev_exit;
}
ret = clk_set_rate(vc4_hdmi->pixel_clock, tmds_char_rate);
@@ -1264,6 +1521,8 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
if (vc4_hdmi->variant->set_timings)
vc4_hdmi->variant->set_timings(vc4_hdmi, conn_state, mode);
+ drm_dev_exit(idx);
+
mutex_unlock(&vc4_hdmi->mutex);
return;
@@ -1272,6 +1531,8 @@ err_disable_pixel_clock:
clk_disable_unprepare(vc4_hdmi->pixel_clock);
err_put_runtime_pm:
pm_runtime_put(&vc4_hdmi->pdev->dev);
+err_dev_exit:
+ drm_dev_exit(idx);
out:
mutex_unlock(&vc4_hdmi->mutex);
return;
@@ -1281,14 +1542,19 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
struct drm_connector *connector = &vc4_hdmi->connector;
- struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
+ const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
struct drm_connector_state *conn_state =
drm_atomic_get_new_connector_state(state, connector);
unsigned long flags;
+ int idx;
mutex_lock(&vc4_hdmi->mutex);
+ if (!drm_dev_enter(drm, &idx))
+ goto out;
+
if (vc4_hdmi->variant->csc_setup)
vc4_hdmi->variant->csc_setup(vc4_hdmi, conn_state, mode);
@@ -1296,6 +1562,9 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
HDMI_WRITE(HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ drm_dev_exit(idx);
+
+out:
mutex_unlock(&vc4_hdmi->mutex);
}
@@ -1303,15 +1572,20 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
- struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
+ struct drm_device *drm = vc4_hdmi->connector.dev;
+ const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
struct drm_display_info *display = &vc4_hdmi->connector.display_info;
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
bool vsync_pos = mode->flags & DRM_MODE_FLAG_PVSYNC;
unsigned long flags;
int ret;
+ int idx;
mutex_lock(&vc4_hdmi->mutex);
+ if (!drm_dev_enter(drm, &idx))
+ goto out;
+
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_VID_CTL,
@@ -1370,6 +1644,9 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
vc4_hdmi_recenter_fifo(vc4_hdmi);
vc4_hdmi_enable_scrambling(encoder);
+ drm_dev_exit(idx);
+
+out:
mutex_unlock(&vc4_hdmi->mutex);
}
@@ -1692,6 +1969,26 @@ static const struct drm_encoder_helper_funcs vc4_hdmi_encoder_helper_funcs = {
.mode_valid = vc4_hdmi_encoder_mode_valid,
};
+static int vc4_hdmi_late_register(struct drm_encoder *encoder)
+{
+ struct drm_device *drm = encoder->dev;
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ const struct vc4_hdmi_variant *variant = vc4_hdmi->variant;
+ int ret;
+
+ ret = vc4_debugfs_add_file(drm->primary, variant->debugfs_name,
+ vc4_hdmi_debugfs_regs,
+ vc4_hdmi);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct drm_encoder_funcs vc4_hdmi_encoder_funcs = {
+ .late_register = vc4_hdmi_late_register,
+};
+
static u32 vc4_hdmi_channel_map(struct vc4_hdmi *vc4_hdmi, u32 channel_mask)
{
int i;
@@ -1718,13 +2015,20 @@ static u32 vc5_hdmi_channel_map(struct vc4_hdmi *vc4_hdmi, u32 channel_mask)
static bool vc5_hdmi_hp_detect(struct vc4_hdmi *vc4_hdmi)
{
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
u32 hotplug;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return false;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
hotplug = HDMI_READ(HDMI_HOTPLUG);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ drm_dev_exit(idx);
+
return !!(hotplug & VC4_HDMI_HOTPLUG_CONNECTED);
}
@@ -1732,10 +2036,16 @@ static bool vc5_hdmi_hp_detect(struct vc4_hdmi *vc4_hdmi)
static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi,
unsigned int samplerate)
{
- u32 hsm_clock = clk_get_rate(vc4_hdmi->audio_clock);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
+ u32 hsm_clock;
unsigned long flags;
unsigned long n, m;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
+ hsm_clock = clk_get_rate(vc4_hdmi->audio_clock);
rational_best_approximation(hsm_clock, samplerate,
VC4_HD_MAI_SMP_N_MASK >>
VC4_HD_MAI_SMP_N_SHIFT,
@@ -1748,6 +2058,8 @@ static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi,
VC4_SET_FIELD(n, VC4_HD_MAI_SMP_N) |
VC4_SET_FIELD(m - 1, VC4_HD_MAI_SMP_M));
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ drm_dev_exit(idx);
}
static void vc4_hdmi_set_n_cts(struct vc4_hdmi *vc4_hdmi, unsigned int samplerate)
@@ -1803,13 +2115,21 @@ static bool vc4_hdmi_audio_can_stream(struct vc4_hdmi *vc4_hdmi)
static int vc4_hdmi_audio_startup(struct device *dev, void *data)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
+ int ret = 0;
+ int idx;
mutex_lock(&vc4_hdmi->mutex);
+ if (!drm_dev_enter(drm, &idx)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
if (!vc4_hdmi_audio_can_stream(vc4_hdmi)) {
- mutex_unlock(&vc4_hdmi->mutex);
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_dev_exit;
}
vc4_hdmi->audio.streaming = true;
@@ -1826,9 +2146,12 @@ static int vc4_hdmi_audio_startup(struct device *dev, void *data)
if (vc4_hdmi->variant->phy_rng_enable)
vc4_hdmi->variant->phy_rng_enable(vc4_hdmi);
+out_dev_exit:
+ drm_dev_exit(idx);
+out:
mutex_unlock(&vc4_hdmi->mutex);
- return 0;
+ return ret;
}
static void vc4_hdmi_audio_reset(struct vc4_hdmi *vc4_hdmi)
@@ -1857,10 +2180,15 @@ static void vc4_hdmi_audio_reset(struct vc4_hdmi *vc4_hdmi)
static void vc4_hdmi_audio_shutdown(struct device *dev, void *data)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
+ int idx;
mutex_lock(&vc4_hdmi->mutex);
+ if (!drm_dev_enter(drm, &idx))
+ goto out;
+
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_MAI_CTL,
@@ -1876,6 +2204,9 @@ static void vc4_hdmi_audio_shutdown(struct device *dev, void *data)
vc4_hdmi->audio.streaming = false;
vc4_hdmi_audio_reset(vc4_hdmi);
+ drm_dev_exit(idx);
+
+out:
mutex_unlock(&vc4_hdmi->mutex);
}
@@ -1923,6 +2254,7 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
struct hdmi_codec_params *params)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
struct drm_encoder *encoder = &vc4_hdmi->encoder.base;
unsigned int sample_rate = params->sample_rate;
unsigned int channels = params->channels;
@@ -1931,15 +2263,22 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
u32 channel_map;
u32 mai_audio_format;
u32 mai_sample_rate;
+ int ret = 0;
+ int idx;
dev_dbg(dev, "%s: %u Hz, %d bit, %d channels\n", __func__,
sample_rate, params->sample_width, channels);
mutex_lock(&vc4_hdmi->mutex);
+ if (!drm_dev_enter(drm, &idx)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
if (!vc4_hdmi_audio_can_stream(vc4_hdmi)) {
- mutex_unlock(&vc4_hdmi->mutex);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_dev_exit;
}
vc4_hdmi_audio_set_mai_clock(vc4_hdmi, sample_rate);
@@ -1996,9 +2335,12 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
memcpy(&vc4_hdmi->audio.infoframe, &params->cea, sizeof(params->cea));
vc4_hdmi_set_audio_infoframe(encoder);
+out_dev_exit:
+ drm_dev_exit(idx);
+out:
mutex_unlock(&vc4_hdmi->mutex);
- return 0;
+ return ret;
}
static const struct snd_soc_component_driver vc4_hdmi_audio_cpu_dai_comp = {
@@ -2061,6 +2403,14 @@ static struct hdmi_codec_pdata vc4_hdmi_codec_pdata = {
.i2s = 1,
};
+static void vc4_hdmi_audio_codec_release(void *ptr)
+{
+ struct vc4_hdmi *vc4_hdmi = ptr;
+
+ platform_device_unregister(vc4_hdmi->audio.codec_pdev);
+ vc4_hdmi->audio.codec_pdev = NULL;
+}
+
static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
{
const struct vc4_hdmi_register *mai_data =
@@ -2073,6 +2423,26 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
int index, len;
int ret;
+ /*
+ * ASoC makes it a bit hard to retrieve a pointer to the
+ * vc4_hdmi structure. Registering the card will overwrite our
+ * device drvdata with a pointer to the snd_soc_card structure,
+ * which can then be used to retrieve whatever drvdata we want
+ * to associate.
+ *
+	 * However, that doesn't fly when we don't register an ASoC card
+	 * (for example because an old DT is missing the dmas property):
+	 * in that case the card isn't registered and the device drvdata
+	 * isn't set.
+	 *
+	 * We can deal with both cases by making sure a snd_soc_card
+	 * pointer and a vc4_hdmi structure point to the same memory
+	 * address, so we can treat them interchangeably without any
+	 * issue.
+ */
+ BUILD_BUG_ON(offsetof(struct vc4_hdmi_audio, card) != 0);
+ BUILD_BUG_ON(offsetof(struct vc4_hdmi, audio) != 0);
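+	/*
+	 * The two build-time checks above guarantee that the following
+	 * pointers all compare equal, which is what lets the drvdata be
+	 * read back either as a struct snd_soc_card * (when ASoC
+	 * registered the card) or as a struct vc4_hdmi * (when it did
+	 * not):
+	 *
+	 *   (void *)&vc4_hdmi->audio.card == (void *)&vc4_hdmi->audio
+	 *                                 == (void *)vc4_hdmi
+	 */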
+
if (!of_find_property(dev->of_node, "dmas", &len) || !len) {
dev_warn(dev,
"'dmas' DT property is missing or empty, no HDMI audio\n");
@@ -2102,6 +2472,30 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
vc4_hdmi->audio.dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
vc4_hdmi->audio.dma_data.maxburst = 2;
+ /*
+	 * NOTE: Strictly speaking, we should probably use a DRM-managed
+	 * registration here so that the audio components aren't removed
+	 * until the driver no longer has any users.
+ *
+ * However, the ASoC core uses a number of devm_kzalloc calls
+ * when registering, even when using non-device-managed
+ * functions (such as in snd_soc_register_component()).
+ *
+ * If we call snd_soc_unregister_component() in a DRM-managed
+ * action, the device-managed actions have already been executed
+ * and thus we would access memory that has been freed.
+ *
+ * Using device-managed hooks here probably leaves us open to a
+ * bunch of issues if userspace still has a handle on the ALSA
+ * device when the device is removed. However, this is mitigated
+ * by the use of drm_dev_enter()/drm_dev_exit() in the audio
+	 * path to prevent access to the device resources once the
+	 * device is gone.
+ *
+	 * Finally, the vc4_hdmi structure is DRM-managed and thus only
+	 * freed once the last user has closed the DRM device file. It
+	 * should therefore outlive ALSA in most situations.
+ */
ret = devm_snd_dmaengine_pcm_register(dev, &pcm_conf, 0);
if (ret) {
dev_err(dev, "Could not register PCM component: %d\n", ret);
@@ -2125,6 +2519,10 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
}
vc4_hdmi->audio.codec_pdev = codec_pdev;
+ ret = devm_add_action_or_reset(dev, vc4_hdmi_audio_codec_release, vc4_hdmi);
+ if (ret)
+ return ret;
+
dai_link->cpus = &vc4_hdmi->audio.cpu;
dai_link->codecs = &vc4_hdmi->audio.codec;
dai_link->platforms = &vc4_hdmi->audio.platform;
@@ -2163,12 +2561,6 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
}
-static void vc4_hdmi_audio_exit(struct vc4_hdmi *vc4_hdmi)
-{
- platform_device_unregister(vc4_hdmi->audio.codec_pdev);
- vc4_hdmi->audio.codec_pdev = NULL;
-}
-
static irqreturn_t vc4_hdmi_hpd_irq_thread(int irq, void *priv)
{
struct vc4_hdmi *vc4_hdmi = priv;
@@ -2191,21 +2583,19 @@ static int vc4_hdmi_hotplug_init(struct vc4_hdmi *vc4_hdmi)
unsigned int hpd_con = platform_get_irq_byname(pdev, "hpd-connected");
unsigned int hpd_rm = platform_get_irq_byname(pdev, "hpd-removed");
- ret = request_threaded_irq(hpd_con,
- NULL,
- vc4_hdmi_hpd_irq_thread, IRQF_ONESHOT,
- "vc4 hdmi hpd connected", vc4_hdmi);
+ ret = devm_request_threaded_irq(&pdev->dev, hpd_con,
+ NULL,
+ vc4_hdmi_hpd_irq_thread, IRQF_ONESHOT,
+ "vc4 hdmi hpd connected", vc4_hdmi);
if (ret)
return ret;
- ret = request_threaded_irq(hpd_rm,
- NULL,
- vc4_hdmi_hpd_irq_thread, IRQF_ONESHOT,
- "vc4 hdmi hpd disconnected", vc4_hdmi);
- if (ret) {
- free_irq(hpd_con, vc4_hdmi);
+ ret = devm_request_threaded_irq(&pdev->dev, hpd_rm,
+ NULL,
+ vc4_hdmi_hpd_irq_thread, IRQF_ONESHOT,
+ "vc4 hdmi hpd disconnected", vc4_hdmi);
+ if (ret)
return ret;
- }
connector->polled = DRM_CONNECTOR_POLL_HPD;
}
@@ -2213,16 +2603,6 @@ static int vc4_hdmi_hotplug_init(struct vc4_hdmi *vc4_hdmi)
return 0;
}
-static void vc4_hdmi_hotplug_exit(struct vc4_hdmi *vc4_hdmi)
-{
- struct platform_device *pdev = vc4_hdmi->pdev;
-
- if (vc4_hdmi->variant->external_irq_controller) {
- free_irq(platform_get_irq_byname(pdev, "hpd-connected"), vc4_hdmi);
- free_irq(platform_get_irq_byname(pdev, "hpd-removed"), vc4_hdmi);
- }
-}
-
#ifdef CONFIG_DRM_VC4_HDMI_CEC
static irqreturn_t vc4_cec_irq_handler_rx_thread(int irq, void *priv)
{
@@ -2296,6 +2676,17 @@ static irqreturn_t vc4_cec_irq_handler_tx_bare_locked(struct vc4_hdmi *vc4_hdmi)
{
u32 cntrl1;
+ /*
+	 * We don't need to protect the register access with
+	 * drm_dev_enter() here because the interrupt handler lifetime
+	 * is tied to the device itself, and not to the DRM device.
+	 *
+	 * So when the device goes away, one of the first things we do
+	 * is unregister the interrupt handler, and only then unregister
+	 * the DRM device. drm_dev_enter() would thus always succeed if
+	 * we are here.
+ */
+
lockdep_assert_held(&vc4_hdmi->hw_lock);
cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1);
@@ -2324,6 +2715,17 @@ static irqreturn_t vc4_cec_irq_handler_rx_bare_locked(struct vc4_hdmi *vc4_hdmi)
lockdep_assert_held(&vc4_hdmi->hw_lock);
+ /*
+	 * We don't need to protect the register access with
+	 * drm_dev_enter() here because the interrupt handler lifetime
+	 * is tied to the device itself, and not to the DRM device.
+	 *
+	 * So when the device goes away, one of the first things we do
+	 * is unregister the interrupt handler, and only then unregister
+	 * the DRM device. drm_dev_enter() would thus always succeed if
+	 * we are here.
+ */
+
vc4_hdmi->cec_rx_msg.len = 0;
cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1);
vc4_cec_read_msg(vc4_hdmi, cntrl1);
@@ -2355,6 +2757,17 @@ static irqreturn_t vc4_cec_irq_handler(int irq, void *priv)
irqreturn_t ret;
u32 cntrl5;
+ /*
+	 * We don't need to protect the register access with
+	 * drm_dev_enter() here because the interrupt handler lifetime
+	 * is tied to the device itself, and not to the DRM device.
+	 *
+	 * So when the device goes away, one of the first things we do
+	 * is unregister the interrupt handler, and only then unregister
+	 * the DRM device. drm_dev_enter() would thus always succeed if
+	 * we are here.
+ */
+
if (!(stat & VC4_HDMI_CPU_CEC))
return IRQ_NONE;
@@ -2375,26 +2788,29 @@ static irqreturn_t vc4_cec_irq_handler(int irq, void *priv)
static int vc4_hdmi_cec_enable(struct cec_adapter *adap)
{
struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
/* clock period in microseconds */
const u32 usecs = 1000000 / CEC_CLOCK_FREQ;
unsigned long flags;
u32 val;
int ret;
+ int idx;
- /*
- * NOTE: This function should really take vc4_hdmi->mutex, but doing so
- * results in a reentrancy since cec_s_phys_addr_from_edid() called in
- * .detect or .get_modes might call .adap_enable, which leads to this
- * function being called with that mutex held.
- *
- * Concurrency is not an issue for the moment since we don't share any
- * state with KMS, so we can ignore the lock for now, but we need to
- * keep it in mind if we were to change that assumption.
- */
+ if (!drm_dev_enter(drm, &idx))
+ /*
+ * We can't return an error code, because the CEC
+ * framework will emit WARN_ON messages at unbind
+ * otherwise.
+ */
+ return 0;
ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
- if (ret)
+ if (ret) {
+ drm_dev_exit(idx);
return ret;
+ }
+
+ mutex_lock(&vc4_hdmi->mutex);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -2430,24 +2846,28 @@ static int vc4_hdmi_cec_enable(struct cec_adapter *adap)
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ mutex_unlock(&vc4_hdmi->mutex);
+ drm_dev_exit(idx);
+
return 0;
}
static int vc4_hdmi_cec_disable(struct cec_adapter *adap)
{
struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
+ int idx;
- /*
- * NOTE: This function should really take vc4_hdmi->mutex, but doing so
- * results in a reentrancy since cec_s_phys_addr_from_edid() called in
- * .detect or .get_modes might call .adap_enable, which leads to this
- * function being called with that mutex held.
- *
- * Concurrency is not an issue for the moment since we don't share any
- * state with KMS, so we can ignore the lock for now, but we need to
- * keep it in mind if we were to change that assumption.
- */
+ if (!drm_dev_enter(drm, &idx))
+ /*
+ * We can't return an error code, because the CEC
+ * framework will emit WARN_ON messages at unbind
+ * otherwise.
+ */
+ return 0;
+
+ mutex_lock(&vc4_hdmi->mutex);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -2459,8 +2879,12 @@ static int vc4_hdmi_cec_disable(struct cec_adapter *adap)
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ mutex_unlock(&vc4_hdmi->mutex);
+
pm_runtime_put(&vc4_hdmi->pdev->dev);
+ drm_dev_exit(idx);
+
return 0;
}
@@ -2475,24 +2899,27 @@ static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
static int vc4_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
{
struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
+ int idx;
- /*
- * NOTE: This function should really take vc4_hdmi->mutex, but doing so
- * results in a reentrancy since cec_s_phys_addr_from_edid() called in
- * .detect or .get_modes might call .adap_enable, which leads to this
- * function being called with that mutex held.
- *
- * Concurrency is not an issue for the moment since we don't share any
- * state with KMS, so we can ignore the lock for now, but we need to
- * keep it in mind if we were to change that assumption.
- */
+ if (!drm_dev_enter(drm, &idx))
+ /*
+ * We can't return an error code, because the CEC
+ * framework will emit WARN_ON messages at unbind
+ * otherwise.
+ */
+ return 0;
+ mutex_lock(&vc4_hdmi->mutex);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_CEC_CNTRL_1,
(HDMI_READ(HDMI_CEC_CNTRL_1) & ~VC4_HDMI_CEC_ADDR_MASK) |
(log_addr & 0xf) << VC4_HDMI_CEC_ADDR_SHIFT);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ mutex_unlock(&vc4_hdmi->mutex);
+
+ drm_dev_exit(idx);
return 0;
}
@@ -2505,23 +2932,19 @@ static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
unsigned long flags;
u32 val;
unsigned int i;
+ int idx;
- /*
- * NOTE: This function should really take vc4_hdmi->mutex, but doing so
- * results in a reentrancy since cec_s_phys_addr_from_edid() called in
- * .detect or .get_modes might call .adap_enable, which leads to this
- * function being called with that mutex held.
- *
- * Concurrency is not an issue for the moment since we don't share any
- * state with KMS, so we can ignore the lock for now, but we need to
- * keep it in mind if we were to change that assumption.
- */
+ if (!drm_dev_enter(dev, &idx))
+ return -ENODEV;
if (msg->len > 16) {
drm_err(dev, "Attempting to transmit too much data (%d)\n", msg->len);
+ drm_dev_exit(idx);
return -ENOMEM;
}
+ mutex_lock(&vc4_hdmi->mutex);
+
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
for (i = 0; i < msg->len; i += 4)
@@ -2541,6 +2964,8 @@ static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
HDMI_WRITE(HDMI_CEC_CNTRL_1, val);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ mutex_unlock(&vc4_hdmi->mutex);
+ drm_dev_exit(idx);
return 0;
}
@@ -2551,6 +2976,14 @@ static const struct cec_adap_ops vc4_hdmi_cec_adap_ops = {
.adap_transmit = vc4_hdmi_cec_adap_transmit,
};
+static void vc4_hdmi_cec_release(void *ptr)
+{
+ struct vc4_hdmi *vc4_hdmi = ptr;
+
+ cec_unregister_adapter(vc4_hdmi->cec_adap);
+ vc4_hdmi->cec_adap = NULL;
+}
+
static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
{
struct cec_connector_info conn_info;
@@ -2575,73 +3008,82 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
cec_s_conn_info(vc4_hdmi->cec_adap, &conn_info);
if (vc4_hdmi->variant->external_irq_controller) {
- ret = request_threaded_irq(platform_get_irq_byname(pdev, "cec-rx"),
- vc4_cec_irq_handler_rx_bare,
- vc4_cec_irq_handler_rx_thread, 0,
- "vc4 hdmi cec rx", vc4_hdmi);
+ ret = devm_request_threaded_irq(dev, platform_get_irq_byname(pdev, "cec-rx"),
+ vc4_cec_irq_handler_rx_bare,
+ vc4_cec_irq_handler_rx_thread, 0,
+ "vc4 hdmi cec rx", vc4_hdmi);
if (ret)
goto err_delete_cec_adap;
- ret = request_threaded_irq(platform_get_irq_byname(pdev, "cec-tx"),
- vc4_cec_irq_handler_tx_bare,
- vc4_cec_irq_handler_tx_thread, 0,
- "vc4 hdmi cec tx", vc4_hdmi);
+ ret = devm_request_threaded_irq(dev, platform_get_irq_byname(pdev, "cec-tx"),
+ vc4_cec_irq_handler_tx_bare,
+ vc4_cec_irq_handler_tx_thread, 0,
+ "vc4 hdmi cec tx", vc4_hdmi);
if (ret)
- goto err_remove_cec_rx_handler;
+ goto err_delete_cec_adap;
} else {
- ret = request_threaded_irq(platform_get_irq(pdev, 0),
- vc4_cec_irq_handler,
- vc4_cec_irq_handler_thread, 0,
- "vc4 hdmi cec", vc4_hdmi);
+ ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0),
+ vc4_cec_irq_handler,
+ vc4_cec_irq_handler_thread, 0,
+ "vc4 hdmi cec", vc4_hdmi);
if (ret)
goto err_delete_cec_adap;
}
ret = cec_register_adapter(vc4_hdmi->cec_adap, &pdev->dev);
if (ret < 0)
- goto err_remove_handlers;
+ goto err_delete_cec_adap;
- return 0;
-
-err_remove_handlers:
- if (vc4_hdmi->variant->external_irq_controller)
- free_irq(platform_get_irq_byname(pdev, "cec-tx"), vc4_hdmi);
- else
- free_irq(platform_get_irq(pdev, 0), vc4_hdmi);
+ /*
+	 * NOTE: Strictly speaking, we should probably use a DRM-managed
+	 * registration here so that the CEC adapter isn't removed until
+	 * the DRM driver no longer has any users.
+ *
+ * However, the CEC framework already cleans up the CEC adapter
+ * only when the last user has closed its file descriptor, so we
+ * don't need to handle it in DRM.
+ *
+	 * By the time the device-managed hook is executed, we will have
+	 * given up our reference to the CEC adapter, so we don't really
+	 * care when it's actually freed.
+ *
+	 * There's still one problematic sequence: we unregister our CEC
+	 * adapter, but userspace keeps a handle on the CEC adapter and
+	 * not on the DRM device. In that case, our vc4_hdmi structure
+	 * will be freed while the cec_adapter structure keeps a dangling
+	 * pointer to what used to be our HDMI controller, and a CEC call
+	 * at that moment could end up as a use-after-free. Fortunately,
+	 * the CEC framework already handles this too, by calling
+	 * cec_is_registered() in cec_ioctl() and cec_poll().
+ */
+ ret = devm_add_action_or_reset(dev, vc4_hdmi_cec_release, vc4_hdmi);
+ if (ret)
+ return ret;
-err_remove_cec_rx_handler:
- if (vc4_hdmi->variant->external_irq_controller)
- free_irq(platform_get_irq_byname(pdev, "cec-rx"), vc4_hdmi);
+ return 0;
err_delete_cec_adap:
cec_delete_adapter(vc4_hdmi->cec_adap);
return ret;
}
-
-static void vc4_hdmi_cec_exit(struct vc4_hdmi *vc4_hdmi)
-{
- struct platform_device *pdev = vc4_hdmi->pdev;
-
- if (vc4_hdmi->variant->external_irq_controller) {
- free_irq(platform_get_irq_byname(pdev, "cec-rx"), vc4_hdmi);
- free_irq(platform_get_irq_byname(pdev, "cec-tx"), vc4_hdmi);
- } else {
- free_irq(platform_get_irq(pdev, 0), vc4_hdmi);
- }
-
- cec_unregister_adapter(vc4_hdmi->cec_adap);
-}
#else
static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
{
return 0;
}
-
-static void vc4_hdmi_cec_exit(struct vc4_hdmi *vc4_hdmi) {};
#endif
-static int vc4_hdmi_build_regset(struct vc4_hdmi *vc4_hdmi,
+static void vc4_hdmi_free_regset(struct drm_device *drm, void *ptr)
+{
+ struct debugfs_reg32 *regs = ptr;
+
+ kfree(regs);
+}
+
+static int vc4_hdmi_build_regset(struct drm_device *drm,
+ struct vc4_hdmi *vc4_hdmi,
struct debugfs_regset32 *regset,
enum vc4_hdmi_regs reg)
{
@@ -2649,6 +3091,7 @@ static int vc4_hdmi_build_regset(struct vc4_hdmi *vc4_hdmi,
struct debugfs_reg32 *regs, *new_regs;
unsigned int count = 0;
unsigned int i;
+ int ret;
regs = kcalloc(variant->num_registers, sizeof(*regs),
GFP_KERNEL);
@@ -2674,10 +3117,15 @@ static int vc4_hdmi_build_regset(struct vc4_hdmi *vc4_hdmi,
regset->regs = new_regs;
regset->nregs = count;
+ ret = drmm_add_action_or_reset(drm, vc4_hdmi_free_regset, new_regs);
+ if (ret)
+ return ret;
+
return 0;
}
-static int vc4_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
+static int vc4_hdmi_init_resources(struct drm_device *drm,
+ struct vc4_hdmi *vc4_hdmi)
{
struct platform_device *pdev = vc4_hdmi->pdev;
struct device *dev = &pdev->dev;
@@ -2691,11 +3139,11 @@ static int vc4_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
if (IS_ERR(vc4_hdmi->hd_regs))
return PTR_ERR(vc4_hdmi->hd_regs);
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->hd_regset, VC4_HD);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->hd_regset, VC4_HD);
if (ret)
return ret;
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->hdmi_regset, VC4_HDMI);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->hdmi_regset, VC4_HDMI);
if (ret)
return ret;
@@ -2718,7 +3166,8 @@ static int vc4_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
return 0;
}
-static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
+static int vc5_hdmi_init_resources(struct drm_device *drm,
+ struct vc4_hdmi *vc4_hdmi)
{
struct platform_device *pdev = vc4_hdmi->pdev;
struct device *dev = &pdev->dev;
@@ -2820,35 +3269,35 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
return PTR_ERR(vc4_hdmi->reset);
}
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->hdmi_regset, VC4_HDMI);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->hdmi_regset, VC4_HDMI);
if (ret)
return ret;
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->hd_regset, VC4_HD);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->hd_regset, VC4_HD);
if (ret)
return ret;
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->cec_regset, VC5_CEC);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->cec_regset, VC5_CEC);
if (ret)
return ret;
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->csc_regset, VC5_CSC);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->csc_regset, VC5_CSC);
if (ret)
return ret;
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->dvp_regset, VC5_DVP);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->dvp_regset, VC5_DVP);
if (ret)
return ret;
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->phy_regset, VC5_PHY);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->phy_regset, VC5_PHY);
if (ret)
return ret;
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->ram_regset, VC5_RAM);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->ram_regset, VC5_RAM);
if (ret)
return ret;
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->rm_regset, VC5_RM);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->rm_regset, VC5_RM);
if (ret)
return ret;
@@ -2869,12 +3318,37 @@ static int vc4_hdmi_runtime_resume(struct device *dev)
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
unsigned long __maybe_unused flags;
u32 __maybe_unused value;
+ unsigned long rate;
int ret;
+ /*
+ * The HSM clock is in the HDMI power domain, so we need to set
+ * its frequency while the power domain is active so that it
+ * keeps its rate.
+ */
+ ret = clk_set_min_rate(vc4_hdmi->hsm_clock, HSM_MIN_CLOCK_FREQ);
+ if (ret)
+ return ret;
+
ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
if (ret)
return ret;
+ /*
+	 * Whenever the Raspberry Pi boots without an HDMI monitor
+ * plugged in, the firmware won't have initialized the HSM clock
+ * rate and it will be reported as 0.
+ *
+ * If we try to access a register of the controller in such a
+ * case, it will lead to a silent CPU stall. Let's make sure we
+ * prevent such a case.
+ */
+ rate = clk_get_rate(vc4_hdmi->hsm_clock);
+ if (!rate) {
+ ret = -EINVAL;
+ goto err_disable_clk;
+ }
+
if (vc4_hdmi->variant->reset)
vc4_hdmi->variant->reset(vc4_hdmi);
@@ -2896,6 +3370,17 @@ static int vc4_hdmi_runtime_resume(struct device *dev)
#endif
return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(vc4_hdmi->hsm_clock);
+ return ret;
+}
+
+static void vc4_hdmi_put_ddc_device(void *ptr)
+{
+ struct vc4_hdmi *vc4_hdmi = ptr;
+
+ put_device(&vc4_hdmi->ddc->dev);
}
static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
@@ -2908,10 +3393,14 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
struct device_node *ddc_node;
int ret;
- vc4_hdmi = devm_kzalloc(dev, sizeof(*vc4_hdmi), GFP_KERNEL);
+ vc4_hdmi = drmm_kzalloc(drm, sizeof(*vc4_hdmi), GFP_KERNEL);
if (!vc4_hdmi)
return -ENOMEM;
- mutex_init(&vc4_hdmi->mutex);
+
+ ret = drmm_mutex_init(drm, &vc4_hdmi->mutex);
+ if (ret)
+ return ret;
+
spin_lock_init(&vc4_hdmi->hw_lock);
INIT_DELAYED_WORK(&vc4_hdmi->scrambling_work, vc4_hdmi_scrambling_wq);
@@ -2935,7 +3424,7 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
if (variant->max_pixel_clock > HDMI_14_MAX_TMDS_CLK)
vc4_hdmi->scdc_enabled = true;
- ret = variant->init_resources(vc4_hdmi);
+ ret = variant->init_resources(drm, vc4_hdmi);
if (ret)
return ret;
@@ -2952,13 +3441,16 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
return -EPROBE_DEFER;
}
+ ret = devm_add_action_or_reset(dev, vc4_hdmi_put_ddc_device, vc4_hdmi);
+ if (ret)
+ return ret;
+
/* Only use the GPIO HPD pin if present in the DT, otherwise
* we'll use the HDMI core's register.
*/
vc4_hdmi->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
if (IS_ERR(vc4_hdmi->hpd_gpio)) {
- ret = PTR_ERR(vc4_hdmi->hpd_gpio);
- goto err_put_ddc;
+ return PTR_ERR(vc4_hdmi->hpd_gpio);
}
vc4_hdmi->disable_wifi_frequencies =
@@ -2972,7 +3464,9 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
vc4_hdmi->disable_4kp60 = true;
}
- pm_runtime_enable(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
/*
* We need to have the device powered up at this point to call
@@ -2980,7 +3474,7 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
*/
ret = pm_runtime_resume_and_get(dev);
if (ret)
- goto err_disable_runtime_pm;
+ return ret;
if ((of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi0") ||
of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi1")) &&
@@ -2990,93 +3484,43 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
}
- drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
+ ret = drmm_encoder_init(drm, encoder,
+ &vc4_hdmi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS,
+ NULL);
+ if (ret)
+ goto err_put_runtime_pm;
+
drm_encoder_helper_add(encoder, &vc4_hdmi_encoder_helper_funcs);
ret = vc4_hdmi_connector_init(drm, vc4_hdmi);
if (ret)
- goto err_destroy_encoder;
+ goto err_put_runtime_pm;
ret = vc4_hdmi_hotplug_init(vc4_hdmi);
if (ret)
- goto err_destroy_conn;
+ goto err_put_runtime_pm;
ret = vc4_hdmi_cec_init(vc4_hdmi);
if (ret)
- goto err_free_hotplug;
+ goto err_put_runtime_pm;
ret = vc4_hdmi_audio_init(vc4_hdmi);
if (ret)
- goto err_free_cec;
-
- vc4_debugfs_add_file(drm, variant->debugfs_name,
- vc4_hdmi_debugfs_regs,
- vc4_hdmi);
+ goto err_put_runtime_pm;
pm_runtime_put_sync(dev);
return 0;
-err_free_cec:
- vc4_hdmi_cec_exit(vc4_hdmi);
-err_free_hotplug:
- vc4_hdmi_hotplug_exit(vc4_hdmi);
-err_destroy_conn:
- vc4_hdmi_connector_destroy(&vc4_hdmi->connector);
-err_destroy_encoder:
- drm_encoder_cleanup(encoder);
+err_put_runtime_pm:
pm_runtime_put_sync(dev);
-err_disable_runtime_pm:
- pm_runtime_disable(dev);
-err_put_ddc:
- put_device(&vc4_hdmi->ddc->dev);
return ret;
}
-static void vc4_hdmi_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct vc4_hdmi *vc4_hdmi;
-
- /*
- * ASoC makes it a bit hard to retrieve a pointer to the
- * vc4_hdmi structure. Registering the card will overwrite our
- * device drvdata with a pointer to the snd_soc_card structure,
- * which can then be used to retrieve whatever drvdata we want
- * to associate.
- *
- * However, that doesn't fly in the case where we wouldn't
- * register an ASoC card (because of an old DT that is missing
- * the dmas properties for example), then the card isn't
- * registered and the device drvdata wouldn't be set.
- *
- * We can deal with both cases by making sure a snd_soc_card
- * pointer and a vc4_hdmi structure are pointing to the same
- * memory address, so we can treat them indistinctly without any
- * issue.
- */
- BUILD_BUG_ON(offsetof(struct vc4_hdmi_audio, card) != 0);
- BUILD_BUG_ON(offsetof(struct vc4_hdmi, audio) != 0);
- vc4_hdmi = dev_get_drvdata(dev);
-
- kfree(vc4_hdmi->hdmi_regset.regs);
- kfree(vc4_hdmi->hd_regset.regs);
-
- vc4_hdmi_audio_exit(vc4_hdmi);
- vc4_hdmi_cec_exit(vc4_hdmi);
- vc4_hdmi_hotplug_exit(vc4_hdmi);
- vc4_hdmi_connector_destroy(&vc4_hdmi->connector);
- drm_encoder_cleanup(&vc4_hdmi->encoder.base);
-
- pm_runtime_disable(dev);
-
- put_device(&vc4_hdmi->ddc->dev);
-}
-
static const struct component_ops vc4_hdmi_ops = {
.bind = vc4_hdmi_bind,
- .unbind = vc4_hdmi_unbind,
};
static int vc4_hdmi_dev_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index c3ed2b07df23..db823efb2563 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -58,7 +58,8 @@ struct vc4_hdmi_variant {
/* Callback to get the resources (memory region, interrupts,
* clocks, etc) for that variant.
*/
- int (*init_resources)(struct vc4_hdmi *vc4_hdmi);
+ int (*init_resources)(struct drm_device *drm,
+ struct vc4_hdmi *vc4_hdmi);
/* Callback to reset the HDMI block */
void (*reset)(struct vc4_hdmi *vc4_hdmi);
@@ -71,7 +72,7 @@ struct vc4_hdmi_variant {
/* Callback to configure the video timings in the HDMI block */
void (*set_timings)(struct vc4_hdmi *vc4_hdmi,
struct drm_connector_state *state,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
/* Callback to initialize the PHY according to the connector state */
void (*phy_init)(struct vc4_hdmi *vc4_hdmi,
@@ -194,15 +195,7 @@ struct vc4_hdmi {
/**
* @mutex: Mutex protecting the driver access across multiple
- * frameworks (KMS, ALSA).
- *
- * NOTE: While supported, CEC has been left out since
- * cec_s_phys_addr_from_edid() might call .adap_enable and lead to a
- * reentrancy issue between .get_modes (or .detect) and .adap_enable.
- * Since we don't share any state between the CEC hooks and KMS', it's
- * not a big deal. The only trouble might come from updating the CEC
- * clock divider which might be affected by a modeset, but CEC should
- * be resilient to that.
+ * frameworks (KMS, ALSA, CEC).
*/
struct mutex mutex;
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index fbaa741dda5f..4ac9f5a2d5f9 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -25,6 +25,7 @@
#include <linux/platform_device.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_vblank.h>
#include "vc4_drv.h"
@@ -66,8 +67,12 @@ static const struct debugfs_reg32 hvs_regs[] = {
void vc4_hvs_dump_state(struct vc4_hvs *hvs)
{
+ struct drm_device *drm = &hvs->vc4->base;
struct drm_printer p = drm_info_printer(&hvs->pdev->dev);
- int i;
+ int idx, i;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
drm_print_regset32(&p, &hvs->regset);
@@ -80,6 +85,8 @@ void vc4_hvs_dump_state(struct vc4_hvs *hvs)
readl((u32 __iomem *)hvs->dlist + i + 2),
readl((u32 __iomem *)hvs->dlist + i + 3));
}
+
+ drm_dev_exit(idx);
}
static int vc4_hvs_debugfs_underrun(struct seq_file *m, void *data)
@@ -175,6 +182,11 @@ static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
int ret, i;
u32 __iomem *dst_kernel;
+ /*
+ * NOTE: We don't need a call to drm_dev_enter()/drm_dev_exit()
+ * here since that function is only called from vc4_hvs_bind().
+ */
+
ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS);
if (ret) {
DRM_ERROR("Failed to allocate space for filter kernel: %d\n",
@@ -199,10 +211,15 @@ static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
static void vc4_hvs_lut_load(struct vc4_hvs *hvs,
struct vc4_crtc *vc4_crtc)
{
+ struct drm_device *drm = &hvs->vc4->base;
struct drm_crtc *crtc = &vc4_crtc->base;
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+ int idx;
u32 i;
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
/* The LUT memory is laid out with each HVS channel in order,
* each of which takes 256 writes for R, 256 for G, then 256
* for B.
@@ -217,6 +234,8 @@ static void vc4_hvs_lut_load(struct vc4_hvs *hvs,
HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_g[i]);
for (i = 0; i < crtc->gamma_size; i++)
HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]);
+
+ drm_dev_exit(idx);
}
static void vc4_hvs_update_gamma_lut(struct vc4_hvs *hvs,
@@ -238,7 +257,12 @@ static void vc4_hvs_update_gamma_lut(struct vc4_hvs *hvs,
u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo)
{
+ struct drm_device *drm = &hvs->vc4->base;
u8 field = 0;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return 0;
switch (fifo) {
case 0:
@@ -255,6 +279,7 @@ u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo)
break;
}
+ drm_dev_exit(idx);
return field;
}
@@ -267,6 +292,12 @@ int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output)
if (!vc4->is_vc5)
return output;
+ /*
+ * NOTE: We should probably use drm_dev_enter()/drm_dev_exit()
+ * here, but this function is only used during the DRM device
+ * initialization, so we should be fine.
+ */
+
switch (output) {
case 0:
return 0;
@@ -315,12 +346,17 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
struct drm_display_mode *mode, bool oneshot)
{
struct vc4_dev *vc4 = hvs->vc4;
+ struct drm_device *drm = &vc4->base;
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state);
unsigned int chan = vc4_crtc_state->assigned_channel;
bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
u32 dispbkgndx;
u32 dispctrl;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return -ENODEV;
HVS_WRITE(SCALER_DISPCTRLX(chan), 0);
HVS_WRITE(SCALER_DISPCTRLX(chan), SCALER_DISPCTRLX_RESET);
@@ -362,14 +398,22 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
*/
vc4_hvs_lut_load(hvs, vc4_crtc);
+ drm_dev_exit(idx);
+
return 0;
}
void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan)
{
- if (HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_ENABLE)
+ struct drm_device *drm = &hvs->vc4->base;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
return;
+ if (HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_ENABLE)
+ goto out;
+
HVS_WRITE(SCALER_DISPCTRLX(chan),
HVS_READ(SCALER_DISPCTRLX(chan)) | SCALER_DISPCTRLX_RESET);
HVS_WRITE(SCALER_DISPCTRLX(chan),
@@ -385,6 +429,9 @@ void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan)
WARN_ON_ONCE((HVS_READ(SCALER_DISPSTATX(chan)) &
(SCALER_DISPSTATX_FULL | SCALER_DISPSTATX_EMPTY)) !=
SCALER_DISPSTATX_EMPTY);
+
+out:
+ drm_dev_exit(idx);
}
int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
@@ -426,9 +473,15 @@ static void vc4_hvs_install_dlist(struct drm_crtc *crtc)
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
HVS_WRITE(SCALER_DISPLISTX(vc4_state->assigned_channel),
vc4_state->mm.start);
+
+ drm_dev_exit(idx);
}
static void vc4_hvs_update_dlist(struct drm_crtc *crtc)
@@ -513,6 +566,12 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
bool enable_bg_fill = false;
u32 __iomem *dlist_start = vc4->hvs->dlist + vc4_state->mm.start;
u32 __iomem *dlist_next = dlist_start;
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx)) {
+ vc4_crtc_send_vblank(crtc);
+ return;
+ }
if (debug_dump_regs) {
DRM_INFO("CRTC %d HVS before:\n", drm_crtc_index(crtc));
@@ -583,26 +642,44 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
vc4_hvs_dump_state(hvs);
}
+
+ drm_dev_exit(idx);
}
void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel)
{
- u32 dispctrl = HVS_READ(SCALER_DISPCTRL);
+ struct drm_device *drm = &hvs->vc4->base;
+ u32 dispctrl;
+ int idx;
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ dispctrl = HVS_READ(SCALER_DISPCTRL);
dispctrl &= ~SCALER_DISPCTRL_DSPEISLUR(channel);
HVS_WRITE(SCALER_DISPCTRL, dispctrl);
+
+ drm_dev_exit(idx);
}
void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel)
{
- u32 dispctrl = HVS_READ(SCALER_DISPCTRL);
+ struct drm_device *drm = &hvs->vc4->base;
+ u32 dispctrl;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
+ dispctrl = HVS_READ(SCALER_DISPCTRL);
dispctrl |= SCALER_DISPCTRL_DSPEISLUR(channel);
HVS_WRITE(SCALER_DISPSTAT,
SCALER_DISPSTAT_EUFLOW(channel));
HVS_WRITE(SCALER_DISPCTRL, dispctrl);
+
+ drm_dev_exit(idx);
}
static void vc4_hvs_report_underrun(struct drm_device *dev)
@@ -623,6 +700,17 @@ static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
u32 control;
u32 status;
+ /*
+ * NOTE: We don't need to protect the register access using
+ * drm_dev_enter() here because the interrupt handler lifetime
+ * is tied to the device itself, and not to the DRM device.
+ *
+ * So when the device goes away, one of the first things we
+ * will do is unregister the interrupt handler, and only then
+ * unregister the DRM device. drm_dev_enter() would thus always
+ * succeed if we get here.
+ */
+
status = HVS_READ(SCALER_DISPSTAT);
control = HVS_READ(SCALER_DISPCTRL);
@@ -645,6 +733,39 @@ static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
return irqret;
}
+int vc4_hvs_debugfs_init(struct drm_minor *minor)
+{
+ struct drm_device *drm = minor->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_hvs *hvs = vc4->hvs;
+ int ret;
+
+ if (!vc4->hvs)
+ return -ENODEV;
+
+ if (!vc4->is_vc5)
+ debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR,
+ minor->debugfs_root,
+ &vc4->load_tracker_enabled);
+
+ ret = vc4_debugfs_add_file(minor, "hvs_dlists",
+ vc4_hvs_debugfs_dlist, NULL);
+ if (ret)
+ return ret;
+
+ ret = vc4_debugfs_add_file(minor, "hvs_underrun",
+ vc4_hvs_debugfs_underrun, NULL);
+ if (ret)
+ return ret;
+
+ ret = vc4_debugfs_add_regset32(minor, "hvs_regs",
+ &hvs->regset);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -655,10 +776,9 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
u32 dispctrl;
u32 reg;
- hvs = devm_kzalloc(&pdev->dev, sizeof(*hvs), GFP_KERNEL);
+ hvs = drmm_kzalloc(drm, sizeof(*hvs), GFP_KERNEL);
if (!hvs)
return -ENOMEM;
-
hvs->vc4 = vc4;
hvs->pdev = pdev;
@@ -771,12 +891,6 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- vc4_debugfs_add_regset32(drm, "hvs_regs", &hvs->regset);
- vc4_debugfs_add_file(drm, "hvs_underrun", vc4_hvs_debugfs_underrun,
- NULL);
- vc4_debugfs_add_file(drm, "hvs_dlists", vc4_hvs_debugfs_dlist,
- NULL);
-
return 0;
}
@@ -786,11 +900,18 @@ static void vc4_hvs_unbind(struct device *dev, struct device *master,
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_hvs *hvs = vc4->hvs;
+ struct drm_mm_node *node, *next;
if (drm_mm_node_allocated(&vc4->hvs->mitchell_netravali_filter))
drm_mm_remove_node(&vc4->hvs->mitchell_netravali_filter);
+ drm_mm_for_each_node_safe(node, next, &vc4->hvs->dlist_mm)
+ drm_mm_remove_node(node);
+
drm_mm_takedown(&vc4->hvs->dlist_mm);
+
+ drm_mm_for_each_node_safe(node, next, &vc4->hvs->lbm_mm)
+ drm_mm_remove_node(node);
drm_mm_takedown(&vc4->hvs->lbm_mm);
clk_disable_unprepare(hvs->core_clk);
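A note on the pattern: every vc4_hvs.c register accessor above is now bracketed by drm_dev_enter()/drm_dev_exit(), so the hardware is never touched once the DRM device has been unplugged (the IRQ handler being the documented exception). A minimal sketch of that guard, using a hypothetical my_hw_kick() helper and register offset rather than the real HVS accessors:

#include <drm/drm_drv.h>
#include <linux/io.h>
#include <linux/types.h>

/*
 * Hypothetical example (not vc4 code): make a register write a no-op
 * once drm_dev_unplug() has been called on the device.
 */
static void my_hw_kick(struct drm_device *drm, void __iomem *regs, u32 val)
{
        int idx;

        /* Fails after drm_dev_unplug(); otherwise opens an SRCU read section. */
        if (!drm_dev_enter(drm, &idx))
                return;

        writel(val, regs + 0x40);       /* hypothetical register offset */

        /* Close the section opened by drm_dev_enter(). */
        drm_dev_exit(idx);
}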
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 2eacfb6773d2..1e6db0121ccd 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -105,7 +105,7 @@ vc4_overflow_mem_work(struct work_struct *work)
}
vc4->bin_alloc_overflow = BIT(bin_bo_slot);
- V3D_WRITE(V3D_BPOA, bo->base.paddr + bin_bo_slot * vc4->bin_alloc_size);
+ V3D_WRITE(V3D_BPOA, bo->base.dma_addr + bin_bo_slot * vc4->bin_alloc_size);
V3D_WRITE(V3D_BPOS, bo->base.base.size);
V3D_WRITE(V3D_INTCTL, V3D_INT_OUTOMEM);
V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);
@@ -295,7 +295,7 @@ vc4_irq_disable(struct drm_device *dev)
V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
/* Finish any interrupt handler still in flight. */
- disable_irq(vc4->irq);
+ synchronize_irq(vc4->irq);
cancel_work_sync(&vc4->overflow_mem_work);
}
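The vc4_irq.c hunk above swaps disable_irq() for synchronize_irq(): the intent is only to wait out a handler that may still be running, not to mask the line (which previously had to be undone with enable_irq() on resume, a call the vc4_v3d.c diff further down removes). A rough sketch of that quiescing pattern, with hypothetical names (example_dev, EXAMPLE_INTENA, deferred_work) standing in for the real driver state:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/workqueue.h>

struct example_dev {
        void __iomem *regs;
        int irq;
        struct work_struct deferred_work;
};

#define EXAMPLE_INTENA 0x08     /* hypothetical interrupt-enable register */

/* Hypothetical example (not vc4 code): quiesce interrupts for suspend. */
static void example_irq_quiesce(struct example_dev *edev)
{
        /* Stop the device from raising new interrupts. */
        writel(0, edev->regs + EXAMPLE_INTENA);

        /*
         * Wait for a handler already in flight to finish. Unlike
         * disable_irq(), this leaves the line enabled, so resume does
         * not need a matching enable_irq().
         */
        synchronize_irq(edev->irq);

        /* Flush any work the handler may have queued. */
        cancel_work_sync(&edev->deferred_work);
}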
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index b45dcdfd7306..4419e810103d 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -18,7 +18,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c
index 79a74184d732..c4ac2c946238 100644
--- a/drivers/gpu/drm/vc4/vc4_perfmon.c
+++ b/drivers/gpu/drm/vc4/vc4_perfmon.c
@@ -133,6 +133,7 @@ void vc4_perfmon_close_file(struct vc4_file *vc4file)
idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, NULL);
idr_destroy(&vc4file->perfmon.idr);
mutex_unlock(&vc4file->perfmon.lock);
+ mutex_destroy(&vc4file->perfmon.lock);
}
int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index f27e87a23df7..8b92a45a3c89 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -19,11 +19,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_blend.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include "uapi/drm/vc4_drm.h"
@@ -340,7 +340,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, 0);
int num_planes = fb->format->num_planes;
struct drm_crtc_state *crtc_state;
u32 h_subsample = fb->format->hsub;
@@ -360,7 +360,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
return ret;
for (i = 0; i < num_planes; i++)
- vc4_state->offsets[i] = bo->paddr + fb->offsets[i];
+ vc4_state->offsets[i] = bo->dma_addr + fb->offsets[i];
/*
* We don't support subpixel source positioning for scaling,
@@ -1220,6 +1220,10 @@ u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
int i;
+ int idx;
+
+ if (!drm_dev_enter(plane->dev, &idx))
+ goto out;
vc4_state->hw_dlist = dlist;
@@ -1227,6 +1231,9 @@ u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
for (i = 0; i < vc4_state->dlist_count; i++)
writel(vc4_state->dlist[i], &dlist[i]);
+ drm_dev_exit(idx);
+
+out:
return vc4_state->dlist_count;
}
@@ -1244,14 +1251,18 @@ u32 vc4_plane_dlist_size(const struct drm_plane_state *state)
void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
- struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, 0);
uint32_t addr;
+ int idx;
+
+ if (!drm_dev_enter(plane->dev, &idx))
+ return;
/* We're skipping the address adjustment for negative origin,
* because this is only called on the primary plane.
*/
WARN_ON_ONCE(plane->state->crtc_x < 0 || plane->state->crtc_y < 0);
- addr = bo->paddr + fb->offsets[0];
+ addr = bo->dma_addr + fb->offsets[0];
/* Write the new address into the hardware immediately. The
* scanout will start from this address as soon as the FIFO
@@ -1264,6 +1275,8 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
* also use our updated address.
*/
vc4_state->dlist[vc4_state->ptr0_offset] = addr;
+
+ drm_dev_exit(idx);
}
static void vc4_plane_atomic_async_update(struct drm_plane *plane,
@@ -1272,6 +1285,10 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct vc4_plane_state *vc4_state, *new_vc4_state;
+ int idx;
+
+ if (!drm_dev_enter(plane->dev, &idx))
+ return;
swap(plane->state->fb, new_plane_state->fb);
plane->state->crtc_x = new_plane_state->crtc_x;
@@ -1334,6 +1351,8 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
&vc4_state->hw_dlist[vc4_state->pos2_offset]);
writel(vc4_state->dlist[vc4_state->ptr0_offset],
&vc4_state->hw_dlist[vc4_state->ptr0_offset]);
+
+ drm_dev_exit(idx);
}
static int vc4_plane_atomic_async_check(struct drm_plane *plane,
@@ -1388,7 +1407,7 @@ static int vc4_prepare_fb(struct drm_plane *plane,
if (!state->fb)
return 0;
- bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
+ bo = to_vc4_bo(&drm_fb_dma_get_gem_obj(state->fb, 0)->base);
drm_gem_plane_helper_prepare_fb(plane, state);
@@ -1406,7 +1425,7 @@ static void vc4_cleanup_fb(struct drm_plane *plane,
if (plane->state->fb == state->fb || !state->fb)
return;
- bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
+ bo = to_vc4_bo(&drm_fb_dma_get_gem_obj(state->fb, 0)->base);
vc4_bo_dec_usecnt(bo);
}
@@ -1483,8 +1502,6 @@ static bool vc4_format_mod_supported(struct drm_plane *plane,
static const struct drm_plane_funcs vc4_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_plane_cleanup,
- .set_property = NULL,
.reset = vc4_plane_reset,
.atomic_duplicate_state = vc4_plane_duplicate_state,
.atomic_destroy_state = vc4_plane_destroy_state,
@@ -1492,14 +1509,14 @@ static const struct drm_plane_funcs vc4_plane_funcs = {
};
struct drm_plane *vc4_plane_init(struct drm_device *dev,
- enum drm_plane_type type)
+ enum drm_plane_type type,
+ uint32_t possible_crtcs)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct drm_plane *plane = NULL;
+ struct drm_plane *plane;
struct vc4_plane *vc4_plane;
u32 formats[ARRAY_SIZE(hvs_formats)];
int num_formats = 0;
- int ret = 0;
unsigned i;
static const uint64_t modifiers[] = {
DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
@@ -1510,11 +1527,6 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
DRM_FORMAT_MOD_INVALID
};
- vc4_plane = devm_kzalloc(dev->dev, sizeof(*vc4_plane),
- GFP_KERNEL);
- if (!vc4_plane)
- return ERR_PTR(-ENOMEM);
-
for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
if (!hvs_formats[i].hvs5_only || vc4->is_vc5) {
formats[num_formats] = hvs_formats[i].drm;
@@ -1522,13 +1534,14 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
}
}
+ vc4_plane = drmm_universal_plane_alloc(dev, struct vc4_plane, base,
+ possible_crtcs,
+ &vc4_plane_funcs,
+ formats, num_formats,
+ modifiers, type, NULL);
+ if (IS_ERR(vc4_plane))
+ return ERR_CAST(vc4_plane);
plane = &vc4_plane->base;
- ret = drm_universal_plane_init(dev, plane, 0,
- &vc4_plane_funcs,
- formats, num_formats,
- modifiers, type, NULL);
- if (ret)
- return ERR_PTR(ret);
if (vc4->is_vc5)
drm_plane_helper_add(plane, &vc5_plane_helper_funcs);
@@ -1575,13 +1588,11 @@ int vc4_plane_create_additional_planes(struct drm_device *drm)
*/
for (i = 0; i < 16; i++) {
struct drm_plane *plane =
- vc4_plane_init(drm, DRM_PLANE_TYPE_OVERLAY);
+ vc4_plane_init(drm, DRM_PLANE_TYPE_OVERLAY,
+ GENMASK(drm->mode_config.num_crtc - 1, 0));
if (IS_ERR(plane))
continue;
-
- plane->possible_crtcs =
- GENMASK(drm->mode_config.num_crtc - 1, 0);
}
drm_for_each_crtc(crtc, drm) {
@@ -1589,9 +1600,9 @@ int vc4_plane_create_additional_planes(struct drm_device *drm)
* since we overlay planes on the CRTC in the order they were
* initialized.
*/
- cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR);
+ cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR,
+ drm_crtc_mask(crtc));
if (!IS_ERR(cursor_plane)) {
- cursor_plane->possible_crtcs = drm_crtc_mask(crtc);
crtc->cursor = cursor_plane;
}
}
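The vc4_plane.c rework above replaces the devm_kzalloc() + drm_universal_plane_init() pair with drmm_universal_plane_alloc(), which allocates the wrapper, initialises the embedded drm_plane, ties cleanup to the drm_device lifetime (hence the dropped .destroy hook), and takes possible_crtcs at init time instead of the field being patched afterwards. A generic sketch of that allocation style, with a hypothetical my_plane wrapper and format list:

#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
#include <linux/err.h>
#include <linux/kernel.h>

/* Hypothetical wrapper (not vc4 code) embedding a KMS plane. */
struct my_plane {
        struct drm_plane base;
};

static const u32 my_formats[] = { DRM_FORMAT_XRGB8888 };

static const struct drm_plane_funcs my_plane_funcs = {
        /* .update_plane, .reset, ... as needed; no .destroy, since the
         * drmm_ allocation is released together with the drm_device. */
};

static struct drm_plane *my_plane_create(struct drm_device *dev,
                                         u32 possible_crtcs)
{
        struct my_plane *p;

        /* Allocation, drm_universal_plane_init() and managed cleanup in one call. */
        p = drmm_universal_plane_alloc(dev, struct my_plane, base,
                                       possible_crtcs, &my_plane_funcs,
                                       my_formats, ARRAY_SIZE(my_formats),
                                       NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
        if (IS_ERR(p))
                return ERR_CAST(p);

        return &p->base;
}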
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index f6b7dc3df08c..1bda5010f15a 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -40,14 +40,14 @@
#include "vc4_packet.h"
struct vc4_rcl_setup {
- struct drm_gem_cma_object *color_read;
- struct drm_gem_cma_object *color_write;
- struct drm_gem_cma_object *zs_read;
- struct drm_gem_cma_object *zs_write;
- struct drm_gem_cma_object *msaa_color_write;
- struct drm_gem_cma_object *msaa_zs_write;
-
- struct drm_gem_cma_object *rcl;
+ struct drm_gem_dma_object *color_read;
+ struct drm_gem_dma_object *color_write;
+ struct drm_gem_dma_object *zs_read;
+ struct drm_gem_dma_object *zs_write;
+ struct drm_gem_dma_object *msaa_color_write;
+ struct drm_gem_dma_object *msaa_zs_write;
+
+ struct drm_gem_dma_object *rcl;
u32 next_offset;
u32 next_write_bo_index;
@@ -97,11 +97,11 @@ static void vc4_store_before_load(struct vc4_rcl_setup *setup)
* coordinates packet, and instead just store to the address given.
*/
static uint32_t vc4_full_res_offset(struct vc4_exec_info *exec,
- struct drm_gem_cma_object *bo,
+ struct drm_gem_dma_object *bo,
struct drm_vc4_submit_rcl_surface *surf,
uint8_t x, uint8_t y)
{
- return bo->paddr + surf->offset + VC4_TILE_BUFFER_SIZE *
+ return bo->dma_addr + surf->offset + VC4_TILE_BUFFER_SIZE *
(DIV_ROUND_UP(exec->args->width, 32) * y + x);
}
@@ -142,7 +142,7 @@ static void emit_tile(struct vc4_exec_info *exec,
} else {
rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
rcl_u16(setup, args->color_read.bits);
- rcl_u32(setup, setup->color_read->paddr +
+ rcl_u32(setup, setup->color_read->dma_addr +
args->color_read.offset);
}
}
@@ -164,7 +164,7 @@ static void emit_tile(struct vc4_exec_info *exec,
} else {
rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
rcl_u16(setup, args->zs_read.bits);
- rcl_u32(setup, setup->zs_read->paddr +
+ rcl_u32(setup, setup->zs_read->dma_addr +
args->zs_read.offset);
}
}
@@ -232,7 +232,7 @@ static void emit_tile(struct vc4_exec_info *exec,
(last_tile_write ?
0 : VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR));
rcl_u32(setup,
- (setup->zs_write->paddr + args->zs_write.offset) |
+ (setup->zs_write->dma_addr + args->zs_write.offset) |
((last && last_tile_write) ?
VC4_LOADSTORE_TILE_BUFFER_EOF : 0));
}
@@ -355,7 +355,7 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
rcl_u32(setup,
- (setup->color_write ? (setup->color_write->paddr +
+ (setup->color_write ? (setup->color_write->dma_addr +
args->color_write.offset) :
0));
rcl_u16(setup, args->width);
@@ -374,14 +374,14 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
}
BUG_ON(setup->next_offset != size);
- exec->ct1ca = setup->rcl->paddr;
- exec->ct1ea = setup->rcl->paddr + setup->next_offset;
+ exec->ct1ca = setup->rcl->dma_addr;
+ exec->ct1ea = setup->rcl->dma_addr + setup->next_offset;
return 0;
}
static int vc4_full_res_bounds_check(struct vc4_exec_info *exec,
- struct drm_gem_cma_object *obj,
+ struct drm_gem_dma_object *obj,
struct drm_vc4_submit_rcl_surface *surf)
{
struct drm_vc4_submit_cl *args = exec->args;
@@ -407,7 +407,7 @@ static int vc4_full_res_bounds_check(struct vc4_exec_info *exec,
}
static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
- struct drm_gem_cma_object **obj,
+ struct drm_gem_dma_object **obj,
struct drm_vc4_submit_rcl_surface *surf)
{
if (surf->flags != 0 || surf->bits != 0) {
@@ -433,7 +433,7 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
}
static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
- struct drm_gem_cma_object **obj,
+ struct drm_gem_dma_object **obj,
struct drm_vc4_submit_rcl_surface *surf,
bool is_write)
{
@@ -533,7 +533,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
static int
vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
struct vc4_rcl_setup *setup,
- struct drm_gem_cma_object **obj,
+ struct drm_gem_dma_object **obj,
struct drm_vc4_submit_rcl_surface *surf)
{
uint8_t tiling = VC4_GET_FIELD(surf->bits,
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index d20b0bc51a18..bd181b5a7b52 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -15,8 +15,9 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_panel.h>
@@ -155,7 +156,6 @@ struct vc4_txp {
struct drm_writeback_connector connector;
void __iomem *regs;
- struct debugfs_regset32 regset;
};
static inline struct vc4_txp *encoder_to_vc4_txp(struct drm_encoder *encoder)
@@ -276,13 +276,15 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
struct drm_atomic_state *state)
{
+ struct drm_device *drm = conn->dev;
struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(state,
conn);
struct vc4_txp *txp = connector_to_vc4_txp(conn);
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
struct drm_display_mode *mode;
struct drm_framebuffer *fb;
u32 ctrl;
+ int idx;
int i;
if (WARN_ON(!conn_state->writeback_job))
@@ -312,8 +314,11 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
*/
ctrl |= TXP_ALPHA_INVERT;
- gem = drm_fb_cma_get_gem_obj(fb, 0);
- TXP_WRITE(TXP_DST_PTR, gem->paddr + fb->offsets[0]);
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ gem = drm_fb_dma_get_gem_obj(fb, 0);
+ TXP_WRITE(TXP_DST_PTR, gem->dma_addr + fb->offsets[0]);
TXP_WRITE(TXP_DST_PITCH, fb->pitches[0]);
TXP_WRITE(TXP_DIM,
VC4_SET_FIELD(mode->hdisplay, TXP_WIDTH) |
@@ -322,6 +327,8 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
TXP_WRITE(TXP_DST_CTRL, ctrl);
drm_writeback_queue_job(&txp->connector, conn_state);
+
+ drm_dev_exit(idx);
}
static const struct drm_connector_helper_funcs vc4_txp_connector_helper_funcs = {
@@ -337,16 +344,10 @@ vc4_txp_connector_detect(struct drm_connector *connector, bool force)
return connector_status_connected;
}
-static void vc4_txp_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
static const struct drm_connector_funcs vc4_txp_connector_funcs = {
.detect = vc4_txp_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = vc4_txp_connector_destroy,
+ .destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -354,7 +355,12 @@ static const struct drm_connector_funcs vc4_txp_connector_funcs = {
static void vc4_txp_encoder_disable(struct drm_encoder *encoder)
{
+ struct drm_device *drm = encoder->dev;
struct vc4_txp *txp = encoder_to_vc4_txp(encoder);
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
if (TXP_READ(TXP_DST_CTRL) & TXP_BUSY) {
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
@@ -369,6 +375,8 @@ static void vc4_txp_encoder_disable(struct drm_encoder *encoder)
}
TXP_WRITE(TXP_DST_CTRL, TXP_POWERDOWN);
+
+ drm_dev_exit(idx);
}
static const struct drm_encoder_helper_funcs vc4_txp_encoder_helper_funcs = {
@@ -384,13 +392,13 @@ static void vc4_txp_disable_vblank(struct drm_crtc *crtc) {}
static const struct drm_crtc_funcs vc4_txp_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
- .destroy = vc4_crtc_destroy,
.page_flip = vc4_page_flip,
.reset = vc4_crtc_reset,
.atomic_duplicate_state = vc4_crtc_duplicate_state,
.atomic_destroy_state = vc4_crtc_destroy_state,
.enable_vblank = vc4_txp_enable_vblank,
.disable_vblank = vc4_txp_disable_vblank,
+ .late_register = vc4_crtc_late_register,
};
static int vc4_txp_atomic_check(struct drm_crtc *crtc,
@@ -453,6 +461,16 @@ static irqreturn_t vc4_txp_interrupt(int irq, void *data)
struct vc4_txp *txp = data;
struct vc4_crtc *vc4_crtc = &txp->base;
+ /*
+ * We don't need to protect the register access using
+ * drm_dev_enter() here because the interrupt handler lifetime
+ * is tied to the device itself, and not to the DRM device.
+ *
+ * So when the device goes away, one of the first things we
+ * will do is unregister the interrupt handler, and only then
+ * unregister the DRM device. drm_dev_enter() would thus always
+ * succeed if we get here.
+ */
TXP_WRITE(TXP_DST_CTRL, TXP_READ(TXP_DST_CTRL) & ~TXP_EI);
vc4_crtc_handle_vblank(vc4_crtc);
drm_writeback_signal_completion(&txp->connector, 0);
@@ -461,6 +479,7 @@ static irqreturn_t vc4_txp_interrupt(int irq, void *data)
}
static const struct vc4_crtc_data vc4_txp_crtc_data = {
+ .debugfs_name = "txp_regs",
.hvs_available_channels = BIT(2),
.hvs_output = 2,
};
@@ -469,7 +488,6 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_crtc *vc4_crtc;
struct vc4_txp *txp;
struct drm_crtc *crtc;
@@ -480,7 +498,7 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
if (irq < 0)
return irq;
- txp = devm_kzalloc(dev, sizeof(*txp), GFP_KERNEL);
+ txp = drmm_kzalloc(drm, sizeof(*txp), GFP_KERNEL);
if (!txp)
return -ENOMEM;
vc4_crtc = &txp->base;
@@ -495,9 +513,9 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
txp->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(txp->regs))
return PTR_ERR(txp->regs);
- txp->regset.base = txp->regs;
- txp->regset.regs = txp_regs;
- txp->regset.nregs = ARRAY_SIZE(txp_regs);
+ vc4_crtc->regset.base = txp->regs;
+ vc4_crtc->regset.regs = txp_regs;
+ vc4_crtc->regset.nregs = ARRAY_SIZE(txp_regs);
drm_connector_helper_add(&txp->connector.base,
&vc4_txp_connector_helper_funcs);
@@ -523,9 +541,6 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
return ret;
dev_set_drvdata(dev, txp);
- vc4->txp = txp;
-
- vc4_debugfs_add_regset32(drm, "txp_regs", &txp->regset);
return 0;
}
@@ -533,13 +548,9 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
static void vc4_txp_unbind(struct device *dev, struct device *master,
void *data)
{
- struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_txp *txp = dev_get_drvdata(dev);
- vc4_txp_connector_destroy(&txp->connector.base);
-
- vc4->txp = NULL;
+ drm_connector_cleanup(&txp->connector.base);
}
static const struct component_ops vc4_txp_ops = {
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index cc714dcfe1f2..56abb0d6bc39 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -231,7 +231,7 @@ try_again:
* if it doesn't fit within the buffer that we allocated up front.
* However, it turns out that 16MB is "enough for anybody", and
* real-world applications run into allocation failures from the
- * overall CMA pool before they make scenes complicated enough to run
+ * overall DMA pool before they make scenes complicated enough to run
* out of bin space.
*/
static int bin_bo_alloc(struct vc4_dev *vc4)
@@ -261,15 +261,15 @@ static int bin_bo_alloc(struct vc4_dev *vc4)
dev_err(&v3d->pdev->dev,
"Failed to allocate memory for tile binning: "
- "%d. You may need to enable CMA or give it "
+ "%d. You may need to enable DMA or give it "
"more memory.",
ret);
break;
}
/* Check if this BO won't trigger the addressing bug. */
- if ((bo->base.paddr & 0xf0000000) ==
- ((bo->base.paddr + bo->base.base.size - 1) & 0xf0000000)) {
+ if ((bo->base.dma_addr & 0xf0000000) ==
+ ((bo->base.dma_addr + bo->base.base.size - 1) & 0xf0000000)) {
vc4->bin_bo = bo;
/* Set up for allocating 512KB chunks of
@@ -393,14 +393,34 @@ static int vc4_v3d_runtime_resume(struct device *dev)
vc4_v3d_init_hw(&vc4->base);
- /* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */
- enable_irq(vc4->irq);
vc4_irq_enable(&vc4->base);
return 0;
}
#endif
+int vc4_v3d_debugfs_init(struct drm_minor *minor)
+{
+ struct drm_device *drm = minor->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_v3d *v3d = vc4->v3d;
+ int ret;
+
+ if (!vc4->v3d)
+ return -ENODEV;
+
+ ret = vc4_debugfs_add_file(minor, "v3d_ident",
+ vc4_v3d_debugfs_ident, NULL);
+ if (ret)
+ return ret;
+
+ ret = vc4_debugfs_add_regset32(minor, "v3d_regs", &v3d->regset);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -443,44 +463,47 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
}
}
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+ vc4->irq = ret;
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) {
DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n",
V3D_READ(V3D_IDENT0), V3D_EXPECTED_IDENT0);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_put_runtime_pm;
}
- ret = clk_prepare_enable(v3d->clk);
- if (ret != 0)
- return ret;
-
/* Reset the binner overflow address/size at setup, to be sure
* we don't reuse an old one.
*/
V3D_WRITE(V3D_BPOA, 0);
V3D_WRITE(V3D_BPOS, 0);
- vc4_v3d_init_hw(drm);
-
- ret = platform_get_irq(pdev, 0);
- if (ret < 0)
- return ret;
- vc4->irq = ret;
-
ret = vc4_irq_install(drm, vc4->irq);
if (ret) {
DRM_ERROR("Failed to install IRQ handler\n");
- return ret;
+ goto err_put_runtime_pm;
}
- pm_runtime_set_active(dev);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 40); /* a little over 2 frames. */
- pm_runtime_enable(dev);
-
- vc4_debugfs_add_file(drm, "v3d_ident", vc4_v3d_debugfs_ident, NULL);
- vc4_debugfs_add_regset32(drm, "v3d_regs", &v3d->regset);
return 0;
+
+err_put_runtime_pm:
+ pm_runtime_put(dev);
+
+ return ret;
}
static void vc4_v3d_unbind(struct device *dev, struct device *master,
@@ -489,8 +512,6 @@ static void vc4_v3d_unbind(struct device *dev, struct device *master,
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dev *vc4 = to_vc4_dev(drm);
- pm_runtime_disable(dev);
-
vc4_irq_uninstall(drm);
/* Disable the binner's overflow memory address, so the next
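The vc4_v3d.c bind path above now enables runtime PM through devres and takes a PM reference before the first register read, unwinding with pm_runtime_put() on every error path (which is also why unbind no longer calls pm_runtime_disable()). A simplified sketch of that ordering, assuming a hypothetical ident register and expected value:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>

#define MY_IDENT                0x00            /* hypothetical ident register */
#define MY_EXPECTED_IDENT       0x12345678      /* hypothetical expected value */

/* Hypothetical bind-time sketch (not vc4 code). */
static int my_bind(struct device *dev, void __iomem *regs)
{
        int ret;

        /* Enabled now, automatically disabled again when dev is unbound. */
        ret = devm_pm_runtime_enable(dev);
        if (ret)
                return ret;

        /* Power the block up before touching any register. */
        ret = pm_runtime_resume_and_get(dev);
        if (ret)
                return ret;

        if (readl(regs + MY_IDENT) != MY_EXPECTED_IDENT) {
                ret = -EINVAL;
                goto err_put;
        }

        /* Success: keep the reference so the block stays powered for now. */
        return 0;

err_put:
        pm_runtime_put(dev);
        return ret;
}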
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 2feba55bcef7..520231af4df9 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -102,11 +102,11 @@ size_is_lt(uint32_t width, uint32_t height, int cpp)
height <= 4 * utile_height(cpp));
}
-struct drm_gem_cma_object *
+struct drm_gem_dma_object *
vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
{
struct vc4_dev *vc4 = exec->dev;
- struct drm_gem_cma_object *obj;
+ struct drm_gem_dma_object *obj;
struct vc4_bo *bo;
if (WARN_ON_ONCE(vc4->is_vc5))
@@ -129,7 +129,7 @@ vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
return obj;
}
-static struct drm_gem_cma_object *
+static struct drm_gem_dma_object *
vc4_use_handle(struct vc4_exec_info *exec, uint32_t gem_handles_packet_index)
{
return vc4_use_bo(exec, exec->bo_index[gem_handles_packet_index]);
@@ -160,7 +160,7 @@ gl_shader_rec_size(uint32_t pointer_bits)
}
bool
-vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
+vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_dma_object *fbo,
uint32_t offset, uint8_t tiling_format,
uint32_t width, uint32_t height, uint8_t cpp)
{
@@ -263,7 +263,7 @@ validate_increment_semaphore(VALIDATE_ARGS)
static int
validate_indexed_prim_list(VALIDATE_ARGS)
{
- struct drm_gem_cma_object *ib;
+ struct drm_gem_dma_object *ib;
uint32_t length = *(uint32_t *)(untrusted + 1);
uint32_t offset = *(uint32_t *)(untrusted + 5);
uint32_t max_index = *(uint32_t *)(untrusted + 9);
@@ -294,7 +294,7 @@ validate_indexed_prim_list(VALIDATE_ARGS)
return -EINVAL;
}
- *(uint32_t *)(validated + 5) = ib->paddr + offset;
+ *(uint32_t *)(validated + 5) = ib->dma_addr + offset;
return 0;
}
@@ -400,7 +400,7 @@ validate_tile_binning_config(VALIDATE_ARGS)
* free when the job completes rendering.
*/
exec->bin_slots |= BIT(bin_slot);
- bin_addr = vc4->bin_bo->base.paddr + bin_slot * vc4->bin_alloc_size;
+ bin_addr = vc4->bin_bo->base.dma_addr + bin_slot * vc4->bin_alloc_size;
/* The tile state data array is 48 bytes per tile, and we put it at
* the start of a BO containing both it and the tile alloc.
@@ -575,7 +575,7 @@ reloc_tex(struct vc4_exec_info *exec,
struct vc4_texture_sample_info *sample,
uint32_t texture_handle_index, bool is_cs)
{
- struct drm_gem_cma_object *tex;
+ struct drm_gem_dma_object *tex;
uint32_t p0 = *(uint32_t *)(uniform_data_u + sample->p_offset[0]);
uint32_t p1 = *(uint32_t *)(uniform_data_u + sample->p_offset[1]);
uint32_t p2 = (sample->p_offset[2] != ~0 ?
@@ -608,7 +608,7 @@ reloc_tex(struct vc4_exec_info *exec,
"outside of UBO\n");
goto fail;
}
- *validated_p0 = tex->paddr + p0;
+ *validated_p0 = tex->dma_addr + p0;
return true;
}
@@ -736,7 +736,7 @@ reloc_tex(struct vc4_exec_info *exec,
offset -= level_size;
}
- *validated_p0 = tex->paddr + p0;
+ *validated_p0 = tex->dma_addr + p0;
if (is_cs) {
exec->bin_dep_seqno = max(exec->bin_dep_seqno,
@@ -765,7 +765,7 @@ validate_gl_shader_rec(struct drm_device *dev,
28, /* cs */
};
uint32_t shader_reloc_count = ARRAY_SIZE(shader_reloc_offsets);
- struct drm_gem_cma_object *bo[ARRAY_SIZE(shader_reloc_offsets) + 8];
+ struct drm_gem_dma_object *bo[ARRAY_SIZE(shader_reloc_offsets) + 8];
uint32_t nr_attributes, nr_relocs, packet_size;
int i;
@@ -840,7 +840,7 @@ validate_gl_shader_rec(struct drm_device *dev,
void *uniform_data_u;
uint32_t tex, uni;
- *(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset;
+ *(uint32_t *)(pkt_v + o) = bo[i]->dma_addr + src_offset;
if (src_offset != 0) {
DRM_DEBUG("Shaders must be at offset 0 of "
@@ -896,7 +896,7 @@ validate_gl_shader_rec(struct drm_device *dev,
}
for (i = 0; i < nr_attributes; i++) {
- struct drm_gem_cma_object *vbo =
+ struct drm_gem_dma_object *vbo =
bo[ARRAY_SIZE(shader_reloc_offsets) + i];
uint32_t o = 36 + i * 8;
uint32_t offset = *(uint32_t *)(pkt_u + o + 0);
@@ -928,7 +928,7 @@ validate_gl_shader_rec(struct drm_device *dev,
}
}
- *(uint32_t *)(pkt_v + o) = vbo->paddr + offset;
+ *(uint32_t *)(pkt_v + o) = vbo->dma_addr + offset;
}
return 0;
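The vc4_validate.c hunks above (like the render-list and plane code earlier) are mechanical fallout of the CMA-to-DMA helper rename: struct drm_gem_cma_object becomes struct drm_gem_dma_object and its paddr field becomes dma_addr, while the addresses handed to the hardware are computed exactly as before. A small sketch of reading a scanout address with the renamed helpers, assuming a framebuffer whose first plane is backed by a DMA GEM object:

#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>

/* Hypothetical helper (not vc4 code): bus address of a framebuffer's plane 0. */
static dma_addr_t my_fb_plane0_addr(struct drm_framebuffer *fb)
{
        /* Was drm_fb_cma_get_gem_obj(), returning a drm_gem_cma_object. */
        struct drm_gem_dma_object *gem = drm_fb_dma_get_gem_obj(fb, 0);

        /* Was gem->paddr before the rename. */
        return gem->dma_addr + fb->offsets[0];
}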
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index e315aeb5fef5..9745f8810eca 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -776,7 +776,7 @@ vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state)
}
struct vc4_validated_shader_info *
-vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
+vc4_validate_shader(struct drm_gem_dma_object *shader_obj)
{
struct vc4_dev *vc4 = to_vc4_dev(shader_obj->base.dev);
bool found_shader_end = false;
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
index 11fc3d6f66b1..0b3333865702 100644
--- a/drivers/gpu/drm/vc4/vc4_vec.c
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -14,6 +14,7 @@
*/
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
@@ -160,48 +161,28 @@ struct vc4_vec_variant {
/* General VEC hardware state. */
struct vc4_vec {
+ struct vc4_encoder encoder;
+ struct drm_connector connector;
+
struct platform_device *pdev;
const struct vc4_vec_variant *variant;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
-
void __iomem *regs;
struct clk *clock;
- const struct vc4_vec_tv_mode *tv_mode;
-
struct debugfs_regset32 regset;
};
#define VEC_READ(offset) readl(vec->regs + (offset))
#define VEC_WRITE(offset, val) writel(val, vec->regs + (offset))
-/* VC4 VEC encoder KMS struct */
-struct vc4_vec_encoder {
- struct vc4_encoder base;
- struct vc4_vec *vec;
-};
-
-static inline struct vc4_vec_encoder *
-to_vc4_vec_encoder(struct drm_encoder *encoder)
+static inline struct vc4_vec *
+encoder_to_vc4_vec(struct drm_encoder *encoder)
{
- return container_of(encoder, struct vc4_vec_encoder, base.base);
+ return container_of(encoder, struct vc4_vec, encoder.base);
}
-/* VC4 VEC connector KMS struct */
-struct vc4_vec_connector {
- struct drm_connector base;
- struct vc4_vec *vec;
-
- /* Since the connector is attached to just the one encoder,
- * this is the reference to it so we can do the best_encoder()
- * hook.
- */
- struct drm_encoder *encoder;
-};
-
enum vc4_vec_tv_mode_id {
VC4_VEC_TV_MODE_NTSC,
VC4_VEC_TV_MODE_NTSC_J,
@@ -211,7 +192,9 @@ enum vc4_vec_tv_mode_id {
struct vc4_vec_tv_mode {
const struct drm_display_mode *mode;
- void (*mode_set)(struct vc4_vec *vec);
+ u32 config0;
+ u32 config1;
+ u32 custom_freq;
};
static const struct debugfs_reg32 vec_regs[] = {
@@ -241,63 +224,41 @@ static const struct debugfs_reg32 vec_regs[] = {
VC4_REG32(VEC_DAC_MISC),
};
-static void vc4_vec_ntsc_mode_set(struct vc4_vec *vec)
-{
- VEC_WRITE(VEC_CONFIG0, VEC_CONFIG0_NTSC_STD | VEC_CONFIG0_PDEN);
- VEC_WRITE(VEC_CONFIG1, VEC_CONFIG1_C_CVBS_CVBS);
-}
-
-static void vc4_vec_ntsc_j_mode_set(struct vc4_vec *vec)
-{
- VEC_WRITE(VEC_CONFIG0, VEC_CONFIG0_NTSC_STD);
- VEC_WRITE(VEC_CONFIG1, VEC_CONFIG1_C_CVBS_CVBS);
-}
-
static const struct drm_display_mode ntsc_mode = {
DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 13500,
720, 720 + 14, 720 + 14 + 64, 720 + 14 + 64 + 60, 0,
- 480, 480 + 3, 480 + 3 + 3, 480 + 3 + 3 + 16, 0,
+ 480, 480 + 7, 480 + 7 + 6, 525, 0,
DRM_MODE_FLAG_INTERLACE)
};
-static void vc4_vec_pal_mode_set(struct vc4_vec *vec)
-{
- VEC_WRITE(VEC_CONFIG0, VEC_CONFIG0_PAL_BDGHI_STD);
- VEC_WRITE(VEC_CONFIG1, VEC_CONFIG1_C_CVBS_CVBS);
-}
-
-static void vc4_vec_pal_m_mode_set(struct vc4_vec *vec)
-{
- VEC_WRITE(VEC_CONFIG0, VEC_CONFIG0_PAL_BDGHI_STD);
- VEC_WRITE(VEC_CONFIG1,
- VEC_CONFIG1_C_CVBS_CVBS | VEC_CONFIG1_CUSTOM_FREQ);
- VEC_WRITE(VEC_FREQ3_2, 0x223b);
- VEC_WRITE(VEC_FREQ1_0, 0x61d1);
-}
-
static const struct drm_display_mode pal_mode = {
DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 13500,
720, 720 + 20, 720 + 20 + 64, 720 + 20 + 64 + 60, 0,
- 576, 576 + 2, 576 + 2 + 3, 576 + 2 + 3 + 20, 0,
+ 576, 576 + 4, 576 + 4 + 6, 625, 0,
DRM_MODE_FLAG_INTERLACE)
};
static const struct vc4_vec_tv_mode vc4_vec_tv_modes[] = {
[VC4_VEC_TV_MODE_NTSC] = {
.mode = &ntsc_mode,
- .mode_set = vc4_vec_ntsc_mode_set,
+ .config0 = VEC_CONFIG0_NTSC_STD | VEC_CONFIG0_PDEN,
+ .config1 = VEC_CONFIG1_C_CVBS_CVBS,
},
[VC4_VEC_TV_MODE_NTSC_J] = {
.mode = &ntsc_mode,
- .mode_set = vc4_vec_ntsc_j_mode_set,
+ .config0 = VEC_CONFIG0_NTSC_STD,
+ .config1 = VEC_CONFIG1_C_CVBS_CVBS,
},
[VC4_VEC_TV_MODE_PAL] = {
.mode = &pal_mode,
- .mode_set = vc4_vec_pal_mode_set,
+ .config0 = VEC_CONFIG0_PAL_BDGHI_STD,
+ .config1 = VEC_CONFIG1_C_CVBS_CVBS,
},
[VC4_VEC_TV_MODE_PAL_M] = {
.mode = &pal_mode,
- .mode_set = vc4_vec_pal_m_mode_set,
+ .config0 = VEC_CONFIG0_PAL_BDGHI_STD,
+ .config1 = VEC_CONFIG1_C_CVBS_CVBS | VEC_CONFIG1_CUSTOM_FREQ,
+ .custom_freq = 0x223b61d1,
},
};
@@ -307,12 +268,6 @@ vc4_vec_connector_detect(struct drm_connector *connector, bool force)
return connector_status_unknown;
}
-static void vc4_vec_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
static int vc4_vec_connector_get_modes(struct drm_connector *connector)
{
struct drm_connector_state *state = connector->state;
@@ -333,7 +288,6 @@ static int vc4_vec_connector_get_modes(struct drm_connector *connector)
static const struct drm_connector_funcs vc4_vec_connector_funcs = {
.detect = vc4_vec_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = vc4_vec_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -343,42 +297,38 @@ static const struct drm_connector_helper_funcs vc4_vec_connector_helper_funcs =
.get_modes = vc4_vec_connector_get_modes,
};
-static struct drm_connector *vc4_vec_connector_init(struct drm_device *dev,
- struct vc4_vec *vec)
+static int vc4_vec_connector_init(struct drm_device *dev, struct vc4_vec *vec)
{
- struct drm_connector *connector = NULL;
- struct vc4_vec_connector *vec_connector;
-
- vec_connector = devm_kzalloc(dev->dev, sizeof(*vec_connector),
- GFP_KERNEL);
- if (!vec_connector)
- return ERR_PTR(-ENOMEM);
+ struct drm_connector *connector = &vec->connector;
+ int ret;
- connector = &vec_connector->base;
connector->interlace_allowed = true;
- vec_connector->encoder = vec->encoder;
- vec_connector->vec = vec;
+ ret = drmm_connector_init(dev, connector, &vc4_vec_connector_funcs,
+ DRM_MODE_CONNECTOR_Composite, NULL);
+ if (ret)
+ return ret;
- drm_connector_init(dev, connector, &vc4_vec_connector_funcs,
- DRM_MODE_CONNECTOR_Composite);
drm_connector_helper_add(connector, &vc4_vec_connector_helper_funcs);
drm_object_attach_property(&connector->base,
dev->mode_config.tv_mode_property,
VC4_VEC_TV_MODE_NTSC);
- vec->tv_mode = &vc4_vec_tv_modes[VC4_VEC_TV_MODE_NTSC];
- drm_connector_attach_encoder(connector, vec->encoder);
+ drm_connector_attach_encoder(connector, &vec->encoder.base);
- return connector;
+ return 0;
}
-static void vc4_vec_encoder_disable(struct drm_encoder *encoder)
+static void vc4_vec_encoder_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
- struct vc4_vec_encoder *vc4_vec_encoder = to_vc4_vec_encoder(encoder);
- struct vc4_vec *vec = vc4_vec_encoder->vec;
- int ret;
+ struct drm_device *drm = encoder->dev;
+ struct vc4_vec *vec = encoder_to_vc4_vec(encoder);
+ int idx, ret;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
VEC_WRITE(VEC_CFG, 0);
VEC_WRITE(VEC_DAC_MISC,
@@ -392,20 +342,35 @@ static void vc4_vec_encoder_disable(struct drm_encoder *encoder)
ret = pm_runtime_put(&vec->pdev->dev);
if (ret < 0) {
DRM_ERROR("Failed to release power domain: %d\n", ret);
- return;
+ goto err_dev_exit;
}
+
+ drm_dev_exit(idx);
+ return;
+
+err_dev_exit:
+ drm_dev_exit(idx);
}
-static void vc4_vec_encoder_enable(struct drm_encoder *encoder)
+static void vc4_vec_encoder_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
- struct vc4_vec_encoder *vc4_vec_encoder = to_vc4_vec_encoder(encoder);
- struct vc4_vec *vec = vc4_vec_encoder->vec;
- int ret;
+ struct drm_device *drm = encoder->dev;
+ struct vc4_vec *vec = encoder_to_vc4_vec(encoder);
+ struct drm_connector *connector = &vec->connector;
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ const struct vc4_vec_tv_mode *tv_mode =
+ &vc4_vec_tv_modes[conn_state->tv.mode];
+ int idx, ret;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
ret = pm_runtime_get_sync(&vec->pdev->dev);
if (ret < 0) {
DRM_ERROR("Failed to retain power domain: %d\n", ret);
- return;
+ goto err_dev_exit;
}
/*
@@ -418,13 +383,13 @@ static void vc4_vec_encoder_enable(struct drm_encoder *encoder)
ret = clk_set_rate(vec->clock, 108000000);
if (ret) {
DRM_ERROR("Failed to set clock rate: %d\n", ret);
- return;
+ goto err_put_runtime_pm;
}
ret = clk_prepare_enable(vec->clock);
if (ret) {
DRM_ERROR("Failed to turn on core clock: %d\n", ret);
- return;
+ goto err_put_runtime_pm;
}
/* Reset the different blocks */
@@ -455,29 +420,27 @@ static void vc4_vec_encoder_enable(struct drm_encoder *encoder)
/* Mask all interrupts. */
VEC_WRITE(VEC_MASK0, 0);
- vec->tv_mode->mode_set(vec);
+ VEC_WRITE(VEC_CONFIG0, tv_mode->config0);
+ VEC_WRITE(VEC_CONFIG1, tv_mode->config1);
+
+ if (tv_mode->custom_freq) {
+ VEC_WRITE(VEC_FREQ3_2,
+ (tv_mode->custom_freq >> 16) & 0xffff);
+ VEC_WRITE(VEC_FREQ1_0,
+ tv_mode->custom_freq & 0xffff);
+ }
VEC_WRITE(VEC_DAC_MISC,
VEC_DAC_MISC_VID_ACT | VEC_DAC_MISC_DAC_RST_N);
VEC_WRITE(VEC_CFG, VEC_CFG_VEC_EN);
-}
+ drm_dev_exit(idx);
+ return;
-static bool vc4_vec_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
-static void vc4_vec_encoder_atomic_mode_set(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct vc4_vec_encoder *vc4_vec_encoder = to_vc4_vec_encoder(encoder);
- struct vc4_vec *vec = vc4_vec_encoder->vec;
-
- vec->tv_mode = &vc4_vec_tv_modes[conn_state->tv.mode];
+err_put_runtime_pm:
+ pm_runtime_put(&vec->pdev->dev);
+err_dev_exit:
+ drm_dev_exit(idx);
}
static int vc4_vec_encoder_atomic_check(struct drm_encoder *encoder,
@@ -496,11 +459,27 @@ static int vc4_vec_encoder_atomic_check(struct drm_encoder *encoder,
}
static const struct drm_encoder_helper_funcs vc4_vec_encoder_helper_funcs = {
- .disable = vc4_vec_encoder_disable,
- .enable = vc4_vec_encoder_enable,
- .mode_fixup = vc4_vec_encoder_mode_fixup,
.atomic_check = vc4_vec_encoder_atomic_check,
- .atomic_mode_set = vc4_vec_encoder_atomic_mode_set,
+ .atomic_disable = vc4_vec_encoder_disable,
+ .atomic_enable = vc4_vec_encoder_enable,
+};
+
+static int vc4_vec_late_register(struct drm_encoder *encoder)
+{
+ struct drm_device *drm = encoder->dev;
+ struct vc4_vec *vec = encoder_to_vc4_vec(encoder);
+ int ret;
+
+ ret = vc4_debugfs_add_regset32(drm->primary, "vec_regs",
+ &vec->regset);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct drm_encoder_funcs vc4_vec_encoder_funcs = {
+ .late_register = vc4_vec_late_register,
};
static const struct vc4_vec_variant bcm2835_vec_variant = {
@@ -532,9 +511,7 @@ static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_vec *vec;
- struct vc4_vec_encoder *vc4_vec_encoder;
int ret;
ret = drm_mode_create_tv_properties(drm, ARRAY_SIZE(tv_mode_names),
@@ -542,18 +519,11 @@ static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- vec = devm_kzalloc(dev, sizeof(*vec), GFP_KERNEL);
+ vec = drmm_kzalloc(drm, sizeof(*vec), GFP_KERNEL);
if (!vec)
return -ENOMEM;
- vc4_vec_encoder = devm_kzalloc(dev, sizeof(*vc4_vec_encoder),
- GFP_KERNEL);
- if (!vc4_vec_encoder)
- return -ENOMEM;
- vc4_vec_encoder->base.type = VC4_ENCODER_TYPE_VEC;
- vc4_vec_encoder->vec = vec;
- vec->encoder = &vc4_vec_encoder->base.base;
-
+ vec->encoder.type = VC4_ENCODER_TYPE_VEC;
vec->pdev = pdev;
vec->variant = (const struct vc4_vec_variant *)
of_device_get_match_data(dev);
@@ -572,49 +542,30 @@ static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- pm_runtime_enable(dev);
-
- drm_simple_encoder_init(drm, vec->encoder, DRM_MODE_ENCODER_TVDAC);
- drm_encoder_helper_add(vec->encoder, &vc4_vec_encoder_helper_funcs);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
- vec->connector = vc4_vec_connector_init(drm, vec);
- if (IS_ERR(vec->connector)) {
- ret = PTR_ERR(vec->connector);
- goto err_destroy_encoder;
- }
+ ret = drmm_encoder_init(drm, &vec->encoder.base,
+ &vc4_vec_encoder_funcs,
+ DRM_MODE_ENCODER_TVDAC,
+ NULL);
+ if (ret)
+ return ret;
- dev_set_drvdata(dev, vec);
+ drm_encoder_helper_add(&vec->encoder.base, &vc4_vec_encoder_helper_funcs);
- vc4->vec = vec;
+ ret = vc4_vec_connector_init(drm, vec);
+ if (ret)
+ return ret;
- vc4_debugfs_add_regset32(drm, "vec_regs", &vec->regset);
+ dev_set_drvdata(dev, vec);
return 0;
-
-err_destroy_encoder:
- drm_encoder_cleanup(vec->encoder);
- pm_runtime_disable(dev);
-
- return ret;
-}
-
-static void vc4_vec_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
- struct vc4_vec *vec = dev_get_drvdata(dev);
-
- vc4_vec_connector_destroy(vec->connector);
- drm_encoder_cleanup(vec->encoder);
- pm_runtime_disable(dev);
-
- vc4->vec = NULL;
}
static const struct component_ops vc4_vec_ops = {
.bind = vc4_vec_bind,
- .unbind = vc4_vec_unbind,
};
static int vc4_vec_dev_probe(struct platform_device *pdev)