author    Laurent Pinchart <laurent.pinchart@ideasonboard.com>	2020-12-07 00:53:49 +0100
committer Mauro Carvalho Chehab <mchehab+huawei@kernel.org>	2021-01-04 12:12:25 +0100
commit    ca4fec54f6ba0825532ac21698cf2b7324d02737
tree      23272111cd9a412cd48e5fe3bb3f23159052e4bb /drivers/media/platform/ti-vpe/cal.c
parent    media: ti-vpe: cal: Use list_first_entry()
media: ti-vpe: cal: Group all DMA queue fields in struct cal_dmaqueue
The cal_dmaqueue structure only contains the list of queued buffers. Move the other fields that are logically related to the DMA queue (current and next buffer pointers, state, wait queue and lock) from cal_ctx to cal_dmaqueue. Take this as an opportunity to document the fields' usage and to give them more appropriate names.

The 'active' field stored the list of all queued buffers, not the active buffers, so rename it to 'queue'. The 'cur_frm' and 'next_frm' fields are respectively renamed to 'active' and 'pending' to better explain their purpose. The 'dma_state' and 'dma_wait' fields are stripped of their 'dma_' prefix as they're now part of cal_dmaqueue. Finally, 'slock' is renamed to 'lock'.

Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Reviewed-by: Benoit Parrot <bparrot@ti.com>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
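For reference, below is a minimal sketch of the regrouped structure implied by the renames above. The authoritative definition lives in the driver's header (cal.h), which is not part of this diff, so the field types, ordering and comments here are illustrative rather than verbatim:

/* Sketch only -- see drivers/media/platform/ti-vpe/cal.h for the real definition. */
struct cal_dmaqueue {
	/* Protects all fields of this structure. */
	spinlock_t		lock;

	/* Buffers queued to the driver, waiting to be given to the hardware. */
	struct list_head	queue;

	/*
	 * Buffer programmed into the write DMA address register for the next
	 * frame; equal to 'active' when no new buffer has been queued yet.
	 */
	struct cal_buffer	*pending;

	/* Buffer the hardware is currently writing to. */
	struct cal_buffer	*active;

	/* DMA engine state (running, stop requested, stop pending, stopped). */
	enum cal_dma_state	state;

	/* Wait queue used to signal completion of a requested DMA stop. */
	wait_queue_head_t	wait;
};

With this grouping, everything the frame interrupt handlers touch is reachable through ctx->dma and guarded by a single lock, which is the pattern the hunks below rely on.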
Diffstat (limited to 'drivers/media/platform/ti-vpe/cal.c')
-rw-r--r--	drivers/media/platform/ti-vpe/cal.c	49
1 file changed, 24 insertions, 25 deletions
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
index 8f25e7a6f5e8..3e0a69bb7fe5 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -424,9 +424,9 @@ static bool cal_ctx_wr_dma_stopped(struct cal_ctx *ctx)
 {
 	bool stopped;
 
-	spin_lock_irq(&ctx->slock);
-	stopped = ctx->dma_state == CAL_DMA_STOPPED;
-	spin_unlock_irq(&ctx->slock);
+	spin_lock_irq(&ctx->dma.lock);
+	stopped = ctx->dma.state == CAL_DMA_STOPPED;
+	spin_unlock_irq(&ctx->dma.lock);
 
 	return stopped;
 }
@@ -436,11 +436,11 @@ int cal_ctx_wr_dma_stop(struct cal_ctx *ctx)
 	long timeout;
 
 	/* Request DMA stop and wait until it completes. */
-	spin_lock_irq(&ctx->slock);
-	ctx->dma_state = CAL_DMA_STOP_REQUESTED;
-	spin_unlock_irq(&ctx->slock);
+	spin_lock_irq(&ctx->dma.lock);
+	ctx->dma.state = CAL_DMA_STOP_REQUESTED;
+	spin_unlock_irq(&ctx->dma.lock);
 
-	timeout = wait_event_timeout(ctx->dma_wait, cal_ctx_wr_dma_stopped(ctx),
+	timeout = wait_event_timeout(ctx->dma.wait, cal_ctx_wr_dma_stopped(ctx),
 				     msecs_to_jiffies(500));
 	if (!timeout) {
 		ctx_err(ctx, "failed to disable dma cleanly\n");
@@ -475,20 +475,18 @@ void cal_ctx_disable_irqs(struct cal_ctx *ctx)
 
 static inline void cal_irq_wdma_start(struct cal_ctx *ctx)
 {
-	struct cal_dmaqueue *dma_q = &ctx->vidq;
+	spin_lock(&ctx->dma.lock);
 
-	spin_lock(&ctx->slock);
-
-	if (ctx->dma_state == CAL_DMA_STOP_REQUESTED) {
+	if (ctx->dma.state == CAL_DMA_STOP_REQUESTED) {
 		/*
 		 * If a stop is requested, disable the write DMA context
 		 * immediately. The CAL_WR_DMA_CTRL_j.MODE field is shadowed,
 		 * the current frame will complete and the DMA will then stop.
 		 */
 		cal_ctx_wr_dma_disable(ctx);
-		ctx->dma_state = CAL_DMA_STOP_PENDING;
-	} else if (!list_empty(&dma_q->active) &&
-		   ctx->cur_frm == ctx->next_frm) {
+		ctx->dma.state = CAL_DMA_STOP_PENDING;
+	} else if (!list_empty(&ctx->dma.queue) &&
+		   ctx->dma.active == ctx->dma.pending) {
 		/*
 		 * Otherwise, if a new buffer is available, queue it to the
 		 * hardware.
@@ -496,36 +494,37 @@ static inline void cal_irq_wdma_start(struct cal_ctx *ctx)
 		struct cal_buffer *buf;
 		unsigned long addr;
 
-		buf = list_first_entry(&dma_q->active, struct cal_buffer, list);
+		buf = list_first_entry(&ctx->dma.queue, struct cal_buffer,
+				       list);
 		addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
 		cal_ctx_wr_dma_addr(ctx, addr);
-		ctx->next_frm = buf;
+		ctx->dma.pending = buf;
 		list_del(&buf->list);
 	}
 
-	spin_unlock(&ctx->slock);
+	spin_unlock(&ctx->dma.lock);
 }
 
 static inline void cal_irq_wdma_end(struct cal_ctx *ctx)
 {
 	struct cal_buffer *buf = NULL;
 
-	spin_lock(&ctx->slock);
+	spin_lock(&ctx->dma.lock);
 
 	/* If the DMA context was stopping, it is now stopped. */
-	if (ctx->dma_state == CAL_DMA_STOP_PENDING) {
-		ctx->dma_state = CAL_DMA_STOPPED;
-		wake_up(&ctx->dma_wait);
+	if (ctx->dma.state == CAL_DMA_STOP_PENDING) {
+		ctx->dma.state = CAL_DMA_STOPPED;
+		wake_up(&ctx->dma.wait);
 	}
 
 	/* If a new buffer was queued, complete the current buffer. */
-	if (ctx->cur_frm != ctx->next_frm) {
-		buf = ctx->cur_frm;
-		ctx->cur_frm = ctx->next_frm;
+	if (ctx->dma.active != ctx->dma.pending) {
+		buf = ctx->dma.active;
+		ctx->dma.active = ctx->dma.pending;
 	}
 
-	spin_unlock(&ctx->slock);
+	spin_unlock(&ctx->dma.lock);
 
 	if (buf) {
 		buf->vb.vb2_buf.timestamp = ktime_get_ns();