Diffstat (limited to 'drivers/media/platform')
-rw-r--r--  drivers/media/platform/Kconfig | 2
-rw-r--r--  drivers/media/platform/aspeed-video.c | 1
-rw-r--r--  drivers/media/platform/coda/coda-bit.c | 24
-rw-r--r--  drivers/media/platform/coda/coda-common.c | 13
-rw-r--r--  drivers/media/platform/coda/coda.h | 2
-rw-r--r--  drivers/media/platform/davinci/vpfe_capture.c | 2
-rw-r--r--  drivers/media/platform/exynos4-is/fimc-is.c | 16
-rw-r--r--  drivers/media/platform/fsl-viu.c | 2
-rw-r--r--  drivers/media/platform/imx-pxp.c | 2
-rw-r--r--  drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c | 163
-rw-r--r--  drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h | 31
-rw-r--r--  drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c | 104
-rw-r--r--  drivers/media/platform/omap/omap_vout.c | 12
-rw-r--r--  drivers/media/platform/pxa_camera.c | 8
-rw-r--r--  drivers/media/platform/qcom/venus/core.c | 12
-rw-r--r--  drivers/media/platform/qcom/venus/core.h | 1
-rw-r--r--  drivers/media/platform/qcom/venus/firmware.c | 53
-rw-r--r--  drivers/media/platform/qcom/venus/helpers.c | 3
-rw-r--r--  drivers/media/platform/rcar-vin/rcar-core.c | 26
-rw-r--r--  drivers/media/platform/rcar-vin/rcar-csi2.c | 66
-rw-r--r--  drivers/media/platform/rcar-vin/rcar-dma.c | 2
-rw-r--r--  drivers/media/platform/s5p-jpeg/jpeg-core.c | 25
-rw-r--r--  drivers/media/platform/s5p-jpeg/jpeg-core.h | 2
-rw-r--r--  drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c | 2
-rw-r--r--  drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h | 2
-rw-r--r--  drivers/media/platform/s5p-jpeg/jpeg-regs.h | 2
-rw-r--r--  drivers/media/platform/s5p-mfc/s5p_mfc.c | 1
-rw-r--r--  drivers/media/platform/seco-cec/seco-cec.h | 2
-rw-r--r--  drivers/media/platform/soc_camera/Kconfig | 18
-rw-r--r--  drivers/media/platform/soc_camera/Makefile | 8
-rw-r--r--  drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c | 1810
-rw-r--r--  drivers/media/platform/soc_camera/soc_camera_platform.c | 188
-rw-r--r--  drivers/media/platform/soc_camera/soc_scale_crop.c | 426
-rw-r--r--  drivers/media/platform/soc_camera/soc_scale_crop.h | 47
-rw-r--r--  drivers/media/platform/sti/bdisp/bdisp-debug.c | 34
-rw-r--r--  drivers/media/platform/sti/hva/hva-debugfs.c | 36
-rw-r--r--  drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c | 1
-rw-r--r--  drivers/media/platform/vicodec/codec-fwht.c | 148
-rw-r--r--  drivers/media/platform/vicodec/codec-fwht.h | 27
-rw-r--r--  drivers/media/platform/vicodec/codec-v4l2-fwht.c | 390
-rw-r--r--  drivers/media/platform/vicodec/codec-v4l2-fwht.h | 15
-rw-r--r--  drivers/media/platform/vicodec/vicodec-core.c | 658
-rw-r--r--  drivers/media/platform/vim2m.c | 475
-rw-r--r--  drivers/media/platform/vimc/Makefile | 3
-rw-r--r--  drivers/media/platform/vimc/vimc-capture.c | 24
-rw-r--r--  drivers/media/platform/vimc/vimc-common.c | 35
-rw-r--r--  drivers/media/platform/vimc/vimc-common.h | 17
-rw-r--r--  drivers/media/platform/vimc/vimc-core.c | 3
-rw-r--r--  drivers/media/platform/vimc/vimc-debayer.c | 26
-rw-r--r--  drivers/media/platform/vimc/vimc-scaler.c | 28
-rw-r--r--  drivers/media/platform/vimc/vimc-sensor.c | 56
-rw-r--r--  drivers/media/platform/vimc/vimc-streamer.c | 188
-rw-r--r--  drivers/media/platform/vimc/vimc-streamer.h | 38
-rw-r--r--  drivers/media/platform/vivid/vivid-core.c | 22
-rw-r--r--  drivers/media/platform/vivid/vivid-vid-cap.c | 10
-rw-r--r--  drivers/media/platform/vivid/vivid-vid-out.c | 57
-rw-r--r--  drivers/media/platform/vsp1/vsp1_video.c | 2
57 files changed, 1821 insertions(+), 3550 deletions(-)
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index a505e9f5a1e2..b5ccb60cf664 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -650,7 +650,7 @@ config VIDEO_SECO_CEC
config VIDEO_SECO_RC
bool "SECO Boards IR RC5 support"
depends on VIDEO_SECO_CEC
- select RC_CORE
+ depends on RC_CORE
help
If you say yes here you will get support for the
SECO Boards Consumer-IR in seco-cec driver.
diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
index dfec813f50a9..692e08ef38c0 100644
--- a/drivers/media/platform/aspeed-video.c
+++ b/drivers/media/platform/aspeed-video.c
@@ -1661,6 +1661,7 @@ static int aspeed_video_probe(struct platform_device *pdev)
video->frame_rate = 30;
video->dev = &pdev->dev;
+ spin_lock_init(&video->lock);
mutex_init(&video->video_lock);
init_waitqueue_head(&video->wait);
INIT_DELAYED_WORK(&video->res_work, aspeed_video_resolution_work);
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index 8e0194993a52..b4f396c2e72c 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -1010,7 +1010,11 @@ static int coda_start_encoding(struct coda_ctx *ctx)
CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET) |
((ctx->params.h264_slice_beta_offset_div2 &
CODA_264PARAM_DEBLKFILTEROFFSETBETA_MASK) <<
- CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET);
+ CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET) |
+ (ctx->params.h264_constrained_intra_pred_flag <<
+ CODA_264PARAM_CONSTRAINEDINTRAPREDFLAG_OFFSET) |
+ (ctx->params.h264_chroma_qp_index_offset &
+ CODA_264PARAM_CHROMAQPOFFSET_MASK);
coda_write(dev, value, CODA_CMD_ENC_SEQ_264_PARA);
break;
case V4L2_PIX_FMT_JPEG:
@@ -2020,7 +2024,6 @@ static void coda_finish_decode(struct coda_ctx *ctx)
struct coda_q_data *q_data_dst;
struct vb2_v4l2_buffer *dst_buf;
struct coda_buffer_meta *meta;
- unsigned long payload;
int width, height;
int decoded_idx;
int display_idx;
@@ -2226,21 +2229,8 @@ static void coda_finish_decode(struct coda_ctx *ctx)
trace_coda_dec_rot_done(ctx, dst_buf, meta);
- switch (q_data_dst->fourcc) {
- case V4L2_PIX_FMT_YUYV:
- payload = width * height * 2;
- break;
- case V4L2_PIX_FMT_YUV420:
- case V4L2_PIX_FMT_YVU420:
- case V4L2_PIX_FMT_NV12:
- default:
- payload = width * height * 3 / 2;
- break;
- case V4L2_PIX_FMT_YUV422P:
- payload = width * height * 2;
- break;
- }
- vb2_set_plane_payload(&dst_buf->vb2_buf, 0, payload);
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
+ q_data_dst->sizeimage);
if (ctx->frame_errors[ctx->display_idx] || err_vdoa)
coda_m2m_buf_done(ctx, dst_buf, VB2_BUF_STATE_ERROR);
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 7518f01c48f7..fa0b22fb7991 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -728,7 +728,7 @@ static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f,
ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP;
break;
case V4L2_PIX_FMT_NV12:
- if (!disable_tiling) {
+ if (!disable_tiling && ctx->dev->devtype->product == CODA_960) {
ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP;
break;
}
@@ -1839,6 +1839,12 @@ static int coda_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
ctx->params.h264_disable_deblocking_filter_idc = ctrl->val;
break;
+ case V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION:
+ ctx->params.h264_constrained_intra_pred_flag = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET:
+ ctx->params.h264_chroma_qp_index_offset = ctrl->val;
+ break;
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
/* TODO: switch between baseline and constrained baseline */
if (ctx->inst_type == CODA_INST_ENCODER)
@@ -1925,6 +1931,11 @@ static void coda_encode_ctrls(struct coda_ctx *ctx)
V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY,
0x0, V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION, 0, 1, 1,
+ 0);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET, -12, 12, 1, 0);
v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_PROFILE,
V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE, 0x0,
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index 31cea72f5b2a..31c80bda2c0b 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -118,6 +118,8 @@ struct coda_params {
u8 h264_disable_deblocking_filter_idc;
s8 h264_slice_alpha_c0_offset_div2;
s8 h264_slice_beta_offset_div2;
+ bool h264_constrained_intra_pred_flag;
+ s8 h264_chroma_qp_index_offset;
u8 h264_profile_idc;
u8 h264_level_idc;
u8 mpeg4_intra_qp;
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 9996bab98fe3..26dadbba930f 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -518,7 +518,7 @@ static void vpfe_schedule_bottom_field(struct vpfe_device *vpfe_dev)
static void vpfe_process_buffer_complete(struct vpfe_device *vpfe_dev)
{
- v4l2_get_timestamp(&vpfe_dev->cur_frm->ts);
+ vpfe_dev->cur_frm->ts = ktime_get_ns();
vpfe_dev->cur_frm->state = VIDEOBUF_DONE;
vpfe_dev->cur_frm->size = vpfe_dev->fmt.fmt.pix.sizeimage;
wake_up_interruptible(&vpfe_dev->cur_frm->done);
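
[Editor's note] This timestamp change (repeated below for fsl-viu and omap_vout) drops the struct timeval filled by v4l2_get_timestamp() in favour of a single 64-bit monotonic nanosecond value. A minimal sketch of the new style, using a hypothetical buffer type that stores its timestamp as u64 (not the vpfe code itself):

    #include <linux/ktime.h>
    #include <linux/types.h>

    struct my_buf {                 /* hypothetical, for illustration only */
            u64 ts;                 /* nanoseconds, monotonic clock */
    };

    static void my_buf_stamp(struct my_buf *buf)
    {
            /* One call replaces v4l2_get_timestamp(&tv): same monotonic
             * clock, no timeval rounding, no 32-bit time_t concerns. */
            buf->ts = ktime_get_ns();
    }
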
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index f5fc54de19da..02da0b06e56a 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -738,7 +738,7 @@ int fimc_is_hw_initialize(struct fimc_is *is)
return 0;
}
-static int fimc_is_log_show(struct seq_file *s, void *data)
+static int fimc_is_show(struct seq_file *s, void *data)
{
struct fimc_is *is = s->private;
const u8 *buf = is->memory.vaddr + FIMC_IS_DEBUG_REGION_OFFSET;
@@ -752,17 +752,7 @@ static int fimc_is_log_show(struct seq_file *s, void *data)
return 0;
}
-static int fimc_is_debugfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, fimc_is_log_show, inode->i_private);
-}
-
-static const struct file_operations fimc_is_debugfs_fops = {
- .open = fimc_is_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(fimc_is);
static void fimc_is_debugfs_remove(struct fimc_is *is)
{
@@ -777,7 +767,7 @@ static int fimc_is_debugfs_create(struct fimc_is *is)
is->debugfs_entry = debugfs_create_dir("fimc_is", NULL);
dentry = debugfs_create_file("fw_log", S_IRUGO, is->debugfs_entry,
- is, &fimc_is_debugfs_fops);
+ is, &fimc_is_fops);
if (!dentry)
fimc_is_debugfs_remove(is);
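
[Editor's note] The removed open/fops boilerplate is exactly what DEFINE_SHOW_ATTRIBUTE() generates from a *_show() function. A minimal sketch of the pattern with a hypothetical device, not the fimc-is code:

    #include <linux/debugfs.h>
    #include <linux/seq_file.h>

    struct my_dev { int state; };           /* hypothetical example */

    static int my_dev_status_show(struct seq_file *s, void *data)
    {
            struct my_dev *dev = s->private;

            seq_printf(s, "state: %d\n", dev->state);
            return 0;
    }
    /* Generates my_dev_status_open() (via single_open) and my_dev_status_fops. */
    DEFINE_SHOW_ATTRIBUTE(my_dev_status);

    /* Registration then passes the generated fops:
     *   debugfs_create_file("status", 0444, parent, dev, &my_dev_status_fops);
     */
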
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index ca6d0317ab42..cffebcaacb90 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -1090,7 +1090,7 @@ static void viu_capture_intr(struct viu_dev *dev, u32 status)
if (waitqueue_active(&buf->vb.done)) {
list_del(&buf->vb.queue);
- v4l2_get_timestamp(&buf->vb.ts);
+ buf->vb.ts = ktime_get_ns();
buf->vb.state = VIDEOBUF_DONE;
buf->vb.field_count++;
wake_up(&buf->vb.done);
diff --git a/drivers/media/platform/imx-pxp.c b/drivers/media/platform/imx-pxp.c
index c1c255408d16..f087dc4fc729 100644
--- a/drivers/media/platform/imx-pxp.c
+++ b/drivers/media/platform/imx-pxp.c
@@ -680,7 +680,7 @@ static void pxp_setup_csc(struct pxp_ctx *ctx)
csc2_coef = csc2_coef_rec709_full;
else
csc2_coef = csc2_coef_rec709_lim;
- } else if (ycbcr_enc == V4L2_YCBCR_ENC_709) {
+ } else if (ycbcr_enc == V4L2_YCBCR_ENC_BT2020) {
if (quantization == V4L2_QUANTIZATION_FULL_RANGE)
csc2_coef = csc2_coef_bt2020_full;
else
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
index 79ca03ac449c..7884465afcd2 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
@@ -27,11 +27,14 @@ int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *mtkdev)
struct device_node *node;
struct platform_device *pdev;
struct mtk_vcodec_pm *pm;
- int ret = 0;
+ struct mtk_vcodec_clk *dec_clk;
+ struct mtk_vcodec_clk_info *clk_info;
+ int i = 0, ret = 0;
pdev = mtkdev->plat_dev;
pm = &mtkdev->pm;
pm->mtkdev = mtkdev;
+ dec_clk = &pm->vdec_clk;
node = of_parse_phandle(pdev->dev.of_node, "mediatek,larb", 0);
if (!node) {
mtk_v4l2_err("of_parse_phandle mediatek,larb fail!");
@@ -47,52 +50,34 @@ int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *mtkdev)
pdev = mtkdev->plat_dev;
pm->dev = &pdev->dev;
- pm->vcodecpll = devm_clk_get(&pdev->dev, "vcodecpll");
- if (IS_ERR(pm->vcodecpll)) {
- mtk_v4l2_err("devm_clk_get vcodecpll fail");
- ret = PTR_ERR(pm->vcodecpll);
+ dec_clk->clk_num =
+ of_property_count_strings(pdev->dev.of_node, "clock-names");
+ if (dec_clk->clk_num > 0) {
+ dec_clk->clk_info = devm_kcalloc(&pdev->dev,
+ dec_clk->clk_num, sizeof(*clk_info),
+ GFP_KERNEL);
+ if (!dec_clk->clk_info)
+ return -ENOMEM;
+ } else {
+ mtk_v4l2_err("Failed to get vdec clock count");
+ return -EINVAL;
}
- pm->univpll_d2 = devm_clk_get(&pdev->dev, "univpll_d2");
- if (IS_ERR(pm->univpll_d2)) {
- mtk_v4l2_err("devm_clk_get univpll_d2 fail");
- ret = PTR_ERR(pm->univpll_d2);
- }
-
- pm->clk_cci400_sel = devm_clk_get(&pdev->dev, "clk_cci400_sel");
- if (IS_ERR(pm->clk_cci400_sel)) {
- mtk_v4l2_err("devm_clk_get clk_cci400_sel fail");
- ret = PTR_ERR(pm->clk_cci400_sel);
- }
-
- pm->vdec_sel = devm_clk_get(&pdev->dev, "vdec_sel");
- if (IS_ERR(pm->vdec_sel)) {
- mtk_v4l2_err("devm_clk_get vdec_sel fail");
- ret = PTR_ERR(pm->vdec_sel);
- }
-
- pm->vdecpll = devm_clk_get(&pdev->dev, "vdecpll");
- if (IS_ERR(pm->vdecpll)) {
- mtk_v4l2_err("devm_clk_get vdecpll fail");
- ret = PTR_ERR(pm->vdecpll);
- }
-
- pm->vencpll = devm_clk_get(&pdev->dev, "vencpll");
- if (IS_ERR(pm->vencpll)) {
- mtk_v4l2_err("devm_clk_get vencpll fail");
- ret = PTR_ERR(pm->vencpll);
- }
-
- pm->venc_lt_sel = devm_clk_get(&pdev->dev, "venc_lt_sel");
- if (IS_ERR(pm->venc_lt_sel)) {
- mtk_v4l2_err("devm_clk_get venc_lt_sel fail");
- ret = PTR_ERR(pm->venc_lt_sel);
- }
-
- pm->vdec_bus_clk_src = devm_clk_get(&pdev->dev, "vdec_bus_clk_src");
- if (IS_ERR(pm->vdec_bus_clk_src)) {
- mtk_v4l2_err("devm_clk_get vdec_bus_clk_src");
- ret = PTR_ERR(pm->vdec_bus_clk_src);
+ for (i = 0; i < dec_clk->clk_num; i++) {
+ clk_info = &dec_clk->clk_info[i];
+ ret = of_property_read_string_index(pdev->dev.of_node,
+ "clock-names", i, &clk_info->clk_name);
+ if (ret) {
+ mtk_v4l2_err("Failed to get clock name id = %d", i);
+ return ret;
+ }
+ clk_info->vcodec_clk = devm_clk_get(&pdev->dev,
+ clk_info->clk_name);
+ if (IS_ERR(clk_info->vcodec_clk)) {
+ mtk_v4l2_err("devm_clk_get (%d)%s fail", i,
+ clk_info->clk_name);
+ return PTR_ERR(clk_info->vcodec_clk);
+ }
}
pm_runtime_enable(&pdev->dev);
@@ -125,78 +110,36 @@ void mtk_vcodec_dec_pw_off(struct mtk_vcodec_pm *pm)
void mtk_vcodec_dec_clock_on(struct mtk_vcodec_pm *pm)
{
- int ret;
-
- ret = clk_set_rate(pm->vcodecpll, 1482 * 1000000);
- if (ret)
- mtk_v4l2_err("clk_set_rate vcodecpll fail %d", ret);
-
- ret = clk_set_rate(pm->vencpll, 800 * 1000000);
- if (ret)
- mtk_v4l2_err("clk_set_rate vencpll fail %d", ret);
-
- ret = clk_prepare_enable(pm->vcodecpll);
- if (ret)
- mtk_v4l2_err("clk_prepare_enable vcodecpll fail %d", ret);
-
- ret = clk_prepare_enable(pm->vencpll);
- if (ret)
- mtk_v4l2_err("clk_prepare_enable vencpll fail %d", ret);
-
- ret = clk_prepare_enable(pm->vdec_bus_clk_src);
- if (ret)
- mtk_v4l2_err("clk_prepare_enable vdec_bus_clk_src fail %d",
- ret);
-
- ret = clk_prepare_enable(pm->venc_lt_sel);
- if (ret)
- mtk_v4l2_err("clk_prepare_enable venc_lt_sel fail %d", ret);
-
- ret = clk_set_parent(pm->venc_lt_sel, pm->vdec_bus_clk_src);
- if (ret)
- mtk_v4l2_err("clk_set_parent venc_lt_sel vdec_bus_clk_src fail %d",
- ret);
-
- ret = clk_prepare_enable(pm->univpll_d2);
- if (ret)
- mtk_v4l2_err("clk_prepare_enable univpll_d2 fail %d", ret);
-
- ret = clk_prepare_enable(pm->clk_cci400_sel);
- if (ret)
- mtk_v4l2_err("clk_prepare_enable clk_cci400_sel fail %d", ret);
-
- ret = clk_set_parent(pm->clk_cci400_sel, pm->univpll_d2);
- if (ret)
- mtk_v4l2_err("clk_set_parent clk_cci400_sel univpll_d2 fail %d",
- ret);
-
- ret = clk_prepare_enable(pm->vdecpll);
- if (ret)
- mtk_v4l2_err("clk_prepare_enable vdecpll fail %d", ret);
-
- ret = clk_prepare_enable(pm->vdec_sel);
- if (ret)
- mtk_v4l2_err("clk_prepare_enable vdec_sel fail %d", ret);
-
- ret = clk_set_parent(pm->vdec_sel, pm->vdecpll);
- if (ret)
- mtk_v4l2_err("clk_set_parent vdec_sel vdecpll fail %d", ret);
+ struct mtk_vcodec_clk *dec_clk = &pm->vdec_clk;
+ int ret, i = 0;
+
+ for (i = 0; i < dec_clk->clk_num; i++) {
+ ret = clk_prepare_enable(dec_clk->clk_info[i].vcodec_clk);
+ if (ret) {
+ mtk_v4l2_err("clk_prepare_enable %d %s fail %d", i,
+ dec_clk->clk_info[i].clk_name, ret);
+ goto error;
+ }
+ }
ret = mtk_smi_larb_get(pm->larbvdec);
- if (ret)
+ if (ret) {
mtk_v4l2_err("mtk_smi_larb_get larbvdec fail %d", ret);
+ goto error;
+ }
+ return;
+error:
+ for (i -= 1; i >= 0; i--)
+ clk_disable_unprepare(dec_clk->clk_info[i].vcodec_clk);
}
void mtk_vcodec_dec_clock_off(struct mtk_vcodec_pm *pm)
{
+ struct mtk_vcodec_clk *dec_clk = &pm->vdec_clk;
+ int i = 0;
+
mtk_smi_larb_put(pm->larbvdec);
- clk_disable_unprepare(pm->vdec_sel);
- clk_disable_unprepare(pm->vdecpll);
- clk_disable_unprepare(pm->univpll_d2);
- clk_disable_unprepare(pm->clk_cci400_sel);
- clk_disable_unprepare(pm->venc_lt_sel);
- clk_disable_unprepare(pm->vdec_bus_clk_src);
- clk_disable_unprepare(pm->vencpll);
- clk_disable_unprepare(pm->vcodecpll);
+ for (i = dec_clk->clk_num - 1; i >= 0; i--)
+ clk_disable_unprepare(dec_clk->clk_info[i].vcodec_clk);
}
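
[Editor's note] Instead of one devm_clk_get() per hard-coded clock name, the driver now enumerates the DT "clock-names" property. A condensed sketch of that pattern with hypothetical names (the real code stores the result in struct mtk_vcodec_clk):

    #include <linux/clk.h>
    #include <linux/of.h>
    #include <linux/platform_device.h>

    /* Sketch: fetch every clock listed in the node's "clock-names" property. */
    static int get_all_clocks(struct platform_device *pdev,
                              struct clk **clks, int max)
    {
            int i, num;

            num = of_property_count_strings(pdev->dev.of_node, "clock-names");
            if (num <= 0 || num > max)
                    return -EINVAL;

            for (i = 0; i < num; i++) {
                    const char *name;

                    if (of_property_read_string_index(pdev->dev.of_node,
                                                      "clock-names", i, &name))
                            return -ENODEV;
                    clks[i] = devm_clk_get(&pdev->dev, name);
                    if (IS_ERR(clks[i]))
                            return PTR_ERR(clks[i]);
            }
            return num;
    }

Enabling then mirrors the loops in the diff: clk_prepare_enable() in order, and on failure the already-enabled clocks are released in reverse with clk_disable_unprepare().
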
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
index 3cffb381ac8e..8aba69555b12 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
@@ -176,22 +176,29 @@ struct mtk_enc_params {
};
/**
+ * struct mtk_vcodec_clk_info - Structure used to store clock name
+ */
+struct mtk_vcodec_clk_info {
+ const char *clk_name;
+ struct clk *vcodec_clk;
+};
+
+/**
+ * struct mtk_vcodec_clk - Structure used to store vcodec clock information
+ */
+struct mtk_vcodec_clk {
+ struct mtk_vcodec_clk_info *clk_info;
+ int clk_num;
+};
+
+/**
* struct mtk_vcodec_pm - Power management data structure
*/
struct mtk_vcodec_pm {
- struct clk *vdec_bus_clk_src;
- struct clk *vencpll;
-
- struct clk *vcodecpll;
- struct clk *univpll_d2;
- struct clk *clk_cci400_sel;
- struct clk *vdecpll;
- struct clk *vdec_sel;
- struct clk *vencpll_d2;
- struct clk *venc_sel;
- struct clk *univpll1_d2;
- struct clk *venc_lt_sel;
+ struct mtk_vcodec_clk vdec_clk;
struct device *larbvdec;
+
+ struct mtk_vcodec_clk venc_clk;
struct device *larbvenc;
struct device *larbvenclt;
struct device *dev;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
index 7c025045ea90..39375b8ea27c 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
@@ -27,9 +27,11 @@ int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev)
{
struct device_node *node;
struct platform_device *pdev;
- struct device *dev;
struct mtk_vcodec_pm *pm;
- int ret = 0;
+ struct mtk_vcodec_clk *enc_clk;
+ struct mtk_vcodec_clk_info *clk_info;
+ int ret = 0, i = 0;
+ struct device *dev;
pdev = mtkdev->plat_dev;
pm = &mtkdev->pm;
@@ -37,6 +39,7 @@ int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev)
pm->mtkdev = mtkdev;
pm->dev = &pdev->dev;
dev = &pdev->dev;
+ enc_clk = &pm->venc_clk;
node = of_parse_phandle(dev->of_node, "mediatek,larb", 0);
if (!node) {
@@ -68,28 +71,34 @@ int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev)
pdev = mtkdev->plat_dev;
pm->dev = &pdev->dev;
- pm->vencpll_d2 = devm_clk_get(&pdev->dev, "venc_sel_src");
- if (IS_ERR(pm->vencpll_d2)) {
- mtk_v4l2_err("devm_clk_get vencpll_d2 fail");
- ret = PTR_ERR(pm->vencpll_d2);
- }
-
- pm->venc_sel = devm_clk_get(&pdev->dev, "venc_sel");
- if (IS_ERR(pm->venc_sel)) {
- mtk_v4l2_err("devm_clk_get venc_sel fail");
- ret = PTR_ERR(pm->venc_sel);
+ enc_clk->clk_num = of_property_count_strings(pdev->dev.of_node,
+ "clock-names");
+ if (enc_clk->clk_num > 0) {
+ enc_clk->clk_info = devm_kcalloc(&pdev->dev,
+ enc_clk->clk_num, sizeof(*clk_info),
+ GFP_KERNEL);
+ if (!enc_clk->clk_info)
+ return -ENOMEM;
+ } else {
+ mtk_v4l2_err("Failed to get venc clock count");
+ return -EINVAL;
}
- pm->univpll1_d2 = devm_clk_get(&pdev->dev, "venc_lt_sel_src");
- if (IS_ERR(pm->univpll1_d2)) {
- mtk_v4l2_err("devm_clk_get univpll1_d2 fail");
- ret = PTR_ERR(pm->univpll1_d2);
- }
-
- pm->venc_lt_sel = devm_clk_get(&pdev->dev, "venc_lt_sel");
- if (IS_ERR(pm->venc_lt_sel)) {
- mtk_v4l2_err("devm_clk_get venc_lt_sel fail");
- ret = PTR_ERR(pm->venc_lt_sel);
+ for (i = 0; i < enc_clk->clk_num; i++) {
+ clk_info = &enc_clk->clk_info[i];
+ ret = of_property_read_string_index(pdev->dev.of_node,
+ "clock-names", i, &clk_info->clk_name);
+ if (ret) {
+ mtk_v4l2_err("venc failed to get clk name %d", i);
+ return ret;
+ }
+ clk_info->vcodec_clk = devm_clk_get(&pdev->dev,
+ clk_info->clk_name);
+ if (IS_ERR(clk_info->vcodec_clk)) {
+ mtk_v4l2_err("venc devm_clk_get (%d)%s fail", i,
+ clk_info->clk_name);
+ return PTR_ERR(clk_info->vcodec_clk);
+ }
}
return ret;
@@ -102,38 +111,45 @@ void mtk_vcodec_release_enc_pm(struct mtk_vcodec_dev *mtkdev)
void mtk_vcodec_enc_clock_on(struct mtk_vcodec_pm *pm)
{
- int ret;
-
- ret = clk_prepare_enable(pm->venc_sel);
- if (ret)
- mtk_v4l2_err("clk_prepare_enable fail %d", ret);
-
- ret = clk_set_parent(pm->venc_sel, pm->vencpll_d2);
- if (ret)
- mtk_v4l2_err("clk_set_parent fail %d", ret);
-
- ret = clk_prepare_enable(pm->venc_lt_sel);
- if (ret)
- mtk_v4l2_err("clk_prepare_enable fail %d", ret);
-
- ret = clk_set_parent(pm->venc_lt_sel, pm->univpll1_d2);
- if (ret)
- mtk_v4l2_err("clk_set_parent fail %d", ret);
+ struct mtk_vcodec_clk *enc_clk = &pm->venc_clk;
+ int ret, i = 0;
+
+ for (i = 0; i < enc_clk->clk_num; i++) {
+ ret = clk_prepare_enable(enc_clk->clk_info[i].vcodec_clk);
+ if (ret) {
+ mtk_v4l2_err("venc clk_prepare_enable %d %s fail %d", i,
+ enc_clk->clk_info[i].clk_name, ret);
+ goto clkerr;
+ }
+ }
ret = mtk_smi_larb_get(pm->larbvenc);
- if (ret)
+ if (ret) {
mtk_v4l2_err("mtk_smi_larb_get larb3 fail %d", ret);
-
+ goto larbvencerr;
+ }
ret = mtk_smi_larb_get(pm->larbvenclt);
- if (ret)
+ if (ret) {
mtk_v4l2_err("mtk_smi_larb_get larb4 fail %d", ret);
+ goto larbvenclterr;
+ }
+ return;
+larbvenclterr:
+ mtk_smi_larb_put(pm->larbvenc);
+larbvencerr:
+clkerr:
+ for (i -= 1; i >= 0; i--)
+ clk_disable_unprepare(enc_clk->clk_info[i].vcodec_clk);
}
void mtk_vcodec_enc_clock_off(struct mtk_vcodec_pm *pm)
{
+ struct mtk_vcodec_clk *enc_clk = &pm->venc_clk;
+ int i = 0;
+
mtk_smi_larb_put(pm->larbvenc);
mtk_smi_larb_put(pm->larbvenclt);
- clk_disable_unprepare(pm->venc_lt_sel);
- clk_disable_unprepare(pm->venc_sel);
+ for (i = enc_clk->clk_num - 1; i >= 0; i--)
+ clk_disable_unprepare(enc_clk->clk_info[i].vcodec_clk);
}
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index f447ae3bb465..ff3de2dce5a2 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -513,7 +513,7 @@ static int omapvid_apply_changes(struct omap_vout_device *vout)
}
static int omapvid_handle_interlace_display(struct omap_vout_device *vout,
- unsigned int irqstatus, struct timeval timevalue)
+ unsigned int irqstatus, u64 ts)
{
u32 fid;
@@ -537,7 +537,7 @@ static int omapvid_handle_interlace_display(struct omap_vout_device *vout,
if (vout->cur_frm == vout->next_frm)
goto err;
- vout->cur_frm->ts = timevalue;
+ vout->cur_frm->ts = ts;
vout->cur_frm->state = VIDEOBUF_DONE;
wake_up_interruptible(&vout->cur_frm->done);
vout->cur_frm = vout->next_frm;
@@ -557,7 +557,7 @@ static void omap_vout_isr(void *arg, unsigned int irqstatus)
int ret, fid, mgr_id;
u32 addr, irq;
struct omap_overlay *ovl;
- struct timeval timevalue;
+ u64 ts;
struct omapvideo_info *ovid;
struct omap_dss_device *cur_display;
struct omap_vout_device *vout = (struct omap_vout_device *)arg;
@@ -577,7 +577,7 @@ static void omap_vout_isr(void *arg, unsigned int irqstatus)
return;
spin_lock(&vout->vbq_lock);
- v4l2_get_timestamp(&timevalue);
+ ts = ktime_get_ns();
switch (cur_display->type) {
case OMAP_DISPLAY_TYPE_DSI:
@@ -595,7 +595,7 @@ static void omap_vout_isr(void *arg, unsigned int irqstatus)
break;
case OMAP_DISPLAY_TYPE_VENC:
fid = omapvid_handle_interlace_display(vout, irqstatus,
- timevalue);
+ ts);
if (!fid)
goto vout_isr_err;
break;
@@ -608,7 +608,7 @@ static void omap_vout_isr(void *arg, unsigned int irqstatus)
}
if (!vout->first_int && (vout->cur_frm != vout->next_frm)) {
- vout->cur_frm->ts = timevalue;
+ vout->cur_frm->ts = ts;
vout->cur_frm->state = VIDEOBUF_DONE;
wake_up_interruptible(&vout->cur_frm->done);
vout->cur_frm = vout->next_frm;
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index 5f930560eb30..3cf3c6390cc8 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -2394,15 +2394,17 @@ static int pxa_camera_probe(struct platform_device *pdev)
pcdev->res = res;
pcdev->pdata = pdev->dev.platform_data;
- if (pdev->dev.of_node && !pcdev->pdata) {
- err = pxa_camera_pdata_from_dt(&pdev->dev, pcdev, &pcdev->asd);
- } else {
+ if (pcdev->pdata) {
pcdev->platform_flags = pcdev->pdata->flags;
pcdev->mclk = pcdev->pdata->mclk_10khz * 10000;
pcdev->asd.match_type = V4L2_ASYNC_MATCH_I2C;
pcdev->asd.match.i2c.adapter_id =
pcdev->pdata->sensor_i2c_adapter_id;
pcdev->asd.match.i2c.address = pcdev->pdata->sensor_i2c_address;
+ } else if (pdev->dev.of_node) {
+ err = pxa_camera_pdata_from_dt(&pdev->dev, pcdev, &pcdev->asd);
+ } else {
+ return -ENODEV;
}
if (err < 0)
return err;
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index cb411eb85ee4..739366744e0f 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -455,7 +455,7 @@ static const struct venus_resources msm8996_res = {
.reg_tbl_size = ARRAY_SIZE(msm8996_reg_preset),
.clks = {"core", "iface", "bus", "mbus" },
.clks_num = 4,
- .max_load = 2563200,
+ .max_load = 3110400, /* 4096x2160@90 */
.hfi_version = HFI_VERSION_3XX,
.vmem_id = VIDC_RESOURCE_NONE,
.vmem_size = 0,
@@ -465,10 +465,12 @@ static const struct venus_resources msm8996_res = {
};
static const struct freq_tbl sdm845_freq_table[] = {
- { 1944000, 380000000 }, /* 4k UHD @ 60 */
- { 972000, 320000000 }, /* 4k UHD @ 30 */
- { 489600, 200000000 }, /* 1080p @ 60 */
- { 244800, 100000000 }, /* 1080p @ 30 */
+ { 3110400, 533000000 }, /* 4096x2160@90 */
+ { 2073600, 444000000 }, /* 4096x2160@60 */
+ { 1944000, 404000000 }, /* 3840x2160@60 */
+ { 972000, 330000000 }, /* 3840x2160@30 */
+ { 489600, 200000000 }, /* 1920x1080@60 */
+ { 244800, 100000000 }, /* 1920x1080@30 */
};
static const struct venus_resources sdm845_res = {
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index 6382cea29185..79c7e816c706 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -134,6 +134,7 @@ struct venus_core {
struct video_firmware {
struct device *dev;
struct iommu_domain *iommu_domain;
+ size_t mapped_mem_size;
} fw;
struct mutex lock;
struct list_head instances;
diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
index c29acfd70c1b..6cfa8021721e 100644
--- a/drivers/media/platform/qcom/venus/firmware.c
+++ b/drivers/media/platform/qcom/venus/firmware.c
@@ -35,14 +35,15 @@
static void venus_reset_cpu(struct venus_core *core)
{
+ u32 fw_size = core->fw.mapped_mem_size;
void __iomem *base = core->base;
writel(0, base + WRAPPER_FW_START_ADDR);
- writel(VENUS_FW_MEM_SIZE, base + WRAPPER_FW_END_ADDR);
+ writel(fw_size, base + WRAPPER_FW_END_ADDR);
writel(0, base + WRAPPER_CPA_START_ADDR);
- writel(VENUS_FW_MEM_SIZE, base + WRAPPER_CPA_END_ADDR);
- writel(VENUS_FW_MEM_SIZE, base + WRAPPER_NONPIX_START_ADDR);
- writel(VENUS_FW_MEM_SIZE, base + WRAPPER_NONPIX_END_ADDR);
+ writel(fw_size, base + WRAPPER_CPA_END_ADDR);
+ writel(fw_size, base + WRAPPER_NONPIX_START_ADDR);
+ writel(fw_size, base + WRAPPER_NONPIX_END_ADDR);
writel(0x0, base + WRAPPER_CPU_CGC_DIS);
writel(0x0, base + WRAPPER_CPU_CLOCK_CONFIG);
@@ -74,6 +75,9 @@ static int venus_load_fw(struct venus_core *core, const char *fwname,
void *mem_va;
int ret;
+ *mem_phys = 0;
+ *mem_size = 0;
+
dev = core->dev;
node = of_parse_phandle(dev->of_node, "memory-region", 0);
if (!node) {
@@ -85,28 +89,30 @@ static int venus_load_fw(struct venus_core *core, const char *fwname,
if (ret)
return ret;
+ ret = request_firmware(&mdt, fwname, dev);
+ if (ret < 0)
+ return ret;
+
+ fw_size = qcom_mdt_get_size(mdt);
+ if (fw_size < 0) {
+ ret = fw_size;
+ goto err_release_fw;
+ }
+
*mem_phys = r.start;
*mem_size = resource_size(&r);
- if (*mem_size < VENUS_FW_MEM_SIZE)
- return -EINVAL;
+ if (*mem_size < fw_size || fw_size > VENUS_FW_MEM_SIZE) {
+ ret = -EINVAL;
+ goto err_release_fw;
+ }
mem_va = memremap(r.start, *mem_size, MEMREMAP_WC);
if (!mem_va) {
dev_err(dev, "unable to map memory region: %pa+%zx\n",
&r.start, *mem_size);
- return -ENOMEM;
- }
-
- ret = request_firmware(&mdt, fwname, dev);
- if (ret < 0)
- goto err_unmap;
-
- fw_size = qcom_mdt_get_size(mdt);
- if (fw_size < 0) {
- ret = fw_size;
- release_firmware(mdt);
- goto err_unmap;
+ ret = -ENOMEM;
+ goto err_release_fw;
}
if (core->use_tz)
@@ -116,10 +122,9 @@ static int venus_load_fw(struct venus_core *core, const char *fwname,
ret = qcom_mdt_load_no_init(dev, mdt, fwname, VENUS_PAS_ID,
mem_va, *mem_phys, *mem_size, NULL);
- release_firmware(mdt);
-
-err_unmap:
memunmap(mem_va);
+err_release_fw:
+ release_firmware(mdt);
return ret;
}
@@ -135,6 +140,7 @@ static int venus_boot_no_tz(struct venus_core *core, phys_addr_t mem_phys,
return -EPROBE_DEFER;
iommu = core->fw.iommu_domain;
+ core->fw.mapped_mem_size = mem_size;
ret = iommu_map(iommu, VENUS_FW_START_ADDR, mem_phys, mem_size,
IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV);
@@ -150,6 +156,7 @@ static int venus_boot_no_tz(struct venus_core *core, phys_addr_t mem_phys,
static int venus_shutdown_no_tz(struct venus_core *core)
{
+ const size_t mapped = core->fw.mapped_mem_size;
struct iommu_domain *iommu;
size_t unmapped;
u32 reg;
@@ -166,8 +173,8 @@ static int venus_shutdown_no_tz(struct venus_core *core)
iommu = core->fw.iommu_domain;
- unmapped = iommu_unmap(iommu, VENUS_FW_START_ADDR, VENUS_FW_MEM_SIZE);
- if (unmapped != VENUS_FW_MEM_SIZE)
+ unmapped = iommu_unmap(iommu, VENUS_FW_START_ADDR, mapped);
+ if (unmapped != mapped)
dev_err(dev, "failed to unmap firmware\n");
return 0;
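
[Editor's note] The new mapped_mem_size field lets shutdown unmap exactly what boot mapped instead of the fixed VENUS_FW_MEM_SIZE. A sketch of the map/unmap pairing, assuming a hypothetical private struct with a mapped_size member:

    #include <linux/iommu.h>

    struct fw_priv {                        /* hypothetical */
            struct iommu_domain *domain;
            size_t mapped_size;
    };

    static int fw_map(struct fw_priv *p, unsigned long iova,
                      phys_addr_t phys, size_t size)
    {
            int ret = iommu_map(p->domain, iova, phys, size,
                                IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV);

            if (!ret)
                    p->mapped_size = size;  /* remember what was mapped */
            return ret;
    }

    static int fw_unmap(struct fw_priv *p, unsigned long iova)
    {
            /* unmap exactly what fw_map() mapped, not a fixed constant */
            if (iommu_unmap(p->domain, iova, p->mapped_size) != p->mapped_size)
                    return -EIO;
            return 0;
    }
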
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index e436385bc5ab..5cad601d4c57 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -439,9 +439,6 @@ session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
fdata.flags = 0;
fdata.clnt_data = vbuf->vb2_buf.index;
- if (!fdata.timestamp)
- fdata.flags |= HFI_BUFFERFLAG_TIMESTAMPINVALID;
-
if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
fdata.buffer_type = HFI_BUFFER_INPUT;
fdata.filled_len = vb2_get_plane_payload(vb, 0);
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index f0719ce24b97..594d80434004 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -131,9 +131,13 @@ static int rvin_group_link_notify(struct media_link *link, u32 flags,
!is_media_entity_v4l2_video_device(link->sink->entity))
return 0;
- /* If any entity is in use don't allow link changes. */
+ /*
+ * Don't allow link changes if any entity in the graph is
+ * streaming, modifying the CHSEL register fields can disrupt
+ * running streams.
+ */
media_device_for_each_entity(entity, &group->mdev)
- if (entity->use_count)
+ if (entity->stream_count)
return -EBUSY;
mutex_lock(&group->lock);
@@ -542,9 +546,7 @@ static void rvin_parallel_notify_unbind(struct v4l2_async_notifier *notifier,
vin_dbg(vin, "unbind parallel subdev %s\n", subdev->name);
- mutex_lock(&vin->lock);
rvin_parallel_subdevice_detach(vin);
- mutex_unlock(&vin->lock);
}
static int rvin_parallel_notify_bound(struct v4l2_async_notifier *notifier,
@@ -554,9 +556,7 @@ static int rvin_parallel_notify_bound(struct v4l2_async_notifier *notifier,
struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
int ret;
- mutex_lock(&vin->lock);
ret = rvin_parallel_subdevice_attach(vin, subdev);
- mutex_unlock(&vin->lock);
if (ret)
return ret;
@@ -664,7 +664,6 @@ static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier)
}
/* Create all media device links between VINs and CSI-2's. */
- mutex_lock(&vin->group->lock);
for (route = vin->info->routes; route->mask; route++) {
struct media_pad *source_pad, *sink_pad;
struct media_entity *source, *sink;
@@ -700,7 +699,6 @@ static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier)
break;
}
}
- mutex_unlock(&vin->group->lock);
return ret;
}
@@ -716,8 +714,6 @@ static void rvin_group_notify_unbind(struct v4l2_async_notifier *notifier,
if (vin->group->vin[i])
rvin_v4l2_unregister(vin->group->vin[i]);
- mutex_lock(&vin->group->lock);
-
for (i = 0; i < RVIN_CSI_MAX; i++) {
if (vin->group->csi[i].fwnode != asd->match.fwnode)
continue;
@@ -725,8 +721,6 @@ static void rvin_group_notify_unbind(struct v4l2_async_notifier *notifier,
vin_dbg(vin, "Unbind CSI-2 %s from slot %u\n", subdev->name, i);
break;
}
-
- mutex_unlock(&vin->group->lock);
}
static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier,
@@ -736,8 +730,6 @@ static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier,
struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
unsigned int i;
- mutex_lock(&vin->group->lock);
-
for (i = 0; i < RVIN_CSI_MAX; i++) {
if (vin->group->csi[i].fwnode != asd->match.fwnode)
continue;
@@ -746,8 +738,6 @@ static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier,
break;
}
- mutex_unlock(&vin->group->lock);
-
return 0;
}
@@ -1146,6 +1136,10 @@ static const struct rvin_info rcar_info_r8a77995 = {
static const struct of_device_id rvin_of_id_table[] = {
{
+ .compatible = "renesas,vin-r8a774c0",
+ .data = &rcar_info_r8a77990,
+ },
+ {
.compatible = "renesas,vin-r8a7778",
.data = &rcar_info_m1,
},
diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
index 6d356f5a9456..f64528d2be3c 100644
--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
@@ -152,37 +152,37 @@ static const struct rcsi2_mbps_reg phtw_mbps_h3_v3h_m3n[] = {
};
static const struct rcsi2_mbps_reg phtw_mbps_v3m_e3[] = {
- { .mbps = 89, .reg = 0x00 },
- { .mbps = 99, .reg = 0x20 },
- { .mbps = 109, .reg = 0x40 },
- { .mbps = 129, .reg = 0x02 },
- { .mbps = 139, .reg = 0x22 },
- { .mbps = 149, .reg = 0x42 },
- { .mbps = 169, .reg = 0x04 },
- { .mbps = 179, .reg = 0x24 },
- { .mbps = 199, .reg = 0x44 },
- { .mbps = 219, .reg = 0x06 },
- { .mbps = 239, .reg = 0x26 },
- { .mbps = 249, .reg = 0x46 },
- { .mbps = 269, .reg = 0x08 },
- { .mbps = 299, .reg = 0x28 },
- { .mbps = 329, .reg = 0x0a },
- { .mbps = 359, .reg = 0x2a },
- { .mbps = 399, .reg = 0x4a },
- { .mbps = 449, .reg = 0x0c },
- { .mbps = 499, .reg = 0x2c },
- { .mbps = 549, .reg = 0x0e },
- { .mbps = 599, .reg = 0x2e },
- { .mbps = 649, .reg = 0x10 },
- { .mbps = 699, .reg = 0x30 },
- { .mbps = 749, .reg = 0x12 },
- { .mbps = 799, .reg = 0x32 },
- { .mbps = 849, .reg = 0x52 },
- { .mbps = 899, .reg = 0x72 },
- { .mbps = 949, .reg = 0x14 },
- { .mbps = 999, .reg = 0x34 },
- { .mbps = 1049, .reg = 0x54 },
- { .mbps = 1099, .reg = 0x74 },
+ { .mbps = 80, .reg = 0x00 },
+ { .mbps = 90, .reg = 0x20 },
+ { .mbps = 100, .reg = 0x40 },
+ { .mbps = 110, .reg = 0x02 },
+ { .mbps = 130, .reg = 0x22 },
+ { .mbps = 140, .reg = 0x42 },
+ { .mbps = 150, .reg = 0x04 },
+ { .mbps = 170, .reg = 0x24 },
+ { .mbps = 180, .reg = 0x44 },
+ { .mbps = 200, .reg = 0x06 },
+ { .mbps = 220, .reg = 0x26 },
+ { .mbps = 240, .reg = 0x46 },
+ { .mbps = 250, .reg = 0x08 },
+ { .mbps = 270, .reg = 0x28 },
+ { .mbps = 300, .reg = 0x0a },
+ { .mbps = 330, .reg = 0x2a },
+ { .mbps = 360, .reg = 0x4a },
+ { .mbps = 400, .reg = 0x0c },
+ { .mbps = 450, .reg = 0x2c },
+ { .mbps = 500, .reg = 0x0e },
+ { .mbps = 550, .reg = 0x2e },
+ { .mbps = 600, .reg = 0x10 },
+ { .mbps = 650, .reg = 0x30 },
+ { .mbps = 700, .reg = 0x12 },
+ { .mbps = 750, .reg = 0x32 },
+ { .mbps = 800, .reg = 0x52 },
+ { .mbps = 850, .reg = 0x72 },
+ { .mbps = 900, .reg = 0x14 },
+ { .mbps = 950, .reg = 0x34 },
+ { .mbps = 1000, .reg = 0x54 },
+ { .mbps = 1050, .reg = 0x74 },
{ .mbps = 1125, .reg = 0x16 },
{ /* sentinel */ },
};
@@ -986,6 +986,10 @@ static const struct rcar_csi2_info rcar_csi2_info_r8a77990 = {
static const struct of_device_id rcar_csi2_of_table[] = {
{
+ .compatible = "renesas,r8a774c0-csi2",
+ .data = &rcar_csi2_info_r8a77990,
+ },
+ {
.compatible = "renesas,r8a7795-csi2",
.data = &rcar_csi2_info_r8a7795,
},
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
index 92323310f735..beb9248992a4 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -1341,5 +1341,5 @@ int rvin_set_channel_routing(struct rvin_dev *vin, u8 chsel)
pm_runtime_put(vin->dev);
- return ret;
+ return 0;
}
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 3f9000b70385..0a23b2d19e14 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -3,7 +3,7 @@
* Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
* Author: Jacek Anaszewski <j.anaszewski@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -1293,13 +1293,16 @@ static int s5p_jpeg_querycap(struct file *file, void *priv,
return 0;
}
-static int enum_fmt(struct s5p_jpeg_fmt *sjpeg_formats, int n,
+static int enum_fmt(struct s5p_jpeg_ctx *ctx,
+ struct s5p_jpeg_fmt *sjpeg_formats, int n,
struct v4l2_fmtdesc *f, u32 type)
{
int i, num = 0;
+ unsigned int fmt_ver_flag = ctx->jpeg->variant->fmt_ver_flag;
for (i = 0; i < n; ++i) {
- if (sjpeg_formats[i].flags & type) {
+ if (sjpeg_formats[i].flags & type &&
+ sjpeg_formats[i].flags & fmt_ver_flag) {
/* index-th format of type type found ? */
if (num == f->index)
break;
@@ -1326,11 +1329,11 @@ static int s5p_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
if (ctx->mode == S5P_JPEG_ENCODE)
- return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
+ return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
SJPEG_FMT_FLAG_ENC_CAPTURE);
- return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
- SJPEG_FMT_FLAG_DEC_CAPTURE);
+ return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
+ SJPEG_FMT_FLAG_DEC_CAPTURE);
}
static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
@@ -1339,11 +1342,11 @@ static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
if (ctx->mode == S5P_JPEG_ENCODE)
- return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
+ return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
SJPEG_FMT_FLAG_ENC_OUTPUT);
- return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
- SJPEG_FMT_FLAG_DEC_OUTPUT);
+ return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
+ SJPEG_FMT_FLAG_DEC_OUTPUT);
}
static struct s5p_jpeg_q_data *get_q_data(struct s5p_jpeg_ctx *ctx,
@@ -2002,7 +2005,7 @@ static int s5p_jpeg_controls_create(struct s5p_jpeg_ctx *ctx)
v4l2_ctrl_new_std(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops,
V4L2_CID_JPEG_RESTART_INTERVAL,
- 0, 3, 0xffff, 0);
+ 0, 0xffff, 1, 0);
if (ctx->jpeg->variant->version == SJPEG_S5P)
mask = ~0x06; /* 422, 420 */
}
@@ -3220,7 +3223,7 @@ static struct platform_driver s5p_jpeg_driver = {
module_platform_driver(s5p_jpeg_driver);
-MODULE_AUTHOR("Andrzej Pietrasiewicz <andrzej.p@samsung.com>");
+MODULE_AUTHOR("Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>");
MODULE_AUTHOR("Jacek Anaszewski <j.anaszewski@samsung.com>");
MODULE_DESCRIPTION("Samsung JPEG codec driver");
MODULE_LICENSE("GPL");
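
[Editor's note] The restart-interval fix above is an argument-order correction: v4l2_ctrl_new_std() takes (handler, ops, id, min, max, step, def), so the old call registered a control with max = 3 and step = 0xffff. Annotated form of the corrected call, names as in the diff:

    /*                handler              ops                 id                              min  max     step def */
    v4l2_ctrl_new_std(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops,
                      V4L2_CID_JPEG_RESTART_INTERVAL, 0, 0xffff, 1, 0);
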
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.h b/drivers/media/platform/s5p-jpeg/jpeg-core.h
index a46465e10351..90fda4b720eb 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.h
@@ -3,7 +3,7 @@
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c
index b5f20e722b63..59c6263a71bf 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c
@@ -3,7 +3,7 @@
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h
index f208fa3ed738..bfe746f8f750 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h
@@ -3,7 +3,7 @@
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-regs.h b/drivers/media/platform/s5p-jpeg/jpeg-regs.h
index df790b10140c..574f0e8021e5 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-regs.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-regs.h
@@ -5,7 +5,7 @@
* Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
* Author: Jacek Anaszewski <j.anaszewski@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 8a5ba3bec3af..0a9f59d89185 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -1089,7 +1089,6 @@ static struct device *s5p_mfc_alloc_memdev(struct device *dev,
device_initialize(child);
dev_set_name(child, "%s:%s", dev_name(dev), name);
child->parent = dev;
- child->bus = dev->bus;
child->coherent_dma_mask = dev->coherent_dma_mask;
child->dma_mask = dev->dma_mask;
child->release = s5p_mfc_memdev_release;
diff --git a/drivers/media/platform/seco-cec/seco-cec.h b/drivers/media/platform/seco-cec/seco-cec.h
index e632c4a2a044..843de8c7dfd4 100644
--- a/drivers/media/platform/seco-cec/seco-cec.h
+++ b/drivers/media/platform/seco-cec/seco-cec.h
@@ -106,7 +106,7 @@
#define SECOCEC_IR_COMMAND_MASK 0x007F
#define SECOCEC_IR_COMMAND_SHL 0
#define SECOCEC_IR_ADDRESS_MASK 0x1F00
-#define SECOCEC_IR_ADDRESS_SHL 7
+#define SECOCEC_IR_ADDRESS_SHL 8
#define SECOCEC_IR_TOGGLE_MASK 0x8000
#define SECOCEC_IR_TOGGLE_SHL 15
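
[Editor's note] The shift fix follows from the mask itself: 0x1F00 selects bits 12:8, so the address field must be shifted down by 8, not 7. A small sketch of extracting such a field with the macro names from this header (illustrative, not necessarily the driver's exact code):

    #include <linux/types.h>

    static inline u8 secocec_ir_address(u16 status)
    {
            /* (status & 0x1f00) >> 8 yields the 5-bit RC-5 address, 0..31 */
            return (status & SECOCEC_IR_ADDRESS_MASK) >> SECOCEC_IR_ADDRESS_SHL;
    }
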
diff --git a/drivers/media/platform/soc_camera/Kconfig b/drivers/media/platform/soc_camera/Kconfig
index 669d116b8f09..8f9b3bac5450 100644
--- a/drivers/media/platform/soc_camera/Kconfig
+++ b/drivers/media/platform/soc_camera/Kconfig
@@ -6,21 +6,3 @@ config SOC_CAMERA
SoC Camera is a common API to several cameras, not connecting
over a bus like PCI or USB. For example some i2c camera connected
directly to the data bus of an SoC.
-
-config SOC_CAMERA_SCALE_CROP
- tristate
-
-config SOC_CAMERA_PLATFORM
- tristate "platform camera support"
- depends on SOC_CAMERA
- help
- This is a generic SoC camera platform driver, useful for testing
-
-config VIDEO_SH_MOBILE_CEU
- tristate "SuperH Mobile CEU Interface driver"
- depends on VIDEO_DEV && SOC_CAMERA && HAVE_CLK
- depends on ARCH_SHMOBILE || COMPILE_TEST
- select VIDEOBUF2_DMA_CONTIG
- select SOC_CAMERA_SCALE_CROP
- ---help---
- This is a v4l2 driver for the SuperH Mobile CEU Interface
diff --git a/drivers/media/platform/soc_camera/Makefile b/drivers/media/platform/soc_camera/Makefile
index 07a451e8b228..85d5e74f3b2b 100644
--- a/drivers/media/platform/soc_camera/Makefile
+++ b/drivers/media/platform/soc_camera/Makefile
@@ -1,9 +1 @@
obj-$(CONFIG_SOC_CAMERA) += soc_camera.o soc_mediabus.o
-obj-$(CONFIG_SOC_CAMERA_SCALE_CROP) += soc_scale_crop.o
-
-# a platform subdevice driver stub, allowing to support cameras by adding a
-# couple of callback functions to the board code
-obj-$(CONFIG_SOC_CAMERA_PLATFORM) += soc_camera_platform.o
-
-# soc-camera host drivers have to be linked after camera drivers
-obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
deleted file mode 100644
index 6803f744e307..000000000000
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ /dev/null
@@ -1,1810 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * V4L2 Driver for SuperH Mobile CEU interface
- *
- * Copyright (C) 2008 Magnus Damm
- *
- * Based on V4L2 Driver for PXA camera host - "pxa_camera.c",
- *
- * Copyright (C) 2006, Sascha Hauer, Pengutronix
- * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/moduleparam.h>
-#include <linux/of.h>
-#include <linux/time.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/videodev2.h>
-#include <linux/pm_runtime.h>
-#include <linux/sched.h>
-
-#include <media/v4l2-async.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-dev.h>
-#include <media/soc_camera.h>
-#include <media/drv-intf/sh_mobile_ceu.h>
-#include <media/videobuf2-dma-contig.h>
-#include <media/v4l2-mediabus.h>
-#include <media/drv-intf/soc_mediabus.h>
-
-#include "soc_scale_crop.h"
-
-/* register offsets for sh7722 / sh7723 */
-
-#define CAPSR 0x00 /* Capture start register */
-#define CAPCR 0x04 /* Capture control register */
-#define CAMCR 0x08 /* Capture interface control register */
-#define CMCYR 0x0c /* Capture interface cycle register */
-#define CAMOR 0x10 /* Capture interface offset register */
-#define CAPWR 0x14 /* Capture interface width register */
-#define CAIFR 0x18 /* Capture interface input format register */
-#define CSTCR 0x20 /* Camera strobe control register (<= sh7722) */
-#define CSECR 0x24 /* Camera strobe emission count register (<= sh7722) */
-#define CRCNTR 0x28 /* CEU register control register */
-#define CRCMPR 0x2c /* CEU register forcible control register */
-#define CFLCR 0x30 /* Capture filter control register */
-#define CFSZR 0x34 /* Capture filter size clip register */
-#define CDWDR 0x38 /* Capture destination width register */
-#define CDAYR 0x3c /* Capture data address Y register */
-#define CDACR 0x40 /* Capture data address C register */
-#define CDBYR 0x44 /* Capture data bottom-field address Y register */
-#define CDBCR 0x48 /* Capture data bottom-field address C register */
-#define CBDSR 0x4c /* Capture bundle destination size register */
-#define CFWCR 0x5c /* Firewall operation control register */
-#define CLFCR 0x60 /* Capture low-pass filter control register */
-#define CDOCR 0x64 /* Capture data output control register */
-#define CDDCR 0x68 /* Capture data complexity level register */
-#define CDDAR 0x6c /* Capture data complexity level address register */
-#define CEIER 0x70 /* Capture event interrupt enable register */
-#define CETCR 0x74 /* Capture event flag clear register */
-#define CSTSR 0x7c /* Capture status register */
-#define CSRTR 0x80 /* Capture software reset register */
-#define CDSSR 0x84 /* Capture data size register */
-#define CDAYR2 0x90 /* Capture data address Y register 2 */
-#define CDACR2 0x94 /* Capture data address C register 2 */
-#define CDBYR2 0x98 /* Capture data bottom-field address Y register 2 */
-#define CDBCR2 0x9c /* Capture data bottom-field address C register 2 */
-
-#undef DEBUG_GEOMETRY
-#ifdef DEBUG_GEOMETRY
-#define dev_geo dev_info
-#else
-#define dev_geo dev_dbg
-#endif
-
-/* per video frame buffer */
-struct sh_mobile_ceu_buffer {
- struct vb2_v4l2_buffer vb; /* v4l buffer must be first */
- struct list_head queue;
-};
-
-struct sh_mobile_ceu_dev {
- struct soc_camera_host ici;
-
- unsigned int irq;
- void __iomem *base;
- size_t video_limit;
- size_t buf_total;
-
- spinlock_t lock; /* Protects video buffer lists */
- struct list_head capture;
- struct vb2_v4l2_buffer *active;
-
- struct sh_mobile_ceu_info *pdata;
- struct completion complete;
-
- u32 cflcr;
-
- /* static max sizes either from platform data or default */
- int max_width;
- int max_height;
-
- enum v4l2_field field;
- int sequence;
- unsigned long flags;
-
- unsigned int image_mode:1;
- unsigned int is_16bit:1;
- unsigned int frozen:1;
-};
-
-struct sh_mobile_ceu_cam {
- /* CEU offsets within the camera output, before the CEU scaler */
- unsigned int ceu_left;
- unsigned int ceu_top;
- /* Client output, as seen by the CEU */
- unsigned int width;
- unsigned int height;
- /*
- * User window from S_SELECTION / G_SELECTION, produced by client cropping and
- * scaling, CEU scaling and CEU cropping, mapped back onto the client
- * input window
- */
- struct v4l2_rect subrect;
- /* Camera cropping rectangle */
- struct v4l2_rect rect;
- const struct soc_mbus_pixelfmt *extra_fmt;
- u32 code;
-};
-
-static struct sh_mobile_ceu_buffer *to_ceu_vb(struct vb2_v4l2_buffer *vbuf)
-{
- return container_of(vbuf, struct sh_mobile_ceu_buffer, vb);
-}
-
-static void ceu_write(struct sh_mobile_ceu_dev *priv,
- unsigned long reg_offs, u32 data)
-{
- iowrite32(data, priv->base + reg_offs);
-}
-
-static u32 ceu_read(struct sh_mobile_ceu_dev *priv, unsigned long reg_offs)
-{
- return ioread32(priv->base + reg_offs);
-}
-
-static int sh_mobile_ceu_soft_reset(struct sh_mobile_ceu_dev *pcdev)
-{
- int i, success = 0;
-
- ceu_write(pcdev, CAPSR, 1 << 16); /* reset */
-
- /* wait CSTSR.CPTON bit */
- for (i = 0; i < 1000; i++) {
- if (!(ceu_read(pcdev, CSTSR) & 1)) {
- success++;
- break;
- }
- udelay(1);
- }
-
- /* wait CAPSR.CPKIL bit */
- for (i = 0; i < 1000; i++) {
- if (!(ceu_read(pcdev, CAPSR) & (1 << 16))) {
- success++;
- break;
- }
- udelay(1);
- }
-
- if (2 != success) {
- dev_warn(pcdev->ici.v4l2_dev.dev, "soft reset time out\n");
- return -EIO;
- }
-
- return 0;
-}
-
-/*
- * Videobuf operations
- */
-
-/*
- * .queue_setup() is called to check, whether the driver can accept the
- * requested number of buffers and to fill in plane sizes
- * for the current frame format if required
- */
-static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq,
- unsigned int *count, unsigned int *num_planes,
- unsigned int sizes[], struct device *alloc_devs[])
-{
- struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
-
- if (!vq->num_buffers)
- pcdev->sequence = 0;
-
- if (!*count)
- *count = 2;
-
- /* Called from VIDIOC_REQBUFS or in compatibility mode */
- if (!*num_planes)
- sizes[0] = icd->sizeimage;
- else if (sizes[0] < icd->sizeimage)
- return -EINVAL;
-
- /* If *num_planes != 0, we have already verified *count. */
- if (pcdev->video_limit) {
- size_t size = PAGE_ALIGN(sizes[0]) * *count;
-
- if (size + pcdev->buf_total > pcdev->video_limit)
- *count = (pcdev->video_limit - pcdev->buf_total) /
- PAGE_ALIGN(sizes[0]);
- }
-
- *num_planes = 1;
-
- dev_dbg(icd->parent, "count=%d, size=%u\n", *count, sizes[0]);
-
- return 0;
-}
-
-#define CEU_CETCR_MAGIC 0x0317f313 /* acknowledge magical interrupt sources */
-#define CEU_CETCR_IGRW (1 << 4) /* prohibited register access interrupt bit */
-#define CEU_CEIER_CPEIE (1 << 0) /* one-frame capture end interrupt */
-#define CEU_CEIER_VBP (1 << 20) /* vbp error */
-#define CEU_CAPCR_CTNCP (1 << 16) /* continuous capture mode (if set) */
-#define CEU_CEIER_MASK (CEU_CEIER_CPEIE | CEU_CEIER_VBP)
-
-
-/*
- * return value doesn't reflex the success/failure to queue the new buffer,
- * but rather the status of the previous buffer.
- */
-static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev)
-{
- struct soc_camera_device *icd = pcdev->ici.icd;
- dma_addr_t phys_addr_top, phys_addr_bottom;
- unsigned long top1, top2;
- unsigned long bottom1, bottom2;
- u32 status;
- bool planar;
- int ret = 0;
-
- /*
- * The hardware is _very_ picky about this sequence. Especially
- * the CEU_CETCR_MAGIC value. It seems like we need to acknowledge
- * several not-so-well documented interrupt sources in CETCR.
- */
- ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) & ~CEU_CEIER_MASK);
- status = ceu_read(pcdev, CETCR);
- ceu_write(pcdev, CETCR, ~status & CEU_CETCR_MAGIC);
- if (!pcdev->frozen)
- ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) | CEU_CEIER_MASK);
- ceu_write(pcdev, CAPCR, ceu_read(pcdev, CAPCR) & ~CEU_CAPCR_CTNCP);
- ceu_write(pcdev, CETCR, CEU_CETCR_MAGIC ^ CEU_CETCR_IGRW);
-
- /*
- * When a VBP interrupt occurs, a capture end interrupt does not occur
- * and the image of that frame is not captured correctly. So, soft reset
- * is needed here.
- */
- if (status & CEU_CEIER_VBP) {
- sh_mobile_ceu_soft_reset(pcdev);
- ret = -EIO;
- }
-
- if (pcdev->frozen) {
- complete(&pcdev->complete);
- return ret;
- }
-
- if (!pcdev->active)
- return ret;
-
- if (V4L2_FIELD_INTERLACED_BT == pcdev->field) {
- top1 = CDBYR;
- top2 = CDBCR;
- bottom1 = CDAYR;
- bottom2 = CDACR;
- } else {
- top1 = CDAYR;
- top2 = CDACR;
- bottom1 = CDBYR;
- bottom2 = CDBCR;
- }
-
- phys_addr_top =
- vb2_dma_contig_plane_dma_addr(&pcdev->active->vb2_buf, 0);
-
- switch (icd->current_fmt->host_fmt->fourcc) {
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV21:
- case V4L2_PIX_FMT_NV16:
- case V4L2_PIX_FMT_NV61:
- planar = true;
- break;
- default:
- planar = false;
- }
-
- ceu_write(pcdev, top1, phys_addr_top);
- if (V4L2_FIELD_NONE != pcdev->field) {
- phys_addr_bottom = phys_addr_top + icd->bytesperline;
- ceu_write(pcdev, bottom1, phys_addr_bottom);
- }
-
- if (planar) {
- phys_addr_top += icd->bytesperline * icd->user_height;
- ceu_write(pcdev, top2, phys_addr_top);
- if (V4L2_FIELD_NONE != pcdev->field) {
- phys_addr_bottom = phys_addr_top + icd->bytesperline;
- ceu_write(pcdev, bottom2, phys_addr_bottom);
- }
- }
-
- ceu_write(pcdev, CAPSR, 0x1); /* start capture */
-
- return ret;
-}
-
-static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf);
-
- /* Added list head initialization on alloc */
- WARN(!list_empty(&buf->queue), "Buffer %p on queue!\n", vb);
-
- return 0;
-}
-
-static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
- struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf);
- unsigned long size;
-
- size = icd->sizeimage;
-
- if (vb2_plane_size(vb, 0) < size) {
- dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n",
- vb->index, vb2_plane_size(vb, 0), size);
- goto error;
- }
-
- vb2_set_plane_payload(vb, 0, size);
-
- dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
- vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
-
-#ifdef DEBUG
- /*
- * This can be useful if you want to see if we actually fill
- * the buffer with something
- */
- if (vb2_plane_vaddr(vb, 0))
- memset(vb2_plane_vaddr(vb, 0), 0xaa, vb2_get_plane_payload(vb, 0));
-#endif
-
- spin_lock_irq(&pcdev->lock);
- list_add_tail(&buf->queue, &pcdev->capture);
-
- if (!pcdev->active) {
-		/*
-		 * Because there was no active buffer at this moment,
-		 * we are not interested in the return value of
-		 * sh_mobile_ceu_capture here.
-		 */
- pcdev->active = vbuf;
- sh_mobile_ceu_capture(pcdev);
- }
- spin_unlock_irq(&pcdev->lock);
-
- return;
-
-error:
- vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
-}
-
-static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
-
- spin_lock_irq(&pcdev->lock);
-
- if (pcdev->active == vbuf) {
- /* disable capture (release DMA buffer), reset */
- ceu_write(pcdev, CAPSR, 1 << 16);
- pcdev->active = NULL;
- }
-
-	/*
-	 * list_del_init() doesn't hurt if the list is empty, but it would
-	 * hurt if queuing the buffer failed and .buf_init() hasn't been
-	 * called, hence the check
-	 */
- if (buf->queue.next)
- list_del_init(&buf->queue);
-
- pcdev->buf_total -= PAGE_ALIGN(vb2_plane_size(vb, 0));
- dev_dbg(icd->parent, "%s() %zu bytes buffers\n", __func__,
- pcdev->buf_total);
-
- spin_unlock_irq(&pcdev->lock);
-}
-
-static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
-
- pcdev->buf_total += PAGE_ALIGN(vb2_plane_size(vb, 0));
- dev_dbg(icd->parent, "%s() %zu bytes buffers\n", __func__,
- pcdev->buf_total);
-
- /* This is for locking debugging only */
- INIT_LIST_HEAD(&to_ceu_vb(vbuf)->queue);
- return 0;
-}
-
-static void sh_mobile_ceu_stop_streaming(struct vb2_queue *q)
-{
- struct soc_camera_device *icd = soc_camera_from_vb2q(q);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
- struct list_head *buf_head, *tmp;
- struct vb2_v4l2_buffer *vbuf;
-
- spin_lock_irq(&pcdev->lock);
-
- pcdev->active = NULL;
-
- list_for_each_safe(buf_head, tmp, &pcdev->capture) {
- vbuf = &list_entry(buf_head, struct sh_mobile_ceu_buffer,
- queue)->vb;
- vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
- list_del_init(buf_head);
- }
-
- spin_unlock_irq(&pcdev->lock);
-
- sh_mobile_ceu_soft_reset(pcdev);
-}
-
-static const struct vb2_ops sh_mobile_ceu_videobuf_ops = {
- .queue_setup = sh_mobile_ceu_videobuf_setup,
- .buf_prepare = sh_mobile_ceu_videobuf_prepare,
- .buf_queue = sh_mobile_ceu_videobuf_queue,
- .buf_cleanup = sh_mobile_ceu_videobuf_release,
- .buf_init = sh_mobile_ceu_videobuf_init,
- .wait_prepare = vb2_ops_wait_prepare,
- .wait_finish = vb2_ops_wait_finish,
- .stop_streaming = sh_mobile_ceu_stop_streaming,
-};
-
-static irqreturn_t sh_mobile_ceu_irq(int irq, void *data)
-{
- struct sh_mobile_ceu_dev *pcdev = data;
- struct vb2_v4l2_buffer *vbuf;
- int ret;
-
- spin_lock(&pcdev->lock);
-
- vbuf = pcdev->active;
- if (!vbuf)
- /* Stale interrupt from a released buffer */
- goto out;
-
- list_del_init(&to_ceu_vb(vbuf)->queue);
-
- if (!list_empty(&pcdev->capture))
- pcdev->active = &list_entry(pcdev->capture.next,
- struct sh_mobile_ceu_buffer, queue)->vb;
- else
- pcdev->active = NULL;
-
- ret = sh_mobile_ceu_capture(pcdev);
- vbuf->vb2_buf.timestamp = ktime_get_ns();
- if (!ret) {
- vbuf->field = pcdev->field;
- vbuf->sequence = pcdev->sequence++;
- }
- vb2_buffer_done(&vbuf->vb2_buf,
- ret < 0 ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
-
-out:
- spin_unlock(&pcdev->lock);
-
- return IRQ_HANDLED;
-}
-
-static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
-{
- dev_info(icd->parent,
- "SuperH Mobile CEU driver attached to camera %d\n",
- icd->devnum);
-
- return 0;
-}
-
-static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
-{
- dev_info(icd->parent,
- "SuperH Mobile CEU driver detached from camera %d\n",
- icd->devnum);
-}
-
-/* Called with .host_lock held */
-static int sh_mobile_ceu_clock_start(struct soc_camera_host *ici)
-{
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
-
- pm_runtime_get_sync(ici->v4l2_dev.dev);
-
- pcdev->buf_total = 0;
-
- sh_mobile_ceu_soft_reset(pcdev);
-
- return 0;
-}
-
-/* Called with .host_lock held */
-static void sh_mobile_ceu_clock_stop(struct soc_camera_host *ici)
-{
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
-
- /* disable capture, disable interrupts */
- ceu_write(pcdev, CEIER, 0);
- sh_mobile_ceu_soft_reset(pcdev);
-
- /* make sure active buffer is canceled */
- spin_lock_irq(&pcdev->lock);
- if (pcdev->active) {
- list_del_init(&to_ceu_vb(pcdev->active)->queue);
- vb2_buffer_done(&pcdev->active->vb2_buf, VB2_BUF_STATE_ERROR);
- pcdev->active = NULL;
- }
- spin_unlock_irq(&pcdev->lock);
-
- pm_runtime_put(ici->v4l2_dev.dev);
-}
-
-/*
- * See chapter 29.4.12 "Capture Filter Control Register (CFLCR)"
- * in SH7722 Hardware Manual
- */
-static unsigned int size_dst(unsigned int src, unsigned int scale)
-{
-	unsigned int mant_pre = scale >> 12;
-
-	if (!src || !scale)
- return src;
- return ((mant_pre + 2 * (src - 1)) / (2 * mant_pre) - 1) *
- mant_pre * 4096 / scale + 1;
-}
-
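-/*
- * Example with illustrative values: for src = 640 and scale = 0x2000, i.e.
- * 2.0 in the 4.12 fixed-point format used by CFLCR, size_dst() returns 320
- * and calc_scale(640, &dst) with dst == 320 returns 0x2000.
- */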
-static u16 calc_scale(unsigned int src, unsigned int *dst)
-{
- u16 scale;
-
- if (src == *dst)
- return 0;
-
- scale = (src * 4096 / *dst) & ~7;
-
- while (scale > 4096 && size_dst(src, scale) < *dst)
- scale -= 8;
-
- *dst = size_dst(src, scale);
-
- return scale;
-}
-
-/* rect is guaranteed to not exceed the scaled camera rectangle */
-static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
-{
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct sh_mobile_ceu_cam *cam = icd->host_priv;
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
- unsigned int height, width, cdwdr_width, in_width, in_height;
- unsigned int left_offset, top_offset;
- u32 camor;
-
- dev_geo(icd->parent, "Crop %ux%u@%u:%u\n",
- icd->user_width, icd->user_height, cam->ceu_left, cam->ceu_top);
-
- left_offset = cam->ceu_left;
- top_offset = cam->ceu_top;
-
- WARN_ON(icd->user_width & 3 || icd->user_height & 3);
-
- width = icd->user_width;
-
- if (pcdev->image_mode) {
- in_width = cam->width;
- if (!pcdev->is_16bit) {
- in_width *= 2;
- left_offset *= 2;
- }
- } else {
- unsigned int w_factor;
-
- switch (icd->current_fmt->host_fmt->packing) {
- case SOC_MBUS_PACKING_2X8_PADHI:
- w_factor = 2;
- break;
- default:
- w_factor = 1;
- }
-
- in_width = cam->width * w_factor;
- left_offset *= w_factor;
- }
-
- cdwdr_width = icd->bytesperline;
-
- height = icd->user_height;
- in_height = cam->height;
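-	/*
-	 * For interlaced capture each field is half the frame height and the
-	 * two fields are interleaved line by line in memory, hence the halved
-	 * height / offset and the doubled line stride.
-	 */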
- if (V4L2_FIELD_NONE != pcdev->field) {
- height = (height / 2) & ~3;
- in_height /= 2;
- top_offset /= 2;
- cdwdr_width *= 2;
- }
-
- /* Set CAMOR, CAPWR, CFSZR, take care of CDWDR */
- camor = left_offset | (top_offset << 16);
-
- dev_geo(icd->parent,
- "CAMOR 0x%x, CAPWR 0x%x, CFSZR 0x%x, CDWDR 0x%x\n", camor,
- (in_height << 16) | in_width, (height << 16) | width,
- cdwdr_width);
-
- ceu_write(pcdev, CAMOR, camor);
- ceu_write(pcdev, CAPWR, (in_height << 16) | in_width);
- /* CFSZR clipping is applied _after_ the scaling filter (CFLCR) */
- ceu_write(pcdev, CFSZR, (height << 16) | width);
- ceu_write(pcdev, CDWDR, cdwdr_width);
-}
-
-static u32 capture_save_reset(struct sh_mobile_ceu_dev *pcdev)
-{
-	u32 capsr = ceu_read(pcdev, CAPSR);
-
-	ceu_write(pcdev, CAPSR, 1 << 16); /* reset, stop capture */
- return capsr;
-}
-
-static void capture_restore(struct sh_mobile_ceu_dev *pcdev, u32 capsr)
-{
- unsigned long timeout = jiffies + 10 * HZ;
-
- /*
- * Wait until the end of the current frame. It can take a long time,
-	 * but if it has been aborted by a CAPSR reset, it should exit sooner.
- */
- while ((ceu_read(pcdev, CSTSR) & 1) && time_before(jiffies, timeout))
- msleep(1);
-
- if (time_after(jiffies, timeout)) {
- dev_err(pcdev->ici.v4l2_dev.dev,
- "Timeout waiting for frame end! Interface problem?\n");
- return;
- }
-
- /* Wait until reset clears, this shall not hang... */
- while (ceu_read(pcdev, CAPSR) & (1 << 16))
- udelay(10);
-
- /* Anything to restore? */
- if (capsr & ~(1 << 16))
- ceu_write(pcdev, CAPSR, capsr);
-}
-
-#define CEU_BUS_FLAGS (V4L2_MBUS_MASTER | \
- V4L2_MBUS_PCLK_SAMPLE_RISING | \
- V4L2_MBUS_HSYNC_ACTIVE_HIGH | \
- V4L2_MBUS_HSYNC_ACTIVE_LOW | \
- V4L2_MBUS_VSYNC_ACTIVE_HIGH | \
- V4L2_MBUS_VSYNC_ACTIVE_LOW | \
- V4L2_MBUS_DATA_ACTIVE_HIGH)
-
-/* Capture is not running, no interrupts, no locking needed */
-static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd)
-{
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct sh_mobile_ceu_cam *cam = icd->host_priv;
- struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
- unsigned long value, common_flags = CEU_BUS_FLAGS;
- u32 capsr = capture_save_reset(pcdev);
- unsigned int yuv_lineskip;
- int ret;
-
- /*
- * If the client doesn't implement g_mbus_config, we just use our
- * platform data
- */
- ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
- if (!ret) {
- common_flags = soc_mbus_config_compatible(&cfg,
- common_flags);
- if (!common_flags)
- return -EINVAL;
- } else if (ret != -ENOIOCTLCMD) {
- return ret;
- }
-
-	/* Make choices based on platform preferences */
- if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
- (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
- if (pcdev->flags & SH_CEU_FLAG_HSYNC_LOW)
- common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
- else
- common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
- }
-
- if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
- (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
- if (pcdev->flags & SH_CEU_FLAG_VSYNC_LOW)
- common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
- else
- common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
- }
-
- cfg.flags = common_flags;
- ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
- if (ret < 0 && ret != -ENOIOCTLCMD)
- return ret;
-
- if (icd->current_fmt->host_fmt->bits_per_sample > 8)
- pcdev->is_16bit = 1;
- else
- pcdev->is_16bit = 0;
-
- ceu_write(pcdev, CRCNTR, 0);
- ceu_write(pcdev, CRCMPR, 0);
-
- value = 0x00000010; /* data fetch by default */
- yuv_lineskip = 0x10;
-
- switch (icd->current_fmt->host_fmt->fourcc) {
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV21:
- /* convert 4:2:2 -> 4:2:0 */
- yuv_lineskip = 0; /* skip for NV12/21, no skip for NV16/61 */
- /* fall-through */
- case V4L2_PIX_FMT_NV16:
- case V4L2_PIX_FMT_NV61:
- switch (cam->code) {
- case MEDIA_BUS_FMT_UYVY8_2X8:
- value = 0x00000000; /* Cb0, Y0, Cr0, Y1 */
- break;
- case MEDIA_BUS_FMT_VYUY8_2X8:
- value = 0x00000100; /* Cr0, Y0, Cb0, Y1 */
- break;
- case MEDIA_BUS_FMT_YUYV8_2X8:
- value = 0x00000200; /* Y0, Cb0, Y1, Cr0 */
- break;
- case MEDIA_BUS_FMT_YVYU8_2X8:
- value = 0x00000300; /* Y0, Cr0, Y1, Cb0 */
- break;
- default:
- BUG();
- }
- }
-
- if (icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV21 ||
- icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV61)
- value ^= 0x00000100; /* swap U, V to change from NV1x->NVx1 */
-
- value |= common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW ? 1 << 1 : 0;
- value |= common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW ? 1 << 0 : 0;
-
- if (pcdev->is_16bit)
- value |= 1 << 12;
- else if (pcdev->flags & SH_CEU_FLAG_LOWER_8BIT)
- value |= 2 << 12;
-
- ceu_write(pcdev, CAMCR, value);
-
- ceu_write(pcdev, CAPCR, 0x00300000);
-
- switch (pcdev->field) {
- case V4L2_FIELD_INTERLACED_TB:
- value = 0x101;
- break;
- case V4L2_FIELD_INTERLACED_BT:
- value = 0x102;
- break;
- default:
- value = 0;
- break;
- }
- ceu_write(pcdev, CAIFR, value);
-
- sh_mobile_ceu_set_rect(icd);
- mdelay(1);
-
- dev_geo(icd->parent, "CFLCR 0x%x\n", pcdev->cflcr);
- ceu_write(pcdev, CFLCR, pcdev->cflcr);
-
- /*
- * A few words about byte order (observed in Big Endian mode)
- *
- * In data fetch mode bytes are received in chunks of 8 bytes.
- * D0, D1, D2, D3, D4, D5, D6, D7 (D0 received first)
- *
- * The data is however by default written to memory in reverse order:
- * D7, D6, D5, D4, D3, D2, D1, D0 (D7 written to lowest byte)
- *
- * The lowest three bits of CDOCR allows us to do swapping,
- * using 7 we swap the data bytes to match the incoming order:
- * D0, D1, D2, D3, D4, D5, D6, D7
- */
- value = 0x00000007 | yuv_lineskip;
-
- ceu_write(pcdev, CDOCR, value);
- ceu_write(pcdev, CFWCR, 0); /* keep "datafetch firewall" disabled */
-
- capture_restore(pcdev, capsr);
-
- /* not in bundle mode: skip CBDSR, CDAYR2, CDACR2, CDBYR2, CDBCR2 */
- return 0;
-}
-
-static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd,
- unsigned char buswidth)
-{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- unsigned long common_flags = CEU_BUS_FLAGS;
- struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
- int ret;
-
- ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
- if (!ret)
- common_flags = soc_mbus_config_compatible(&cfg,
- common_flags);
- else if (ret != -ENOIOCTLCMD)
- return ret;
-
- if (!common_flags || buswidth > 16)
- return -EINVAL;
-
- return 0;
-}
-
-static const struct soc_mbus_pixelfmt sh_mobile_ceu_formats[] = {
- {
- .fourcc = V4L2_PIX_FMT_NV12,
- .name = "NV12",
- .bits_per_sample = 8,
- .packing = SOC_MBUS_PACKING_1_5X8,
- .order = SOC_MBUS_ORDER_LE,
- .layout = SOC_MBUS_LAYOUT_PLANAR_2Y_C,
- }, {
- .fourcc = V4L2_PIX_FMT_NV21,
- .name = "NV21",
- .bits_per_sample = 8,
- .packing = SOC_MBUS_PACKING_1_5X8,
- .order = SOC_MBUS_ORDER_LE,
- .layout = SOC_MBUS_LAYOUT_PLANAR_2Y_C,
- }, {
- .fourcc = V4L2_PIX_FMT_NV16,
- .name = "NV16",
- .bits_per_sample = 8,
- .packing = SOC_MBUS_PACKING_2X8_PADHI,
- .order = SOC_MBUS_ORDER_LE,
- .layout = SOC_MBUS_LAYOUT_PLANAR_Y_C,
- }, {
- .fourcc = V4L2_PIX_FMT_NV61,
- .name = "NV61",
- .bits_per_sample = 8,
- .packing = SOC_MBUS_PACKING_2X8_PADHI,
- .order = SOC_MBUS_ORDER_LE,
- .layout = SOC_MBUS_LAYOUT_PLANAR_Y_C,
- },
-};
-
-/* This will be corrected as we get more formats */
-static bool sh_mobile_ceu_packing_supported(const struct soc_mbus_pixelfmt *fmt)
-{
- return fmt->packing == SOC_MBUS_PACKING_NONE ||
- (fmt->bits_per_sample == 8 &&
- fmt->packing == SOC_MBUS_PACKING_1_5X8) ||
- (fmt->bits_per_sample == 8 &&
- fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) ||
- (fmt->bits_per_sample > 8 &&
- fmt->packing == SOC_MBUS_PACKING_EXTEND16);
-}
-
-static struct soc_camera_device *ctrl_to_icd(struct v4l2_ctrl *ctrl)
-{
- return container_of(ctrl->handler, struct soc_camera_device,
- ctrl_handler);
-}
-
-static int sh_mobile_ceu_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct soc_camera_device *icd = ctrl_to_icd(ctrl);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
-
- switch (ctrl->id) {
- case V4L2_CID_SHARPNESS:
- switch (icd->current_fmt->host_fmt->fourcc) {
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV21:
- case V4L2_PIX_FMT_NV16:
- case V4L2_PIX_FMT_NV61:
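-			/*
-			 * CLFCR appears to control the capture low-pass
-			 * filter; turning sharpness off enables the filter,
-			 * hence the inverted value (assumption based on how
-			 * the register is used here).
-			 */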
- ceu_write(pcdev, CLFCR, !ctrl->val);
- return 0;
- }
- break;
- }
-
- return -EINVAL;
-}
-
-static const struct v4l2_ctrl_ops sh_mobile_ceu_ctrl_ops = {
- .s_ctrl = sh_mobile_ceu_s_ctrl,
-};
-
-static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int idx,
- struct soc_camera_format_xlate *xlate)
-{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct device *dev = icd->parent;
- struct soc_camera_host *ici = to_soc_camera_host(dev);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
- int ret, k, n;
- int formats = 0;
- struct sh_mobile_ceu_cam *cam;
- struct v4l2_subdev_mbus_code_enum code = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- .index = idx,
- };
- const struct soc_mbus_pixelfmt *fmt;
-
- ret = v4l2_subdev_call(sd, pad, enum_mbus_code, NULL, &code);
- if (ret < 0)
- /* No more formats */
- return 0;
-
- fmt = soc_mbus_get_fmtdesc(code.code);
- if (!fmt) {
- dev_warn(dev, "unsupported format code #%u: %d\n", idx, code.code);
- return 0;
- }
-
- ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample);
- if (ret < 0)
- return 0;
-
- if (!icd->host_priv) {
- struct v4l2_subdev_format fmt = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- };
- struct v4l2_mbus_framefmt *mf = &fmt.format;
- struct v4l2_rect rect;
- int shift = 0;
-
- /* Add our control */
- v4l2_ctrl_new_std(&icd->ctrl_handler, &sh_mobile_ceu_ctrl_ops,
- V4L2_CID_SHARPNESS, 0, 1, 1, 1);
- if (icd->ctrl_handler.error)
- return icd->ctrl_handler.error;
-
- /* FIXME: subwindow is lost between close / open */
-
- /* Cache current client geometry */
- ret = soc_camera_client_g_rect(sd, &rect);
- if (ret < 0)
- return ret;
-
- /* First time */
- ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
- if (ret < 0)
- return ret;
-
-		/*
-		 * All currently existing CEU implementations support 2560x1920
-		 * or larger frames. If the sensor is proposing too big a frame,
-		 * don't bother with the larger sizes the CEU possibly supports,
-		 * just try VGA multiples. If needed, this can be adjusted in
-		 * the future.
-		 */
- while ((mf->width > pcdev->max_width ||
- mf->height > pcdev->max_height) && shift < 4) {
- /* Try 2560x1920, 1280x960, 640x480, 320x240 */
- mf->width = 2560 >> shift;
- mf->height = 1920 >> shift;
- ret = v4l2_device_call_until_err(sd->v4l2_dev,
- soc_camera_grp_id(icd), pad,
- set_fmt, NULL, &fmt);
- if (ret < 0)
- return ret;
- shift++;
- }
-
- if (shift == 4) {
-			dev_err(dev, "Failed to configure the client below %ux%u\n",
- mf->width, mf->height);
- return -EIO;
- }
-
- dev_geo(dev, "camera fmt %ux%u\n", mf->width, mf->height);
-
- cam = kzalloc(sizeof(*cam), GFP_KERNEL);
- if (!cam)
- return -ENOMEM;
-
- /* We are called with current camera crop, initialise subrect with it */
- cam->rect = rect;
- cam->subrect = rect;
-
- cam->width = mf->width;
- cam->height = mf->height;
-
- icd->host_priv = cam;
- } else {
- cam = icd->host_priv;
- }
-
- /* Beginning of a pass */
- if (!idx)
- cam->extra_fmt = NULL;
-
- switch (code.code) {
- case MEDIA_BUS_FMT_UYVY8_2X8:
- case MEDIA_BUS_FMT_VYUY8_2X8:
- case MEDIA_BUS_FMT_YUYV8_2X8:
- case MEDIA_BUS_FMT_YVYU8_2X8:
- if (cam->extra_fmt)
- break;
-
- /*
- * Our case is simple so far: for any of the above four camera
- * formats we add all our four synthesized NV* formats, so,
- * just marking the device with a single flag suffices. If
- * the format generation rules are more complex, you would have
- * to actually hang your already added / counted formats onto
- * the host_priv pointer and check whether the format you're
- * going to add now is already there.
- */
- cam->extra_fmt = sh_mobile_ceu_formats;
-
- n = ARRAY_SIZE(sh_mobile_ceu_formats);
- formats += n;
- for (k = 0; xlate && k < n; k++) {
- xlate->host_fmt = &sh_mobile_ceu_formats[k];
- xlate->code = code.code;
- xlate++;
- dev_dbg(dev, "Providing format %s using code %d\n",
- sh_mobile_ceu_formats[k].name, code.code);
- }
- break;
- default:
- if (!sh_mobile_ceu_packing_supported(fmt))
- return 0;
- }
-
- /* Generic pass-through */
- formats++;
- if (xlate) {
- xlate->host_fmt = fmt;
- xlate->code = code.code;
- xlate++;
- dev_dbg(dev, "Providing format %s in pass-through mode\n",
- fmt->name);
- }
-
- return formats;
-}
-
-static void sh_mobile_ceu_put_formats(struct soc_camera_device *icd)
-{
- kfree(icd->host_priv);
- icd->host_priv = NULL;
-}
-
-#define scale_down(size, scale) soc_camera_shift_scale(size, 12, scale)
-#define calc_generic_scale(in, out) soc_camera_calc_scale(in, 12, out)
-
-/*
- * CEU can scale and crop, but we don't want to waste bandwidth and kill the
- * framerate by always requesting the maximum image from the client. See
- * Documentation/media/v4l-drivers/sh_mobile_ceu_camera.rst for a description of
- * scaling and cropping algorithms and for the meaning of the steps referenced here.
- */
-static int sh_mobile_ceu_set_selection(struct soc_camera_device *icd,
- struct v4l2_selection *sel)
-{
- struct v4l2_rect *rect = &sel->r;
- struct device *dev = icd->parent;
- struct soc_camera_host *ici = to_soc_camera_host(dev);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
- struct v4l2_selection cam_sel;
- struct sh_mobile_ceu_cam *cam = icd->host_priv;
- struct v4l2_rect *cam_rect = &cam_sel.r;
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct v4l2_subdev_format fmt = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- };
- struct v4l2_mbus_framefmt *mf = &fmt.format;
- unsigned int scale_cam_h, scale_cam_v, scale_ceu_h, scale_ceu_v,
- out_width, out_height;
- int interm_width, interm_height;
- u32 capsr, cflcr;
- int ret;
-
- dev_geo(dev, "S_SELECTION(%ux%u@%u:%u)\n", rect->width, rect->height,
- rect->left, rect->top);
-
- /* During camera cropping its output window can change too, stop CEU */
- capsr = capture_save_reset(pcdev);
- dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr);
-
- /*
- * 1. - 2. Apply iterative camera S_SELECTION for new input window, read back
- * actual camera rectangle.
- */
- ret = soc_camera_client_s_selection(sd, sel, &cam_sel,
- &cam->rect, &cam->subrect);
- if (ret < 0)
- return ret;
-
- dev_geo(dev, "1-2: camera cropped to %ux%u@%u:%u\n",
- cam_rect->width, cam_rect->height,
- cam_rect->left, cam_rect->top);
-
-	/* On success cam_rect contains the current camera crop */
-
- /* 3. Retrieve camera output window */
- ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
- if (ret < 0)
- return ret;
-
- if (mf->width > pcdev->max_width || mf->height > pcdev->max_height)
- return -EINVAL;
-
- /* 4. Calculate camera scales */
- scale_cam_h = calc_generic_scale(cam_rect->width, mf->width);
- scale_cam_v = calc_generic_scale(cam_rect->height, mf->height);
-
- /* Calculate intermediate window */
- interm_width = scale_down(rect->width, scale_cam_h);
- interm_height = scale_down(rect->height, scale_cam_v);
-
- if (interm_width < icd->user_width) {
- u32 new_scale_h;
-
- new_scale_h = calc_generic_scale(rect->width, icd->user_width);
-
- mf->width = scale_down(cam_rect->width, new_scale_h);
- }
-
- if (interm_height < icd->user_height) {
- u32 new_scale_v;
-
- new_scale_v = calc_generic_scale(rect->height, icd->user_height);
-
- mf->height = scale_down(cam_rect->height, new_scale_v);
- }
-
- if (interm_width < icd->user_width || interm_height < icd->user_height) {
- ret = v4l2_device_call_until_err(sd->v4l2_dev,
- soc_camera_grp_id(icd), pad,
- set_fmt, NULL, &fmt);
- if (ret < 0)
- return ret;
-
- dev_geo(dev, "New camera output %ux%u\n", mf->width, mf->height);
- scale_cam_h = calc_generic_scale(cam_rect->width, mf->width);
- scale_cam_v = calc_generic_scale(cam_rect->height, mf->height);
- interm_width = scale_down(rect->width, scale_cam_h);
- interm_height = scale_down(rect->height, scale_cam_v);
- }
-
- /* Cache camera output window */
- cam->width = mf->width;
- cam->height = mf->height;
-
- if (pcdev->image_mode) {
- out_width = min(interm_width, icd->user_width);
- out_height = min(interm_height, icd->user_height);
- } else {
- out_width = interm_width;
- out_height = interm_height;
- }
-
-	/*
-	 * 5. Calculate CEU scales from the camera scales obtained in (4) and
-	 * the user window
-	 */
- scale_ceu_h = calc_scale(interm_width, &out_width);
- scale_ceu_v = calc_scale(interm_height, &out_height);
-
- dev_geo(dev, "5: CEU scales %u:%u\n", scale_ceu_h, scale_ceu_v);
-
- /* Apply CEU scales. */
- cflcr = scale_ceu_h | (scale_ceu_v << 16);
- if (cflcr != pcdev->cflcr) {
- pcdev->cflcr = cflcr;
- ceu_write(pcdev, CFLCR, cflcr);
- }
-
- icd->user_width = out_width & ~3;
- icd->user_height = out_height & ~3;
- /* Offsets are applied at the CEU scaling filter input */
- cam->ceu_left = scale_down(rect->left - cam_rect->left, scale_cam_h) & ~1;
- cam->ceu_top = scale_down(rect->top - cam_rect->top, scale_cam_v) & ~1;
-
- /* 6. Use CEU cropping to crop to the new window. */
- sh_mobile_ceu_set_rect(icd);
-
- cam->subrect = *rect;
-
- dev_geo(dev, "6: CEU cropped to %ux%u@%u:%u\n",
- icd->user_width, icd->user_height,
- cam->ceu_left, cam->ceu_top);
-
- /* Restore capture. The CE bit can be cleared by the hardware */
- if (pcdev->active)
- capsr |= 1;
- capture_restore(pcdev, capsr);
-
- /* Even if only camera cropping succeeded */
- return ret;
-}
-
-static int sh_mobile_ceu_get_selection(struct soc_camera_device *icd,
- struct v4l2_selection *sel)
-{
- struct sh_mobile_ceu_cam *cam = icd->host_priv;
-
- sel->r = cam->subrect;
-
- return 0;
-}
-
-/* Similar to the multistage iterative algorithm in sh_mobile_ceu_set_selection() */
-static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
- struct v4l2_format *f)
-{
- struct device *dev = icd->parent;
- struct soc_camera_host *ici = to_soc_camera_host(dev);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
- struct sh_mobile_ceu_cam *cam = icd->host_priv;
- struct v4l2_pix_format *pix = &f->fmt.pix;
- struct v4l2_mbus_framefmt mf;
- __u32 pixfmt = pix->pixelformat;
- const struct soc_camera_format_xlate *xlate;
- unsigned int ceu_sub_width = pcdev->max_width,
- ceu_sub_height = pcdev->max_height;
- u16 scale_v, scale_h;
- int ret;
- bool image_mode;
- enum v4l2_field field;
-
- switch (pix->field) {
- default:
- pix->field = V4L2_FIELD_NONE;
- /* fall-through */
- case V4L2_FIELD_INTERLACED_TB:
- case V4L2_FIELD_INTERLACED_BT:
- case V4L2_FIELD_NONE:
- field = pix->field;
- break;
- case V4L2_FIELD_INTERLACED:
- field = V4L2_FIELD_INTERLACED_TB;
- break;
- }
-
- xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
- if (!xlate) {
- dev_warn(dev, "Format %x not found\n", pixfmt);
- return -EINVAL;
- }
-
- /* 1.-4. Calculate desired client output geometry */
- soc_camera_calc_client_output(icd, &cam->rect, &cam->subrect, pix, &mf, 12);
- mf.field = pix->field;
- mf.colorspace = pix->colorspace;
- mf.code = xlate->code;
-
- switch (pixfmt) {
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV21:
- case V4L2_PIX_FMT_NV16:
- case V4L2_PIX_FMT_NV61:
- image_mode = true;
- break;
- default:
- image_mode = false;
- }
-
- dev_geo(dev, "S_FMT(pix=0x%x, fld 0x%x, code 0x%x, %ux%u)\n", pixfmt, mf.field, mf.code,
- pix->width, pix->height);
-
- dev_geo(dev, "4: request camera output %ux%u\n", mf.width, mf.height);
-
- /* 5. - 9. */
- ret = soc_camera_client_scale(icd, &cam->rect, &cam->subrect,
- &mf, &ceu_sub_width, &ceu_sub_height,
- image_mode && V4L2_FIELD_NONE == field, 12);
-
- dev_geo(dev, "5-9: client scale return %d\n", ret);
-
- /* Done with the camera. Now see if we can improve the result */
-
- dev_geo(dev, "fmt %ux%u, requested %ux%u\n",
- mf.width, mf.height, pix->width, pix->height);
- if (ret < 0)
- return ret;
-
- if (mf.code != xlate->code)
- return -EINVAL;
-
- /* 9. Prepare CEU crop */
- cam->width = mf.width;
- cam->height = mf.height;
-
- /* 10. Use CEU scaling to scale to the requested user window. */
-
- /* We cannot scale up */
- if (pix->width > ceu_sub_width)
- ceu_sub_width = pix->width;
-
- if (pix->height > ceu_sub_height)
- ceu_sub_height = pix->height;
-
- pix->colorspace = mf.colorspace;
-
- if (image_mode) {
- /* Scale pix->{width x height} down to width x height */
- scale_h = calc_scale(ceu_sub_width, &pix->width);
- scale_v = calc_scale(ceu_sub_height, &pix->height);
- } else {
- pix->width = ceu_sub_width;
- pix->height = ceu_sub_height;
- scale_h = 0;
- scale_v = 0;
- }
-
- pcdev->cflcr = scale_h | (scale_v << 16);
-
- /*
- * We have calculated CFLCR, the actual configuration will be performed
- * in sh_mobile_ceu_set_bus_param()
- */
-
- dev_geo(dev, "10: W: %u : 0x%x = %u, H: %u : 0x%x = %u\n",
- ceu_sub_width, scale_h, pix->width,
- ceu_sub_height, scale_v, pix->height);
-
- cam->code = xlate->code;
- icd->current_fmt = xlate;
-
- pcdev->field = field;
- pcdev->image_mode = image_mode;
-
- /* CFSZR requirement */
- pix->width &= ~3;
- pix->height &= ~3;
-
- return 0;
-}
-
-#define CEU_CHDW_MAX 8188U /* Maximum line stride */
-
-static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
- struct v4l2_format *f)
-{
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
- const struct soc_camera_format_xlate *xlate;
- struct v4l2_pix_format *pix = &f->fmt.pix;
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct v4l2_subdev_pad_config pad_cfg;
- struct v4l2_subdev_format format = {
- .which = V4L2_SUBDEV_FORMAT_TRY,
- };
- struct v4l2_mbus_framefmt *mf = &format.format;
- __u32 pixfmt = pix->pixelformat;
- int width, height;
- int ret;
-
- dev_geo(icd->parent, "TRY_FMT(pix=0x%x, %ux%u)\n",
- pixfmt, pix->width, pix->height);
-
- xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
- if (!xlate) {
- xlate = icd->current_fmt;
- dev_dbg(icd->parent, "Format %x not found, keeping %x\n",
- pixfmt, xlate->host_fmt->fourcc);
- pixfmt = xlate->host_fmt->fourcc;
- pix->pixelformat = pixfmt;
- pix->colorspace = icd->colorspace;
- }
-
- /* FIXME: calculate using depth and bus width */
-
- /* CFSZR requires height and width to be 4-pixel aligned */
- v4l_bound_align_image(&pix->width, 2, pcdev->max_width, 2,
- &pix->height, 4, pcdev->max_height, 2, 0);
-
- width = pix->width;
- height = pix->height;
-
- /* limit to sensor capabilities */
- mf->width = pix->width;
- mf->height = pix->height;
- mf->field = pix->field;
- mf->code = xlate->code;
- mf->colorspace = pix->colorspace;
-
- ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd),
- pad, set_fmt, &pad_cfg, &format);
- if (ret < 0)
- return ret;
-
- pix->width = mf->width;
- pix->height = mf->height;
- pix->field = mf->field;
- pix->colorspace = mf->colorspace;
-
- switch (pixfmt) {
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV21:
- case V4L2_PIX_FMT_NV16:
- case V4L2_PIX_FMT_NV61:
- /* FIXME: check against rect_max after converting soc-camera */
- /* We can scale precisely, need a bigger image from camera */
- if (pix->width < width || pix->height < height) {
-			/*
-			 * We presume the sensor behaves sanely, i.e., if
-			 * asked for a bigger rectangle, it will not return a
-			 * smaller one.
-			 */
- mf->width = pcdev->max_width;
- mf->height = pcdev->max_height;
- ret = v4l2_device_call_until_err(sd->v4l2_dev,
- soc_camera_grp_id(icd), pad,
- set_fmt, &pad_cfg, &format);
- if (ret < 0) {
- /* Shouldn't actually happen... */
- dev_err(icd->parent,
- "FIXME: client try_fmt() = %d\n", ret);
- return ret;
- }
- }
- /* We will scale exactly */
- if (mf->width > width)
- pix->width = width;
- if (mf->height > height)
- pix->height = height;
-
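-		/*
-		 * The line stride must cover at least the image width, fit
-		 * into CDWDR (at most CEU_CHDW_MAX bytes) and be a multiple
-		 * of 4.
-		 */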
- pix->bytesperline = max(pix->bytesperline, pix->width);
- pix->bytesperline = min(pix->bytesperline, CEU_CHDW_MAX);
- pix->bytesperline &= ~3;
- break;
-
- default:
- /* Configurable stride isn't supported in pass-through mode. */
- pix->bytesperline = 0;
- }
-
- pix->width &= ~3;
- pix->height &= ~3;
- pix->sizeimage = 0;
-
- dev_geo(icd->parent, "%s(): return %d, fmt 0x%x, %ux%u\n",
- __func__, ret, pix->pixelformat, pix->width, pix->height);
-
- return ret;
-}
-
-static int sh_mobile_ceu_set_liveselection(struct soc_camera_device *icd,
- struct v4l2_selection *sel)
-{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
- u32 out_width = icd->user_width, out_height = icd->user_height;
- int ret;
-
- /* Freeze queue */
- pcdev->frozen = 1;
- /* Wait for frame */
- ret = wait_for_completion_interruptible(&pcdev->complete);
- /* Stop the client */
- ret = v4l2_subdev_call(sd, video, s_stream, 0);
- if (ret < 0)
- dev_warn(icd->parent,
- "Client failed to stop the stream: %d\n", ret);
- else
-		/* Do the crop; if it fails, there's nothing more we can do */
- sh_mobile_ceu_set_selection(icd, sel);
-
- dev_geo(icd->parent, "Output after crop: %ux%u\n", icd->user_width, icd->user_height);
-
- if (icd->user_width != out_width || icd->user_height != out_height) {
- struct v4l2_format f = {
- .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
- .fmt.pix = {
- .width = out_width,
- .height = out_height,
- .pixelformat = icd->current_fmt->host_fmt->fourcc,
- .field = pcdev->field,
- .colorspace = icd->colorspace,
- },
- };
- ret = sh_mobile_ceu_set_fmt(icd, &f);
- if (!ret && (out_width != f.fmt.pix.width ||
- out_height != f.fmt.pix.height))
- ret = -EINVAL;
- if (!ret) {
- icd->user_width = out_width & ~3;
- icd->user_height = out_height & ~3;
- ret = sh_mobile_ceu_set_bus_param(icd);
- }
- }
-
- /* Thaw the queue */
- pcdev->frozen = 0;
- spin_lock_irq(&pcdev->lock);
- sh_mobile_ceu_capture(pcdev);
- spin_unlock_irq(&pcdev->lock);
- /* Start the client */
- ret = v4l2_subdev_call(sd, video, s_stream, 1);
- return ret;
-}
-
-static __poll_t sh_mobile_ceu_poll(struct file *file, poll_table *pt)
-{
- struct soc_camera_device *icd = file->private_data;
-
- return vb2_poll(&icd->vb2_vidq, file, pt);
-}
-
-static int sh_mobile_ceu_querycap(struct soc_camera_host *ici,
- struct v4l2_capability *cap)
-{
- strscpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card));
- strscpy(cap->driver, "sh_mobile_ceu", sizeof(cap->driver));
- strscpy(cap->bus_info, "platform:sh_mobile_ceu", sizeof(cap->bus_info));
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
-
- return 0;
-}
-
-static int sh_mobile_ceu_init_videobuf(struct vb2_queue *q,
- struct soc_camera_device *icd)
-{
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-
- q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_USERPTR;
- q->drv_priv = icd;
- q->ops = &sh_mobile_ceu_videobuf_ops;
- q->mem_ops = &vb2_dma_contig_memops;
- q->buf_struct_size = sizeof(struct sh_mobile_ceu_buffer);
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->lock = &ici->host_lock;
- q->dev = ici->v4l2_dev.dev;
-
- return vb2_queue_init(q);
-}
-
-static struct soc_camera_host_ops sh_mobile_ceu_host_ops = {
- .owner = THIS_MODULE,
- .add = sh_mobile_ceu_add_device,
- .remove = sh_mobile_ceu_remove_device,
- .clock_start = sh_mobile_ceu_clock_start,
- .clock_stop = sh_mobile_ceu_clock_stop,
- .get_formats = sh_mobile_ceu_get_formats,
- .put_formats = sh_mobile_ceu_put_formats,
- .get_selection = sh_mobile_ceu_get_selection,
- .set_selection = sh_mobile_ceu_set_selection,
- .set_liveselection = sh_mobile_ceu_set_liveselection,
- .set_fmt = sh_mobile_ceu_set_fmt,
- .try_fmt = sh_mobile_ceu_try_fmt,
- .poll = sh_mobile_ceu_poll,
- .querycap = sh_mobile_ceu_querycap,
- .set_bus_param = sh_mobile_ceu_set_bus_param,
- .init_videobuf2 = sh_mobile_ceu_init_videobuf,
-};
-
-struct bus_wait {
- struct notifier_block notifier;
- struct completion completion;
- struct device *dev;
-};
-
-static int bus_notify(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct device *dev = data;
- struct bus_wait *wait = container_of(nb, struct bus_wait, notifier);
-
- if (wait->dev != dev)
- return NOTIFY_DONE;
-
- switch (action) {
- case BUS_NOTIFY_UNBOUND_DRIVER:
- /* Protect from module unloading */
- wait_for_completion(&wait->completion);
- return NOTIFY_OK;
- }
- return NOTIFY_DONE;
-}
-
-static int sh_mobile_ceu_probe(struct platform_device *pdev)
-{
- struct sh_mobile_ceu_dev *pcdev;
- struct resource *res;
- void __iomem *base;
- unsigned int irq;
- int err;
- struct bus_wait wait = {
- .completion = COMPLETION_INITIALIZER_ONSTACK(wait.completion),
- .notifier.notifier_call = bus_notify,
- };
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irq = platform_get_irq(pdev, 0);
- if (!res || (int)irq <= 0) {
- dev_err(&pdev->dev, "Not enough CEU platform resources.\n");
- return -ENODEV;
- }
-
- pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL);
- if (!pcdev) {
- dev_err(&pdev->dev, "Could not allocate pcdev\n");
- return -ENOMEM;
- }
-
- INIT_LIST_HEAD(&pcdev->capture);
- spin_lock_init(&pcdev->lock);
- init_completion(&pcdev->complete);
-
- pcdev->pdata = pdev->dev.platform_data;
- if (!pcdev->pdata && !pdev->dev.of_node) {
- dev_err(&pdev->dev, "CEU platform data not set.\n");
- return -EINVAL;
- }
-
- /* TODO: implement per-device bus flags */
- if (pcdev->pdata) {
- pcdev->max_width = pcdev->pdata->max_width;
- pcdev->max_height = pcdev->pdata->max_height;
- pcdev->flags = pcdev->pdata->flags;
- }
- pcdev->field = V4L2_FIELD_NONE;
-
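-	/*
-	 * With neither platform data nor DT limits, fall back to 2560x1920,
-	 * which all known CEU implementations support.
-	 */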
- if (!pcdev->max_width) {
- unsigned int v;
- err = of_property_read_u32(pdev->dev.of_node, "renesas,max-width", &v);
- if (!err)
- pcdev->max_width = v;
-
- if (!pcdev->max_width)
- pcdev->max_width = 2560;
- }
- if (!pcdev->max_height) {
- unsigned int v;
- err = of_property_read_u32(pdev->dev.of_node, "renesas,max-height", &v);
- if (!err)
- pcdev->max_height = v;
-
- if (!pcdev->max_height)
- pcdev->max_height = 1920;
- }
-
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- pcdev->irq = irq;
- pcdev->base = base;
- pcdev->video_limit = 0; /* only enabled if second resource exists */
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (res) {
- err = dma_declare_coherent_memory(&pdev->dev, res->start,
- res->start,
- resource_size(res),
- DMA_MEMORY_EXCLUSIVE);
- if (err) {
- dev_err(&pdev->dev, "Unable to declare CEU memory.\n");
- return err;
- }
-
- pcdev->video_limit = resource_size(res);
- }
-
- /* request irq */
- err = devm_request_irq(&pdev->dev, pcdev->irq, sh_mobile_ceu_irq,
- 0, dev_name(&pdev->dev), pcdev);
- if (err) {
- dev_err(&pdev->dev, "Unable to register CEU interrupt.\n");
- goto exit_release_mem;
- }
-
- pm_suspend_ignore_children(&pdev->dev, true);
- pm_runtime_enable(&pdev->dev);
- pm_runtime_resume(&pdev->dev);
-
- pcdev->ici.priv = pcdev;
- pcdev->ici.v4l2_dev.dev = &pdev->dev;
- pcdev->ici.nr = pdev->id;
- pcdev->ici.drv_name = dev_name(&pdev->dev);
- pcdev->ici.ops = &sh_mobile_ceu_host_ops;
- pcdev->ici.capabilities = SOCAM_HOST_CAP_STRIDE;
-
- if (pcdev->pdata && pcdev->pdata->asd_sizes) {
- pcdev->ici.asd = pcdev->pdata->asd;
- pcdev->ici.asd_sizes = pcdev->pdata->asd_sizes;
- }
-
- err = soc_camera_host_register(&pcdev->ici);
- if (err)
- goto exit_free_clk;
-
- return 0;
-
-exit_free_clk:
- pm_runtime_disable(&pdev->dev);
-exit_release_mem:
- if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
- dma_release_declared_memory(&pdev->dev);
- return err;
-}
-
-static int sh_mobile_ceu_remove(struct platform_device *pdev)
-{
- struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
-
- soc_camera_host_unregister(soc_host);
- pm_runtime_disable(&pdev->dev);
- if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
- dma_release_declared_memory(&pdev->dev);
-
- return 0;
-}
-
-static int sh_mobile_ceu_runtime_nop(struct device *dev)
-{
- /* Runtime PM callback shared between ->runtime_suspend()
- * and ->runtime_resume(). Simply returns success.
- *
- * This driver re-initializes all registers after
- * pm_runtime_get_sync() anyway so there is no need
- * to save and restore registers here.
- */
- return 0;
-}
-
-static const struct dev_pm_ops sh_mobile_ceu_dev_pm_ops = {
- .runtime_suspend = sh_mobile_ceu_runtime_nop,
- .runtime_resume = sh_mobile_ceu_runtime_nop,
-};
-
-static const struct of_device_id sh_mobile_ceu_of_match[] = {
- { .compatible = "renesas,sh-mobile-ceu" },
- { }
-};
-MODULE_DEVICE_TABLE(of, sh_mobile_ceu_of_match);
-
-static struct platform_driver sh_mobile_ceu_driver = {
- .driver = {
- .name = "sh_mobile_ceu",
- .pm = &sh_mobile_ceu_dev_pm_ops,
- .of_match_table = sh_mobile_ceu_of_match,
- },
- .probe = sh_mobile_ceu_probe,
- .remove = sh_mobile_ceu_remove,
-};
-
-module_platform_driver(sh_mobile_ceu_driver);
-
-MODULE_DESCRIPTION("SuperH Mobile CEU driver");
-MODULE_AUTHOR("Magnus Damm");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("0.1.0");
-MODULE_ALIAS("platform:sh_mobile_ceu");
diff --git a/drivers/media/platform/soc_camera/soc_camera_platform.c b/drivers/media/platform/soc_camera/soc_camera_platform.c
deleted file mode 100644
index 79fbe1fea95f..000000000000
--- a/drivers/media/platform/soc_camera/soc_camera_platform.c
+++ /dev/null
@@ -1,188 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Generic Platform Camera Driver
- *
- * Copyright (C) 2008 Magnus Damm
- * Based on mt9m001 driver,
- * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/videodev2.h>
-#include <media/v4l2-subdev.h>
-#include <media/soc_camera.h>
-#include <linux/platform_data/media/soc_camera_platform.h>
-
-struct soc_camera_platform_priv {
- struct v4l2_subdev subdev;
-};
-
-static struct soc_camera_platform_priv *get_priv(struct platform_device *pdev)
-{
- struct v4l2_subdev *subdev = platform_get_drvdata(pdev);
- return container_of(subdev, struct soc_camera_platform_priv, subdev);
-}
-
-static int soc_camera_platform_s_stream(struct v4l2_subdev *sd, int enable)
-{
- struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
- return p->set_capture(p, enable);
-}
-
-static int soc_camera_platform_fill_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-{
- struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *mf = &format->format;
-
- mf->width = p->format.width;
- mf->height = p->format.height;
- mf->code = p->format.code;
- mf->colorspace = p->format.colorspace;
- mf->field = p->format.field;
-
- return 0;
-}
-
-static int soc_camera_platform_s_power(struct v4l2_subdev *sd, int on)
-{
- struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
-
- return soc_camera_set_power(p->icd->control, &p->icd->sdesc->subdev_desc, NULL, on);
-}
-
-static const struct v4l2_subdev_core_ops platform_subdev_core_ops = {
- .s_power = soc_camera_platform_s_power,
-};
-
-static int soc_camera_platform_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
-
- if (code->pad || code->index)
- return -EINVAL;
-
- code->code = p->format.code;
- return 0;
-}
-
-static int soc_camera_platform_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_selection *sel)
-{
- struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
-
- if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
-
- switch (sel->target) {
- case V4L2_SEL_TGT_CROP_BOUNDS:
- case V4L2_SEL_TGT_CROP_DEFAULT:
- case V4L2_SEL_TGT_CROP:
- sel->r.left = 0;
- sel->r.top = 0;
- sel->r.width = p->format.width;
- sel->r.height = p->format.height;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-static int soc_camera_platform_g_mbus_config(struct v4l2_subdev *sd,
- struct v4l2_mbus_config *cfg)
-{
- struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
-
- cfg->flags = p->mbus_param;
- cfg->type = p->mbus_type;
-
- return 0;
-}
-
-static const struct v4l2_subdev_video_ops platform_subdev_video_ops = {
- .s_stream = soc_camera_platform_s_stream,
- .g_mbus_config = soc_camera_platform_g_mbus_config,
-};
-
-static const struct v4l2_subdev_pad_ops platform_subdev_pad_ops = {
- .enum_mbus_code = soc_camera_platform_enum_mbus_code,
- .get_selection = soc_camera_platform_get_selection,
- .get_fmt = soc_camera_platform_fill_fmt,
- .set_fmt = soc_camera_platform_fill_fmt,
-};
-
-static const struct v4l2_subdev_ops platform_subdev_ops = {
- .core = &platform_subdev_core_ops,
- .video = &platform_subdev_video_ops,
- .pad = &platform_subdev_pad_ops,
-};
-
-static int soc_camera_platform_probe(struct platform_device *pdev)
-{
- struct soc_camera_host *ici;
- struct soc_camera_platform_priv *priv;
- struct soc_camera_platform_info *p = pdev->dev.platform_data;
- struct soc_camera_device *icd;
-
- if (!p)
- return -EINVAL;
-
- if (!p->icd) {
- dev_err(&pdev->dev,
- "Platform has not set soc_camera_device pointer!\n");
- return -EINVAL;
- }
-
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- icd = p->icd;
-
- /* soc-camera convention: control's drvdata points to the subdev */
- platform_set_drvdata(pdev, &priv->subdev);
- /* Set the control device reference */
- icd->control = &pdev->dev;
-
- ici = to_soc_camera_host(icd->parent);
-
- v4l2_subdev_init(&priv->subdev, &platform_subdev_ops);
- v4l2_set_subdevdata(&priv->subdev, p);
- strscpy(priv->subdev.name, dev_name(&pdev->dev),
- sizeof(priv->subdev.name));
-
- return v4l2_device_register_subdev(&ici->v4l2_dev, &priv->subdev);
-}
-
-static int soc_camera_platform_remove(struct platform_device *pdev)
-{
- struct soc_camera_platform_priv *priv = get_priv(pdev);
- struct soc_camera_platform_info *p = v4l2_get_subdevdata(&priv->subdev);
-
- p->icd->control = NULL;
- v4l2_device_unregister_subdev(&priv->subdev);
- return 0;
-}
-
-static struct platform_driver soc_camera_platform_driver = {
- .driver = {
- .name = "soc_camera_platform",
- },
- .probe = soc_camera_platform_probe,
- .remove = soc_camera_platform_remove,
-};
-
-module_platform_driver(soc_camera_platform_driver);
-
-MODULE_DESCRIPTION("SoC Camera Platform driver");
-MODULE_AUTHOR("Magnus Damm");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:soc_camera_platform");
diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.c b/drivers/media/platform/soc_camera/soc_scale_crop.c
deleted file mode 100644
index 8d25ca0490f7..000000000000
--- a/drivers/media/platform/soc_camera/soc_scale_crop.c
+++ /dev/null
@@ -1,426 +0,0 @@
-/*
- * soc-camera generic scaling-cropping manipulation functions
- *
- * Copyright (C) 2013 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/device.h>
-#include <linux/module.h>
-
-#include <media/soc_camera.h>
-#include <media/v4l2-common.h>
-
-#include "soc_scale_crop.h"
-
-#ifdef DEBUG_GEOMETRY
-#define dev_geo dev_info
-#else
-#define dev_geo dev_dbg
-#endif
-
-/* Check if any dimension of r1 is smaller than the respective one of r2 */
-static bool is_smaller(const struct v4l2_rect *r1, const struct v4l2_rect *r2)
-{
- return r1->width < r2->width || r1->height < r2->height;
-}
-
-/* Check if r1 fails to cover r2 */
-static bool is_inside(const struct v4l2_rect *r1, const struct v4l2_rect *r2)
-{
- return r1->left > r2->left || r1->top > r2->top ||
- r1->left + r1->width < r2->left + r2->width ||
- r1->top + r1->height < r2->top + r2->height;
-}
-
-/* Get and store current client crop */
-int soc_camera_client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect)
-{
- struct v4l2_subdev_selection sdsel = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- .target = V4L2_SEL_TGT_CROP,
- };
- int ret;
-
- ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel);
- if (!ret) {
- *rect = sdsel.r;
- return ret;
- }
-
- sdsel.target = V4L2_SEL_TGT_CROP_BOUNDS;
- ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel);
- if (!ret)
- *rect = sdsel.r;
-
- return ret;
-}
-EXPORT_SYMBOL(soc_camera_client_g_rect);
-
-/* Client crop has changed, update our sub-rectangle to remain within the area */
-static void move_and_crop_subrect(struct v4l2_rect *rect,
- struct v4l2_rect *subrect)
-{
- if (rect->width < subrect->width)
- subrect->width = rect->width;
-
- if (rect->height < subrect->height)
- subrect->height = rect->height;
-
- if (rect->left > subrect->left)
- subrect->left = rect->left;
- else if (rect->left + rect->width <
- subrect->left + subrect->width)
- subrect->left = rect->left + rect->width -
- subrect->width;
-
- if (rect->top > subrect->top)
- subrect->top = rect->top;
- else if (rect->top + rect->height <
- subrect->top + subrect->height)
- subrect->top = rect->top + rect->height -
- subrect->height;
-}
-
-/*
- * The common for both scaling and cropping iterative approach is:
- * 1. try if the client can produce exactly what requested by the user
- * 2. if (1) failed, try to double the client image until we get one big enough
- * 3. if (2) failed, try to request the maximum image
- */
-int soc_camera_client_s_selection(struct v4l2_subdev *sd,
- struct v4l2_selection *sel, struct v4l2_selection *cam_sel,
- struct v4l2_rect *target_rect, struct v4l2_rect *subrect)
-{
- struct v4l2_subdev_selection sdsel = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- .target = sel->target,
- .flags = sel->flags,
- .r = sel->r,
- };
- struct v4l2_subdev_selection bounds = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- .target = V4L2_SEL_TGT_CROP_BOUNDS,
- };
- struct v4l2_rect *rect = &sel->r, *cam_rect = &cam_sel->r;
- struct device *dev = sd->v4l2_dev->dev;
- int ret;
- unsigned int width, height;
-
- v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel);
- sel->r = sdsel.r;
- ret = soc_camera_client_g_rect(sd, cam_rect);
- if (ret < 0)
- return ret;
-
-	/*
-	 * Now cam_rect contains the current camera input rectangle, and it
-	 * must be within the camera crop bounds
-	 */
- if (!memcmp(rect, cam_rect, sizeof(*rect))) {
-		/* Even if camera S_SELECTION failed, the camera rectangle matches */
- dev_dbg(dev, "Camera S_SELECTION successful for %dx%d@%d:%d\n",
- rect->width, rect->height, rect->left, rect->top);
- *target_rect = *cam_rect;
- return 0;
- }
-
-	/* Try to fix the cropping that the camera hasn't managed to set */
- dev_geo(dev, "Fix camera S_SELECTION for %dx%d@%d:%d to %dx%d@%d:%d\n",
- cam_rect->width, cam_rect->height,
- cam_rect->left, cam_rect->top,
- rect->width, rect->height, rect->left, rect->top);
-
- /* We need sensor maximum rectangle */
- ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &bounds);
- if (ret < 0)
- return ret;
-
- /* Put user requested rectangle within sensor bounds */
- soc_camera_limit_side(&rect->left, &rect->width, sdsel.r.left, 2,
- bounds.r.width);
- soc_camera_limit_side(&rect->top, &rect->height, sdsel.r.top, 4,
- bounds.r.height);
-
- /*
- * Popular special case - some cameras can only handle fixed sizes like
- * QVGA, VGA, ... Take care to avoid an infinite loop.
- */
- width = max_t(unsigned int, cam_rect->width, 2);
- height = max_t(unsigned int, cam_rect->height, 2);
-
- /*
- * Loop as long as sensor is not covering the requested rectangle and
- * is still within its bounds
- */
- while (!ret && (is_smaller(cam_rect, rect) ||
- is_inside(cam_rect, rect)) &&
- (bounds.r.width > width || bounds.r.height > height)) {
-
- width *= 2;
- height *= 2;
-
- cam_rect->width = width;
- cam_rect->height = height;
-
- /*
- * We do not know what capabilities the camera has to set up
- * left and top borders. We could try to be smarter in iterating
- * them, e.g., if camera current left is to the right of the
- * target left, set it to the middle point between the current
- * left and minimum left. But that would add too much
- * complexity: we would have to iterate each border separately.
- * Instead we just drop to the left and top bounds.
- */
- if (cam_rect->left > rect->left)
- cam_rect->left = bounds.r.left;
-
- if (cam_rect->left + cam_rect->width < rect->left + rect->width)
- cam_rect->width = rect->left + rect->width -
- cam_rect->left;
-
- if (cam_rect->top > rect->top)
- cam_rect->top = bounds.r.top;
-
- if (cam_rect->top + cam_rect->height < rect->top + rect->height)
- cam_rect->height = rect->top + rect->height -
- cam_rect->top;
-
- sdsel.r = *cam_rect;
- v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel);
- *cam_rect = sdsel.r;
- ret = soc_camera_client_g_rect(sd, cam_rect);
- dev_geo(dev, "Camera S_SELECTION %d for %dx%d@%d:%d\n", ret,
- cam_rect->width, cam_rect->height,
- cam_rect->left, cam_rect->top);
- }
-
- /* S_SELECTION must not modify the rectangle */
- if (is_smaller(cam_rect, rect) || is_inside(cam_rect, rect)) {
-		/*
-		 * The camera failed to configure suitable cropping; we cannot
-		 * use the current rectangle, so set it to the maximum
-		 */
- sdsel.r = bounds.r;
- v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel);
- *cam_rect = sdsel.r;
-
- ret = soc_camera_client_g_rect(sd, cam_rect);
- dev_geo(dev, "Camera S_SELECTION %d for max %dx%d@%d:%d\n", ret,
- cam_rect->width, cam_rect->height,
- cam_rect->left, cam_rect->top);
- }
-
- if (!ret) {
- *target_rect = *cam_rect;
- move_and_crop_subrect(target_rect, subrect);
- }
-
- return ret;
-}
-EXPORT_SYMBOL(soc_camera_client_s_selection);
-
-/* Iterative set_fmt, also updates cached client crop on success */
-static int client_set_fmt(struct soc_camera_device *icd,
- struct v4l2_rect *rect, struct v4l2_rect *subrect,
- unsigned int max_width, unsigned int max_height,
- struct v4l2_subdev_format *format, bool host_can_scale)
-{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct device *dev = icd->parent;
- struct v4l2_mbus_framefmt *mf = &format->format;
- unsigned int width = mf->width, height = mf->height, tmp_w, tmp_h;
- struct v4l2_subdev_selection sdsel = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- .target = V4L2_SEL_TGT_CROP_BOUNDS,
- };
- bool host_1to1;
- int ret;
-
- ret = v4l2_device_call_until_err(sd->v4l2_dev,
- soc_camera_grp_id(icd), pad,
- set_fmt, NULL, format);
- if (ret < 0)
- return ret;
-
- dev_geo(dev, "camera scaled to %ux%u\n", mf->width, mf->height);
-
- if (width == mf->width && height == mf->height) {
- /* Perfect! The client has done it all. */
- host_1to1 = true;
- goto update_cache;
- }
-
- host_1to1 = false;
- if (!host_can_scale)
- goto update_cache;
-
- ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel);
- if (ret < 0)
- return ret;
-
- if (max_width > sdsel.r.width)
- max_width = sdsel.r.width;
- if (max_height > sdsel.r.height)
- max_height = sdsel.r.height;
-
-	/* Camera set a format, but the geometry is not precise; try to improve it */
- tmp_w = mf->width;
- tmp_h = mf->height;
-
- /* width <= max_width && height <= max_height - guaranteed by try_fmt */
- while ((width > tmp_w || height > tmp_h) &&
- tmp_w < max_width && tmp_h < max_height) {
- tmp_w = min(2 * tmp_w, max_width);
- tmp_h = min(2 * tmp_h, max_height);
- mf->width = tmp_w;
- mf->height = tmp_h;
- ret = v4l2_device_call_until_err(sd->v4l2_dev,
- soc_camera_grp_id(icd), pad,
- set_fmt, NULL, format);
- dev_geo(dev, "Camera scaled to %ux%u\n",
- mf->width, mf->height);
- if (ret < 0) {
- /* This shouldn't happen */
- dev_err(dev, "Client failed to set format: %d\n", ret);
- return ret;
- }
- }
-
-update_cache:
- /* Update cache */
- ret = soc_camera_client_g_rect(sd, rect);
- if (ret < 0)
- return ret;
-
- if (host_1to1)
- *subrect = *rect;
- else
- move_and_crop_subrect(rect, subrect);
-
- return 0;
-}
-
-/**
- * soc_camera_client_scale
- * @icd: soc-camera device
- * @rect: camera cropping window
- * @subrect: part of rect, sent to the user
- * @mf: in- / output camera output window
- * @width: on input: max host input width;
- * on output: user width, mapped back to input
- * @height: on input: max host input height;
- * on output: user height, mapped back to input
- * @host_can_scale: host can scale this pixel format
- * @shift: shift, used for scaling
- */
-int soc_camera_client_scale(struct soc_camera_device *icd,
- struct v4l2_rect *rect, struct v4l2_rect *subrect,
- struct v4l2_mbus_framefmt *mf,
- unsigned int *width, unsigned int *height,
- bool host_can_scale, unsigned int shift)
-{
- struct device *dev = icd->parent;
- struct v4l2_subdev_format fmt_tmp = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- .format = *mf,
- };
- struct v4l2_mbus_framefmt *mf_tmp = &fmt_tmp.format;
- unsigned int scale_h, scale_v;
- int ret;
-
- /*
- * 5. Apply iterative camera S_FMT for camera user window (also updates
- * client crop cache and the imaginary sub-rectangle).
- */
- ret = client_set_fmt(icd, rect, subrect, *width, *height,
- &fmt_tmp, host_can_scale);
- if (ret < 0)
- return ret;
-
- dev_geo(dev, "5: camera scaled to %ux%u\n",
- mf_tmp->width, mf_tmp->height);
-
- /* 6. Retrieve camera output window (g_fmt) */
-
- /* unneeded - it is already in "mf_tmp" */
-
- /* 7. Calculate new client scales. */
- scale_h = soc_camera_calc_scale(rect->width, shift, mf_tmp->width);
- scale_v = soc_camera_calc_scale(rect->height, shift, mf_tmp->height);
-
- mf->width = mf_tmp->width;
- mf->height = mf_tmp->height;
- mf->colorspace = mf_tmp->colorspace;
-
- /*
- * 8. Calculate new host crop - apply camera scales to previously
- * updated "effective" crop.
- */
- *width = soc_camera_shift_scale(subrect->width, shift, scale_h);
- *height = soc_camera_shift_scale(subrect->height, shift, scale_v);
-
- dev_geo(dev, "8: new client sub-window %ux%u\n", *width, *height);
-
- return 0;
-}
-EXPORT_SYMBOL(soc_camera_client_scale);
-
-/*
- * Calculate real client output window by applying new scales to the current
- * client crop. New scales are calculated from the requested output format and
- * host crop, mapped back onto the client input (subrect).
- */
-void soc_camera_calc_client_output(struct soc_camera_device *icd,
- struct v4l2_rect *rect, struct v4l2_rect *subrect,
- const struct v4l2_pix_format *pix, struct v4l2_mbus_framefmt *mf,
- unsigned int shift)
-{
- struct device *dev = icd->parent;
- unsigned int scale_v, scale_h;
-
- if (subrect->width == rect->width &&
- subrect->height == rect->height) {
- /* No sub-cropping */
- mf->width = pix->width;
- mf->height = pix->height;
- return;
- }
-
- /* 1.-2. Current camera scales and subwin - cached. */
-
- dev_geo(dev, "2: subwin %ux%u@%u:%u\n",
- subrect->width, subrect->height,
- subrect->left, subrect->top);
-
- /*
- * 3. Calculate new combined scales from input sub-window to requested
- * user window.
- */
-
- /*
- * TODO: CEU cannot scale images larger than VGA to smaller than SubQCIF
- * (128x96) or larger than VGA. This and similar limitations have to be
- * taken into account here.
- */
- scale_h = soc_camera_calc_scale(subrect->width, shift, pix->width);
- scale_v = soc_camera_calc_scale(subrect->height, shift, pix->height);
-
- dev_geo(dev, "3: scales %u:%u\n", scale_h, scale_v);
-
- /*
- * 4. Calculate desired client output window by applying combined scales
- * to client (real) input window.
- */
- mf->width = soc_camera_shift_scale(rect->width, shift, scale_h);
- mf->height = soc_camera_shift_scale(rect->height, shift, scale_v);
-}
-EXPORT_SYMBOL(soc_camera_calc_client_output);
-
-MODULE_DESCRIPTION("soc-camera scaling-cropping functions");
-MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>");
-MODULE_LICENSE("GPL");
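
A minimal userspace sketch of the geometry iteration performed by the client_set_fmt() loop deleted above (the subdev set_fmt call made on each pass is ignored here, and the helper names are hypothetical): the camera output window is doubled until it covers the requested host input or reaches the sensor limits, which is the same termination condition as the removed while loop.

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/*
 * Stand-in for the deleted loop: grow the current camera output
 * (tmp_w x tmp_h) by doubling until it covers the requested host
 * input (want_w x want_h) or hits the sensor maximum (max_w x max_h).
 */
static void grow_window(unsigned int want_w, unsigned int want_h,
			unsigned int max_w, unsigned int max_h,
			unsigned int *tmp_w, unsigned int *tmp_h)
{
	while ((want_w > *tmp_w || want_h > *tmp_h) &&
	       *tmp_w < max_w && *tmp_h < max_h) {
		*tmp_w = min_u(2 * *tmp_w, max_w);
		*tmp_h = min_u(2 * *tmp_h, max_h);
	}
}

int main(void)
{
	unsigned int w = 320, h = 240;

	grow_window(1280, 720, 2592, 1944, &w, &h);
	printf("%ux%u\n", w, h);	/* prints 1280x960 */
	return 0;
}
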
diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.h b/drivers/media/platform/soc_camera/soc_scale_crop.h
deleted file mode 100644
index 9ca469312a1f..000000000000
--- a/drivers/media/platform/soc_camera/soc_scale_crop.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * soc-camera generic scaling-cropping manipulation functions
- *
- * Copyright (C) 2013 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef SOC_SCALE_CROP_H
-#define SOC_SCALE_CROP_H
-
-#include <linux/kernel.h>
-
-struct soc_camera_device;
-
-struct v4l2_selection;
-struct v4l2_mbus_framefmt;
-struct v4l2_pix_format;
-struct v4l2_rect;
-struct v4l2_subdev;
-
-static inline unsigned int soc_camera_shift_scale(unsigned int size,
- unsigned int shift, unsigned int scale)
-{
- return DIV_ROUND_CLOSEST(size << shift, scale);
-}
-
-#define soc_camera_calc_scale(in, shift, out) soc_camera_shift_scale(in, shift, out)
-
-int soc_camera_client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect);
-int soc_camera_client_s_selection(struct v4l2_subdev *sd,
- struct v4l2_selection *sel, struct v4l2_selection *cam_sel,
- struct v4l2_rect *target_rect, struct v4l2_rect *subrect);
-int soc_camera_client_scale(struct soc_camera_device *icd,
- struct v4l2_rect *rect, struct v4l2_rect *subrect,
- struct v4l2_mbus_framefmt *mf,
- unsigned int *width, unsigned int *height,
- bool host_can_scale, unsigned int shift);
-void soc_camera_calc_client_output(struct soc_camera_device *icd,
- struct v4l2_rect *rect, struct v4l2_rect *subrect,
- const struct v4l2_pix_format *pix, struct v4l2_mbus_framefmt *mf,
- unsigned int shift);
-
-#endif
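
The pair of helpers removed above implements a rounded fixed-point ratio: soc_camera_calc_scale(in, shift, out) yields round((in << shift) / out), and soc_camera_shift_scale() applies the same Q-format factor back to a size. A small standalone sketch, assuming only the one-liners above and a simplified DIV_ROUND_CLOSEST() that holds for positive values:

#include <stdio.h>

/* round-to-nearest division, equivalent to the kernel helper for positive values */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

static unsigned int shift_scale(unsigned int size, unsigned int shift,
				unsigned int scale)
{
	return DIV_ROUND_CLOSEST(size << shift, scale);
}

int main(void)
{
	unsigned int shift = 12;
	/* scale factor from a 1280-wide crop down to a 640-wide output */
	unsigned int scale = shift_scale(1280, shift, 640);	/* 8192, i.e. 2.0 in Q12 */

	/* map a 600-wide sub-rectangle back through the same factor: 300 */
	printf("scale=%u mapped=%u\n", scale, shift_scale(600, shift, scale));
	return 0;
}
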
diff --git a/drivers/media/platform/sti/bdisp/bdisp-debug.c b/drivers/media/platform/sti/bdisp/bdisp-debug.c
index c6a4e2de5c0c..77ca7517fa3e 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-debug.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-debug.c
@@ -315,7 +315,7 @@ static void bdisp_dbg_dump_ivmx(struct seq_file *s,
seq_puts(s, "Unknown conversion\n");
}
-static int bdisp_dbg_last_nodes(struct seq_file *s, void *data)
+static int last_nodes_show(struct seq_file *s, void *data)
{
/* Not dumping all fields, focusing on significant ones */
struct bdisp_dev *bdisp = s->private;
@@ -388,7 +388,7 @@ static int bdisp_dbg_last_nodes(struct seq_file *s, void *data)
return 0;
}
-static int bdisp_dbg_last_nodes_raw(struct seq_file *s, void *data)
+static int last_nodes_raw_show(struct seq_file *s, void *data)
{
struct bdisp_dev *bdisp = s->private;
struct bdisp_node *node;
@@ -437,7 +437,7 @@ static const char *bdisp_fmt_to_str(struct bdisp_frame frame)
}
}
-static int bdisp_dbg_last_request(struct seq_file *s, void *data)
+static int last_request_show(struct seq_file *s, void *data)
{
struct bdisp_dev *bdisp = s->private;
struct bdisp_request *request = &bdisp->dbg.copy_request;
@@ -474,7 +474,7 @@ static int bdisp_dbg_last_request(struct seq_file *s, void *data)
#define DUMP(reg) seq_printf(s, #reg " \t0x%08X\n", readl(bdisp->regs + reg))
-static int bdisp_dbg_regs(struct seq_file *s, void *data)
+static int regs_show(struct seq_file *s, void *data)
{
struct bdisp_dev *bdisp = s->private;
int ret;
@@ -582,7 +582,7 @@ static int bdisp_dbg_regs(struct seq_file *s, void *data)
#define SECOND 1000000
-static int bdisp_dbg_perf(struct seq_file *s, void *data)
+static int perf_show(struct seq_file *s, void *data)
{
struct bdisp_dev *bdisp = s->private;
struct bdisp_request *request = &bdisp->dbg.copy_request;
@@ -627,27 +627,15 @@ static int bdisp_dbg_perf(struct seq_file *s, void *data)
return 0;
}
-#define bdisp_dbg_declare(name) \
- static int bdisp_dbg_##name##_open(struct inode *i, struct file *f) \
- { \
- return single_open(f, bdisp_dbg_##name, i->i_private); \
- } \
- static const struct file_operations bdisp_dbg_##name##_fops = { \
- .open = bdisp_dbg_##name##_open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release, \
- }
-
#define bdisp_dbg_create_entry(name) \
debugfs_create_file(#name, S_IRUGO, bdisp->dbg.debugfs_entry, bdisp, \
- &bdisp_dbg_##name##_fops)
+ &name##_fops)
-bdisp_dbg_declare(regs);
-bdisp_dbg_declare(last_nodes);
-bdisp_dbg_declare(last_nodes_raw);
-bdisp_dbg_declare(last_request);
-bdisp_dbg_declare(perf);
+DEFINE_SHOW_ATTRIBUTE(regs);
+DEFINE_SHOW_ATTRIBUTE(last_nodes);
+DEFINE_SHOW_ATTRIBUTE(last_nodes_raw);
+DEFINE_SHOW_ATTRIBUTE(last_request);
+DEFINE_SHOW_ATTRIBUTE(perf);
int bdisp_debugfs_create(struct bdisp_dev *bdisp)
{
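
The conversion above works because DEFINE_SHOW_ATTRIBUTE(name) from <linux/seq_file.h> generates essentially the boilerplate that the driver-local bdisp_dbg_declare() macro spelled out, but keyed on a show function named name##_show, hence the renaming of the seq_file callbacks earlier in this hunk. A rough sketch of what DEFINE_SHOW_ATTRIBUTE(regs) expands to (the authoritative definition lives in the header):

/* approximate expansion; see <linux/seq_file.h> for the real macro */
static int regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, regs_show, inode->i_private);
}

static const struct file_operations regs_fops = {
	.owner		= THIS_MODULE,
	.open		= regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
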
diff --git a/drivers/media/platform/sti/hva/hva-debugfs.c b/drivers/media/platform/sti/hva/hva-debugfs.c
index 9f7e8ac875d1..7d12a5b5d914 100644
--- a/drivers/media/platform/sti/hva/hva-debugfs.c
+++ b/drivers/media/platform/sti/hva/hva-debugfs.c
@@ -271,7 +271,7 @@ static void hva_dbg_perf_compute(struct hva_ctx *ctx)
* device debug info
*/
-static int hva_dbg_device(struct seq_file *s, void *data)
+static int device_show(struct seq_file *s, void *data)
{
struct hva_dev *hva = s->private;
@@ -281,7 +281,7 @@ static int hva_dbg_device(struct seq_file *s, void *data)
return 0;
}
-static int hva_dbg_encoders(struct seq_file *s, void *data)
+static int encoders_show(struct seq_file *s, void *data)
{
struct hva_dev *hva = s->private;
unsigned int i = 0;
@@ -299,7 +299,7 @@ static int hva_dbg_encoders(struct seq_file *s, void *data)
return 0;
}
-static int hva_dbg_last(struct seq_file *s, void *data)
+static int last_show(struct seq_file *s, void *data)
{
struct hva_dev *hva = s->private;
struct hva_ctx *last_ctx = &hva->dbg.last_ctx;
@@ -316,7 +316,7 @@ static int hva_dbg_last(struct seq_file *s, void *data)
return 0;
}
-static int hva_dbg_regs(struct seq_file *s, void *data)
+static int regs_show(struct seq_file *s, void *data)
{
struct hva_dev *hva = s->private;
@@ -325,26 +325,14 @@ static int hva_dbg_regs(struct seq_file *s, void *data)
return 0;
}
-#define hva_dbg_declare(name) \
- static int hva_dbg_##name##_open(struct inode *i, struct file *f) \
- { \
- return single_open(f, hva_dbg_##name, i->i_private); \
- } \
- static const struct file_operations hva_dbg_##name##_fops = { \
- .open = hva_dbg_##name##_open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release, \
- }
-
#define hva_dbg_create_entry(name) \
debugfs_create_file(#name, 0444, hva->dbg.debugfs_entry, hva, \
- &hva_dbg_##name##_fops)
+ &name##_fops)
-hva_dbg_declare(device);
-hva_dbg_declare(encoders);
-hva_dbg_declare(last);
-hva_dbg_declare(regs);
+DEFINE_SHOW_ATTRIBUTE(device);
+DEFINE_SHOW_ATTRIBUTE(encoders);
+DEFINE_SHOW_ATTRIBUTE(last);
+DEFINE_SHOW_ATTRIBUTE(regs);
void hva_debugfs_create(struct hva_dev *hva)
{
@@ -380,7 +368,7 @@ void hva_debugfs_remove(struct hva_dev *hva)
* context (instance) debug info
*/
-static int hva_dbg_ctx(struct seq_file *s, void *data)
+static int ctx_show(struct seq_file *s, void *data)
{
struct hva_ctx *ctx = s->private;
@@ -392,7 +380,7 @@ static int hva_dbg_ctx(struct seq_file *s, void *data)
return 0;
}
-hva_dbg_declare(ctx);
+DEFINE_SHOW_ATTRIBUTE(ctx);
void hva_dbg_ctx_create(struct hva_ctx *ctx)
{
@@ -407,7 +395,7 @@ void hva_dbg_ctx_create(struct hva_ctx *ctx)
ctx->dbg.debugfs_entry = debugfs_create_file(name, 0444,
hva->dbg.debugfs_entry,
- ctx, &hva_dbg_ctx_fops);
+ ctx, &ctx_fops);
}
void hva_dbg_ctx_remove(struct hva_ctx *ctx)
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
index 6950585edb5a..ee882b66a5ea 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
@@ -893,6 +893,7 @@ static int sun6i_csi_remove(struct platform_device *pdev)
static const struct of_device_id sun6i_csi_of_match[] = {
{ .compatible = "allwinner,sun6i-a31-csi", },
+ { .compatible = "allwinner,sun8i-h3-csi", },
{ .compatible = "allwinner,sun8i-v3s-csi", },
{},
};
diff --git a/drivers/media/platform/vicodec/codec-fwht.c b/drivers/media/platform/vicodec/codec-fwht.c
index 5630f1dc45e6..d1d6085da9f1 100644
--- a/drivers/media/platform/vicodec/codec-fwht.c
+++ b/drivers/media/platform/vicodec/codec-fwht.c
@@ -10,8 +10,11 @@
*/
#include <linux/string.h>
+#include <linux/kernel.h>
#include "codec-fwht.h"
+#define OVERFLOW_BIT BIT(14)
+
/*
* Note: bit 0 of the header must always be 0. Otherwise it cannot
* be guaranteed that the magic 8 byte sequence (see below) can
@@ -103,16 +106,21 @@ static int rlc(const s16 *in, __be16 *output, int blocktype)
* This function will worst-case increase rlc_in by 65*2 bytes:
* one s16 value for the header and 8 * 8 coefficients of type s16.
*/
-static s16 derlc(const __be16 **rlc_in, s16 *dwht_out)
+static u16 derlc(const __be16 **rlc_in, s16 *dwht_out,
+ const __be16 *end_of_input)
{
/* header */
const __be16 *input = *rlc_in;
- s16 ret = ntohs(*input++);
+ u16 stat;
int dec_count = 0;
s16 block[8 * 8 + 16];
s16 *wp = block;
int i;
+ if (input > end_of_input)
+ return OVERFLOW_BIT;
+ stat = ntohs(*input++);
+
/*
* Now de-compress, it expands one byte to up to 15 bytes
* (or fills the remainder of the 64 bytes with zeroes if it
@@ -122,9 +130,15 @@ static s16 derlc(const __be16 **rlc_in, s16 *dwht_out)
* allow for overflow if the incoming data was malformed.
*/
while (dec_count < 8 * 8) {
- s16 in = ntohs(*input++);
- int length = in & 0xf;
- int coeff = in >> 4;
+ s16 in;
+ int length;
+ int coeff;
+
+ if (input > end_of_input)
+ return OVERFLOW_BIT;
+ in = ntohs(*input++);
+ length = in & 0xf;
+ coeff = in >> 4;
/* fill remainder with zeros */
if (length == 15) {
@@ -149,7 +163,7 @@ static s16 derlc(const __be16 **rlc_in, s16 *dwht_out)
dwht_out[x + y * 8] = *wp++;
}
*rlc_in = input;
- return ret;
+ return stat;
}
static const int quant_table[] = {
@@ -237,8 +251,6 @@ static void fwht(const u8 *block, s16 *output_block, unsigned int stride,
unsigned int i;
/* stage 1 */
- stride *= input_step;
-
for (i = 0; i < 8; i++, tmp += stride, out += 8) {
switch (input_step) {
case 1:
@@ -562,7 +574,7 @@ static void fill_encoder_block(const u8 *input, s16 *dst,
for (i = 0; i < 8; i++) {
for (j = 0; j < 8; j++, input += input_step)
*dst++ = *input;
- input += (stride - 8) * input_step;
+ input += stride - 8 * input_step;
}
}
@@ -660,7 +672,7 @@ static void add_deltas(s16 *deltas, const u8 *ref, int stride)
static u32 encode_plane(u8 *input, u8 *refp, __be16 **rlco, __be16 *rlco_max,
struct fwht_cframe *cf, u32 height, u32 width,
- unsigned int input_step,
+ u32 stride, unsigned int input_step,
bool is_intra, bool next_is_intra)
{
u8 *input_start = input;
@@ -671,7 +683,11 @@ static u32 encode_plane(u8 *input, u8 *refp, __be16 **rlco, __be16 *rlco_max,
unsigned int last_size = 0;
unsigned int i, j;
+ width = round_up(width, 8);
+ height = round_up(height, 8);
+
for (j = 0; j < height / 8; j++) {
+ input = input_start + j * 8 * stride;
for (i = 0; i < width / 8; i++) {
/* intra code, first frame is always intra coded. */
int blocktype = IBLOCK;
@@ -679,9 +695,9 @@ static u32 encode_plane(u8 *input, u8 *refp, __be16 **rlco, __be16 *rlco_max,
if (!is_intra)
blocktype = decide_blocktype(input, refp,
- deltablock, width, input_step);
+ deltablock, stride, input_step);
if (blocktype == IBLOCK) {
- fwht(input, cf->coeffs, width, input_step, 1);
+ fwht(input, cf->coeffs, stride, input_step, 1);
quantize_intra(cf->coeffs, cf->de_coeffs,
cf->i_frame_qp);
} else {
@@ -722,12 +738,12 @@ static u32 encode_plane(u8 *input, u8 *refp, __be16 **rlco, __be16 *rlco_max,
}
last_size = size;
}
- input += width * 7 * input_step;
}
exit_loop:
if (encoding & FWHT_FRAME_UNENCODED) {
u8 *out = (u8 *)rlco_start;
+ u8 *p;
input = input_start;
/*
@@ -736,8 +752,11 @@ exit_loop:
* by 0xfe. Since YUV is limited range such values
* shouldn't appear anyway.
*/
- for (i = 0; i < height * width; i++, input += input_step)
- *out++ = (*input == 0xff) ? 0xfe : *input;
+ for (j = 0; j < height; j++) {
+ for (i = 0, p = input; i < width; i++, p += input_step)
+ *out++ = (*p == 0xff) ? 0xfe : *p;
+ input += stride;
+ }
*rlco = (__be16 *)out;
encoding &= ~FWHT_FRAME_PCODED;
}
@@ -747,30 +766,32 @@ exit_loop:
u32 fwht_encode_frame(struct fwht_raw_frame *frm,
struct fwht_raw_frame *ref_frm,
struct fwht_cframe *cf,
- bool is_intra, bool next_is_intra)
+ bool is_intra, bool next_is_intra,
+ unsigned int width, unsigned int height,
+ unsigned int stride, unsigned int chroma_stride)
{
- unsigned int size = frm->height * frm->width;
+ unsigned int size = height * width;
__be16 *rlco = cf->rlc_data;
__be16 *rlco_max;
u32 encoding;
rlco_max = rlco + size / 2 - 256;
encoding = encode_plane(frm->luma, ref_frm->luma, &rlco, rlco_max, cf,
- frm->height, frm->width,
+ height, width, stride,
frm->luma_alpha_step, is_intra, next_is_intra);
if (encoding & FWHT_FRAME_UNENCODED)
encoding |= FWHT_LUMA_UNENCODED;
encoding &= ~FWHT_FRAME_UNENCODED;
if (frm->components_num >= 3) {
- u32 chroma_h = frm->height / frm->height_div;
- u32 chroma_w = frm->width / frm->width_div;
+ u32 chroma_h = height / frm->height_div;
+ u32 chroma_w = width / frm->width_div;
unsigned int chroma_size = chroma_h * chroma_w;
rlco_max = rlco + chroma_size / 2 - 256;
encoding |= encode_plane(frm->cb, ref_frm->cb, &rlco, rlco_max,
cf, chroma_h, chroma_w,
- frm->chroma_step,
+ chroma_stride, frm->chroma_step,
is_intra, next_is_intra);
if (encoding & FWHT_FRAME_UNENCODED)
encoding |= FWHT_CB_UNENCODED;
@@ -778,7 +799,7 @@ u32 fwht_encode_frame(struct fwht_raw_frame *frm,
rlco_max = rlco + chroma_size / 2 - 256;
encoding |= encode_plane(frm->cr, ref_frm->cr, &rlco, rlco_max,
cf, chroma_h, chroma_w,
- frm->chroma_step,
+ chroma_stride, frm->chroma_step,
is_intra, next_is_intra);
if (encoding & FWHT_FRAME_UNENCODED)
encoding |= FWHT_CR_UNENCODED;
@@ -787,10 +808,10 @@ u32 fwht_encode_frame(struct fwht_raw_frame *frm,
if (frm->components_num == 4) {
rlco_max = rlco + size / 2 - 256;
- encoding = encode_plane(frm->alpha, ref_frm->alpha, &rlco,
- rlco_max, cf, frm->height, frm->width,
- frm->luma_alpha_step,
- is_intra, next_is_intra);
+ encoding |= encode_plane(frm->alpha, ref_frm->alpha, &rlco,
+ rlco_max, cf, height, width,
+ stride, frm->luma_alpha_step,
+ is_intra, next_is_intra);
if (encoding & FWHT_FRAME_UNENCODED)
encoding |= FWHT_ALPHA_UNENCODED;
encoding &= ~FWHT_FRAME_UNENCODED;
@@ -800,18 +821,24 @@ u32 fwht_encode_frame(struct fwht_raw_frame *frm,
return encoding;
}
-static void decode_plane(struct fwht_cframe *cf, const __be16 **rlco, u8 *ref,
- u32 height, u32 width, bool uncompressed)
+static bool decode_plane(struct fwht_cframe *cf, const __be16 **rlco, u8 *ref,
+ u32 height, u32 width, u32 coded_width,
+ bool uncompressed, const __be16 *end_of_rlco_buf)
{
unsigned int copies = 0;
s16 copy[8 * 8];
- s16 stat;
+ u16 stat;
unsigned int i, j;
+ width = round_up(width, 8);
+ height = round_up(height, 8);
+
if (uncompressed) {
+ if (end_of_rlco_buf + 1 < *rlco + width * height / 2)
+ return false;
memcpy(ref, *rlco, width * height);
*rlco += width * height / 2;
- return;
+ return true;
}
/*
@@ -822,19 +849,22 @@ static void decode_plane(struct fwht_cframe *cf, const __be16 **rlco, u8 *ref,
*/
for (j = 0; j < height / 8; j++) {
for (i = 0; i < width / 8; i++) {
- u8 *refp = ref + j * 8 * width + i * 8;
+ u8 *refp = ref + j * 8 * coded_width + i * 8;
if (copies) {
memcpy(cf->de_fwht, copy, sizeof(copy));
if (stat & PFRAME_BIT)
- add_deltas(cf->de_fwht, refp, width);
- fill_decoder_block(refp, cf->de_fwht, width);
+ add_deltas(cf->de_fwht, refp,
+ coded_width);
+ fill_decoder_block(refp, cf->de_fwht,
+ coded_width);
copies--;
continue;
}
- stat = derlc(rlco, cf->coeffs);
-
+ stat = derlc(rlco, cf->coeffs, end_of_rlco_buf);
+ if (stat & OVERFLOW_BIT)
+ return false;
if (stat & PFRAME_BIT)
dequantize_inter(cf->coeffs);
else
@@ -847,35 +877,53 @@ static void decode_plane(struct fwht_cframe *cf, const __be16 **rlco, u8 *ref,
if (copies)
memcpy(copy, cf->de_fwht, sizeof(copy));
if (stat & PFRAME_BIT)
- add_deltas(cf->de_fwht, refp, width);
- fill_decoder_block(refp, cf->de_fwht, width);
+ add_deltas(cf->de_fwht, refp, coded_width);
+ fill_decoder_block(refp, cf->de_fwht, coded_width);
}
}
+ return true;
}
-void fwht_decode_frame(struct fwht_cframe *cf, struct fwht_raw_frame *ref,
- u32 hdr_flags, unsigned int components_num)
+bool fwht_decode_frame(struct fwht_cframe *cf, struct fwht_raw_frame *ref,
+ u32 hdr_flags, unsigned int components_num,
+ unsigned int width, unsigned int height,
+ unsigned int coded_width)
{
const __be16 *rlco = cf->rlc_data;
+ const __be16 *end_of_rlco_buf = cf->rlc_data +
+ (cf->size / sizeof(*rlco)) - 1;
- decode_plane(cf, &rlco, ref->luma, cf->height, cf->width,
- hdr_flags & FWHT_FL_LUMA_IS_UNCOMPRESSED);
+ if (!decode_plane(cf, &rlco, ref->luma, height, width, coded_width,
+ hdr_flags & FWHT_FL_LUMA_IS_UNCOMPRESSED,
+ end_of_rlco_buf))
+ return false;
if (components_num >= 3) {
- u32 h = cf->height;
- u32 w = cf->width;
+ u32 h = height;
+ u32 w = width;
+ u32 c = coded_width;
if (!(hdr_flags & FWHT_FL_CHROMA_FULL_HEIGHT))
h /= 2;
- if (!(hdr_flags & FWHT_FL_CHROMA_FULL_WIDTH))
+ if (!(hdr_flags & FWHT_FL_CHROMA_FULL_WIDTH)) {
w /= 2;
- decode_plane(cf, &rlco, ref->cb, h, w,
- hdr_flags & FWHT_FL_CB_IS_UNCOMPRESSED);
- decode_plane(cf, &rlco, ref->cr, h, w,
- hdr_flags & FWHT_FL_CR_IS_UNCOMPRESSED);
+ c /= 2;
+ }
+ if (!decode_plane(cf, &rlco, ref->cb, h, w, c,
+ hdr_flags & FWHT_FL_CB_IS_UNCOMPRESSED,
+ end_of_rlco_buf))
+ return false;
+ if (!decode_plane(cf, &rlco, ref->cr, h, w, c,
+ hdr_flags & FWHT_FL_CR_IS_UNCOMPRESSED,
+ end_of_rlco_buf))
+ return false;
}
if (components_num == 4)
- decode_plane(cf, &rlco, ref->alpha, cf->height, cf->width,
- hdr_flags & FWHT_FL_ALPHA_IS_UNCOMPRESSED);
+ if (!decode_plane(cf, &rlco, ref->alpha, height, width,
+ coded_width,
+ hdr_flags & FWHT_FL_ALPHA_IS_UNCOMPRESSED,
+ end_of_rlco_buf))
+ return false;
+ return true;
}
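
A small userspace sketch of the run-length word layout that rlc()/derlc() exchange, using a hypothetical unpack_rlc() helper: each big-endian 16-bit word packs the run length in the low nibble (15 meaning "zero-fill the rest of the 8x8 block") and the signed coefficient in the upper 12 bits. The end_of_input/OVERFLOW_BIT checks added above simply refuse to fetch another word once the compressed buffer is exhausted.

#include <stdint.h>
#include <stdio.h>

/* hypothetical helper: split one RLC word into (length, coefficient) */
static void unpack_rlc(int16_t word, int *length, int *coeff)
{
	*length = word & 0xf;	/* 15 means "zero-fill the remainder of the block" */
	*coeff = word >> 4;	/* arithmetic shift keeps the sign, as the kernel code assumes */
}

int main(void)
{
	int16_t word = (int16_t)0xfff3;	/* coefficient -1 coded with run length 3 */
	int len, coeff;

	unpack_rlc(word, &len, &coeff);
	printf("len=%d coeff=%d\n", len, coeff);	/* prints len=3 coeff=-1 */
	return 0;
}
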
diff --git a/drivers/media/platform/vicodec/codec-fwht.h b/drivers/media/platform/vicodec/codec-fwht.h
index 90ff8962fca7..60d71d9dacb3 100644
--- a/drivers/media/platform/vicodec/codec-fwht.h
+++ b/drivers/media/platform/vicodec/codec-fwht.h
@@ -78,9 +78,22 @@
#define FWHT_FL_ALPHA_IS_UNCOMPRESSED BIT(9)
/* A 4-values flag - the number of components - 1 */
-#define FWHT_FL_COMPONENTS_NUM_MSK GENMASK(17, 16)
+#define FWHT_FL_COMPONENTS_NUM_MSK GENMASK(18, 16)
#define FWHT_FL_COMPONENTS_NUM_OFFSET 16
+#define FWHT_FL_PIXENC_MSK GENMASK(20, 19)
+#define FWHT_FL_PIXENC_OFFSET 19
+#define FWHT_FL_PIXENC_YUV (1 << FWHT_FL_PIXENC_OFFSET)
+#define FWHT_FL_PIXENC_RGB (2 << FWHT_FL_PIXENC_OFFSET)
+#define FWHT_FL_PIXENC_HSV (3 << FWHT_FL_PIXENC_OFFSET)
+
+/*
+ * A macro to calculate the needed padding in order to make sure
+ * both luma and chroma components resolutions are rounded up to
+ * a multiple of 8
+ */
+#define vic_round_dim(dim, div) (round_up((dim) / (div), 8) * (div))
+
struct fwht_cframe_hdr {
u32 magic1;
u32 magic2;
@@ -95,7 +108,6 @@ struct fwht_cframe_hdr {
};
struct fwht_cframe {
- unsigned int width, height;
u16 i_frame_qp;
u16 p_frame_qp;
__be16 *rlc_data;
@@ -106,7 +118,6 @@ struct fwht_cframe {
};
struct fwht_raw_frame {
- unsigned int width, height;
unsigned int width_div;
unsigned int height_div;
unsigned int luma_alpha_step;
@@ -125,8 +136,12 @@ struct fwht_raw_frame {
u32 fwht_encode_frame(struct fwht_raw_frame *frm,
struct fwht_raw_frame *ref_frm,
struct fwht_cframe *cf,
- bool is_intra, bool next_is_intra);
-void fwht_decode_frame(struct fwht_cframe *cf, struct fwht_raw_frame *ref,
- u32 hdr_flags, unsigned int components_num);
+ bool is_intra, bool next_is_intra,
+ unsigned int width, unsigned int height,
+ unsigned int stride, unsigned int chroma_stride);
+bool fwht_decode_frame(struct fwht_cframe *cf, struct fwht_raw_frame *ref,
+ u32 hdr_flags, unsigned int components_num,
+ unsigned int width, unsigned int height,
+ unsigned int coded_width);
#endif
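
A worked example of the vic_round_dim() padding rule defined above, using a simplified round_up() assumed equivalent to the kernel helper for positive values: for a 642-pixel dimension with div = 2, rounding the half-resolution chroma dimension first gives 328 * 2 = 656, so both the 656-wide luma plane and the 328-wide chroma planes are multiples of 8, whereas plain round_up(642, 8) = 648 would leave a 324-wide chroma plane that is not.

#include <stdio.h>

/* simplified round_up() for positive integers */
#define round_up(x, y)		((((x) + (y) - 1) / (y)) * (y))
#define vic_round_dim(dim, div)	(round_up((dim) / (div), 8) * (div))

int main(void)
{
	printf("%d %d\n", vic_round_dim(642, 2), round_up(642, 8));	/* prints 656 648 */
	return 0;
}
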
diff --git a/drivers/media/platform/vicodec/codec-v4l2-fwht.c b/drivers/media/platform/vicodec/codec-v4l2-fwht.c
index 8cb0212df67f..c15034849133 100644
--- a/drivers/media/platform/vicodec/codec-v4l2-fwht.c
+++ b/drivers/media/platform/vicodec/codec-v4l2-fwht.c
@@ -11,32 +11,53 @@
#include "codec-v4l2-fwht.h"
static const struct v4l2_fwht_pixfmt_info v4l2_fwht_pixfmts[] = {
- { V4L2_PIX_FMT_YUV420, 1, 3, 2, 1, 1, 2, 2, 3},
- { V4L2_PIX_FMT_YVU420, 1, 3, 2, 1, 1, 2, 2, 3},
- { V4L2_PIX_FMT_YUV422P, 1, 2, 1, 1, 1, 2, 1, 3},
- { V4L2_PIX_FMT_NV12, 1, 3, 2, 1, 2, 2, 2, 3},
- { V4L2_PIX_FMT_NV21, 1, 3, 2, 1, 2, 2, 2, 3},
- { V4L2_PIX_FMT_NV16, 1, 2, 1, 1, 2, 2, 1, 3},
- { V4L2_PIX_FMT_NV61, 1, 2, 1, 1, 2, 2, 1, 3},
- { V4L2_PIX_FMT_NV24, 1, 3, 1, 1, 2, 1, 1, 3},
- { V4L2_PIX_FMT_NV42, 1, 3, 1, 1, 2, 1, 1, 3},
- { V4L2_PIX_FMT_YUYV, 2, 2, 1, 2, 4, 2, 1, 3},
- { V4L2_PIX_FMT_YVYU, 2, 2, 1, 2, 4, 2, 1, 3},
- { V4L2_PIX_FMT_UYVY, 2, 2, 1, 2, 4, 2, 1, 3},
- { V4L2_PIX_FMT_VYUY, 2, 2, 1, 2, 4, 2, 1, 3},
- { V4L2_PIX_FMT_BGR24, 3, 3, 1, 3, 3, 1, 1, 3},
- { V4L2_PIX_FMT_RGB24, 3, 3, 1, 3, 3, 1, 1, 3},
- { V4L2_PIX_FMT_HSV24, 3, 3, 1, 3, 3, 1, 1, 3},
- { V4L2_PIX_FMT_BGR32, 4, 4, 1, 4, 4, 1, 1, 3},
- { V4L2_PIX_FMT_XBGR32, 4, 4, 1, 4, 4, 1, 1, 3},
- { V4L2_PIX_FMT_RGB32, 4, 4, 1, 4, 4, 1, 1, 3},
- { V4L2_PIX_FMT_XRGB32, 4, 4, 1, 4, 4, 1, 1, 3},
- { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1, 3},
- { V4L2_PIX_FMT_ARGB32, 4, 4, 1, 4, 4, 1, 1, 4},
- { V4L2_PIX_FMT_ABGR32, 4, 4, 1, 4, 4, 1, 1, 4},
- { V4L2_PIX_FMT_GREY, 1, 1, 1, 1, 0, 1, 1, 1},
+ { V4L2_PIX_FMT_YUV420, 1, 3, 2, 1, 1, 2, 2, 3, 3, FWHT_FL_PIXENC_YUV},
+ { V4L2_PIX_FMT_YVU420, 1, 3, 2, 1, 1, 2, 2, 3, 3, FWHT_FL_PIXENC_YUV},
+ { V4L2_PIX_FMT_YUV422P, 1, 2, 1, 1, 1, 2, 1, 3, 3, FWHT_FL_PIXENC_YUV},
+ { V4L2_PIX_FMT_NV12, 1, 3, 2, 1, 2, 2, 2, 3, 2, FWHT_FL_PIXENC_YUV},
+ { V4L2_PIX_FMT_NV21, 1, 3, 2, 1, 2, 2, 2, 3, 2, FWHT_FL_PIXENC_YUV},
+ { V4L2_PIX_FMT_NV16, 1, 2, 1, 1, 2, 2, 1, 3, 2, FWHT_FL_PIXENC_YUV},
+ { V4L2_PIX_FMT_NV61, 1, 2, 1, 1, 2, 2, 1, 3, 2, FWHT_FL_PIXENC_YUV},
+ { V4L2_PIX_FMT_NV24, 1, 3, 1, 1, 2, 1, 1, 3, 2, FWHT_FL_PIXENC_YUV},
+ { V4L2_PIX_FMT_NV42, 1, 3, 1, 1, 2, 1, 1, 3, 2, FWHT_FL_PIXENC_YUV},
+ { V4L2_PIX_FMT_YUYV, 2, 2, 1, 2, 4, 2, 1, 3, 1, FWHT_FL_PIXENC_YUV},
+ { V4L2_PIX_FMT_YVYU, 2, 2, 1, 2, 4, 2, 1, 3, 1, FWHT_FL_PIXENC_YUV},
+ { V4L2_PIX_FMT_UYVY, 2, 2, 1, 2, 4, 2, 1, 3, 1, FWHT_FL_PIXENC_YUV},
+ { V4L2_PIX_FMT_VYUY, 2, 2, 1, 2, 4, 2, 1, 3, 1, FWHT_FL_PIXENC_YUV},
+ { V4L2_PIX_FMT_BGR24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_RGB24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_HSV24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV},
+ { V4L2_PIX_FMT_BGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_XBGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_RGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_XRGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV},
+ { V4L2_PIX_FMT_ARGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_ABGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_GREY, 1, 1, 1, 1, 0, 1, 1, 1, 1, FWHT_FL_PIXENC_RGB},
};
+const struct v4l2_fwht_pixfmt_info *v4l2_fwht_default_fmt(u32 width_div,
+ u32 height_div,
+ u32 components_num,
+ u32 pixenc,
+ unsigned int start_idx)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(v4l2_fwht_pixfmts); i++) {
+ if (v4l2_fwht_pixfmts[i].width_div == width_div &&
+ v4l2_fwht_pixfmts[i].height_div == height_div &&
+ (!pixenc || v4l2_fwht_pixfmts[i].pixenc == pixenc) &&
+ v4l2_fwht_pixfmts[i].components_num == components_num) {
+ if (start_idx == 0)
+ return v4l2_fwht_pixfmts + i;
+ start_idx--;
+ }
+ }
+ return NULL;
+}
+
const struct v4l2_fwht_pixfmt_info *v4l2_fwht_find_pixfmt(u32 pixelformat)
{
unsigned int i;
@@ -56,7 +77,8 @@ const struct v4l2_fwht_pixfmt_info *v4l2_fwht_get_pixfmt(u32 idx)
int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
{
- unsigned int size = state->width * state->height;
+ unsigned int size = state->stride * state->coded_height;
+ unsigned int chroma_stride = state->stride;
const struct v4l2_fwht_pixfmt_info *info = state->info;
struct fwht_cframe_hdr *p_hdr;
struct fwht_cframe cf;
@@ -66,8 +88,7 @@ int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
if (!info)
return -EINVAL;
- rf.width = state->width;
- rf.height = state->height;
+
rf.luma = p_in;
rf.width_div = info->width_div;
rf.height_div = info->height_div;
@@ -84,14 +105,17 @@ int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
case V4L2_PIX_FMT_YUV420:
rf.cb = rf.luma + size;
rf.cr = rf.cb + size / 4;
+ chroma_stride /= 2;
break;
case V4L2_PIX_FMT_YVU420:
rf.cr = rf.luma + size;
rf.cb = rf.cr + size / 4;
+ chroma_stride /= 2;
break;
case V4L2_PIX_FMT_YUV422P:
rf.cb = rf.luma + size;
rf.cr = rf.cb + size / 2;
+ chroma_stride /= 2;
break;
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV16:
@@ -163,15 +187,16 @@ int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
return -EINVAL;
}
- cf.width = state->width;
- cf.height = state->height;
cf.i_frame_qp = state->i_frame_qp;
cf.p_frame_qp = state->p_frame_qp;
cf.rlc_data = (__be16 *)(p_out + sizeof(*p_hdr));
encoding = fwht_encode_frame(&rf, &state->ref_frame, &cf,
!state->gop_cnt,
- state->gop_cnt == state->gop_size - 1);
+ state->gop_cnt == state->gop_size - 1,
+ state->visible_width,
+ state->visible_height,
+ state->stride, chroma_stride);
if (!(encoding & FWHT_FRAME_PCODED))
state->gop_cnt = 0;
if (++state->gop_cnt >= state->gop_size)
@@ -181,9 +206,10 @@ int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
p_hdr->magic1 = FWHT_MAGIC1;
p_hdr->magic2 = FWHT_MAGIC2;
p_hdr->version = htonl(FWHT_VERSION);
- p_hdr->width = htonl(cf.width);
- p_hdr->height = htonl(cf.height);
+ p_hdr->width = htonl(state->visible_width);
+ p_hdr->height = htonl(state->visible_height);
flags |= (info->components_num - 1) << FWHT_FL_COMPONENTS_NUM_OFFSET;
+ flags |= info->pixenc;
if (encoding & FWHT_LUMA_UNENCODED)
flags |= FWHT_FL_LUMA_IS_UNCOMPRESSED;
if (encoding & FWHT_CB_UNENCODED)
@@ -202,65 +228,70 @@ int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
p_hdr->ycbcr_enc = htonl(state->ycbcr_enc);
p_hdr->quantization = htonl(state->quantization);
p_hdr->size = htonl(cf.size);
- state->ref_frame.width = cf.width;
- state->ref_frame.height = cf.height;
return cf.size + sizeof(*p_hdr);
}
int v4l2_fwht_decode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
{
- unsigned int size = state->width * state->height;
- unsigned int chroma_size = size;
- unsigned int i;
+ unsigned int i, j, k;
u32 flags;
- struct fwht_cframe_hdr *p_hdr;
struct fwht_cframe cf;
- u8 *p;
+ u8 *p, *ref_p;
unsigned int components_num = 3;
unsigned int version;
+ const struct v4l2_fwht_pixfmt_info *info;
+ unsigned int hdr_width_div, hdr_height_div;
if (!state->info)
return -EINVAL;
- p_hdr = (struct fwht_cframe_hdr *)p_in;
- cf.width = ntohl(p_hdr->width);
- cf.height = ntohl(p_hdr->height);
+ info = state->info;
- version = ntohl(p_hdr->version);
+ version = ntohl(state->header.version);
if (!version || version > FWHT_VERSION) {
pr_err("version %d is not supported, current version is %d\n",
version, FWHT_VERSION);
return -EINVAL;
}
- if (p_hdr->magic1 != FWHT_MAGIC1 ||
- p_hdr->magic2 != FWHT_MAGIC2 ||
- (cf.width & 7) || (cf.height & 7))
+ if (state->header.magic1 != FWHT_MAGIC1 ||
+ state->header.magic2 != FWHT_MAGIC2)
return -EINVAL;
/* TODO: support resolution changes */
- if (cf.width != state->width || cf.height != state->height)
+ if (ntohl(state->header.width) != state->visible_width ||
+ ntohl(state->header.height) != state->visible_height)
return -EINVAL;
- flags = ntohl(p_hdr->flags);
+ flags = ntohl(state->header.flags);
if (version == FWHT_VERSION) {
+ if ((flags & FWHT_FL_PIXENC_MSK) != info->pixenc)
+ return -EINVAL;
components_num = 1 + ((flags & FWHT_FL_COMPONENTS_NUM_MSK) >>
- FWHT_FL_COMPONENTS_NUM_OFFSET);
+ FWHT_FL_COMPONENTS_NUM_OFFSET);
}
- state->colorspace = ntohl(p_hdr->colorspace);
- state->xfer_func = ntohl(p_hdr->xfer_func);
- state->ycbcr_enc = ntohl(p_hdr->ycbcr_enc);
- state->quantization = ntohl(p_hdr->quantization);
- cf.rlc_data = (__be16 *)(p_in + sizeof(*p_hdr));
+ if (components_num != info->components_num)
+ return -EINVAL;
+
+ state->colorspace = ntohl(state->header.colorspace);
+ state->xfer_func = ntohl(state->header.xfer_func);
+ state->ycbcr_enc = ntohl(state->header.ycbcr_enc);
+ state->quantization = ntohl(state->header.quantization);
+ cf.rlc_data = (__be16 *)p_in;
+ cf.size = ntohl(state->header.size);
- if (!(flags & FWHT_FL_CHROMA_FULL_WIDTH))
- chroma_size /= 2;
- if (!(flags & FWHT_FL_CHROMA_FULL_HEIGHT))
- chroma_size /= 2;
+ hdr_width_div = (flags & FWHT_FL_CHROMA_FULL_WIDTH) ? 1 : 2;
+ hdr_height_div = (flags & FWHT_FL_CHROMA_FULL_HEIGHT) ? 1 : 2;
+ if (hdr_width_div != info->width_div ||
+ hdr_height_div != info->height_div)
+ return -EINVAL;
- fwht_decode_frame(&cf, &state->ref_frame, flags, components_num);
+ if (!fwht_decode_frame(&cf, &state->ref_frame, flags, components_num,
+ state->visible_width, state->visible_height,
+ state->coded_width))
+ return -EINVAL;
/*
* TODO - handle the case where the compressed stream encodes a
@@ -268,123 +299,226 @@ int v4l2_fwht_decode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
*/
switch (state->info->id) {
case V4L2_PIX_FMT_GREY:
- memcpy(p_out, state->ref_frame.luma, size);
+ ref_p = state->ref_frame.luma;
+ for (i = 0; i < state->coded_height; i++) {
+ memcpy(p_out, ref_p, state->visible_width);
+ p_out += state->stride;
+ ref_p += state->coded_width;
+ }
break;
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YUV422P:
- memcpy(p_out, state->ref_frame.luma, size);
- p_out += size;
- memcpy(p_out, state->ref_frame.cb, chroma_size);
- p_out += chroma_size;
- memcpy(p_out, state->ref_frame.cr, chroma_size);
+ ref_p = state->ref_frame.luma;
+ for (i = 0; i < state->coded_height; i++) {
+ memcpy(p_out, ref_p, state->visible_width);
+ p_out += state->stride;
+ ref_p += state->coded_width;
+ }
+
+ ref_p = state->ref_frame.cb;
+ for (i = 0; i < state->coded_height / 2; i++) {
+ memcpy(p_out, ref_p, state->visible_width / 2);
+ p_out += state->stride / 2;
+ ref_p += state->coded_width / 2;
+ }
+ ref_p = state->ref_frame.cr;
+ for (i = 0; i < state->coded_height / 2; i++) {
+ memcpy(p_out, ref_p, state->visible_width / 2);
+ p_out += state->stride / 2;
+ ref_p += state->coded_width / 2;
+ }
break;
case V4L2_PIX_FMT_YVU420:
- memcpy(p_out, state->ref_frame.luma, size);
- p_out += size;
- memcpy(p_out, state->ref_frame.cr, chroma_size);
- p_out += chroma_size;
- memcpy(p_out, state->ref_frame.cb, chroma_size);
+ ref_p = state->ref_frame.luma;
+ for (i = 0; i < state->coded_height; i++) {
+ memcpy(p_out, ref_p, state->visible_width);
+ p_out += state->stride;
+ ref_p += state->coded_width;
+ }
+
+ ref_p = state->ref_frame.cr;
+ for (i = 0; i < state->coded_height / 2; i++) {
+ memcpy(p_out, ref_p, state->visible_width / 2);
+ p_out += state->stride / 2;
+ ref_p += state->coded_width / 2;
+ }
+ ref_p = state->ref_frame.cb;
+ for (i = 0; i < state->coded_height / 2; i++) {
+ memcpy(p_out, ref_p, state->visible_width / 2);
+ p_out += state->stride / 2;
+ ref_p += state->coded_width / 2;
+ }
break;
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV24:
- memcpy(p_out, state->ref_frame.luma, size);
- p_out += size;
- for (i = 0, p = p_out; i < chroma_size; i++) {
- *p++ = state->ref_frame.cb[i];
- *p++ = state->ref_frame.cr[i];
+ ref_p = state->ref_frame.luma;
+ for (i = 0; i < state->coded_height; i++) {
+ memcpy(p_out, ref_p, state->visible_width);
+ p_out += state->stride;
+ ref_p += state->coded_width;
+ }
+
+ k = 0;
+ for (i = 0; i < state->coded_height / 2; i++) {
+ for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
+ *p++ = state->ref_frame.cb[k];
+ *p++ = state->ref_frame.cr[k];
+ k++;
+ }
+ p_out += state->stride;
}
break;
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV61:
case V4L2_PIX_FMT_NV42:
- memcpy(p_out, state->ref_frame.luma, size);
- p_out += size;
- for (i = 0, p = p_out; i < chroma_size; i++) {
- *p++ = state->ref_frame.cr[i];
- *p++ = state->ref_frame.cb[i];
+ ref_p = state->ref_frame.luma;
+ for (i = 0; i < state->coded_height; i++) {
+ memcpy(p_out, ref_p, state->visible_width);
+ p_out += state->stride;
+ ref_p += state->coded_width;
+ }
+
+ k = 0;
+ for (i = 0; i < state->coded_height / 2; i++) {
+ for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
+ *p++ = state->ref_frame.cr[k];
+ *p++ = state->ref_frame.cb[k];
+ k++;
+ }
+ p_out += state->stride;
}
break;
case V4L2_PIX_FMT_YUYV:
- for (i = 0, p = p_out; i < size; i += 2) {
- *p++ = state->ref_frame.luma[i];
- *p++ = state->ref_frame.cb[i / 2];
- *p++ = state->ref_frame.luma[i + 1];
- *p++ = state->ref_frame.cr[i / 2];
+ k = 0;
+ for (i = 0; i < state->coded_height; i++) {
+ for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
+ *p++ = state->ref_frame.luma[k];
+ *p++ = state->ref_frame.cb[k / 2];
+ *p++ = state->ref_frame.luma[k + 1];
+ *p++ = state->ref_frame.cr[k / 2];
+ k += 2;
+ }
+ p_out += state->stride;
}
break;
case V4L2_PIX_FMT_YVYU:
- for (i = 0, p = p_out; i < size; i += 2) {
- *p++ = state->ref_frame.luma[i];
- *p++ = state->ref_frame.cr[i / 2];
- *p++ = state->ref_frame.luma[i + 1];
- *p++ = state->ref_frame.cb[i / 2];
+ k = 0;
+ for (i = 0; i < state->coded_height; i++) {
+ for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
+ *p++ = state->ref_frame.luma[k];
+ *p++ = state->ref_frame.cr[k / 2];
+ *p++ = state->ref_frame.luma[k + 1];
+ *p++ = state->ref_frame.cb[k / 2];
+ k += 2;
+ }
+ p_out += state->stride;
}
break;
case V4L2_PIX_FMT_UYVY:
- for (i = 0, p = p_out; i < size; i += 2) {
- *p++ = state->ref_frame.cb[i / 2];
- *p++ = state->ref_frame.luma[i];
- *p++ = state->ref_frame.cr[i / 2];
- *p++ = state->ref_frame.luma[i + 1];
+ k = 0;
+ for (i = 0; i < state->coded_height; i++) {
+ for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
+ *p++ = state->ref_frame.cb[k / 2];
+ *p++ = state->ref_frame.luma[k];
+ *p++ = state->ref_frame.cr[k / 2];
+ *p++ = state->ref_frame.luma[k + 1];
+ k += 2;
+ }
+ p_out += state->stride;
}
break;
case V4L2_PIX_FMT_VYUY:
- for (i = 0, p = p_out; i < size; i += 2) {
- *p++ = state->ref_frame.cr[i / 2];
- *p++ = state->ref_frame.luma[i];
- *p++ = state->ref_frame.cb[i / 2];
- *p++ = state->ref_frame.luma[i + 1];
+ k = 0;
+ for (i = 0; i < state->coded_height; i++) {
+ for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
+ *p++ = state->ref_frame.cr[k / 2];
+ *p++ = state->ref_frame.luma[k];
+ *p++ = state->ref_frame.cb[k / 2];
+ *p++ = state->ref_frame.luma[k + 1];
+ k += 2;
+ }
+ p_out += state->stride;
}
break;
case V4L2_PIX_FMT_RGB24:
case V4L2_PIX_FMT_HSV24:
- for (i = 0, p = p_out; i < size; i++) {
- *p++ = state->ref_frame.cr[i];
- *p++ = state->ref_frame.luma[i];
- *p++ = state->ref_frame.cb[i];
+ k = 0;
+ for (i = 0; i < state->coded_height; i++) {
+ for (j = 0, p = p_out; j < state->coded_width; j++) {
+ *p++ = state->ref_frame.cr[k];
+ *p++ = state->ref_frame.luma[k];
+ *p++ = state->ref_frame.cb[k];
+ k++;
+ }
+ p_out += state->stride;
}
break;
case V4L2_PIX_FMT_BGR24:
- for (i = 0, p = p_out; i < size; i++) {
- *p++ = state->ref_frame.cb[i];
- *p++ = state->ref_frame.luma[i];
- *p++ = state->ref_frame.cr[i];
+ k = 0;
+ for (i = 0; i < state->coded_height; i++) {
+ for (j = 0, p = p_out; j < state->coded_width; j++) {
+ *p++ = state->ref_frame.cb[k];
+ *p++ = state->ref_frame.luma[k];
+ *p++ = state->ref_frame.cr[k];
+ k++;
+ }
+ p_out += state->stride;
}
break;
case V4L2_PIX_FMT_RGB32:
case V4L2_PIX_FMT_XRGB32:
case V4L2_PIX_FMT_HSV32:
- for (i = 0, p = p_out; i < size; i++) {
- *p++ = 0;
- *p++ = state->ref_frame.cr[i];
- *p++ = state->ref_frame.luma[i];
- *p++ = state->ref_frame.cb[i];
+ k = 0;
+ for (i = 0; i < state->coded_height; i++) {
+ for (j = 0, p = p_out; j < state->coded_width; j++) {
+ *p++ = 0;
+ *p++ = state->ref_frame.cr[k];
+ *p++ = state->ref_frame.luma[k];
+ *p++ = state->ref_frame.cb[k];
+ k++;
+ }
+ p_out += state->stride;
}
break;
case V4L2_PIX_FMT_BGR32:
case V4L2_PIX_FMT_XBGR32:
- for (i = 0, p = p_out; i < size; i++) {
- *p++ = state->ref_frame.cb[i];
- *p++ = state->ref_frame.luma[i];
- *p++ = state->ref_frame.cr[i];
- *p++ = 0;
+ k = 0;
+ for (i = 0; i < state->coded_height; i++) {
+ for (j = 0, p = p_out; j < state->coded_width; j++) {
+ *p++ = state->ref_frame.cb[k];
+ *p++ = state->ref_frame.luma[k];
+ *p++ = state->ref_frame.cr[k];
+ *p++ = 0;
+ k++;
+ }
+ p_out += state->stride;
}
break;
case V4L2_PIX_FMT_ARGB32:
- for (i = 0, p = p_out; i < size; i++) {
- *p++ = state->ref_frame.alpha[i];
- *p++ = state->ref_frame.cr[i];
- *p++ = state->ref_frame.luma[i];
- *p++ = state->ref_frame.cb[i];
+ k = 0;
+ for (i = 0; i < state->coded_height; i++) {
+ for (j = 0, p = p_out; j < state->coded_width; j++) {
+ *p++ = state->ref_frame.alpha[k];
+ *p++ = state->ref_frame.cr[k];
+ *p++ = state->ref_frame.luma[k];
+ *p++ = state->ref_frame.cb[k];
+ k++;
+ }
+ p_out += state->stride;
}
break;
case V4L2_PIX_FMT_ABGR32:
- for (i = 0, p = p_out; i < size; i++) {
- *p++ = state->ref_frame.cb[i];
- *p++ = state->ref_frame.luma[i];
- *p++ = state->ref_frame.cr[i];
- *p++ = state->ref_frame.alpha[i];
+ k = 0;
+ for (i = 0; i < state->coded_height; i++) {
+ for (j = 0, p = p_out; j < state->coded_width; j++) {
+ *p++ = state->ref_frame.cb[k];
+ *p++ = state->ref_frame.luma[k];
+ *p++ = state->ref_frame.cr[k];
+ *p++ = state->ref_frame.alpha[k];
+ k++;
+ }
+ p_out += state->stride;
}
break;
default:
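
A short usage sketch of the v4l2_fwht_default_fmt() lookup added at the top of this file (the call sites below are hypothetical): it scans v4l2_fwht_pixfmts[] and returns the start_idx-th entry whose subsampling divisors, component count and, unless pixenc is 0, pixel encoding all match, which is what lets the decoder enumerate only the formats compatible with an incoming FWHT header.

/* hypothetical call sites, based on the table earlier in this hunk */
const struct v4l2_fwht_pixfmt_info *fmt;

/* first 4:2:0, 3-component YUV entry: V4L2_PIX_FMT_YUV420 */
fmt = v4l2_fwht_default_fmt(2, 2, 3, FWHT_FL_PIXENC_YUV, 0);

/* next entry matching the same constraints: V4L2_PIX_FMT_YVU420 */
fmt = v4l2_fwht_default_fmt(2, 2, 3, FWHT_FL_PIXENC_YUV, 1);
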
diff --git a/drivers/media/platform/vicodec/codec-v4l2-fwht.h b/drivers/media/platform/vicodec/codec-v4l2-fwht.h
index ed53e28d4f9c..aa6fa90a48be 100644
--- a/drivers/media/platform/vicodec/codec-v4l2-fwht.h
+++ b/drivers/media/platform/vicodec/codec-v4l2-fwht.h
@@ -19,12 +19,17 @@ struct v4l2_fwht_pixfmt_info {
unsigned int width_div;
unsigned int height_div;
unsigned int components_num;
+ unsigned int planes_num;
+ unsigned int pixenc;
};
struct v4l2_fwht_state {
const struct v4l2_fwht_pixfmt_info *info;
- unsigned int width;
- unsigned int height;
+ unsigned int visible_width;
+ unsigned int visible_height;
+ unsigned int coded_width;
+ unsigned int coded_height;
+ unsigned int stride;
unsigned int gop_size;
unsigned int gop_cnt;
u16 i_frame_qp;
@@ -36,11 +41,17 @@ struct v4l2_fwht_state {
enum v4l2_quantization quantization;
struct fwht_raw_frame ref_frame;
+ struct fwht_cframe_hdr header;
u8 *compressed_frame;
};
const struct v4l2_fwht_pixfmt_info *v4l2_fwht_find_pixfmt(u32 pixelformat);
const struct v4l2_fwht_pixfmt_info *v4l2_fwht_get_pixfmt(u32 idx);
+const struct v4l2_fwht_pixfmt_info *v4l2_fwht_default_fmt(u32 width_div,
+ u32 height_div,
+ u32 components_num,
+ u32 pixenc,
+ unsigned int start_idx);
int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out);
int v4l2_fwht_decode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out);
diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c
index 0d7876f5acf0..9d739ea5542d 100644
--- a/drivers/media/platform/vicodec/vicodec-core.c
+++ b/drivers/media/platform/vicodec/vicodec-core.c
@@ -61,7 +61,7 @@ struct pixfmt_info {
};
static const struct v4l2_fwht_pixfmt_info pixfmt_fwht = {
- V4L2_PIX_FMT_FWHT, 0, 3, 1, 1, 1, 1, 1, 0
+ V4L2_PIX_FMT_FWHT, 0, 3, 1, 1, 1, 1, 1, 0, 1
};
static void vicodec_dev_release(struct device *dev)
@@ -75,8 +75,10 @@ static struct platform_device vicodec_pdev = {
/* Per-queue, driver-specific private data */
struct vicodec_q_data {
- unsigned int width;
- unsigned int height;
+ unsigned int coded_width;
+ unsigned int coded_height;
+ unsigned int visible_width;
+ unsigned int visible_height;
unsigned int sizeimage;
unsigned int sequence;
const struct v4l2_fwht_pixfmt_info *info;
@@ -122,10 +124,12 @@ struct vicodec_ctx {
u32 cur_buf_offset;
u32 comp_max_size;
u32 comp_size;
+ u32 header_size;
u32 comp_magic_cnt;
- u32 comp_frame_size;
bool comp_has_frame;
bool comp_has_next_frame;
+ bool first_source_change_sent;
+ bool source_changed;
};
static inline struct vicodec_ctx *file2ctx(struct file *file)
@@ -182,6 +186,10 @@ static int device_process(struct vicodec_ctx *ctx,
return ret;
vb2_set_plane_payload(&dst_vb->vb2_buf, 0, ret);
} else {
+ unsigned int comp_frame_size = ntohl(ctx->state.header.size);
+
+ if (comp_frame_size > ctx->comp_max_size)
+ return -EINVAL;
state->info = q_dst->info;
ret = v4l2_fwht_decode(state, p_src, p_dst);
if (ret < 0)
@@ -190,18 +198,8 @@ static int device_process(struct vicodec_ctx *ctx,
}
dst_vb->sequence = q_dst->sequence++;
- dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
-
- if (src_vb->flags & V4L2_BUF_FLAG_TIMECODE)
- dst_vb->timecode = src_vb->timecode;
- dst_vb->field = src_vb->field;
dst_vb->flags &= ~V4L2_BUF_FLAG_LAST;
- dst_vb->flags |= src_vb->flags &
- (V4L2_BUF_FLAG_TIMECODE |
- V4L2_BUF_FLAG_KEYFRAME |
- V4L2_BUF_FLAG_PFRAME |
- V4L2_BUF_FLAG_BFRAME |
- V4L2_BUF_FLAG_TSTAMP_SRC_MASK);
+ v4l2_m2m_buf_copy_metadata(src_vb, dst_vb, !ctx->is_enc);
return 0;
}
@@ -209,6 +207,63 @@ static int device_process(struct vicodec_ctx *ctx,
/*
* mem2mem callbacks
*/
+static enum vb2_buffer_state get_next_header(struct vicodec_ctx *ctx,
+ u8 **pp, u32 sz)
+{
+ static const u8 magic[] = {
+ 0x4f, 0x4f, 0x4f, 0x4f, 0xff, 0xff, 0xff, 0xff
+ };
+ u8 *p = *pp;
+ u32 state;
+ u8 *header = (u8 *)&ctx->state.header;
+
+ state = VB2_BUF_STATE_DONE;
+
+ if (!ctx->header_size) {
+ state = VB2_BUF_STATE_ERROR;
+ for (; p < *pp + sz; p++) {
+ u32 copy;
+
+ p = memchr(p, magic[ctx->comp_magic_cnt],
+ *pp + sz - p);
+ if (!p) {
+ ctx->comp_magic_cnt = 0;
+ p = *pp + sz;
+ break;
+ }
+ copy = sizeof(magic) - ctx->comp_magic_cnt;
+ if (*pp + sz - p < copy)
+ copy = *pp + sz - p;
+
+ memcpy(header + ctx->comp_magic_cnt, p, copy);
+ ctx->comp_magic_cnt += copy;
+ if (!memcmp(header, magic, ctx->comp_magic_cnt)) {
+ p += copy;
+ state = VB2_BUF_STATE_DONE;
+ break;
+ }
+ ctx->comp_magic_cnt = 0;
+ }
+ if (ctx->comp_magic_cnt < sizeof(magic)) {
+ *pp = p;
+ return state;
+ }
+ ctx->header_size = sizeof(magic);
+ }
+
+ if (ctx->header_size < sizeof(struct fwht_cframe_hdr)) {
+ u32 copy = sizeof(struct fwht_cframe_hdr) - ctx->header_size;
+
+ if (*pp + sz - p < copy)
+ copy = *pp + sz - p;
+
+ memcpy(header + ctx->header_size, p, copy);
+ p += copy;
+ ctx->header_size += copy;
+ }
+ *pp = p;
+ return state;
+}
/* device_run() - prepares and starts the device */
static void device_run(void *priv)
@@ -249,6 +304,7 @@ static void device_run(void *priv)
}
v4l2_m2m_buf_done(dst_buf, state);
ctx->comp_size = 0;
+ ctx->header_size = 0;
ctx->comp_magic_cnt = 0;
ctx->comp_has_frame = false;
spin_unlock(ctx->lock);
@@ -273,6 +329,96 @@ static void job_remove_src_buf(struct vicodec_ctx *ctx, u32 state)
spin_unlock(ctx->lock);
}
+static const struct v4l2_fwht_pixfmt_info *
+info_from_header(const struct fwht_cframe_hdr *p_hdr)
+{
+ unsigned int flags = ntohl(p_hdr->flags);
+ unsigned int width_div = (flags & FWHT_FL_CHROMA_FULL_WIDTH) ? 1 : 2;
+ unsigned int height_div = (flags & FWHT_FL_CHROMA_FULL_HEIGHT) ? 1 : 2;
+ unsigned int components_num = 3;
+ unsigned int pixenc = 0;
+ unsigned int version = ntohl(p_hdr->version);
+
+ if (version == FWHT_VERSION) {
+ components_num = 1 + ((flags & FWHT_FL_COMPONENTS_NUM_MSK) >>
+ FWHT_FL_COMPONENTS_NUM_OFFSET);
+ pixenc = (flags & FWHT_FL_PIXENC_MSK);
+ }
+ return v4l2_fwht_default_fmt(width_div, height_div,
+ components_num, pixenc, 0);
+}
+
+static bool is_header_valid(const struct fwht_cframe_hdr *p_hdr)
+{
+ const struct v4l2_fwht_pixfmt_info *info;
+ unsigned int w = ntohl(p_hdr->width);
+ unsigned int h = ntohl(p_hdr->height);
+ unsigned int version = ntohl(p_hdr->version);
+ unsigned int flags = ntohl(p_hdr->flags);
+
+ if (!version || version > FWHT_VERSION)
+ return false;
+
+ if (w < MIN_WIDTH || w > MAX_WIDTH || h < MIN_HEIGHT || h > MAX_HEIGHT)
+ return false;
+
+ if (version == FWHT_VERSION) {
+ unsigned int components_num = 1 +
+ ((flags & FWHT_FL_COMPONENTS_NUM_MSK) >>
+ FWHT_FL_COMPONENTS_NUM_OFFSET);
+ unsigned int pixenc = flags & FWHT_FL_PIXENC_MSK;
+
+ if (components_num == 0 || components_num > 4 || !pixenc)
+ return false;
+ }
+
+ info = info_from_header(p_hdr);
+ if (!info)
+ return false;
+ return true;
+}
+
+static void update_capture_data_from_header(struct vicodec_ctx *ctx)
+{
+ struct vicodec_q_data *q_dst = get_q_data(ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ const struct fwht_cframe_hdr *p_hdr = &ctx->state.header;
+ const struct v4l2_fwht_pixfmt_info *info = info_from_header(p_hdr);
+ unsigned int flags = ntohl(p_hdr->flags);
+ unsigned int hdr_width_div = (flags & FWHT_FL_CHROMA_FULL_WIDTH) ? 1 : 2;
+ unsigned int hdr_height_div = (flags & FWHT_FL_CHROMA_FULL_HEIGHT) ? 1 : 2;
+
+ q_dst->info = info;
+ q_dst->visible_width = ntohl(p_hdr->width);
+ q_dst->visible_height = ntohl(p_hdr->height);
+ q_dst->coded_width = vic_round_dim(q_dst->visible_width, hdr_width_div);
+ q_dst->coded_height = vic_round_dim(q_dst->visible_height,
+ hdr_height_div);
+
+ q_dst->sizeimage = q_dst->coded_width * q_dst->coded_height *
+ q_dst->info->sizeimage_mult / q_dst->info->sizeimage_div;
+ ctx->state.colorspace = ntohl(p_hdr->colorspace);
+
+ ctx->state.xfer_func = ntohl(p_hdr->xfer_func);
+ ctx->state.ycbcr_enc = ntohl(p_hdr->ycbcr_enc);
+ ctx->state.quantization = ntohl(p_hdr->quantization);
+}
+
+static void set_last_buffer(struct vb2_v4l2_buffer *dst_buf,
+ const struct vb2_v4l2_buffer *src_buf,
+ struct vicodec_ctx *ctx)
+{
+ struct vicodec_q_data *q_dst = get_q_data(ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0);
+ dst_buf->sequence = q_dst->sequence++;
+
+ v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, !ctx->is_enc);
+ dst_buf->flags |= V4L2_BUF_FLAG_LAST;
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+}
+
static int job_ready(void *priv)
{
static const u8 magic[] = {
@@ -284,7 +430,16 @@ static int job_ready(void *priv)
u8 *p;
u32 sz;
u32 state;
-
+ struct vicodec_q_data *q_dst = get_q_data(ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ unsigned int flags;
+ unsigned int hdr_width_div;
+ unsigned int hdr_height_div;
+ unsigned int max_to_copy;
+ unsigned int comp_frame_size;
+
+ if (ctx->source_changed)
+ return 0;
if (ctx->is_enc || ctx->comp_has_frame)
return 1;
@@ -299,59 +454,27 @@ restart:
state = VB2_BUF_STATE_DONE;
- if (!ctx->comp_size) {
- state = VB2_BUF_STATE_ERROR;
- for (; p < p_src + sz; p++) {
- u32 copy;
-
- p = memchr(p, magic[ctx->comp_magic_cnt],
- p_src + sz - p);
- if (!p) {
- ctx->comp_magic_cnt = 0;
- break;
- }
- copy = sizeof(magic) - ctx->comp_magic_cnt;
- if (p_src + sz - p < copy)
- copy = p_src + sz - p;
-
- memcpy(ctx->state.compressed_frame + ctx->comp_magic_cnt,
- p, copy);
- ctx->comp_magic_cnt += copy;
- if (!memcmp(ctx->state.compressed_frame, magic,
- ctx->comp_magic_cnt)) {
- p += copy;
- state = VB2_BUF_STATE_DONE;
- break;
- }
- ctx->comp_magic_cnt = 0;
- }
- if (ctx->comp_magic_cnt < sizeof(magic)) {
+ if (ctx->header_size < sizeof(struct fwht_cframe_hdr)) {
+ state = get_next_header(ctx, &p, p_src + sz - p);
+ if (ctx->header_size < sizeof(struct fwht_cframe_hdr)) {
job_remove_src_buf(ctx, state);
goto restart;
}
- ctx->comp_size = sizeof(magic);
}
- if (ctx->comp_size < sizeof(struct fwht_cframe_hdr)) {
- struct fwht_cframe_hdr *p_hdr =
- (struct fwht_cframe_hdr *)ctx->state.compressed_frame;
- u32 copy = sizeof(struct fwht_cframe_hdr) - ctx->comp_size;
- if (copy > p_src + sz - p)
- copy = p_src + sz - p;
- memcpy(ctx->state.compressed_frame + ctx->comp_size,
- p, copy);
- p += copy;
- ctx->comp_size += copy;
- if (ctx->comp_size < sizeof(struct fwht_cframe_hdr)) {
- job_remove_src_buf(ctx, state);
- goto restart;
- }
- ctx->comp_frame_size = ntohl(p_hdr->size) + sizeof(*p_hdr);
- if (ctx->comp_frame_size > ctx->comp_max_size)
- ctx->comp_frame_size = ctx->comp_max_size;
- }
- if (ctx->comp_size < ctx->comp_frame_size) {
- u32 copy = ctx->comp_frame_size - ctx->comp_size;
+ comp_frame_size = ntohl(ctx->state.header.size);
+
+ /*
+ * The current scanned frame might be the first frame of a new
+ * resolution so its size might be larger than ctx->comp_max_size.
+ * In that case it is copied up to the current buffer capacity and
+ * the copy will continue after allocating new large enough buffer
+ * when restreaming
+ */
+ max_to_copy = min(comp_frame_size, ctx->comp_max_size);
+
+ if (ctx->comp_size < max_to_copy) {
+ u32 copy = max_to_copy - ctx->comp_size;
if (copy > p_src + sz - p)
copy = p_src + sz - p;
@@ -360,15 +483,17 @@ restart:
p, copy);
p += copy;
ctx->comp_size += copy;
- if (ctx->comp_size < ctx->comp_frame_size) {
+ if (ctx->comp_size < max_to_copy) {
job_remove_src_buf(ctx, state);
goto restart;
}
}
ctx->cur_buf_offset = p - p_src;
- ctx->comp_has_frame = true;
+ if (ctx->comp_size == comp_frame_size)
+ ctx->comp_has_frame = true;
ctx->comp_has_next_frame = false;
- if (sz - ctx->cur_buf_offset >= sizeof(struct fwht_cframe_hdr)) {
+ if (ctx->comp_has_frame && sz - ctx->cur_buf_offset >=
+ sizeof(struct fwht_cframe_hdr)) {
struct fwht_cframe_hdr *p_hdr = (struct fwht_cframe_hdr *)p;
u32 frame_size = ntohl(p_hdr->size);
u32 remaining = sz - ctx->cur_buf_offset - sizeof(*p_hdr);
@@ -376,6 +501,36 @@ restart:
if (!memcmp(p, magic, sizeof(magic)))
ctx->comp_has_next_frame = remaining >= frame_size;
}
+ /*
+ * if the header is invalid the device_run will just drop the frame
+ * with an error
+ */
+ if (!is_header_valid(&ctx->state.header) && ctx->comp_has_frame)
+ return 1;
+ flags = ntohl(ctx->state.header.flags);
+ hdr_width_div = (flags & FWHT_FL_CHROMA_FULL_WIDTH) ? 1 : 2;
+ hdr_height_div = (flags & FWHT_FL_CHROMA_FULL_HEIGHT) ? 1 : 2;
+
+ if (ntohl(ctx->state.header.width) != q_dst->visible_width ||
+ ntohl(ctx->state.header.height) != q_dst->visible_height ||
+ !q_dst->info ||
+ hdr_width_div != q_dst->info->width_div ||
+ hdr_height_div != q_dst->info->height_div) {
+ static const struct v4l2_event rs_event = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
+
+ struct vb2_v4l2_buffer *dst_buf =
+ v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ update_capture_data_from_header(ctx);
+ ctx->first_source_change_sent = true;
+ v4l2_event_queue_fh(&ctx->fh, &rs_event);
+ set_last_buffer(dst_buf, src_buf, ctx);
+ ctx->source_changed = true;
+ return 0;
+ }
return 1;
}
@@ -403,9 +558,10 @@ static int vidioc_querycap(struct file *file, void *priv,
return 0;
}
-static int enum_fmt(struct v4l2_fmtdesc *f, bool is_enc, bool is_out)
+static int enum_fmt(struct v4l2_fmtdesc *f, struct vicodec_ctx *ctx,
+ bool is_out)
{
- bool is_uncomp = (is_enc && is_out) || (!is_enc && !is_out);
+ bool is_uncomp = (ctx->is_enc && is_out) || (!ctx->is_enc && !is_out);
if (V4L2_TYPE_IS_MULTIPLANAR(f->type) && !multiplanar)
return -EINVAL;
@@ -414,8 +570,16 @@ static int enum_fmt(struct v4l2_fmtdesc *f, bool is_enc, bool is_out)
if (is_uncomp) {
const struct v4l2_fwht_pixfmt_info *info =
- v4l2_fwht_get_pixfmt(f->index);
+ get_q_data(ctx, f->type)->info;
+ if (!info || ctx->is_enc)
+ info = v4l2_fwht_get_pixfmt(f->index);
+ else
+ info = v4l2_fwht_default_fmt(info->width_div,
+ info->height_div,
+ info->components_num,
+ info->pixenc,
+ f->index);
if (!info)
return -EINVAL;
f->pixelformat = info->id;
@@ -432,7 +596,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
{
struct vicodec_ctx *ctx = file2ctx(file);
- return enum_fmt(f, ctx->is_enc, false);
+ return enum_fmt(f, ctx, false);
}
static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
@@ -440,7 +604,7 @@ static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
{
struct vicodec_ctx *ctx = file2ctx(file);
- return enum_fmt(f, ctx->is_enc, true);
+ return enum_fmt(f, ctx, true);
}
static int vidioc_g_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
@@ -458,17 +622,21 @@ static int vidioc_g_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
q_data = get_q_data(ctx, f->type);
info = q_data->info;
+ if (!info)
+ info = v4l2_fwht_get_pixfmt(0);
+
switch (f->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
if (multiplanar)
return -EINVAL;
pix = &f->fmt.pix;
- pix->width = q_data->width;
- pix->height = q_data->height;
+ pix->width = q_data->coded_width;
+ pix->height = q_data->coded_height;
pix->field = V4L2_FIELD_NONE;
pix->pixelformat = info->id;
- pix->bytesperline = q_data->width * info->bytesperline_mult;
+ pix->bytesperline = q_data->coded_width *
+ info->bytesperline_mult;
pix->sizeimage = q_data->sizeimage;
pix->colorspace = ctx->state.colorspace;
pix->xfer_func = ctx->state.xfer_func;
@@ -481,13 +649,13 @@ static int vidioc_g_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
if (!multiplanar)
return -EINVAL;
pix_mp = &f->fmt.pix_mp;
- pix_mp->width = q_data->width;
- pix_mp->height = q_data->height;
+ pix_mp->width = q_data->coded_width;
+ pix_mp->height = q_data->coded_height;
pix_mp->field = V4L2_FIELD_NONE;
pix_mp->pixelformat = info->id;
pix_mp->num_planes = 1;
pix_mp->plane_fmt[0].bytesperline =
- q_data->width * info->bytesperline_mult;
+ q_data->coded_width * info->bytesperline_mult;
pix_mp->plane_fmt[0].sizeimage = q_data->sizeimage;
pix_mp->colorspace = ctx->state.colorspace;
pix_mp->xfer_func = ctx->state.xfer_func;
@@ -528,8 +696,13 @@ static int vidioc_try_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
pix = &f->fmt.pix;
if (pix->pixelformat != V4L2_PIX_FMT_FWHT)
info = find_fmt(pix->pixelformat);
- pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH) & ~7;
- pix->height = clamp(pix->height, MIN_HEIGHT, MAX_HEIGHT) & ~7;
+
+ pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH);
+ pix->width = vic_round_dim(pix->width, info->width_div);
+
+ pix->height = clamp(pix->height, MIN_HEIGHT, MAX_HEIGHT);
+ pix->height = vic_round_dim(pix->height, info->height_div);
+
pix->field = V4L2_FIELD_NONE;
pix->bytesperline =
pix->width * info->bytesperline_mult;
@@ -545,9 +718,14 @@ static int vidioc_try_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
if (pix_mp->pixelformat != V4L2_PIX_FMT_FWHT)
info = find_fmt(pix_mp->pixelformat);
pix_mp->num_planes = 1;
- pix_mp->width = clamp(pix_mp->width, MIN_WIDTH, MAX_WIDTH) & ~7;
- pix_mp->height =
- clamp(pix_mp->height, MIN_HEIGHT, MAX_HEIGHT) & ~7;
+
+ pix_mp->width = clamp(pix_mp->width, MIN_WIDTH, MAX_WIDTH);
+ pix_mp->width = vic_round_dim(pix_mp->width, info->width_div);
+
+ pix_mp->height = clamp(pix_mp->height, MIN_HEIGHT, MAX_HEIGHT);
+ pix_mp->height = vic_round_dim(pix_mp->height,
+ info->height_div);
+
pix_mp->field = V4L2_FIELD_NONE;
plane->bytesperline =
pix_mp->width * info->bytesperline_mult;
@@ -657,9 +835,10 @@ static int vidioc_s_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
pix = &f->fmt.pix;
if (ctx->is_enc && V4L2_TYPE_IS_OUTPUT(f->type))
fmt_changed =
+ !q_data->info ||
q_data->info->id != pix->pixelformat ||
- q_data->width != pix->width ||
- q_data->height != pix->height;
+ q_data->coded_width != pix->width ||
+ q_data->coded_height != pix->height;
if (vb2_is_busy(vq) && fmt_changed)
return -EBUSY;
@@ -668,8 +847,8 @@ static int vidioc_s_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
q_data->info = &pixfmt_fwht;
else
q_data->info = find_fmt(pix->pixelformat);
- q_data->width = pix->width;
- q_data->height = pix->height;
+ q_data->coded_width = pix->width;
+ q_data->coded_height = pix->height;
q_data->sizeimage = pix->sizeimage;
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
@@ -677,9 +856,10 @@ static int vidioc_s_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
pix_mp = &f->fmt.pix_mp;
if (ctx->is_enc && V4L2_TYPE_IS_OUTPUT(f->type))
fmt_changed =
+ !q_data->info ||
q_data->info->id != pix_mp->pixelformat ||
- q_data->width != pix_mp->width ||
- q_data->height != pix_mp->height;
+ q_data->coded_width != pix_mp->width ||
+ q_data->coded_height != pix_mp->height;
if (vb2_is_busy(vq) && fmt_changed)
return -EBUSY;
@@ -688,17 +868,24 @@ static int vidioc_s_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
q_data->info = &pixfmt_fwht;
else
q_data->info = find_fmt(pix_mp->pixelformat);
- q_data->width = pix_mp->width;
- q_data->height = pix_mp->height;
+ q_data->coded_width = pix_mp->width;
+ q_data->coded_height = pix_mp->height;
q_data->sizeimage = pix_mp->plane_fmt[0].sizeimage;
break;
default:
return -EINVAL;
}
+ if (q_data->visible_width > q_data->coded_width)
+ q_data->visible_width = q_data->coded_width;
+ if (q_data->visible_height > q_data->coded_height)
+ q_data->visible_height = q_data->coded_height;
+
dprintk(ctx->dev,
- "Setting format for type %d, wxh: %dx%d, fourcc: %08x\n",
- f->type, q_data->width, q_data->height, q_data->info->id);
+ "Setting format for type %d, coded wxh: %dx%d, visible wxh: %dx%d, fourcc: %08x\n",
+ f->type, q_data->coded_width, q_data->coded_height,
+ q_data->visible_width, q_data->visible_height,
+ q_data->info->id);
return 0;
}
@@ -753,6 +940,84 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
return ret;
}
+static int vidioc_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct vicodec_ctx *ctx = file2ctx(file);
+ struct vicodec_q_data *q_data;
+ enum v4l2_buf_type valid_cap_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ enum v4l2_buf_type valid_out_type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+
+ if (multiplanar) {
+ valid_cap_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ valid_out_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ }
+
+ if (s->type != valid_cap_type && s->type != valid_out_type)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, s->type);
+ if (!q_data)
+ return -EINVAL;
+ /*
+	 * The encoder supports only cropping on the OUTPUT buffer;
+	 * the decoder supports only composing on the CAPTURE buffer.
+ */
+ if ((ctx->is_enc && s->type == valid_out_type) ||
+ (!ctx->is_enc && s->type == valid_cap_type)) {
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_CROP:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = q_data->visible_width;
+ s->r.height = q_data->visible_height;
+ return 0;
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = q_data->coded_width;
+ s->r.height = q_data->coded_height;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static int vidioc_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct vicodec_ctx *ctx = file2ctx(file);
+ struct vicodec_q_data *q_data;
+ enum v4l2_buf_type out_type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+
+ if (multiplanar)
+ out_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+
+ if (s->type != out_type)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, s->type);
+ if (!q_data)
+ return -EINVAL;
+
+ if (!ctx->is_enc || s->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ s->r.left = 0;
+ s->r.top = 0;
+ q_data->visible_width = clamp(s->r.width, MIN_WIDTH,
+ q_data->coded_width);
+ s->r.width = q_data->visible_width;
+ q_data->visible_height = clamp(s->r.height, MIN_HEIGHT,
+ q_data->coded_height);
+ s->r.height = q_data->visible_height;
+ return 0;
+}
+
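The new vidioc_g_selection()/vidioc_s_selection() handlers above expose the visible rectangle to user space: an encoder client crops on the OUTPUT queue, while a decoder client reads the compose rectangle on the CAPTURE queue. A minimal user-space sketch for the single-planar encoder case; the device path and sizes are illustrative only:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_selection sel;
	int fd = open("/dev/video0", O_RDWR);	/* hypothetical encoder node */

	if (fd < 0)
		return 1;
	memset(&sel, 0, sizeof(sel));
	sel.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	sel.target = V4L2_SEL_TGT_CROP;
	sel.r.width = 800;	/* visible size, at most the coded size */
	sel.r.height = 600;
	if (ioctl(fd, VIDIOC_S_SELECTION, &sel) == 0)
		printf("visible area is now %ux%u\n", sel.r.width, sel.r.height);
	close(fd);
	return 0;
}

As in the handler above, the driver clamps the requested rectangle, so the application should use the width and height written back by the ioctl.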
static void vicodec_mark_last_buf(struct vicodec_ctx *ctx)
{
static const struct v4l2_event eos_event = {
@@ -853,7 +1118,13 @@ static int vicodec_enum_framesizes(struct file *file, void *fh,
static int vicodec_subscribe_event(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
+ struct vicodec_ctx *ctx = container_of(fh, struct vicodec_ctx, fh);
+
switch (sub->type) {
+ case V4L2_EVENT_SOURCE_CHANGE:
+ if (ctx->is_enc)
+ return -EINVAL;
+ /* fall through */
case V4L2_EVENT_EOS:
return v4l2_event_subscribe(fh, sub, 0, NULL);
default:
@@ -895,6 +1166,9 @@ static const struct v4l2_ioctl_ops vicodec_ioctl_ops = {
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_g_selection = vidioc_g_selection,
+ .vidioc_s_selection = vidioc_s_selection,
+
.vidioc_try_encoder_cmd = vicodec_try_encoder_cmd,
.vidioc_encoder_cmd = vicodec_encoder_cmd,
.vidioc_try_decoder_cmd = vicodec_try_decoder_cmd,
@@ -960,7 +1234,71 @@ static void vicodec_buf_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vicodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned int sz = vb2_get_plane_payload(&vbuf->vb2_buf, 0);
+ u8 *p_src = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
+ u8 *p = p_src;
+ struct vb2_queue *vq_out = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ struct vb2_queue *vq_cap = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ bool header_valid = false;
+ static const struct v4l2_event rs_event = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
+ /* buf_queue handles only the first source change event */
+ if (ctx->first_source_change_sent) {
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+ return;
+ }
+
+ /*
+ * if both queues are streaming, the source change event is
+ * handled in job_ready
+ */
+ if (vb2_is_streaming(vq_cap) && vb2_is_streaming(vq_out)) {
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+ return;
+ }
+
+ /*
+	 * The source change event is relevant only for the decoder, and
+	 * only for buffers queued on the compressed (OUTPUT) stream.
+ */
+ if (ctx->is_enc || !V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+ return;
+ }
+
+ do {
+ enum vb2_buffer_state state =
+ get_next_header(ctx, &p, p_src + sz - p);
+
+ if (ctx->header_size < sizeof(struct fwht_cframe_hdr)) {
+ v4l2_m2m_buf_done(vbuf, state);
+ return;
+ }
+ header_valid = is_header_valid(&ctx->state.header);
+ /*
+ * p points right after the end of the header in the
+ * buffer. If the header is invalid we set p to point
+ * to the next byte after the start of the header
+ */
+ if (!header_valid) {
+ p = p - sizeof(struct fwht_cframe_hdr) + 1;
+ if (p < p_src)
+ p = p_src;
+ ctx->header_size = 0;
+ ctx->comp_magic_cnt = 0;
+ }
+
+ } while (!header_valid);
+
+ ctx->cur_buf_offset = p - p_src;
+ update_capture_data_from_header(ctx);
+ ctx->first_source_change_sent = true;
+ v4l2_event_queue_fh(&ctx->fh, &rs_event);
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
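The decoder path above peeks at the first queued OUTPUT buffer, parses the FWHT header and queues a V4L2_EVENT_SOURCE_CHANGE event before the CAPTURE queue is configured. A sketch of the matching user-space side; the device path is made up, and a real client would poll() for POLLPRI instead of calling VIDIOC_DQEVENT blindly:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_event_subscription sub;
	struct v4l2_event ev;
	int fd = open("/dev/video1", O_RDWR);	/* hypothetical decoder node */

	if (fd < 0)
		return 1;
	memset(&sub, 0, sizeof(sub));
	sub.type = V4L2_EVENT_SOURCE_CHANGE;
	ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);

	/* ... queue the first compressed OUTPUT buffer here ... */

	memset(&ev, 0, sizeof(ev));
	if (ioctl(fd, VIDIOC_DQEVENT, &ev) == 0 &&
	    ev.type == V4L2_EVENT_SOURCE_CHANGE &&
	    (ev.u.src_change.changes & V4L2_EVENT_SRC_CH_RESOLUTION))
		printf("resolution known: renegotiate the CAPTURE format\n");
	close(fd);
	return 0;
}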
@@ -988,47 +1326,70 @@ static int vicodec_start_streaming(struct vb2_queue *q,
struct vicodec_ctx *ctx = vb2_get_drv_priv(q);
struct vicodec_q_data *q_data = get_q_data(ctx, q->type);
struct v4l2_fwht_state *state = &ctx->state;
- unsigned int size = q_data->width * q_data->height;
const struct v4l2_fwht_pixfmt_info *info = q_data->info;
- unsigned int chroma_div = info->width_div * info->height_div;
+ unsigned int size = q_data->coded_width * q_data->coded_height;
+ unsigned int chroma_div;
unsigned int total_planes_size;
+ u8 *new_comp_frame;
- /*
- * we don't know ahead how many components are in the encoding type
- * V4L2_PIX_FMT_FWHT, so we will allocate space for 4 planes.
- */
- if (info->id == V4L2_PIX_FMT_FWHT || info->components_num == 4)
+ if (!info)
+ return -EINVAL;
+
+ chroma_div = info->width_div * info->height_div;
+ q_data->sequence = 0;
+
+ ctx->last_src_buf = NULL;
+ ctx->last_dst_buf = NULL;
+ state->gop_cnt = 0;
+
+ if ((V4L2_TYPE_IS_OUTPUT(q->type) && !ctx->is_enc) ||
+ (!V4L2_TYPE_IS_OUTPUT(q->type) && ctx->is_enc))
+ return 0;
+
+ if (info->id == V4L2_PIX_FMT_FWHT) {
+ vicodec_return_bufs(q, VB2_BUF_STATE_QUEUED);
+ return -EINVAL;
+ }
+ if (info->components_num == 4)
total_planes_size = 2 * size + 2 * (size / chroma_div);
else if (info->components_num == 3)
total_planes_size = size + 2 * (size / chroma_div);
else
total_planes_size = size;
- q_data->sequence = 0;
+ state->visible_width = q_data->visible_width;
+ state->visible_height = q_data->visible_height;
+ state->coded_width = q_data->coded_width;
+ state->coded_height = q_data->coded_height;
+ state->stride = q_data->coded_width *
+ info->bytesperline_mult;
- if (!V4L2_TYPE_IS_OUTPUT(q->type)) {
- if (!ctx->is_enc) {
- state->width = q_data->width;
- state->height = q_data->height;
- }
- return 0;
- }
-
- if (ctx->is_enc) {
- state->width = q_data->width;
- state->height = q_data->height;
- }
- state->ref_frame.width = state->ref_frame.height = 0;
state->ref_frame.luma = kvmalloc(total_planes_size, GFP_KERNEL);
- ctx->comp_max_size = total_planes_size + sizeof(struct fwht_cframe_hdr);
- state->compressed_frame = kvmalloc(ctx->comp_max_size, GFP_KERNEL);
- if (!state->ref_frame.luma || !state->compressed_frame) {
+ ctx->comp_max_size = total_planes_size;
+ new_comp_frame = kvmalloc(ctx->comp_max_size, GFP_KERNEL);
+
+ if (!state->ref_frame.luma || !new_comp_frame) {
kvfree(state->ref_frame.luma);
- kvfree(state->compressed_frame);
+ kvfree(new_comp_frame);
vicodec_return_bufs(q, VB2_BUF_STATE_QUEUED);
return -ENOMEM;
}
- if (info->id == V4L2_PIX_FMT_FWHT || info->components_num >= 3) {
+ /*
+	 * If state->compressed_frame was already allocated, it contains
+	 * the data of the first frame at the new resolution.
+ */
+ if (state->compressed_frame) {
+ if (ctx->comp_size > ctx->comp_max_size)
+ ctx->comp_size = ctx->comp_max_size;
+
+ memcpy(new_comp_frame,
+ state->compressed_frame, ctx->comp_size);
+ }
+
+ kvfree(state->compressed_frame);
+ state->compressed_frame = new_comp_frame;
+
+ if (info->components_num >= 3) {
state->ref_frame.cb = state->ref_frame.luma + size;
state->ref_frame.cr = state->ref_frame.cb + size / chroma_div;
} else {
@@ -1036,20 +1397,11 @@ static int vicodec_start_streaming(struct vb2_queue *q,
state->ref_frame.cr = NULL;
}
- if (info->id == V4L2_PIX_FMT_FWHT || info->components_num == 4)
+ if (info->components_num == 4)
state->ref_frame.alpha =
state->ref_frame.cr + size / chroma_div;
else
state->ref_frame.alpha = NULL;
-
- ctx->last_src_buf = NULL;
- ctx->last_dst_buf = NULL;
- state->gop_cnt = 0;
- ctx->cur_buf_offset = 0;
- ctx->comp_size = 0;
- ctx->comp_magic_cnt = 0;
- ctx->comp_has_frame = false;
-
return 0;
}
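For reference, the total_planes_size computed above is just the sum of the per-component planes. A worked example, assuming a 1280x720 8-bit three-component 4:2:0 format (width_div = height_div = 2, so chroma_div = 4); the real divisors come from the v4l2_fwht_pixfmt_info table, not from this sketch:

#include <stdio.h>

int main(void)
{
	unsigned int size = 1280 * 720;		/* coded_width * coded_height */
	unsigned int chroma_div = 2 * 2;	/* width_div * height_div */
	unsigned int total = size + 2 * (size / chroma_div);

	/* 921600 luma bytes + 2 * 230400 chroma bytes = 1382400 bytes */
	printf("total_planes_size = %u\n", total);
	return 0;
}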
@@ -1059,11 +1411,20 @@ static void vicodec_stop_streaming(struct vb2_queue *q)
vicodec_return_bufs(q, VB2_BUF_STATE_ERROR);
- if (!V4L2_TYPE_IS_OUTPUT(q->type))
- return;
-
- kvfree(ctx->state.ref_frame.luma);
- kvfree(ctx->state.compressed_frame);
+ if ((!V4L2_TYPE_IS_OUTPUT(q->type) && !ctx->is_enc) ||
+ (V4L2_TYPE_IS_OUTPUT(q->type) && ctx->is_enc)) {
+ kvfree(ctx->state.ref_frame.luma);
+ ctx->comp_max_size = 0;
+ ctx->source_changed = false;
+ }
+ if (V4L2_TYPE_IS_OUTPUT(q->type) && !ctx->is_enc) {
+ ctx->cur_buf_offset = 0;
+ ctx->comp_size = 0;
+ ctx->header_size = 0;
+ ctx->comp_magic_cnt = 0;
+ ctx->comp_has_frame = 0;
+ ctx->comp_has_next_frame = 0;
+ }
}
static const struct vb2_ops vicodec_qops = {
@@ -1204,8 +1565,10 @@ static int vicodec_open(struct file *file)
ctx->q_data[V4L2_M2M_SRC].info =
ctx->is_enc ? v4l2_fwht_get_pixfmt(0) : &pixfmt_fwht;
- ctx->q_data[V4L2_M2M_SRC].width = 1280;
- ctx->q_data[V4L2_M2M_SRC].height = 720;
+ ctx->q_data[V4L2_M2M_SRC].coded_width = 1280;
+ ctx->q_data[V4L2_M2M_SRC].coded_height = 720;
+ ctx->q_data[V4L2_M2M_SRC].visible_width = 1280;
+ ctx->q_data[V4L2_M2M_SRC].visible_height = 720;
size = 1280 * 720 * ctx->q_data[V4L2_M2M_SRC].info->sizeimage_mult /
ctx->q_data[V4L2_M2M_SRC].info->sizeimage_div;
if (ctx->is_enc)
@@ -1213,16 +1576,17 @@ static int vicodec_open(struct file *file)
else
ctx->q_data[V4L2_M2M_SRC].sizeimage =
size + sizeof(struct fwht_cframe_hdr);
- ctx->q_data[V4L2_M2M_DST] = ctx->q_data[V4L2_M2M_SRC];
- ctx->q_data[V4L2_M2M_DST].info =
- ctx->is_enc ? &pixfmt_fwht : v4l2_fwht_get_pixfmt(0);
- size = 1280 * 720 * ctx->q_data[V4L2_M2M_DST].info->sizeimage_mult /
- ctx->q_data[V4L2_M2M_DST].info->sizeimage_div;
- if (ctx->is_enc)
- ctx->q_data[V4L2_M2M_DST].sizeimage =
- size + sizeof(struct fwht_cframe_hdr);
- else
- ctx->q_data[V4L2_M2M_DST].sizeimage = size;
+ if (ctx->is_enc) {
+ ctx->q_data[V4L2_M2M_DST] = ctx->q_data[V4L2_M2M_SRC];
+ ctx->q_data[V4L2_M2M_DST].info = &pixfmt_fwht;
+ ctx->q_data[V4L2_M2M_DST].sizeimage = 1280 * 720 *
+ ctx->q_data[V4L2_M2M_DST].info->sizeimage_mult /
+ ctx->q_data[V4L2_M2M_DST].info->sizeimage_div +
+ sizeof(struct fwht_cframe_hdr);
+ } else {
+ ctx->q_data[V4L2_M2M_DST].info = NULL;
+ }
+
ctx->state.colorspace = V4L2_COLORSPACE_REC709;
if (ctx->is_enc) {
@@ -1310,6 +1674,8 @@ static int vicodec_probe(struct platform_device *pdev)
#ifdef CONFIG_MEDIA_CONTROLLER
dev->mdev.dev = &pdev->dev;
strscpy(dev->mdev.model, "vicodec", sizeof(dev->mdev.model));
+ strscpy(dev->mdev.bus_info, "platform:vicodec",
+ sizeof(dev->mdev.bus_info));
media_device_init(&dev->mdev);
dev->v4l2_dev.mdev = &dev->mdev;
#endif
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index 89d9c4c21037..04250adf58e0 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -41,6 +41,11 @@ static unsigned debug;
module_param(debug, uint, 0644);
MODULE_PARM_DESC(debug, "activates debug info");
+/* Default transaction time in msec */
+static unsigned int default_transtime = 40; /* Max 25 fps */
+module_param(default_transtime, uint, 0644);
+MODULE_PARM_DESC(default_transtime, "default transaction time in ms");
+
#define MIN_W 32
#define MIN_H 32
#define MAX_W 640
@@ -58,11 +63,6 @@ MODULE_PARM_DESC(debug, "activates debug info");
/* In bytes, per queue */
#define MEM2MEM_VID_MEM_LIMIT (16 * 1024 * 1024)
-/* Default transaction time in msec */
-#define MEM2MEM_DEF_TRANSTIME 40
-#define MEM2MEM_COLOR_STEP (0xff >> 4)
-#define MEM2MEM_NUM_TILES 8
-
/* Flags that indicate processing mode */
#define MEM2MEM_HFLIP (1 << 0)
#define MEM2MEM_VFLIP (1 << 1)
@@ -82,22 +82,24 @@ static struct platform_device vim2m_pdev = {
struct vim2m_fmt {
u32 fourcc;
int depth;
- /* Types the format can be used for */
- u32 types;
};
static struct vim2m_fmt formats[] = {
{
- .fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
+ .fourcc = V4L2_PIX_FMT_RGB565, /* rrrrrggg gggbbbbb */
.depth = 16,
- /* Both capture and output format */
- .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT,
- },
- {
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB565X, /* gggbbbbb rrrrrggg */
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .depth = 24,
+ }, {
+ .fourcc = V4L2_PIX_FMT_BGR24,
+ .depth = 24,
+ }, {
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16,
- /* Output-only format */
- .types = MEM2MEM_OUTPUT,
},
};
@@ -146,9 +148,6 @@ struct vim2m_dev {
atomic_t num_inst;
struct mutex dev_mutex;
- spinlock_t irqlock;
-
- struct delayed_work work_run;
struct v4l2_m2m_dev *m2m_dev;
};
@@ -167,6 +166,10 @@ struct vim2m_ctx {
/* Transaction time (i.e. simulated processing time) in milliseconds */
u32 transtime;
+ struct mutex vb_mutex;
+ struct delayed_work work_run;
+ spinlock_t irqlock;
+
/* Abort requested by m2m */
int aborting;
@@ -201,136 +204,253 @@ static struct vim2m_q_data *get_q_data(struct vim2m_ctx *ctx,
return NULL;
}
+#define CLIP(__color) \
+ (u8)(((__color) > 0xff) ? 0xff : (((__color) < 0) ? 0 : (__color)))
-static int device_process(struct vim2m_ctx *ctx,
- struct vb2_v4l2_buffer *in_vb,
- struct vb2_v4l2_buffer *out_vb)
+static void copy_two_pixels(struct vim2m_fmt *in, struct vim2m_fmt *out,
+ u8 **src, u8 **dst, bool reverse)
{
- struct vim2m_dev *dev = ctx->dev;
- struct vim2m_q_data *q_data;
- u8 *p_in, *p_out;
- int x, y, t, w;
- int tile_w, bytes_left;
- int width, height, bytesperline;
+ u8 _r[2], _g[2], _b[2], *r, *g, *b;
+ int i, step;
- q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+	// If the format is the same, just copy the data, respecting the width
+ if (in->fourcc == out->fourcc) {
+ int depth = out->depth >> 3;
- width = q_data->width;
- height = q_data->height;
- bytesperline = (q_data->width * q_data->fmt->depth) >> 3;
+ if (reverse) {
+ if (in->fourcc == V4L2_PIX_FMT_YUYV) {
+ int u, v, y, y1;
- p_in = vb2_plane_vaddr(&in_vb->vb2_buf, 0);
- p_out = vb2_plane_vaddr(&out_vb->vb2_buf, 0);
- if (!p_in || !p_out) {
- v4l2_err(&dev->v4l2_dev,
- "Acquiring kernel pointers to buffers failed\n");
- return -EFAULT;
- }
+ *src -= 2;
- if (vb2_plane_size(&in_vb->vb2_buf, 0) >
- vb2_plane_size(&out_vb->vb2_buf, 0)) {
- v4l2_err(&dev->v4l2_dev, "Output buffer is too small\n");
- return -EINVAL;
- }
+ y1 = (*src)[0]; /* copy as second point */
+ u = (*src)[1];
+ y = (*src)[2]; /* copy as first point */
+ v = (*src)[3];
+
+ *src -= 2;
- tile_w = (width * (q_data[V4L2_M2M_DST].fmt->depth >> 3))
- / MEM2MEM_NUM_TILES;
- bytes_left = bytesperline - tile_w * MEM2MEM_NUM_TILES;
- w = 0;
-
- out_vb->sequence =
- get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE)->sequence++;
- in_vb->sequence = q_data->sequence++;
- out_vb->vb2_buf.timestamp = in_vb->vb2_buf.timestamp;
-
- if (in_vb->flags & V4L2_BUF_FLAG_TIMECODE)
- out_vb->timecode = in_vb->timecode;
- out_vb->field = in_vb->field;
- out_vb->flags = in_vb->flags &
- (V4L2_BUF_FLAG_TIMECODE |
- V4L2_BUF_FLAG_KEYFRAME |
- V4L2_BUF_FLAG_PFRAME |
- V4L2_BUF_FLAG_BFRAME |
- V4L2_BUF_FLAG_TSTAMP_SRC_MASK);
-
- switch (ctx->mode) {
- case MEM2MEM_HFLIP | MEM2MEM_VFLIP:
- p_out += bytesperline * height - bytes_left;
- for (y = 0; y < height; ++y) {
- for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
- if (w & 0x1) {
- for (x = 0; x < tile_w; ++x)
- *--p_out = *p_in++ +
- MEM2MEM_COLOR_STEP;
- } else {
- for (x = 0; x < tile_w; ++x)
- *--p_out = *p_in++ -
- MEM2MEM_COLOR_STEP;
- }
- ++w;
+ *(*dst)++ = y;
+ *(*dst)++ = u;
+ *(*dst)++ = y1;
+ *(*dst)++ = v;
+ return;
}
- p_in += bytes_left;
- p_out -= bytes_left;
+
+ memcpy(*dst, *src, depth);
+ memcpy(*dst + depth, *src - depth, depth);
+ *src -= depth << 1;
+ } else {
+ memcpy(*dst, *src, depth << 1);
+ *src += depth << 1;
+ }
+ *dst += depth << 1;
+ return;
+ }
+
+ /* Step 1: read two consecutive pixels from src pointer */
+
+ r = _r;
+ g = _g;
+ b = _b;
+
+ if (reverse)
+ step = -1;
+ else
+ step = 1;
+
+ switch (in->fourcc) {
+ case V4L2_PIX_FMT_RGB565: /* rrrrrggg gggbbbbb */
+ for (i = 0; i < 2; i++) {
+ u16 pix = *(u16 *)*src;
+
+ *r++ = (u8)(((pix & 0xf800) >> 11) << 3) | 0x07;
+ *g++ = (u8)((((pix & 0x07e0) >> 5)) << 2) | 0x03;
+ *b++ = (u8)((pix & 0x1f) << 3) | 0x07;
+
+ *src += step << 1;
}
break;
+ case V4L2_PIX_FMT_RGB565X: /* gggbbbbb rrrrrggg */
+ for (i = 0; i < 2; i++) {
+ u16 pix = *(u16 *)*src;
- case MEM2MEM_HFLIP:
- for (y = 0; y < height; ++y) {
- p_out += MEM2MEM_NUM_TILES * tile_w;
- for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
- if (w & 0x01) {
- for (x = 0; x < tile_w; ++x)
- *--p_out = *p_in++ +
- MEM2MEM_COLOR_STEP;
- } else {
- for (x = 0; x < tile_w; ++x)
- *--p_out = *p_in++ -
- MEM2MEM_COLOR_STEP;
- }
- ++w;
- }
- p_in += bytes_left;
- p_out += bytesperline;
+ *r++ = (u8)(((0x00f8 & pix) >> 3) << 3) | 0x07;
+ *g++ = (u8)(((pix & 0x7) << 2) |
+ ((pix & 0xe000) >> 5)) | 0x03;
+ *b++ = (u8)(((pix & 0x1f00) >> 8) << 3) | 0x07;
+
+ *src += step << 1;
}
break;
+ case V4L2_PIX_FMT_RGB24:
+ for (i = 0; i < 2; i++) {
+ *r++ = (*src)[0];
+ *g++ = (*src)[1];
+ *b++ = (*src)[2];
- case MEM2MEM_VFLIP:
- p_out += bytesperline * (height - 1);
- for (y = 0; y < height; ++y) {
- for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
- if (w & 0x1) {
- for (x = 0; x < tile_w; ++x)
- *p_out++ = *p_in++ +
- MEM2MEM_COLOR_STEP;
- } else {
- for (x = 0; x < tile_w; ++x)
- *p_out++ = *p_in++ -
- MEM2MEM_COLOR_STEP;
- }
- ++w;
- }
- p_in += bytes_left;
- p_out += bytes_left - 2 * bytesperline;
+ *src += step * 3;
}
break;
+ case V4L2_PIX_FMT_BGR24:
+ for (i = 0; i < 2; i++) {
+ *b++ = (*src)[0];
+ *g++ = (*src)[1];
+ *r++ = (*src)[2];
- default:
- for (y = 0; y < height; ++y) {
- for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
- if (w & 0x1) {
- for (x = 0; x < tile_w; ++x)
- *p_out++ = *p_in++ +
- MEM2MEM_COLOR_STEP;
- } else {
- for (x = 0; x < tile_w; ++x)
- *p_out++ = *p_in++ -
- MEM2MEM_COLOR_STEP;
- }
- ++w;
- }
- p_in += bytes_left;
- p_out += bytes_left;
+ *src += step * 3;
+ }
+ break;
+ default: /* V4L2_PIX_FMT_YUYV */
+ {
+ int u, v, y, y1, u1, v1, tmp;
+
+ if (reverse) {
+ *src -= 2;
+
+ y1 = (*src)[0]; /* copy as second point */
+ u = (*src)[1];
+ y = (*src)[2]; /* copy as first point */
+ v = (*src)[3];
+
+ *src -= 2;
+ } else {
+ y = *(*src)++;
+ u = *(*src)++;
+ y1 = *(*src)++;
+ v = *(*src)++;
}
+
+ u1 = (((u - 128) << 7) + (u - 128)) >> 6;
+ tmp = (((u - 128) << 1) + (u - 128) +
+ ((v - 128) << 2) + ((v - 128) << 1)) >> 3;
+ v1 = (((v - 128) << 1) + (v - 128)) >> 1;
+
+ *r++ = CLIP(y + v1);
+ *g++ = CLIP(y - tmp);
+ *b++ = CLIP(y + u1);
+
+ *r = CLIP(y1 + v1);
+ *g = CLIP(y1 - tmp);
+ *b = CLIP(y1 + u1);
+ break;
+ }
+ }
+
+	/* Step 2: store two consecutive pixels, reversing them if needed */
+
+ r = _r;
+ g = _g;
+ b = _b;
+
+ switch (out->fourcc) {
+ case V4L2_PIX_FMT_RGB565: /* rrrrrggg gggbbbbb */
+ for (i = 0; i < 2; i++) {
+ u16 *pix = (u16 *)*dst;
+
+ *pix = ((*r << 8) & 0xf800) | ((*g << 3) & 0x07e0) |
+ (*b >> 3);
+
+ *dst += 2;
+ }
+ return;
+ case V4L2_PIX_FMT_RGB565X: /* gggbbbbb rrrrrggg */
+ for (i = 0; i < 2; i++) {
+ u16 *pix = (u16 *)*dst;
+ u8 green = *g++ >> 2;
+
+ *pix = ((green << 8) & 0xe000) | (green & 0x07) |
+ ((*b++ << 5) & 0x1f00) | ((*r++ & 0xf8));
+
+ *dst += 2;
+ }
+ return;
+ case V4L2_PIX_FMT_RGB24:
+ for (i = 0; i < 2; i++) {
+ *(*dst)++ = *r++;
+ *(*dst)++ = *g++;
+ *(*dst)++ = *b++;
+ }
+ return;
+ case V4L2_PIX_FMT_BGR24:
+ for (i = 0; i < 2; i++) {
+ *(*dst)++ = *b++;
+ *(*dst)++ = *g++;
+ *(*dst)++ = *r++;
+ }
+ return;
+ default: /* V4L2_PIX_FMT_YUYV */
+ {
+ u8 y, y1, u, v;
+
+ y = ((8453 * (*r) + 16594 * (*g) + 3223 * (*b)
+ + 524288) >> 15);
+ u = ((-4878 * (*r) - 9578 * (*g) + 14456 * (*b)
+ + 4210688) >> 15);
+ v = ((14456 * (*r++) - 12105 * (*g++) - 2351 * (*b++)
+ + 4210688) >> 15);
+ y1 = ((8453 * (*r) + 16594 * (*g) + 3223 * (*b)
+ + 524288) >> 15);
+
+ *(*dst)++ = y;
+ *(*dst)++ = u;
+
+ *(*dst)++ = y1;
+ *(*dst)++ = v;
+ return;
+ }
+ }
+}
+
+static int device_process(struct vim2m_ctx *ctx,
+ struct vb2_v4l2_buffer *in_vb,
+ struct vb2_v4l2_buffer *out_vb)
+{
+ struct vim2m_dev *dev = ctx->dev;
+ struct vim2m_q_data *q_data_in, *q_data_out;
+ u8 *p_in, *p, *p_out;
+ int width, height, bytesperline, x, y, start, end, step;
+ struct vim2m_fmt *in, *out;
+
+ q_data_in = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ in = q_data_in->fmt;
+ width = q_data_in->width;
+ height = q_data_in->height;
+ bytesperline = (q_data_in->width * q_data_in->fmt->depth) >> 3;
+
+ q_data_out = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ out = q_data_out->fmt;
+
+ p_in = vb2_plane_vaddr(&in_vb->vb2_buf, 0);
+ p_out = vb2_plane_vaddr(&out_vb->vb2_buf, 0);
+ if (!p_in || !p_out) {
+ v4l2_err(&dev->v4l2_dev,
+ "Acquiring kernel pointers to buffers failed\n");
+ return -EFAULT;
+ }
+
+ out_vb->sequence = get_q_data(ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE)->sequence++;
+ in_vb->sequence = q_data_in->sequence++;
+ v4l2_m2m_buf_copy_metadata(in_vb, out_vb, true);
+
+ if (ctx->mode & MEM2MEM_VFLIP) {
+ start = height - 1;
+ end = -1;
+ step = -1;
+ } else {
+ start = 0;
+ end = height;
+ step = 1;
+ }
+ for (y = start; y != end; y += step) {
+ p = p_in + (y * bytesperline);
+ if (ctx->mode & MEM2MEM_HFLIP)
+ p += bytesperline - (q_data_in->fmt->depth >> 3);
+
+ for (x = 0; x < width >> 1; x++)
+ copy_two_pixels(in, out, &p, &p_out,
+ ctx->mode & MEM2MEM_HFLIP);
}
return 0;
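The YUYV arm of copy_two_pixels() above uses fixed-point arithmetic. The RGB-to-YUV direction is the easiest to decode: the constants are Q15 values (divide by 32768), and the luma row works out near 0.258*R + 0.506*G + 0.098*B + 16, which matches limited-range BT.601 coefficients; identifying the standard is an inference from the numbers, not something the patch states. A small check program:

#include <stdio.h>

int main(void)
{
	/* luma row: y = (8453*r + 16594*g + 3223*b + 524288) >> 15 */
	printf("Y : %.3f*R + %.3f*G + %.3f*B + %.1f\n",
	       8453.0 / 32768, 16594.0 / 32768, 3223.0 / 32768,
	       524288.0 / 32768);
	/* chroma rows share the +4210688 term, i.e. an offset of 128.5 */
	printf("Cb: %.3f*R + %.3f*G + %.3f*B + %.1f\n",
	       -4878.0 / 32768, -9578.0 / 32768, 14456.0 / 32768,
	       4210688.0 / 32768);
	return 0;
}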
@@ -373,7 +493,6 @@ static void job_abort(void *priv)
static void device_run(void *priv)
{
struct vim2m_ctx *ctx = priv;
- struct vim2m_dev *dev = ctx->dev;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
@@ -390,33 +509,34 @@ static void device_run(void *priv)
&ctx->hdl);
/* Run delayed work, which simulates a hardware irq */
- schedule_delayed_work(&dev->work_run, msecs_to_jiffies(ctx->transtime));
+ schedule_delayed_work(&ctx->work_run, msecs_to_jiffies(ctx->transtime));
}
static void device_work(struct work_struct *w)
{
- struct vim2m_dev *vim2m_dev =
- container_of(w, struct vim2m_dev, work_run.work);
struct vim2m_ctx *curr_ctx;
+ struct vim2m_dev *vim2m_dev;
struct vb2_v4l2_buffer *src_vb, *dst_vb;
unsigned long flags;
- curr_ctx = v4l2_m2m_get_curr_priv(vim2m_dev->m2m_dev);
+ curr_ctx = container_of(w, struct vim2m_ctx, work_run.work);
if (NULL == curr_ctx) {
pr_err("Instance released before the end of transaction\n");
return;
}
+ vim2m_dev = curr_ctx->dev;
+
src_vb = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
curr_ctx->num_processed++;
- spin_lock_irqsave(&vim2m_dev->irqlock, flags);
+ spin_lock_irqsave(&curr_ctx->irqlock, flags);
v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
- spin_unlock_irqrestore(&vim2m_dev->irqlock, flags);
+ spin_unlock_irqrestore(&curr_ctx->irqlock, flags);
if (curr_ctx->num_processed == curr_ctx->translen
|| curr_ctx->aborting) {
@@ -443,25 +563,11 @@ static int vidioc_querycap(struct file *file, void *priv,
static int enum_fmt(struct v4l2_fmtdesc *f, u32 type)
{
- int i, num;
struct vim2m_fmt *fmt;
- num = 0;
-
- for (i = 0; i < NUM_FORMATS; ++i) {
- if (formats[i].types & type) {
- /* index-th format of type type found ? */
- if (num == f->index)
- break;
- /* Correct type but haven't reached our index yet,
- * just increment per-type index */
- ++num;
- }
- }
-
- if (i < NUM_FORMATS) {
+ if (f->index < NUM_FORMATS) {
/* Format found */
- fmt = &formats[i];
+ fmt = &formats[f->index];
f->pixelformat = fmt->fourcc;
return 0;
}
@@ -552,12 +658,6 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.pixelformat = formats[0].fourcc;
fmt = find_format(f);
}
- if (!(fmt->types & MEM2MEM_CAPTURE)) {
- v4l2_err(&ctx->dev->v4l2_dev,
- "Fourcc format (0x%08x) invalid.\n",
- f->fmt.pix.pixelformat);
- return -EINVAL;
- }
f->fmt.pix.colorspace = ctx->colorspace;
f->fmt.pix.xfer_func = ctx->xfer_func;
f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
@@ -570,19 +670,12 @@ static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
struct vim2m_fmt *fmt;
- struct vim2m_ctx *ctx = file2ctx(file);
fmt = find_format(f);
if (!fmt) {
f->fmt.pix.pixelformat = formats[0].fourcc;
fmt = find_format(f);
}
- if (!(fmt->types & MEM2MEM_OUTPUT)) {
- v4l2_err(&ctx->dev->v4l2_dev,
- "Fourcc format (0x%08x) invalid.\n",
- f->fmt.pix.pixelformat);
- return -EINVAL;
- }
if (!f->fmt.pix.colorspace)
f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
@@ -674,6 +767,8 @@ static int vim2m_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_TRANS_TIME_MSEC:
ctx->transtime = ctrl->val;
+ if (ctx->transtime < 1)
+ ctx->transtime = 1;
break;
case V4L2_CID_TRANS_NUM_BUFS:
@@ -753,25 +848,29 @@ static int vim2m_queue_setup(struct vb2_queue *vq,
return 0;
}
-static int vim2m_buf_prepare(struct vb2_buffer *vb)
+static int vim2m_buf_out_validate(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE) {
+ dprintk(ctx->dev, "%s field isn't supported\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vim2m_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct vim2m_q_data *q_data;
dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type);
q_data = get_q_data(ctx, vb->vb2_queue->type);
- if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
- if (vbuf->field == V4L2_FIELD_ANY)
- vbuf->field = V4L2_FIELD_NONE;
- if (vbuf->field != V4L2_FIELD_NONE) {
- dprintk(ctx->dev, "%s field isn't supported\n",
- __func__);
- return -EINVAL;
- }
- }
-
if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
dprintk(ctx->dev, "%s data will not fit into plane (%lu < %lu)\n",
__func__, vb2_plane_size(vb, 0), (long)q_data->sizeimage);
@@ -803,7 +902,6 @@ static int vim2m_start_streaming(struct vb2_queue *q, unsigned count)
static void vim2m_stop_streaming(struct vb2_queue *q)
{
struct vim2m_ctx *ctx = vb2_get_drv_priv(q);
- struct vim2m_dev *dev = ctx->dev;
struct vb2_v4l2_buffer *vbuf;
unsigned long flags;
@@ -819,9 +917,9 @@ static void vim2m_stop_streaming(struct vb2_queue *q)
return;
v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req,
&ctx->hdl);
- spin_lock_irqsave(&ctx->dev->irqlock, flags);
+ spin_lock_irqsave(&ctx->irqlock, flags);
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
- spin_unlock_irqrestore(&ctx->dev->irqlock, flags);
+ spin_unlock_irqrestore(&ctx->irqlock, flags);
}
}
@@ -834,6 +932,7 @@ static void vim2m_buf_request_complete(struct vb2_buffer *vb)
static const struct vb2_ops vim2m_qops = {
.queue_setup = vim2m_queue_setup,
+ .buf_out_validate = vim2m_buf_out_validate,
.buf_prepare = vim2m_buf_prepare,
.buf_queue = vim2m_buf_queue,
.start_streaming = vim2m_start_streaming,
@@ -855,7 +954,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds
src_vq->ops = &vim2m_qops;
src_vq->mem_ops = &vb2_vmalloc_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
- src_vq->lock = &ctx->dev->dev_mutex;
+ src_vq->lock = &ctx->vb_mutex;
src_vq->supports_requests = true;
ret = vb2_queue_init(src_vq);
@@ -869,17 +968,16 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds
dst_vq->ops = &vim2m_qops;
dst_vq->mem_ops = &vb2_vmalloc_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
- dst_vq->lock = &ctx->dev->dev_mutex;
+ dst_vq->lock = &ctx->vb_mutex;
return vb2_queue_init(dst_vq);
}
-static const struct v4l2_ctrl_config vim2m_ctrl_trans_time_msec = {
+static struct v4l2_ctrl_config vim2m_ctrl_trans_time_msec = {
.ops = &vim2m_ctrl_ops,
.id = V4L2_CID_TRANS_TIME_MSEC,
.name = "Transaction Time (msec)",
.type = V4L2_CTRL_TYPE_INTEGER,
- .def = MEM2MEM_DEF_TRANSTIME,
.min = 1,
.max = 10001,
.step = 1,
@@ -921,6 +1019,8 @@ static int vim2m_open(struct file *file)
v4l2_ctrl_handler_init(hdl, 4);
v4l2_ctrl_new_std(hdl, &vim2m_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
v4l2_ctrl_new_std(hdl, &vim2m_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ vim2m_ctrl_trans_time_msec.def = default_transtime;
v4l2_ctrl_new_custom(hdl, &vim2m_ctrl_trans_time_msec, NULL);
v4l2_ctrl_new_custom(hdl, &vim2m_ctrl_trans_num_bufs, NULL);
if (hdl->error) {
@@ -944,6 +1044,10 @@ static int vim2m_open(struct file *file)
ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
+ mutex_init(&ctx->vb_mutex);
+ spin_lock_init(&ctx->irqlock);
+ INIT_DELAYED_WORK(&ctx->work_run, device_work);
+
if (IS_ERR(ctx->fh.m2m_ctx)) {
rc = PTR_ERR(ctx->fh.m2m_ctx);
@@ -1024,8 +1128,6 @@ static int vim2m_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
- spin_lock_init(&dev->irqlock);
-
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret)
return ret;
@@ -1037,7 +1139,6 @@ static int vim2m_probe(struct platform_device *pdev)
vfd = &dev->vfd;
vfd->lock = &dev->dev_mutex;
vfd->v4l2_dev = &dev->v4l2_dev;
- INIT_DELAYED_WORK(&dev->work_run, device_work);
ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
if (ret) {
@@ -1061,6 +1162,8 @@ static int vim2m_probe(struct platform_device *pdev)
#ifdef CONFIG_MEDIA_CONTROLLER
dev->mdev.dev = &pdev->dev;
strscpy(dev->mdev.model, "vim2m", sizeof(dev->mdev.model));
+ strscpy(dev->mdev.bus_info, "platform:vim2m",
+ sizeof(dev->mdev.bus_info));
media_device_init(&dev->mdev);
dev->mdev.ops = &m2m_media_ops;
dev->v4l2_dev.mdev = &dev->mdev;
diff --git a/drivers/media/platform/vimc/Makefile b/drivers/media/platform/vimc/Makefile
index 4b2e3de7856e..c4fc8e7d365a 100644
--- a/drivers/media/platform/vimc/Makefile
+++ b/drivers/media/platform/vimc/Makefile
@@ -5,6 +5,7 @@ vimc_common-objs := vimc-common.o
vimc_debayer-objs := vimc-debayer.o
vimc_scaler-objs := vimc-scaler.o
vimc_sensor-objs := vimc-sensor.o
+vimc_streamer-objs := vimc-streamer.o
 obj-$(CONFIG_VIDEO_VIMC) += vimc.o vimc_capture.o vimc_common.o vimc_debayer.o \
- vimc_scaler.o vimc_sensor.o
+ vimc_scaler.o vimc_sensor.o vimc_streamer.o
diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c
index 3f7e9ed56633..a6f8715d2b44 100644
--- a/drivers/media/platform/vimc/vimc-capture.c
+++ b/drivers/media/platform/vimc/vimc-capture.c
@@ -24,6 +24,7 @@
#include <media/videobuf2-vmalloc.h>
#include "vimc-common.h"
+#include "vimc-streamer.h"
#define VIMC_CAP_DRV_NAME "vimc-capture"
@@ -44,7 +45,7 @@ struct vimc_cap_device {
spinlock_t qlock;
struct mutex lock;
u32 sequence;
- struct media_pipeline pipe;
+ struct vimc_stream stream;
};
static const struct v4l2_pix_format fmt_default = {
@@ -69,12 +70,10 @@ struct vimc_cap_buffer {
static int vimc_cap_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct vimc_cap_device *vcap = video_drvdata(file);
-
- strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
+ strscpy(cap->driver, VIMC_PDEV_NAME, sizeof(cap->driver));
strscpy(cap->card, KBUILD_MODNAME, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
- "platform:%s", vcap->vdev.v4l2_dev->name);
+ "platform:%s", VIMC_PDEV_NAME);
return 0;
}
@@ -248,14 +247,13 @@ static int vimc_cap_start_streaming(struct vb2_queue *vq, unsigned int count)
vcap->sequence = 0;
/* Start the media pipeline */
- ret = media_pipeline_start(entity, &vcap->pipe);
+ ret = media_pipeline_start(entity, &vcap->stream.pipe);
if (ret) {
vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED);
return ret;
}
- /* Enable streaming from the pipe */
- ret = vimc_pipeline_s_stream(&vcap->vdev.entity, 1);
+ ret = vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 1);
if (ret) {
media_pipeline_stop(entity);
vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED);
@@ -273,8 +271,7 @@ static void vimc_cap_stop_streaming(struct vb2_queue *vq)
{
struct vimc_cap_device *vcap = vb2_get_drv_priv(vq);
- /* Disable streaming from the pipe */
- vimc_pipeline_s_stream(&vcap->vdev.entity, 0);
+ vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 0);
/* Stop the media pipeline */
media_pipeline_stop(&vcap->vdev.entity);
@@ -355,8 +352,8 @@ static void vimc_cap_comp_unbind(struct device *comp, struct device *master,
kfree(vcap);
}
-static void vimc_cap_process_frame(struct vimc_ent_device *ved,
- struct media_pad *sink, const void *frame)
+static void *vimc_cap_process_frame(struct vimc_ent_device *ved,
+ const void *frame)
{
struct vimc_cap_device *vcap = container_of(ved, struct vimc_cap_device,
ved);
@@ -370,7 +367,7 @@ static void vimc_cap_process_frame(struct vimc_ent_device *ved,
typeof(*vimc_buf), list);
if (!vimc_buf) {
spin_unlock(&vcap->qlock);
- return;
+ return ERR_PTR(-EAGAIN);
}
/* Remove this entry from the list */
@@ -391,6 +388,7 @@ static void vimc_cap_process_frame(struct vimc_ent_device *ved,
vb2_set_plane_payload(&vimc_buf->vb2.vb2_buf, 0,
vcap->format.sizeimage);
vb2_buffer_done(&vimc_buf->vb2.vb2_buf, VB2_BUF_STATE_DONE);
+ return NULL;
}
static int vimc_cap_comp_bind(struct device *comp, struct device *master,
diff --git a/drivers/media/platform/vimc/vimc-common.c b/drivers/media/platform/vimc/vimc-common.c
index 867e24dbd6b5..c1a74bb2df58 100644
--- a/drivers/media/platform/vimc/vimc-common.c
+++ b/drivers/media/platform/vimc/vimc-common.c
@@ -207,41 +207,6 @@ const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat)
}
EXPORT_SYMBOL_GPL(vimc_pix_map_by_pixelformat);
-int vimc_propagate_frame(struct media_pad *src, const void *frame)
-{
- struct media_link *link;
-
- if (!(src->flags & MEDIA_PAD_FL_SOURCE))
- return -EINVAL;
-
- /* Send this frame to all sink pads that are direct linked */
- list_for_each_entry(link, &src->entity->links, list) {
- if (link->source == src &&
- (link->flags & MEDIA_LNK_FL_ENABLED)) {
- struct vimc_ent_device *ved = NULL;
- struct media_entity *entity = link->sink->entity;
-
- if (is_media_entity_v4l2_subdev(entity)) {
- struct v4l2_subdev *sd =
- container_of(entity, struct v4l2_subdev,
- entity);
- ved = v4l2_get_subdevdata(sd);
- } else if (is_media_entity_v4l2_video_device(entity)) {
- struct video_device *vdev =
- container_of(entity,
- struct video_device,
- entity);
- ved = video_get_drvdata(vdev);
- }
- if (ved && ved->process_frame)
- ved->process_frame(ved, link->sink, frame);
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vimc_propagate_frame);
-
/* Helper function to allocate and initialize pads */
struct media_pad *vimc_pads_init(u16 num_pads, const unsigned long *pads_flag)
{
diff --git a/drivers/media/platform/vimc/vimc-common.h b/drivers/media/platform/vimc/vimc-common.h
index 2e9981b18166..84539430b5e7 100644
--- a/drivers/media/platform/vimc/vimc-common.h
+++ b/drivers/media/platform/vimc/vimc-common.h
@@ -22,6 +22,8 @@
#include <media/media-device.h>
#include <media/v4l2-device.h>
+#define VIMC_PDEV_NAME "vimc"
+
/* VIMC-specific controls */
#define VIMC_CID_VIMC_BASE (0x00f00000 | 0xf000)
#define VIMC_CID_VIMC_CLASS (0x00f00000 | 1)
@@ -113,24 +115,13 @@ struct vimc_pix_map {
struct vimc_ent_device {
struct media_entity *ent;
struct media_pad *pads;
- void (*process_frame)(struct vimc_ent_device *ved,
- struct media_pad *sink, const void *frame);
+ void * (*process_frame)(struct vimc_ent_device *ved,
+ const void *frame);
void (*vdev_get_format)(struct vimc_ent_device *ved,
struct v4l2_pix_format *fmt);
};
/**
- * vimc_propagate_frame - propagate a frame through the topology
- *
- * @src: the source pad where the frame is being originated
- * @frame: the frame to be propagated
- *
- * This function will call the process_frame callback from the vimc_ent_device
- * struct of the nodes directly connected to the @src pad
- */
-int vimc_propagate_frame(struct media_pad *src, const void *frame);
-
-/**
* vimc_pads_init - initialize pads
*
* @num_pads: number of pads to initialize
diff --git a/drivers/media/platform/vimc/vimc-core.c b/drivers/media/platform/vimc/vimc-core.c
index ce809d2e3d53..c2fdf3ea67ed 100644
--- a/drivers/media/platform/vimc/vimc-core.c
+++ b/drivers/media/platform/vimc/vimc-core.c
@@ -24,7 +24,6 @@
#include "vimc-common.h"
-#define VIMC_PDEV_NAME "vimc"
#define VIMC_MDEV_MODEL_NAME "VIMC MDEV"
#define VIMC_ENT_LINK(src, srcpad, sink, sinkpad, link_flags) { \
@@ -319,6 +318,8 @@ static int vimc_probe(struct platform_device *pdev)
/* Initialize media device */
strscpy(vimc->mdev.model, VIMC_MDEV_MODEL_NAME,
sizeof(vimc->mdev.model));
+ snprintf(vimc->mdev.bus_info, sizeof(vimc->mdev.bus_info),
+ "platform:%s", VIMC_PDEV_NAME);
vimc->mdev.dev = &pdev->dev;
media_device_init(&vimc->mdev);
diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c
index 77887f66f323..7d77c63b99d2 100644
--- a/drivers/media/platform/vimc/vimc-debayer.c
+++ b/drivers/media/platform/vimc/vimc-debayer.c
@@ -321,7 +321,6 @@ static void vimc_deb_set_rgb_mbus_fmt_rgb888_1x24(struct vimc_deb_device *vdeb,
static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable)
{
struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
- int ret;
if (enable) {
const struct vimc_pix_map *vpix;
@@ -351,22 +350,10 @@ static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable)
if (!vdeb->src_frame)
return -ENOMEM;
- /* Turn the stream on in the subdevices directly connected */
- ret = vimc_pipeline_s_stream(&vdeb->sd.entity, 1);
- if (ret) {
- vfree(vdeb->src_frame);
- vdeb->src_frame = NULL;
- return ret;
- }
} else {
if (!vdeb->src_frame)
return 0;
- /* Disable streaming from the pipe */
- ret = vimc_pipeline_s_stream(&vdeb->sd.entity, 0);
- if (ret)
- return ret;
-
vfree(vdeb->src_frame);
vdeb->src_frame = NULL;
}
@@ -480,9 +467,8 @@ static void vimc_deb_calc_rgb_sink(struct vimc_deb_device *vdeb,
}
}
-static void vimc_deb_process_frame(struct vimc_ent_device *ved,
- struct media_pad *sink,
- const void *sink_frame)
+static void *vimc_deb_process_frame(struct vimc_ent_device *ved,
+ const void *sink_frame)
{
struct vimc_deb_device *vdeb = container_of(ved, struct vimc_deb_device,
ved);
@@ -491,7 +477,7 @@ static void vimc_deb_process_frame(struct vimc_ent_device *ved,
/* If the stream in this node is not active, just return */
if (!vdeb->src_frame)
- return;
+ return ERR_PTR(-EINVAL);
for (i = 0; i < vdeb->sink_fmt.height; i++)
for (j = 0; j < vdeb->sink_fmt.width; j++) {
@@ -499,12 +485,8 @@ static void vimc_deb_process_frame(struct vimc_ent_device *ved,
vdeb->set_rgb_src(vdeb, i, j, rgb);
}
- /* Propagate the frame through all source pads */
- for (i = 1; i < vdeb->sd.entity.num_pads; i++) {
- struct media_pad *pad = &vdeb->sd.entity.pads[i];
+ return vdeb->src_frame;
- vimc_propagate_frame(pad, vdeb->src_frame);
- }
}
static void vimc_deb_comp_unbind(struct device *comp, struct device *master,
diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c
index b0952ee86296..39b2a73dfcc1 100644
--- a/drivers/media/platform/vimc/vimc-scaler.c
+++ b/drivers/media/platform/vimc/vimc-scaler.c
@@ -217,7 +217,6 @@ static const struct v4l2_subdev_pad_ops vimc_sca_pad_ops = {
static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable)
{
struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
- int ret;
if (enable) {
const struct vimc_pix_map *vpix;
@@ -245,22 +244,10 @@ static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable)
if (!vsca->src_frame)
return -ENOMEM;
- /* Turn the stream on in the subdevices directly connected */
- ret = vimc_pipeline_s_stream(&vsca->sd.entity, 1);
- if (ret) {
- vfree(vsca->src_frame);
- vsca->src_frame = NULL;
- return ret;
- }
} else {
if (!vsca->src_frame)
return 0;
- /* Disable streaming from the pipe */
- ret = vimc_pipeline_s_stream(&vsca->sd.entity, 0);
- if (ret)
- return ret;
-
vfree(vsca->src_frame);
vsca->src_frame = NULL;
}
@@ -346,26 +333,19 @@ static void vimc_sca_fill_src_frame(const struct vimc_sca_device *const vsca,
vimc_sca_scale_pix(vsca, i, j, sink_frame);
}
-static void vimc_sca_process_frame(struct vimc_ent_device *ved,
- struct media_pad *sink,
- const void *sink_frame)
+static void *vimc_sca_process_frame(struct vimc_ent_device *ved,
+ const void *sink_frame)
{
struct vimc_sca_device *vsca = container_of(ved, struct vimc_sca_device,
ved);
- unsigned int i;
/* If the stream in this node is not active, just return */
if (!vsca->src_frame)
- return;
+ return ERR_PTR(-EINVAL);
vimc_sca_fill_src_frame(vsca, sink_frame);
- /* Propagate the frame through all source pads */
- for (i = 1; i < vsca->sd.entity.num_pads; i++) {
- struct media_pad *pad = &vsca->sd.entity.pads[i];
-
- vimc_propagate_frame(pad, vsca->src_frame);
- }
+ return vsca->src_frame;
};
static void vimc_sca_comp_unbind(struct device *comp, struct device *master,
diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c
index 32ca9c6172b1..93961a1e694f 100644
--- a/drivers/media/platform/vimc/vimc-sensor.c
+++ b/drivers/media/platform/vimc/vimc-sensor.c
@@ -16,8 +16,6 @@
*/
#include <linux/component.h>
-#include <linux/freezer.h>
-#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
@@ -201,38 +199,27 @@ static const struct v4l2_subdev_pad_ops vimc_sen_pad_ops = {
.set_fmt = vimc_sen_set_fmt,
};
-static int vimc_sen_tpg_thread(void *data)
+static void *vimc_sen_process_frame(struct vimc_ent_device *ved,
+ const void *sink_frame)
{
- struct vimc_sen_device *vsen = data;
- unsigned int i;
-
- set_freezable();
- set_current_state(TASK_UNINTERRUPTIBLE);
-
- for (;;) {
- try_to_freeze();
- if (kthread_should_stop())
- break;
-
- tpg_fill_plane_buffer(&vsen->tpg, 0, 0, vsen->frame);
+ struct vimc_sen_device *vsen = container_of(ved, struct vimc_sen_device,
+ ved);
+ const struct vimc_pix_map *vpix;
+ unsigned int frame_size;
- /* Send the frame to all source pads */
- for (i = 0; i < vsen->sd.entity.num_pads; i++)
- vimc_propagate_frame(&vsen->sd.entity.pads[i],
- vsen->frame);
+ /* Calculate the frame size */
+ vpix = vimc_pix_map_by_code(vsen->mbus_format.code);
+ frame_size = vsen->mbus_format.width * vpix->bpp *
+ vsen->mbus_format.height;
- /* 60 frames per second */
- schedule_timeout(HZ/60);
- }
-
- return 0;
+ tpg_fill_plane_buffer(&vsen->tpg, 0, 0, vsen->frame);
+ return vsen->frame;
}
static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
{
struct vimc_sen_device *vsen =
container_of(sd, struct vimc_sen_device, sd);
- int ret;
if (enable) {
const struct vimc_pix_map *vpix;
@@ -258,26 +245,8 @@ static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
/* configure the test pattern generator */
vimc_sen_tpg_s_format(vsen);
- /* Initialize the image generator thread */
- vsen->kthread_sen = kthread_run(vimc_sen_tpg_thread, vsen,
- "%s-sen", vsen->sd.v4l2_dev->name);
- if (IS_ERR(vsen->kthread_sen)) {
- dev_err(vsen->dev, "%s: kernel_thread() failed\n",
- vsen->sd.name);
- vfree(vsen->frame);
- vsen->frame = NULL;
- return PTR_ERR(vsen->kthread_sen);
- }
} else {
- if (!vsen->kthread_sen)
- return 0;
-
- /* Stop image generator */
- ret = kthread_stop(vsen->kthread_sen);
- if (ret)
- return ret;
- vsen->kthread_sen = NULL;
vfree(vsen->frame);
vsen->frame = NULL;
return 0;
@@ -413,6 +382,7 @@ static int vimc_sen_comp_bind(struct device *comp, struct device *master,
if (ret)
goto err_free_hdl;
+ vsen->ved.process_frame = vimc_sen_process_frame;
dev_set_drvdata(comp, &vsen->ved);
vsen->dev = comp;
diff --git a/drivers/media/platform/vimc/vimc-streamer.c b/drivers/media/platform/vimc/vimc-streamer.c
new file mode 100644
index 000000000000..fcc897fb247b
--- /dev/null
+++ b/drivers/media/platform/vimc/vimc-streamer.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vimc-streamer.c Virtual Media Controller Driver
+ *
+ * Copyright (C) 2018 Lucas A. M. Magalhães <lucmaga@gmail.com>
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+
+#include "vimc-streamer.h"
+
+/**
+ * vimc_get_source_entity - get the entity connected with the first sink pad
+ *
+ * @ent: reference media_entity
+ *
+ * Helper function that returns the media entity containing the source pad
+ * linked with the first sink pad from the given media entity pad list.
+ */
+static struct media_entity *vimc_get_source_entity(struct media_entity *ent)
+{
+ struct media_pad *pad;
+ int i;
+
+ for (i = 0; i < ent->num_pads; i++) {
+ if (ent->pads[i].flags & MEDIA_PAD_FL_SOURCE)
+ continue;
+ pad = media_entity_remote_pad(&ent->pads[i]);
+ return pad ? pad->entity : NULL;
+ }
+ return NULL;
+}
+
+/*
+ * vimc_streamer_pipeline_terminate - disable streaming in the whole pipeline
+ *
+ * @stream: the pointer to the stream structure with the pipeline to be
+ * disabled.
+ *
+ * Calls s_stream to disable the stream in each entity of the pipeline
+ *
+ */
+static void vimc_streamer_pipeline_terminate(struct vimc_stream *stream)
+{
+ struct media_entity *entity;
+ struct v4l2_subdev *sd;
+
+ while (stream->pipe_size) {
+ stream->pipe_size--;
+ entity = stream->ved_pipeline[stream->pipe_size]->ent;
+ entity = vimc_get_source_entity(entity);
+ stream->ved_pipeline[stream->pipe_size] = NULL;
+
+ if (!is_media_entity_v4l2_subdev(entity))
+ continue;
+
+ sd = media_entity_to_v4l2_subdev(entity);
+ v4l2_subdev_call(sd, video, s_stream, 0);
+ }
+}
+
+/*
+ * vimc_streamer_pipeline_init - initializes the stream structure
+ *
+ * @stream: the pointer to the stream structure to be initialized
+ * @ved: the pointer to the vimc entity initializing the stream
+ *
+ * Initializes the stream structure. Walks through the entity graph to
+ * construct the pipeline used later by the streamer thread.
+ * Calls s_stream to enable streaming in all entities of the pipeline.
+ */
+static int vimc_streamer_pipeline_init(struct vimc_stream *stream,
+ struct vimc_ent_device *ved)
+{
+ struct media_entity *entity;
+ struct video_device *vdev;
+ struct v4l2_subdev *sd;
+ int ret = 0;
+
+ stream->pipe_size = 0;
+ while (stream->pipe_size < VIMC_STREAMER_PIPELINE_MAX_SIZE) {
+ if (!ved) {
+ vimc_streamer_pipeline_terminate(stream);
+ return -EINVAL;
+ }
+ stream->ved_pipeline[stream->pipe_size++] = ved;
+
+ entity = vimc_get_source_entity(ved->ent);
+		/* Check if the end of the pipeline was reached */
+ if (!entity)
+ return 0;
+
+ if (is_media_entity_v4l2_subdev(entity)) {
+ sd = media_entity_to_v4l2_subdev(entity);
+ ret = v4l2_subdev_call(sd, video, s_stream, 1);
+ if (ret && ret != -ENOIOCTLCMD) {
+ vimc_streamer_pipeline_terminate(stream);
+ return ret;
+ }
+ ved = v4l2_get_subdevdata(sd);
+ } else {
+ vdev = container_of(entity,
+ struct video_device,
+ entity);
+ ved = video_get_drvdata(vdev);
+ }
+ }
+
+ vimc_streamer_pipeline_terminate(stream);
+ return -EINVAL;
+}
+
+static int vimc_streamer_thread(void *data)
+{
+ struct vimc_stream *stream = data;
+ int i;
+
+ set_freezable();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+
+ for (;;) {
+ try_to_freeze();
+ if (kthread_should_stop())
+ break;
+
+ for (i = stream->pipe_size - 1; i >= 0; i--) {
+ stream->frame = stream->ved_pipeline[i]->process_frame(
+ stream->ved_pipeline[i],
+ stream->frame);
+ if (!stream->frame)
+ break;
+ if (IS_ERR(stream->frame))
+ break;
+ }
+		/* aim for roughly 60 frames per second */
+ schedule_timeout(HZ / 60);
+ }
+
+ return 0;
+}
+
+int vimc_streamer_s_stream(struct vimc_stream *stream,
+ struct vimc_ent_device *ved,
+ int enable)
+{
+ int ret;
+
+ if (!stream || !ved)
+ return -EINVAL;
+
+ if (enable) {
+ if (stream->kthread)
+ return 0;
+
+ ret = vimc_streamer_pipeline_init(stream, ved);
+ if (ret)
+ return ret;
+
+ stream->kthread = kthread_run(vimc_streamer_thread, stream,
+ "vimc-streamer thread");
+
+ if (IS_ERR(stream->kthread))
+ return PTR_ERR(stream->kthread);
+
+ } else {
+ if (!stream->kthread)
+ return 0;
+
+ ret = kthread_stop(stream->kthread);
+ if (ret)
+ return ret;
+
+ stream->kthread = NULL;
+
+ vimc_streamer_pipeline_terminate(stream);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vimc_streamer_s_stream);
+
+MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Streamer");
+MODULE_AUTHOR("Lucas A. M. Magalhães <lucmaga@gmail.com>");
+MODULE_LICENSE("GPL");
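Unlike the removed per-entity kthreads, the streamer thread above drives the whole pipeline itself: it walks ved_pipeline[] from the far end towards the capture device, handing each stage the frame pointer returned by the previous one, and stops the current iteration on a NULL or ERR_PTR return. A minimal, purely illustrative process_frame() hook written against the vimc_ent_device interface; it is not part of this patch:

#include <linux/err.h>

#include "vimc-common.h"

/* Pass-through stage: a real entity would fill and return its own
 * src_frame buffer; returning the sink frame unchanged keeps the
 * pipeline iteration in vimc_streamer_thread() going. */
static void *passthrough_process_frame(struct vimc_ent_device *ved,
				       const void *sink_frame)
{
	if (!sink_frame)
		return ERR_PTR(-EINVAL);
	return (void *)sink_frame;
}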
diff --git a/drivers/media/platform/vimc/vimc-streamer.h b/drivers/media/platform/vimc/vimc-streamer.h
new file mode 100644
index 000000000000..752af2e2d5a2
--- /dev/null
+++ b/drivers/media/platform/vimc/vimc-streamer.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vimc-streamer.h Virtual Media Controller Driver
+ *
+ * Copyright (C) 2018 Lucas A. M. Magalhães <lucmaga@gmail.com>
+ *
+ */
+
+#ifndef _VIMC_STREAMER_H_
+#define _VIMC_STREAMER_H_
+
+#include <media/media-device.h>
+
+#include "vimc-common.h"
+
+#define VIMC_STREAMER_PIPELINE_MAX_SIZE 16
+
+struct vimc_stream {
+ struct media_pipeline pipe;
+ struct vimc_ent_device *ved_pipeline[VIMC_STREAMER_PIPELINE_MAX_SIZE];
+ unsigned int pipe_size;
+ u8 *frame;
+ struct task_struct *kthread;
+};
+
+/**
+ * vimc_streamer_s_stream - start/stop the stream
+ *
+ * @stream: the pointer to the stream to start or stop
+ * @ved: The last entity of the streamer pipeline
+ * @enable: any non-zero value starts the stream, zero stops it
+ *
+ */
+int vimc_streamer_s_stream(struct vimc_stream *stream,
+ struct vimc_ent_device *ved,
+ int enable);
+
+#endif //_VIMC_STREAMER_H_
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index c931f007e5b0..29e7b14fa704 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -371,7 +371,7 @@ static int vidioc_s_parm(struct file *file, void *fh,
if (vdev->vfl_dir == VFL_DIR_RX)
return vivid_vid_cap_s_parm(file, fh, parm);
- return vivid_vid_out_g_parm(file, fh, parm);
+ return -ENOTTY;
}
static int vidioc_log_status(struct file *file, void *fh)
@@ -1094,7 +1094,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
q = &dev->vb_vid_cap_q;
q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
V4L2_BUF_TYPE_VIDEO_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
+ if (!allocator)
+ q->io_modes |= VB2_USERPTR;
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct vivid_buffer);
q->ops = &vivid_vid_cap_qops;
@@ -1115,7 +1117,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
q = &dev->vb_vid_out_q;
q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
V4L2_BUF_TYPE_VIDEO_OUTPUT;
- q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_WRITE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_WRITE;
+ if (!allocator)
+ q->io_modes |= VB2_USERPTR;
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct vivid_buffer);
q->ops = &vivid_vid_out_qops;
@@ -1136,7 +1140,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
q = &dev->vb_vbi_cap_q;
q->type = dev->has_raw_vbi_cap ? V4L2_BUF_TYPE_VBI_CAPTURE :
V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
+ if (!allocator)
+ q->io_modes |= VB2_USERPTR;
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct vivid_buffer);
q->ops = &vivid_vbi_cap_qops;
@@ -1157,7 +1163,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
q = &dev->vb_vbi_out_q;
q->type = dev->has_raw_vbi_out ? V4L2_BUF_TYPE_VBI_OUTPUT :
V4L2_BUF_TYPE_SLICED_VBI_OUTPUT;
- q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_WRITE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_WRITE;
+ if (!allocator)
+ q->io_modes |= VB2_USERPTR;
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct vivid_buffer);
q->ops = &vivid_vbi_out_qops;
@@ -1177,7 +1185,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
/* initialize sdr_cap queue */
q = &dev->vb_sdr_cap_q;
q->type = V4L2_BUF_TYPE_SDR_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
+ if (!allocator)
+ q->io_modes |= VB2_USERPTR;
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct vivid_buffer);
q->ops = &vivid_sdr_cap_qops;
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index c059fc12668a..52eeda624d7e 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -124,7 +124,8 @@ static int vid_cap_queue_setup(struct vb2_queue *vq,
}
} else {
for (p = 0; p < buffers; p++)
- sizes[p] = tpg_g_line_width(&dev->tpg, p) * h +
+ sizes[p] = (tpg_g_line_width(&dev->tpg, p) * h) /
+ dev->fmt_cap->vdownsampling[p] +
dev->fmt_cap->data_offset[p];
}
@@ -161,7 +162,9 @@ static int vid_cap_buf_prepare(struct vb2_buffer *vb)
return -EINVAL;
}
for (p = 0; p < buffers; p++) {
- size = tpg_g_line_width(&dev->tpg, p) * dev->fmt_cap_rect.height +
+ size = (tpg_g_line_width(&dev->tpg, p) *
+ dev->fmt_cap_rect.height) /
+ dev->fmt_cap->vdownsampling[p] +
dev->fmt_cap->data_offset[p];
if (vb2_plane_size(vb, p) < size) {
@@ -545,7 +548,8 @@ int vivid_g_fmt_vid_cap(struct file *file, void *priv,
for (p = 0; p < mp->num_planes; p++) {
mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p);
mp->plane_fmt[p].sizeimage =
- tpg_g_line_width(&dev->tpg, p) * mp->height +
+ (tpg_g_line_width(&dev->tpg, p) * mp->height) /
+ dev->fmt_cap->vdownsampling[p] +
dev->fmt_cap->data_offset[p];
}
return 0;
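The three vivid capture hunks above add the missing division by vdownsampling[p], since chroma planes of vertically subsampled formats carry only a fraction of the luma height. A worked example, assuming a 640x360 three-plane 4:2:0 format with per-plane line widths of 640/320/320 and vdownsampling {1, 2, 2}; the real values come from tpg_g_line_width() and the vivid format table:

#include <stdio.h>

int main(void)
{
	unsigned int h = 360;
	unsigned int line_width[3] = { 640, 320, 320 };
	unsigned int vdiv[3] = { 1, 2, 2 };
	unsigned int p;

	/* prints 230400, 57600, 57600; without the division the two
	 * chroma planes would be sized twice as large as needed */
	for (p = 0; p < 3; p++)
		printf("plane %u: %u bytes\n", p, line_width[p] * h / vdiv[p]);
	return 0;
}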
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index ea250aee2b2e..e61b91b414f9 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -28,11 +28,12 @@ static int vid_out_queue_setup(struct vb2_queue *vq,
const struct vivid_fmt *vfmt = dev->fmt_out;
unsigned planes = vfmt->buffers;
unsigned h = dev->fmt_out_rect.height;
- unsigned size = dev->bytesperline_out[0] * h;
+ unsigned int size = dev->bytesperline_out[0] * h + vfmt->data_offset[0];
unsigned p;
for (p = vfmt->buffers; p < vfmt->planes; p++)
- size += dev->bytesperline_out[p] * h / vfmt->vdownsampling[p];
+ size += dev->bytesperline_out[p] * h / vfmt->vdownsampling[p] +
+ vfmt->data_offset[p];
if (dev->field_out == V4L2_FIELD_ALTERNATE) {
/*
@@ -62,12 +63,14 @@ static int vid_out_queue_setup(struct vb2_queue *vq,
if (sizes[0] < size)
return -EINVAL;
for (p = 1; p < planes; p++) {
- if (sizes[p] < dev->bytesperline_out[p] * h)
+ if (sizes[p] < dev->bytesperline_out[p] * h +
+ vfmt->data_offset[p])
return -EINVAL;
}
} else {
for (p = 0; p < planes; p++)
- sizes[p] = p ? dev->bytesperline_out[p] * h : size;
+ sizes[p] = p ? dev->bytesperline_out[p] * h +
+ vfmt->data_offset[p] : size;
}
if (vq->num_buffers + *nbuffers < 2)
@@ -81,21 +84,38 @@ static int vid_out_queue_setup(struct vb2_queue *vq,
return 0;
}
-static int vid_out_buf_prepare(struct vb2_buffer *vb)
+static int vid_out_buf_out_validate(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
- unsigned long size;
- unsigned planes;
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (dev->field_out != V4L2_FIELD_ALTERNATE)
+ vbuf->field = dev->field_out;
+ else if (vbuf->field != V4L2_FIELD_TOP &&
+ vbuf->field != V4L2_FIELD_BOTTOM)
+ return -EINVAL;
+ return 0;
+}
+
+static int vid_out_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ const struct vivid_fmt *vfmt = dev->fmt_out;
+ unsigned int planes = vfmt->buffers;
+ unsigned int h = dev->fmt_out_rect.height;
+ unsigned int size = dev->bytesperline_out[0] * h;
unsigned p;
+ for (p = vfmt->buffers; p < vfmt->planes; p++)
+ size += dev->bytesperline_out[p] * h / vfmt->vdownsampling[p];
+
dprintk(dev, 1, "%s\n", __func__);
if (WARN_ON(NULL == dev->fmt_out))
return -EINVAL;
- planes = dev->fmt_out->planes;
-
if (dev->buf_prepare_error) {
/*
* Error injection: test what happens if buf_prepare() returns
@@ -105,18 +125,13 @@ static int vid_out_buf_prepare(struct vb2_buffer *vb)
return -EINVAL;
}
- if (dev->field_out != V4L2_FIELD_ALTERNATE)
- vbuf->field = dev->field_out;
- else if (vbuf->field != V4L2_FIELD_TOP &&
- vbuf->field != V4L2_FIELD_BOTTOM)
- return -EINVAL;
-
for (p = 0; p < planes; p++) {
- size = dev->bytesperline_out[p] * dev->fmt_out_rect.height +
- vb->planes[p].data_offset;
+ if (p)
+ size = dev->bytesperline_out[p] * h;
+ size += vb->planes[p].data_offset;
if (vb2_get_plane_payload(vb, p) < size) {
- dprintk(dev, 1, "%s the payload is too small for plane %u (%lu < %lu)\n",
+ dprintk(dev, 1, "%s the payload is too small for plane %u (%lu < %u)\n",
__func__, p, vb2_get_plane_payload(vb, p), size);
return -EINVAL;
}
@@ -188,6 +203,7 @@ static void vid_out_buf_request_complete(struct vb2_buffer *vb)
const struct vb2_ops vivid_vid_out_qops = {
.queue_setup = vid_out_queue_setup,
+ .buf_out_validate = vid_out_buf_out_validate,
.buf_prepare = vid_out_buf_prepare,
.buf_queue = vid_out_buf_queue,
.start_streaming = vid_out_start_streaming,
@@ -321,7 +337,8 @@ int vivid_g_fmt_vid_out(struct file *file, void *priv,
for (p = 0; p < mp->num_planes; p++) {
mp->plane_fmt[p].bytesperline = dev->bytesperline_out[p];
mp->plane_fmt[p].sizeimage =
- mp->plane_fmt[p].bytesperline * mp->height;
+ mp->plane_fmt[p].bytesperline * mp->height +
+ fmt->data_offset[p];
}
for (p = fmt->buffers; p < fmt->planes; p++) {
unsigned stride = dev->bytesperline_out[p];
@@ -399,7 +416,7 @@ int vivid_try_fmt_vid_out(struct file *file, void *priv,
pfmt[p].bytesperline = bytesperline;
pfmt[p].sizeimage = (pfmt[p].bytesperline * mp->height) /
- fmt->vdownsampling[p];
+ fmt->vdownsampling[p] + fmt->data_offset[p];
memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
}
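
The vid-out changes above fold the same arithmetic into queue_setup, buf_prepare and the format ioctls; the sketch below (not code from the patch, helper name hypothetical) mirrors the sizes[] that vid_out_queue_setup now reports: buffer plane 0 also carries the vertically downsampled payload of any format planes packed into it (vfmt->buffers <= p < vfmt->planes), and every buffer plane adds its own data_offset.

/*
 * Hypothetical helper mirroring the per-plane minimum size computed in
 * vid_out_queue_setup after this patch.
 */
static unsigned int vid_out_plane_size(const struct vivid_dev *dev,
				       unsigned int plane)
{
	const struct vivid_fmt *vfmt = dev->fmt_out;
	unsigned int h = dev->fmt_out_rect.height;
	unsigned int size = dev->bytesperline_out[plane] * h +
			    vfmt->data_offset[plane];
	unsigned int p;

	if (plane == 0)
		for (p = vfmt->buffers; p < vfmt->planes; p++)
			size += dev->bytesperline_out[p] * h /
				vfmt->vdownsampling[p] +
				vfmt->data_offset[p];
	return size;
}

The field handling moved out of buf_prepare into the new buf_out_validate callback so the check can run as soon as the buffer is queued (including to a request), rather than only at prepare time.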
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index 771dfe1f7c20..7ceaf3222145 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -223,7 +223,7 @@ static void vsp1_video_calculate_partition(struct vsp1_pipeline *pipe,
* If the modulus is less than half of the partition size,
* the penultimate partition is reduced to half, which is added
* to the final partition: |1234|1234|1234|12|341|
- * to prevents this: |1234|1234|1234|1234|1|.
+ * to prevent this: |1234|1234|1234|1234|1|.
*/
if (modulus) {
/*