Diffstat (limited to 'drivers/staging')
-rw-r--r-- drivers/staging/Kconfig | 2
-rw-r--r-- drivers/staging/Makefile | 1
-rw-r--r-- drivers/staging/erofs/erofs_fs.h | 13
-rw-r--r-- drivers/staging/erofs/internal.h | 2
-rw-r--r-- drivers/staging/erofs/super.c | 20
-rw-r--r-- drivers/staging/fbtft/fbtft-core.c | 4
-rw-r--r-- drivers/staging/fieldbus/Documentation/fieldbus_dev.txt | 4
-rw-r--r-- drivers/staging/fieldbus/Kconfig | 1
-rw-r--r-- drivers/staging/fieldbus/anybuss/Kconfig | 1
-rw-r--r-- drivers/staging/iio/addac/adt7316.c | 3
-rw-r--r-- drivers/staging/iio/cdc/ad7150.c | 19
-rw-r--r-- drivers/staging/isdn/Kconfig | 12
-rw-r--r-- drivers/staging/isdn/Makefile | 8
-rw-r--r-- drivers/staging/isdn/TODO | 22
-rw-r--r-- drivers/staging/isdn/avm/Kconfig | 65
-rw-r--r-- drivers/staging/isdn/avm/Makefile | 12
-rw-r--r-- drivers/staging/isdn/avm/avm_cs.c | 166
-rw-r--r-- drivers/staging/isdn/avm/avmcard.h | 581
-rw-r--r-- drivers/staging/isdn/avm/b1.c | 804
-rw-r--r-- drivers/staging/isdn/avm/b1dma.c | 981
-rw-r--r-- drivers/staging/isdn/avm/b1isa.c | 243
-rw-r--r-- drivers/staging/isdn/avm/b1pci.c | 416
-rw-r--r-- drivers/staging/isdn/avm/b1pcmcia.c | 224
-rw-r--r-- drivers/staging/isdn/avm/c4.c | 1317
-rw-r--r-- drivers/staging/isdn/avm/t1isa.c | 594
-rw-r--r-- drivers/staging/isdn/avm/t1pci.c | 259
-rw-r--r-- drivers/staging/isdn/gigaset/Kconfig | 62
-rw-r--r-- drivers/staging/isdn/gigaset/Makefile | 17
-rw-r--r-- drivers/staging/isdn/gigaset/asyncdata.c | 606
-rw-r--r-- drivers/staging/isdn/gigaset/bas-gigaset.c | 2672
-rw-r--r-- drivers/staging/isdn/gigaset/capi.c | 2517
-rw-r--r-- drivers/staging/isdn/gigaset/common.c | 1153
-rw-r--r-- drivers/staging/isdn/gigaset/dummyll.c | 74
-rw-r--r-- drivers/staging/isdn/gigaset/ev-layer.c | 1910
-rw-r--r-- drivers/staging/isdn/gigaset/gigaset.h | 827
-rw-r--r-- drivers/staging/isdn/gigaset/interface.c | 613
-rw-r--r-- drivers/staging/isdn/gigaset/isocdata.c | 1006
-rw-r--r-- drivers/staging/isdn/gigaset/proc.c | 77
-rw-r--r-- drivers/staging/isdn/gigaset/ser-gigaset.c | 796
-rw-r--r-- drivers/staging/isdn/gigaset/usb-gigaset.c | 946
-rw-r--r-- drivers/staging/isdn/hysdn/Kconfig | 15
-rw-r--r-- drivers/staging/isdn/hysdn/Makefile | 12
-rw-r--r-- drivers/staging/isdn/hysdn/boardergo.c | 445
-rw-r--r-- drivers/staging/isdn/hysdn/boardergo.h | 100
-rw-r--r-- drivers/staging/isdn/hysdn/hycapi.c | 785
-rw-r--r-- drivers/staging/isdn/hysdn/hysdn_boot.c | 400
-rw-r--r-- drivers/staging/isdn/hysdn/hysdn_defs.h | 282
-rw-r--r-- drivers/staging/isdn/hysdn/hysdn_init.c | 213
-rw-r--r-- drivers/staging/isdn/hysdn/hysdn_net.c | 330
-rw-r--r-- drivers/staging/isdn/hysdn/hysdn_pof.h | 78
-rw-r--r-- drivers/staging/isdn/hysdn/hysdn_procconf.c | 411
-rw-r--r-- drivers/staging/isdn/hysdn/hysdn_proclog.c | 357
-rw-r--r-- drivers/staging/isdn/hysdn/hysdn_sched.c | 197
-rw-r--r-- drivers/staging/isdn/hysdn/ince1pc.h | 134
-rw-r--r-- drivers/staging/kpc2000/Kconfig | 2
-rw-r--r-- drivers/staging/kpc2000/kpc_dma/fileops.c | 4
-rw-r--r-- drivers/staging/media/Kconfig | 8
-rw-r--r-- drivers/staging/media/Makefile | 4
-rw-r--r-- drivers/staging/media/allegro-dvt/Kconfig | 16
-rw-r--r-- drivers/staging/media/allegro-dvt/Makefile | 5
-rw-r--r-- drivers/staging/media/allegro-dvt/TODO | 4
-rw-r--r-- drivers/staging/media/allegro-dvt/allegro-core.c | 3014
-rw-r--r-- drivers/staging/media/allegro-dvt/nal-h264.c | 1001
-rw-r--r-- drivers/staging/media/allegro-dvt/nal-h264.h | 208
-rw-r--r-- drivers/staging/media/bcm2048/radio-bcm2048.c | 7
-rw-r--r-- drivers/staging/media/davinci_vpfe/dm365_ipipe.c | 25
-rw-r--r-- drivers/staging/media/davinci_vpfe/dm365_isif.c | 8
-rw-r--r-- drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c | 8
-rw-r--r-- drivers/staging/media/davinci_vpfe/vpfe_video.c | 12
-rw-r--r-- drivers/staging/media/hantro/Kconfig | 23
-rw-r--r-- drivers/staging/media/hantro/Makefile | 15
-rw-r--r-- drivers/staging/media/hantro/TODO (renamed from drivers/staging/media/rockchip/vpu/TODO) | 0
-rw-r--r-- drivers/staging/media/hantro/hantro.h | 351
-rw-r--r-- drivers/staging/media/hantro/hantro_drv.c | 876
-rw-r--r-- drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c | 260
-rw-r--r-- drivers/staging/media/hantro/hantro_g1_regs.h | 301
-rw-r--r-- drivers/staging/media/hantro/hantro_h1_jpeg_enc.c | 125
-rw-r--r-- drivers/staging/media/hantro/hantro_h1_regs.h | 154
-rw-r--r-- drivers/staging/media/hantro/hantro_hw.h | 102
-rw-r--r-- drivers/staging/media/hantro/hantro_jpeg.c (renamed from drivers/staging/media/rockchip/vpu/rockchip_vpu_jpeg.c) | 41
-rw-r--r-- drivers/staging/media/hantro/hantro_jpeg.h | 13
-rw-r--r-- drivers/staging/media/hantro/hantro_mpeg2.c | 61
-rw-r--r-- drivers/staging/media/hantro/hantro_v4l2.c | 686
-rw-r--r-- drivers/staging/media/hantro/hantro_v4l2.h | 26
-rw-r--r-- drivers/staging/media/hantro/rk3288_vpu_hw.c | 187
-rw-r--r-- drivers/staging/media/hantro/rk3399_vpu_hw.c | 186
-rw-r--r-- drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c (renamed from drivers/staging/media/rockchip/vpu/rk3399_vpu_hw_jpeg_enc.c) | 42
-rw-r--r-- drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c | 266
-rw-r--r-- drivers/staging/media/hantro/rk3399_vpu_regs.h (renamed from drivers/staging/media/rockchip/vpu/rk3399_vpu_regs.h) | 2
-rw-r--r-- drivers/staging/media/imx/Makefile | 18
-rw-r--r-- drivers/staging/media/imx/imx-ic-common.c | 68
-rw-r--r-- drivers/staging/media/imx/imx-ic-prp.c | 36
-rw-r--r-- drivers/staging/media/imx/imx-ic-prpencvf.c | 90
-rw-r--r-- drivers/staging/media/imx/imx-ic.h | 6
-rw-r--r-- drivers/staging/media/imx/imx-media-capture.c | 97
-rw-r--r-- drivers/staging/media/imx/imx-media-csi.c | 51
-rw-r--r-- drivers/staging/media/imx/imx-media-dev-common.c | 346
-rw-r--r-- drivers/staging/media/imx/imx-media-dev.c | 449
-rw-r--r-- drivers/staging/media/imx/imx-media-fim.c | 9
-rw-r--r-- drivers/staging/media/imx/imx-media-internal-sd.c | 357
-rw-r--r-- drivers/staging/media/imx/imx-media-of.c | 41
-rw-r--r-- drivers/staging/media/imx/imx-media-utils.c | 170
-rw-r--r-- drivers/staging/media/imx/imx-media-vdic.c | 84
-rw-r--r-- drivers/staging/media/imx/imx-media.h | 116
-rw-r--r-- drivers/staging/media/imx/imx7-media-csi.c | 169
-rw-r--r-- drivers/staging/media/imx/imx7-mipi-csis.c | 41
-rw-r--r-- drivers/staging/media/ipu3/include/intel-ipu3.h | 2
-rw-r--r-- drivers/staging/media/ipu3/ipu3-css-fw.c | 6
-rw-r--r-- drivers/staging/media/ipu3/ipu3-css.c | 14
-rw-r--r-- drivers/staging/media/ipu3/ipu3-dmamap.c | 15
-rw-r--r-- drivers/staging/media/ipu3/ipu3-mmu.c | 125
-rw-r--r-- drivers/staging/media/ipu3/ipu3-mmu.h | 5
-rw-r--r-- drivers/staging/media/ipu3/ipu3-v4l2.c | 4
-rw-r--r-- drivers/staging/media/meson/vdec/Kconfig | 11
-rw-r--r-- drivers/staging/media/meson/vdec/Makefile | 8
-rw-r--r-- drivers/staging/media/meson/vdec/TODO | 8
-rw-r--r-- drivers/staging/media/meson/vdec/codec_mpeg12.c | 210
-rw-r--r-- drivers/staging/media/meson/vdec/codec_mpeg12.h | 14
-rw-r--r-- drivers/staging/media/meson/vdec/dos_regs.h | 98
-rw-r--r-- drivers/staging/media/meson/vdec/esparser.c | 324
-rw-r--r-- drivers/staging/media/meson/vdec/esparser.h | 32
-rw-r--r-- drivers/staging/media/meson/vdec/vdec.c | 1099
-rw-r--r-- drivers/staging/media/meson/vdec/vdec.h | 267
-rw-r--r-- drivers/staging/media/meson/vdec/vdec_1.c | 230
-rw-r--r-- drivers/staging/media/meson/vdec/vdec_1.h | 14
-rw-r--r-- drivers/staging/media/meson/vdec/vdec_helpers.c | 449
-rw-r--r-- drivers/staging/media/meson/vdec/vdec_helpers.h | 83
-rw-r--r-- drivers/staging/media/meson/vdec/vdec_platform.c | 101
-rw-r--r-- drivers/staging/media/meson/vdec/vdec_platform.h | 30
-rw-r--r-- drivers/staging/media/omap4iss/iss_video.c | 11
-rw-r--r-- drivers/staging/media/rockchip/vpu/Kconfig | 13
-rw-r--r-- drivers/staging/media/rockchip/vpu/Makefile | 11
-rw-r--r-- drivers/staging/media/rockchip/vpu/rk3288_vpu_hw.c | 118
-rw-r--r-- drivers/staging/media/rockchip/vpu/rk3288_vpu_hw_jpeg_enc.c | 125
-rw-r--r-- drivers/staging/media/rockchip/vpu/rk3288_vpu_regs.h | 442
-rw-r--r-- drivers/staging/media/rockchip/vpu/rk3399_vpu_hw.c | 118
-rw-r--r-- drivers/staging/media/rockchip/vpu/rockchip_vpu.h | 232
-rw-r--r-- drivers/staging/media/rockchip/vpu/rockchip_vpu_common.h | 29
-rw-r--r-- drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c | 542
-rw-r--r-- drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c | 671
-rw-r--r-- drivers/staging/media/rockchip/vpu/rockchip_vpu_hw.h | 58
-rw-r--r-- drivers/staging/media/rockchip/vpu/rockchip_vpu_jpeg.h | 14
-rw-r--r-- drivers/staging/media/soc_camera/imx074.c | 2
-rw-r--r-- drivers/staging/media/soc_camera/mt9t031.c | 2
-rw-r--r-- drivers/staging/media/soc_camera/soc_mt9v022.c | 2
-rw-r--r-- drivers/staging/media/soc_camera/soc_ov5642.c | 6
-rw-r--r-- drivers/staging/media/sunxi/cedrus/Makefile | 3
-rw-r--r-- drivers/staging/media/sunxi/cedrus/cedrus.c | 42
-rw-r--r-- drivers/staging/media/sunxi/cedrus/cedrus.h | 39
-rw-r--r-- drivers/staging/media/sunxi/cedrus/cedrus_dec.c | 13
-rw-r--r-- drivers/staging/media/sunxi/cedrus/cedrus_h264.c | 576
-rw-r--r-- drivers/staging/media/sunxi/cedrus/cedrus_hw.c | 6
-rw-r--r-- drivers/staging/media/sunxi/cedrus/cedrus_hw.h | 2
-rw-r--r-- drivers/staging/media/sunxi/cedrus/cedrus_regs.h | 91
-rw-r--r-- drivers/staging/media/sunxi/cedrus/cedrus_video.c | 9
-rw-r--r-- drivers/staging/media/tegra-vde/Kconfig | 1
-rw-r--r-- drivers/staging/media/tegra-vde/Makefile | 1
-rw-r--r-- drivers/staging/media/tegra-vde/dmabuf-cache.c | 226
-rw-r--r-- drivers/staging/media/tegra-vde/iommu.c | 157
-rw-r--r-- drivers/staging/media/tegra-vde/trace.h | 2
-rw-r--r-- drivers/staging/media/tegra-vde/uapi.h | 48
-rw-r--r-- drivers/staging/media/tegra-vde/vde.c (renamed from drivers/staging/media/tegra-vde/tegra-vde.c) | 212
-rw-r--r-- drivers/staging/media/tegra-vde/vde.h | 107
-rw-r--r-- drivers/staging/olpc_dcon/TODO | 7
-rw-r--r-- drivers/staging/olpc_dcon/olpc_dcon.c | 6
-rw-r--r-- drivers/staging/sm750fb/Kconfig | 2
-rw-r--r-- drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c | 6
-rw-r--r-- drivers/staging/vc04_services/bcm2835-camera/controls.c | 4
-rw-r--r-- drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c | 9
-rw-r--r-- drivers/staging/wilc1000/wilc_wlan.c | 8
-rw-r--r-- drivers/staging/wlan-ng/hfa384x_usb.c | 3
171 files changed, 36170 insertions, 3955 deletions
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index d5f771fafc21..7c96a01eef6c 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -118,4 +118,6 @@ source "drivers/staging/fieldbus/Kconfig"
source "drivers/staging/kpc2000/Kconfig"
+source "drivers/staging/isdn/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 0da0d3f0b5e4..fcaac9693b83 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -49,3 +49,4 @@ obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
obj-$(CONFIG_EROFS_FS) += erofs/
obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/
obj-$(CONFIG_KPC2000) += kpc2000/
+obj-$(CONFIG_ISDN_CAPI) += isdn/
diff --git a/drivers/staging/erofs/erofs_fs.h b/drivers/staging/erofs/erofs_fs.h
index fa52898df006..8ddb2b3e7d39 100644
--- a/drivers/staging/erofs/erofs_fs.h
+++ b/drivers/staging/erofs/erofs_fs.h
@@ -17,10 +17,16 @@
#define EROFS_SUPER_MAGIC_V1 0xE0F5E1E2
#define EROFS_SUPER_OFFSET 1024
+/*
+ * Any bits that aren't in EROFS_ALL_REQUIREMENTS should be
+ * incompatible with this kernel version.
+ */
+#define EROFS_ALL_REQUIREMENTS 0
+
struct erofs_super_block {
/* 0 */__le32 magic; /* in the little endian */
/* 4 */__le32 checksum; /* crc32c(super_block) */
-/* 8 */__le32 features;
+/* 8 */__le32 features; /* (aka. feature_compat) */
/* 12 */__u8 blkszbits; /* support block_size == PAGE_SIZE only */
/* 13 */__u8 reserved;
@@ -34,9 +40,10 @@ struct erofs_super_block {
/* 44 */__le32 xattr_blkaddr;
/* 48 */__u8 uuid[16]; /* 128-bit uuid for volume */
/* 64 */__u8 volume_name[16]; /* volume name */
+/* 80 */__le32 requirements; /* (aka. feature_incompat) */
-/* 80 */__u8 reserved2[48]; /* 128 bytes */
-} __packed;
+/* 84 */__u8 reserved2[44];
+} __packed; /* 128 bytes */
/*
* erofs inode data mapping:
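For reference, the adjusted on-disk layout still adds up to the documented 128-byte superblock: the new requirements word sits at offset 80 and reserved2 shrinks from 48 to 44 bytes, so 80 + 4 + 44 = 128. A minimal compile-time sketch of that check (not part of the patch; it assumes the struct definition above is in scope):

	/* Sketch only: compile-time layout checks for the reworked superblock. */
	#include <linux/build_bug.h>
	#include <linux/stddef.h>

	static inline void erofs_sb_layout_asserts(void)
	{
		BUILD_BUG_ON(offsetof(struct erofs_super_block, requirements) != 80);
		BUILD_BUG_ON(offsetof(struct erofs_super_block, reserved2) != 84);
		BUILD_BUG_ON(sizeof(struct erofs_super_block) != 128);
	}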
diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
index c47778b3fabd..382258fc124d 100644
--- a/drivers/staging/erofs/internal.h
+++ b/drivers/staging/erofs/internal.h
@@ -115,6 +115,8 @@ struct erofs_sb_info {
u8 uuid[16]; /* 128-bit uuid for volume */
u8 volume_name[16]; /* volume name */
+ u32 requirements;
+
char *dev_name;
unsigned int mount_opt;
diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c
index 399847d21146..cadbcc11702a 100644
--- a/drivers/staging/erofs/super.c
+++ b/drivers/staging/erofs/super.c
@@ -71,6 +71,22 @@ static void free_inode(struct inode *inode)
kmem_cache_free(erofs_inode_cachep, vi);
}
+static bool check_layout_compatibility(struct super_block *sb,
+ struct erofs_super_block *layout)
+{
+ const unsigned int requirements = le32_to_cpu(layout->requirements);
+
+ EROFS_SB(sb)->requirements = requirements;
+
+ /* check if current kernel meets all mandatory requirements */
+ if (requirements & (~EROFS_ALL_REQUIREMENTS)) {
+ errln("unidentified requirements %x, please upgrade kernel version",
+ requirements & ~EROFS_ALL_REQUIREMENTS);
+ return false;
+ }
+ return true;
+}
+
static int superblock_read(struct super_block *sb)
{
struct erofs_sb_info *sbi;
@@ -104,6 +120,9 @@ static int superblock_read(struct super_block *sb)
goto out;
}
+ if (!check_layout_compatibility(sb, layout))
+ goto out;
+
sbi->blocks = le32_to_cpu(layout->blocks);
sbi->meta_blkaddr = le32_to_cpu(layout->meta_blkaddr);
#ifdef CONFIG_EROFS_FS_XATTR
@@ -457,6 +476,7 @@ static int erofs_read_super(struct super_block *sb,
*/
err_devname:
dput(sb->s_root);
+ sb->s_root = NULL;
err_iget:
#ifdef EROFS_FS_HAS_MANAGED_CACHE
iput(sbi->managed_cache);
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 9b07badf4c6c..7cbc1bdd2d8a 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -891,7 +891,9 @@ int fbtft_unregister_framebuffer(struct fb_info *fb_info)
if (par->fbtftops.unregister_backlight)
par->fbtftops.unregister_backlight(par);
fbtft_sysfs_exit(par);
- return unregister_framebuffer(fb_info);
+ unregister_framebuffer(fb_info);
+
+ return 0;
}
EXPORT_SYMBOL(fbtft_unregister_framebuffer);
diff --git a/drivers/staging/fieldbus/Documentation/fieldbus_dev.txt b/drivers/staging/fieldbus/Documentation/fieldbus_dev.txt
index 56af3f650fa3..89fb8e14676f 100644
--- a/drivers/staging/fieldbus/Documentation/fieldbus_dev.txt
+++ b/drivers/staging/fieldbus/Documentation/fieldbus_dev.txt
@@ -54,8 +54,8 @@ a limited few common behaviours and properties. This allows us to define
a simple interface consisting of a character device and a set of sysfs files:
See:
-Documentation/ABI/testing/sysfs-class-fieldbus-dev
-Documentation/ABI/testing/fieldbus-dev-cdev
+drivers/staging/fieldbus/Documentation/ABI/sysfs-class-fieldbus-dev
+drivers/staging/fieldbus/Documentation/ABI/fieldbus-dev-cdev
Note that this simple interface does not provide a way to modify adapter
configuration settings. It is therefore useful only for adapters that get their
diff --git a/drivers/staging/fieldbus/Kconfig b/drivers/staging/fieldbus/Kconfig
index e5e28e52c59b..b0b865acccfb 100644
--- a/drivers/staging/fieldbus/Kconfig
+++ b/drivers/staging/fieldbus/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
menuconfig FIELDBUS_DEV
tristate "Fieldbus Device Support"
help
diff --git a/drivers/staging/fieldbus/anybuss/Kconfig b/drivers/staging/fieldbus/anybuss/Kconfig
index 41f241c73826..8bc3d9a87743 100644
--- a/drivers/staging/fieldbus/anybuss/Kconfig
+++ b/drivers/staging/fieldbus/anybuss/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
config HMS_ANYBUSS_BUS
tristate "HMS Anybus-S Bus Support"
select REGMAP
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index b6a65ee8d558..dc8c25ddb97e 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -6,7 +6,8 @@
*/
#include <linux/interrupt.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/irq.h>
#include <linux/workqueue.h>
#include <linux/device.h>
#include <linux/kernel.h>
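The include change above is part of moving adt7316 from the legacy integer GPIO API to descriptor-based gpiolib, which <linux/gpio/consumer.h> provides. A minimal sketch of that consumer API as typically used to derive an interrupt line (illustrative only; the "interrupt" connection ID and the helper name are not taken from the driver):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/gpio/consumer.h>

	/* Sketch only: obtain a GPIO descriptor and map it to an IRQ number. */
	static int example_irq_from_gpiod(struct device *dev)
	{
		struct gpio_desc *gpiod;

		gpiod = devm_gpiod_get(dev, "interrupt", GPIOD_IN);
		if (IS_ERR(gpiod))
			return PTR_ERR(gpiod);

		return gpiod_to_irq(gpiod);	/* IRQ number, or negative errno */
	}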
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index dd7fcab8e19e..e075244c602b 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -5,6 +5,7 @@
* Copyright 2010-2011 Analog Devices Inc.
*/
+#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/kernel.h>
@@ -130,7 +131,7 @@ static int ad7150_read_event_config(struct iio_dev *indio_dev,
{
int ret;
u8 threshtype;
- bool adaptive;
+ bool thrfixed;
struct ad7150_chip_info *chip = iio_priv(indio_dev);
ret = i2c_smbus_read_byte_data(chip->client, AD7150_CFG);
@@ -138,21 +139,23 @@ static int ad7150_read_event_config(struct iio_dev *indio_dev,
return ret;
threshtype = (ret >> 5) & 0x03;
- adaptive = !!(ret & 0x80);
+
+ /*check if threshold mode is fixed or adaptive*/
+ thrfixed = FIELD_GET(AD7150_CFG_FIX, ret);
switch (type) {
case IIO_EV_TYPE_MAG_ADAPTIVE:
if (dir == IIO_EV_DIR_RISING)
- return adaptive && (threshtype == 0x1);
- return adaptive && (threshtype == 0x0);
+ return !thrfixed && (threshtype == 0x1);
+ return !thrfixed && (threshtype == 0x0);
case IIO_EV_TYPE_THRESH_ADAPTIVE:
if (dir == IIO_EV_DIR_RISING)
- return adaptive && (threshtype == 0x3);
- return adaptive && (threshtype == 0x2);
+ return !thrfixed && (threshtype == 0x3);
+ return !thrfixed && (threshtype == 0x2);
case IIO_EV_TYPE_THRESH:
if (dir == IIO_EV_DIR_RISING)
- return !adaptive && (threshtype == 0x1);
- return !adaptive && (threshtype == 0x0);
+ return thrfixed && (threshtype == 0x1);
+ return thrfixed && (threshtype == 0x0);
default:
break;
}
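FIELD_GET() from <linux/bitfield.h> extracts the field selected by a constant mask and shifts it down to bit 0. Assuming AD7150_CFG_FIX covers BIT(7), the bit the old code tested as 0x80, a set bit now reads as "fixed threshold", so the adaptive case is simply its negation. A minimal sketch (the helper name is illustrative; the value of AD7150_CFG_FIX is an assumption):

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	#define AD7150_CFG_FIX	BIT(7)	/* assumed: 1 = fixed threshold, 0 = adaptive */

	/* Sketch only: how the new test relates to the old !!(ret & 0x80) check. */
	static bool ad7150_cfg_is_adaptive(u8 cfg)
	{
		bool thrfixed = FIELD_GET(AD7150_CFG_FIX, cfg);

		return !thrfixed;
	}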
diff --git a/drivers/staging/isdn/Kconfig b/drivers/staging/isdn/Kconfig
new file mode 100644
index 000000000000..faaf63887094
--- /dev/null
+++ b/drivers/staging/isdn/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "ISDN CAPI drivers"
+ depends on ISDN_CAPI
+
+source "drivers/staging/isdn/avm/Kconfig"
+
+source "drivers/staging/isdn/gigaset/Kconfig"
+
+source "drivers/staging/isdn/hysdn/Kconfig"
+
+endmenu
+
diff --git a/drivers/staging/isdn/Makefile b/drivers/staging/isdn/Makefile
new file mode 100644
index 000000000000..025504bae5df
--- /dev/null
+++ b/drivers/staging/isdn/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for the kernel ISDN subsystem and device drivers.
+
+# Object files in subdirectories
+
+obj-$(CONFIG_CAPI_AVM) += avm/
+obj-$(CONFIG_HYSDN) += hysdn/
+obj-$(CONFIG_ISDN_DRV_GIGASET) += gigaset/
diff --git a/drivers/staging/isdn/TODO b/drivers/staging/isdn/TODO
new file mode 100644
index 000000000000..9210d11eb68b
--- /dev/null
+++ b/drivers/staging/isdn/TODO
@@ -0,0 +1,22 @@
+TODO: Remove in late 2019 unless there are users
+
+
+I tried to find any indication of whether the capi drivers are
+still in use, and have not found anything recent.
+
+With public ISDN networks almost completely shut down over the past 12
+months, there is very little you can actually do with this hardware. The
+main remaining use case would be to connect ISDN voice phones to an
+in-house installation with Asterisk or LCR, but anyone trying this in
+turn seems to be using either the mISDN driver stack, or out-of-tree
+drivers from the hardware vendors.
+
+I may of course have missed something, so I would suggest moving
+these into drivers/staging/ just in case someone still uses one
+of the three remaining in-kernel drivers (avm, hysdn, gigaset).
+
+If nobody complains, we can remove them entirely in six months,
+or otherwise move the core code and any drivers that are still
+needed back into drivers/isdn.
+
+ Arnd Bergmann <arnd@arndb.de>
diff --git a/drivers/staging/isdn/avm/Kconfig b/drivers/staging/isdn/avm/Kconfig
new file mode 100644
index 000000000000..81483db067bb
--- /dev/null
+++ b/drivers/staging/isdn/avm/Kconfig
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# ISDN AVM drivers
+#
+
+menuconfig CAPI_AVM
+ bool "Active AVM cards"
+ help
+ Enable support for AVM active ISDN cards.
+
+if CAPI_AVM
+
+config ISDN_DRV_AVMB1_B1ISA
+ tristate "AVM B1 ISA support"
+ depends on ISA
+ help
+ Enable support for the ISA version of the AVM B1 card.
+
+config ISDN_DRV_AVMB1_B1PCI
+ tristate "AVM B1 PCI support"
+ depends on PCI
+ help
+ Enable support for the PCI version of the AVM B1 card.
+
+config ISDN_DRV_AVMB1_B1PCIV4
+ bool "AVM B1 PCI V4 support"
+ depends on ISDN_DRV_AVMB1_B1PCI
+ help
+ Enable support for the V4 version of the AVM B1 PCI card.
+
+config ISDN_DRV_AVMB1_T1ISA
+ tristate "AVM T1/T1-B ISA support"
+ depends on ISA
+ help
+ Enable support for the AVM T1 T1B card.
+ Note: This is a PRI card and handles 30 B-channels.
+
+config ISDN_DRV_AVMB1_B1PCMCIA
+ tristate "AVM B1/M1/M2 PCMCIA support"
+ depends on PCMCIA
+ help
+ Enable support for the PCMCIA version of the AVM B1 card.
+
+config ISDN_DRV_AVMB1_AVM_CS
+ tristate "AVM B1/M1/M2 PCMCIA cs module"
+ depends on ISDN_DRV_AVMB1_B1PCMCIA
+ help
+ Enable the PCMCIA client driver for the AVM B1/M1/M2
+ PCMCIA cards.
+
+config ISDN_DRV_AVMB1_T1PCI
+ tristate "AVM T1/T1-B PCI support"
+ depends on PCI
+ help
+ Enable support for the AVM T1 T1B card.
+ Note: This is a PRI card and handles 30 B-channels.
+
+config ISDN_DRV_AVMB1_C4
+ tristate "AVM C4/C2 support"
+ depends on PCI
+ help
+ Enable support for the AVM C4/C2 PCI cards.
+ These cards handle 4/2 BRI ISDN lines (8/4 channels).
+
+endif # CAPI_AVM
diff --git a/drivers/staging/isdn/avm/Makefile b/drivers/staging/isdn/avm/Makefile
new file mode 100644
index 000000000000..3830a0573fcc
--- /dev/null
+++ b/drivers/staging/isdn/avm/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for the AVM ISDN device drivers
+
+# Each configuration option enables a list of files.
+
+obj-$(CONFIG_ISDN_DRV_AVMB1_B1ISA) += b1isa.o b1.o
+obj-$(CONFIG_ISDN_DRV_AVMB1_B1PCI) += b1pci.o b1.o b1dma.o
+obj-$(CONFIG_ISDN_DRV_AVMB1_B1PCMCIA) += b1pcmcia.o b1.o
+obj-$(CONFIG_ISDN_DRV_AVMB1_AVM_CS) += avm_cs.o
+obj-$(CONFIG_ISDN_DRV_AVMB1_T1ISA) += t1isa.o b1.o
+obj-$(CONFIG_ISDN_DRV_AVMB1_T1PCI) += t1pci.o b1.o b1dma.o
+obj-$(CONFIG_ISDN_DRV_AVMB1_C4) += c4.o b1.o
diff --git a/drivers/staging/isdn/avm/avm_cs.c b/drivers/staging/isdn/avm/avm_cs.c
new file mode 100644
index 000000000000..62b8030ee331
--- /dev/null
+++ b/drivers/staging/isdn/avm/avm_cs.c
@@ -0,0 +1,166 @@
+/* $Id: avm_cs.c,v 1.4.6.3 2001/09/23 22:24:33 kai Exp $
+ *
+ * A PCMCIA client driver for AVM B1/M1/M2
+ *
+ * Copyright 1999 by Carsten Paeth <calle@calle.de>
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/tty.h>
+#include <linux/serial.h>
+#include <linux/major.h>
+#include <asm/io.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/cisreg.h>
+
+#include <linux/skbuff.h>
+#include <linux/capi.h>
+#include <linux/b1lli.h>
+#include <linux/b1pcmcia.h>
+
+/*====================================================================*/
+
+MODULE_DESCRIPTION("CAPI4Linux: PCMCIA client driver for AVM B1/M1/M2");
+MODULE_AUTHOR("Carsten Paeth");
+MODULE_LICENSE("GPL");
+
+/*====================================================================*/
+
+static int avmcs_config(struct pcmcia_device *link);
+static void avmcs_release(struct pcmcia_device *link);
+static void avmcs_detach(struct pcmcia_device *p_dev);
+
+static int avmcs_probe(struct pcmcia_device *p_dev)
+{
+ /* General socket configuration */
+ p_dev->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
+ p_dev->config_index = 1;
+ p_dev->config_regs = PRESENT_OPTION;
+
+ return avmcs_config(p_dev);
+} /* avmcs_attach */
+
+
+static void avmcs_detach(struct pcmcia_device *link)
+{
+ avmcs_release(link);
+} /* avmcs_detach */
+
+static int avmcs_configcheck(struct pcmcia_device *p_dev, void *priv_data)
+{
+ p_dev->resource[0]->end = 16;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+
+ return pcmcia_request_io(p_dev);
+}
+
+static int avmcs_config(struct pcmcia_device *link)
+{
+ int i = -1;
+ char devname[128];
+ int cardtype;
+ int (*addcard)(unsigned int port, unsigned irq);
+
+ devname[0] = 0;
+ if (link->prod_id[1])
+ strlcpy(devname, link->prod_id[1], sizeof(devname));
+
+ /*
+ * find IO port
+ */
+ if (pcmcia_loop_config(link, avmcs_configcheck, NULL))
+ return -ENODEV;
+
+ do {
+ if (!link->irq) {
+ /* undo */
+ pcmcia_disable_device(link);
+ break;
+ }
+
+ /*
+ * configure the PCMCIA socket
+ */
+ i = pcmcia_enable_device(link);
+ if (i != 0) {
+ pcmcia_disable_device(link);
+ break;
+ }
+
+ } while (0);
+
+ if (devname[0]) {
+ char *s = strrchr(devname, ' ');
+ if (!s)
+ s = devname;
+ else s++;
+ if (strcmp("M1", s) == 0) {
+ cardtype = AVM_CARDTYPE_M1;
+ } else if (strcmp("M2", s) == 0) {
+ cardtype = AVM_CARDTYPE_M2;
+ } else {
+ cardtype = AVM_CARDTYPE_B1;
+ }
+ } else
+ cardtype = AVM_CARDTYPE_B1;
+
+ /* If any step failed, release any partially configured state */
+ if (i != 0) {
+ avmcs_release(link);
+ return -ENODEV;
+ }
+
+
+ switch (cardtype) {
+ case AVM_CARDTYPE_M1: addcard = b1pcmcia_addcard_m1; break;
+ case AVM_CARDTYPE_M2: addcard = b1pcmcia_addcard_m2; break;
+ default:
+ case AVM_CARDTYPE_B1: addcard = b1pcmcia_addcard_b1; break;
+ }
+ if ((i = (*addcard)(link->resource[0]->start, link->irq)) < 0) {
+ dev_err(&link->dev,
+ "avm_cs: failed to add AVM-Controller at i/o %#x, irq %d\n",
+ (unsigned int) link->resource[0]->start, link->irq);
+ avmcs_release(link);
+ return -ENODEV;
+ }
+ return 0;
+
+} /* avmcs_config */
+
+
+static void avmcs_release(struct pcmcia_device *link)
+{
+ b1pcmcia_delcard(link->resource[0]->start, link->irq);
+ pcmcia_disable_device(link);
+} /* avmcs_release */
+
+
+static const struct pcmcia_device_id avmcs_ids[] = {
+ PCMCIA_DEVICE_PROD_ID12("AVM", "ISDN-Controller B1", 0x95d42008, 0x845dc335),
+ PCMCIA_DEVICE_PROD_ID12("AVM", "Mobile ISDN-Controller M1", 0x95d42008, 0x81e10430),
+ PCMCIA_DEVICE_PROD_ID12("AVM", "Mobile ISDN-Controller M2", 0x95d42008, 0x18e8558a),
+ PCMCIA_DEVICE_NULL
+};
+MODULE_DEVICE_TABLE(pcmcia, avmcs_ids);
+
+static struct pcmcia_driver avmcs_driver = {
+ .owner = THIS_MODULE,
+ .name = "avm_cs",
+ .probe = avmcs_probe,
+ .remove = avmcs_detach,
+ .id_table = avmcs_ids,
+};
+module_pcmcia_driver(avmcs_driver);
diff --git a/drivers/staging/isdn/avm/avmcard.h b/drivers/staging/isdn/avm/avmcard.h
new file mode 100644
index 000000000000..cdfa89c71997
--- /dev/null
+++ b/drivers/staging/isdn/avm/avmcard.h
@@ -0,0 +1,581 @@
+/* $Id: avmcard.h,v 1.1.4.1.2.1 2001/12/21 15:00:17 kai Exp $
+ *
+ * Copyright 1999 by Carsten Paeth <calle@calle.de>
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#ifndef _AVMCARD_H_
+#define _AVMCARD_H_
+
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+
+#define AVMB1_PORTLEN 0x1f
+#define AVM_MAXVERSION 8
+#define AVM_NCCI_PER_CHANNEL 4
+
+/*
+ * Versions
+ */
+
+#define VER_DRIVER 0
+#define VER_CARDTYPE 1
+#define VER_HWID 2
+#define VER_SERIAL 3
+#define VER_OPTION 4
+#define VER_PROTO 5
+#define VER_PROFILE 6
+#define VER_CAPI 7
+
+enum avmcardtype {
+ avm_b1isa,
+ avm_b1pci,
+ avm_b1pcmcia,
+ avm_m1,
+ avm_m2,
+ avm_t1isa,
+ avm_t1pci,
+ avm_c4,
+ avm_c2
+};
+
+typedef struct avmcard_dmabuf {
+ long size;
+ u8 *dmabuf;
+ dma_addr_t dmaaddr;
+} avmcard_dmabuf;
+
+typedef struct avmcard_dmainfo {
+ u32 recvlen;
+ avmcard_dmabuf recvbuf;
+
+ avmcard_dmabuf sendbuf;
+ struct sk_buff_head send_queue;
+
+ struct pci_dev *pcidev;
+} avmcard_dmainfo;
+
+typedef struct avmctrl_info {
+ char cardname[32];
+
+ int versionlen;
+ char versionbuf[1024];
+ char *version[AVM_MAXVERSION];
+
+ char infobuf[128]; /* for function procinfo */
+
+ struct avmcard *card;
+ struct capi_ctr capi_ctrl;
+
+ struct list_head ncci_head;
+} avmctrl_info;
+
+typedef struct avmcard {
+ char name[32];
+
+ spinlock_t lock;
+ unsigned int port;
+ unsigned irq;
+ unsigned long membase;
+ enum avmcardtype cardtype;
+ unsigned char revision;
+ unsigned char class;
+ int cardnr; /* for t1isa */
+
+ char msgbuf[128]; /* capimsg msg part */
+ char databuf[2048]; /* capimsg data part */
+
+ void __iomem *mbase;
+ volatile u32 csr;
+ avmcard_dmainfo *dma;
+
+ struct avmctrl_info *ctrlinfo;
+
+ u_int nr_controllers;
+ u_int nlogcontr;
+ struct list_head list;
+} avmcard;
+
+extern int b1_irq_table[16];
+
+/*
+ * LLI Messages to the ISDN-Controller
+ */
+
+#define SEND_POLL 0x72 /*
+ * after load <- RECEIVE_POLL
+ */
+#define SEND_INIT 0x11 /*
+ * first message <- RECEIVE_INIT
+ * int32 NumApplications int32
+ * NumNCCIs int32 BoardNumber
+ */
+#define SEND_REGISTER 0x12 /*
+ * register an application int32
+ * ApplIDId int32 NumMessages
+ * int32 NumB3Connections int32
+ * NumB3Blocks int32 B3Size
+ *
+ * AnzB3Connection != 0 &&
+ * AnzB3Blocks >= 1 && B3Size >= 1
+ */
+#define SEND_RELEASE 0x14 /*
+ * deregister an application int32
+ * ApplID
+ */
+#define SEND_MESSAGE 0x15 /*
+ * send capi-message int32 length
+ * capi-data ...
+ */
+#define SEND_DATA_B3_REQ 0x13 /*
+ * send capi-data-message int32
+ * MsgLength capi-data ... int32
+ * B3Length data ....
+ */
+
+#define SEND_CONFIG 0x21 /*
+ */
+
+#define SEND_POLLACK 0x73 /* T1 Watchdog */
+
+/*
+ * LLI Messages from the ISDN-Controller
+ */
+
+#define RECEIVE_POLL 0x32 /*
+ * <- after SEND_POLL
+ */
+#define RECEIVE_INIT 0x27 /*
+ * <- after SEND_INIT int32 length
+ * byte total length b1struct board
+ * driver revision b1struct card
+ * type b1struct reserved b1struct
+ * serial number b1struct driver
+ * capability b1struct d-channel
+ * protocol b1struct CAPI-2.0
+ * profile b1struct capi version
+ */
+#define RECEIVE_MESSAGE 0x21 /*
+ * <- after SEND_MESSAGE int32
+ * AppllID int32 Length capi-data
+ * ....
+ */
+#define RECEIVE_DATA_B3_IND 0x22 /*
+ * received data int32 AppllID
+ * int32 Length capi-data ...
+ * int32 B3Length data ...
+ */
+#define RECEIVE_START 0x23 /*
+ * Handshake
+ */
+#define RECEIVE_STOP 0x24 /*
+ * Handshake
+ */
+#define RECEIVE_NEW_NCCI 0x25 /*
+ * int32 AppllID int32 NCCI int32
+ * WindowSize
+ */
+#define RECEIVE_FREE_NCCI 0x26 /*
+ * int32 AppllID int32 NCCI
+ */
+#define RECEIVE_RELEASE 0x26 /*
+ * int32 AppllID int32 0xffffffff
+ */
+#define RECEIVE_TASK_READY 0x31 /*
+ * int32 tasknr
+ * int32 Length Taskname ...
+ */
+#define RECEIVE_DEBUGMSG 0x71 /*
+ * int32 Length message
+ *
+ */
+#define RECEIVE_POLLDWORD 0x75 /* t1pci in dword mode */
+
+#define WRITE_REGISTER 0x00
+#define READ_REGISTER 0x01
+
+/*
+ * port offsets
+ */
+
+#define B1_READ 0x00
+#define B1_WRITE 0x01
+#define B1_INSTAT 0x02
+#define B1_OUTSTAT 0x03
+#define B1_ANALYSE 0x04
+#define B1_REVISION 0x05
+#define B1_RESET 0x10
+
+
+#define B1_STAT0(cardtype) ((cardtype) == avm_m1 ? 0x81200000l : 0x80A00000l)
+#define B1_STAT1(cardtype) (0x80E00000l)
+
+/* ---------------------------------------------------------------- */
+
+static inline unsigned char b1outp(unsigned int base,
+ unsigned short offset,
+ unsigned char value)
+{
+ outb(value, base + offset);
+ return inb(base + B1_ANALYSE);
+}
+
+
+static inline int b1_rx_full(unsigned int base)
+{
+ return inb(base + B1_INSTAT) & 0x1;
+}
+
+static inline unsigned char b1_get_byte(unsigned int base)
+{
+ unsigned long stop = jiffies + 1 * HZ; /* maximum wait time 1 sec */
+ while (!b1_rx_full(base) && time_before(jiffies, stop));
+ if (b1_rx_full(base))
+ return inb(base + B1_READ);
+ printk(KERN_CRIT "b1lli(0x%x): rx not full after 1 second\n", base);
+ return 0;
+}
+
+static inline unsigned int b1_get_word(unsigned int base)
+{
+ unsigned int val = 0;
+ val |= b1_get_byte(base);
+ val |= (b1_get_byte(base) << 8);
+ val |= (b1_get_byte(base) << 16);
+ val |= (b1_get_byte(base) << 24);
+ return val;
+}
+
+static inline int b1_tx_empty(unsigned int base)
+{
+ return inb(base + B1_OUTSTAT) & 0x1;
+}
+
+static inline void b1_put_byte(unsigned int base, unsigned char val)
+{
+ while (!b1_tx_empty(base));
+ b1outp(base, B1_WRITE, val);
+}
+
+static inline int b1_save_put_byte(unsigned int base, unsigned char val)
+{
+ unsigned long stop = jiffies + 2 * HZ;
+ while (!b1_tx_empty(base) && time_before(jiffies, stop));
+ if (!b1_tx_empty(base)) return -1;
+ b1outp(base, B1_WRITE, val);
+ return 0;
+}
+
+static inline void b1_put_word(unsigned int base, unsigned int val)
+{
+ b1_put_byte(base, val & 0xff);
+ b1_put_byte(base, (val >> 8) & 0xff);
+ b1_put_byte(base, (val >> 16) & 0xff);
+ b1_put_byte(base, (val >> 24) & 0xff);
+}
+
+static inline unsigned int b1_get_slice(unsigned int base,
+ unsigned char *dp)
+{
+ unsigned int len, i;
+
+ len = i = b1_get_word(base);
+ while (i-- > 0) *dp++ = b1_get_byte(base);
+ return len;
+}
+
+static inline void b1_put_slice(unsigned int base,
+ unsigned char *dp, unsigned int len)
+{
+ unsigned i = len;
+ b1_put_word(base, i);
+ while (i-- > 0)
+ b1_put_byte(base, *dp++);
+}
+
+static inline void b1_wr_reg(unsigned int base,
+ unsigned int reg,
+ unsigned int value)
+{
+ b1_put_byte(base, WRITE_REGISTER);
+ b1_put_word(base, reg);
+ b1_put_word(base, value);
+}
+
+static inline unsigned int b1_rd_reg(unsigned int base,
+ unsigned int reg)
+{
+ b1_put_byte(base, READ_REGISTER);
+ b1_put_word(base, reg);
+ return b1_get_word(base);
+
+}
+
+static inline void b1_reset(unsigned int base)
+{
+ b1outp(base, B1_RESET, 0);
+ mdelay(55 * 2); /* 2 TIC's */
+
+ b1outp(base, B1_RESET, 1);
+ mdelay(55 * 2); /* 2 TIC's */
+
+ b1outp(base, B1_RESET, 0);
+ mdelay(55 * 2); /* 2 TIC's */
+}
+
+static inline unsigned char b1_disable_irq(unsigned int base)
+{
+ return b1outp(base, B1_INSTAT, 0x00);
+}
+
+/* ---------------------------------------------------------------- */
+
+static inline void b1_set_test_bit(unsigned int base,
+ enum avmcardtype cardtype,
+ int onoff)
+{
+ b1_wr_reg(base, B1_STAT0(cardtype), onoff ? 0x21 : 0x20);
+}
+
+static inline int b1_get_test_bit(unsigned int base,
+ enum avmcardtype cardtype)
+{
+ return (b1_rd_reg(base, B1_STAT0(cardtype)) & 0x01) != 0;
+}
+
+/* ---------------------------------------------------------------- */
+
+#define T1_FASTLINK 0x00
+#define T1_SLOWLINK 0x08
+
+#define T1_READ B1_READ
+#define T1_WRITE B1_WRITE
+#define T1_INSTAT B1_INSTAT
+#define T1_OUTSTAT B1_OUTSTAT
+#define T1_IRQENABLE 0x05
+#define T1_FIFOSTAT 0x06
+#define T1_RESETLINK 0x10
+#define T1_ANALYSE 0x11
+#define T1_IRQMASTER 0x12
+#define T1_IDENT 0x17
+#define T1_RESETBOARD 0x1f
+
+#define T1F_IREADY 0x01
+#define T1F_IHALF 0x02
+#define T1F_IFULL 0x04
+#define T1F_IEMPTY 0x08
+#define T1F_IFLAGS 0xF0
+
+#define T1F_OREADY 0x10
+#define T1F_OHALF 0x20
+#define T1F_OEMPTY 0x40
+#define T1F_OFULL 0x80
+#define T1F_OFLAGS 0xF0
+
+/* there are HEMA cards with 1k and 4k FIFO out */
+#define FIFO_OUTBSIZE 256
+#define FIFO_INPBSIZE 512
+
+#define HEMA_VERSION_ID 0
+#define HEMA_PAL_ID 0
+
+static inline void t1outp(unsigned int base,
+ unsigned short offset,
+ unsigned char value)
+{
+ outb(value, base + offset);
+}
+
+static inline unsigned char t1inp(unsigned int base,
+ unsigned short offset)
+{
+ return inb(base + offset);
+}
+
+static inline int t1_isfastlink(unsigned int base)
+{
+ return (inb(base + T1_IDENT) & ~0x82) == 1;
+}
+
+static inline unsigned char t1_fifostatus(unsigned int base)
+{
+ return inb(base + T1_FIFOSTAT);
+}
+
+static inline unsigned int t1_get_slice(unsigned int base,
+ unsigned char *dp)
+{
+ unsigned int len, i;
+#ifdef FASTLINK_DEBUG
+ unsigned wcnt = 0, bcnt = 0;
+#endif
+
+ len = i = b1_get_word(base);
+ if (t1_isfastlink(base)) {
+ int status;
+ while (i > 0) {
+ status = t1_fifostatus(base) & (T1F_IREADY | T1F_IHALF);
+ if (i >= FIFO_INPBSIZE) status |= T1F_IFULL;
+
+ switch (status) {
+ case T1F_IREADY | T1F_IHALF | T1F_IFULL:
+ insb(base + B1_READ, dp, FIFO_INPBSIZE);
+ dp += FIFO_INPBSIZE;
+ i -= FIFO_INPBSIZE;
+#ifdef FASTLINK_DEBUG
+ wcnt += FIFO_INPBSIZE;
+#endif
+ break;
+ case T1F_IREADY | T1F_IHALF:
+ insb(base + B1_READ, dp, i);
+#ifdef FASTLINK_DEBUG
+ wcnt += i;
+#endif
+ dp += i;
+ i = 0;
+ break;
+ default:
+ *dp++ = b1_get_byte(base);
+ i--;
+#ifdef FASTLINK_DEBUG
+ bcnt++;
+#endif
+ break;
+ }
+ }
+#ifdef FASTLINK_DEBUG
+ if (wcnt)
+ printk(KERN_DEBUG "b1lli(0x%x): get_slice l=%d w=%d b=%d\n",
+ base, len, wcnt, bcnt);
+#endif
+ } else {
+ while (i-- > 0)
+ *dp++ = b1_get_byte(base);
+ }
+ return len;
+}
+
+static inline void t1_put_slice(unsigned int base,
+ unsigned char *dp, unsigned int len)
+{
+ unsigned i = len;
+ b1_put_word(base, i);
+ if (t1_isfastlink(base)) {
+ int status;
+ while (i > 0) {
+ status = t1_fifostatus(base) & (T1F_OREADY | T1F_OHALF);
+ if (i >= FIFO_OUTBSIZE) status |= T1F_OEMPTY;
+ switch (status) {
+ case T1F_OREADY | T1F_OHALF | T1F_OEMPTY:
+ outsb(base + B1_WRITE, dp, FIFO_OUTBSIZE);
+ dp += FIFO_OUTBSIZE;
+ i -= FIFO_OUTBSIZE;
+ break;
+ case T1F_OREADY | T1F_OHALF:
+ outsb(base + B1_WRITE, dp, i);
+ dp += i;
+ i = 0;
+ break;
+ default:
+ b1_put_byte(base, *dp++);
+ i--;
+ break;
+ }
+ }
+ } else {
+ while (i-- > 0)
+ b1_put_byte(base, *dp++);
+ }
+}
+
+static inline void t1_disable_irq(unsigned int base)
+{
+ t1outp(base, T1_IRQMASTER, 0x00);
+}
+
+static inline void t1_reset(unsigned int base)
+{
+ /* reset T1 Controller */
+ b1_reset(base);
+ /* disable irq on HEMA */
+ t1outp(base, B1_INSTAT, 0x00);
+ t1outp(base, B1_OUTSTAT, 0x00);
+ t1outp(base, T1_IRQMASTER, 0x00);
+ /* reset HEMA board configuration */
+ t1outp(base, T1_RESETBOARD, 0xf);
+}
+
+static inline void b1_setinterrupt(unsigned int base, unsigned irq,
+ enum avmcardtype cardtype)
+{
+ switch (cardtype) {
+ case avm_t1isa:
+ t1outp(base, B1_INSTAT, 0x00);
+ t1outp(base, B1_INSTAT, 0x02);
+ t1outp(base, T1_IRQMASTER, 0x08);
+ break;
+ case avm_b1isa:
+ b1outp(base, B1_INSTAT, 0x00);
+ b1outp(base, B1_RESET, b1_irq_table[irq]);
+ b1outp(base, B1_INSTAT, 0x02);
+ break;
+ default:
+ case avm_m1:
+ case avm_m2:
+ case avm_b1pci:
+ b1outp(base, B1_INSTAT, 0x00);
+ b1outp(base, B1_RESET, 0xf0);
+ b1outp(base, B1_INSTAT, 0x02);
+ break;
+ case avm_c4:
+ case avm_t1pci:
+ b1outp(base, B1_RESET, 0xf0);
+ break;
+ }
+}
+
+/* b1.c */
+avmcard *b1_alloc_card(int nr_controllers);
+void b1_free_card(avmcard *card);
+int b1_detect(unsigned int base, enum avmcardtype cardtype);
+void b1_getrevision(avmcard *card);
+int b1_load_t4file(avmcard *card, capiloaddatapart *t4file);
+int b1_load_config(avmcard *card, capiloaddatapart *config);
+int b1_loaded(avmcard *card);
+
+int b1_load_firmware(struct capi_ctr *ctrl, capiloaddata *data);
+void b1_reset_ctr(struct capi_ctr *ctrl);
+void b1_register_appl(struct capi_ctr *ctrl, u16 appl,
+ capi_register_params *rp);
+void b1_release_appl(struct capi_ctr *ctrl, u16 appl);
+u16 b1_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
+void b1_parse_version(avmctrl_info *card);
+irqreturn_t b1_interrupt(int interrupt, void *devptr);
+
+int b1_proc_show(struct seq_file *m, void *v);
+
+avmcard_dmainfo *avmcard_dma_alloc(char *name, struct pci_dev *,
+ long rsize, long ssize);
+void avmcard_dma_free(avmcard_dmainfo *);
+
+/* b1dma.c */
+int b1pciv4_detect(avmcard *card);
+int t1pci_detect(avmcard *card);
+void b1dma_reset(avmcard *card);
+irqreturn_t b1dma_interrupt(int interrupt, void *devptr);
+
+int b1dma_load_firmware(struct capi_ctr *ctrl, capiloaddata *data);
+void b1dma_reset_ctr(struct capi_ctr *ctrl);
+void b1dma_remove_ctr(struct capi_ctr *ctrl);
+void b1dma_register_appl(struct capi_ctr *ctrl,
+ u16 appl,
+ capi_register_params *rp);
+void b1dma_release_appl(struct capi_ctr *ctrl, u16 appl);
+u16 b1dma_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
+int b1dma_proc_show(struct seq_file *m, void *v);
+
+#endif /* _AVMCARD_H_ */
diff --git a/drivers/staging/isdn/avm/b1.c b/drivers/staging/isdn/avm/b1.c
new file mode 100644
index 000000000000..40ca1e8fa09f
--- /dev/null
+++ b/drivers/staging/isdn/avm/b1.c
@@ -0,0 +1,804 @@
+/* $Id: b1.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $
+ *
+ * Common module for AVM B1 cards.
+ *
+ * Copyright 1999 by Carsten Paeth <calle@calle.de>
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/capi.h>
+#include <linux/kernelcapi.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/netdevice.h>
+#include <linux/isdn/capilli.h>
+#include "avmcard.h"
+#include <linux/isdn/capicmd.h>
+#include <linux/isdn/capiutil.h>
+
+static char *revision = "$Revision: 1.1.2.2 $";
+
+/* ------------------------------------------------------------- */
+
+MODULE_DESCRIPTION("CAPI4Linux: Common support for active AVM cards");
+MODULE_AUTHOR("Carsten Paeth");
+MODULE_LICENSE("GPL");
+
+/* ------------------------------------------------------------- */
+
+int b1_irq_table[16] =
+{0,
+ 0,
+ 0,
+ 192, /* irq 3 */
+ 32, /* irq 4 */
+ 160, /* irq 5 */
+ 96, /* irq 6 */
+ 224, /* irq 7 */
+ 0,
+ 64, /* irq 9 */
+ 80, /* irq 10 */
+ 208, /* irq 11 */
+ 48, /* irq 12 */
+ 0,
+ 0,
+ 112, /* irq 15 */
+};
+
+/* ------------------------------------------------------------- */
+
+avmcard *b1_alloc_card(int nr_controllers)
+{
+ avmcard *card;
+ avmctrl_info *cinfo;
+ int i;
+
+ card = kzalloc(sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return NULL;
+
+ cinfo = kcalloc(nr_controllers, sizeof(*cinfo), GFP_KERNEL);
+ if (!cinfo) {
+ kfree(card);
+ return NULL;
+ }
+
+ card->ctrlinfo = cinfo;
+ for (i = 0; i < nr_controllers; i++) {
+ INIT_LIST_HEAD(&cinfo[i].ncci_head);
+ cinfo[i].card = card;
+ }
+ spin_lock_init(&card->lock);
+ card->nr_controllers = nr_controllers;
+
+ return card;
+}
+
+/* ------------------------------------------------------------- */
+
+void b1_free_card(avmcard *card)
+{
+ kfree(card->ctrlinfo);
+ kfree(card);
+}
+
+/* ------------------------------------------------------------- */
+
+int b1_detect(unsigned int base, enum avmcardtype cardtype)
+{
+ int onoff, i;
+
+ /*
+ * Statusregister 0000 00xx
+ */
+ if ((inb(base + B1_INSTAT) & 0xfc)
+ || (inb(base + B1_OUTSTAT) & 0xfc))
+ return 1;
+ /*
+ * Statusregister 0000 001x
+ */
+ b1outp(base, B1_INSTAT, 0x2); /* enable irq */
+ /* b1outp(base, B1_OUTSTAT, 0x2); */
+ if ((inb(base + B1_INSTAT) & 0xfe) != 0x2
+ /* || (inb(base + B1_OUTSTAT) & 0xfe) != 0x2 */)
+ return 2;
+ /*
+ * Statusregister 0000 000x
+ */
+ b1outp(base, B1_INSTAT, 0x0); /* disable irq */
+ b1outp(base, B1_OUTSTAT, 0x0);
+ if ((inb(base + B1_INSTAT) & 0xfe)
+ || (inb(base + B1_OUTSTAT) & 0xfe))
+ return 3;
+
+ for (onoff = !0, i = 0; i < 10; i++) {
+ b1_set_test_bit(base, cardtype, onoff);
+ if (b1_get_test_bit(base, cardtype) != onoff)
+ return 4;
+ onoff = !onoff;
+ }
+
+ if (cardtype == avm_m1)
+ return 0;
+
+ if ((b1_rd_reg(base, B1_STAT1(cardtype)) & 0x0f) != 0x01)
+ return 5;
+
+ return 0;
+}
+
+void b1_getrevision(avmcard *card)
+{
+ card->class = inb(card->port + B1_ANALYSE);
+ card->revision = inb(card->port + B1_REVISION);
+}
+
+#define FWBUF_SIZE 256
+int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
+{
+ unsigned char buf[FWBUF_SIZE];
+ unsigned char *dp;
+ int i, left;
+ unsigned int base = card->port;
+
+ dp = t4file->data;
+ left = t4file->len;
+ while (left > FWBUF_SIZE) {
+ if (t4file->user) {
+ if (copy_from_user(buf, dp, FWBUF_SIZE))
+ return -EFAULT;
+ } else {
+ memcpy(buf, dp, FWBUF_SIZE);
+ }
+ for (i = 0; i < FWBUF_SIZE; i++)
+ if (b1_save_put_byte(base, buf[i]) < 0) {
+ printk(KERN_ERR "%s: corrupted firmware file ?\n",
+ card->name);
+ return -EIO;
+ }
+ left -= FWBUF_SIZE;
+ dp += FWBUF_SIZE;
+ }
+ if (left) {
+ if (t4file->user) {
+ if (copy_from_user(buf, dp, left))
+ return -EFAULT;
+ } else {
+ memcpy(buf, dp, left);
+ }
+ for (i = 0; i < left; i++)
+ if (b1_save_put_byte(base, buf[i]) < 0) {
+ printk(KERN_ERR "%s: corrupted firmware file ?\n",
+ card->name);
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+int b1_load_config(avmcard *card, capiloaddatapart *config)
+{
+ unsigned char buf[FWBUF_SIZE];
+ unsigned char *dp;
+ unsigned int base = card->port;
+ int i, j, left;
+
+ dp = config->data;
+ left = config->len;
+ if (left) {
+ b1_put_byte(base, SEND_CONFIG);
+ b1_put_word(base, 1);
+ b1_put_byte(base, SEND_CONFIG);
+ b1_put_word(base, left);
+ }
+ while (left > FWBUF_SIZE) {
+ if (config->user) {
+ if (copy_from_user(buf, dp, FWBUF_SIZE))
+ return -EFAULT;
+ } else {
+ memcpy(buf, dp, FWBUF_SIZE);
+ }
+ for (i = 0; i < FWBUF_SIZE; ) {
+ b1_put_byte(base, SEND_CONFIG);
+ for (j = 0; j < 4; j++) {
+ b1_put_byte(base, buf[i++]);
+ }
+ }
+ left -= FWBUF_SIZE;
+ dp += FWBUF_SIZE;
+ }
+ if (left) {
+ if (config->user) {
+ if (copy_from_user(buf, dp, left))
+ return -EFAULT;
+ } else {
+ memcpy(buf, dp, left);
+ }
+ for (i = 0; i < left; ) {
+ b1_put_byte(base, SEND_CONFIG);
+ for (j = 0; j < 4; j++) {
+ if (i < left)
+ b1_put_byte(base, buf[i++]);
+ else
+ b1_put_byte(base, 0);
+ }
+ }
+ }
+ return 0;
+}
+
+int b1_loaded(avmcard *card)
+{
+ unsigned int base = card->port;
+ unsigned long stop;
+ unsigned char ans;
+ unsigned long tout = 2;
+
+ for (stop = jiffies + tout * HZ; time_before(jiffies, stop);) {
+ if (b1_tx_empty(base))
+ break;
+ }
+ if (!b1_tx_empty(base)) {
+ printk(KERN_ERR "%s: b1_loaded: tx err, corrupted t4 file ?\n",
+ card->name);
+ return 0;
+ }
+ b1_put_byte(base, SEND_POLL);
+ for (stop = jiffies + tout * HZ; time_before(jiffies, stop);) {
+ if (b1_rx_full(base)) {
+ if ((ans = b1_get_byte(base)) == RECEIVE_POLL) {
+ return 1;
+ }
+ printk(KERN_ERR "%s: b1_loaded: got 0x%x, firmware not running\n",
+ card->name, ans);
+ return 0;
+ }
+ }
+ printk(KERN_ERR "%s: b1_loaded: firmware not running\n", card->name);
+ return 0;
+}
+
+/* ------------------------------------------------------------- */
+
+int b1_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
+{
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+ avmcard *card = cinfo->card;
+ unsigned int port = card->port;
+ unsigned long flags;
+ int retval;
+
+ b1_reset(port);
+
+ if ((retval = b1_load_t4file(card, &data->firmware))) {
+ b1_reset(port);
+ printk(KERN_ERR "%s: failed to load t4file!!\n",
+ card->name);
+ return retval;
+ }
+
+ b1_disable_irq(port);
+
+ if (data->configuration.len > 0 && data->configuration.data) {
+ if ((retval = b1_load_config(card, &data->configuration))) {
+ b1_reset(port);
+ printk(KERN_ERR "%s: failed to load config!!\n",
+ card->name);
+ return retval;
+ }
+ }
+
+ if (!b1_loaded(card)) {
+ printk(KERN_ERR "%s: failed to load t4file.\n", card->name);
+ return -EIO;
+ }
+
+ spin_lock_irqsave(&card->lock, flags);
+ b1_setinterrupt(port, card->irq, card->cardtype);
+ b1_put_byte(port, SEND_INIT);
+ b1_put_word(port, CAPI_MAXAPPL);
+ b1_put_word(port, AVM_NCCI_PER_CHANNEL * 2);
+ b1_put_word(port, ctrl->cnr - 1);
+ spin_unlock_irqrestore(&card->lock, flags);
+
+ return 0;
+}
+
+void b1_reset_ctr(struct capi_ctr *ctrl)
+{
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+ avmcard *card = cinfo->card;
+ unsigned int port = card->port;
+ unsigned long flags;
+
+ b1_reset(port);
+ b1_reset(port);
+
+ memset(cinfo->version, 0, sizeof(cinfo->version));
+ spin_lock_irqsave(&card->lock, flags);
+ capilib_release(&cinfo->ncci_head);
+ spin_unlock_irqrestore(&card->lock, flags);
+ capi_ctr_down(ctrl);
+}
+
+void b1_register_appl(struct capi_ctr *ctrl,
+ u16 appl,
+ capi_register_params *rp)
+{
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+ avmcard *card = cinfo->card;
+ unsigned int port = card->port;
+ unsigned long flags;
+ int nconn, want = rp->level3cnt;
+
+ if (want > 0) nconn = want;
+ else nconn = ctrl->profile.nbchannel * -want;
+ if (nconn == 0) nconn = ctrl->profile.nbchannel;
+
+ spin_lock_irqsave(&card->lock, flags);
+ b1_put_byte(port, SEND_REGISTER);
+ b1_put_word(port, appl);
+ b1_put_word(port, 1024 * (nconn + 1));
+ b1_put_word(port, nconn);
+ b1_put_word(port, rp->datablkcnt);
+ b1_put_word(port, rp->datablklen);
+ spin_unlock_irqrestore(&card->lock, flags);
+}
+
+void b1_release_appl(struct capi_ctr *ctrl, u16 appl)
+{
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+ avmcard *card = cinfo->card;
+ unsigned int port = card->port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+ capilib_release_appl(&cinfo->ncci_head, appl);
+ b1_put_byte(port, SEND_RELEASE);
+ b1_put_word(port, appl);
+ spin_unlock_irqrestore(&card->lock, flags);
+}
+
+u16 b1_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
+{
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+ avmcard *card = cinfo->card;
+ unsigned int port = card->port;
+ unsigned long flags;
+ u16 len = CAPIMSG_LEN(skb->data);
+ u8 cmd = CAPIMSG_COMMAND(skb->data);
+ u8 subcmd = CAPIMSG_SUBCOMMAND(skb->data);
+ u16 dlen, retval;
+
+ spin_lock_irqsave(&card->lock, flags);
+ if (CAPICMD(cmd, subcmd) == CAPI_DATA_B3_REQ) {
+ retval = capilib_data_b3_req(&cinfo->ncci_head,
+ CAPIMSG_APPID(skb->data),
+ CAPIMSG_NCCI(skb->data),
+ CAPIMSG_MSGID(skb->data));
+ if (retval != CAPI_NOERROR) {
+ spin_unlock_irqrestore(&card->lock, flags);
+ return retval;
+ }
+
+ dlen = CAPIMSG_DATALEN(skb->data);
+
+ b1_put_byte(port, SEND_DATA_B3_REQ);
+ b1_put_slice(port, skb->data, len);
+ b1_put_slice(port, skb->data + len, dlen);
+ } else {
+ b1_put_byte(port, SEND_MESSAGE);
+ b1_put_slice(port, skb->data, len);
+ }
+ spin_unlock_irqrestore(&card->lock, flags);
+
+ dev_kfree_skb_any(skb);
+ return CAPI_NOERROR;
+}
+
+/* ------------------------------------------------------------- */
+
+void b1_parse_version(avmctrl_info *cinfo)
+{
+ struct capi_ctr *ctrl = &cinfo->capi_ctrl;
+ avmcard *card = cinfo->card;
+ capi_profile *profp;
+ u8 *dversion;
+ u8 flag;
+ int i, j;
+
+ for (j = 0; j < AVM_MAXVERSION; j++)
+ cinfo->version[j] = "";
+ for (i = 0, j = 0;
+ j < AVM_MAXVERSION && i < cinfo->versionlen;
+ j++, i += cinfo->versionbuf[i] + 1)
+ cinfo->version[j] = &cinfo->versionbuf[i + 1];
+
+ strlcpy(ctrl->serial, cinfo->version[VER_SERIAL], sizeof(ctrl->serial));
+ memcpy(&ctrl->profile, cinfo->version[VER_PROFILE], sizeof(capi_profile));
+ strlcpy(ctrl->manu, "AVM GmbH", sizeof(ctrl->manu));
+ dversion = cinfo->version[VER_DRIVER];
+ ctrl->version.majorversion = 2;
+ ctrl->version.minorversion = 0;
+ ctrl->version.majormanuversion = (((dversion[0] - '0') & 0xf) << 4);
+ ctrl->version.majormanuversion |= ((dversion[2] - '0') & 0xf);
+ ctrl->version.minormanuversion = (dversion[3] - '0') << 4;
+ ctrl->version.minormanuversion |=
+ (dversion[5] - '0') * 10 + ((dversion[6] - '0') & 0xf);
+
+ profp = &ctrl->profile;
+
+ flag = ((u8 *)(profp->manu))[1];
+ switch (flag) {
+ case 0: if (cinfo->version[VER_CARDTYPE])
+ strcpy(cinfo->cardname, cinfo->version[VER_CARDTYPE]);
+ else strcpy(cinfo->cardname, "B1");
+ break;
+ case 3: strcpy(cinfo->cardname, "PCMCIA B"); break;
+ case 4: strcpy(cinfo->cardname, "PCMCIA M1"); break;
+ case 5: strcpy(cinfo->cardname, "PCMCIA M2"); break;
+ case 6: strcpy(cinfo->cardname, "B1 V3.0"); break;
+ case 7: strcpy(cinfo->cardname, "B1 PCI"); break;
+ default: sprintf(cinfo->cardname, "AVM?%u", (unsigned int)flag); break;
+ }
+ printk(KERN_NOTICE "%s: card %d \"%s\" ready.\n",
+ card->name, ctrl->cnr, cinfo->cardname);
+
+ flag = ((u8 *)(profp->manu))[3];
+ if (flag)
+ printk(KERN_NOTICE "%s: card %d Protocol:%s%s%s%s%s%s%s\n",
+ card->name,
+ ctrl->cnr,
+ (flag & 0x01) ? " DSS1" : "",
+ (flag & 0x02) ? " CT1" : "",
+ (flag & 0x04) ? " VN3" : "",
+ (flag & 0x08) ? " NI1" : "",
+ (flag & 0x10) ? " AUSTEL" : "",
+ (flag & 0x20) ? " ESS" : "",
+ (flag & 0x40) ? " 1TR6" : ""
+ );
+
+ flag = ((u8 *)(profp->manu))[5];
+ if (flag)
+ printk(KERN_NOTICE "%s: card %d Linetype:%s%s%s%s\n",
+ card->name,
+ ctrl->cnr,
+ (flag & 0x01) ? " point to point" : "",
+ (flag & 0x02) ? " point to multipoint" : "",
+ (flag & 0x08) ? " leased line without D-channel" : "",
+ (flag & 0x04) ? " leased line with D-channel" : ""
+ );
+}
+
+/* ------------------------------------------------------------- */
+
+irqreturn_t b1_interrupt(int interrupt, void *devptr)
+{
+ avmcard *card = devptr;
+ avmctrl_info *cinfo = &card->ctrlinfo[0];
+ struct capi_ctr *ctrl = &cinfo->capi_ctrl;
+ unsigned char b1cmd;
+ struct sk_buff *skb;
+
+ unsigned ApplId;
+ unsigned MsgLen;
+ unsigned DataB3Len;
+ unsigned NCCI;
+ unsigned WindowSize;
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+
+ if (!b1_rx_full(card->port)) {
+ spin_unlock_irqrestore(&card->lock, flags);
+ return IRQ_NONE;
+ }
+
+ b1cmd = b1_get_byte(card->port);
+
+ switch (b1cmd) {
+
+ case RECEIVE_DATA_B3_IND:
+
+ ApplId = (unsigned) b1_get_word(card->port);
+ MsgLen = b1_get_slice(card->port, card->msgbuf);
+ DataB3Len = b1_get_slice(card->port, card->databuf);
+ spin_unlock_irqrestore(&card->lock, flags);
+
+ if (MsgLen < 30) { /* not CAPI 64Bit */
+ memset(card->msgbuf + MsgLen, 0, 30-MsgLen);
+ MsgLen = 30;
+ CAPIMSG_SETLEN(card->msgbuf, 30);
+ }
+ if (!(skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC))) {
+ printk(KERN_ERR "%s: incoming packet dropped\n",
+ card->name);
+ } else {
+ skb_put_data(skb, card->msgbuf, MsgLen);
+ skb_put_data(skb, card->databuf, DataB3Len);
+ capi_ctr_handle_message(ctrl, ApplId, skb);
+ }
+ break;
+
+ case RECEIVE_MESSAGE:
+
+ ApplId = (unsigned) b1_get_word(card->port);
+ MsgLen = b1_get_slice(card->port, card->msgbuf);
+ if (!(skb = alloc_skb(MsgLen, GFP_ATOMIC))) {
+ printk(KERN_ERR "%s: incoming packet dropped\n",
+ card->name);
+ spin_unlock_irqrestore(&card->lock, flags);
+ } else {
+ skb_put_data(skb, card->msgbuf, MsgLen);
+ if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_CONF)
+ capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
+ CAPIMSG_NCCI(skb->data),
+ CAPIMSG_MSGID(skb->data));
+ spin_unlock_irqrestore(&card->lock, flags);
+ capi_ctr_handle_message(ctrl, ApplId, skb);
+ }
+ break;
+
+ case RECEIVE_NEW_NCCI:
+
+ ApplId = b1_get_word(card->port);
+ NCCI = b1_get_word(card->port);
+ WindowSize = b1_get_word(card->port);
+ capilib_new_ncci(&cinfo->ncci_head, ApplId, NCCI, WindowSize);
+ spin_unlock_irqrestore(&card->lock, flags);
+ break;
+
+ case RECEIVE_FREE_NCCI:
+
+ ApplId = b1_get_word(card->port);
+ NCCI = b1_get_word(card->port);
+ if (NCCI != 0xffffffff)
+ capilib_free_ncci(&cinfo->ncci_head, ApplId, NCCI);
+ spin_unlock_irqrestore(&card->lock, flags);
+ break;
+
+ case RECEIVE_START:
+ /* b1_put_byte(card->port, SEND_POLLACK); */
+ spin_unlock_irqrestore(&card->lock, flags);
+ capi_ctr_resume_output(ctrl);
+ break;
+
+ case RECEIVE_STOP:
+ spin_unlock_irqrestore(&card->lock, flags);
+ capi_ctr_suspend_output(ctrl);
+ break;
+
+ case RECEIVE_INIT:
+
+ cinfo->versionlen = b1_get_slice(card->port, cinfo->versionbuf);
+ spin_unlock_irqrestore(&card->lock, flags);
+ b1_parse_version(cinfo);
+ printk(KERN_INFO "%s: %s-card (%s) now active\n",
+ card->name,
+ cinfo->version[VER_CARDTYPE],
+ cinfo->version[VER_DRIVER]);
+ capi_ctr_ready(ctrl);
+ break;
+
+ case RECEIVE_TASK_READY:
+ ApplId = (unsigned) b1_get_word(card->port);
+ MsgLen = b1_get_slice(card->port, card->msgbuf);
+ spin_unlock_irqrestore(&card->lock, flags);
+ card->msgbuf[MsgLen] = 0;
+ while (MsgLen > 0
+ && (card->msgbuf[MsgLen - 1] == '\n'
+ || card->msgbuf[MsgLen - 1] == '\r')) {
+ card->msgbuf[MsgLen - 1] = 0;
+ MsgLen--;
+ }
+ printk(KERN_INFO "%s: task %d \"%s\" ready.\n",
+ card->name, ApplId, card->msgbuf);
+ break;
+
+ case RECEIVE_DEBUGMSG:
+ MsgLen = b1_get_slice(card->port, card->msgbuf);
+ spin_unlock_irqrestore(&card->lock, flags);
+ card->msgbuf[MsgLen] = 0;
+ while (MsgLen > 0
+ && (card->msgbuf[MsgLen - 1] == '\n'
+ || card->msgbuf[MsgLen - 1] == '\r')) {
+ card->msgbuf[MsgLen - 1] = 0;
+ MsgLen--;
+ }
+ printk(KERN_INFO "%s: DEBUG: %s\n", card->name, card->msgbuf);
+ break;
+
+ case 0xff:
+ spin_unlock_irqrestore(&card->lock, flags);
+ printk(KERN_ERR "%s: card removed ?\n", card->name);
+ return IRQ_NONE;
+ default:
+ spin_unlock_irqrestore(&card->lock, flags);
+ printk(KERN_ERR "%s: b1_interrupt: 0x%x ???\n",
+ card->name, b1cmd);
+ return IRQ_HANDLED;
+ }
+ return IRQ_HANDLED;
+}
+
+/* ------------------------------------------------------------- */
+int b1_proc_show(struct seq_file *m, void *v)
+{
+ struct capi_ctr *ctrl = m->private;
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+ avmcard *card = cinfo->card;
+ u8 flag;
+ char *s;
+
+ seq_printf(m, "%-16s %s\n", "name", card->name);
+ seq_printf(m, "%-16s 0x%x\n", "io", card->port);
+ seq_printf(m, "%-16s %d\n", "irq", card->irq);
+ switch (card->cardtype) {
+ case avm_b1isa: s = "B1 ISA"; break;
+ case avm_b1pci: s = "B1 PCI"; break;
+ case avm_b1pcmcia: s = "B1 PCMCIA"; break;
+ case avm_m1: s = "M1"; break;
+ case avm_m2: s = "M2"; break;
+ case avm_t1isa: s = "T1 ISA (HEMA)"; break;
+ case avm_t1pci: s = "T1 PCI"; break;
+ case avm_c4: s = "C4"; break;
+ case avm_c2: s = "C2"; break;
+ default: s = "???"; break;
+ }
+ seq_printf(m, "%-16s %s\n", "type", s);
+ if (card->cardtype == avm_t1isa)
+ seq_printf(m, "%-16s %d\n", "cardnr", card->cardnr);
+ if ((s = cinfo->version[VER_DRIVER]) != NULL)
+ seq_printf(m, "%-16s %s\n", "ver_driver", s);
+ if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
+ seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
+ if ((s = cinfo->version[VER_SERIAL]) != NULL)
+ seq_printf(m, "%-16s %s\n", "ver_serial", s);
+
+ if (card->cardtype != avm_m1) {
+ flag = ((u8 *)(ctrl->profile.manu))[3];
+ if (flag)
+ seq_printf(m, "%-16s%s%s%s%s%s%s%s\n",
+ "protocol",
+ (flag & 0x01) ? " DSS1" : "",
+ (flag & 0x02) ? " CT1" : "",
+ (flag & 0x04) ? " VN3" : "",
+ (flag & 0x08) ? " NI1" : "",
+ (flag & 0x10) ? " AUSTEL" : "",
+ (flag & 0x20) ? " ESS" : "",
+ (flag & 0x40) ? " 1TR6" : ""
+ );
+ }
+ if (card->cardtype != avm_m1) {
+ flag = ((u8 *)(ctrl->profile.manu))[5];
+ if (flag)
+ seq_printf(m, "%-16s%s%s%s%s\n",
+ "linetype",
+ (flag & 0x01) ? " point to point" : "",
+ (flag & 0x02) ? " point to multipoint" : "",
+ (flag & 0x08) ? " leased line without D-channel" : "",
+ (flag & 0x04) ? " leased line with D-channel" : ""
+ );
+ }
+ seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
+
+ return 0;
+}
+EXPORT_SYMBOL(b1_proc_show);
+
+/* ------------------------------------------------------------- */
+
+#ifdef CONFIG_PCI
+
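+/*
+ * Allocate the DMA descriptor shared by the DMA-capable adapters: one
+ * coherent receive buffer and one coherent send buffer of the requested
+ * sizes, plus an (initially empty) send queue.  Returns NULL and logs a
+ * warning if any allocation fails.
+ */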
+avmcard_dmainfo *
+avmcard_dma_alloc(char *name, struct pci_dev *pdev, long rsize, long ssize)
+{
+ avmcard_dmainfo *p;
+ void *buf;
+
+ p = kzalloc(sizeof(avmcard_dmainfo), GFP_KERNEL);
+ if (!p) {
+ printk(KERN_WARNING "%s: no memory.\n", name);
+ goto err;
+ }
+
+ p->recvbuf.size = rsize;
+ buf = pci_alloc_consistent(pdev, rsize, &p->recvbuf.dmaaddr);
+ if (!buf) {
+ printk(KERN_WARNING "%s: allocation of receive dma buffer failed.\n", name);
+ goto err_kfree;
+ }
+ p->recvbuf.dmabuf = buf;
+
+ p->sendbuf.size = ssize;
+ buf = pci_alloc_consistent(pdev, ssize, &p->sendbuf.dmaaddr);
+ if (!buf) {
+ printk(KERN_WARNING "%s: allocation of send dma buffer failed.\n", name);
+ goto err_free_consistent;
+ }
+
+ p->sendbuf.dmabuf = buf;
+ skb_queue_head_init(&p->send_queue);
+
+ return p;
+
+err_free_consistent:
+ pci_free_consistent(p->pcidev, p->recvbuf.size,
+ p->recvbuf.dmabuf, p->recvbuf.dmaaddr);
+err_kfree:
+ kfree(p);
+err:
+ return NULL;
+}
+
+void avmcard_dma_free(avmcard_dmainfo *p)
+{
+ pci_free_consistent(p->pcidev, p->recvbuf.size,
+ p->recvbuf.dmabuf, p->recvbuf.dmaaddr);
+ pci_free_consistent(p->pcidev, p->sendbuf.size,
+ p->sendbuf.dmabuf, p->sendbuf.dmaaddr);
+ skb_queue_purge(&p->send_queue);
+ kfree(p);
+}
+
+EXPORT_SYMBOL(avmcard_dma_alloc);
+EXPORT_SYMBOL(avmcard_dma_free);
+
+#endif
+
+EXPORT_SYMBOL(b1_irq_table);
+
+EXPORT_SYMBOL(b1_alloc_card);
+EXPORT_SYMBOL(b1_free_card);
+EXPORT_SYMBOL(b1_detect);
+EXPORT_SYMBOL(b1_getrevision);
+EXPORT_SYMBOL(b1_load_t4file);
+EXPORT_SYMBOL(b1_load_config);
+EXPORT_SYMBOL(b1_loaded);
+EXPORT_SYMBOL(b1_load_firmware);
+EXPORT_SYMBOL(b1_reset_ctr);
+EXPORT_SYMBOL(b1_register_appl);
+EXPORT_SYMBOL(b1_release_appl);
+EXPORT_SYMBOL(b1_send_message);
+
+EXPORT_SYMBOL(b1_parse_version);
+EXPORT_SYMBOL(b1_interrupt);
+
+static int __init b1_init(void)
+{
+ char *p;
+ char rev[32];
+
+ if ((p = strchr(revision, ':')) != NULL && p[1]) {
+ strlcpy(rev, p + 2, 32);
+ if ((p = strchr(rev, '$')) != NULL && p > rev)
+ *(p - 1) = 0;
+ } else
+ strcpy(rev, "1.0");
+
+ printk(KERN_INFO "b1: revision %s\n", rev);
+
+ return 0;
+}
+
+static void __exit b1_exit(void)
+{
+}
+
+module_init(b1_init);
+module_exit(b1_exit);
diff --git a/drivers/staging/isdn/avm/b1dma.c b/drivers/staging/isdn/avm/b1dma.c
new file mode 100644
index 000000000000..6a3dc9937ce5
--- /dev/null
+++ b/drivers/staging/isdn/avm/b1dma.c
@@ -0,0 +1,981 @@
+/* $Id: b1dma.c,v 1.1.2.3 2004/02/10 01:07:12 keil Exp $
+ *
+ * Common module for AVM B1 cards that support DMA with the AMCC S5933
+ *
+ * Copyright 2000 by Carsten Paeth <calle@calle.de>
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/capi.h>
+#include <linux/kernelcapi.h>
+#include <linux/gfp.h>
+#include <asm/io.h>
+#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/netdevice.h>
+#include <linux/isdn/capilli.h>
+#include "avmcard.h"
+#include <linux/isdn/capicmd.h>
+#include <linux/isdn/capiutil.h>
+
+static char *revision = "$Revision: 1.1.2.3 $";
+
+#undef AVM_B1DMA_DEBUG
+
+/* ------------------------------------------------------------- */
+
+MODULE_DESCRIPTION("CAPI4Linux: DMA support for active AVM cards");
+MODULE_AUTHOR("Carsten Paeth");
+MODULE_LICENSE("GPL");
+
+static bool suppress_pollack;
+module_param(suppress_pollack, bool, 0);
+
+/* ------------------------------------------------------------- */
+
+static void b1dma_dispatch_tx(avmcard *card);
+
+/* ------------------------------------------------------------- */
+
+/* S5933 */
+
+#define AMCC_RXPTR 0x24
+#define AMCC_RXLEN 0x28
+#define AMCC_TXPTR 0x2c
+#define AMCC_TXLEN 0x30
+
+#define AMCC_INTCSR 0x38
+# define EN_READ_TC_INT 0x00008000L
+# define EN_WRITE_TC_INT 0x00004000L
+# define EN_TX_TC_INT EN_READ_TC_INT
+# define EN_RX_TC_INT EN_WRITE_TC_INT
+# define AVM_FLAG 0x30000000L
+
+# define ANY_S5933_INT 0x00800000L
+# define READ_TC_INT 0x00080000L
+# define WRITE_TC_INT 0x00040000L
+# define TX_TC_INT READ_TC_INT
+# define RX_TC_INT WRITE_TC_INT
+# define MASTER_ABORT_INT 0x00100000L
+# define TARGET_ABORT_INT 0x00200000L
+# define BUS_MASTER_INT 0x00200000L
+# define ALL_INT 0x000C0000L
+
+#define AMCC_MCSR 0x3c
+# define A2P_HI_PRIORITY 0x00000100L
+# define EN_A2P_TRANSFERS 0x00000400L
+# define P2A_HI_PRIORITY 0x00001000L
+# define EN_P2A_TRANSFERS 0x00004000L
+# define RESET_A2P_FLAGS 0x04000000L
+# define RESET_P2A_FLAGS 0x02000000L
+
+/* ------------------------------------------------------------- */
+
+static inline void b1dma_writel(avmcard *card, u32 value, int off)
+{
+ writel(value, card->mbase + off);
+}
+
+static inline u32 b1dma_readl(avmcard *card, int off)
+{
+ return readl(card->mbase + off);
+}
+
+/* ------------------------------------------------------------- */
+
+static inline int b1dma_tx_empty(unsigned int port)
+{
+ return inb(port + 0x03) & 0x1;
+}
+
+static inline int b1dma_rx_full(unsigned int port)
+{
+ return inb(port + 0x02) & 0x1;
+}
+
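+/*
+ * Byte-wise transfers over the card's I/O port, used by the register
+ * read/write helpers below.  Each byte waits up to one second for the
+ * transmit register to drain (or the receive register to fill) and the
+ * transfer fails with -1 on timeout.
+ */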
+static int b1dma_tolink(avmcard *card, void *buf, unsigned int len)
+{
+ unsigned long stop = jiffies + 1 * HZ; /* maximum wait time 1 sec */
+ unsigned char *s = (unsigned char *)buf;
+ while (len--) {
+ while (!b1dma_tx_empty(card->port)
+ && time_before(jiffies, stop));
+ if (!b1dma_tx_empty(card->port))
+ return -1;
+ t1outp(card->port, 0x01, *s++);
+ }
+ return 0;
+}
+
+static int b1dma_fromlink(avmcard *card, void *buf, unsigned int len)
+{
+ unsigned long stop = jiffies + 1 * HZ; /* maximum wait time 1 sec */
+ unsigned char *s = (unsigned char *)buf;
+ while (len--) {
+ while (!b1dma_rx_full(card->port)
+ && time_before(jiffies, stop));
+ if (!b1dma_rx_full(card->port))
+ return -1;
+ *s++ = t1inp(card->port, 0x00);
+ }
+ return 0;
+}
+
+static int WriteReg(avmcard *card, u32 reg, u8 val)
+{
+ u8 cmd = 0x00;
+ if (b1dma_tolink(card, &cmd, 1) == 0
+ && b1dma_tolink(card, &reg, 4) == 0) {
+ u32 tmp = val;
+ return b1dma_tolink(card, &tmp, 4);
+ }
+ return -1;
+}
+
+static u8 ReadReg(avmcard *card, u32 reg)
+{
+ u8 cmd = 0x01;
+ if (b1dma_tolink(card, &cmd, 1) == 0
+ && b1dma_tolink(card, &reg, 4) == 0) {
+ u32 tmp;
+ if (b1dma_fromlink(card, &tmp, 4) == 0)
+ return (u8)tmp;
+ }
+ return 0xff;
+}
+
+/* ------------------------------------------------------------- */
+
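+/*
+ * Helpers for building and parsing the little-endian command stream
+ * exchanged with the card through the DMA buffers: single bytes, 32-bit
+ * words and length-prefixed byte slices.
+ */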
+static inline void _put_byte(void **pp, u8 val)
+{
+ u8 *s = *pp;
+ *s++ = val;
+ *pp = s;
+}
+
+static inline void _put_word(void **pp, u32 val)
+{
+ u8 *s = *pp;
+ *s++ = val & 0xff;
+ *s++ = (val >> 8) & 0xff;
+ *s++ = (val >> 16) & 0xff;
+ *s++ = (val >> 24) & 0xff;
+ *pp = s;
+}
+
+static inline void _put_slice(void **pp, unsigned char *dp, unsigned int len)
+{
+ unsigned i = len;
+ _put_word(pp, i);
+ while (i-- > 0)
+ _put_byte(pp, *dp++);
+}
+
+static inline u8 _get_byte(void **pp)
+{
+ u8 *s = *pp;
+ u8 val;
+ val = *s++;
+ *pp = s;
+ return val;
+}
+
+static inline u32 _get_word(void **pp)
+{
+ u8 *s = *pp;
+ u32 val;
+ val = *s++;
+ val |= (*s++ << 8);
+ val |= (*s++ << 16);
+ val |= (*s++ << 24);
+ *pp = s;
+ return val;
+}
+
+static inline u32 _get_slice(void **pp, unsigned char *dp)
+{
+ unsigned int len, i;
+
+ len = i = _get_word(pp);
+ while (i-- > 0) *dp++ = _get_byte(pp);
+ return len;
+}
+
+/* ------------------------------------------------------------- */
+
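+/*
+ * Quiesce the card: disable interrupts and clear the AMCC DMA length
+ * registers, reset the card through its I/O port, then pulse the reset
+ * bits in the S5933 MCSR.  T1 PCI cards need a longer settle delay than
+ * the other variants.
+ */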
+void b1dma_reset(avmcard *card)
+{
+ card->csr = 0x0;
+ b1dma_writel(card, card->csr, AMCC_INTCSR);
+ b1dma_writel(card, 0, AMCC_MCSR);
+ b1dma_writel(card, 0, AMCC_RXLEN);
+ b1dma_writel(card, 0, AMCC_TXLEN);
+
+ t1outp(card->port, 0x10, 0x00);
+ t1outp(card->port, 0x07, 0x00);
+
+ b1dma_writel(card, 0, AMCC_MCSR);
+ mdelay(10);
+ b1dma_writel(card, 0x0f000000, AMCC_MCSR); /* reset all */
+ mdelay(10);
+ b1dma_writel(card, 0, AMCC_MCSR);
+ if (card->cardtype == avm_t1pci)
+ mdelay(42);
+ else
+ mdelay(10);
+}
+
+/* ------------------------------------------------------------- */
+
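+/*
+ * Check that an S5933 based card is really present: after a reset the
+ * MCSR must read back 0x000000E6 and the RX/TX pointer registers must
+ * mask their two low bits; the card's own port registers are probed
+ * afterwards.  Returns 0 on success, a small step number on failure.
+ */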
+static int b1dma_detect(avmcard *card)
+{
+ b1dma_writel(card, 0, AMCC_MCSR);
+ mdelay(10);
+ b1dma_writel(card, 0x0f000000, AMCC_MCSR); /* reset all */
+ mdelay(10);
+ b1dma_writel(card, 0, AMCC_MCSR);
+ mdelay(42);
+
+ b1dma_writel(card, 0, AMCC_RXLEN);
+ b1dma_writel(card, 0, AMCC_TXLEN);
+ card->csr = 0x0;
+ b1dma_writel(card, card->csr, AMCC_INTCSR);
+
+ if (b1dma_readl(card, AMCC_MCSR) != 0x000000E6)
+ return 1;
+
+ b1dma_writel(card, 0xffffffff, AMCC_RXPTR);
+ b1dma_writel(card, 0xffffffff, AMCC_TXPTR);
+ if (b1dma_readl(card, AMCC_RXPTR) != 0xfffffffc
+ || b1dma_readl(card, AMCC_TXPTR) != 0xfffffffc)
+ return 2;
+
+ b1dma_writel(card, 0x0, AMCC_RXPTR);
+ b1dma_writel(card, 0x0, AMCC_TXPTR);
+ if (b1dma_readl(card, AMCC_RXPTR) != 0x0
+ || b1dma_readl(card, AMCC_TXPTR) != 0x0)
+ return 3;
+
+ t1outp(card->port, 0x10, 0x00);
+ t1outp(card->port, 0x07, 0x00);
+
+ t1outp(card->port, 0x02, 0x02);
+ t1outp(card->port, 0x03, 0x02);
+
+ if ((t1inp(card->port, 0x02) & 0xFE) != 0x02
+ || t1inp(card->port, 0x3) != 0x03)
+ return 4;
+
+ t1outp(card->port, 0x02, 0x00);
+ t1outp(card->port, 0x03, 0x00);
+
+ if ((t1inp(card->port, 0x02) & 0xFE) != 0x00
+ || t1inp(card->port, 0x3) != 0x01)
+ return 5;
+
+ return 0;
+}
+
+int t1pci_detect(avmcard *card)
+{
+ int ret;
+
+ if ((ret = b1dma_detect(card)) != 0)
+ return ret;
+
+ /* Transputer test */
+
+ if (WriteReg(card, 0x80001000, 0x11) != 0
+ || WriteReg(card, 0x80101000, 0x22) != 0
+ || WriteReg(card, 0x80201000, 0x33) != 0
+ || WriteReg(card, 0x80301000, 0x44) != 0)
+ return 6;
+
+ if (ReadReg(card, 0x80001000) != 0x11
+ || ReadReg(card, 0x80101000) != 0x22
+ || ReadReg(card, 0x80201000) != 0x33
+ || ReadReg(card, 0x80301000) != 0x44)
+ return 7;
+
+ if (WriteReg(card, 0x80001000, 0x55) != 0
+ || WriteReg(card, 0x80101000, 0x66) != 0
+ || WriteReg(card, 0x80201000, 0x77) != 0
+ || WriteReg(card, 0x80301000, 0x88) != 0)
+ return 8;
+
+ if (ReadReg(card, 0x80001000) != 0x55
+ || ReadReg(card, 0x80101000) != 0x66
+ || ReadReg(card, 0x80201000) != 0x77
+ || ReadReg(card, 0x80301000) != 0x88)
+ return 9;
+
+ return 0;
+}
+
+int b1pciv4_detect(avmcard *card)
+{
+ int ret, i;
+
+ if ((ret = b1dma_detect(card)) != 0)
+ return ret;
+
+ for (i = 0; i < 5; i++) {
+ if (WriteReg(card, 0x80A00000, 0x21) != 0)
+ return 6;
+ if ((ReadReg(card, 0x80A00000) & 0x01) != 0x01)
+ return 7;
+ }
+ for (i = 0; i < 5; i++) {
+ if (WriteReg(card, 0x80A00000, 0x20) != 0)
+ return 8;
+ if ((ReadReg(card, 0x80A00000) & 0x01) != 0x00)
+ return 9;
+ }
+
+ return 0;
+}
+
+static void b1dma_queue_tx(avmcard *card, struct sk_buff *skb)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+
+ skb_queue_tail(&card->dma->send_queue, skb);
+
+ if (!(card->csr & EN_TX_TC_INT)) {
+ b1dma_dispatch_tx(card);
+ b1dma_writel(card, card->csr, AMCC_INTCSR);
+ }
+
+ spin_unlock_irqrestore(&card->lock, flags);
+}
+
+/* ------------------------------------------------------------- */
+
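+/*
+ * Move the next queued skb into the send DMA buffer.  Regular CAPI
+ * messages are framed with SEND_MESSAGE (or SEND_DATA_B3_REQ plus the
+ * B3 data slice); skbs with a zero CAPI length carry a raw card command
+ * in byte 2.  The length is rounded up to a multiple of four before the
+ * AMCC transmit channel is started.  Called with card->lock held.
+ */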
+static void b1dma_dispatch_tx(avmcard *card)
+{
+ avmcard_dmainfo *dma = card->dma;
+ struct sk_buff *skb;
+ u8 cmd, subcmd;
+ u16 len;
+ u32 txlen;
+ void *p;
+
+ skb = skb_dequeue(&dma->send_queue);
+
+ len = CAPIMSG_LEN(skb->data);
+
+ if (len) {
+ cmd = CAPIMSG_COMMAND(skb->data);
+ subcmd = CAPIMSG_SUBCOMMAND(skb->data);
+
+ p = dma->sendbuf.dmabuf;
+
+ if (CAPICMD(cmd, subcmd) == CAPI_DATA_B3_REQ) {
+ u16 dlen = CAPIMSG_DATALEN(skb->data);
+ _put_byte(&p, SEND_DATA_B3_REQ);
+ _put_slice(&p, skb->data, len);
+ _put_slice(&p, skb->data + len, dlen);
+ } else {
+ _put_byte(&p, SEND_MESSAGE);
+ _put_slice(&p, skb->data, len);
+ }
+ txlen = (u8 *)p - (u8 *)dma->sendbuf.dmabuf;
+#ifdef AVM_B1DMA_DEBUG
+ printk(KERN_DEBUG "tx: put msg len=%d\n", txlen);
+#endif
+ } else {
+ txlen = skb->len - 2;
+#ifdef AVM_B1DMA_POLLDEBUG
+ if (skb->data[2] == SEND_POLLACK)
+ printk(KERN_INFO "%s: send ack\n", card->name);
+#endif
+#ifdef AVM_B1DMA_DEBUG
+ printk(KERN_DEBUG "tx: put 0x%x len=%d\n",
+ skb->data[2], txlen);
+#endif
+ skb_copy_from_linear_data_offset(skb, 2, dma->sendbuf.dmabuf,
+ skb->len - 2);
+ }
+ txlen = (txlen + 3) & ~3;
+
+ b1dma_writel(card, dma->sendbuf.dmaaddr, AMCC_TXPTR);
+ b1dma_writel(card, txlen, AMCC_TXLEN);
+
+ card->csr |= EN_TX_TC_INT;
+
+ dev_kfree_skb_any(skb);
+}
+
+/* ------------------------------------------------------------- */
+
+static void queue_pollack(avmcard *card)
+{
+ struct sk_buff *skb;
+ void *p;
+
+ skb = alloc_skb(3, GFP_ATOMIC);
+ if (!skb) {
+ printk(KERN_CRIT "%s: no memory, lost poll ack\n",
+ card->name);
+ return;
+ }
+ p = skb->data;
+ _put_byte(&p, 0);
+ _put_byte(&p, 0);
+ _put_byte(&p, SEND_POLLACK);
+ skb_put(skb, (u8 *)p - (u8 *)skb->data);
+
+ b1dma_queue_tx(card, skb);
+}
+
+/* ------------------------------------------------------------- */
+
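+/*
+ * Dispatch one message from the receive DMA buffer.  The first byte
+ * after the 4-byte length word selects the command; DATA_B3 indications
+ * shorter than 30 bytes are zero-padded to 30 bytes before being handed
+ * to the CAPI core.
+ */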
+static void b1dma_handle_rx(avmcard *card)
+{
+ avmctrl_info *cinfo = &card->ctrlinfo[0];
+ avmcard_dmainfo *dma = card->dma;
+ struct capi_ctr *ctrl = &cinfo->capi_ctrl;
+ struct sk_buff *skb;
+ void *p = dma->recvbuf.dmabuf + 4;
+ u32 ApplId, MsgLen, DataB3Len, NCCI, WindowSize;
+ u8 b1cmd = _get_byte(&p);
+
+#ifdef AVM_B1DMA_DEBUG
+ printk(KERN_DEBUG "rx: 0x%x %lu\n", b1cmd, (unsigned long)dma->recvlen);
+#endif
+
+ switch (b1cmd) {
+ case RECEIVE_DATA_B3_IND:
+
+ ApplId = (unsigned) _get_word(&p);
+ MsgLen = _get_slice(&p, card->msgbuf);
+ DataB3Len = _get_slice(&p, card->databuf);
+
+ if (MsgLen < 30) { /* not CAPI 64Bit */
+ memset(card->msgbuf + MsgLen, 0, 30 - MsgLen);
+ MsgLen = 30;
+ CAPIMSG_SETLEN(card->msgbuf, 30);
+ }
+ if (!(skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC))) {
+ printk(KERN_ERR "%s: incoming packet dropped\n",
+ card->name);
+ } else {
+ skb_put_data(skb, card->msgbuf, MsgLen);
+ skb_put_data(skb, card->databuf, DataB3Len);
+ capi_ctr_handle_message(ctrl, ApplId, skb);
+ }
+ break;
+
+ case RECEIVE_MESSAGE:
+
+ ApplId = (unsigned) _get_word(&p);
+ MsgLen = _get_slice(&p, card->msgbuf);
+ if (!(skb = alloc_skb(MsgLen, GFP_ATOMIC))) {
+ printk(KERN_ERR "%s: incoming packet dropped\n",
+ card->name);
+ } else {
+ skb_put_data(skb, card->msgbuf, MsgLen);
+ if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_CONF) {
+ spin_lock(&card->lock);
+ capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
+ CAPIMSG_NCCI(skb->data),
+ CAPIMSG_MSGID(skb->data));
+ spin_unlock(&card->lock);
+ }
+ capi_ctr_handle_message(ctrl, ApplId, skb);
+ }
+ break;
+
+ case RECEIVE_NEW_NCCI:
+
+ ApplId = _get_word(&p);
+ NCCI = _get_word(&p);
+ WindowSize = _get_word(&p);
+ spin_lock(&card->lock);
+ capilib_new_ncci(&cinfo->ncci_head, ApplId, NCCI, WindowSize);
+ spin_unlock(&card->lock);
+ break;
+
+ case RECEIVE_FREE_NCCI:
+
+ ApplId = _get_word(&p);
+ NCCI = _get_word(&p);
+
+ if (NCCI != 0xffffffff) {
+ spin_lock(&card->lock);
+ capilib_free_ncci(&cinfo->ncci_head, ApplId, NCCI);
+ spin_unlock(&card->lock);
+ }
+ break;
+
+ case RECEIVE_START:
+#ifdef AVM_B1DMA_POLLDEBUG
+ printk(KERN_INFO "%s: receive poll\n", card->name);
+#endif
+ if (!suppress_pollack)
+ queue_pollack(card);
+ capi_ctr_resume_output(ctrl);
+ break;
+
+ case RECEIVE_STOP:
+ capi_ctr_suspend_output(ctrl);
+ break;
+
+ case RECEIVE_INIT:
+
+ cinfo->versionlen = _get_slice(&p, cinfo->versionbuf);
+ b1_parse_version(cinfo);
+ printk(KERN_INFO "%s: %s-card (%s) now active\n",
+ card->name,
+ cinfo->version[VER_CARDTYPE],
+ cinfo->version[VER_DRIVER]);
+ capi_ctr_ready(ctrl);
+ break;
+
+ case RECEIVE_TASK_READY:
+ ApplId = (unsigned) _get_word(&p);
+ MsgLen = _get_slice(&p, card->msgbuf);
+ card->msgbuf[MsgLen] = 0;
+ while (MsgLen > 0
+ && (card->msgbuf[MsgLen - 1] == '\n'
+ || card->msgbuf[MsgLen - 1] == '\r')) {
+ card->msgbuf[MsgLen - 1] = 0;
+ MsgLen--;
+ }
+ printk(KERN_INFO "%s: task %d \"%s\" ready.\n",
+ card->name, ApplId, card->msgbuf);
+ break;
+
+ case RECEIVE_DEBUGMSG:
+ MsgLen = _get_slice(&p, card->msgbuf);
+ card->msgbuf[MsgLen] = 0;
+ while (MsgLen > 0
+ && (card->msgbuf[MsgLen - 1] == '\n'
+ || card->msgbuf[MsgLen - 1] == '\r')) {
+ card->msgbuf[MsgLen - 1] = 0;
+ MsgLen--;
+ }
+ printk(KERN_INFO "%s: DEBUG: %s\n", card->name, card->msgbuf);
+ break;
+
+ default:
+ printk(KERN_ERR "%s: b1dma_interrupt: 0x%x ???\n",
+ card->name, b1cmd);
+ return;
+ }
+}
+
+/* ------------------------------------------------------------- */
+
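+/*
+ * Interrupt handling.  Receives happen in two DMA steps: first a 4-byte
+ * length word (recvlen == 0), then the payload, after which
+ * b1dma_handle_rx() runs with the lock dropped and the 4-byte read is
+ * re-armed.  On the transmit side the next queued skb is dispatched
+ * until the send queue is empty.
+ */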
+static void b1dma_handle_interrupt(avmcard *card)
+{
+ u32 status;
+ u32 newcsr;
+
+ spin_lock(&card->lock);
+
+ status = b1dma_readl(card, AMCC_INTCSR);
+ if ((status & ANY_S5933_INT) == 0) {
+ spin_unlock(&card->lock);
+ return;
+ }
+
+ newcsr = card->csr | (status & ALL_INT);
+ if (status & TX_TC_INT) newcsr &= ~EN_TX_TC_INT;
+ if (status & RX_TC_INT) newcsr &= ~EN_RX_TC_INT;
+ b1dma_writel(card, newcsr, AMCC_INTCSR);
+
+ if ((status & RX_TC_INT) != 0) {
+ struct avmcard_dmainfo *dma = card->dma;
+ u32 rxlen;
+ if (card->dma->recvlen == 0) {
+ rxlen = b1dma_readl(card, AMCC_RXLEN);
+ if (rxlen == 0) {
+ dma->recvlen = *((u32 *)dma->recvbuf.dmabuf);
+ rxlen = (dma->recvlen + 3) & ~3;
+ b1dma_writel(card, dma->recvbuf.dmaaddr + 4, AMCC_RXPTR);
+ b1dma_writel(card, rxlen, AMCC_RXLEN);
+#ifdef AVM_B1DMA_DEBUG
+ } else {
+ printk(KERN_ERR "%s: rx not complete (%d).\n",
+ card->name, rxlen);
+#endif
+ }
+ } else {
+ spin_unlock(&card->lock);
+ b1dma_handle_rx(card);
+ dma->recvlen = 0;
+ spin_lock(&card->lock);
+ b1dma_writel(card, dma->recvbuf.dmaaddr, AMCC_RXPTR);
+ b1dma_writel(card, 4, AMCC_RXLEN);
+ }
+ }
+
+ if ((status & TX_TC_INT) != 0) {
+ if (skb_queue_empty(&card->dma->send_queue))
+ card->csr &= ~EN_TX_TC_INT;
+ else
+ b1dma_dispatch_tx(card);
+ }
+ b1dma_writel(card, card->csr, AMCC_INTCSR);
+
+ spin_unlock(&card->lock);
+}
+
+irqreturn_t b1dma_interrupt(int interrupt, void *devptr)
+{
+ avmcard *card = devptr;
+
+ b1dma_handle_interrupt(card);
+ return IRQ_HANDLED;
+}
+
+/* ------------------------------------------------------------- */
+
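+/*
+ * Verify that the downloaded firmware has come up in DMA ("dword")
+ * mode: send a poll acknowledge over the I/O port and expect
+ * RECEIVE_POLLDWORD back within two seconds.
+ */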
+static int b1dma_loaded(avmcard *card)
+{
+ unsigned long stop;
+ unsigned char ans;
+ unsigned long tout = 2;
+ unsigned int base = card->port;
+
+ for (stop = jiffies + tout * HZ; time_before(jiffies, stop);) {
+ if (b1_tx_empty(base))
+ break;
+ }
+ if (!b1_tx_empty(base)) {
+		printk(KERN_ERR "%s: b1dma_loaded: tx err, corrupted t4 file?\n",
+ card->name);
+ return 0;
+ }
+ b1_put_byte(base, SEND_POLLACK);
+ for (stop = jiffies + tout * HZ; time_before(jiffies, stop);) {
+ if (b1_rx_full(base)) {
+ if ((ans = b1_get_byte(base)) == RECEIVE_POLLDWORD) {
+ return 1;
+ }
+ printk(KERN_ERR "%s: b1dma_loaded: got 0x%x, firmware not running in dword mode\n", card->name, ans);
+ return 0;
+ }
+ }
+ printk(KERN_ERR "%s: b1dma_loaded: firmware not running\n", card->name);
+ return 0;
+}
+
+/* ------------------------------------------------------------- */
+
+static void b1dma_send_init(avmcard *card)
+{
+ struct sk_buff *skb;
+ void *p;
+
+ skb = alloc_skb(15, GFP_ATOMIC);
+ if (!skb) {
+ printk(KERN_CRIT "%s: no memory, lost register appl.\n",
+ card->name);
+ return;
+ }
+ p = skb->data;
+ _put_byte(&p, 0);
+ _put_byte(&p, 0);
+ _put_byte(&p, SEND_INIT);
+ _put_word(&p, CAPI_MAXAPPL);
+ _put_word(&p, AVM_NCCI_PER_CHANNEL * 30);
+ _put_word(&p, card->cardnr - 1);
+ skb_put(skb, (u8 *)p - (u8 *)skb->data);
+
+ b1dma_queue_tx(card, skb);
+}
+
+int b1dma_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
+{
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+ avmcard *card = cinfo->card;
+ int retval;
+
+ b1dma_reset(card);
+
+ if ((retval = b1_load_t4file(card, &data->firmware))) {
+ b1dma_reset(card);
+ printk(KERN_ERR "%s: failed to load t4file!!\n",
+ card->name);
+ return retval;
+ }
+
+ if (data->configuration.len > 0 && data->configuration.data) {
+ if ((retval = b1_load_config(card, &data->configuration))) {
+ b1dma_reset(card);
+ printk(KERN_ERR "%s: failed to load config!!\n",
+ card->name);
+ return retval;
+ }
+ }
+
+ if (!b1dma_loaded(card)) {
+ b1dma_reset(card);
+ printk(KERN_ERR "%s: failed to load t4file.\n", card->name);
+ return -EIO;
+ }
+
+ card->csr = AVM_FLAG;
+ b1dma_writel(card, card->csr, AMCC_INTCSR);
+ b1dma_writel(card, EN_A2P_TRANSFERS | EN_P2A_TRANSFERS | A2P_HI_PRIORITY |
+ P2A_HI_PRIORITY | RESET_A2P_FLAGS | RESET_P2A_FLAGS,
+ AMCC_MCSR);
+ t1outp(card->port, 0x07, 0x30);
+ t1outp(card->port, 0x10, 0xF0);
+
+ card->dma->recvlen = 0;
+ b1dma_writel(card, card->dma->recvbuf.dmaaddr, AMCC_RXPTR);
+ b1dma_writel(card, 4, AMCC_RXLEN);
+ card->csr |= EN_RX_TC_INT;
+ b1dma_writel(card, card->csr, AMCC_INTCSR);
+
+ b1dma_send_init(card);
+
+ return 0;
+}
+
+void b1dma_reset_ctr(struct capi_ctr *ctrl)
+{
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+ avmcard *card = cinfo->card;
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+ b1dma_reset(card);
+
+ memset(cinfo->version, 0, sizeof(cinfo->version));
+ capilib_release(&cinfo->ncci_head);
+ spin_unlock_irqrestore(&card->lock, flags);
+ capi_ctr_down(ctrl);
+}
+
+/* ------------------------------------------------------------- */
+
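+/*
+ * Register an application with the card.  A positive level3cnt is an
+ * absolute number of logical connections, a negative value is scaled by
+ * the number of B-channels, and zero defaults to one connection per
+ * B-channel.
+ */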
+void b1dma_register_appl(struct capi_ctr *ctrl,
+ u16 appl,
+ capi_register_params *rp)
+{
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+ avmcard *card = cinfo->card;
+ struct sk_buff *skb;
+ int want = rp->level3cnt;
+ int nconn;
+ void *p;
+
+ if (want > 0) nconn = want;
+ else nconn = ctrl->profile.nbchannel * -want;
+ if (nconn == 0) nconn = ctrl->profile.nbchannel;
+
+ skb = alloc_skb(23, GFP_ATOMIC);
+ if (!skb) {
+ printk(KERN_CRIT "%s: no memory, lost register appl.\n",
+ card->name);
+ return;
+ }
+ p = skb->data;
+ _put_byte(&p, 0);
+ _put_byte(&p, 0);
+ _put_byte(&p, SEND_REGISTER);
+ _put_word(&p, appl);
+ _put_word(&p, 1024 * (nconn + 1));
+ _put_word(&p, nconn);
+ _put_word(&p, rp->datablkcnt);
+ _put_word(&p, rp->datablklen);
+ skb_put(skb, (u8 *)p - (u8 *)skb->data);
+
+ b1dma_queue_tx(card, skb);
+}
+
+/* ------------------------------------------------------------- */
+
+void b1dma_release_appl(struct capi_ctr *ctrl, u16 appl)
+{
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+ avmcard *card = cinfo->card;
+ struct sk_buff *skb;
+ void *p;
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+ capilib_release_appl(&cinfo->ncci_head, appl);
+ spin_unlock_irqrestore(&card->lock, flags);
+
+ skb = alloc_skb(7, GFP_ATOMIC);
+ if (!skb) {
+ printk(KERN_CRIT "%s: no memory, lost release appl.\n",
+ card->name);
+ return;
+ }
+ p = skb->data;
+ _put_byte(&p, 0);
+ _put_byte(&p, 0);
+ _put_byte(&p, SEND_RELEASE);
+ _put_word(&p, appl);
+
+ skb_put(skb, (u8 *)p - (u8 *)skb->data);
+
+ b1dma_queue_tx(card, skb);
+}
+
+/* ------------------------------------------------------------- */
+
+u16 b1dma_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
+{
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+ avmcard *card = cinfo->card;
+ u16 retval = CAPI_NOERROR;
+
+ if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
+ unsigned long flags;
+ spin_lock_irqsave(&card->lock, flags);
+ retval = capilib_data_b3_req(&cinfo->ncci_head,
+ CAPIMSG_APPID(skb->data),
+ CAPIMSG_NCCI(skb->data),
+ CAPIMSG_MSGID(skb->data));
+ spin_unlock_irqrestore(&card->lock, flags);
+ }
+ if (retval == CAPI_NOERROR)
+ b1dma_queue_tx(card, skb);
+
+ return retval;
+}
+
+/* ------------------------------------------------------------- */
+
+int b1dma_proc_show(struct seq_file *m, void *v)
+{
+ struct capi_ctr *ctrl = m->private;
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+ avmcard *card = cinfo->card;
+ u8 flag;
+ char *s;
+ u32 txoff, txlen, rxoff, rxlen, csr;
+ unsigned long flags;
+
+ seq_printf(m, "%-16s %s\n", "name", card->name);
+ seq_printf(m, "%-16s 0x%x\n", "io", card->port);
+ seq_printf(m, "%-16s %d\n", "irq", card->irq);
+ seq_printf(m, "%-16s 0x%lx\n", "membase", card->membase);
+ switch (card->cardtype) {
+ case avm_b1isa: s = "B1 ISA"; break;
+ case avm_b1pci: s = "B1 PCI"; break;
+ case avm_b1pcmcia: s = "B1 PCMCIA"; break;
+ case avm_m1: s = "M1"; break;
+ case avm_m2: s = "M2"; break;
+ case avm_t1isa: s = "T1 ISA (HEMA)"; break;
+ case avm_t1pci: s = "T1 PCI"; break;
+ case avm_c4: s = "C4"; break;
+ case avm_c2: s = "C2"; break;
+ default: s = "???"; break;
+ }
+ seq_printf(m, "%-16s %s\n", "type", s);
+ if ((s = cinfo->version[VER_DRIVER]) != NULL)
+ seq_printf(m, "%-16s %s\n", "ver_driver", s);
+ if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
+ seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
+ if ((s = cinfo->version[VER_SERIAL]) != NULL)
+ seq_printf(m, "%-16s %s\n", "ver_serial", s);
+
+ if (card->cardtype != avm_m1) {
+ flag = ((u8 *)(ctrl->profile.manu))[3];
+ if (flag)
+ seq_printf(m, "%-16s%s%s%s%s%s%s%s\n",
+ "protocol",
+ (flag & 0x01) ? " DSS1" : "",
+ (flag & 0x02) ? " CT1" : "",
+ (flag & 0x04) ? " VN3" : "",
+ (flag & 0x08) ? " NI1" : "",
+ (flag & 0x10) ? " AUSTEL" : "",
+ (flag & 0x20) ? " ESS" : "",
+ (flag & 0x40) ? " 1TR6" : ""
+ );
+ }
+ if (card->cardtype != avm_m1) {
+ flag = ((u8 *)(ctrl->profile.manu))[5];
+ if (flag)
+ seq_printf(m, "%-16s%s%s%s%s\n",
+ "linetype",
+ (flag & 0x01) ? " point to point" : "",
+ (flag & 0x02) ? " point to multipoint" : "",
+ (flag & 0x08) ? " leased line without D-channel" : "",
+ (flag & 0x04) ? " leased line with D-channel" : ""
+ );
+ }
+ seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
+
+ spin_lock_irqsave(&card->lock, flags);
+
+	txoff = (dma_addr_t)b1dma_readl(card, AMCC_TXPTR) - card->dma->sendbuf.dmaaddr;
+	txlen = b1dma_readl(card, AMCC_TXLEN);
+
+	rxoff = (dma_addr_t)b1dma_readl(card, AMCC_RXPTR) - card->dma->recvbuf.dmaaddr;
+	rxlen = b1dma_readl(card, AMCC_RXLEN);
+
+ csr = b1dma_readl(card, AMCC_INTCSR);
+
+ spin_unlock_irqrestore(&card->lock, flags);
+
+ seq_printf(m, "%-16s 0x%lx\n", "csr (cached)", (unsigned long)card->csr);
+ seq_printf(m, "%-16s 0x%lx\n", "csr", (unsigned long)csr);
+ seq_printf(m, "%-16s %lu\n", "txoff", (unsigned long)txoff);
+ seq_printf(m, "%-16s %lu\n", "txlen", (unsigned long)txlen);
+ seq_printf(m, "%-16s %lu\n", "rxoff", (unsigned long)rxoff);
+ seq_printf(m, "%-16s %lu\n", "rxlen", (unsigned long)rxlen);
+
+ return 0;
+}
+EXPORT_SYMBOL(b1dma_proc_show);
+
+/* ------------------------------------------------------------- */
+
+EXPORT_SYMBOL(b1dma_reset);
+EXPORT_SYMBOL(t1pci_detect);
+EXPORT_SYMBOL(b1pciv4_detect);
+EXPORT_SYMBOL(b1dma_interrupt);
+
+EXPORT_SYMBOL(b1dma_load_firmware);
+EXPORT_SYMBOL(b1dma_reset_ctr);
+EXPORT_SYMBOL(b1dma_register_appl);
+EXPORT_SYMBOL(b1dma_release_appl);
+EXPORT_SYMBOL(b1dma_send_message);
+
+static int __init b1dma_init(void)
+{
+ char *p;
+ char rev[32];
+
+ if ((p = strchr(revision, ':')) != NULL && p[1]) {
+ strlcpy(rev, p + 2, sizeof(rev));
+ if ((p = strchr(rev, '$')) != NULL && p > rev)
+ *(p - 1) = 0;
+ } else
+ strcpy(rev, "1.0");
+
+ printk(KERN_INFO "b1dma: revision %s\n", rev);
+
+ return 0;
+}
+
+static void __exit b1dma_exit(void)
+{
+}
+
+module_init(b1dma_init);
+module_exit(b1dma_exit);
diff --git a/drivers/staging/isdn/avm/b1isa.c b/drivers/staging/isdn/avm/b1isa.c
new file mode 100644
index 000000000000..cdfea72e0ef6
--- /dev/null
+++ b/drivers/staging/isdn/avm/b1isa.c
@@ -0,0 +1,243 @@
+/* $Id: b1isa.c,v 1.1.2.3 2004/02/10 01:07:12 keil Exp $
+ *
+ * Module for AVM B1 ISA-card.
+ *
+ * Copyright 1999 by Carsten Paeth <calle@calle.de>
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/capi.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+#include <linux/isdn/capicmd.h>
+#include <linux/isdn/capiutil.h>
+#include <linux/isdn/capilli.h>
+#include "avmcard.h"
+
+/* ------------------------------------------------------------- */
+
+static char *revision = "$Revision: 1.1.2.3 $";
+
+/* ------------------------------------------------------------- */
+
+MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM B1 ISA card");
+MODULE_AUTHOR("Carsten Paeth");
+MODULE_LICENSE("GPL");
+
+/* ------------------------------------------------------------- */
+
+static void b1isa_remove(struct pci_dev *pdev)
+{
+ avmctrl_info *cinfo = pci_get_drvdata(pdev);
+ avmcard *card;
+
+ if (!cinfo)
+ return;
+
+ card = cinfo->card;
+
+ b1_reset(card->port);
+ b1_reset(card->port);
+
+ detach_capi_ctr(&cinfo->capi_ctrl);
+ free_irq(card->irq, card);
+ release_region(card->port, AVMB1_PORTLEN);
+ b1_free_card(card);
+}
+
+/* ------------------------------------------------------------- */
+
+static char *b1isa_procinfo(struct capi_ctr *ctrl);
+
+static int b1isa_probe(struct pci_dev *pdev)
+{
+ avmctrl_info *cinfo;
+ avmcard *card;
+ int retval;
+
+ card = b1_alloc_card(1);
+ if (!card) {
+ printk(KERN_WARNING "b1isa: no memory.\n");
+ retval = -ENOMEM;
+ goto err;
+ }
+
+ cinfo = card->ctrlinfo;
+
+ card->port = pci_resource_start(pdev, 0);
+ card->irq = pdev->irq;
+ card->cardtype = avm_b1isa;
+ sprintf(card->name, "b1isa-%x", card->port);
+
+ if (card->port != 0x150 && card->port != 0x250
+ && card->port != 0x300 && card->port != 0x340) {
+ printk(KERN_WARNING "b1isa: invalid port 0x%x.\n", card->port);
+ retval = -EINVAL;
+ goto err_free;
+ }
+ if (b1_irq_table[card->irq & 0xf] == 0) {
+ printk(KERN_WARNING "b1isa: irq %d not valid.\n", card->irq);
+ retval = -EINVAL;
+ goto err_free;
+ }
+ if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
+ printk(KERN_WARNING "b1isa: ports 0x%03x-0x%03x in use.\n",
+ card->port, card->port + AVMB1_PORTLEN);
+ retval = -EBUSY;
+ goto err_free;
+ }
+ retval = request_irq(card->irq, b1_interrupt, 0, card->name, card);
+ if (retval) {
+ printk(KERN_ERR "b1isa: unable to get IRQ %d.\n", card->irq);
+ goto err_release_region;
+ }
+ b1_reset(card->port);
+ if ((retval = b1_detect(card->port, card->cardtype)) != 0) {
+ printk(KERN_NOTICE "b1isa: NO card at 0x%x (%d)\n",
+ card->port, retval);
+ retval = -ENODEV;
+ goto err_free_irq;
+ }
+ b1_reset(card->port);
+ b1_getrevision(card);
+
+ cinfo->capi_ctrl.owner = THIS_MODULE;
+ cinfo->capi_ctrl.driver_name = "b1isa";
+ cinfo->capi_ctrl.driverdata = cinfo;
+ cinfo->capi_ctrl.register_appl = b1_register_appl;
+ cinfo->capi_ctrl.release_appl = b1_release_appl;
+ cinfo->capi_ctrl.send_message = b1_send_message;
+ cinfo->capi_ctrl.load_firmware = b1_load_firmware;
+ cinfo->capi_ctrl.reset_ctr = b1_reset_ctr;
+ cinfo->capi_ctrl.procinfo = b1isa_procinfo;
+ cinfo->capi_ctrl.proc_show = b1_proc_show;
+ strcpy(cinfo->capi_ctrl.name, card->name);
+
+ retval = attach_capi_ctr(&cinfo->capi_ctrl);
+ if (retval) {
+ printk(KERN_ERR "b1isa: attach controller failed.\n");
+ goto err_free_irq;
+ }
+
+ printk(KERN_INFO "b1isa: AVM B1 ISA at i/o %#x, irq %d, revision %d\n",
+ card->port, card->irq, card->revision);
+
+ pci_set_drvdata(pdev, cinfo);
+ return 0;
+
+err_free_irq:
+ free_irq(card->irq, card);
+err_release_region:
+ release_region(card->port, AVMB1_PORTLEN);
+err_free:
+ b1_free_card(card);
+err:
+ return retval;
+}
+
+static char *b1isa_procinfo(struct capi_ctr *ctrl)
+{
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+
+ if (!cinfo)
+ return "";
+ sprintf(cinfo->infobuf, "%s %s 0x%x %d r%d",
+ cinfo->cardname[0] ? cinfo->cardname : "-",
+ cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
+ cinfo->card ? cinfo->card->port : 0x0,
+ cinfo->card ? cinfo->card->irq : 0,
+ cinfo->card ? cinfo->card->revision : 0
+ );
+ return cinfo->infobuf;
+}
+
+/* ------------------------------------------------------------- */
+
+#define MAX_CARDS 4
+static struct pci_dev isa_dev[MAX_CARDS];
+static int io[MAX_CARDS];
+static int irq[MAX_CARDS];
+
+module_param_hw_array(io, int, ioport, NULL, 0);
+module_param_hw_array(irq, int, irq, NULL, 0);
+MODULE_PARM_DESC(io, "I/O base address(es)");
+MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
+
+static int b1isa_add_card(struct capi_driver *driver, capicardparams *data)
+{
+ int i;
+
+ for (i = 0; i < MAX_CARDS; i++) {
+ if (isa_dev[i].resource[0].start)
+ continue;
+
+ isa_dev[i].resource[0].start = data->port;
+ isa_dev[i].irq = data->irq;
+
+ if (b1isa_probe(&isa_dev[i]) == 0)
+ return 0;
+ }
+ return -ENODEV;
+}
+
+static struct capi_driver capi_driver_b1isa = {
+ .name = "b1isa",
+ .revision = "1.0",
+ .add_card = b1isa_add_card,
+};
+
+static int __init b1isa_init(void)
+{
+ char *p;
+ char rev[32];
+ int i;
+
+ if ((p = strchr(revision, ':')) != NULL && p[1]) {
+ strlcpy(rev, p + 2, 32);
+ if ((p = strchr(rev, '$')) != NULL && p > rev)
+ *(p - 1) = 0;
+ } else
+ strcpy(rev, "1.0");
+
+ for (i = 0; i < MAX_CARDS; i++) {
+ if (!io[i])
+ break;
+
+ isa_dev[i].resource[0].start = io[i];
+ isa_dev[i].irq = irq[i];
+
+ if (b1isa_probe(&isa_dev[i]) != 0)
+ return -ENODEV;
+ }
+
+ strlcpy(capi_driver_b1isa.revision, rev, 32);
+ register_capi_driver(&capi_driver_b1isa);
+ printk(KERN_INFO "b1isa: revision %s\n", rev);
+
+ return 0;
+}
+
+static void __exit b1isa_exit(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_CARDS; i++) {
+ if (isa_dev[i].resource[0].start)
+ b1isa_remove(&isa_dev[i]);
+ }
+ unregister_capi_driver(&capi_driver_b1isa);
+}
+
+module_init(b1isa_init);
+module_exit(b1isa_exit);
diff --git a/drivers/staging/isdn/avm/b1pci.c b/drivers/staging/isdn/avm/b1pci.c
new file mode 100644
index 000000000000..b76b57a82c02
--- /dev/null
+++ b/drivers/staging/isdn/avm/b1pci.c
@@ -0,0 +1,416 @@
+/* $Id: b1pci.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $
+ *
+ * Module for AVM B1 PCI-card.
+ *
+ * Copyright 1999 by Carsten Paeth <calle@calle.de>
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/capi.h>
+#include <asm/io.h>
+#include <linux/init.h>
+#include <linux/isdn/capicmd.h>
+#include <linux/isdn/capiutil.h>
+#include <linux/isdn/capilli.h>
+#include "avmcard.h"
+
+/* ------------------------------------------------------------- */
+
+static char *revision = "$Revision: 1.1.2.2 $";
+
+/* ------------------------------------------------------------- */
+
+static struct pci_device_id b1pci_pci_tbl[] = {
+ { PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_B1, PCI_ANY_ID, PCI_ANY_ID },
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(pci, b1pci_pci_tbl);
+MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM B1 PCI card");
+MODULE_AUTHOR("Carsten Paeth");
+MODULE_LICENSE("GPL");
+
+/* ------------------------------------------------------------- */
+
+static char *b1pci_procinfo(struct capi_ctr *ctrl)
+{
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+
+ if (!cinfo)
+ return "";
+ sprintf(cinfo->infobuf, "%s %s 0x%x %d r%d",
+ cinfo->cardname[0] ? cinfo->cardname : "-",
+ cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
+ cinfo->card ? cinfo->card->port : 0x0,
+ cinfo->card ? cinfo->card->irq : 0,
+ cinfo->card ? cinfo->card->revision : 0
+ );
+ return cinfo->infobuf;
+}
+
+/* ------------------------------------------------------------- */
+
+static int b1pci_probe(struct capicardparams *p, struct pci_dev *pdev)
+{
+ avmcard *card;
+ avmctrl_info *cinfo;
+ int retval;
+
+ card = b1_alloc_card(1);
+ if (!card) {
+ printk(KERN_WARNING "b1pci: no memory.\n");
+ retval = -ENOMEM;
+ goto err;
+ }
+
+ cinfo = card->ctrlinfo;
+ sprintf(card->name, "b1pci-%x", p->port);
+ card->port = p->port;
+ card->irq = p->irq;
+ card->cardtype = avm_b1pci;
+
+ if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
+ printk(KERN_WARNING "b1pci: ports 0x%03x-0x%03x in use.\n",
+ card->port, card->port + AVMB1_PORTLEN);
+ retval = -EBUSY;
+ goto err_free;
+ }
+ b1_reset(card->port);
+ retval = b1_detect(card->port, card->cardtype);
+ if (retval) {
+ printk(KERN_NOTICE "b1pci: NO card at 0x%x (%d)\n",
+ card->port, retval);
+ retval = -ENODEV;
+ goto err_release_region;
+ }
+ b1_reset(card->port);
+ b1_getrevision(card);
+
+ retval = request_irq(card->irq, b1_interrupt, IRQF_SHARED, card->name, card);
+ if (retval) {
+ printk(KERN_ERR "b1pci: unable to get IRQ %d.\n", card->irq);
+ retval = -EBUSY;
+ goto err_release_region;
+ }
+
+ cinfo->capi_ctrl.driver_name = "b1pci";
+ cinfo->capi_ctrl.driverdata = cinfo;
+ cinfo->capi_ctrl.register_appl = b1_register_appl;
+ cinfo->capi_ctrl.release_appl = b1_release_appl;
+ cinfo->capi_ctrl.send_message = b1_send_message;
+ cinfo->capi_ctrl.load_firmware = b1_load_firmware;
+ cinfo->capi_ctrl.reset_ctr = b1_reset_ctr;
+ cinfo->capi_ctrl.procinfo = b1pci_procinfo;
+ cinfo->capi_ctrl.proc_show = b1_proc_show;
+ strcpy(cinfo->capi_ctrl.name, card->name);
+ cinfo->capi_ctrl.owner = THIS_MODULE;
+
+ retval = attach_capi_ctr(&cinfo->capi_ctrl);
+ if (retval) {
+ printk(KERN_ERR "b1pci: attach controller failed.\n");
+ goto err_free_irq;
+ }
+
+ if (card->revision >= 4) {
+ printk(KERN_INFO "b1pci: AVM B1 PCI V4 at i/o %#x, irq %d, revision %d (no dma)\n",
+ card->port, card->irq, card->revision);
+ } else {
+ printk(KERN_INFO "b1pci: AVM B1 PCI at i/o %#x, irq %d, revision %d\n",
+ card->port, card->irq, card->revision);
+ }
+
+ pci_set_drvdata(pdev, card);
+ return 0;
+
+err_free_irq:
+ free_irq(card->irq, card);
+err_release_region:
+ release_region(card->port, AVMB1_PORTLEN);
+err_free:
+ b1_free_card(card);
+err:
+ return retval;
+}
+
+static void b1pci_remove(struct pci_dev *pdev)
+{
+ avmcard *card = pci_get_drvdata(pdev);
+ avmctrl_info *cinfo = card->ctrlinfo;
+ unsigned int port = card->port;
+
+ b1_reset(port);
+ b1_reset(port);
+
+ detach_capi_ctr(&cinfo->capi_ctrl);
+ free_irq(card->irq, card);
+ release_region(card->port, AVMB1_PORTLEN);
+ b1_free_card(card);
+}
+
+#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
+/* ------------------------------------------------------------- */
+
+static char *b1pciv4_procinfo(struct capi_ctr *ctrl)
+{
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+
+ if (!cinfo)
+ return "";
+ sprintf(cinfo->infobuf, "%s %s 0x%x %d 0x%lx r%d",
+ cinfo->cardname[0] ? cinfo->cardname : "-",
+ cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
+ cinfo->card ? cinfo->card->port : 0x0,
+ cinfo->card ? cinfo->card->irq : 0,
+ cinfo->card ? cinfo->card->membase : 0,
+ cinfo->card ? cinfo->card->revision : 0
+ );
+ return cinfo->infobuf;
+}
+
+/* ------------------------------------------------------------- */
+
+static int b1pciv4_probe(struct capicardparams *p, struct pci_dev *pdev)
+{
+ avmcard *card;
+ avmctrl_info *cinfo;
+ int retval;
+
+ card = b1_alloc_card(1);
+ if (!card) {
+ printk(KERN_WARNING "b1pci: no memory.\n");
+ retval = -ENOMEM;
+ goto err;
+ }
+
+ card->dma = avmcard_dma_alloc("b1pci", pdev, 2048 + 128, 2048 + 128);
+ if (!card->dma) {
+ printk(KERN_WARNING "b1pci: dma alloc.\n");
+ retval = -ENOMEM;
+ goto err_free;
+ }
+
+ cinfo = card->ctrlinfo;
+ sprintf(card->name, "b1pciv4-%x", p->port);
+ card->port = p->port;
+ card->irq = p->irq;
+ card->membase = p->membase;
+ card->cardtype = avm_b1pci;
+
+ if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
+ printk(KERN_WARNING "b1pci: ports 0x%03x-0x%03x in use.\n",
+ card->port, card->port + AVMB1_PORTLEN);
+ retval = -EBUSY;
+ goto err_free_dma;
+ }
+
+ card->mbase = ioremap(card->membase, 64);
+ if (!card->mbase) {
+ printk(KERN_NOTICE "b1pci: can't remap memory at 0x%lx\n",
+ card->membase);
+ retval = -ENOMEM;
+ goto err_release_region;
+ }
+
+ b1dma_reset(card);
+
+ retval = b1pciv4_detect(card);
+ if (retval) {
+ printk(KERN_NOTICE "b1pci: NO card at 0x%x (%d)\n",
+ card->port, retval);
+ retval = -ENODEV;
+ goto err_unmap;
+ }
+ b1dma_reset(card);
+ b1_getrevision(card);
+
+ retval = request_irq(card->irq, b1dma_interrupt, IRQF_SHARED, card->name, card);
+ if (retval) {
+ printk(KERN_ERR "b1pci: unable to get IRQ %d.\n",
+ card->irq);
+ retval = -EBUSY;
+ goto err_unmap;
+ }
+
+ cinfo->capi_ctrl.owner = THIS_MODULE;
+ cinfo->capi_ctrl.driver_name = "b1pciv4";
+ cinfo->capi_ctrl.driverdata = cinfo;
+ cinfo->capi_ctrl.register_appl = b1dma_register_appl;
+ cinfo->capi_ctrl.release_appl = b1dma_release_appl;
+ cinfo->capi_ctrl.send_message = b1dma_send_message;
+ cinfo->capi_ctrl.load_firmware = b1dma_load_firmware;
+ cinfo->capi_ctrl.reset_ctr = b1dma_reset_ctr;
+ cinfo->capi_ctrl.procinfo = b1pciv4_procinfo;
+ cinfo->capi_ctrl.proc_show = b1dma_proc_show;
+ strcpy(cinfo->capi_ctrl.name, card->name);
+
+ retval = attach_capi_ctr(&cinfo->capi_ctrl);
+ if (retval) {
+ printk(KERN_ERR "b1pci: attach controller failed.\n");
+ goto err_free_irq;
+ }
+ card->cardnr = cinfo->capi_ctrl.cnr;
+
+ printk(KERN_INFO "b1pci: AVM B1 PCI V4 at i/o %#x, irq %d, mem %#lx, revision %d (dma)\n",
+ card->port, card->irq, card->membase, card->revision);
+
+ pci_set_drvdata(pdev, card);
+ return 0;
+
+err_free_irq:
+ free_irq(card->irq, card);
+err_unmap:
+ iounmap(card->mbase);
+err_release_region:
+ release_region(card->port, AVMB1_PORTLEN);
+err_free_dma:
+ avmcard_dma_free(card->dma);
+err_free:
+ b1_free_card(card);
+err:
+ return retval;
+}
+
+static void b1pciv4_remove(struct pci_dev *pdev)
+{
+ avmcard *card = pci_get_drvdata(pdev);
+ avmctrl_info *cinfo = card->ctrlinfo;
+
+ b1dma_reset(card);
+
+ detach_capi_ctr(&cinfo->capi_ctrl);
+ free_irq(card->irq, card);
+ iounmap(card->mbase);
+ release_region(card->port, AVMB1_PORTLEN);
+ avmcard_dma_free(card->dma);
+ b1_free_card(card);
+}
+
+#endif /* CONFIG_ISDN_DRV_AVMB1_B1PCIV4 */
+
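+/*
+ * A populated BAR 2 identifies a B1 PCI V4 (the DMA-capable variant:
+ * I/O ports in BAR 2, memory in BAR 0); older B1 PCI cards only expose
+ * their I/O ports in BAR 1.
+ */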
+static int b1pci_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct capicardparams param;
+ int retval;
+
+ if (pci_enable_device(pdev) < 0) {
+ printk(KERN_ERR "b1pci: failed to enable AVM-B1\n");
+ return -ENODEV;
+ }
+ param.irq = pdev->irq;
+
+ if (pci_resource_start(pdev, 2)) { /* B1 PCI V4 */
+#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
+ pci_set_master(pdev);
+#endif
+ param.membase = pci_resource_start(pdev, 0);
+ param.port = pci_resource_start(pdev, 2);
+
+ printk(KERN_INFO "b1pci: PCI BIOS reports AVM-B1 V4 at i/o %#x, irq %d, mem %#x\n",
+ param.port, param.irq, param.membase);
+#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
+ retval = b1pciv4_probe(&param, pdev);
+#else
+ retval = b1pci_probe(&param, pdev);
+#endif
+ if (retval != 0) {
+ printk(KERN_ERR "b1pci: no AVM-B1 V4 at i/o %#x, irq %d, mem %#x detected\n",
+ param.port, param.irq, param.membase);
+ }
+ } else {
+ param.membase = 0;
+ param.port = pci_resource_start(pdev, 1);
+
+ printk(KERN_INFO "b1pci: PCI BIOS reports AVM-B1 at i/o %#x, irq %d\n",
+ param.port, param.irq);
+ retval = b1pci_probe(&param, pdev);
+ if (retval != 0) {
+ printk(KERN_ERR "b1pci: no AVM-B1 at i/o %#x, irq %d detected\n",
+ param.port, param.irq);
+ }
+ }
+ return retval;
+}
+
+static void b1pci_pci_remove(struct pci_dev *pdev)
+{
+#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
+ avmcard *card = pci_get_drvdata(pdev);
+
+ if (card->dma)
+ b1pciv4_remove(pdev);
+ else
+ b1pci_remove(pdev);
+#else
+ b1pci_remove(pdev);
+#endif
+}
+
+static struct pci_driver b1pci_pci_driver = {
+ .name = "b1pci",
+ .id_table = b1pci_pci_tbl,
+ .probe = b1pci_pci_probe,
+ .remove = b1pci_pci_remove,
+};
+
+static struct capi_driver capi_driver_b1pci = {
+ .name = "b1pci",
+ .revision = "1.0",
+};
+#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
+static struct capi_driver capi_driver_b1pciv4 = {
+ .name = "b1pciv4",
+ .revision = "1.0",
+};
+#endif
+
+static int __init b1pci_init(void)
+{
+ char *p;
+ char rev[32];
+ int err;
+
+ if ((p = strchr(revision, ':')) != NULL && p[1]) {
+ strlcpy(rev, p + 2, 32);
+ if ((p = strchr(rev, '$')) != NULL && p > rev)
+ *(p - 1) = 0;
+ } else
+ strcpy(rev, "1.0");
+
+ err = pci_register_driver(&b1pci_pci_driver);
+ if (!err) {
+ strlcpy(capi_driver_b1pci.revision, rev, 32);
+ register_capi_driver(&capi_driver_b1pci);
+#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
+ strlcpy(capi_driver_b1pciv4.revision, rev, 32);
+ register_capi_driver(&capi_driver_b1pciv4);
+#endif
+ printk(KERN_INFO "b1pci: revision %s\n", rev);
+ }
+ return err;
+}
+
+static void __exit b1pci_exit(void)
+{
+ unregister_capi_driver(&capi_driver_b1pci);
+#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
+ unregister_capi_driver(&capi_driver_b1pciv4);
+#endif
+ pci_unregister_driver(&b1pci_pci_driver);
+}
+
+module_init(b1pci_init);
+module_exit(b1pci_exit);
diff --git a/drivers/staging/isdn/avm/b1pcmcia.c b/drivers/staging/isdn/avm/b1pcmcia.c
new file mode 100644
index 000000000000..3aca16e62902
--- /dev/null
+++ b/drivers/staging/isdn/avm/b1pcmcia.c
@@ -0,0 +1,224 @@
+/* $Id: b1pcmcia.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $
+ *
+ * Module for AVM B1/M1/M2 PCMCIA-card.
+ *
+ * Copyright 1999 by Carsten Paeth <calle@calle.de>
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/capi.h>
+#include <linux/b1pcmcia.h>
+#include <linux/isdn/capicmd.h>
+#include <linux/isdn/capiutil.h>
+#include <linux/isdn/capilli.h>
+#include "avmcard.h"
+
+/* ------------------------------------------------------------- */
+
+static char *revision = "$Revision: 1.1.2.2 $";
+
+/* ------------------------------------------------------------- */
+
+MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM PCMCIA cards");
+MODULE_AUTHOR("Carsten Paeth");
+MODULE_LICENSE("GPL");
+
+/* ------------------------------------------------------------- */
+
+static void b1pcmcia_remove_ctr(struct capi_ctr *ctrl)
+{
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+ avmcard *card = cinfo->card;
+ unsigned int port = card->port;
+
+ b1_reset(port);
+ b1_reset(port);
+
+ detach_capi_ctr(ctrl);
+ free_irq(card->irq, card);
+ b1_free_card(card);
+}
+
+/* ------------------------------------------------------------- */
+
+static LIST_HEAD(cards);
+
+static char *b1pcmcia_procinfo(struct capi_ctr *ctrl);
+
+static int b1pcmcia_add_card(unsigned int port, unsigned irq,
+ enum avmcardtype cardtype)
+{
+ avmctrl_info *cinfo;
+ avmcard *card;
+ char *cardname;
+ int retval;
+
+ card = b1_alloc_card(1);
+ if (!card) {
+ printk(KERN_WARNING "b1pcmcia: no memory.\n");
+ retval = -ENOMEM;
+ goto err;
+ }
+ cinfo = card->ctrlinfo;
+
+ switch (cardtype) {
+ case avm_m1: sprintf(card->name, "m1-%x", port); break;
+ case avm_m2: sprintf(card->name, "m2-%x", port); break;
+ default: sprintf(card->name, "b1pcmcia-%x", port); break;
+ }
+ card->port = port;
+ card->irq = irq;
+ card->cardtype = cardtype;
+
+ retval = request_irq(card->irq, b1_interrupt, IRQF_SHARED, card->name, card);
+ if (retval) {
+ printk(KERN_ERR "b1pcmcia: unable to get IRQ %d.\n",
+ card->irq);
+ retval = -EBUSY;
+ goto err_free;
+ }
+ b1_reset(card->port);
+ if ((retval = b1_detect(card->port, card->cardtype)) != 0) {
+ printk(KERN_NOTICE "b1pcmcia: NO card at 0x%x (%d)\n",
+ card->port, retval);
+ retval = -ENODEV;
+ goto err_free_irq;
+ }
+ b1_reset(card->port);
+ b1_getrevision(card);
+
+ cinfo->capi_ctrl.owner = THIS_MODULE;
+ cinfo->capi_ctrl.driver_name = "b1pcmcia";
+ cinfo->capi_ctrl.driverdata = cinfo;
+ cinfo->capi_ctrl.register_appl = b1_register_appl;
+ cinfo->capi_ctrl.release_appl = b1_release_appl;
+ cinfo->capi_ctrl.send_message = b1_send_message;
+ cinfo->capi_ctrl.load_firmware = b1_load_firmware;
+ cinfo->capi_ctrl.reset_ctr = b1_reset_ctr;
+ cinfo->capi_ctrl.procinfo = b1pcmcia_procinfo;
+ cinfo->capi_ctrl.proc_show = b1_proc_show;
+ strcpy(cinfo->capi_ctrl.name, card->name);
+
+ retval = attach_capi_ctr(&cinfo->capi_ctrl);
+ if (retval) {
+ printk(KERN_ERR "b1pcmcia: attach controller failed.\n");
+ goto err_free_irq;
+ }
+ switch (cardtype) {
+ case avm_m1: cardname = "M1"; break;
+ case avm_m2: cardname = "M2"; break;
+ default: cardname = "B1 PCMCIA"; break;
+ }
+
+ printk(KERN_INFO "b1pcmcia: AVM %s at i/o %#x, irq %d, revision %d\n",
+ cardname, card->port, card->irq, card->revision);
+
+ list_add(&card->list, &cards);
+ return cinfo->capi_ctrl.cnr;
+
+err_free_irq:
+ free_irq(card->irq, card);
+err_free:
+ b1_free_card(card);
+err:
+ return retval;
+}
+
+/* ------------------------------------------------------------- */
+
+static char *b1pcmcia_procinfo(struct capi_ctr *ctrl)
+{
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+
+ if (!cinfo)
+ return "";
+ sprintf(cinfo->infobuf, "%s %s 0x%x %d r%d",
+ cinfo->cardname[0] ? cinfo->cardname : "-",
+ cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
+ cinfo->card ? cinfo->card->port : 0x0,
+ cinfo->card ? cinfo->card->irq : 0,
+ cinfo->card ? cinfo->card->revision : 0
+ );
+ return cinfo->infobuf;
+}
+
+/* ------------------------------------------------------------- */
+
+int b1pcmcia_addcard_b1(unsigned int port, unsigned irq)
+{
+ return b1pcmcia_add_card(port, irq, avm_b1pcmcia);
+}
+
+int b1pcmcia_addcard_m1(unsigned int port, unsigned irq)
+{
+ return b1pcmcia_add_card(port, irq, avm_m1);
+}
+
+int b1pcmcia_addcard_m2(unsigned int port, unsigned irq)
+{
+ return b1pcmcia_add_card(port, irq, avm_m2);
+}
+
+int b1pcmcia_delcard(unsigned int port, unsigned irq)
+{
+ struct list_head *l;
+ avmcard *card;
+
+ list_for_each(l, &cards) {
+ card = list_entry(l, avmcard, list);
+ if (card->port == port && card->irq == irq) {
+ b1pcmcia_remove_ctr(&card->ctrlinfo[0].capi_ctrl);
+ return 0;
+ }
+ }
+ return -ESRCH;
+}
+
+EXPORT_SYMBOL(b1pcmcia_addcard_b1);
+EXPORT_SYMBOL(b1pcmcia_addcard_m1);
+EXPORT_SYMBOL(b1pcmcia_addcard_m2);
+EXPORT_SYMBOL(b1pcmcia_delcard);
+
+static struct capi_driver capi_driver_b1pcmcia = {
+ .name = "b1pcmcia",
+ .revision = "1.0",
+};
+
+static int __init b1pcmcia_init(void)
+{
+ char *p;
+ char rev[32];
+
+ if ((p = strchr(revision, ':')) != NULL && p[1]) {
+ strlcpy(rev, p + 2, 32);
+ if ((p = strchr(rev, '$')) != NULL && p > rev)
+ *(p - 1) = 0;
+ } else
+ strcpy(rev, "1.0");
+
+ strlcpy(capi_driver_b1pcmcia.revision, rev, 32);
+ register_capi_driver(&capi_driver_b1pcmcia);
+	printk(KERN_INFO "b1pcmcia: revision %s\n", rev);
+
+ return 0;
+}
+
+static void __exit b1pcmcia_exit(void)
+{
+ unregister_capi_driver(&capi_driver_b1pcmcia);
+}
+
+module_init(b1pcmcia_init);
+module_exit(b1pcmcia_exit);
diff --git a/drivers/staging/isdn/avm/c4.c b/drivers/staging/isdn/avm/c4.c
new file mode 100644
index 000000000000..ac72cd204c4d
--- /dev/null
+++ b/drivers/staging/isdn/avm/c4.c
@@ -0,0 +1,1317 @@
+/* $Id: c4.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $
+ *
+ * Module for AVM C4 & C2 card.
+ *
+ * Copyright 1999 by Carsten Paeth <calle@calle.de>
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/capi.h>
+#include <linux/kernelcapi.h>
+#include <linux/init.h>
+#include <linux/gfp.h>
+#include <asm/io.h>
+#include <linux/uaccess.h>
+#include <linux/netdevice.h>
+#include <linux/isdn/capicmd.h>
+#include <linux/isdn/capiutil.h>
+#include <linux/isdn/capilli.h>
+#include "avmcard.h"
+
+#undef AVM_C4_DEBUG
+#undef AVM_C4_POLLDEBUG
+
+/* ------------------------------------------------------------- */
+
+static char *revision = "$Revision: 1.1.2.2 $";
+
+/* ------------------------------------------------------------- */
+
+static bool suppress_pollack;
+
+static const struct pci_device_id c4_pci_tbl[] = {
+ { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C4, 0, 0, (unsigned long)4 },
+ { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C2, 0, 0, (unsigned long)2 },
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(pci, c4_pci_tbl);
+MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM C2/C4 cards");
+MODULE_AUTHOR("Carsten Paeth");
+MODULE_LICENSE("GPL");
+module_param(suppress_pollack, bool, 0);
+
+/* ------------------------------------------------------------- */
+
+static void c4_dispatch_tx(avmcard *card);
+
+/* ------------------------------------------------------------- */
+
+#define DC21285_DRAM_A0MR 0x40000000
+#define DC21285_DRAM_A1MR 0x40004000
+#define DC21285_DRAM_A2MR 0x40008000
+#define DC21285_DRAM_A3MR 0x4000C000
+
+#define CAS_OFFSET 0x88
+
+#define DC21285_ARMCSR_BASE 0x42000000
+
+#define PCI_OUT_INT_STATUS 0x30
+#define PCI_OUT_INT_MASK 0x34
+#define MAILBOX_0 0x50
+#define MAILBOX_1 0x54
+#define MAILBOX_2 0x58
+#define MAILBOX_3 0x5C
+#define DOORBELL 0x60
+#define DOORBELL_SETUP 0x64
+
+#define CHAN_1_CONTROL 0x90
+#define CHAN_2_CONTROL 0xB0
+#define DRAM_TIMING 0x10C
+#define DRAM_ADDR_SIZE_0 0x110
+#define DRAM_ADDR_SIZE_1 0x114
+#define DRAM_ADDR_SIZE_2 0x118
+#define DRAM_ADDR_SIZE_3 0x11C
+#define SA_CONTROL 0x13C
+#define XBUS_CYCLE 0x148
+#define XBUS_STROBE 0x14C
+#define DBELL_PCI_MASK 0x150
+#define DBELL_SA_MASK 0x154
+
+#define SDRAM_SIZE 0x1000000
+
+/* ------------------------------------------------------------- */
+
+#define MBOX_PEEK_POKE MAILBOX_0
+
+#define DBELL_ADDR 0x01
+#define DBELL_DATA 0x02
+#define DBELL_RNWR 0x40
+#define DBELL_INIT 0x80
+
+/* ------------------------------------------------------------- */
+
+#define MBOX_UP_ADDR MAILBOX_0
+#define MBOX_UP_LEN MAILBOX_1
+#define MBOX_DOWN_ADDR MAILBOX_2
+#define MBOX_DOWN_LEN MAILBOX_3
+
+#define DBELL_UP_HOST 0x00000100
+#define DBELL_UP_ARM 0x00000200
+#define DBELL_DOWN_HOST 0x00000400
+#define DBELL_DOWN_ARM 0x00000800
+#define DBELL_RESET_HOST 0x40000000
+#define DBELL_RESET_ARM 0x80000000
+
+/* ------------------------------------------------------------- */
+
+#define DRAM_TIMING_DEF 0x001A01A5
+#define DRAM_AD_SZ_DEF0 0x00000045
+#define DRAM_AD_SZ_NULL 0x00000000
+
+#define SA_CTL_ALLRIGHT 0x64AA0271
+
+#define INIT_XBUS_CYCLE 0x100016DB
+#define INIT_XBUS_STROBE 0xF1F1F1F1
+
+/* ------------------------------------------------------------- */
+
+#define RESET_TIMEOUT (15 * HZ) /* 15 sec */
+#define PEEK_POKE_TIMEOUT (HZ / 10) /* 0.1 sec */
+
+/* ------------------------------------------------------------- */
+
+#define c4outmeml(addr, value) writel(value, addr)
+#define c4inmeml(addr) readl(addr)
+#define c4outmemw(addr, value) writew(value, addr)
+#define c4inmemw(addr) readw(addr)
+#define c4outmemb(addr, value) writeb(value, addr)
+#define c4inmemb(addr) readb(addr)
+
+/* ------------------------------------------------------------- */
+
+static inline int wait_for_doorbell(avmcard *card, unsigned long t)
+{
+ unsigned long stop;
+
+ stop = jiffies + t;
+ while (c4inmeml(card->mbase + DOORBELL) != 0xffffffff) {
+ if (!time_before(jiffies, stop))
+ return -1;
+ mb();
+ }
+ return 0;
+}
+
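+/*
+ * Single-word accesses to the card's address space go through the
+ * peek/poke mailbox: the address is written to MAILBOX_0 and announced
+ * via the doorbell, then the data word is written (poke) or read back
+ * (peek), waiting via wait_for_doorbell() before each step.
+ */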
+static int c4_poke(avmcard *card, unsigned long off, unsigned long value)
+{
+
+ if (wait_for_doorbell(card, HZ / 10) < 0)
+ return -1;
+
+ c4outmeml(card->mbase + MBOX_PEEK_POKE, off);
+ c4outmeml(card->mbase + DOORBELL, DBELL_ADDR);
+
+ if (wait_for_doorbell(card, HZ / 10) < 0)
+ return -1;
+
+ c4outmeml(card->mbase + MBOX_PEEK_POKE, value);
+ c4outmeml(card->mbase + DOORBELL, DBELL_DATA | DBELL_ADDR);
+
+ return 0;
+}
+
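+/*
+ * Read one 32-bit word from card memory: place the address in the
+ * peek/poke mailbox, ring DBELL_RNWR | DBELL_ADDR and fetch the
+ * result from the mailbox once the card has acknowledged.
+ */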
+static int c4_peek(avmcard *card, unsigned long off, unsigned long *valuep)
+{
+ if (wait_for_doorbell(card, HZ / 10) < 0)
+ return -1;
+
+ c4outmeml(card->mbase + MBOX_PEEK_POKE, off);
+ c4outmeml(card->mbase + DOORBELL, DBELL_RNWR | DBELL_ADDR);
+
+ if (wait_for_doorbell(card, HZ / 10) < 0)
+ return -1;
+
+ *valuep = c4inmeml(card->mbase + MBOX_PEEK_POKE);
+
+ return 0;
+}
+
+/* ------------------------------------------------------------- */
+
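+/*
+ * Download the firmware image into card memory word by word via the
+ * peek/poke mailbox, copying from user space where necessary. A poke
+ * failure is reported as a (probably corrupted) firmware file.
+ */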
+static int c4_load_t4file(avmcard *card, capiloaddatapart *t4file)
+{
+ u32 val;
+ unsigned char *dp;
+ u_int left;
+ u32 loadoff = 0;
+
+ dp = t4file->data;
+ left = t4file->len;
+ while (left >= sizeof(u32)) {
+ if (t4file->user) {
+ if (copy_from_user(&val, dp, sizeof(val)))
+ return -EFAULT;
+ } else {
+ memcpy(&val, dp, sizeof(val));
+ }
+ if (c4_poke(card, loadoff, val)) {
+			printk(KERN_ERR "%s: corrupted firmware file?\n",
+ card->name);
+ return -EIO;
+ }
+ left -= sizeof(u32);
+ dp += sizeof(u32);
+ loadoff += sizeof(u32);
+ }
+ if (left) {
+ val = 0;
+ if (t4file->user) {
+ if (copy_from_user(&val, dp, left))
+ return -EFAULT;
+ } else {
+ memcpy(&val, dp, left);
+ }
+ if (c4_poke(card, loadoff, val)) {
+			printk(KERN_ERR "%s: corrupted firmware file?\n",
+ card->name);
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+/* ------------------------------------------------------------- */
+
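+/*
+ * Helpers for building and parsing messages in the DMA buffers:
+ * words are stored little endian, and a "slice" is a 32-bit length
+ * followed by that many data bytes.
+ */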
+static inline void _put_byte(void **pp, u8 val)
+{
+ u8 *s = *pp;
+ *s++ = val;
+ *pp = s;
+}
+
+static inline void _put_word(void **pp, u32 val)
+{
+ u8 *s = *pp;
+ *s++ = val & 0xff;
+ *s++ = (val >> 8) & 0xff;
+ *s++ = (val >> 16) & 0xff;
+ *s++ = (val >> 24) & 0xff;
+ *pp = s;
+}
+
+static inline void _put_slice(void **pp, unsigned char *dp, unsigned int len)
+{
+ unsigned i = len;
+ _put_word(pp, i);
+ while (i-- > 0)
+ _put_byte(pp, *dp++);
+}
+
+static inline u8 _get_byte(void **pp)
+{
+ u8 *s = *pp;
+ u8 val;
+ val = *s++;
+ *pp = s;
+ return val;
+}
+
+static inline u32 _get_word(void **pp)
+{
+ u8 *s = *pp;
+ u32 val;
+ val = *s++;
+ val |= (*s++ << 8);
+ val |= (*s++ << 16);
+ val |= (*s++ << 24);
+ *pp = s;
+ return val;
+}
+
+static inline u32 _get_slice(void **pp, unsigned char *dp)
+{
+ unsigned int len, i;
+
+ len = i = _get_word(pp);
+ while (i-- > 0) *dp++ = _get_byte(pp);
+ return len;
+}
+
+/* ------------------------------------------------------------- */
+
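+/*
+ * Reset the card: ring DBELL_RESET_ARM, wait for the doorbell to go
+ * idle again and then clear both channel control registers in the
+ * DC21285 CSR space.
+ */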
+static void c4_reset(avmcard *card)
+{
+ unsigned long stop;
+
+ c4outmeml(card->mbase + DOORBELL, DBELL_RESET_ARM);
+
+ stop = jiffies + HZ * 10;
+ while (c4inmeml(card->mbase + DOORBELL) != 0xffffffff) {
+ if (!time_before(jiffies, stop))
+ return;
+ c4outmeml(card->mbase + DOORBELL, DBELL_ADDR);
+ mb();
+ }
+
+ c4_poke(card, DC21285_ARMCSR_BASE + CHAN_1_CONTROL, 0);
+ c4_poke(card, DC21285_ARMCSR_BASE + CHAN_2_CONTROL, 0);
+}
+
+/* ------------------------------------------------------------- */
+
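+/*
+ * Probe and initialize the card: check that the interrupt mask and
+ * mailbox registers are writable, reset the on-board ARM, program the
+ * DC21285 SDRAM controller and run a short memory test. Returns 0 on
+ * success or a nonzero step number identifying the check that failed.
+ */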
+static int c4_detect(avmcard *card)
+{
+ unsigned long stop, dummy;
+
+ c4outmeml(card->mbase + PCI_OUT_INT_MASK, 0x0c);
+ if (c4inmeml(card->mbase + PCI_OUT_INT_MASK) != 0x0c)
+ return 1;
+
+ c4outmeml(card->mbase + DOORBELL, DBELL_RESET_ARM);
+
+ stop = jiffies + HZ * 10;
+ while (c4inmeml(card->mbase + DOORBELL) != 0xffffffff) {
+ if (!time_before(jiffies, stop))
+ return 2;
+ c4outmeml(card->mbase + DOORBELL, DBELL_ADDR);
+ mb();
+ }
+
+ c4_poke(card, DC21285_ARMCSR_BASE + CHAN_1_CONTROL, 0);
+ c4_poke(card, DC21285_ARMCSR_BASE + CHAN_2_CONTROL, 0);
+
+ c4outmeml(card->mbase + MAILBOX_0, 0x55aa55aa);
+ if (c4inmeml(card->mbase + MAILBOX_0) != 0x55aa55aa) return 3;
+
+ c4outmeml(card->mbase + MAILBOX_0, 0xaa55aa55);
+ if (c4inmeml(card->mbase + MAILBOX_0) != 0xaa55aa55) return 4;
+
+ if (c4_poke(card, DC21285_ARMCSR_BASE + DBELL_SA_MASK, 0)) return 5;
+ if (c4_poke(card, DC21285_ARMCSR_BASE + DBELL_PCI_MASK, 0)) return 6;
+ if (c4_poke(card, DC21285_ARMCSR_BASE + SA_CONTROL, SA_CTL_ALLRIGHT))
+ return 7;
+ if (c4_poke(card, DC21285_ARMCSR_BASE + XBUS_CYCLE, INIT_XBUS_CYCLE))
+ return 8;
+ if (c4_poke(card, DC21285_ARMCSR_BASE + XBUS_STROBE, INIT_XBUS_STROBE))
+ return 8;
+ if (c4_poke(card, DC21285_ARMCSR_BASE + DRAM_TIMING, 0)) return 9;
+
+ mdelay(1);
+
+ if (c4_peek(card, DC21285_DRAM_A0MR, &dummy)) return 10;
+ if (c4_peek(card, DC21285_DRAM_A1MR, &dummy)) return 11;
+ if (c4_peek(card, DC21285_DRAM_A2MR, &dummy)) return 12;
+ if (c4_peek(card, DC21285_DRAM_A3MR, &dummy)) return 13;
+
+ if (c4_poke(card, DC21285_DRAM_A0MR + CAS_OFFSET, 0)) return 14;
+ if (c4_poke(card, DC21285_DRAM_A1MR + CAS_OFFSET, 0)) return 15;
+ if (c4_poke(card, DC21285_DRAM_A2MR + CAS_OFFSET, 0)) return 16;
+ if (c4_poke(card, DC21285_DRAM_A3MR + CAS_OFFSET, 0)) return 17;
+
+ mdelay(1);
+
+ if (c4_poke(card, DC21285_ARMCSR_BASE + DRAM_TIMING, DRAM_TIMING_DEF))
+ return 18;
+
+ if (c4_poke(card, DC21285_ARMCSR_BASE + DRAM_ADDR_SIZE_0, DRAM_AD_SZ_DEF0))
+ return 19;
+ if (c4_poke(card, DC21285_ARMCSR_BASE + DRAM_ADDR_SIZE_1, DRAM_AD_SZ_NULL))
+ return 20;
+ if (c4_poke(card, DC21285_ARMCSR_BASE + DRAM_ADDR_SIZE_2, DRAM_AD_SZ_NULL))
+ return 21;
+ if (c4_poke(card, DC21285_ARMCSR_BASE + DRAM_ADDR_SIZE_3, DRAM_AD_SZ_NULL))
+ return 22;
+
+ /* Transputer test */
+
+ if (c4_poke(card, 0x000000, 0x11111111)
+ || c4_poke(card, 0x400000, 0x22222222)
+ || c4_poke(card, 0x800000, 0x33333333)
+ || c4_poke(card, 0xC00000, 0x44444444))
+ return 23;
+
+ if (c4_peek(card, 0x000000, &dummy) || dummy != 0x11111111
+ || c4_peek(card, 0x400000, &dummy) || dummy != 0x22222222
+ || c4_peek(card, 0x800000, &dummy) || dummy != 0x33333333
+ || c4_peek(card, 0xC00000, &dummy) || dummy != 0x44444444)
+ return 24;
+
+ if (c4_poke(card, 0x000000, 0x55555555)
+ || c4_poke(card, 0x400000, 0x66666666)
+ || c4_poke(card, 0x800000, 0x77777777)
+ || c4_poke(card, 0xC00000, 0x88888888))
+ return 25;
+
+ if (c4_peek(card, 0x000000, &dummy) || dummy != 0x55555555
+ || c4_peek(card, 0x400000, &dummy) || dummy != 0x66666666
+ || c4_peek(card, 0x800000, &dummy) || dummy != 0x77777777
+ || c4_peek(card, 0xC00000, &dummy) || dummy != 0x88888888)
+ return 26;
+
+ return 0;
+}
+
+/* ------------------------------------------------------------- */
+
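+/*
+ * Hand the next queued message to the card. CAPI messages (nonzero
+ * CAPI length field) are wrapped as SEND_MESSAGE or SEND_DATA_B3_REQ
+ * slices in the DMA send buffer; driver-internal commands are copied
+ * as-is, skipping their two-byte header. The buffer is then passed to
+ * the card via the down mailbox and the DBELL_DOWN_ARM doorbell.
+ */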
+static void c4_dispatch_tx(avmcard *card)
+{
+ avmcard_dmainfo *dma = card->dma;
+ struct sk_buff *skb;
+ u8 cmd, subcmd;
+ u16 len;
+ u32 txlen;
+ void *p;
+
+
+ if (card->csr & DBELL_DOWN_ARM) { /* tx busy */
+ return;
+ }
+
+ skb = skb_dequeue(&dma->send_queue);
+ if (!skb) {
+#ifdef AVM_C4_DEBUG
+ printk(KERN_DEBUG "%s: tx underrun\n", card->name);
+#endif
+ return;
+ }
+
+ len = CAPIMSG_LEN(skb->data);
+
+ if (len) {
+ cmd = CAPIMSG_COMMAND(skb->data);
+ subcmd = CAPIMSG_SUBCOMMAND(skb->data);
+
+ p = dma->sendbuf.dmabuf;
+
+ if (CAPICMD(cmd, subcmd) == CAPI_DATA_B3_REQ) {
+ u16 dlen = CAPIMSG_DATALEN(skb->data);
+ _put_byte(&p, SEND_DATA_B3_REQ);
+ _put_slice(&p, skb->data, len);
+ _put_slice(&p, skb->data + len, dlen);
+ } else {
+ _put_byte(&p, SEND_MESSAGE);
+ _put_slice(&p, skb->data, len);
+ }
+ txlen = (u8 *)p - (u8 *)dma->sendbuf.dmabuf;
+#ifdef AVM_C4_DEBUG
+ printk(KERN_DEBUG "%s: tx put msg len=%d\n", card->name, txlen);
+#endif
+ } else {
+ txlen = skb->len - 2;
+#ifdef AVM_C4_POLLDEBUG
+ if (skb->data[2] == SEND_POLLACK)
+ printk(KERN_INFO "%s: ack to c4\n", card->name);
+#endif
+#ifdef AVM_C4_DEBUG
+ printk(KERN_DEBUG "%s: tx put 0x%x len=%d\n",
+ card->name, skb->data[2], txlen);
+#endif
+ skb_copy_from_linear_data_offset(skb, 2, dma->sendbuf.dmabuf,
+ skb->len - 2);
+ }
+ txlen = (txlen + 3) & ~3;
+
+ c4outmeml(card->mbase + MBOX_DOWN_ADDR, dma->sendbuf.dmaaddr);
+ c4outmeml(card->mbase + MBOX_DOWN_LEN, txlen);
+
+ card->csr |= DBELL_DOWN_ARM;
+
+ c4outmeml(card->mbase + DOORBELL, DBELL_DOWN_ARM);
+
+ dev_kfree_skb_any(skb);
+}
+
+/* ------------------------------------------------------------- */
+
+static void queue_pollack(avmcard *card)
+{
+ struct sk_buff *skb;
+ void *p;
+
+ skb = alloc_skb(3, GFP_ATOMIC);
+ if (!skb) {
+ printk(KERN_CRIT "%s: no memory, lost poll ack\n",
+ card->name);
+ return;
+ }
+ p = skb->data;
+ _put_byte(&p, 0);
+ _put_byte(&p, 0);
+ _put_byte(&p, SEND_POLLACK);
+ skb_put(skb, (u8 *)p - (u8 *)skb->data);
+
+ skb_queue_tail(&card->dma->send_queue, skb);
+ c4_dispatch_tx(card);
+}
+
+/* ------------------------------------------------------------- */
+
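+/*
+ * Process one message from the receive DMA buffer. The first byte
+ * selects the message type: CAPI payloads are forwarded to the
+ * matching logical controller, RECEIVE_START/RECEIVE_STOP throttle
+ * output, and RECEIVE_INIT activates the next logical controller.
+ */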
+static void c4_handle_rx(avmcard *card)
+{
+ avmcard_dmainfo *dma = card->dma;
+ struct capi_ctr *ctrl;
+ avmctrl_info *cinfo;
+ struct sk_buff *skb;
+ void *p = dma->recvbuf.dmabuf;
+ u32 ApplId, MsgLen, DataB3Len, NCCI, WindowSize;
+ u8 b1cmd = _get_byte(&p);
+ u32 cidx;
+
+
+#ifdef AVM_C4_DEBUG
+ printk(KERN_DEBUG "%s: rx 0x%x len=%lu\n", card->name,
+ b1cmd, (unsigned long)dma->recvlen);
+#endif
+
+ switch (b1cmd) {
+ case RECEIVE_DATA_B3_IND:
+
+ ApplId = (unsigned) _get_word(&p);
+ MsgLen = _get_slice(&p, card->msgbuf);
+ DataB3Len = _get_slice(&p, card->databuf);
+		cidx = CAPIMSG_CONTROLLER(card->msgbuf) - card->cardnr;
+ if (cidx >= card->nlogcontr) cidx = 0;
+ ctrl = &card->ctrlinfo[cidx].capi_ctrl;
+
+ if (MsgLen < 30) { /* not CAPI 64Bit */
+ memset(card->msgbuf + MsgLen, 0, 30 - MsgLen);
+ MsgLen = 30;
+ CAPIMSG_SETLEN(card->msgbuf, 30);
+ }
+ if (!(skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC))) {
+ printk(KERN_ERR "%s: incoming packet dropped\n",
+ card->name);
+ } else {
+ skb_put_data(skb, card->msgbuf, MsgLen);
+ skb_put_data(skb, card->databuf, DataB3Len);
+ capi_ctr_handle_message(ctrl, ApplId, skb);
+ }
+ break;
+
+ case RECEIVE_MESSAGE:
+
+ ApplId = (unsigned) _get_word(&p);
+ MsgLen = _get_slice(&p, card->msgbuf);
+		cidx = CAPIMSG_CONTROLLER(card->msgbuf) - card->cardnr;
+ if (cidx >= card->nlogcontr) cidx = 0;
+ cinfo = &card->ctrlinfo[cidx];
+ ctrl = &card->ctrlinfo[cidx].capi_ctrl;
+
+ if (!(skb = alloc_skb(MsgLen, GFP_ATOMIC))) {
+ printk(KERN_ERR "%s: incoming packet dropped\n",
+ card->name);
+ } else {
+ skb_put_data(skb, card->msgbuf, MsgLen);
+ if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_CONF)
+ capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
+ CAPIMSG_NCCI(skb->data),
+ CAPIMSG_MSGID(skb->data));
+
+ capi_ctr_handle_message(ctrl, ApplId, skb);
+ }
+ break;
+
+ case RECEIVE_NEW_NCCI:
+
+ ApplId = _get_word(&p);
+ NCCI = _get_word(&p);
+ WindowSize = _get_word(&p);
+ cidx = (NCCI & 0x7f) - card->cardnr;
+ if (cidx >= card->nlogcontr) cidx = 0;
+
+ capilib_new_ncci(&card->ctrlinfo[cidx].ncci_head, ApplId, NCCI, WindowSize);
+
+ break;
+
+ case RECEIVE_FREE_NCCI:
+
+ ApplId = _get_word(&p);
+ NCCI = _get_word(&p);
+
+ if (NCCI != 0xffffffff) {
+ cidx = (NCCI & 0x7f) - card->cardnr;
+ if (cidx >= card->nlogcontr) cidx = 0;
+ capilib_free_ncci(&card->ctrlinfo[cidx].ncci_head, ApplId, NCCI);
+ }
+ break;
+
+ case RECEIVE_START:
+#ifdef AVM_C4_POLLDEBUG
+ printk(KERN_INFO "%s: poll from c4\n", card->name);
+#endif
+ if (!suppress_pollack)
+ queue_pollack(card);
+ for (cidx = 0; cidx < card->nr_controllers; cidx++) {
+ ctrl = &card->ctrlinfo[cidx].capi_ctrl;
+ capi_ctr_resume_output(ctrl);
+ }
+ break;
+
+ case RECEIVE_STOP:
+ for (cidx = 0; cidx < card->nr_controllers; cidx++) {
+ ctrl = &card->ctrlinfo[cidx].capi_ctrl;
+ capi_ctr_suspend_output(ctrl);
+ }
+ break;
+
+ case RECEIVE_INIT:
+
+ cidx = card->nlogcontr;
+ if (cidx >= card->nr_controllers) {
+ printk(KERN_ERR "%s: card with %d controllers ??\n",
+ card->name, cidx + 1);
+ break;
+ }
+ card->nlogcontr++;
+ cinfo = &card->ctrlinfo[cidx];
+ ctrl = &cinfo->capi_ctrl;
+ cinfo->versionlen = _get_slice(&p, cinfo->versionbuf);
+ b1_parse_version(cinfo);
+ printk(KERN_INFO "%s: %s-card (%s) now active\n",
+ card->name,
+ cinfo->version[VER_CARDTYPE],
+ cinfo->version[VER_DRIVER]);
+ capi_ctr_ready(&cinfo->capi_ctrl);
+ break;
+
+ case RECEIVE_TASK_READY:
+ ApplId = (unsigned) _get_word(&p);
+ MsgLen = _get_slice(&p, card->msgbuf);
+ card->msgbuf[MsgLen] = 0;
+ while (MsgLen > 0
+ && (card->msgbuf[MsgLen - 1] == '\n'
+ || card->msgbuf[MsgLen - 1] == '\r')) {
+ card->msgbuf[MsgLen - 1] = 0;
+ MsgLen--;
+ }
+ printk(KERN_INFO "%s: task %d \"%s\" ready.\n",
+ card->name, ApplId, card->msgbuf);
+ break;
+
+ case RECEIVE_DEBUGMSG:
+ MsgLen = _get_slice(&p, card->msgbuf);
+ card->msgbuf[MsgLen] = 0;
+ while (MsgLen > 0
+ && (card->msgbuf[MsgLen - 1] == '\n'
+ || card->msgbuf[MsgLen - 1] == '\r')) {
+ card->msgbuf[MsgLen - 1] = 0;
+ MsgLen--;
+ }
+ printk(KERN_INFO "%s: DEBUG: %s\n", card->name, card->msgbuf);
+ break;
+
+ default:
+ printk(KERN_ERR "%s: c4_interrupt: 0x%x ???\n",
+ card->name, b1cmd);
+ return;
+ }
+}
+
+/* ------------------------------------------------------------- */
+
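+/*
+ * Doorbell interrupt handling: DBELL_RESET_HOST signals an unexpected
+ * card reset and takes all controllers down, DBELL_UP_HOST signals a
+ * filled receive buffer, and DBELL_DOWN_HOST acknowledges the previous
+ * transmit so the next one can be dispatched.
+ */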
+static irqreturn_t c4_handle_interrupt(avmcard *card)
+{
+ unsigned long flags;
+ u32 status;
+
+ spin_lock_irqsave(&card->lock, flags);
+ status = c4inmeml(card->mbase + DOORBELL);
+
+ if (status & DBELL_RESET_HOST) {
+ u_int i;
+ c4outmeml(card->mbase + PCI_OUT_INT_MASK, 0x0c);
+ spin_unlock_irqrestore(&card->lock, flags);
+ if (card->nlogcontr == 0)
+ return IRQ_HANDLED;
+ printk(KERN_ERR "%s: unexpected reset\n", card->name);
+ for (i = 0; i < card->nr_controllers; i++) {
+ avmctrl_info *cinfo = &card->ctrlinfo[i];
+ memset(cinfo->version, 0, sizeof(cinfo->version));
+ spin_lock_irqsave(&card->lock, flags);
+ capilib_release(&cinfo->ncci_head);
+ spin_unlock_irqrestore(&card->lock, flags);
+ capi_ctr_down(&cinfo->capi_ctrl);
+ }
+ card->nlogcontr = 0;
+ return IRQ_HANDLED;
+ }
+
+ status &= (DBELL_UP_HOST | DBELL_DOWN_HOST);
+ if (!status) {
+ spin_unlock_irqrestore(&card->lock, flags);
+ return IRQ_HANDLED;
+ }
+ c4outmeml(card->mbase + DOORBELL, status);
+
+ if ((status & DBELL_UP_HOST) != 0) {
+ card->dma->recvlen = c4inmeml(card->mbase + MBOX_UP_LEN);
+ c4outmeml(card->mbase + MBOX_UP_LEN, 0);
+ c4_handle_rx(card);
+ card->dma->recvlen = 0;
+ c4outmeml(card->mbase + MBOX_UP_LEN, card->dma->recvbuf.size);
+ c4outmeml(card->mbase + DOORBELL, DBELL_UP_ARM);
+ }
+
+ if ((status & DBELL_DOWN_HOST) != 0) {
+ card->csr &= ~DBELL_DOWN_ARM;
+ c4_dispatch_tx(card);
+ } else if (card->csr & DBELL_DOWN_HOST) {
+ if (c4inmeml(card->mbase + MBOX_DOWN_LEN) == 0) {
+ card->csr &= ~DBELL_DOWN_ARM;
+ c4_dispatch_tx(card);
+ }
+ }
+ spin_unlock_irqrestore(&card->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t c4_interrupt(int interrupt, void *devptr)
+{
+ avmcard *card = devptr;
+
+ return c4_handle_interrupt(card);
+}
+
+/* ------------------------------------------------------------- */
+
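+/*
+ * Queue the SEND_INIT message announcing the maximum number of
+ * applications and NCCIs to the card after firmware download.
+ */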
+static void c4_send_init(avmcard *card)
+{
+ struct sk_buff *skb;
+ void *p;
+ unsigned long flags;
+
+ skb = alloc_skb(15, GFP_ATOMIC);
+ if (!skb) {
+ printk(KERN_CRIT "%s: no memory, lost register appl.\n",
+ card->name);
+ return;
+ }
+ p = skb->data;
+ _put_byte(&p, 0);
+ _put_byte(&p, 0);
+ _put_byte(&p, SEND_INIT);
+ _put_word(&p, CAPI_MAXAPPL);
+ _put_word(&p, AVM_NCCI_PER_CHANNEL * 30);
+ _put_word(&p, card->cardnr - 1);
+ skb_put(skb, (u8 *)p - (u8 *)skb->data);
+
+ skb_queue_tail(&card->dma->send_queue, skb);
+ spin_lock_irqsave(&card->lock, flags);
+ c4_dispatch_tx(card);
+ spin_unlock_irqrestore(&card->lock, flags);
+}
+
+static int queue_sendconfigword(avmcard *card, u32 val)
+{
+ struct sk_buff *skb;
+ unsigned long flags;
+ void *p;
+
+ skb = alloc_skb(3 + 4, GFP_ATOMIC);
+ if (!skb) {
+ printk(KERN_CRIT "%s: no memory, send config\n",
+ card->name);
+ return -ENOMEM;
+ }
+ p = skb->data;
+ _put_byte(&p, 0);
+ _put_byte(&p, 0);
+ _put_byte(&p, SEND_CONFIG);
+ _put_word(&p, val);
+ skb_put(skb, (u8 *)p - (u8 *)skb->data);
+
+ skb_queue_tail(&card->dma->send_queue, skb);
+ spin_lock_irqsave(&card->lock, flags);
+ c4_dispatch_tx(card);
+ spin_unlock_irqrestore(&card->lock, flags);
+ return 0;
+}
+
+static int queue_sendconfig(avmcard *card, char cval[4])
+{
+ struct sk_buff *skb;
+ unsigned long flags;
+ void *p;
+
+ skb = alloc_skb(3 + 4, GFP_ATOMIC);
+ if (!skb) {
+ printk(KERN_CRIT "%s: no memory, send config\n",
+ card->name);
+ return -ENOMEM;
+ }
+ p = skb->data;
+ _put_byte(&p, 0);
+ _put_byte(&p, 0);
+ _put_byte(&p, SEND_CONFIG);
+ _put_byte(&p, cval[0]);
+ _put_byte(&p, cval[1]);
+ _put_byte(&p, cval[2]);
+ _put_byte(&p, cval[3]);
+ skb_put(skb, (u8 *)p - (u8 *)skb->data);
+
+ skb_queue_tail(&card->dma->send_queue, skb);
+
+ spin_lock_irqsave(&card->lock, flags);
+ c4_dispatch_tx(card);
+ spin_unlock_irqrestore(&card->lock, flags);
+ return 0;
+}
+
+static int c4_send_config(avmcard *card, capiloaddatapart *config)
+{
+ u8 val[4];
+ unsigned char *dp;
+ u_int left;
+ int retval;
+
+ if ((retval = queue_sendconfigword(card, 1)) != 0)
+ return retval;
+ if ((retval = queue_sendconfigword(card, config->len)) != 0)
+ return retval;
+
+ dp = config->data;
+ left = config->len;
+ while (left >= sizeof(u32)) {
+ if (config->user) {
+ if (copy_from_user(val, dp, sizeof(val)))
+ return -EFAULT;
+ } else {
+ memcpy(val, dp, sizeof(val));
+ }
+ if ((retval = queue_sendconfig(card, val)) != 0)
+ return retval;
+ left -= sizeof(val);
+ dp += sizeof(val);
+ }
+ if (left) {
+ memset(val, 0, sizeof(val));
+ if (config->user) {
+ if (copy_from_user(&val, dp, left))
+ return -EFAULT;
+ } else {
+ memcpy(&val, dp, left);
+ }
+ if ((retval = queue_sendconfig(card, val)) != 0)
+ return retval;
+ }
+
+ return 0;
+}
+
+static int c4_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
+{
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+ avmcard *card = cinfo->card;
+ int retval;
+
+ if ((retval = c4_load_t4file(card, &data->firmware))) {
+ printk(KERN_ERR "%s: failed to load t4file!!\n",
+ card->name);
+ c4_reset(card);
+ return retval;
+ }
+
+ card->csr = 0;
+ c4outmeml(card->mbase + MBOX_UP_LEN, 0);
+ c4outmeml(card->mbase + MBOX_DOWN_LEN, 0);
+ c4outmeml(card->mbase + DOORBELL, DBELL_INIT);
+ mdelay(1);
+ c4outmeml(card->mbase + DOORBELL,
+ DBELL_UP_HOST | DBELL_DOWN_HOST | DBELL_RESET_HOST);
+
+ c4outmeml(card->mbase + PCI_OUT_INT_MASK, 0x08);
+
+ card->dma->recvlen = 0;
+ c4outmeml(card->mbase + MBOX_UP_ADDR, card->dma->recvbuf.dmaaddr);
+ c4outmeml(card->mbase + MBOX_UP_LEN, card->dma->recvbuf.size);
+ c4outmeml(card->mbase + DOORBELL, DBELL_UP_ARM);
+
+ if (data->configuration.len > 0 && data->configuration.data) {
+ retval = c4_send_config(card, &data->configuration);
+ if (retval) {
+ printk(KERN_ERR "%s: failed to set config!!\n",
+ card->name);
+ c4_reset(card);
+ return retval;
+ }
+ }
+
+ c4_send_init(card);
+
+ return 0;
+}
+
+
+static void c4_reset_ctr(struct capi_ctr *ctrl)
+{
+ avmcard *card = ((avmctrl_info *)(ctrl->driverdata))->card;
+ avmctrl_info *cinfo;
+ u_int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+
+ c4_reset(card);
+
+ spin_unlock_irqrestore(&card->lock, flags);
+
+ for (i = 0; i < card->nr_controllers; i++) {
+ cinfo = &card->ctrlinfo[i];
+ memset(cinfo->version, 0, sizeof(cinfo->version));
+ capi_ctr_down(&cinfo->capi_ctrl);
+ }
+ card->nlogcontr = 0;
+}
+
+static void c4_remove(struct pci_dev *pdev)
+{
+ avmcard *card = pci_get_drvdata(pdev);
+ avmctrl_info *cinfo;
+ u_int i;
+
+ if (!card)
+ return;
+
+ c4_reset(card);
+
+ for (i = 0; i < card->nr_controllers; i++) {
+ cinfo = &card->ctrlinfo[i];
+ detach_capi_ctr(&cinfo->capi_ctrl);
+ }
+
+ free_irq(card->irq, card);
+ iounmap(card->mbase);
+ release_region(card->port, AVMB1_PORTLEN);
+ avmcard_dma_free(card->dma);
+ pci_set_drvdata(pdev, NULL);
+ b1_free_card(card);
+}
+
+/* ------------------------------------------------------------- */
+
+
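+/*
+ * Register a CAPI application with the card (only sent once, via the
+ * card's first controller). The number of logical connections comes
+ * from rp->level3cnt: positive values are used directly, negative
+ * values scale the default of four connections per B channel.
+ */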
+static void c4_register_appl(struct capi_ctr *ctrl,
+ u16 appl,
+ capi_register_params *rp)
+{
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+ avmcard *card = cinfo->card;
+ struct sk_buff *skb;
+ int want = rp->level3cnt;
+ unsigned long flags;
+ int nconn;
+ void *p;
+
+ if (ctrl->cnr == card->cardnr) {
+
+ if (want > 0) nconn = want;
+ else nconn = ctrl->profile.nbchannel * 4 * -want;
+ if (nconn == 0) nconn = ctrl->profile.nbchannel * 4;
+
+ skb = alloc_skb(23, GFP_ATOMIC);
+ if (!skb) {
+ printk(KERN_CRIT "%s: no memory, lost register appl.\n",
+ card->name);
+ return;
+ }
+ p = skb->data;
+ _put_byte(&p, 0);
+ _put_byte(&p, 0);
+ _put_byte(&p, SEND_REGISTER);
+ _put_word(&p, appl);
+ _put_word(&p, 1024 * (nconn + 1));
+ _put_word(&p, nconn);
+ _put_word(&p, rp->datablkcnt);
+ _put_word(&p, rp->datablklen);
+ skb_put(skb, (u8 *)p - (u8 *)skb->data);
+
+ skb_queue_tail(&card->dma->send_queue, skb);
+
+ spin_lock_irqsave(&card->lock, flags);
+ c4_dispatch_tx(card);
+ spin_unlock_irqrestore(&card->lock, flags);
+ }
+}
+
+/* ------------------------------------------------------------- */
+
+static void c4_release_appl(struct capi_ctr *ctrl, u16 appl)
+{
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+ avmcard *card = cinfo->card;
+ unsigned long flags;
+ struct sk_buff *skb;
+ void *p;
+
+ spin_lock_irqsave(&card->lock, flags);
+ capilib_release_appl(&cinfo->ncci_head, appl);
+ spin_unlock_irqrestore(&card->lock, flags);
+
+ if (ctrl->cnr == card->cardnr) {
+ skb = alloc_skb(7, GFP_ATOMIC);
+ if (!skb) {
+ printk(KERN_CRIT "%s: no memory, lost release appl.\n",
+ card->name);
+ return;
+ }
+ p = skb->data;
+ _put_byte(&p, 0);
+ _put_byte(&p, 0);
+ _put_byte(&p, SEND_RELEASE);
+ _put_word(&p, appl);
+
+ skb_put(skb, (u8 *)p - (u8 *)skb->data);
+ skb_queue_tail(&card->dma->send_queue, skb);
+ spin_lock_irqsave(&card->lock, flags);
+ c4_dispatch_tx(card);
+ spin_unlock_irqrestore(&card->lock, flags);
+ }
+}
+
+/* ------------------------------------------------------------- */
+
+
+static u16 c4_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
+{
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+ avmcard *card = cinfo->card;
+ u16 retval = CAPI_NOERROR;
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+ if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
+ retval = capilib_data_b3_req(&cinfo->ncci_head,
+ CAPIMSG_APPID(skb->data),
+ CAPIMSG_NCCI(skb->data),
+ CAPIMSG_MSGID(skb->data));
+ }
+ if (retval == CAPI_NOERROR) {
+ skb_queue_tail(&card->dma->send_queue, skb);
+ c4_dispatch_tx(card);
+ }
+ spin_unlock_irqrestore(&card->lock, flags);
+ return retval;
+}
+
+/* ------------------------------------------------------------- */
+
+static char *c4_procinfo(struct capi_ctr *ctrl)
+{
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+
+ if (!cinfo)
+ return "";
+ sprintf(cinfo->infobuf, "%s %s 0x%x %d 0x%lx",
+ cinfo->cardname[0] ? cinfo->cardname : "-",
+ cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
+ cinfo->card ? cinfo->card->port : 0x0,
+ cinfo->card ? cinfo->card->irq : 0,
+ cinfo->card ? cinfo->card->membase : 0
+ );
+ return cinfo->infobuf;
+}
+
+static int c4_proc_show(struct seq_file *m, void *v)
+{
+ struct capi_ctr *ctrl = m->private;
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+ avmcard *card = cinfo->card;
+ u8 flag;
+ char *s;
+
+ seq_printf(m, "%-16s %s\n", "name", card->name);
+ seq_printf(m, "%-16s 0x%x\n", "io", card->port);
+ seq_printf(m, "%-16s %d\n", "irq", card->irq);
+ seq_printf(m, "%-16s 0x%lx\n", "membase", card->membase);
+ switch (card->cardtype) {
+ case avm_b1isa: s = "B1 ISA"; break;
+ case avm_b1pci: s = "B1 PCI"; break;
+ case avm_b1pcmcia: s = "B1 PCMCIA"; break;
+ case avm_m1: s = "M1"; break;
+ case avm_m2: s = "M2"; break;
+ case avm_t1isa: s = "T1 ISA (HEMA)"; break;
+ case avm_t1pci: s = "T1 PCI"; break;
+ case avm_c4: s = "C4"; break;
+ case avm_c2: s = "C2"; break;
+ default: s = "???"; break;
+ }
+ seq_printf(m, "%-16s %s\n", "type", s);
+ if ((s = cinfo->version[VER_DRIVER]) != NULL)
+ seq_printf(m, "%-16s %s\n", "ver_driver", s);
+ if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
+ seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
+ if ((s = cinfo->version[VER_SERIAL]) != NULL)
+ seq_printf(m, "%-16s %s\n", "ver_serial", s);
+
+ if (card->cardtype != avm_m1) {
+ flag = ((u8 *)(ctrl->profile.manu))[3];
+ if (flag)
+ seq_printf(m, "%-16s%s%s%s%s%s%s%s\n",
+ "protocol",
+ (flag & 0x01) ? " DSS1" : "",
+ (flag & 0x02) ? " CT1" : "",
+ (flag & 0x04) ? " VN3" : "",
+ (flag & 0x08) ? " NI1" : "",
+ (flag & 0x10) ? " AUSTEL" : "",
+ (flag & 0x20) ? " ESS" : "",
+ (flag & 0x40) ? " 1TR6" : ""
+ );
+ }
+ if (card->cardtype != avm_m1) {
+ flag = ((u8 *)(ctrl->profile.manu))[5];
+ if (flag)
+ seq_printf(m, "%-16s%s%s%s%s\n",
+ "linetype",
+ (flag & 0x01) ? " point to point" : "",
+ (flag & 0x02) ? " point to multipoint" : "",
+ (flag & 0x08) ? " leased line without D-channel" : "",
+ (flag & 0x04) ? " leased line with D-channel" : ""
+ );
+ }
+ seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
+
+ return 0;
+}
+
+/* ------------------------------------------------------------- */
+
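+/*
+ * Set up a C2/C4 card: map its registers, run the c4_detect self test,
+ * request the shared interrupt and attach one CAPI controller per
+ * basic rate interface (two on the C2, four on the C4).
+ */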
+static int c4_add_card(struct capicardparams *p, struct pci_dev *dev,
+ int nr_controllers)
+{
+ avmcard *card;
+ avmctrl_info *cinfo;
+ int retval;
+ int i;
+
+ card = b1_alloc_card(nr_controllers);
+ if (!card) {
+ printk(KERN_WARNING "c4: no memory.\n");
+ retval = -ENOMEM;
+ goto err;
+ }
+ card->dma = avmcard_dma_alloc("c4", dev, 2048 + 128, 2048 + 128);
+ if (!card->dma) {
+ printk(KERN_WARNING "c4: no memory.\n");
+ retval = -ENOMEM;
+ goto err_free;
+ }
+
+ sprintf(card->name, "c%d-%x", nr_controllers, p->port);
+ card->port = p->port;
+ card->irq = p->irq;
+ card->membase = p->membase;
+ card->cardtype = (nr_controllers == 4) ? avm_c4 : avm_c2;
+
+ if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
+ printk(KERN_WARNING "c4: ports 0x%03x-0x%03x in use.\n",
+ card->port, card->port + AVMB1_PORTLEN);
+ retval = -EBUSY;
+ goto err_free_dma;
+ }
+
+ card->mbase = ioremap(card->membase, 128);
+ if (card->mbase == NULL) {
+ printk(KERN_NOTICE "c4: can't remap memory at 0x%lx\n",
+ card->membase);
+ retval = -EIO;
+ goto err_release_region;
+ }
+
+ retval = c4_detect(card);
+ if (retval != 0) {
+ printk(KERN_NOTICE "c4: NO card at 0x%x error(%d)\n",
+ card->port, retval);
+ retval = -EIO;
+ goto err_unmap;
+ }
+ c4_reset(card);
+
+ retval = request_irq(card->irq, c4_interrupt, IRQF_SHARED, card->name, card);
+ if (retval) {
+ printk(KERN_ERR "c4: unable to get IRQ %d.\n", card->irq);
+ retval = -EBUSY;
+ goto err_unmap;
+ }
+
+ for (i = 0; i < nr_controllers; i++) {
+ cinfo = &card->ctrlinfo[i];
+ cinfo->capi_ctrl.owner = THIS_MODULE;
+ cinfo->capi_ctrl.driver_name = "c4";
+ cinfo->capi_ctrl.driverdata = cinfo;
+ cinfo->capi_ctrl.register_appl = c4_register_appl;
+ cinfo->capi_ctrl.release_appl = c4_release_appl;
+ cinfo->capi_ctrl.send_message = c4_send_message;
+ cinfo->capi_ctrl.load_firmware = c4_load_firmware;
+ cinfo->capi_ctrl.reset_ctr = c4_reset_ctr;
+ cinfo->capi_ctrl.procinfo = c4_procinfo;
+ cinfo->capi_ctrl.proc_show = c4_proc_show;
+ strcpy(cinfo->capi_ctrl.name, card->name);
+
+ retval = attach_capi_ctr(&cinfo->capi_ctrl);
+ if (retval) {
+ printk(KERN_ERR "c4: attach controller failed (%d).\n", i);
+ for (i--; i >= 0; i--) {
+ cinfo = &card->ctrlinfo[i];
+ detach_capi_ctr(&cinfo->capi_ctrl);
+ }
+ goto err_free_irq;
+ }
+ if (i == 0)
+ card->cardnr = cinfo->capi_ctrl.cnr;
+ }
+
+ printk(KERN_INFO "c4: AVM C%d at i/o %#x, irq %d, mem %#lx\n",
+ nr_controllers, card->port, card->irq,
+ card->membase);
+ pci_set_drvdata(dev, card);
+ return 0;
+
+err_free_irq:
+ free_irq(card->irq, card);
+err_unmap:
+ iounmap(card->mbase);
+err_release_region:
+ release_region(card->port, AVMB1_PORTLEN);
+err_free_dma:
+ avmcard_dma_free(card->dma);
+err_free:
+ b1_free_card(card);
+err:
+ return retval;
+}
+
+/* ------------------------------------------------------------- */
+
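+/*
+ * PCI probe: enable the device, take I/O, IRQ and memory resources
+ * from the BARs and pass them to c4_add_card together with the
+ * controller count encoded in the device table entry.
+ */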
+static int c4_probe(struct pci_dev *dev, const struct pci_device_id *ent)
+{
+ int nr = ent->driver_data;
+ int retval = 0;
+ struct capicardparams param;
+
+ if (pci_enable_device(dev) < 0) {
+ printk(KERN_ERR "c4: failed to enable AVM-C%d\n", nr);
+ return -ENODEV;
+ }
+ pci_set_master(dev);
+
+ param.port = pci_resource_start(dev, 1);
+ param.irq = dev->irq;
+ param.membase = pci_resource_start(dev, 0);
+
+ printk(KERN_INFO "c4: PCI BIOS reports AVM-C%d at i/o %#x, irq %d, mem %#x\n",
+ nr, param.port, param.irq, param.membase);
+
+ retval = c4_add_card(&param, dev, nr);
+ if (retval != 0) {
+ printk(KERN_ERR "c4: no AVM-C%d at i/o %#x, irq %d detected, mem %#x\n",
+ nr, param.port, param.irq, param.membase);
+ pci_disable_device(dev);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static struct pci_driver c4_pci_driver = {
+ .name = "c4",
+ .id_table = c4_pci_tbl,
+ .probe = c4_probe,
+ .remove = c4_remove,
+};
+
+static struct capi_driver capi_driver_c2 = {
+ .name = "c2",
+ .revision = "1.0",
+};
+
+static struct capi_driver capi_driver_c4 = {
+ .name = "c4",
+ .revision = "1.0",
+};
+
+static int __init c4_init(void)
+{
+ char *p;
+ char rev[32];
+ int err;
+
+ if ((p = strchr(revision, ':')) != NULL && p[1]) {
+ strlcpy(rev, p + 2, 32);
+ if ((p = strchr(rev, '$')) != NULL && p > rev)
+ *(p - 1) = 0;
+ } else
+ strcpy(rev, "1.0");
+
+ err = pci_register_driver(&c4_pci_driver);
+ if (!err) {
+ strlcpy(capi_driver_c2.revision, rev, 32);
+ register_capi_driver(&capi_driver_c2);
+ strlcpy(capi_driver_c4.revision, rev, 32);
+ register_capi_driver(&capi_driver_c4);
+ printk(KERN_INFO "c4: revision %s\n", rev);
+ }
+ return err;
+}
+
+static void __exit c4_exit(void)
+{
+ unregister_capi_driver(&capi_driver_c2);
+ unregister_capi_driver(&capi_driver_c4);
+ pci_unregister_driver(&c4_pci_driver);
+}
+
+module_init(c4_init);
+module_exit(c4_exit);
diff --git a/drivers/staging/isdn/avm/t1isa.c b/drivers/staging/isdn/avm/t1isa.c
new file mode 100644
index 000000000000..2153619c5b31
--- /dev/null
+++ b/drivers/staging/isdn/avm/t1isa.c
@@ -0,0 +1,594 @@
+/* $Id: t1isa.c,v 1.1.2.3 2004/02/10 01:07:12 keil Exp $
+ *
+ * Module for AVM T1 HEMA-card.
+ *
+ * Copyright 1999 by Carsten Paeth <calle@calle.de>
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/capi.h>
+#include <linux/netdevice.h>
+#include <linux/kernelcapi.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/gfp.h>
+#include <asm/io.h>
+#include <linux/isdn/capicmd.h>
+#include <linux/isdn/capiutil.h>
+#include <linux/isdn/capilli.h>
+#include "avmcard.h"
+
+/* ------------------------------------------------------------- */
+
+static char *revision = "$Revision: 1.1.2.3 $";
+
+/* ------------------------------------------------------------- */
+
+MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM T1 HEMA ISA card");
+MODULE_AUTHOR("Carsten Paeth");
+MODULE_LICENSE("GPL");
+
+/* ------------------------------------------------------------- */
+
+static int hema_irq_table[16] =
+{0,
+ 0,
+ 0,
+ 0x80, /* irq 3 */
+ 0,
+ 0x90, /* irq 5 */
+ 0,
+ 0xA0, /* irq 7 */
+ 0,
+ 0xB0, /* irq 9 */
+ 0xC0, /* irq 10 */
+ 0xD0, /* irq 11 */
+ 0xE0, /* irq 12 */
+ 0,
+ 0,
+ 0xF0, /* irq 15 */
+};
+
+static int t1_detectandinit(unsigned int base, unsigned irq, int cardnr)
+{
+ unsigned char cregs[8];
+ unsigned char reverse_cardnr;
+ unsigned char dummy;
+ int i;
+
+ reverse_cardnr = ((cardnr & 0x01) << 3) | ((cardnr & 0x02) << 1)
+ | ((cardnr & 0x04) >> 1) | ((cardnr & 0x08) >> 3);
+ cregs[0] = (HEMA_VERSION_ID << 4) | (reverse_cardnr & 0xf);
+ cregs[1] = 0x00; /* fast & slow link connected to CON1 */
+ cregs[2] = 0x05; /* fast link 20MBit, slow link 20 MBit */
+ cregs[3] = 0;
+ cregs[4] = 0x11; /* zero wait state */
+ cregs[5] = hema_irq_table[irq & 0xf];
+ cregs[6] = 0;
+ cregs[7] = 0;
+
+ /*
+	 * nobody else should be using the ISA bus at this moment,
+	 * but there is no way to enforce that :-(
+ * save_flags(flags); cli();
+ */
+
+ /* board reset */
+ t1outp(base, T1_RESETBOARD, 0xf);
+ mdelay(100);
+ dummy = t1inp(base, T1_FASTLINK + T1_OUTSTAT); /* first read */
+
+ /* write config */
+ dummy = (base >> 4) & 0xff;
+ for (i = 1; i <= 0xf; i++) t1outp(base, i, dummy);
+ t1outp(base, HEMA_PAL_ID & 0xf, dummy);
+ t1outp(base, HEMA_PAL_ID >> 4, cregs[0]);
+ for (i = 1; i < 7; i++) t1outp(base, 0, cregs[i]);
+ t1outp(base, ((base >> 4)) & 0x3, cregs[7]);
+ /* restore_flags(flags); */
+
+ mdelay(100);
+ t1outp(base, T1_FASTLINK + T1_RESETLINK, 0);
+ t1outp(base, T1_SLOWLINK + T1_RESETLINK, 0);
+ mdelay(10);
+ t1outp(base, T1_FASTLINK + T1_RESETLINK, 1);
+ t1outp(base, T1_SLOWLINK + T1_RESETLINK, 1);
+ mdelay(100);
+ t1outp(base, T1_FASTLINK + T1_RESETLINK, 0);
+ t1outp(base, T1_SLOWLINK + T1_RESETLINK, 0);
+ mdelay(10);
+ t1outp(base, T1_FASTLINK + T1_ANALYSE, 0);
+ mdelay(5);
+ t1outp(base, T1_SLOWLINK + T1_ANALYSE, 0);
+
+ if (t1inp(base, T1_FASTLINK + T1_OUTSTAT) != 0x1) /* tx empty */
+ return 1;
+ if (t1inp(base, T1_FASTLINK + T1_INSTAT) != 0x0) /* rx empty */
+ return 2;
+ if (t1inp(base, T1_FASTLINK + T1_IRQENABLE) != 0x0)
+ return 3;
+ if ((t1inp(base, T1_FASTLINK + T1_FIFOSTAT) & 0xf0) != 0x70)
+ return 4;
+ if ((t1inp(base, T1_FASTLINK + T1_IRQMASTER) & 0x0e) != 0)
+ return 5;
+ if ((t1inp(base, T1_FASTLINK + T1_IDENT) & 0x7d) != 1)
+ return 6;
+ if (t1inp(base, T1_SLOWLINK + T1_OUTSTAT) != 0x1) /* tx empty */
+ return 7;
+ if ((t1inp(base, T1_SLOWLINK + T1_IRQMASTER) & 0x0e) != 0)
+ return 8;
+ if ((t1inp(base, T1_SLOWLINK + T1_IDENT) & 0x7d) != 0)
+ return 9;
+ return 0;
+}
+
+static irqreturn_t t1isa_interrupt(int interrupt, void *devptr)
+{
+ avmcard *card = devptr;
+ avmctrl_info *cinfo = &card->ctrlinfo[0];
+ struct capi_ctr *ctrl = &cinfo->capi_ctrl;
+ unsigned char b1cmd;
+ struct sk_buff *skb;
+
+ unsigned ApplId;
+ unsigned MsgLen;
+ unsigned DataB3Len;
+ unsigned NCCI;
+ unsigned WindowSize;
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+
+ while (b1_rx_full(card->port)) {
+
+ b1cmd = b1_get_byte(card->port);
+
+ switch (b1cmd) {
+
+ case RECEIVE_DATA_B3_IND:
+
+ ApplId = (unsigned) b1_get_word(card->port);
+ MsgLen = t1_get_slice(card->port, card->msgbuf);
+ DataB3Len = t1_get_slice(card->port, card->databuf);
+ spin_unlock_irqrestore(&card->lock, flags);
+
+ if (MsgLen < 30) { /* not CAPI 64Bit */
+ memset(card->msgbuf + MsgLen, 0, 30 - MsgLen);
+ MsgLen = 30;
+ CAPIMSG_SETLEN(card->msgbuf, 30);
+ }
+ if (!(skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC))) {
+ printk(KERN_ERR "%s: incoming packet dropped\n",
+ card->name);
+ } else {
+ skb_put_data(skb, card->msgbuf, MsgLen);
+ skb_put_data(skb, card->databuf, DataB3Len);
+ capi_ctr_handle_message(ctrl, ApplId, skb);
+ }
+ break;
+
+ case RECEIVE_MESSAGE:
+
+ ApplId = (unsigned) b1_get_word(card->port);
+ MsgLen = t1_get_slice(card->port, card->msgbuf);
+ if (!(skb = alloc_skb(MsgLen, GFP_ATOMIC))) {
+ spin_unlock_irqrestore(&card->lock, flags);
+ printk(KERN_ERR "%s: incoming packet dropped\n",
+ card->name);
+ } else {
+ skb_put_data(skb, card->msgbuf, MsgLen);
+ if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3)
+ capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
+ CAPIMSG_NCCI(skb->data),
+ CAPIMSG_MSGID(skb->data));
+ spin_unlock_irqrestore(&card->lock, flags);
+ capi_ctr_handle_message(ctrl, ApplId, skb);
+ }
+ break;
+
+ case RECEIVE_NEW_NCCI:
+
+ ApplId = b1_get_word(card->port);
+ NCCI = b1_get_word(card->port);
+ WindowSize = b1_get_word(card->port);
+ capilib_new_ncci(&cinfo->ncci_head, ApplId, NCCI, WindowSize);
+ spin_unlock_irqrestore(&card->lock, flags);
+ break;
+
+ case RECEIVE_FREE_NCCI:
+
+ ApplId = b1_get_word(card->port);
+ NCCI = b1_get_word(card->port);
+ if (NCCI != 0xffffffff)
+ capilib_free_ncci(&cinfo->ncci_head, ApplId, NCCI);
+ spin_unlock_irqrestore(&card->lock, flags);
+ break;
+
+ case RECEIVE_START:
+ b1_put_byte(card->port, SEND_POLLACK);
+ spin_unlock_irqrestore(&card->lock, flags);
+ capi_ctr_resume_output(ctrl);
+ break;
+
+ case RECEIVE_STOP:
+ spin_unlock_irqrestore(&card->lock, flags);
+ capi_ctr_suspend_output(ctrl);
+ break;
+
+ case RECEIVE_INIT:
+
+ cinfo->versionlen = t1_get_slice(card->port, cinfo->versionbuf);
+ spin_unlock_irqrestore(&card->lock, flags);
+ b1_parse_version(cinfo);
+ printk(KERN_INFO "%s: %s-card (%s) now active\n",
+ card->name,
+ cinfo->version[VER_CARDTYPE],
+ cinfo->version[VER_DRIVER]);
+ capi_ctr_ready(ctrl);
+ break;
+
+ case RECEIVE_TASK_READY:
+ ApplId = (unsigned) b1_get_word(card->port);
+ MsgLen = t1_get_slice(card->port, card->msgbuf);
+ spin_unlock_irqrestore(&card->lock, flags);
+ card->msgbuf[MsgLen] = 0;
+ while (MsgLen > 0
+ && (card->msgbuf[MsgLen - 1] == '\n'
+ || card->msgbuf[MsgLen - 1] == '\r')) {
+ card->msgbuf[MsgLen - 1] = 0;
+ MsgLen--;
+ }
+ printk(KERN_INFO "%s: task %d \"%s\" ready.\n",
+ card->name, ApplId, card->msgbuf);
+ break;
+
+ case RECEIVE_DEBUGMSG:
+ MsgLen = t1_get_slice(card->port, card->msgbuf);
+ spin_unlock_irqrestore(&card->lock, flags);
+ card->msgbuf[MsgLen] = 0;
+ while (MsgLen > 0
+ && (card->msgbuf[MsgLen - 1] == '\n'
+ || card->msgbuf[MsgLen - 1] == '\r')) {
+ card->msgbuf[MsgLen - 1] = 0;
+ MsgLen--;
+ }
+ printk(KERN_INFO "%s: DEBUG: %s\n", card->name, card->msgbuf);
+ break;
+
+
+ case 0xff:
+ spin_unlock_irqrestore(&card->lock, flags);
+			printk(KERN_ERR "%s: card reset?\n", card->name);
+ return IRQ_HANDLED;
+ default:
+ spin_unlock_irqrestore(&card->lock, flags);
+ printk(KERN_ERR "%s: b1_interrupt: 0x%x ???\n",
+ card->name, b1cmd);
+ return IRQ_NONE;
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+/* ------------------------------------------------------------- */
+
+static int t1isa_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
+{
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+ avmcard *card = cinfo->card;
+ unsigned int port = card->port;
+ unsigned long flags;
+ int retval;
+
+ t1_disable_irq(port);
+ b1_reset(port);
+
+ if ((retval = b1_load_t4file(card, &data->firmware))) {
+ b1_reset(port);
+ printk(KERN_ERR "%s: failed to load t4file!!\n",
+ card->name);
+ return retval;
+ }
+
+ if (data->configuration.len > 0 && data->configuration.data) {
+ if ((retval = b1_load_config(card, &data->configuration))) {
+ b1_reset(port);
+ printk(KERN_ERR "%s: failed to load config!!\n",
+ card->name);
+ return retval;
+ }
+ }
+
+ if (!b1_loaded(card)) {
+ printk(KERN_ERR "%s: failed to load t4file.\n", card->name);
+ return -EIO;
+ }
+
+ spin_lock_irqsave(&card->lock, flags);
+ b1_setinterrupt(port, card->irq, card->cardtype);
+ b1_put_byte(port, SEND_INIT);
+ b1_put_word(port, CAPI_MAXAPPL);
+ b1_put_word(port, AVM_NCCI_PER_CHANNEL * 30);
+ b1_put_word(port, ctrl->cnr - 1);
+ spin_unlock_irqrestore(&card->lock, flags);
+
+ return 0;
+}
+
+static void t1isa_reset_ctr(struct capi_ctr *ctrl)
+{
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+ avmcard *card = cinfo->card;
+ unsigned int port = card->port;
+ unsigned long flags;
+
+ t1_disable_irq(port);
+ b1_reset(port);
+ b1_reset(port);
+
+ memset(cinfo->version, 0, sizeof(cinfo->version));
+ spin_lock_irqsave(&card->lock, flags);
+ capilib_release(&cinfo->ncci_head);
+ spin_unlock_irqrestore(&card->lock, flags);
+ capi_ctr_down(ctrl);
+}
+
+static void t1isa_remove(struct pci_dev *pdev)
+{
+ avmctrl_info *cinfo = pci_get_drvdata(pdev);
+ avmcard *card;
+
+ if (!cinfo)
+ return;
+
+ card = cinfo->card;
+
+ t1_disable_irq(card->port);
+ b1_reset(card->port);
+ b1_reset(card->port);
+ t1_reset(card->port);
+
+ detach_capi_ctr(&cinfo->capi_ctrl);
+ free_irq(card->irq, card);
+ release_region(card->port, AVMB1_PORTLEN);
+ b1_free_card(card);
+}
+
+/* ------------------------------------------------------------- */
+
+static u16 t1isa_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
+static char *t1isa_procinfo(struct capi_ctr *ctrl);
+
+static int t1isa_probe(struct pci_dev *pdev, int cardnr)
+{
+ avmctrl_info *cinfo;
+ avmcard *card;
+ int retval;
+
+ card = b1_alloc_card(1);
+ if (!card) {
+ printk(KERN_WARNING "t1isa: no memory.\n");
+ retval = -ENOMEM;
+ goto err;
+ }
+
+ cinfo = card->ctrlinfo;
+ card->port = pci_resource_start(pdev, 0);
+ card->irq = pdev->irq;
+ card->cardtype = avm_t1isa;
+ card->cardnr = cardnr;
+ sprintf(card->name, "t1isa-%x", card->port);
+
+ if (!(((card->port & 0x7) == 0) && ((card->port & 0x30) != 0x30))) {
+ printk(KERN_WARNING "t1isa: invalid port 0x%x.\n", card->port);
+ retval = -EINVAL;
+ goto err_free;
+ }
+ if (hema_irq_table[card->irq & 0xf] == 0) {
+ printk(KERN_WARNING "t1isa: irq %d not valid.\n", card->irq);
+ retval = -EINVAL;
+ goto err_free;
+ }
+ if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
+ printk(KERN_INFO "t1isa: ports 0x%03x-0x%03x in use.\n",
+ card->port, card->port + AVMB1_PORTLEN);
+ retval = -EBUSY;
+ goto err_free;
+ }
+ retval = request_irq(card->irq, t1isa_interrupt, 0, card->name, card);
+ if (retval) {
+ printk(KERN_INFO "t1isa: unable to get IRQ %d.\n", card->irq);
+ retval = -EBUSY;
+ goto err_release_region;
+ }
+
+ if ((retval = t1_detectandinit(card->port, card->irq, card->cardnr)) != 0) {
+ printk(KERN_INFO "t1isa: NO card at 0x%x (%d)\n",
+ card->port, retval);
+ retval = -ENODEV;
+ goto err_free_irq;
+ }
+ t1_disable_irq(card->port);
+ b1_reset(card->port);
+
+ cinfo->capi_ctrl.owner = THIS_MODULE;
+ cinfo->capi_ctrl.driver_name = "t1isa";
+ cinfo->capi_ctrl.driverdata = cinfo;
+ cinfo->capi_ctrl.register_appl = b1_register_appl;
+ cinfo->capi_ctrl.release_appl = b1_release_appl;
+ cinfo->capi_ctrl.send_message = t1isa_send_message;
+ cinfo->capi_ctrl.load_firmware = t1isa_load_firmware;
+ cinfo->capi_ctrl.reset_ctr = t1isa_reset_ctr;
+ cinfo->capi_ctrl.procinfo = t1isa_procinfo;
+ cinfo->capi_ctrl.proc_show = b1_proc_show;
+ strcpy(cinfo->capi_ctrl.name, card->name);
+
+ retval = attach_capi_ctr(&cinfo->capi_ctrl);
+ if (retval) {
+ printk(KERN_INFO "t1isa: attach controller failed.\n");
+ goto err_free_irq;
+ }
+
+ printk(KERN_INFO "t1isa: AVM T1 ISA at i/o %#x, irq %d, card %d\n",
+ card->port, card->irq, card->cardnr);
+
+ pci_set_drvdata(pdev, cinfo);
+ return 0;
+
+err_free_irq:
+ free_irq(card->irq, card);
+err_release_region:
+ release_region(card->port, AVMB1_PORTLEN);
+err_free:
+ b1_free_card(card);
+err:
+ return retval;
+}
+
+static u16 t1isa_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
+{
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+ avmcard *card = cinfo->card;
+ unsigned int port = card->port;
+ unsigned long flags;
+ u16 len = CAPIMSG_LEN(skb->data);
+ u8 cmd = CAPIMSG_COMMAND(skb->data);
+ u8 subcmd = CAPIMSG_SUBCOMMAND(skb->data);
+ u16 dlen, retval;
+
+ spin_lock_irqsave(&card->lock, flags);
+ if (CAPICMD(cmd, subcmd) == CAPI_DATA_B3_REQ) {
+ retval = capilib_data_b3_req(&cinfo->ncci_head,
+ CAPIMSG_APPID(skb->data),
+ CAPIMSG_NCCI(skb->data),
+ CAPIMSG_MSGID(skb->data));
+ if (retval != CAPI_NOERROR) {
+ spin_unlock_irqrestore(&card->lock, flags);
+ return retval;
+ }
+ dlen = CAPIMSG_DATALEN(skb->data);
+
+ b1_put_byte(port, SEND_DATA_B3_REQ);
+ t1_put_slice(port, skb->data, len);
+ t1_put_slice(port, skb->data + len, dlen);
+ } else {
+ b1_put_byte(port, SEND_MESSAGE);
+ t1_put_slice(port, skb->data, len);
+ }
+ spin_unlock_irqrestore(&card->lock, flags);
+ dev_kfree_skb_any(skb);
+ return CAPI_NOERROR;
+}
+/* ------------------------------------------------------------- */
+
+static char *t1isa_procinfo(struct capi_ctr *ctrl)
+{
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+
+ if (!cinfo)
+ return "";
+ sprintf(cinfo->infobuf, "%s %s 0x%x %d %d",
+ cinfo->cardname[0] ? cinfo->cardname : "-",
+ cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
+ cinfo->card ? cinfo->card->port : 0x0,
+ cinfo->card ? cinfo->card->irq : 0,
+ cinfo->card ? cinfo->card->cardnr : 0
+ );
+ return cinfo->infobuf;
+}
+
+
+/* ------------------------------------------------------------- */
+
+#define MAX_CARDS 4
+static struct pci_dev isa_dev[MAX_CARDS];
+static int io[MAX_CARDS];
+static int irq[MAX_CARDS];
+static int cardnr[MAX_CARDS];
+
+module_param_hw_array(io, int, ioport, NULL, 0);
+module_param_hw_array(irq, int, irq, NULL, 0);
+module_param_array(cardnr, int, NULL, 0);
+MODULE_PARM_DESC(io, "I/O base address(es)");
+MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
+MODULE_PARM_DESC(cardnr, "Card number(s) (as jumpered)");
+
+static int t1isa_add_card(struct capi_driver *driver, capicardparams *data)
+{
+ int i;
+
+ for (i = 0; i < MAX_CARDS; i++) {
+ if (isa_dev[i].resource[0].start)
+ continue;
+
+ isa_dev[i].resource[0].start = data->port;
+ isa_dev[i].irq = data->irq;
+
+ if (t1isa_probe(&isa_dev[i], data->cardnr) == 0)
+ return 0;
+ }
+ return -ENODEV;
+}
+
+static struct capi_driver capi_driver_t1isa = {
+ .name = "t1isa",
+ .revision = "1.0",
+ .add_card = t1isa_add_card,
+};
+
+static int __init t1isa_init(void)
+{
+ char rev[32];
+ char *p;
+ int i;
+
+ if ((p = strchr(revision, ':')) != NULL && p[1]) {
+ strlcpy(rev, p + 2, 32);
+ if ((p = strchr(rev, '$')) != NULL && p > rev)
+ *(p - 1) = 0;
+ } else
+ strcpy(rev, "1.0");
+
+ for (i = 0; i < MAX_CARDS; i++) {
+ if (!io[i])
+ break;
+
+ isa_dev[i].resource[0].start = io[i];
+ isa_dev[i].irq = irq[i];
+
+ if (t1isa_probe(&isa_dev[i], cardnr[i]) != 0)
+ return -ENODEV;
+ }
+
+ strlcpy(capi_driver_t1isa.revision, rev, 32);
+ register_capi_driver(&capi_driver_t1isa);
+ printk(KERN_INFO "t1isa: revision %s\n", rev);
+
+ return 0;
+}
+
+static void __exit t1isa_exit(void)
+{
+ int i;
+
+ unregister_capi_driver(&capi_driver_t1isa);
+ for (i = 0; i < MAX_CARDS; i++) {
+ if (!io[i])
+ break;
+
+ t1isa_remove(&isa_dev[i]);
+ }
+}
+
+module_init(t1isa_init);
+module_exit(t1isa_exit);
diff --git a/drivers/staging/isdn/avm/t1pci.c b/drivers/staging/isdn/avm/t1pci.c
new file mode 100644
index 000000000000..f5ed1d5004c9
--- /dev/null
+++ b/drivers/staging/isdn/avm/t1pci.c
@@ -0,0 +1,259 @@
+/* $Id: t1pci.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $
+ *
+ * Module for AVM T1 PCI-card.
+ *
+ * Copyright 1999 by Carsten Paeth <calle@calle.de>
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/capi.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/isdn/capicmd.h>
+#include <linux/isdn/capiutil.h>
+#include <linux/isdn/capilli.h>
+#include "avmcard.h"
+
+#undef CONFIG_T1PCI_DEBUG
+#undef CONFIG_T1PCI_POLLDEBUG
+
+/* ------------------------------------------------------------- */
+static char *revision = "$Revision: 1.1.2.2 $";
+/* ------------------------------------------------------------- */
+
+static const struct pci_device_id t1pci_pci_tbl[] = {
+ { PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_T1, PCI_ANY_ID, PCI_ANY_ID },
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(pci, t1pci_pci_tbl);
+MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM T1 PCI card");
+MODULE_AUTHOR("Carsten Paeth");
+MODULE_LICENSE("GPL");
+
+/* ------------------------------------------------------------- */
+
+static char *t1pci_procinfo(struct capi_ctr *ctrl);
+
+static int t1pci_add_card(struct capicardparams *p, struct pci_dev *pdev)
+{
+ avmcard *card;
+ avmctrl_info *cinfo;
+ int retval;
+
+ card = b1_alloc_card(1);
+ if (!card) {
+ printk(KERN_WARNING "t1pci: no memory.\n");
+ retval = -ENOMEM;
+ goto err;
+ }
+
+ card->dma = avmcard_dma_alloc("t1pci", pdev, 2048 + 128, 2048 + 128);
+ if (!card->dma) {
+ printk(KERN_WARNING "t1pci: no memory.\n");
+ retval = -ENOMEM;
+ goto err_free;
+ }
+
+ cinfo = card->ctrlinfo;
+ sprintf(card->name, "t1pci-%x", p->port);
+ card->port = p->port;
+ card->irq = p->irq;
+ card->membase = p->membase;
+ card->cardtype = avm_t1pci;
+
+ if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
+ printk(KERN_WARNING "t1pci: ports 0x%03x-0x%03x in use.\n",
+ card->port, card->port + AVMB1_PORTLEN);
+ retval = -EBUSY;
+ goto err_free_dma;
+ }
+
+ card->mbase = ioremap(card->membase, 64);
+ if (!card->mbase) {
+ printk(KERN_NOTICE "t1pci: can't remap memory at 0x%lx\n",
+ card->membase);
+ retval = -EIO;
+ goto err_release_region;
+ }
+
+ b1dma_reset(card);
+
+ retval = t1pci_detect(card);
+ if (retval != 0) {
+ if (retval < 6)
+ printk(KERN_NOTICE "t1pci: NO card at 0x%x (%d)\n",
+ card->port, retval);
+ else
+ printk(KERN_NOTICE "t1pci: card at 0x%x, but cable not connected or T1 has no power (%d)\n",
+ card->port, retval);
+ retval = -EIO;
+ goto err_unmap;
+ }
+ b1dma_reset(card);
+
+ retval = request_irq(card->irq, b1dma_interrupt, IRQF_SHARED, card->name, card);
+ if (retval) {
+ printk(KERN_ERR "t1pci: unable to get IRQ %d.\n", card->irq);
+ retval = -EBUSY;
+ goto err_unmap;
+ }
+
+ cinfo->capi_ctrl.owner = THIS_MODULE;
+ cinfo->capi_ctrl.driver_name = "t1pci";
+ cinfo->capi_ctrl.driverdata = cinfo;
+ cinfo->capi_ctrl.register_appl = b1dma_register_appl;
+ cinfo->capi_ctrl.release_appl = b1dma_release_appl;
+ cinfo->capi_ctrl.send_message = b1dma_send_message;
+ cinfo->capi_ctrl.load_firmware = b1dma_load_firmware;
+ cinfo->capi_ctrl.reset_ctr = b1dma_reset_ctr;
+ cinfo->capi_ctrl.procinfo = t1pci_procinfo;
+ cinfo->capi_ctrl.proc_show = b1dma_proc_show;
+ strcpy(cinfo->capi_ctrl.name, card->name);
+
+ retval = attach_capi_ctr(&cinfo->capi_ctrl);
+ if (retval) {
+ printk(KERN_ERR "t1pci: attach controller failed.\n");
+ retval = -EBUSY;
+ goto err_free_irq;
+ }
+ card->cardnr = cinfo->capi_ctrl.cnr;
+
+ printk(KERN_INFO "t1pci: AVM T1 PCI at i/o %#x, irq %d, mem %#lx\n",
+ card->port, card->irq, card->membase);
+
+ pci_set_drvdata(pdev, card);
+ return 0;
+
+err_free_irq:
+ free_irq(card->irq, card);
+err_unmap:
+ iounmap(card->mbase);
+err_release_region:
+ release_region(card->port, AVMB1_PORTLEN);
+err_free_dma:
+ avmcard_dma_free(card->dma);
+err_free:
+ b1_free_card(card);
+err:
+ return retval;
+}
+
+/* ------------------------------------------------------------- */
+
+static void t1pci_remove(struct pci_dev *pdev)
+{
+ avmcard *card = pci_get_drvdata(pdev);
+ avmctrl_info *cinfo = card->ctrlinfo;
+
+ b1dma_reset(card);
+
+ detach_capi_ctr(&cinfo->capi_ctrl);
+ free_irq(card->irq, card);
+ iounmap(card->mbase);
+ release_region(card->port, AVMB1_PORTLEN);
+ avmcard_dma_free(card->dma);
+ b1_free_card(card);
+}
+
+/* ------------------------------------------------------------- */
+
+static char *t1pci_procinfo(struct capi_ctr *ctrl)
+{
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+
+ if (!cinfo)
+ return "";
+ sprintf(cinfo->infobuf, "%s %s 0x%x %d 0x%lx",
+ cinfo->cardname[0] ? cinfo->cardname : "-",
+ cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
+ cinfo->card ? cinfo->card->port : 0x0,
+ cinfo->card ? cinfo->card->irq : 0,
+ cinfo->card ? cinfo->card->membase : 0
+ );
+ return cinfo->infobuf;
+}
+
+/* ------------------------------------------------------------- */
+
+static int t1pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
+{
+ struct capicardparams param;
+ int retval;
+
+ if (pci_enable_device(dev) < 0) {
+ printk(KERN_ERR "t1pci: failed to enable AVM-T1-PCI\n");
+ return -ENODEV;
+ }
+ pci_set_master(dev);
+
+ param.port = pci_resource_start(dev, 1);
+ param.irq = dev->irq;
+ param.membase = pci_resource_start(dev, 0);
+
+ printk(KERN_INFO "t1pci: PCI BIOS reports AVM-T1-PCI at i/o %#x, irq %d, mem %#x\n",
+ param.port, param.irq, param.membase);
+
+ retval = t1pci_add_card(&param, dev);
+ if (retval != 0) {
+ printk(KERN_ERR "t1pci: no AVM-T1-PCI at i/o %#x, irq %d detected, mem %#x\n",
+ param.port, param.irq, param.membase);
+ pci_disable_device(dev);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static struct pci_driver t1pci_pci_driver = {
+ .name = "t1pci",
+ .id_table = t1pci_pci_tbl,
+ .probe = t1pci_probe,
+ .remove = t1pci_remove,
+};
+
+static struct capi_driver capi_driver_t1pci = {
+ .name = "t1pci",
+ .revision = "1.0",
+};
+
+static int __init t1pci_init(void)
+{
+ char *p;
+ char rev[32];
+ int err;
+
+ if ((p = strchr(revision, ':')) != NULL && p[1]) {
+ strlcpy(rev, p + 2, 32);
+ if ((p = strchr(rev, '$')) != NULL && p > rev)
+ *(p - 1) = 0;
+ } else
+ strcpy(rev, "1.0");
+
+ err = pci_register_driver(&t1pci_pci_driver);
+ if (!err) {
+ strlcpy(capi_driver_t1pci.revision, rev, 32);
+ register_capi_driver(&capi_driver_t1pci);
+ printk(KERN_INFO "t1pci: revision %s\n", rev);
+ }
+ return err;
+}
+
+static void __exit t1pci_exit(void)
+{
+ unregister_capi_driver(&capi_driver_t1pci);
+ pci_unregister_driver(&t1pci_pci_driver);
+}
+
+module_init(t1pci_init);
+module_exit(t1pci_exit);
diff --git a/drivers/staging/isdn/gigaset/Kconfig b/drivers/staging/isdn/gigaset/Kconfig
new file mode 100644
index 000000000000..c593105b3600
--- /dev/null
+++ b/drivers/staging/isdn/gigaset/Kconfig
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig ISDN_DRV_GIGASET
+ tristate "Siemens Gigaset support"
+ depends on TTY
+ select CRC_CCITT
+ select BITREVERSE
+ help
+ This driver supports the Siemens Gigaset SX205/255 family of
+ ISDN DECT bases, including the predecessors Gigaset 3070/3075
+ and 4170/4175 and their T-Com versions Sinus 45isdn and Sinus
+ 721X.
+	  If you have one of these devices, say M here and also to at
+	  least one of the connection-specific options that follow.
+ This will build a module called "gigaset".
+ Note: If you build your ISDN subsystem (ISDN_CAPI or ISDN_I4L)
+ as a module, you have to build this driver as a module too,
+ otherwise the Gigaset device won't show up as an ISDN device.
+
+if ISDN_DRV_GIGASET
+
+config GIGASET_CAPI
+ bool "Gigaset CAPI support"
+ depends on ISDN_CAPI='y'||(ISDN_CAPI='m'&&ISDN_DRV_GIGASET='m')
+ default 'y'
+ help
+ Build the Gigaset driver as a CAPI 2.0 driver interfacing with
+ the Kernel CAPI subsystem. To use it with the old ISDN4Linux
+	  subsystem you'll also have to enable the capidrv glue driver
+	  (select ISDN_CAPI_CAPIDRV).
+ Say N to build the old native ISDN4Linux variant.
+ If unsure, say Y.
+
+config GIGASET_BASE
+ tristate "Gigaset base station support"
+ depends on USB
+ help
+ Say M here if you want to use the USB interface of the Gigaset
+ base for connection to your system.
+ This will build a module called "bas_gigaset".
+
+config GIGASET_M105
+ tristate "Gigaset M105 support"
+ depends on USB
+ help
+ Say M here if you want to connect to the Gigaset base via DECT
+ using a Gigaset M105 (Sinus 45 Data 2) USB DECT device.
+ This will build a module called "usb_gigaset".
+
+config GIGASET_M101
+ tristate "Gigaset M101 support"
+ help
+ Say M here if you want to connect to the Gigaset base via DECT
+ using a Gigaset M101 (Sinus 45 Data 1) RS232 DECT device.
+ This will build a module called "ser_gigaset".
+
+config GIGASET_DEBUG
+ bool "Gigaset debugging"
+ help
+ This enables debugging code in the Gigaset drivers.
+ If in doubt, say yes.
+
+endif # ISDN_DRV_GIGASET
diff --git a/drivers/staging/isdn/gigaset/Makefile b/drivers/staging/isdn/gigaset/Makefile
new file mode 100644
index 000000000000..9c010891dcd7
--- /dev/null
+++ b/drivers/staging/isdn/gigaset/Makefile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+gigaset-y := common.o interface.o proc.o ev-layer.o asyncdata.o
+
+ifdef CONFIG_GIGASET_CAPI
+gigaset-y += capi.o
+else
+gigaset-y += dummyll.o
+endif
+
+usb_gigaset-y := usb-gigaset.o
+ser_gigaset-y := ser-gigaset.o
+bas_gigaset-y := bas-gigaset.o isocdata.o
+
+obj-$(CONFIG_ISDN_DRV_GIGASET) += gigaset.o
+obj-$(CONFIG_GIGASET_M105) += usb_gigaset.o
+obj-$(CONFIG_GIGASET_BASE) += bas_gigaset.o
+obj-$(CONFIG_GIGASET_M101) += ser_gigaset.o
diff --git a/drivers/staging/isdn/gigaset/asyncdata.c b/drivers/staging/isdn/gigaset/asyncdata.c
new file mode 100644
index 000000000000..a34b3c9d8a71
--- /dev/null
+++ b/drivers/staging/isdn/gigaset/asyncdata.c
@@ -0,0 +1,606 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Common data handling layer for ser_gigaset and usb_gigaset
+ *
+ * Copyright (c) 2005 by Tilman Schmidt <tilman@imap.cc>,
+ * Hansjoerg Lipp <hjlipp@web.de>,
+ * Stefan Eilers.
+ *
+ * =====================================================================
+ * =====================================================================
+ */
+
+#include "gigaset.h"
+#include <linux/crc-ccitt.h>
+#include <linux/bitrev.h>
+#include <linux/export.h>
+
+/* check if byte must be stuffed/escaped
+ * It is not certain which bytes the device requires escaping, so be
+ * conservative and escape every value below 0x20, as well as the flag
+ * sequence and the control escape character itself.
+ */
+static inline int muststuff(unsigned char c)
+{
+ if (c < PPP_TRANS) return 1;
+ if (c == PPP_FLAG) return 1;
+ if (c == PPP_ESCAPE) return 1;
+ /* other possible candidates: */
+ /* 0x91: XON with parity set */
+ /* 0x93: XOFF with parity set */
+ return 0;
+}
+
+/* == data input =========================================================== */
+
+/* process a block of received bytes in command mode
+ * (mstate != MS_LOCKED && (inputstate & INS_command))
+ * Append received bytes to the command response buffer and forward them
+ * line by line to the response handler. Exit whenever a mode/state change
+ * might have occurred.
+ * Note: Received lines may be terminated by CR, LF, or CR LF, which will be
+ * removed before passing the line to the response handler.
+ * Return value:
+ * number of processed bytes
+ */
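+/* Example: the response "OK\r\n" is passed to the response handler as the
+ * two byte line "OK" when the CR is seen; the following LF then finds
+ * cbytes == 0 and respdata[0] == '\r' and is collapsed instead of being
+ * reported as an additional empty line.
+ */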
+static unsigned cmd_loop(unsigned numbytes, struct inbuf_t *inbuf)
+{
+ unsigned char *src = inbuf->data + inbuf->head;
+ struct cardstate *cs = inbuf->cs;
+ unsigned cbytes = cs->cbytes;
+ unsigned procbytes = 0;
+ unsigned char c;
+
+ while (procbytes < numbytes) {
+ c = *src++;
+ procbytes++;
+
+ switch (c) {
+ case '\n':
+ if (cbytes == 0 && cs->respdata[0] == '\r') {
+ /* collapse LF with preceding CR */
+ cs->respdata[0] = 0;
+ break;
+ }
+ /* fall through */
+ case '\r':
+ /* end of message line, pass to response handler */
+ if (cbytes >= MAX_RESP_SIZE) {
+ dev_warn(cs->dev, "response too large (%d)\n",
+ cbytes);
+ cbytes = MAX_RESP_SIZE;
+ }
+ cs->cbytes = cbytes;
+ gigaset_dbg_buffer(DEBUG_TRANSCMD, "received response",
+ cbytes, cs->respdata);
+ gigaset_handle_modem_response(cs);
+ cbytes = 0;
+
+ /* store EOL byte for CRLF collapsing */
+ cs->respdata[0] = c;
+
+ /* cs->dle may have changed */
+ if (cs->dle && !(inbuf->inputstate & INS_DLE_command))
+ inbuf->inputstate &= ~INS_command;
+
+ /* return for reevaluating state */
+ goto exit;
+
+ case DLE_FLAG:
+ if (inbuf->inputstate & INS_DLE_char) {
+ /* quoted DLE: clear quote flag */
+ inbuf->inputstate &= ~INS_DLE_char;
+ } else if (cs->dle ||
+ (inbuf->inputstate & INS_DLE_command)) {
+ /* DLE escape, pass up for handling */
+ inbuf->inputstate |= INS_DLE_char;
+ goto exit;
+ }
+ /* quoted or not in DLE mode: treat as regular data */
+ /* fall through */
+ default:
+ /* append to line buffer if possible */
+ if (cbytes < MAX_RESP_SIZE)
+ cs->respdata[cbytes] = c;
+ cbytes++;
+ }
+ }
+exit:
+ cs->cbytes = cbytes;
+ return procbytes;
+}
+
+/* process a block of received bytes in lock mode
+ * All received bytes are passed unmodified to the tty i/f.
+ * Return value:
+ * number of processed bytes
+ */
+static unsigned lock_loop(unsigned numbytes, struct inbuf_t *inbuf)
+{
+ unsigned char *src = inbuf->data + inbuf->head;
+
+ gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response", numbytes, src);
+ gigaset_if_receive(inbuf->cs, src, numbytes);
+ return numbytes;
+}
+
+/* process a block of received bytes in HDLC data mode
+ * (mstate != MS_LOCKED && !(inputstate & INS_command) && proto2 == L2_HDLC)
+ * Collect HDLC frames, undoing byte stuffing and watching for DLE escapes.
+ * When a frame is complete, check the FCS and pass valid frames to the LL.
+ * If DLE is encountered, return immediately to let the caller handle it.
+ * Return value:
+ * number of processed bytes
+ */
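+/* A complete frame on the wire is 7E <byte stuffed payload + FCS> 7E; after
+ * unstuffing, the CRC accumulated over payload and FCS equals PPP_GOODFCS
+ * for an intact frame, and the two trailing FCS bytes are trimmed off
+ * before the skb is handed to the link layer.
+ */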
+static unsigned hdlc_loop(unsigned numbytes, struct inbuf_t *inbuf)
+{
+ struct cardstate *cs = inbuf->cs;
+ struct bc_state *bcs = cs->bcs;
+ int inputstate = bcs->inputstate;
+ __u16 fcs = bcs->rx_fcs;
+ struct sk_buff *skb = bcs->rx_skb;
+ unsigned char *src = inbuf->data + inbuf->head;
+ unsigned procbytes = 0;
+ unsigned char c;
+
+ if (inputstate & INS_byte_stuff) {
+ if (!numbytes)
+ return 0;
+ inputstate &= ~INS_byte_stuff;
+ goto byte_stuff;
+ }
+
+ while (procbytes < numbytes) {
+ c = *src++;
+ procbytes++;
+ if (c == DLE_FLAG) {
+ if (inputstate & INS_DLE_char) {
+ /* quoted DLE: clear quote flag */
+ inputstate &= ~INS_DLE_char;
+ } else if (cs->dle || (inputstate & INS_DLE_command)) {
+ /* DLE escape, pass up for handling */
+ inputstate |= INS_DLE_char;
+ break;
+ }
+ }
+
+ if (c == PPP_ESCAPE) {
+ /* byte stuffing indicator: pull in next byte */
+ if (procbytes >= numbytes) {
+ /* end of buffer, save for later processing */
+ inputstate |= INS_byte_stuff;
+ break;
+ }
+byte_stuff:
+ c = *src++;
+ procbytes++;
+ if (c == DLE_FLAG) {
+ if (inputstate & INS_DLE_char) {
+ /* quoted DLE: clear quote flag */
+ inputstate &= ~INS_DLE_char;
+ } else if (cs->dle ||
+ (inputstate & INS_DLE_command)) {
+ /* DLE escape, pass up for handling */
+ inputstate |=
+ INS_DLE_char | INS_byte_stuff;
+ break;
+ }
+ }
+ c ^= PPP_TRANS;
+#ifdef CONFIG_GIGASET_DEBUG
+ if (!muststuff(c))
+ gig_dbg(DEBUG_HDLC, "byte stuffed: 0x%02x", c);
+#endif
+ } else if (c == PPP_FLAG) {
+ /* end of frame: process content if any */
+ if (inputstate & INS_have_data) {
+ gig_dbg(DEBUG_HDLC,
+ "7e----------------------------");
+
+ /* check and pass received frame */
+ if (!skb) {
+ /* skipped frame */
+ gigaset_isdn_rcv_err(bcs);
+ } else if (skb->len < 2) {
+ /* frame too short for FCS */
+ dev_warn(cs->dev,
+ "short frame (%d)\n",
+ skb->len);
+ gigaset_isdn_rcv_err(bcs);
+ dev_kfree_skb_any(skb);
+ } else if (fcs != PPP_GOODFCS) {
+ /* frame check error */
+ dev_err(cs->dev,
+ "Checksum failed, %u bytes corrupted!\n",
+ skb->len);
+ gigaset_isdn_rcv_err(bcs);
+ dev_kfree_skb_any(skb);
+ } else {
+ /* good frame */
+ __skb_trim(skb, skb->len - 2);
+ gigaset_skb_rcvd(bcs, skb);
+ }
+
+ /* prepare reception of next frame */
+ inputstate &= ~INS_have_data;
+ skb = gigaset_new_rx_skb(bcs);
+ } else {
+ /* empty frame (7E 7E) */
+#ifdef CONFIG_GIGASET_DEBUG
+ ++bcs->emptycount;
+#endif
+ if (!skb) {
+ /* skipped (?) */
+ gigaset_isdn_rcv_err(bcs);
+ skb = gigaset_new_rx_skb(bcs);
+ }
+ }
+
+ fcs = PPP_INITFCS;
+ continue;
+#ifdef CONFIG_GIGASET_DEBUG
+ } else if (muststuff(c)) {
+ /* Should not happen. Possible after ZDLE=1<CR><LF>. */
+ gig_dbg(DEBUG_HDLC, "not byte stuffed: 0x%02x", c);
+#endif
+ }
+
+ /* regular data byte, append to skb */
+#ifdef CONFIG_GIGASET_DEBUG
+ if (!(inputstate & INS_have_data)) {
+ gig_dbg(DEBUG_HDLC, "7e (%d x) ================",
+ bcs->emptycount);
+ bcs->emptycount = 0;
+ }
+#endif
+ inputstate |= INS_have_data;
+ if (skb) {
+ if (skb->len >= bcs->rx_bufsize) {
+ dev_warn(cs->dev, "received packet too long\n");
+ dev_kfree_skb_any(skb);
+ /* skip remainder of packet */
+ bcs->rx_skb = skb = NULL;
+ } else {
+ __skb_put_u8(skb, c);
+ fcs = crc_ccitt_byte(fcs, c);
+ }
+ }
+ }
+
+ bcs->inputstate = inputstate;
+ bcs->rx_fcs = fcs;
+ return procbytes;
+}
+
+/* process a block of received bytes in transparent data mode
+ * (mstate != MS_LOCKED && !(inputstate & INS_command) && proto2 != L2_HDLC)
+ * Invert bytes, undoing byte stuffing and watching for DLE escapes.
+ * If DLE is encountered, return immediately to let the caller handle it.
+ * Return value:
+ * number of processed bytes
+ */
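+/* "Inverting" here means reversing the bit order of each byte with
+ * bitrev8(), e.g. 0x01 becomes 0x80 and 0x2d becomes 0xb4; iraw_encode()
+ * below applies the same transformation in the send direction.
+ */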
+static unsigned iraw_loop(unsigned numbytes, struct inbuf_t *inbuf)
+{
+ struct cardstate *cs = inbuf->cs;
+ struct bc_state *bcs = cs->bcs;
+ int inputstate = bcs->inputstate;
+ struct sk_buff *skb = bcs->rx_skb;
+ unsigned char *src = inbuf->data + inbuf->head;
+ unsigned procbytes = 0;
+ unsigned char c;
+
+ if (!skb) {
+ /* skip this block */
+ gigaset_new_rx_skb(bcs);
+ return numbytes;
+ }
+
+ while (procbytes < numbytes && skb->len < bcs->rx_bufsize) {
+ c = *src++;
+ procbytes++;
+
+ if (c == DLE_FLAG) {
+ if (inputstate & INS_DLE_char) {
+ /* quoted DLE: clear quote flag */
+ inputstate &= ~INS_DLE_char;
+ } else if (cs->dle || (inputstate & INS_DLE_command)) {
+ /* DLE escape, pass up for handling */
+ inputstate |= INS_DLE_char;
+ break;
+ }
+ }
+
+ /* regular data byte: append to current skb */
+ inputstate |= INS_have_data;
+ __skb_put_u8(skb, bitrev8(c));
+ }
+
+ /* pass data up */
+ if (inputstate & INS_have_data) {
+ gigaset_skb_rcvd(bcs, skb);
+ inputstate &= ~INS_have_data;
+ gigaset_new_rx_skb(bcs);
+ }
+
+ bcs->inputstate = inputstate;
+ return procbytes;
+}
+
+/* process DLE escapes
+ * Called whenever a DLE sequence might be encountered in the input stream.
+ * Either processes the entire DLE sequence or, if that isn't possible,
+ * notes the fact that an initial DLE has been received in the INS_DLE_char
+ * inputstate flag and resumes processing of the sequence on the next call.
+ */
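+/* The device brackets event messages as <DLE>X ... <DLE>. and sends a data
+ * byte equal to DLE as <DLE><DLE>; the switch below handles exactly these
+ * three cases (any other byte following DLE is merely reported and left in
+ * the buffer).
+ */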
+static void handle_dle(struct inbuf_t *inbuf)
+{
+ struct cardstate *cs = inbuf->cs;
+
+ if (cs->mstate == MS_LOCKED)
+ return; /* no DLE processing in lock mode */
+
+ if (!(inbuf->inputstate & INS_DLE_char)) {
+ /* no DLE pending */
+ if (inbuf->data[inbuf->head] == DLE_FLAG &&
+ (cs->dle || inbuf->inputstate & INS_DLE_command)) {
+ /* start of DLE sequence */
+ inbuf->head++;
+ if (inbuf->head == inbuf->tail ||
+ inbuf->head == RBUFSIZE) {
+ /* end of buffer, save for later processing */
+ inbuf->inputstate |= INS_DLE_char;
+ return;
+ }
+ } else {
+ /* regular data byte */
+ return;
+ }
+ }
+
+ /* consume pending DLE */
+ inbuf->inputstate &= ~INS_DLE_char;
+
+ switch (inbuf->data[inbuf->head]) {
+ case 'X': /* begin of event message */
+ if (inbuf->inputstate & INS_command)
+ dev_notice(cs->dev,
+ "received <DLE>X in command mode\n");
+ inbuf->inputstate |= INS_command | INS_DLE_command;
+ inbuf->head++; /* byte consumed */
+ break;
+ case '.': /* end of event message */
+ if (!(inbuf->inputstate & INS_DLE_command))
+ dev_notice(cs->dev,
+ "received <DLE>. without <DLE>X\n");
+ inbuf->inputstate &= ~INS_DLE_command;
+ /* return to data mode if in DLE mode */
+ if (cs->dle)
+ inbuf->inputstate &= ~INS_command;
+ inbuf->head++; /* byte consumed */
+ break;
+ case DLE_FLAG: /* DLE in data stream */
+ /* mark as quoted */
+ inbuf->inputstate |= INS_DLE_char;
+ if (!(cs->dle || inbuf->inputstate & INS_DLE_command))
+ dev_notice(cs->dev,
+ "received <DLE><DLE> not in DLE mode\n");
+ break; /* quoted byte left in buffer */
+ default:
+ dev_notice(cs->dev, "received <DLE><%02x>\n",
+ inbuf->data[inbuf->head]);
+ /* quoted byte left in buffer */
+ }
+}
+
+/**
+ * gigaset_m10x_input() - process a block of data received from the device
+ * @inbuf: received data and device descriptor structure.
+ *
+ * Called by hardware module {ser,usb}_gigaset with a block of received
+ * bytes. Separates the bytes received over the serial data channel into
+ * user data and command replies (locked/unlocked) according to the
+ * current state of the interface.
+ */
+void gigaset_m10x_input(struct inbuf_t *inbuf)
+{
+ struct cardstate *cs = inbuf->cs;
+ unsigned numbytes, procbytes;
+
+ gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", inbuf->head, inbuf->tail);
+
+ while (inbuf->head != inbuf->tail) {
+ /* check for DLE escape */
+ handle_dle(inbuf);
+
+ /* process a contiguous block of bytes */
+ numbytes = (inbuf->head > inbuf->tail ?
+ RBUFSIZE : inbuf->tail) - inbuf->head;
+ gig_dbg(DEBUG_INTR, "processing %u bytes", numbytes);
+ /*
+ * numbytes may be 0 if handle_dle() ate the last byte.
+ * This does no harm, *_loop() will just return 0 immediately.
+ */
+
+ if (cs->mstate == MS_LOCKED)
+ procbytes = lock_loop(numbytes, inbuf);
+ else if (inbuf->inputstate & INS_command)
+ procbytes = cmd_loop(numbytes, inbuf);
+ else if (cs->bcs->proto2 == L2_HDLC)
+ procbytes = hdlc_loop(numbytes, inbuf);
+ else
+ procbytes = iraw_loop(numbytes, inbuf);
+ inbuf->head += procbytes;
+
+ /* check for buffer wraparound */
+ if (inbuf->head >= RBUFSIZE)
+ inbuf->head = 0;
+
+ gig_dbg(DEBUG_INTR, "head set to %u", inbuf->head);
+ }
+}
+EXPORT_SYMBOL_GPL(gigaset_m10x_input);
+
+
+/* == data output ========================================================== */
+
+/*
+ * Encode a data packet into an octet stuffed HDLC frame with FCS,
+ * opening and closing flags, preserving headroom data.
+ * parameters:
+ * skb skb containing original packet (freed upon return)
+ * Return value:
+ * pointer to newly allocated skb containing the result frame
+ * and the original link layer header, NULL on error
+ */
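+/* Resulting frame layout: an opening 7E flag, the byte stuffed payload, the
+ * complemented FCS appended least significant byte first (each FCS byte
+ * itself stuffed if necessary), and a closing 7E flag.
+ */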
+static struct sk_buff *HDLC_Encode(struct sk_buff *skb)
+{
+ struct sk_buff *hdlc_skb;
+ __u16 fcs;
+ unsigned char c;
+ unsigned char *cp;
+ int len;
+ unsigned int stuf_cnt;
+
+ stuf_cnt = 0;
+ fcs = PPP_INITFCS;
+ cp = skb->data;
+ len = skb->len;
+ while (len--) {
+ if (muststuff(*cp))
+ stuf_cnt++;
+ fcs = crc_ccitt_byte(fcs, *cp++);
+ }
+ fcs ^= 0xffff; /* complement */
+
+ /* size of new buffer: original size + number of stuffing bytes
+ * + 2 bytes FCS + 2 stuffing bytes for FCS (if needed) + 2 flag bytes
+ * + room for link layer header
+ */
+ hdlc_skb = dev_alloc_skb(skb->len + stuf_cnt + 6 + skb->mac_len);
+ if (!hdlc_skb) {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
+ /* Copy link layer header into new skb */
+ skb_reset_mac_header(hdlc_skb);
+ skb_reserve(hdlc_skb, skb->mac_len);
+ memcpy(skb_mac_header(hdlc_skb), skb_mac_header(skb), skb->mac_len);
+ hdlc_skb->mac_len = skb->mac_len;
+
+ /* Add flag sequence in front of everything. */
+ skb_put_u8(hdlc_skb, PPP_FLAG);
+
+ /* Perform byte stuffing while copying data. */
+ while (skb->len--) {
+ if (muststuff(*skb->data)) {
+ skb_put_u8(hdlc_skb, PPP_ESCAPE);
+ skb_put_u8(hdlc_skb, (*skb->data++) ^ PPP_TRANS);
+ } else
+ skb_put_u8(hdlc_skb, *skb->data++);
+ }
+
+ /* Finally add FCS (byte stuffed) and flag sequence */
+ c = (fcs & 0x00ff); /* least significant byte first */
+ if (muststuff(c)) {
+ skb_put_u8(hdlc_skb, PPP_ESCAPE);
+ c ^= PPP_TRANS;
+ }
+ skb_put_u8(hdlc_skb, c);
+
+ c = ((fcs >> 8) & 0x00ff);
+ if (muststuff(c)) {
+ skb_put_u8(hdlc_skb, PPP_ESCAPE);
+ c ^= PPP_TRANS;
+ }
+ skb_put_u8(hdlc_skb, c);
+
+ skb_put_u8(hdlc_skb, PPP_FLAG);
+
+ dev_kfree_skb_any(skb);
+ return hdlc_skb;
+}
+
+/*
+ * Encode a data packet into an octet stuffed raw bit inverted frame,
+ * preserving headroom data.
+ * parameters:
+ * skb skb containing original packet (freed upon return)
+ * Return value:
+ * pointer to newly allocated skb containing the result frame
+ * and the original link layer header, NULL on error
+ */
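+/* Note: a payload byte whose bit reversed value equals the DLE character is
+ * emitted twice, which is why the worst case output size is twice the
+ * input size.
+ */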
+static struct sk_buff *iraw_encode(struct sk_buff *skb)
+{
+ struct sk_buff *iraw_skb;
+ unsigned char c;
+ unsigned char *cp;
+ int len;
+
+ /* size of new buffer (worst case = every byte must be stuffed):
+ * 2 * original size + room for link layer header
+ */
+ iraw_skb = dev_alloc_skb(2 * skb->len + skb->mac_len);
+ if (!iraw_skb) {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
+ /* copy link layer header into new skb */
+ skb_reset_mac_header(iraw_skb);
+ skb_reserve(iraw_skb, skb->mac_len);
+ memcpy(skb_mac_header(iraw_skb), skb_mac_header(skb), skb->mac_len);
+ iraw_skb->mac_len = skb->mac_len;
+
+ /* copy and stuff data */
+ cp = skb->data;
+ len = skb->len;
+ while (len--) {
+ c = bitrev8(*cp++);
+ if (c == DLE_FLAG)
+ skb_put_u8(iraw_skb, c);
+ skb_put_u8(iraw_skb, c);
+ }
+ dev_kfree_skb_any(skb);
+ return iraw_skb;
+}
+
+/**
+ * gigaset_m10x_send_skb() - queue an skb for sending
+ * @bcs: B channel descriptor structure.
+ * @skb: data to send.
+ *
+ * Called by LL to encode and queue an skb for sending, and start
+ * transmission if necessary.
+ * Once the payload data has been transmitted completely, gigaset_skb_sent()
+ * will be called with the skb's link layer header preserved.
+ *
+ * Return value:
+ * number of bytes accepted for sending (skb->len) if ok,
+ * error code < 0 (eg. -ENOMEM) on error
+ */
+int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb)
+{
+ struct cardstate *cs = bcs->cs;
+ unsigned len = skb->len;
+ unsigned long flags;
+
+ if (bcs->proto2 == L2_HDLC)
+ skb = HDLC_Encode(skb);
+ else
+ skb = iraw_encode(skb);
+ if (!skb) {
+ dev_err(cs->dev,
+ "unable to allocate memory for encoding!\n");
+ return -ENOMEM;
+ }
+
+ skb_queue_tail(&bcs->squeue, skb);
+ spin_lock_irqsave(&cs->lock, flags);
+ if (cs->connected)
+ tasklet_schedule(&cs->write_tasklet);
+ spin_unlock_irqrestore(&cs->lock, flags);
+
+ return len; /* ok so far */
+}
+EXPORT_SYMBOL_GPL(gigaset_m10x_send_skb);
diff --git a/drivers/staging/isdn/gigaset/bas-gigaset.c b/drivers/staging/isdn/gigaset/bas-gigaset.c
new file mode 100644
index 000000000000..c334525a5f63
--- /dev/null
+++ b/drivers/staging/isdn/gigaset/bas-gigaset.c
@@ -0,0 +1,2672 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * USB driver for Gigaset 307x base via direct USB connection.
+ *
+ * Copyright (c) 2001 by Hansjoerg Lipp <hjlipp@web.de>,
+ * Tilman Schmidt <tilman@imap.cc>,
+ * Stefan Eilers.
+ *
+ * =====================================================================
+ * =====================================================================
+ */
+
+#include "gigaset.h"
+#include <linux/usb.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+/* Version Information */
+#define DRIVER_AUTHOR "Tilman Schmidt <tilman@imap.cc>, Hansjoerg Lipp <hjlipp@web.de>, Stefan Eilers"
+#define DRIVER_DESC "USB Driver for Gigaset 307x"
+
+
+/* Module parameters */
+
+static int startmode = SM_ISDN;
+static int cidmode = 1;
+
+module_param(startmode, int, S_IRUGO);
+module_param(cidmode, int, S_IRUGO);
+MODULE_PARM_DESC(startmode, "start in isdn4linux mode");
+MODULE_PARM_DESC(cidmode, "Call-ID mode");
+
+#define GIGASET_MINORS 1
+#define GIGASET_MINOR 16
+#define GIGASET_MODULENAME "bas_gigaset"
+#define GIGASET_DEVNAME "ttyGB"
+
+/* length limit according to Siemens 3070usb-protokoll.doc ch. 2.1 */
+#define IF_WRITEBUF 264
+
+/* interrupt pipe message size according to ibid. ch. 2.2 */
+#define IP_MSGSIZE 3
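+/* (each message carries a one byte message code followed by a 16 bit
+ * little endian length/parameter value, cf. read_int_callback()) */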
+
+/* Values for the Gigaset 307x */
+#define USB_GIGA_VENDOR_ID 0x0681
+#define USB_3070_PRODUCT_ID 0x0001
+#define USB_3075_PRODUCT_ID 0x0002
+#define USB_SX303_PRODUCT_ID 0x0021
+#define USB_SX353_PRODUCT_ID 0x0022
+
+/* table of devices that work with this driver */
+static const struct usb_device_id gigaset_table[] = {
+ { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_3070_PRODUCT_ID) },
+ { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_3075_PRODUCT_ID) },
+ { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_SX303_PRODUCT_ID) },
+ { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_SX353_PRODUCT_ID) },
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, gigaset_table);
+
+/*======================= local function prototypes ==========================*/
+
+/* function called if a new device belonging to this driver is connected */
+static int gigaset_probe(struct usb_interface *interface,
+ const struct usb_device_id *id);
+
+/* Function will be called if the device is unplugged */
+static void gigaset_disconnect(struct usb_interface *interface);
+
+/* functions called before/after suspend */
+static int gigaset_suspend(struct usb_interface *intf, pm_message_t message);
+static int gigaset_resume(struct usb_interface *intf);
+
+/* functions called before/after device reset */
+static int gigaset_pre_reset(struct usb_interface *intf);
+static int gigaset_post_reset(struct usb_interface *intf);
+
+static int atread_submit(struct cardstate *, int);
+static void stopurbs(struct bas_bc_state *);
+static int req_submit(struct bc_state *, int, int, int);
+static int atwrite_submit(struct cardstate *, unsigned char *, int);
+static int start_cbsend(struct cardstate *);
+
+/*============================================================================*/
+
+struct bas_cardstate {
+ struct usb_device *udev; /* USB device pointer */
+ struct cardstate *cs;
+ struct usb_interface *interface; /* interface for this device */
+ unsigned char minor; /* starting minor number */
+
+ struct urb *urb_ctrl; /* control pipe default URB */
+ struct usb_ctrlrequest dr_ctrl;
+ struct timer_list timer_ctrl; /* control request timeout */
+ int retry_ctrl;
+
+ struct timer_list timer_atrdy; /* AT command ready timeout */
+ struct urb *urb_cmd_out; /* for sending AT commands */
+ struct usb_ctrlrequest dr_cmd_out;
+ int retry_cmd_out;
+
+ struct urb *urb_cmd_in; /* for receiving AT replies */
+ struct usb_ctrlrequest dr_cmd_in;
+ struct timer_list timer_cmd_in; /* receive request timeout */
+ unsigned char *rcvbuf; /* AT reply receive buffer */
+
+ struct urb *urb_int_in; /* URB for interrupt pipe */
+ unsigned char *int_in_buf;
+ struct work_struct int_in_wq; /* for usb_clear_halt() */
+ struct timer_list timer_int_in; /* int read retry delay */
+ int retry_int_in;
+
+ spinlock_t lock; /* locks all following */
+ int basstate; /* bitmap (BS_*) */
+ int pending; /* uncompleted base request */
+ wait_queue_head_t waitqueue;
+ int rcvbuf_size; /* size of AT receive buffer */
+ /* 0: no receive in progress */
+ int retry_cmd_in; /* receive req retry count */
+};
+
+/* status of direct USB connection to 307x base (bits in basstate) */
+#define BS_ATOPEN 0x001 /* AT channel open */
+#define BS_B1OPEN 0x002 /* B channel 1 open */
+#define BS_B2OPEN 0x004 /* B channel 2 open */
+#define BS_ATREADY 0x008 /* base ready for AT command */
+#define BS_INIT 0x010 /* base has signalled INIT_OK */
+#define BS_ATTIMER 0x020 /* waiting for HD_READY_SEND_ATDATA */
+#define BS_ATRDPEND 0x040 /* urb_cmd_in in use */
+#define BS_ATWRPEND 0x080 /* urb_cmd_out in use */
+#define BS_SUSPEND 0x100 /* USB port suspended */
+#define BS_RESETTING 0x200 /* waiting for HD_RESET_INTERRUPT_PIPE_ACK */
+
+
+static struct gigaset_driver *driver;
+
+/* usb specific object needed to register this driver with the usb subsystem */
+static struct usb_driver gigaset_usb_driver = {
+ .name = GIGASET_MODULENAME,
+ .probe = gigaset_probe,
+ .disconnect = gigaset_disconnect,
+ .id_table = gigaset_table,
+ .suspend = gigaset_suspend,
+ .resume = gigaset_resume,
+ .reset_resume = gigaset_post_reset,
+ .pre_reset = gigaset_pre_reset,
+ .post_reset = gigaset_post_reset,
+ .disable_hub_initiated_lpm = 1,
+};
+
+/* get message text for usb_submit_urb return code
+ */
+static char *get_usb_rcmsg(int rc)
+{
+ static char unkmsg[28];
+
+ switch (rc) {
+ case 0:
+ return "success";
+ case -ENOMEM:
+ return "out of memory";
+ case -ENODEV:
+ return "device not present";
+ case -ENOENT:
+ return "endpoint not present";
+ case -ENXIO:
+ return "URB type not supported";
+ case -EINVAL:
+ return "invalid argument";
+ case -EAGAIN:
+ return "start frame too early or too much scheduled";
+ case -EFBIG:
+ return "too many isoc frames requested";
+ case -EPIPE:
+ return "endpoint stalled";
+ case -EMSGSIZE:
+ return "invalid packet size";
+ case -ENOSPC:
+ return "would overcommit USB bandwidth";
+ case -ESHUTDOWN:
+ return "device shut down";
+ case -EPERM:
+ return "reject flag set";
+ case -EHOSTUNREACH:
+ return "device suspended";
+ default:
+ snprintf(unkmsg, sizeof(unkmsg), "unknown error %d", rc);
+ return unkmsg;
+ }
+}
+
+/* get message text for USB status code
+ */
+static char *get_usb_statmsg(int status)
+{
+ static char unkmsg[28];
+
+ switch (status) {
+ case 0:
+ return "success";
+ case -ENOENT:
+ return "unlinked (sync)";
+ case -EINPROGRESS:
+ return "URB still pending";
+ case -EPROTO:
+ return "bitstuff error, timeout, or unknown USB error";
+ case -EILSEQ:
+ return "CRC mismatch, timeout, or unknown USB error";
+ case -ETIME:
+ return "USB response timeout";
+ case -EPIPE:
+ return "endpoint stalled";
+ case -ECOMM:
+ return "IN buffer overrun";
+ case -ENOSR:
+ return "OUT buffer underrun";
+ case -EOVERFLOW:
+ return "endpoint babble";
+ case -EREMOTEIO:
+ return "short packet";
+ case -ENODEV:
+ return "device removed";
+ case -EXDEV:
+ return "partial isoc transfer";
+ case -EINVAL:
+ return "ISO madness";
+ case -ECONNRESET:
+ return "unlinked (async)";
+ case -ESHUTDOWN:
+ return "device shut down";
+ default:
+ snprintf(unkmsg, sizeof(unkmsg), "unknown status %d", status);
+ return unkmsg;
+ }
+}
+
+/* usb_pipetype_str
+ * retrieve string representation of USB pipe type
+ */
+static inline char *usb_pipetype_str(int pipe)
+{
+ if (usb_pipeisoc(pipe))
+ return "Isoc";
+ if (usb_pipeint(pipe))
+ return "Int";
+ if (usb_pipecontrol(pipe))
+ return "Ctrl";
+ if (usb_pipebulk(pipe))
+ return "Bulk";
+ return "?";
+}
+
+/* dump_urb
+ * write content of URB to syslog for debugging
+ */
+static inline void dump_urb(enum debuglevel level, const char *tag,
+ struct urb *urb)
+{
+#ifdef CONFIG_GIGASET_DEBUG
+ int i;
+ gig_dbg(level, "%s urb(0x%08lx)->{", tag, (unsigned long) urb);
+ if (urb) {
+ gig_dbg(level,
+ " dev=0x%08lx, pipe=%s:EP%d/DV%d:%s, "
+ "hcpriv=0x%08lx, transfer_flags=0x%x,",
+ (unsigned long) urb->dev,
+ usb_pipetype_str(urb->pipe),
+ usb_pipeendpoint(urb->pipe), usb_pipedevice(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out",
+ (unsigned long) urb->hcpriv,
+ urb->transfer_flags);
+ gig_dbg(level,
+ " transfer_buffer=0x%08lx[%d], actual_length=%d, "
+ "setup_packet=0x%08lx,",
+ (unsigned long) urb->transfer_buffer,
+ urb->transfer_buffer_length, urb->actual_length,
+ (unsigned long) urb->setup_packet);
+ gig_dbg(level,
+ " start_frame=%d, number_of_packets=%d, interval=%d, "
+ "error_count=%d,",
+ urb->start_frame, urb->number_of_packets, urb->interval,
+ urb->error_count);
+ gig_dbg(level,
+ " context=0x%08lx, complete=0x%08lx, "
+ "iso_frame_desc[]={",
+ (unsigned long) urb->context,
+ (unsigned long) urb->complete);
+ for (i = 0; i < urb->number_of_packets; i++) {
+ struct usb_iso_packet_descriptor *pifd
+ = &urb->iso_frame_desc[i];
+ gig_dbg(level,
+ " {offset=%u, length=%u, actual_length=%u, "
+ "status=%u}",
+ pifd->offset, pifd->length, pifd->actual_length,
+ pifd->status);
+ }
+ }
+ gig_dbg(level, "}}");
+#endif
+}
+
+/* read/set modem control bits etc. (m10x only) */
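+/* (these operations only apply to the M10x devices; the 307x base does not
+ * support them, so the stubs below just return -EINVAL) */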
+static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
+ unsigned new_state)
+{
+ return -EINVAL;
+}
+
+static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
+{
+ return -EINVAL;
+}
+
+static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
+{
+ return -EINVAL;
+}
+
+/* set/clear bits in base connection state, return previous state
+ */
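+/* e.g. update_basstate(ucs, BS_ATRDPEND, 0) marks a read request as pending
+ * and, via the returned previous state, lets the caller detect a request
+ * that was already in flight (cf. atread_submit()).
+ */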
+static inline int update_basstate(struct bas_cardstate *ucs,
+ int set, int clear)
+{
+ unsigned long flags;
+ int state;
+
+ spin_lock_irqsave(&ucs->lock, flags);
+ state = ucs->basstate;
+ ucs->basstate = (state & ~clear) | set;
+ spin_unlock_irqrestore(&ucs->lock, flags);
+ return state;
+}
+
+/* error_hangup
+ * hang up any existing connection because of an unrecoverable error
+ * This function may be called from any context and takes care of scheduling
+ * the necessary actions for execution outside of interrupt context.
+ * cs->lock must not be held.
+ * argument:
+ * B channel control structure
+ */
+static inline void error_hangup(struct bc_state *bcs)
+{
+ struct cardstate *cs = bcs->cs;
+
+ gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL);
+ gigaset_schedule_event(cs);
+}
+
+/* error_reset
+ * reset Gigaset device because of an unrecoverable error
+ * This function may be called from any context, and takes care of
+ * scheduling the necessary actions for execution outside of interrupt context.
+ * cs->hw.bas->lock must not be held.
+ * argument:
+ * controller state structure
+ */
+static inline void error_reset(struct cardstate *cs)
+{
+ /* reset interrupt pipe to recover (ignore errors) */
+ update_basstate(cs->hw.bas, BS_RESETTING, 0);
+ if (req_submit(cs->bcs, HD_RESET_INTERRUPT_PIPE, 0, BAS_TIMEOUT))
+ /* submission failed, escalate to USB port reset */
+ usb_queue_reset_device(cs->hw.bas->interface);
+}
+
+/* check_pending
+ * check for completion of pending control request
+ * parameter:
+ * ucs hardware specific controller state structure
+ */
+static void check_pending(struct bas_cardstate *ucs)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ucs->lock, flags);
+ switch (ucs->pending) {
+ case 0:
+ break;
+ case HD_OPEN_ATCHANNEL:
+ if (ucs->basstate & BS_ATOPEN)
+ ucs->pending = 0;
+ break;
+ case HD_OPEN_B1CHANNEL:
+ if (ucs->basstate & BS_B1OPEN)
+ ucs->pending = 0;
+ break;
+ case HD_OPEN_B2CHANNEL:
+ if (ucs->basstate & BS_B2OPEN)
+ ucs->pending = 0;
+ break;
+ case HD_CLOSE_ATCHANNEL:
+ if (!(ucs->basstate & BS_ATOPEN))
+ ucs->pending = 0;
+ break;
+ case HD_CLOSE_B1CHANNEL:
+ if (!(ucs->basstate & BS_B1OPEN))
+ ucs->pending = 0;
+ break;
+ case HD_CLOSE_B2CHANNEL:
+ if (!(ucs->basstate & BS_B2OPEN))
+ ucs->pending = 0;
+ break;
+ case HD_DEVICE_INIT_ACK: /* no reply expected */
+ ucs->pending = 0;
+ break;
+ case HD_RESET_INTERRUPT_PIPE:
+ if (!(ucs->basstate & BS_RESETTING))
+ ucs->pending = 0;
+ break;
+ /*
+ * HD_READ_ATMESSAGE and HD_WRITE_ATMESSAGE are handled separately
+ * and should never end up here
+ */
+ default:
+ dev_warn(&ucs->interface->dev,
+ "unknown pending request 0x%02x cleared\n",
+ ucs->pending);
+ ucs->pending = 0;
+ }
+
+ if (!ucs->pending)
+ del_timer(&ucs->timer_ctrl);
+
+ spin_unlock_irqrestore(&ucs->lock, flags);
+}
+
+/* cmd_in_timeout
+ * timeout routine for command input request
+ * argument:
+ * controller state structure
+ */
+static void cmd_in_timeout(struct timer_list *t)
+{
+ struct bas_cardstate *ucs = from_timer(ucs, t, timer_cmd_in);
+ struct cardstate *cs = ucs->cs;
+ int rc;
+
+ if (!ucs->rcvbuf_size) {
+ gig_dbg(DEBUG_USBREQ, "%s: no receive in progress", __func__);
+ return;
+ }
+
+ if (ucs->retry_cmd_in++ >= BAS_RETRY) {
+ dev_err(cs->dev,
+ "control read: timeout, giving up after %d tries\n",
+ ucs->retry_cmd_in);
+ kfree(ucs->rcvbuf);
+ ucs->rcvbuf = NULL;
+ ucs->rcvbuf_size = 0;
+ error_reset(cs);
+ return;
+ }
+
+ gig_dbg(DEBUG_USBREQ, "%s: timeout, retry %d",
+ __func__, ucs->retry_cmd_in);
+ rc = atread_submit(cs, BAS_TIMEOUT);
+ if (rc < 0) {
+ kfree(ucs->rcvbuf);
+ ucs->rcvbuf = NULL;
+ ucs->rcvbuf_size = 0;
+ if (rc != -ENODEV)
+ error_reset(cs);
+ }
+}
+
+/* read_ctrl_callback
+ * USB completion handler for control pipe input
+ * called by the USB subsystem in interrupt context
+ * parameter:
+ * urb USB request block
+ * urb->context = inbuf structure for controller state
+ */
+static void read_ctrl_callback(struct urb *urb)
+{
+ struct inbuf_t *inbuf = urb->context;
+ struct cardstate *cs = inbuf->cs;
+ struct bas_cardstate *ucs = cs->hw.bas;
+ int status = urb->status;
+ unsigned numbytes;
+ int rc;
+
+ update_basstate(ucs, 0, BS_ATRDPEND);
+ wake_up(&ucs->waitqueue);
+ del_timer(&ucs->timer_cmd_in);
+
+ switch (status) {
+ case 0: /* normal completion */
+ numbytes = urb->actual_length;
+ if (unlikely(numbytes != ucs->rcvbuf_size)) {
+ dev_warn(cs->dev,
+ "control read: received %d chars, expected %d\n",
+ numbytes, ucs->rcvbuf_size);
+ if (numbytes > ucs->rcvbuf_size)
+ numbytes = ucs->rcvbuf_size;
+ }
+
+ /* copy received bytes to inbuf, notify event layer */
+ if (gigaset_fill_inbuf(inbuf, ucs->rcvbuf, numbytes)) {
+ gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
+ gigaset_schedule_event(cs);
+ }
+ break;
+
+ case -ENOENT: /* cancelled */
+ case -ECONNRESET: /* cancelled (async) */
+ case -EINPROGRESS: /* pending */
+ case -ENODEV: /* device removed */
+ case -ESHUTDOWN: /* device shut down */
+ /* no further action necessary */
+ gig_dbg(DEBUG_USBREQ, "%s: %s",
+ __func__, get_usb_statmsg(status));
+ break;
+
+ default: /* other errors: retry */
+ if (ucs->retry_cmd_in++ < BAS_RETRY) {
+ gig_dbg(DEBUG_USBREQ, "%s: %s, retry %d", __func__,
+ get_usb_statmsg(status), ucs->retry_cmd_in);
+ rc = atread_submit(cs, BAS_TIMEOUT);
+ if (rc >= 0)
+ /* successfully resubmitted, skip freeing */
+ return;
+ if (rc == -ENODEV)
+ /* disconnect, no further action necessary */
+ break;
+ }
+ dev_err(cs->dev, "control read: %s, giving up after %d tries\n",
+ get_usb_statmsg(status), ucs->retry_cmd_in);
+ error_reset(cs);
+ }
+
+ /* read finished, free buffer */
+ kfree(ucs->rcvbuf);
+ ucs->rcvbuf = NULL;
+ ucs->rcvbuf_size = 0;
+}
+
+/* atread_submit
+ * submit an HD_READ_ATMESSAGE command URB and optionally start a timeout
+ * parameters:
+ * cs controller state structure
+ * timeout timeout in 1/10 sec., 0: none
+ * return value:
+ * 0 on success
+ * -EBUSY if another request is pending
+ * any URB submission error code
+ */
+static int atread_submit(struct cardstate *cs, int timeout)
+{
+ struct bas_cardstate *ucs = cs->hw.bas;
+ int basstate;
+ int ret;
+
+ gig_dbg(DEBUG_USBREQ, "-------> HD_READ_ATMESSAGE (%d)",
+ ucs->rcvbuf_size);
+
+ basstate = update_basstate(ucs, BS_ATRDPEND, 0);
+ if (basstate & BS_ATRDPEND) {
+ dev_err(cs->dev,
+ "could not submit HD_READ_ATMESSAGE: URB busy\n");
+ return -EBUSY;
+ }
+
+ if (basstate & BS_SUSPEND) {
+ dev_notice(cs->dev,
+ "HD_READ_ATMESSAGE not submitted, "
+ "suspend in progress\n");
+ update_basstate(ucs, 0, BS_ATRDPEND);
+ /* treat like disconnect */
+ return -ENODEV;
+ }
+
+ ucs->dr_cmd_in.bRequestType = IN_VENDOR_REQ;
+ ucs->dr_cmd_in.bRequest = HD_READ_ATMESSAGE;
+ ucs->dr_cmd_in.wValue = 0;
+ ucs->dr_cmd_in.wIndex = 0;
+ ucs->dr_cmd_in.wLength = cpu_to_le16(ucs->rcvbuf_size);
+ usb_fill_control_urb(ucs->urb_cmd_in, ucs->udev,
+ usb_rcvctrlpipe(ucs->udev, 0),
+ (unsigned char *) &ucs->dr_cmd_in,
+ ucs->rcvbuf, ucs->rcvbuf_size,
+ read_ctrl_callback, cs->inbuf);
+
+ ret = usb_submit_urb(ucs->urb_cmd_in, GFP_ATOMIC);
+ if (ret != 0) {
+ update_basstate(ucs, 0, BS_ATRDPEND);
+ dev_err(cs->dev, "could not submit HD_READ_ATMESSAGE: %s\n",
+ get_usb_rcmsg(ret));
+ return ret;
+ }
+
+ if (timeout > 0) {
+ gig_dbg(DEBUG_USBREQ, "setting timeout of %d/10 secs", timeout);
+ mod_timer(&ucs->timer_cmd_in, jiffies + timeout * HZ / 10);
+ }
+ return 0;
+}
+
+/* int_in_work
+ * workqueue routine to clear halt on interrupt in endpoint
+ */
+
+static void int_in_work(struct work_struct *work)
+{
+ struct bas_cardstate *ucs =
+ container_of(work, struct bas_cardstate, int_in_wq);
+ struct urb *urb = ucs->urb_int_in;
+ struct cardstate *cs = urb->context;
+ int rc;
+
+ /* clear halt condition */
+ rc = usb_clear_halt(ucs->udev, urb->pipe);
+ gig_dbg(DEBUG_USBREQ, "clear_halt: %s", get_usb_rcmsg(rc));
+ if (rc == 0)
+ /* success, resubmit interrupt read URB */
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+
+ switch (rc) {
+ case 0: /* success */
+ case -ENODEV: /* device gone */
+ case -EINVAL: /* URB already resubmitted, or terminal badness */
+ break;
+ default: /* failure: try to recover by resetting the device */
+ dev_err(cs->dev, "clear halt failed: %s\n", get_usb_rcmsg(rc));
+ rc = usb_lock_device_for_reset(ucs->udev, ucs->interface);
+ if (rc == 0) {
+ rc = usb_reset_device(ucs->udev);
+ usb_unlock_device(ucs->udev);
+ }
+ }
+ ucs->retry_int_in = 0;
+}
+
+/* int_in_resubmit
+ * timer routine for interrupt read delayed resubmit
+ * argument:
+ * controller state structure
+ */
+static void int_in_resubmit(struct timer_list *t)
+{
+ struct bas_cardstate *ucs = from_timer(ucs, t, timer_int_in);
+ struct cardstate *cs = ucs->cs;
+ int rc;
+
+ if (ucs->retry_int_in++ >= BAS_RETRY) {
+ dev_err(cs->dev, "interrupt read: giving up after %d tries\n",
+ ucs->retry_int_in);
+ usb_queue_reset_device(ucs->interface);
+ return;
+ }
+
+ gig_dbg(DEBUG_USBREQ, "%s: retry %d", __func__, ucs->retry_int_in);
+ rc = usb_submit_urb(ucs->urb_int_in, GFP_ATOMIC);
+ if (rc != 0 && rc != -ENODEV) {
+ dev_err(cs->dev, "could not resubmit interrupt URB: %s\n",
+ get_usb_rcmsg(rc));
+ usb_queue_reset_device(ucs->interface);
+ }
+}
+
+/* read_int_callback
+ * USB completion handler for interrupt pipe input
+ * called by the USB subsystem in interrupt context
+ * parameter:
+ * urb USB request block
+ * urb->context = controller state structure
+ */
+static void read_int_callback(struct urb *urb)
+{
+ struct cardstate *cs = urb->context;
+ struct bas_cardstate *ucs = cs->hw.bas;
+ struct bc_state *bcs;
+ int status = urb->status;
+ unsigned long flags;
+ int rc;
+ unsigned l;
+ int channel;
+
+ switch (status) {
+ case 0: /* success */
+ ucs->retry_int_in = 0;
+ break;
+ case -EPIPE: /* endpoint stalled */
+ schedule_work(&ucs->int_in_wq);
+ /* fall through */
+ case -ENOENT: /* cancelled */
+ case -ECONNRESET: /* cancelled (async) */
+ case -EINPROGRESS: /* pending */
+ case -ENODEV: /* device removed */
+ case -ESHUTDOWN: /* device shut down */
+ /* no further action necessary */
+ gig_dbg(DEBUG_USBREQ, "%s: %s",
+ __func__, get_usb_statmsg(status));
+ return;
+ case -EPROTO: /* protocol error or unplug */
+ case -EILSEQ:
+ case -ETIME:
+ /* resubmit after delay */
+ gig_dbg(DEBUG_USBREQ, "%s: %s",
+ __func__, get_usb_statmsg(status));
+ mod_timer(&ucs->timer_int_in, jiffies + HZ / 10);
+ return;
+ default: /* other errors: just resubmit */
+ dev_warn(cs->dev, "interrupt read: %s\n",
+ get_usb_statmsg(status));
+ goto resubmit;
+ }
+
+ /* drop incomplete packets even if the missing bytes wouldn't matter */
+ if (unlikely(urb->actual_length < IP_MSGSIZE)) {
+ dev_warn(cs->dev, "incomplete interrupt packet (%d bytes)\n",
+ urb->actual_length);
+ goto resubmit;
+ }
+
+ l = (unsigned) ucs->int_in_buf[1] +
+ (((unsigned) ucs->int_in_buf[2]) << 8);
+
+ gig_dbg(DEBUG_USBREQ, "<-------%d: 0x%02x (%u [0x%02x 0x%02x])",
+ urb->actual_length, (int)ucs->int_in_buf[0], l,
+ (int)ucs->int_in_buf[1], (int)ucs->int_in_buf[2]);
+
+ channel = 0;
+
+ switch (ucs->int_in_buf[0]) {
+ case HD_DEVICE_INIT_OK:
+ update_basstate(ucs, BS_INIT, 0);
+ break;
+
+ case HD_READY_SEND_ATDATA:
+ del_timer(&ucs->timer_atrdy);
+ update_basstate(ucs, BS_ATREADY, BS_ATTIMER);
+ start_cbsend(cs);
+ break;
+
+ case HD_OPEN_B2CHANNEL_ACK:
+ ++channel;
+ /* fall through */
+ case HD_OPEN_B1CHANNEL_ACK:
+ bcs = cs->bcs + channel;
+ update_basstate(ucs, BS_B1OPEN << channel, 0);
+ gigaset_bchannel_up(bcs);
+ break;
+
+ case HD_OPEN_ATCHANNEL_ACK:
+ update_basstate(ucs, BS_ATOPEN, 0);
+ start_cbsend(cs);
+ break;
+
+ case HD_CLOSE_B2CHANNEL_ACK:
+ ++channel;
+ /* fall through */
+ case HD_CLOSE_B1CHANNEL_ACK:
+ bcs = cs->bcs + channel;
+ update_basstate(ucs, 0, BS_B1OPEN << channel);
+ stopurbs(bcs->hw.bas);
+ gigaset_bchannel_down(bcs);
+ break;
+
+ case HD_CLOSE_ATCHANNEL_ACK:
+ update_basstate(ucs, 0, BS_ATOPEN);
+ break;
+
+ case HD_B2_FLOW_CONTROL:
+ ++channel;
+ /* fall through */
+ case HD_B1_FLOW_CONTROL:
+ bcs = cs->bcs + channel;
+ atomic_add((l - BAS_NORMFRAME) * BAS_CORRFRAMES,
+ &bcs->hw.bas->corrbytes);
+ gig_dbg(DEBUG_ISO,
+ "Flow control (channel %d, sub %d): 0x%02x => %d",
+ channel, bcs->hw.bas->numsub, l,
+ atomic_read(&bcs->hw.bas->corrbytes));
+ break;
+
+ case HD_RECEIVEATDATA_ACK: /* AT response ready to be received */
+ if (!l) {
+ dev_warn(cs->dev,
+ "HD_RECEIVEATDATA_ACK with length 0 ignored\n");
+ break;
+ }
+ spin_lock_irqsave(&cs->lock, flags);
+ if (ucs->basstate & BS_ATRDPEND) {
+ spin_unlock_irqrestore(&cs->lock, flags);
+ dev_warn(cs->dev,
+ "HD_RECEIVEATDATA_ACK(%d) during HD_READ_ATMESSAGE(%d) ignored\n",
+ l, ucs->rcvbuf_size);
+ break;
+ }
+ if (ucs->rcvbuf_size) {
+ /* throw away previous buffer - we have no queue */
+ dev_err(cs->dev,
+ "receive AT data overrun, %d bytes lost\n",
+ ucs->rcvbuf_size);
+ kfree(ucs->rcvbuf);
+ ucs->rcvbuf_size = 0;
+ }
+ ucs->rcvbuf = kmalloc(l, GFP_ATOMIC);
+ if (ucs->rcvbuf == NULL) {
+ spin_unlock_irqrestore(&cs->lock, flags);
+ dev_err(cs->dev, "out of memory receiving AT data\n");
+ break;
+ }
+ ucs->rcvbuf_size = l;
+ ucs->retry_cmd_in = 0;
+ rc = atread_submit(cs, BAS_TIMEOUT);
+ if (rc < 0) {
+ kfree(ucs->rcvbuf);
+ ucs->rcvbuf = NULL;
+ ucs->rcvbuf_size = 0;
+ }
+ spin_unlock_irqrestore(&cs->lock, flags);
+ if (rc < 0 && rc != -ENODEV)
+ error_reset(cs);
+ break;
+
+ case HD_RESET_INTERRUPT_PIPE_ACK:
+ update_basstate(ucs, 0, BS_RESETTING);
+ dev_notice(cs->dev, "interrupt pipe reset\n");
+ break;
+
+ case HD_SUSPEND_END:
+ gig_dbg(DEBUG_USBREQ, "HD_SUSPEND_END");
+ break;
+
+ default:
+ dev_warn(cs->dev,
+ "unknown Gigaset signal 0x%02x (%u) ignored\n",
+ (int) ucs->int_in_buf[0], l);
+ }
+
+ check_pending(ucs);
+ wake_up(&ucs->waitqueue);
+
+resubmit:
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+ if (unlikely(rc != 0 && rc != -ENODEV)) {
+ dev_err(cs->dev, "could not resubmit interrupt URB: %s\n",
+ get_usb_rcmsg(rc));
+ error_reset(cs);
+ }
+}
+
+/* read_iso_callback
+ * USB completion handler for B channel isochronous input
+ * called by the USB subsystem in interrupt context
+ * parameter:
+ * urb USB request block of completed request
+ * urb->context = bc_state structure
+ */
+static void read_iso_callback(struct urb *urb)
+{
+ struct bc_state *bcs;
+ struct bas_bc_state *ubc;
+ int status = urb->status;
+ unsigned long flags;
+ int i, rc;
+
+ /* status codes not worth bothering the tasklet with */
+ if (unlikely(status == -ENOENT ||
+ status == -ECONNRESET ||
+ status == -EINPROGRESS ||
+ status == -ENODEV ||
+ status == -ESHUTDOWN)) {
+ gig_dbg(DEBUG_ISO, "%s: %s",
+ __func__, get_usb_statmsg(status));
+ return;
+ }
+
+ bcs = urb->context;
+ ubc = bcs->hw.bas;
+
+ spin_lock_irqsave(&ubc->isoinlock, flags);
+ if (likely(ubc->isoindone == NULL)) {
+ /* pass URB to tasklet */
+ ubc->isoindone = urb;
+ ubc->isoinstatus = status;
+ tasklet_hi_schedule(&ubc->rcvd_tasklet);
+ } else {
+ /* tasklet still busy, drop data and resubmit URB */
+ gig_dbg(DEBUG_ISO, "%s: overrun", __func__);
+ ubc->loststatus = status;
+ for (i = 0; i < BAS_NUMFRAMES; i++) {
+ ubc->isoinlost += urb->iso_frame_desc[i].actual_length;
+ if (unlikely(urb->iso_frame_desc[i].status != 0 &&
+ urb->iso_frame_desc[i].status != -EINPROGRESS))
+ ubc->loststatus = urb->iso_frame_desc[i].status;
+ urb->iso_frame_desc[i].status = 0;
+ urb->iso_frame_desc[i].actual_length = 0;
+ }
+ if (likely(ubc->running)) {
+ /* urb->dev is clobbered by USB subsystem */
+ urb->dev = bcs->cs->hw.bas->udev;
+ urb->transfer_flags = URB_ISO_ASAP;
+ urb->number_of_packets = BAS_NUMFRAMES;
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+ if (unlikely(rc != 0 && rc != -ENODEV)) {
+ dev_err(bcs->cs->dev,
+ "could not resubmit isoc read URB: %s\n",
+ get_usb_rcmsg(rc));
+ dump_urb(DEBUG_ISO, "isoc read", urb);
+ error_hangup(bcs);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ubc->isoinlock, flags);
+}
+
+/* write_iso_callback
+ * USB completion handler for B channel isochronous output
+ * called by the USB subsystem in interrupt context
+ * parameter:
+ * urb USB request block of completed request
+ * urb->context = isow_urbctx_t structure
+ */
+static void write_iso_callback(struct urb *urb)
+{
+ struct isow_urbctx_t *ucx;
+ struct bas_bc_state *ubc;
+ int status = urb->status;
+ unsigned long flags;
+
+ /* status codes not worth bothering the tasklet with */
+ if (unlikely(status == -ENOENT ||
+ status == -ECONNRESET ||
+ status == -EINPROGRESS ||
+ status == -ENODEV ||
+ status == -ESHUTDOWN)) {
+ gig_dbg(DEBUG_ISO, "%s: %s",
+ __func__, get_usb_statmsg(status));
+ return;
+ }
+
+ /* pass URB context to tasklet */
+ ucx = urb->context;
+ ubc = ucx->bcs->hw.bas;
+ ucx->status = status;
+
+ spin_lock_irqsave(&ubc->isooutlock, flags);
+ ubc->isooutovfl = ubc->isooutdone;
+ ubc->isooutdone = ucx;
+ spin_unlock_irqrestore(&ubc->isooutlock, flags);
+ tasklet_hi_schedule(&ubc->sent_tasklet);
+}
+
+/* starturbs
+ * prepare and submit USB request blocks for isochronous input and output
+ * argument:
+ * B channel control structure
+ * return value:
+ * 0 on success
+ * < 0 on error (no URBs submitted)
+ */
+static int starturbs(struct bc_state *bcs)
+{
+ struct usb_device *udev = bcs->cs->hw.bas->udev;
+ struct bas_bc_state *ubc = bcs->hw.bas;
+ struct urb *urb;
+ int j, k;
+ int rc;
+
+ /* initialize L2 reception */
+ if (bcs->proto2 == L2_HDLC)
+ bcs->inputstate |= INS_flag_hunt;
+
+ /* submit all isochronous input URBs */
+ ubc->running = 1;
+ for (k = 0; k < BAS_INURBS; k++) {
+ urb = ubc->isoinurbs[k];
+ if (!urb) {
+ rc = -EFAULT;
+ goto error;
+ }
+ usb_fill_int_urb(urb, udev,
+ usb_rcvisocpipe(udev, 3 + 2 * bcs->channel),
+ ubc->isoinbuf + k * BAS_INBUFSIZE,
+ BAS_INBUFSIZE, read_iso_callback, bcs,
+ BAS_FRAMETIME);
+
+ urb->transfer_flags = URB_ISO_ASAP;
+ urb->number_of_packets = BAS_NUMFRAMES;
+ for (j = 0; j < BAS_NUMFRAMES; j++) {
+ urb->iso_frame_desc[j].offset = j * BAS_MAXFRAME;
+ urb->iso_frame_desc[j].length = BAS_MAXFRAME;
+ urb->iso_frame_desc[j].status = 0;
+ urb->iso_frame_desc[j].actual_length = 0;
+ }
+
+ dump_urb(DEBUG_ISO, "Initial isoc read", urb);
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+ if (rc != 0)
+ goto error;
+ }
+
+ /* initialize L2 transmission */
+ gigaset_isowbuf_init(ubc->isooutbuf, PPP_FLAG);
+
+ /* set up isochronous output URBs for flag idling */
+ for (k = 0; k < BAS_OUTURBS; ++k) {
+ urb = ubc->isoouturbs[k].urb;
+ if (!urb) {
+ rc = -EFAULT;
+ goto error;
+ }
+ usb_fill_int_urb(urb, udev,
+ usb_sndisocpipe(udev, 4 + 2 * bcs->channel),
+ ubc->isooutbuf->data,
+ sizeof(ubc->isooutbuf->data),
+ write_iso_callback, &ubc->isoouturbs[k],
+ BAS_FRAMETIME);
+
+ urb->transfer_flags = URB_ISO_ASAP;
+ urb->number_of_packets = BAS_NUMFRAMES;
+ for (j = 0; j < BAS_NUMFRAMES; ++j) {
+ urb->iso_frame_desc[j].offset = BAS_OUTBUFSIZE;
+ urb->iso_frame_desc[j].length = BAS_NORMFRAME;
+ urb->iso_frame_desc[j].status = 0;
+ urb->iso_frame_desc[j].actual_length = 0;
+ }
+ ubc->isoouturbs[k].limit = -1;
+ }
+
+ /* keep one URB free, submit the others */
+ for (k = 0; k < BAS_OUTURBS - 1; ++k) {
+ dump_urb(DEBUG_ISO, "Initial isoc write", urb);
+ rc = usb_submit_urb(ubc->isoouturbs[k].urb, GFP_ATOMIC);
+ if (rc != 0)
+ goto error;
+ }
+ dump_urb(DEBUG_ISO, "Initial isoc write (free)", urb);
+ ubc->isooutfree = &ubc->isoouturbs[BAS_OUTURBS - 1];
+ ubc->isooutdone = ubc->isooutovfl = NULL;
+ return 0;
+error:
+ stopurbs(ubc);
+ return rc;
+}
+
+/* stopurbs
+ * cancel the USB request blocks for isochronous input and output
+ * errors are silently ignored
+ * argument:
+ * B channel control structure
+ */
+static void stopurbs(struct bas_bc_state *ubc)
+{
+ int k, rc;
+
+ ubc->running = 0;
+
+ for (k = 0; k < BAS_INURBS; ++k) {
+ rc = usb_unlink_urb(ubc->isoinurbs[k]);
+ gig_dbg(DEBUG_ISO,
+ "%s: isoc input URB %d unlinked, result = %s",
+ __func__, k, get_usb_rcmsg(rc));
+ }
+
+ for (k = 0; k < BAS_OUTURBS; ++k) {
+ rc = usb_unlink_urb(ubc->isoouturbs[k].urb);
+ gig_dbg(DEBUG_ISO,
+ "%s: isoc output URB %d unlinked, result = %s",
+ __func__, k, get_usb_rcmsg(rc));
+ }
+}
+
+/* Isochronous Write - Bottom Half */
+/* =============================== */
+
+/* submit_iso_write_urb
+ * fill and submit the next isochronous write URB
+ * parameters:
+ * ucx context structure containing URB
+ * return value:
+ * number of frames submitted in URB
+ * 0 if URB not submitted because no data available (isooutbuf busy)
+ * error code < 0 on error
+ */
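+/* Frame sizing: each frame nominally carries BAS_NORMFRAME bytes; bytes
+ * accumulated in corrbytes from HD_B1/B2_FLOW_CONTROL messages stretch or
+ * shrink individual frames, keeping each frame length within the
+ * BAS_LOWFRAME..BAS_HIGHFRAME range (see the loop below).
+ */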
+static int submit_iso_write_urb(struct isow_urbctx_t *ucx)
+{
+ struct urb *urb = ucx->urb;
+ struct bas_bc_state *ubc = ucx->bcs->hw.bas;
+ struct usb_iso_packet_descriptor *ifd;
+ int corrbytes, nframe, rc;
+
+ /* urb->dev is clobbered by USB subsystem */
+ urb->dev = ucx->bcs->cs->hw.bas->udev;
+ urb->transfer_flags = URB_ISO_ASAP;
+ urb->transfer_buffer = ubc->isooutbuf->data;
+ urb->transfer_buffer_length = sizeof(ubc->isooutbuf->data);
+
+ for (nframe = 0; nframe < BAS_NUMFRAMES; nframe++) {
+ ifd = &urb->iso_frame_desc[nframe];
+
+ /* compute frame length according to flow control */
+ ifd->length = BAS_NORMFRAME;
+ corrbytes = atomic_read(&ubc->corrbytes);
+ if (corrbytes != 0) {
+ gig_dbg(DEBUG_ISO, "%s: corrbytes=%d",
+ __func__, corrbytes);
+ if (corrbytes > BAS_HIGHFRAME - BAS_NORMFRAME)
+ corrbytes = BAS_HIGHFRAME - BAS_NORMFRAME;
+ else if (corrbytes < BAS_LOWFRAME - BAS_NORMFRAME)
+ corrbytes = BAS_LOWFRAME - BAS_NORMFRAME;
+ ifd->length += corrbytes;
+ atomic_add(-corrbytes, &ubc->corrbytes);
+ }
+
+ /* retrieve block of data to send */
+ rc = gigaset_isowbuf_getbytes(ubc->isooutbuf, ifd->length);
+ if (rc < 0) {
+ if (rc == -EBUSY) {
+ gig_dbg(DEBUG_ISO,
+ "%s: buffer busy at frame %d",
+ __func__, nframe);
+ /* tasklet will be restarted from
+ gigaset_isoc_send_skb() */
+ } else {
+ dev_err(ucx->bcs->cs->dev,
+ "%s: buffer error %d at frame %d\n",
+ __func__, rc, nframe);
+ return rc;
+ }
+ break;
+ }
+ ifd->offset = rc;
+ ucx->limit = ubc->isooutbuf->nextread;
+ ifd->status = 0;
+ ifd->actual_length = 0;
+ }
+ if (unlikely(nframe == 0))
+ return 0; /* no data to send */
+ urb->number_of_packets = nframe;
+
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+ if (unlikely(rc)) {
+ if (rc == -ENODEV)
+ /* device removed - give up silently */
+ gig_dbg(DEBUG_ISO, "%s: disconnected", __func__);
+ else
+ dev_err(ucx->bcs->cs->dev,
+ "could not submit isoc write URB: %s\n",
+ get_usb_rcmsg(rc));
+ return rc;
+ }
+ ++ubc->numsub;
+ return nframe;
+}
+
+/* write_iso_tasklet
+ * tasklet scheduled when an isochronous output URB from the Gigaset device
+ * has completed
+ * parameter:
+ * data B channel state structure
+ */
+static void write_iso_tasklet(unsigned long data)
+{
+ struct bc_state *bcs = (struct bc_state *) data;
+ struct bas_bc_state *ubc = bcs->hw.bas;
+ struct cardstate *cs = bcs->cs;
+ struct isow_urbctx_t *done, *next, *ovfl;
+ struct urb *urb;
+ int status;
+ struct usb_iso_packet_descriptor *ifd;
+ unsigned long flags;
+ int i;
+ struct sk_buff *skb;
+ int len;
+ int rc;
+
+ /* loop while completed URBs arrive in time */
+ for (;;) {
+ if (unlikely(!(ubc->running))) {
+ gig_dbg(DEBUG_ISO, "%s: not running", __func__);
+ return;
+ }
+
+ /* retrieve completed URBs */
+ spin_lock_irqsave(&ubc->isooutlock, flags);
+ done = ubc->isooutdone;
+ ubc->isooutdone = NULL;
+ ovfl = ubc->isooutovfl;
+ ubc->isooutovfl = NULL;
+ spin_unlock_irqrestore(&ubc->isooutlock, flags);
+ if (ovfl) {
+ dev_err(cs->dev, "isoc write underrun\n");
+ error_hangup(bcs);
+ break;
+ }
+ if (!done)
+ break;
+
+ /* submit free URB if available */
+ spin_lock_irqsave(&ubc->isooutlock, flags);
+ next = ubc->isooutfree;
+ ubc->isooutfree = NULL;
+ spin_unlock_irqrestore(&ubc->isooutlock, flags);
+ if (next) {
+ rc = submit_iso_write_urb(next);
+ if (unlikely(rc <= 0 && rc != -ENODEV)) {
+ /* could not submit URB, put it back */
+ spin_lock_irqsave(&ubc->isooutlock, flags);
+ if (ubc->isooutfree == NULL) {
+ ubc->isooutfree = next;
+ next = NULL;
+ }
+ spin_unlock_irqrestore(&ubc->isooutlock, flags);
+ if (next) {
+ /* couldn't put it back */
+ dev_err(cs->dev,
+ "losing isoc write URB\n");
+ error_hangup(bcs);
+ }
+ }
+ }
+
+ /* process completed URB */
+ urb = done->urb;
+ status = done->status;
+ switch (status) {
+ case -EXDEV: /* partial completion */
+ gig_dbg(DEBUG_ISO, "%s: URB partially completed",
+ __func__);
+ /* fall through - what's the difference anyway? */
+ case 0: /* normal completion */
+ /* inspect individual frames
+ * assumptions (for lack of documentation):
+ * - actual_length bytes of first frame in error are
+ * successfully sent
+ * - all following frames are not sent at all
+ */
+ for (i = 0; i < BAS_NUMFRAMES; i++) {
+ ifd = &urb->iso_frame_desc[i];
+ if (ifd->status ||
+ ifd->actual_length != ifd->length) {
+ dev_warn(cs->dev,
+ "isoc write: frame %d[%d/%d]: %s\n",
+ i, ifd->actual_length,
+ ifd->length,
+ get_usb_statmsg(ifd->status));
+ break;
+ }
+ }
+ break;
+ case -EPIPE: /* stall - probably underrun */
+ dev_err(cs->dev, "isoc write: stalled\n");
+ error_hangup(bcs);
+ break;
+ default: /* other errors */
+ dev_warn(cs->dev, "isoc write: %s\n",
+ get_usb_statmsg(status));
+ }
+
+ /* mark the write buffer area covered by this URB as free */
+ if (done->limit >= 0)
+ ubc->isooutbuf->read = done->limit;
+
+ /* mark URB as free */
+ spin_lock_irqsave(&ubc->isooutlock, flags);
+ next = ubc->isooutfree;
+ ubc->isooutfree = done;
+ spin_unlock_irqrestore(&ubc->isooutlock, flags);
+ if (next) {
+ /* only one URB still active - resubmit one */
+ rc = submit_iso_write_urb(next);
+ if (unlikely(rc <= 0 && rc != -ENODEV)) {
+ /* couldn't submit */
+ error_hangup(bcs);
+ }
+ }
+ }
+
+ /* process queued SKBs */
+ while ((skb = skb_dequeue(&bcs->squeue))) {
+ /* copy to output buffer, doing L2 encapsulation */
+ len = skb->len;
+ if (gigaset_isoc_buildframe(bcs, skb->data, len) == -EAGAIN) {
+ /* insufficient buffer space, push back onto queue */
+ skb_queue_head(&bcs->squeue, skb);
+ gig_dbg(DEBUG_ISO, "%s: skb requeued, qlen=%d",
+ __func__, skb_queue_len(&bcs->squeue));
+ break;
+ }
+ skb_pull(skb, len);
+ gigaset_skb_sent(bcs, skb);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+/* Isochronous Read - Bottom Half */
+/* ============================== */
+
+/* read_iso_tasklet
+ * tasklet scheduled when an isochronous input URB from the Gigaset device
+ * has completed
+ * parameter:
+ * data B channel state structure
+ */
+static void read_iso_tasklet(unsigned long data)
+{
+ struct bc_state *bcs = (struct bc_state *) data;
+ struct bas_bc_state *ubc = bcs->hw.bas;
+ struct cardstate *cs = bcs->cs;
+ struct urb *urb;
+ int status;
+ struct usb_iso_packet_descriptor *ifd;
+ char *rcvbuf;
+ unsigned long flags;
+ int totleft, numbytes, offset, frame, rc;
+
+ /* loop while more completed URBs arrive in the meantime */
+ for (;;) {
+ /* retrieve URB */
+ spin_lock_irqsave(&ubc->isoinlock, flags);
+ urb = ubc->isoindone;
+ if (!urb) {
+ spin_unlock_irqrestore(&ubc->isoinlock, flags);
+ return;
+ }
+ status = ubc->isoinstatus;
+ ubc->isoindone = NULL;
+ if (unlikely(ubc->loststatus != -EINPROGRESS)) {
+ dev_warn(cs->dev,
+ "isoc read overrun, URB dropped (status: %s, %d bytes)\n",
+ get_usb_statmsg(ubc->loststatus),
+ ubc->isoinlost);
+ ubc->loststatus = -EINPROGRESS;
+ }
+ spin_unlock_irqrestore(&ubc->isoinlock, flags);
+
+ if (unlikely(!(ubc->running))) {
+ gig_dbg(DEBUG_ISO,
+ "%s: channel not running, "
+ "dropped URB with status: %s",
+ __func__, get_usb_statmsg(status));
+ return;
+ }
+
+ switch (status) {
+ case 0: /* normal completion */
+ break;
+ case -EXDEV: /* inspect individual frames
+ (we do that anyway) */
+ gig_dbg(DEBUG_ISO, "%s: URB partially completed",
+ __func__);
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -EINPROGRESS:
+ gig_dbg(DEBUG_ISO, "%s: %s",
+ __func__, get_usb_statmsg(status));
+ continue; /* -> skip */
+ case -EPIPE:
+ dev_err(cs->dev, "isoc read: stalled\n");
+ error_hangup(bcs);
+ continue; /* -> skip */
+ default: /* other error */
+ dev_warn(cs->dev, "isoc read: %s\n",
+ get_usb_statmsg(status));
+ goto error;
+ }
+
+ rcvbuf = urb->transfer_buffer;
+ totleft = urb->actual_length;
+ for (frame = 0; totleft > 0 && frame < BAS_NUMFRAMES; frame++) {
+ ifd = &urb->iso_frame_desc[frame];
+ numbytes = ifd->actual_length;
+ switch (ifd->status) {
+ case 0: /* success */
+ break;
+ case -EPROTO: /* protocol error or unplug */
+ case -EILSEQ:
+ case -ETIME:
+ /* probably just disconnected, ignore */
+ gig_dbg(DEBUG_ISO,
+ "isoc read: frame %d[%d]: %s\n",
+ frame, numbytes,
+ get_usb_statmsg(ifd->status));
+ break;
+ default: /* other error */
+ /* report, assume transferred bytes are ok */
+ dev_warn(cs->dev,
+ "isoc read: frame %d[%d]: %s\n",
+ frame, numbytes,
+ get_usb_statmsg(ifd->status));
+ }
+ if (unlikely(numbytes > BAS_MAXFRAME))
+ dev_warn(cs->dev,
+ "isoc read: frame %d[%d]: %s\n",
+ frame, numbytes,
+ "exceeds max frame size");
+ if (unlikely(numbytes > totleft)) {
+ dev_warn(cs->dev,
+ "isoc read: frame %d[%d]: %s\n",
+ frame, numbytes,
+ "exceeds total transfer length");
+ numbytes = totleft;
+ }
+ offset = ifd->offset;
+ if (unlikely(offset + numbytes > BAS_INBUFSIZE)) {
+ dev_warn(cs->dev,
+ "isoc read: frame %d[%d]: %s\n",
+ frame, numbytes,
+ "exceeds end of buffer");
+ numbytes = BAS_INBUFSIZE - offset;
+ }
+ gigaset_isoc_receive(rcvbuf + offset, numbytes, bcs);
+ totleft -= numbytes;
+ }
+ if (unlikely(totleft > 0))
+ dev_warn(cs->dev, "isoc read: %d data bytes missing\n",
+ totleft);
+
+error:
+ /* URB processed, resubmit */
+ for (frame = 0; frame < BAS_NUMFRAMES; frame++) {
+ urb->iso_frame_desc[frame].status = 0;
+ urb->iso_frame_desc[frame].actual_length = 0;
+ }
+ /* urb->dev is clobbered by USB subsystem */
+ urb->dev = bcs->cs->hw.bas->udev;
+ urb->transfer_flags = URB_ISO_ASAP;
+ urb->number_of_packets = BAS_NUMFRAMES;
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+ if (unlikely(rc != 0 && rc != -ENODEV)) {
+ dev_err(cs->dev,
+ "could not resubmit isoc read URB: %s\n",
+ get_usb_rcmsg(rc));
+ dump_urb(DEBUG_ISO, "resubmit isoc read", urb);
+ error_hangup(bcs);
+ }
+ }
+}
+
+/* Channel Operations */
+/* ================== */
+
+/* req_timeout
+ * timeout routine for control output request
+ * argument:
+ * controller state structure
+ */
+static void req_timeout(struct timer_list *t)
+{
+ struct bas_cardstate *ucs = from_timer(ucs, t, timer_ctrl);
+ struct cardstate *cs = ucs->cs;
+ int pending;
+ unsigned long flags;
+
+ check_pending(ucs);
+
+ spin_lock_irqsave(&ucs->lock, flags);
+ pending = ucs->pending;
+ ucs->pending = 0;
+ spin_unlock_irqrestore(&ucs->lock, flags);
+
+ switch (pending) {
+ case 0: /* no pending request */
+ gig_dbg(DEBUG_USBREQ, "%s: no request pending", __func__);
+ break;
+
+ case HD_OPEN_ATCHANNEL:
+ dev_err(cs->dev, "timeout opening AT channel\n");
+ error_reset(cs);
+ break;
+
+ case HD_OPEN_B1CHANNEL:
+ dev_err(cs->dev, "timeout opening channel 1\n");
+ error_hangup(&cs->bcs[0]);
+ break;
+
+ case HD_OPEN_B2CHANNEL:
+ dev_err(cs->dev, "timeout opening channel 2\n");
+ error_hangup(&cs->bcs[1]);
+ break;
+
+ case HD_CLOSE_ATCHANNEL:
+ dev_err(cs->dev, "timeout closing AT channel\n");
+ error_reset(cs);
+ break;
+
+ case HD_CLOSE_B1CHANNEL:
+ dev_err(cs->dev, "timeout closing channel 1\n");
+ error_reset(cs);
+ break;
+
+ case HD_CLOSE_B2CHANNEL:
+ dev_err(cs->dev, "timeout closing channel 2\n");
+ error_reset(cs);
+ break;
+
+ case HD_RESET_INTERRUPT_PIPE:
+ /* error recovery escalation */
+ dev_err(cs->dev,
+ "reset interrupt pipe timeout, attempting USB reset\n");
+ usb_queue_reset_device(ucs->interface);
+ break;
+
+ default:
+ dev_warn(cs->dev, "request 0x%02x timed out, clearing\n",
+ pending);
+ }
+
+ wake_up(&ucs->waitqueue);
+}
+
+/* write_ctrl_callback
+ * USB completion handler for control pipe output
+ * called by the USB subsystem in interrupt context
+ * parameter:
+ * urb USB request block of completed request
+ * urb->context = hardware specific controller state structure
+ */
+static void write_ctrl_callback(struct urb *urb)
+{
+ struct bas_cardstate *ucs = urb->context;
+ int status = urb->status;
+ int rc;
+ unsigned long flags;
+
+ /* check status */
+ switch (status) {
+ case 0: /* normal completion */
+ spin_lock_irqsave(&ucs->lock, flags);
+ switch (ucs->pending) {
+ case HD_DEVICE_INIT_ACK: /* no reply expected */
+ del_timer(&ucs->timer_ctrl);
+ ucs->pending = 0;
+ break;
+ }
+ spin_unlock_irqrestore(&ucs->lock, flags);
+ return;
+
+ case -ENOENT: /* cancelled */
+ case -ECONNRESET: /* cancelled (async) */
+ case -EINPROGRESS: /* pending */
+ case -ENODEV: /* device removed */
+ case -ESHUTDOWN: /* device shut down */
+ /* ignore silently */
+ gig_dbg(DEBUG_USBREQ, "%s: %s",
+ __func__, get_usb_statmsg(status));
+ break;
+
+ default: /* any failure */
+ /* don't retry if suspend requested */
+ if (++ucs->retry_ctrl > BAS_RETRY ||
+ (ucs->basstate & BS_SUSPEND)) {
+ dev_err(&ucs->interface->dev,
+ "control request 0x%02x failed: %s\n",
+ ucs->dr_ctrl.bRequest,
+ get_usb_statmsg(status));
+ break; /* give up */
+ }
+ dev_notice(&ucs->interface->dev,
+ "control request 0x%02x: %s, retry %d\n",
+ ucs->dr_ctrl.bRequest, get_usb_statmsg(status),
+ ucs->retry_ctrl);
+ /* urb->dev is clobbered by USB subsystem */
+ urb->dev = ucs->udev;
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+ if (unlikely(rc)) {
+ dev_err(&ucs->interface->dev,
+ "could not resubmit request 0x%02x: %s\n",
+ ucs->dr_ctrl.bRequest, get_usb_rcmsg(rc));
+ break;
+ }
+ /* resubmitted */
+ return;
+ }
+
+ /* failed, clear pending request */
+ spin_lock_irqsave(&ucs->lock, flags);
+ del_timer(&ucs->timer_ctrl);
+ ucs->pending = 0;
+ spin_unlock_irqrestore(&ucs->lock, flags);
+ wake_up(&ucs->waitqueue);
+}
+
+/* req_submit
+ * submit a control output request without message buffer to the Gigaset base
+ * and optionally start a timeout
+ * parameters:
+ * bcs B channel control structure
+ * req control request code (HD_*)
+ * val control request parameter value (set to 0 if unused)
+ * timeout timeout in seconds (0: no timeout)
+ * return value:
+ * 0 on success
+ * -EBUSY if another request is pending
+ * any URB submission error code
+ */
+static int req_submit(struct bc_state *bcs, int req, int val, int timeout)
+{
+ struct bas_cardstate *ucs = bcs->cs->hw.bas;
+ int ret;
+ unsigned long flags;
+
+ gig_dbg(DEBUG_USBREQ, "-------> 0x%02x (%d)", req, val);
+
+ spin_lock_irqsave(&ucs->lock, flags);
+ if (ucs->pending) {
+ spin_unlock_irqrestore(&ucs->lock, flags);
+ dev_err(bcs->cs->dev,
+ "submission of request 0x%02x failed: "
+ "request 0x%02x still pending\n",
+ req, ucs->pending);
+ return -EBUSY;
+ }
+
+ ucs->dr_ctrl.bRequestType = OUT_VENDOR_REQ;
+ ucs->dr_ctrl.bRequest = req;
+ ucs->dr_ctrl.wValue = cpu_to_le16(val);
+ ucs->dr_ctrl.wIndex = 0;
+ ucs->dr_ctrl.wLength = 0;
+ usb_fill_control_urb(ucs->urb_ctrl, ucs->udev,
+ usb_sndctrlpipe(ucs->udev, 0),
+ (unsigned char *) &ucs->dr_ctrl, NULL, 0,
+ write_ctrl_callback, ucs);
+ ucs->retry_ctrl = 0;
+ ret = usb_submit_urb(ucs->urb_ctrl, GFP_ATOMIC);
+ if (unlikely(ret)) {
+ dev_err(bcs->cs->dev, "could not submit request 0x%02x: %s\n",
+ req, get_usb_rcmsg(ret));
+ spin_unlock_irqrestore(&ucs->lock, flags);
+ return ret;
+ }
+ ucs->pending = req;
+
+ if (timeout > 0) {
+ gig_dbg(DEBUG_USBREQ, "setting timeout of %d/10 secs", timeout);
+ mod_timer(&ucs->timer_ctrl, jiffies + timeout * HZ / 10);
+ }
+
+ spin_unlock_irqrestore(&ucs->lock, flags);
+ return 0;
+}
+
+/* gigaset_init_bchannel
+ * called by common.c to connect a B channel
+ * initialize isochronous I/O and tell the Gigaset base to open the channel
+ * argument:
+ * B channel control structure
+ * return value:
+ * 0 on success, error code < 0 on error
+ */
+static int gigaset_init_bchannel(struct bc_state *bcs)
+{
+ struct cardstate *cs = bcs->cs;
+ int req, ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cs->lock, flags);
+ if (unlikely(!cs->connected)) {
+ gig_dbg(DEBUG_USBREQ, "%s: not connected", __func__);
+ spin_unlock_irqrestore(&cs->lock, flags);
+ return -ENODEV;
+ }
+
+ if (cs->hw.bas->basstate & BS_SUSPEND) {
+ dev_notice(cs->dev,
+ "not starting isoc I/O, suspend in progress\n");
+ spin_unlock_irqrestore(&cs->lock, flags);
+ return -EHOSTUNREACH;
+ }
+
+ ret = starturbs(bcs);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&cs->lock, flags);
+ dev_err(cs->dev,
+ "could not start isoc I/O for channel B%d: %s\n",
+ bcs->channel + 1,
+ ret == -EFAULT ? "null URB" : get_usb_rcmsg(ret));
+ if (ret != -ENODEV)
+ error_hangup(bcs);
+ return ret;
+ }
+
+ req = bcs->channel ? HD_OPEN_B2CHANNEL : HD_OPEN_B1CHANNEL;
+ ret = req_submit(bcs, req, 0, BAS_TIMEOUT);
+ if (ret < 0) {
+ dev_err(cs->dev, "could not open channel B%d\n",
+ bcs->channel + 1);
+ stopurbs(bcs->hw.bas);
+ }
+
+ spin_unlock_irqrestore(&cs->lock, flags);
+ if (ret < 0 && ret != -ENODEV)
+ error_hangup(bcs);
+ return ret;
+}
+
+/* gigaset_close_bchannel
+ * called by common.c to disconnect a B channel
+ * tell the Gigaset base to close the channel
+ * stopping isochronous I/O and LL notification will be done when the
+ * acknowledgement for the close arrives
+ * argument:
+ * B channel control structure
+ * return value:
+ * 0 on success, error code < 0 on error
+ */
+static int gigaset_close_bchannel(struct bc_state *bcs)
+{
+ struct cardstate *cs = bcs->cs;
+ int req, ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cs->lock, flags);
+ if (unlikely(!cs->connected)) {
+ spin_unlock_irqrestore(&cs->lock, flags);
+ gig_dbg(DEBUG_USBREQ, "%s: not connected", __func__);
+ return -ENODEV;
+ }
+
+ if (!(cs->hw.bas->basstate & (bcs->channel ? BS_B2OPEN : BS_B1OPEN))) {
+ /* channel not running: just signal common.c */
+ spin_unlock_irqrestore(&cs->lock, flags);
+ gigaset_bchannel_down(bcs);
+ return 0;
+ }
+
+ /* channel running: tell device to close it */
+ req = bcs->channel ? HD_CLOSE_B2CHANNEL : HD_CLOSE_B1CHANNEL;
+ ret = req_submit(bcs, req, 0, BAS_TIMEOUT);
+ if (ret < 0)
+ dev_err(cs->dev, "closing channel B%d failed\n",
+ bcs->channel + 1);
+
+ spin_unlock_irqrestore(&cs->lock, flags);
+ return ret;
+}
+
+/* Device Operations */
+/* ================= */
+
+/* complete_cb
+ * unqueue first command buffer from queue, waking any sleepers
+ * must be called with cs->cmdlock held
+ * parameter:
+ * cs controller state structure
+ */
+static void complete_cb(struct cardstate *cs)
+{
+ struct cmdbuf_t *cb = cs->cmdbuf;
+
+ /* unqueue completed buffer */
+ cs->cmdbytes -= cs->curlen;
+ gig_dbg(DEBUG_OUTPUT, "write_command: sent %u bytes, %u left",
+ cs->curlen, cs->cmdbytes);
+ if (cb->next != NULL) {
+ cs->cmdbuf = cb->next;
+ cs->cmdbuf->prev = NULL;
+ cs->curlen = cs->cmdbuf->len;
+ } else {
+ cs->cmdbuf = NULL;
+ cs->lastcmdbuf = NULL;
+ cs->curlen = 0;
+ }
+
+ if (cb->wake_tasklet)
+ tasklet_schedule(cb->wake_tasklet);
+
+ kfree(cb);
+}
+
+/* write_command_callback
+ * USB completion handler for AT command transmission
+ * called by the USB subsystem in interrupt context
+ * parameter:
+ * urb USB request block of completed request
+ * urb->context = controller state structure
+ */
+static void write_command_callback(struct urb *urb)
+{
+ struct cardstate *cs = urb->context;
+ struct bas_cardstate *ucs = cs->hw.bas;
+ int status = urb->status;
+ unsigned long flags;
+
+ update_basstate(ucs, 0, BS_ATWRPEND);
+ wake_up(&ucs->waitqueue);
+
+ /* check status */
+ switch (status) {
+ case 0: /* normal completion */
+ break;
+ case -ENOENT: /* cancelled */
+ case -ECONNRESET: /* cancelled (async) */
+ case -EINPROGRESS: /* pending */
+ case -ENODEV: /* device removed */
+ case -ESHUTDOWN: /* device shut down */
+ /* ignore silently */
+ gig_dbg(DEBUG_USBREQ, "%s: %s",
+ __func__, get_usb_statmsg(status));
+ return;
+ default: /* any failure */
+ if (++ucs->retry_cmd_out > BAS_RETRY) {
+ dev_warn(cs->dev,
+ "command write: %s, "
+ "giving up after %d retries\n",
+ get_usb_statmsg(status),
+ ucs->retry_cmd_out);
+ break;
+ }
+ if (ucs->basstate & BS_SUSPEND) {
+ dev_warn(cs->dev,
+ "command write: %s, "
+ "won't retry - suspend requested\n",
+ get_usb_statmsg(status));
+ break;
+ }
+ if (cs->cmdbuf == NULL) {
+ dev_warn(cs->dev,
+ "command write: %s, "
+ "cannot retry - cmdbuf gone\n",
+ get_usb_statmsg(status));
+ break;
+ }
+ dev_notice(cs->dev, "command write: %s, retry %d\n",
+ get_usb_statmsg(status), ucs->retry_cmd_out);
+ if (atwrite_submit(cs, cs->cmdbuf->buf, cs->cmdbuf->len) >= 0)
+ /* resubmitted - bypass regular exit block */
+ return;
+ /* command send failed, assume base still waiting */
+ update_basstate(ucs, BS_ATREADY, 0);
+ }
+
+ spin_lock_irqsave(&cs->cmdlock, flags);
+ if (cs->cmdbuf != NULL)
+ complete_cb(cs);
+ spin_unlock_irqrestore(&cs->cmdlock, flags);
+}
+
+/* atrdy_timeout
+ * timeout routine for AT command transmission
+ * argument:
+ *	t	timer list entry embedded in the controller state structure
+ */
+static void atrdy_timeout(struct timer_list *t)
+{
+ struct bas_cardstate *ucs = from_timer(ucs, t, timer_atrdy);
+ struct cardstate *cs = ucs->cs;
+
+ dev_warn(cs->dev, "timeout waiting for HD_READY_SEND_ATDATA\n");
+
+ /* fake the missing signal - what else can I do? */
+ update_basstate(ucs, BS_ATREADY, BS_ATTIMER);
+ start_cbsend(cs);
+}
+
+/* atwrite_submit
+ * submit an HD_WRITE_ATMESSAGE command URB
+ * parameters:
+ * cs controller state structure
+ * buf buffer containing command to send
+ * len length of command to send
+ * return value:
+ * 0 on success
+ * -EBUSY if another request is pending
+ * any URB submission error code
+ */
+static int atwrite_submit(struct cardstate *cs, unsigned char *buf, int len)
+{
+ struct bas_cardstate *ucs = cs->hw.bas;
+ int rc;
+
+ gig_dbg(DEBUG_USBREQ, "-------> HD_WRITE_ATMESSAGE (%d)", len);
+
+ if (update_basstate(ucs, BS_ATWRPEND, 0) & BS_ATWRPEND) {
+ dev_err(cs->dev,
+ "could not submit HD_WRITE_ATMESSAGE: URB busy\n");
+ return -EBUSY;
+ }
+
+ ucs->dr_cmd_out.bRequestType = OUT_VENDOR_REQ;
+ ucs->dr_cmd_out.bRequest = HD_WRITE_ATMESSAGE;
+ ucs->dr_cmd_out.wValue = 0;
+ ucs->dr_cmd_out.wIndex = 0;
+ ucs->dr_cmd_out.wLength = cpu_to_le16(len);
+ usb_fill_control_urb(ucs->urb_cmd_out, ucs->udev,
+ usb_sndctrlpipe(ucs->udev, 0),
+ (unsigned char *) &ucs->dr_cmd_out, buf, len,
+ write_command_callback, cs);
+ rc = usb_submit_urb(ucs->urb_cmd_out, GFP_ATOMIC);
+ if (unlikely(rc)) {
+ update_basstate(ucs, 0, BS_ATWRPEND);
+ dev_err(cs->dev, "could not submit HD_WRITE_ATMESSAGE: %s\n",
+ get_usb_rcmsg(rc));
+ return rc;
+ }
+
+ /* submitted successfully, start timeout if necessary */
+ if (!(update_basstate(ucs, BS_ATTIMER, BS_ATREADY) & BS_ATTIMER)) {
+ gig_dbg(DEBUG_OUTPUT, "setting ATREADY timeout of %d/10 secs",
+ ATRDY_TIMEOUT);
+ mod_timer(&ucs->timer_atrdy, jiffies + ATRDY_TIMEOUT * HZ / 10);
+ }
+ return 0;
+}
+
+/* start_cbsend
+ * start transmission of AT command queue if necessary
+ * parameter:
+ * cs controller state structure
+ * return value:
+ * 0 on success
+ * error code < 0 on error
+ */
+static int start_cbsend(struct cardstate *cs)
+{
+ struct cmdbuf_t *cb;
+ struct bas_cardstate *ucs = cs->hw.bas;
+ unsigned long flags;
+ int rc;
+ int retval = 0;
+
+ /* check if suspend requested */
+ if (ucs->basstate & BS_SUSPEND) {
+ gig_dbg(DEBUG_OUTPUT, "suspending");
+ return -EHOSTUNREACH;
+ }
+
+ /* check if AT channel is open */
+ if (!(ucs->basstate & BS_ATOPEN)) {
+ gig_dbg(DEBUG_OUTPUT, "AT channel not open");
+ rc = req_submit(cs->bcs, HD_OPEN_ATCHANNEL, 0, BAS_TIMEOUT);
+ if (rc < 0) {
+ /* flush command queue */
+ spin_lock_irqsave(&cs->cmdlock, flags);
+ while (cs->cmdbuf != NULL)
+ complete_cb(cs);
+ spin_unlock_irqrestore(&cs->cmdlock, flags);
+ }
+ return rc;
+ }
+
+ /* try to send first command in queue */
+ spin_lock_irqsave(&cs->cmdlock, flags);
+
+ while ((cb = cs->cmdbuf) != NULL && (ucs->basstate & BS_ATREADY)) {
+ ucs->retry_cmd_out = 0;
+ rc = atwrite_submit(cs, cb->buf, cb->len);
+ if (unlikely(rc)) {
+ retval = rc;
+ complete_cb(cs);
+ }
+ }
+
+ spin_unlock_irqrestore(&cs->cmdlock, flags);
+ return retval;
+}
+
+/* gigaset_write_cmd
+ * This function is called by the device independent part of the driver
+ * to transmit an AT command string to the Gigaset device.
+ * It encapsulates the device specific method for transmission over the
+ * direct USB connection to the base.
+ * The command string is added to the queue of commands to send, and
+ * USB transmission is started if necessary.
+ * parameters:
+ * cs controller state structure
+ * cb command buffer structure
+ * return value:
+ * number of bytes queued on success
+ * error code < 0 on error
+ */
+static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb)
+{
+ unsigned long flags;
+ int rc;
+
+ gigaset_dbg_buffer(cs->mstate != MS_LOCKED ?
+ DEBUG_TRANSCMD : DEBUG_LOCKCMD,
+ "CMD Transmit", cb->len, cb->buf);
+
+ /* translate "+++" escape sequence sent as a single separate command
+ * into "close AT channel" command for error recovery
+ * The next command will reopen the AT channel automatically.
+ */
+ if (cb->len == 3 && !memcmp(cb->buf, "+++", 3)) {
+ /* If an HD_RECEIVEATDATA_ACK message remains unhandled
+ * because of an error, the base never sends another one.
+ * The response channel is thus effectively blocked.
+ * Closing and reopening the AT channel does *not* clear
+ * this condition.
+ * As a stopgap measure, submit a zero-length AT read
+ * before closing the AT channel. This has the undocumented
+ * effect of triggering a new HD_RECEIVEATDATA_ACK message
+ * from the base if necessary.
+ * The subsequent AT channel close then discards any pending
+ * messages.
+ */
+ spin_lock_irqsave(&cs->lock, flags);
+ if (!(cs->hw.bas->basstate & BS_ATRDPEND)) {
+ kfree(cs->hw.bas->rcvbuf);
+ cs->hw.bas->rcvbuf = NULL;
+ cs->hw.bas->rcvbuf_size = 0;
+ cs->hw.bas->retry_cmd_in = 0;
+ atread_submit(cs, 0);
+ }
+ spin_unlock_irqrestore(&cs->lock, flags);
+
+ rc = req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT);
+ if (cb->wake_tasklet)
+ tasklet_schedule(cb->wake_tasklet);
+ if (!rc)
+ rc = cb->len;
+ kfree(cb);
+ return rc;
+ }
+
+ spin_lock_irqsave(&cs->cmdlock, flags);
+ cb->prev = cs->lastcmdbuf;
+ if (cs->lastcmdbuf)
+ cs->lastcmdbuf->next = cb;
+ else {
+ cs->cmdbuf = cb;
+ cs->curlen = cb->len;
+ }
+ cs->cmdbytes += cb->len;
+ cs->lastcmdbuf = cb;
+ spin_unlock_irqrestore(&cs->cmdlock, flags);
+
+ spin_lock_irqsave(&cs->lock, flags);
+ if (unlikely(!cs->connected)) {
+ spin_unlock_irqrestore(&cs->lock, flags);
+ gig_dbg(DEBUG_USBREQ, "%s: not connected", __func__);
+ /* flush command queue */
+ spin_lock_irqsave(&cs->cmdlock, flags);
+ while (cs->cmdbuf != NULL)
+ complete_cb(cs);
+ spin_unlock_irqrestore(&cs->cmdlock, flags);
+ return -ENODEV;
+ }
+ rc = start_cbsend(cs);
+ spin_unlock_irqrestore(&cs->lock, flags);
+ return rc < 0 ? rc : cb->len;
+}
+
+/* gigaset_write_room
+ * tty_driver.write_room interface routine
+ * return the number of characters the driver will accept for writing
+ * via gigaset_write_cmd
+ * parameter:
+ * controller state structure
+ * return value:
+ * number of characters
+ */
+static int gigaset_write_room(struct cardstate *cs)
+{
+ return IF_WRITEBUF;
+}
+
+/* gigaset_chars_in_buffer
+ * tty_driver.chars_in_buffer interface routine
+ * return number of characters waiting to be sent
+ * parameter:
+ * controller state structure
+ * return value:
+ * number of characters
+ */
+static int gigaset_chars_in_buffer(struct cardstate *cs)
+{
+ return cs->cmdbytes;
+}
+
+/* gigaset_brkchars
+ * implementation of ioctl(GIGASET_BRKCHARS)
+ * parameter:
+ * controller state structure
+ * return value:
+ * -EINVAL (unimplemented function)
+ */
+static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
+{
+ return -EINVAL;
+}
+
+
+/* Device Initialization/Shutdown */
+/* ============================== */
+
+/* Free hardware dependent part of the B channel structure
+ * parameter:
+ * bcs B channel structure
+ */
+static void gigaset_freebcshw(struct bc_state *bcs)
+{
+ struct bas_bc_state *ubc = bcs->hw.bas;
+ int i;
+
+ if (!ubc)
+ return;
+
+ /* kill URBs and tasklets before freeing - better safe than sorry */
+ ubc->running = 0;
+ gig_dbg(DEBUG_INIT, "%s: killing isoc URBs", __func__);
+ for (i = 0; i < BAS_OUTURBS; ++i) {
+ usb_kill_urb(ubc->isoouturbs[i].urb);
+ usb_free_urb(ubc->isoouturbs[i].urb);
+ }
+ for (i = 0; i < BAS_INURBS; ++i) {
+ usb_kill_urb(ubc->isoinurbs[i]);
+ usb_free_urb(ubc->isoinurbs[i]);
+ }
+ tasklet_kill(&ubc->sent_tasklet);
+ tasklet_kill(&ubc->rcvd_tasklet);
+ kfree(ubc->isooutbuf);
+ kfree(ubc);
+ bcs->hw.bas = NULL;
+}
+
+/* Initialize hardware dependent part of the B channel structure
+ * parameter:
+ * bcs B channel structure
+ * return value:
+ * 0 on success, error code < 0 on failure
+ */
+static int gigaset_initbcshw(struct bc_state *bcs)
+{
+ int i;
+ struct bas_bc_state *ubc;
+
+ bcs->hw.bas = ubc = kmalloc(sizeof(struct bas_bc_state), GFP_KERNEL);
+ if (!ubc) {
+ pr_err("out of memory\n");
+ return -ENOMEM;
+ }
+
+ ubc->running = 0;
+ atomic_set(&ubc->corrbytes, 0);
+ spin_lock_init(&ubc->isooutlock);
+ for (i = 0; i < BAS_OUTURBS; ++i) {
+ ubc->isoouturbs[i].urb = NULL;
+ ubc->isoouturbs[i].bcs = bcs;
+ }
+ ubc->isooutdone = ubc->isooutfree = ubc->isooutovfl = NULL;
+ ubc->numsub = 0;
+ ubc->isooutbuf = kmalloc(sizeof(struct isowbuf_t), GFP_KERNEL);
+ if (!ubc->isooutbuf) {
+ pr_err("out of memory\n");
+ kfree(ubc);
+ bcs->hw.bas = NULL;
+ return -ENOMEM;
+ }
+ tasklet_init(&ubc->sent_tasklet,
+ write_iso_tasklet, (unsigned long) bcs);
+
+ spin_lock_init(&ubc->isoinlock);
+ for (i = 0; i < BAS_INURBS; ++i)
+ ubc->isoinurbs[i] = NULL;
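+ /* reset isochronous input state and statistics counters */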
+ ubc->isoindone = NULL;
+ ubc->loststatus = -EINPROGRESS;
+ ubc->isoinlost = 0;
+ ubc->seqlen = 0;
+ ubc->inbyte = 0;
+ ubc->inbits = 0;
+ ubc->goodbytes = 0;
+ ubc->alignerrs = 0;
+ ubc->fcserrs = 0;
+ ubc->frameerrs = 0;
+ ubc->giants = 0;
+ ubc->runts = 0;
+ ubc->aborts = 0;
+ ubc->shared0s = 0;
+ ubc->stolen0s = 0;
+ tasklet_init(&ubc->rcvd_tasklet,
+ read_iso_tasklet, (unsigned long) bcs);
+ return 0;
+}
+
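+/* Reinitialize hardware dependent part of the B channel structure
+ * for reuse after a connection ends: reset counters and locks
+ * parameter:
+ * bcs B channel structure
+ */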
+static void gigaset_reinitbcshw(struct bc_state *bcs)
+{
+ struct bas_bc_state *ubc = bcs->hw.bas;
+
+ bcs->hw.bas->running = 0;
+ atomic_set(&bcs->hw.bas->corrbytes, 0);
+ bcs->hw.bas->numsub = 0;
+ spin_lock_init(&ubc->isooutlock);
+ spin_lock_init(&ubc->isoinlock);
+ ubc->loststatus = -EINPROGRESS;
+}
+
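+/* Free hardware dependent part of the cardstate structure
+ * parameter:
+ * cs cardstate structure
+ */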
+static void gigaset_freecshw(struct cardstate *cs)
+{
+ /* timers, URBs and rcvbuf are disposed of in disconnect */
+ kfree(cs->hw.bas->int_in_buf);
+ kfree(cs->hw.bas);
+ cs->hw.bas = NULL;
+}
+
+/* Initialize hardware dependent part of the cardstate structure
+ * parameter:
+ * cs cardstate structure
+ * return value:
+ * 0 on success, error code < 0 on failure
+ */
+static int gigaset_initcshw(struct cardstate *cs)
+{
+ struct bas_cardstate *ucs;
+
+ cs->hw.bas = ucs = kzalloc(sizeof(*ucs), GFP_KERNEL);
+ if (!ucs) {
+ pr_err("out of memory\n");
+ return -ENOMEM;
+ }
+ ucs->int_in_buf = kmalloc(IP_MSGSIZE, GFP_KERNEL);
+ if (!ucs->int_in_buf) {
+ kfree(ucs);
+ pr_err("out of memory\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&ucs->lock);
+ ucs->cs = cs;
+ timer_setup(&ucs->timer_ctrl, req_timeout, 0);
+ timer_setup(&ucs->timer_atrdy, atrdy_timeout, 0);
+ timer_setup(&ucs->timer_cmd_in, cmd_in_timeout, 0);
+ timer_setup(&ucs->timer_int_in, int_in_resubmit, 0);
+ init_waitqueue_head(&ucs->waitqueue);
+ INIT_WORK(&ucs->int_in_wq, int_in_work);
+
+ return 0;
+}
+
+/* freeurbs
+ * unlink and deallocate all URBs unconditionally
+ * caller must make sure that no commands are still in progress
+ * parameter:
+ * cs controller state structure
+ */
+static void freeurbs(struct cardstate *cs)
+{
+ struct bas_cardstate *ucs = cs->hw.bas;
+ struct bas_bc_state *ubc;
+ int i, j;
+
+ gig_dbg(DEBUG_INIT, "%s: killing URBs", __func__);
+ for (j = 0; j < BAS_CHANNELS; ++j) {
+ ubc = cs->bcs[j].hw.bas;
+ for (i = 0; i < BAS_OUTURBS; ++i) {
+ usb_kill_urb(ubc->isoouturbs[i].urb);
+ usb_free_urb(ubc->isoouturbs[i].urb);
+ ubc->isoouturbs[i].urb = NULL;
+ }
+ for (i = 0; i < BAS_INURBS; ++i) {
+ usb_kill_urb(ubc->isoinurbs[i]);
+ usb_free_urb(ubc->isoinurbs[i]);
+ ubc->isoinurbs[i] = NULL;
+ }
+ }
+ usb_kill_urb(ucs->urb_int_in);
+ usb_free_urb(ucs->urb_int_in);
+ ucs->urb_int_in = NULL;
+ usb_kill_urb(ucs->urb_cmd_out);
+ usb_free_urb(ucs->urb_cmd_out);
+ ucs->urb_cmd_out = NULL;
+ usb_kill_urb(ucs->urb_cmd_in);
+ usb_free_urb(ucs->urb_cmd_in);
+ ucs->urb_cmd_in = NULL;
+ usb_kill_urb(ucs->urb_ctrl);
+ usb_free_urb(ucs->urb_ctrl);
+ ucs->urb_ctrl = NULL;
+}
+
+/* gigaset_probe
+ * This function is called when a new USB device is connected.
+ * It checks whether the new device is handled by this driver.
+ */
+static int gigaset_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct usb_host_interface *hostif;
+ struct usb_device *udev = interface_to_usbdev(interface);
+ struct cardstate *cs = NULL;
+ struct bas_cardstate *ucs = NULL;
+ struct bas_bc_state *ubc;
+ struct usb_endpoint_descriptor *endpoint;
+ int i, j;
+ int rc;
+
+ gig_dbg(DEBUG_INIT,
+ "%s: Check if device matches .. (Vendor: 0x%x, Product: 0x%x)",
+ __func__, le16_to_cpu(udev->descriptor.idVendor),
+ le16_to_cpu(udev->descriptor.idProduct));
+
+ /* set required alternate setting */
+ hostif = interface->cur_altsetting;
+ if (hostif->desc.bAlternateSetting != 3) {
+ gig_dbg(DEBUG_INIT,
+ "%s: wrong alternate setting %d - trying to switch",
+ __func__, hostif->desc.bAlternateSetting);
+ if (usb_set_interface(udev, hostif->desc.bInterfaceNumber, 3)
+ < 0) {
+ dev_warn(&udev->dev, "usb_set_interface failed, "
+ "device %d interface %d altsetting %d\n",
+ udev->devnum, hostif->desc.bInterfaceNumber,
+ hostif->desc.bAlternateSetting);
+ return -ENODEV;
+ }
+ hostif = interface->cur_altsetting;
+ }
+
+ /* Reject application-specific interfaces */
+ if (hostif->desc.bInterfaceClass != 255) {
+ dev_warn(&udev->dev, "%s: bInterfaceClass == %d\n",
+ __func__, hostif->desc.bInterfaceClass);
+ return -ENODEV;
+ }
+
+ if (hostif->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
+ dev_info(&udev->dev,
+ "%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n",
+ __func__, le16_to_cpu(udev->descriptor.idVendor),
+ le16_to_cpu(udev->descriptor.idProduct));
+
+ /* allocate memory for our device state and initialize it */
+ cs = gigaset_initcs(driver, BAS_CHANNELS, 0, 0, cidmode,
+ GIGASET_MODULENAME);
+ if (!cs)
+ return -ENODEV;
+ ucs = cs->hw.bas;
+
+ /* save off device structure ptrs for later use */
+ usb_get_dev(udev);
+ ucs->udev = udev;
+ ucs->interface = interface;
+ cs->dev = &interface->dev;
+
+ /* allocate URBs:
+ * - one for the interrupt pipe
+ * - three for the different uses of the default control pipe
+ * - three for each isochronous pipe
+ */
+ if (!(ucs->urb_int_in = usb_alloc_urb(0, GFP_KERNEL)) ||
+ !(ucs->urb_cmd_in = usb_alloc_urb(0, GFP_KERNEL)) ||
+ !(ucs->urb_cmd_out = usb_alloc_urb(0, GFP_KERNEL)) ||
+ !(ucs->urb_ctrl = usb_alloc_urb(0, GFP_KERNEL)))
+ goto allocerr;
+
+ for (j = 0; j < BAS_CHANNELS; ++j) {
+ ubc = cs->bcs[j].hw.bas;
+ for (i = 0; i < BAS_OUTURBS; ++i)
+ if (!(ubc->isoouturbs[i].urb =
+ usb_alloc_urb(BAS_NUMFRAMES, GFP_KERNEL)))
+ goto allocerr;
+ for (i = 0; i < BAS_INURBS; ++i)
+ if (!(ubc->isoinurbs[i] =
+ usb_alloc_urb(BAS_NUMFRAMES, GFP_KERNEL)))
+ goto allocerr;
+ }
+
+ ucs->rcvbuf = NULL;
+ ucs->rcvbuf_size = 0;
+
+ /* Fill the interrupt urb and send it to the core */
+ endpoint = &hostif->endpoint[0].desc;
+ usb_fill_int_urb(ucs->urb_int_in, udev,
+ usb_rcvintpipe(udev,
+ usb_endpoint_num(endpoint)),
+ ucs->int_in_buf, IP_MSGSIZE, read_int_callback, cs,
+ endpoint->bInterval);
+ rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL);
+ if (rc != 0) {
+ dev_err(cs->dev, "could not submit interrupt URB: %s\n",
+ get_usb_rcmsg(rc));
+ goto error;
+ }
+ ucs->retry_int_in = 0;
+
+ /* tell the device that the driver is ready */
+ rc = req_submit(cs->bcs, HD_DEVICE_INIT_ACK, 0, 0);
+ if (rc != 0)
+ goto error;
+
+ /* tell common part that the device is ready */
+ if (startmode == SM_LOCKED)
+ cs->mstate = MS_LOCKED;
+
+ /* save address of controller structure */
+ usb_set_intfdata(interface, cs);
+
+ rc = gigaset_start(cs);
+ if (rc < 0)
+ goto error;
+
+ return 0;
+
+allocerr:
+ dev_err(cs->dev, "could not allocate URBs\n");
+ rc = -ENOMEM;
+error:
+ freeurbs(cs);
+ usb_set_intfdata(interface, NULL);
+ usb_put_dev(udev);
+ gigaset_freecs(cs);
+ return rc;
+}
+
+/* gigaset_disconnect
+ * This function is called when the Gigaset base is unplugged.
+ */
+static void gigaset_disconnect(struct usb_interface *interface)
+{
+ struct cardstate *cs;
+ struct bas_cardstate *ucs;
+ int j;
+
+ cs = usb_get_intfdata(interface);
+
+ ucs = cs->hw.bas;
+
+ dev_info(cs->dev, "disconnecting Gigaset base\n");
+
+ /* mark base as not ready, all channels disconnected */
+ ucs->basstate = 0;
+
+ /* tell LL all channels are down */
+ for (j = 0; j < BAS_CHANNELS; ++j)
+ gigaset_bchannel_down(cs->bcs + j);
+
+ /* stop driver (common part) */
+ gigaset_stop(cs);
+
+ /* stop delayed work and URBs, free resources */
+ del_timer_sync(&ucs->timer_ctrl);
+ del_timer_sync(&ucs->timer_atrdy);
+ del_timer_sync(&ucs->timer_cmd_in);
+ del_timer_sync(&ucs->timer_int_in);
+ cancel_work_sync(&ucs->int_in_wq);
+ freeurbs(cs);
+ usb_set_intfdata(interface, NULL);
+ kfree(ucs->rcvbuf);
+ ucs->rcvbuf = NULL;
+ ucs->rcvbuf_size = 0;
+ usb_put_dev(ucs->udev);
+ ucs->interface = NULL;
+ ucs->udev = NULL;
+ cs->dev = NULL;
+ gigaset_freecs(cs);
+}
+
+/* gigaset_suspend
+ * This function is called before the USB connection is suspended
+ * or before the USB device is reset.
+ * In the latter case, message == PMSG_ON.
+ */
+static int gigaset_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct cardstate *cs = usb_get_intfdata(intf);
+ struct bas_cardstate *ucs = cs->hw.bas;
+ int rc;
+
+ /* set suspend flag; this stops AT command/response traffic */
+ if (update_basstate(ucs, BS_SUSPEND, 0) & BS_SUSPEND) {
+ gig_dbg(DEBUG_SUSPEND, "already suspended");
+ return 0;
+ }
+
+ /* wait a bit for blocking conditions to go away */
+ rc = wait_event_timeout(ucs->waitqueue,
+ !(ucs->basstate &
+ (BS_B1OPEN | BS_B2OPEN | BS_ATRDPEND | BS_ATWRPEND)),
+ BAS_TIMEOUT * HZ / 10);
+ gig_dbg(DEBUG_SUSPEND, "wait_event_timeout() -> %d", rc);
+
+ /* check for conditions preventing suspend */
+ if (ucs->basstate & (BS_B1OPEN | BS_B2OPEN | BS_ATRDPEND | BS_ATWRPEND)) {
+ dev_warn(cs->dev, "cannot suspend:\n");
+ if (ucs->basstate & BS_B1OPEN)
+ dev_warn(cs->dev, " B channel 1 open\n");
+ if (ucs->basstate & BS_B2OPEN)
+ dev_warn(cs->dev, " B channel 2 open\n");
+ if (ucs->basstate & BS_ATRDPEND)
+ dev_warn(cs->dev, " receiving AT reply\n");
+ if (ucs->basstate & BS_ATWRPEND)
+ dev_warn(cs->dev, " sending AT command\n");
+ update_basstate(ucs, 0, BS_SUSPEND);
+ return -EBUSY;
+ }
+
+ /* close AT channel if open */
+ if (ucs->basstate & BS_ATOPEN) {
+ gig_dbg(DEBUG_SUSPEND, "closing AT channel");
+ rc = req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, 0);
+ if (rc) {
+ update_basstate(ucs, 0, BS_SUSPEND);
+ return rc;
+ }
+ wait_event_timeout(ucs->waitqueue, !ucs->pending,
+ BAS_TIMEOUT * HZ / 10);
+ /* in case of timeout, proceed anyway */
+ }
+
+ /* kill all URBs and delayed work that might still be pending */
+ usb_kill_urb(ucs->urb_ctrl);
+ usb_kill_urb(ucs->urb_int_in);
+ del_timer_sync(&ucs->timer_ctrl);
+ del_timer_sync(&ucs->timer_atrdy);
+ del_timer_sync(&ucs->timer_cmd_in);
+ del_timer_sync(&ucs->timer_int_in);
+
+ /* don't try to cancel int_in_wq from within reset as it
+ * might be the one requesting the reset
+ */
+ if (message.event != PM_EVENT_ON)
+ cancel_work_sync(&ucs->int_in_wq);
+
+ gig_dbg(DEBUG_SUSPEND, "suspend complete");
+ return 0;
+}
+
+/* gigaset_resume
+ * This function is called after the USB connection has been resumed.
+ */
+static int gigaset_resume(struct usb_interface *intf)
+{
+ struct cardstate *cs = usb_get_intfdata(intf);
+ struct bas_cardstate *ucs = cs->hw.bas;
+ int rc;
+
+ /* resubmit interrupt URB for spontaneous messages from base */
+ rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL);
+ if (rc) {
+ dev_err(cs->dev, "could not resubmit interrupt URB: %s\n",
+ get_usb_rcmsg(rc));
+ return rc;
+ }
+ ucs->retry_int_in = 0;
+
+ /* clear suspend flag to reallow activity */
+ update_basstate(ucs, 0, BS_SUSPEND);
+
+ gig_dbg(DEBUG_SUSPEND, "resume complete");
+ return 0;
+}
+
+/* gigaset_pre_reset
+ * This function is called before the USB connection is reset.
+ */
+static int gigaset_pre_reset(struct usb_interface *intf)
+{
+ /* handle just like suspend */
+ return gigaset_suspend(intf, PMSG_ON);
+}
+
+/* gigaset_post_reset
+ * This function is called after the USB connection has been reset.
+ */
+static int gigaset_post_reset(struct usb_interface *intf)
+{
+ /* FIXME: send HD_DEVICE_INIT_ACK? */
+
+ /* resume operations */
+ return gigaset_resume(intf);
+}
+
+
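+/* device specific operations table passed to the common module */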
+static const struct gigaset_ops gigops = {
+ .write_cmd = gigaset_write_cmd,
+ .write_room = gigaset_write_room,
+ .chars_in_buffer = gigaset_chars_in_buffer,
+ .brkchars = gigaset_brkchars,
+ .init_bchannel = gigaset_init_bchannel,
+ .close_bchannel = gigaset_close_bchannel,
+ .initbcshw = gigaset_initbcshw,
+ .freebcshw = gigaset_freebcshw,
+ .reinitbcshw = gigaset_reinitbcshw,
+ .initcshw = gigaset_initcshw,
+ .freecshw = gigaset_freecshw,
+ .set_modem_ctrl = gigaset_set_modem_ctrl,
+ .baud_rate = gigaset_baud_rate,
+ .set_line_ctrl = gigaset_set_line_ctrl,
+ .send_skb = gigaset_isoc_send_skb,
+ .handle_input = gigaset_isoc_input,
+};
+
+/* bas_gigaset_init
+ * This function is called after the kernel module is loaded.
+ */
+static int __init bas_gigaset_init(void)
+{
+ int result;
+
+ /* allocate memory for our driver state and initialize it */
+ driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
+ GIGASET_MODULENAME, GIGASET_DEVNAME,
+ &gigops, THIS_MODULE);
+ if (driver == NULL)
+ goto error;
+
+ /* register this driver with the USB subsystem */
+ result = usb_register(&gigaset_usb_driver);
+ if (result < 0) {
+ pr_err("error %d registering USB driver\n", -result);
+ goto error;
+ }
+
+ pr_info(DRIVER_DESC "\n");
+ return 0;
+
+error:
+ if (driver)
+ gigaset_freedriver(driver);
+ driver = NULL;
+ return -1;
+}
+
+/* bas_gigaset_exit
+ * This function is called before the kernel module is unloaded.
+ */
+static void __exit bas_gigaset_exit(void)
+{
+ struct bas_cardstate *ucs;
+ int i;
+
+ gigaset_blockdriver(driver); /* => probe will fail
+ * => no gigaset_start any more
+ */
+
+ /* stop all connected devices */
+ for (i = 0; i < driver->minors; i++) {
+ if (gigaset_shutdown(driver->cs + i) < 0)
+ continue; /* no device */
+ /* from now on, no isdn callback should be possible */
+
+ /* close all still open channels */
+ ucs = driver->cs[i].hw.bas;
+ if (ucs->basstate & BS_B1OPEN) {
+ gig_dbg(DEBUG_INIT, "closing B1 channel");
+ usb_control_msg(ucs->udev,
+ usb_sndctrlpipe(ucs->udev, 0),
+ HD_CLOSE_B1CHANNEL, OUT_VENDOR_REQ,
+ 0, 0, NULL, 0, BAS_TIMEOUT);
+ }
+ if (ucs->basstate & BS_B2OPEN) {
+ gig_dbg(DEBUG_INIT, "closing B2 channel");
+ usb_control_msg(ucs->udev,
+ usb_sndctrlpipe(ucs->udev, 0),
+ HD_CLOSE_B2CHANNEL, OUT_VENDOR_REQ,
+ 0, 0, NULL, 0, BAS_TIMEOUT);
+ }
+ if (ucs->basstate & BS_ATOPEN) {
+ gig_dbg(DEBUG_INIT, "closing AT channel");
+ usb_control_msg(ucs->udev,
+ usb_sndctrlpipe(ucs->udev, 0),
+ HD_CLOSE_ATCHANNEL, OUT_VENDOR_REQ,
+ 0, 0, NULL, 0, BAS_TIMEOUT);
+ }
+ ucs->basstate = 0;
+ }
+
+ /* deregister this driver with the USB subsystem */
+ usb_deregister(&gigaset_usb_driver);
+ /* this will call the disconnect-callback */
+ /* from now on, no disconnect/probe callback should be running */
+
+ gigaset_freedriver(driver);
+ driver = NULL;
+}
+
+
+module_init(bas_gigaset_init);
+module_exit(bas_gigaset_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/isdn/gigaset/capi.c b/drivers/staging/isdn/gigaset/capi.c
new file mode 100644
index 000000000000..83d7dd48c61d
--- /dev/null
+++ b/drivers/staging/isdn/gigaset/capi.c
@@ -0,0 +1,2517 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Kernel CAPI interface for the Gigaset driver
+ *
+ * Copyright (c) 2009 by Tilman Schmidt <tilman@imap.cc>.
+ *
+ * =====================================================================
+ * =====================================================================
+ */
+
+#include "gigaset.h"
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/ratelimit.h>
+#include <linux/isdn/capilli.h>
+#include <linux/isdn/capicmd.h>
+#include <linux/isdn/capiutil.h>
+#include <linux/export.h>
+
+/* missing from kernelcapi.h */
+#define CapiNcpiNotSupportedByProtocol 0x0001
+#define CapiFlagsNotSupportedByProtocol 0x0002
+#define CapiAlertAlreadySent 0x0003
+#define CapiFacilitySpecificFunctionNotSupported 0x3011
+
+/* missing from capicmd.h */
+#define CAPI_CONNECT_IND_BASELEN (CAPI_MSG_BASELEN + 4 + 2 + 8 * 1)
+#define CAPI_CONNECT_ACTIVE_IND_BASELEN (CAPI_MSG_BASELEN + 4 + 3 * 1)
+#define CAPI_CONNECT_B3_IND_BASELEN (CAPI_MSG_BASELEN + 4 + 1)
+#define CAPI_CONNECT_B3_ACTIVE_IND_BASELEN (CAPI_MSG_BASELEN + 4 + 1)
+#define CAPI_DATA_B3_REQ_LEN64 (CAPI_MSG_BASELEN + 4 + 4 + 2 + 2 + 2 + 8)
+#define CAPI_DATA_B3_CONF_LEN (CAPI_MSG_BASELEN + 4 + 2 + 2)
+#define CAPI_DISCONNECT_IND_LEN (CAPI_MSG_BASELEN + 4 + 2)
+#define CAPI_DISCONNECT_B3_IND_BASELEN (CAPI_MSG_BASELEN + 4 + 2 + 1)
+#define CAPI_FACILITY_CONF_BASELEN (CAPI_MSG_BASELEN + 4 + 2 + 2 + 1)
+/* most _CONF messages contain only Controller/PLCI/NCCI and Info parameters */
+#define CAPI_STDCONF_LEN (CAPI_MSG_BASELEN + 4 + 2)
+
+#define CAPI_FACILITY_HANDSET 0x0000
+#define CAPI_FACILITY_DTMF 0x0001
+#define CAPI_FACILITY_V42BIS 0x0002
+#define CAPI_FACILITY_SUPPSVC 0x0003
+#define CAPI_FACILITY_WAKEUP 0x0004
+#define CAPI_FACILITY_LI 0x0005
+
+#define CAPI_SUPPSVC_GETSUPPORTED 0x0000
+#define CAPI_SUPPSVC_LISTEN 0x0001
+
+/* missing from capiutil.h */
+#define CAPIMSG_PLCI_PART(m) CAPIMSG_U8(m, 9)
+#define CAPIMSG_NCCI_PART(m) CAPIMSG_U16(m, 10)
+#define CAPIMSG_HANDLE_REQ(m) CAPIMSG_U16(m, 18) /* DATA_B3_REQ/_IND only! */
+#define CAPIMSG_FLAGS(m) CAPIMSG_U16(m, 20)
+#define CAPIMSG_SETCONTROLLER(m, contr) capimsg_setu8(m, 8, contr)
+#define CAPIMSG_SETPLCI_PART(m, plci) capimsg_setu8(m, 9, plci)
+#define CAPIMSG_SETNCCI_PART(m, ncci) capimsg_setu16(m, 10, ncci)
+#define CAPIMSG_SETFLAGS(m, flags) capimsg_setu16(m, 20, flags)
+
+/* parameters with differing location in DATA_B3_CONF/_RESP: */
+#define CAPIMSG_SETHANDLE_CONF(m, handle) capimsg_setu16(m, 12, handle)
+#define CAPIMSG_SETINFO_CONF(m, info) capimsg_setu16(m, 14, info)
+
+/* Flags (DATA_B3_REQ/_IND) */
+#define CAPI_FLAGS_DELIVERY_CONFIRMATION 0x04
+#define CAPI_FLAGS_RESERVED (~0x1f)
+
+/* buffer sizes */
+#define MAX_BC_OCTETS 11
+#define MAX_HLC_OCTETS 3
+#define MAX_NUMBER_DIGITS 20
+#define MAX_FMT_IE_LEN 20
+
+/* values for bcs->apconnstate */
+#define APCONN_NONE 0 /* inactive/listening */
+#define APCONN_SETUP 1 /* connecting */
+#define APCONN_ACTIVE 2 /* B channel up */
+
+/* registered application data structure */
+struct gigaset_capi_appl {
+ struct list_head ctrlist;
+ struct gigaset_capi_appl *bcnext;
+ u16 id;
+ struct capi_register_params rp;
+ u16 nextMessageNumber;
+ u32 listenInfoMask;
+ u32 listenCIPmask;
+};
+
+/* CAPI specific controller data structure */
+struct gigaset_capi_ctr {
+ struct capi_ctr ctr;
+ struct list_head appls;
+ struct sk_buff_head sendqueue;
+ atomic_t sendqlen;
+ /* two _cmsg structures possibly used concurrently: */
+ _cmsg hcmsg; /* for message composition triggered from hardware */
+ _cmsg acmsg; /* for dissection of messages sent from application */
+ u8 bc_buf[MAX_BC_OCTETS + 1];
+ u8 hlc_buf[MAX_HLC_OCTETS + 1];
+ u8 cgpty_buf[MAX_NUMBER_DIGITS + 3];
+ u8 cdpty_buf[MAX_NUMBER_DIGITS + 2];
+};
+
+/* CIP Value table (from CAPI 2.0 standard, ch. 6.1) */
+static struct {
+ u8 *bc;
+ u8 *hlc;
+} cip2bchlc[] = {
+ [1] = { "8090A3", NULL }, /* Speech (A-law) */
+ [2] = { "8890", NULL }, /* Unrestricted digital information */
+ [3] = { "8990", NULL }, /* Restricted digital information */
+ [4] = { "9090A3", NULL }, /* 3,1 kHz audio (A-law) */
+ [5] = { "9190", NULL }, /* 7 kHz audio */
+ [6] = { "9890", NULL }, /* Video */
+ [7] = { "88C0C6E6", NULL }, /* Packet mode */
+ [8] = { "8890218F", NULL }, /* 56 kbit/s rate adaptation */
+ [9] = { "9190A5", NULL }, /* Unrestricted digital information
+ * with tones/announcements */
+ [16] = { "8090A3", "9181" }, /* Telephony */
+ [17] = { "9090A3", "9184" }, /* Group 2/3 facsimile */
+ [18] = { "8890", "91A1" }, /* Group 4 facsimile Class 1 */
+ [19] = { "8890", "91A4" }, /* Teletex service basic and mixed mode
+ * and Group 4 facsimile service
+ * Classes II and III */
+ [20] = { "8890", "91A8" }, /* Teletex service basic and
+ * processable mode */
+ [21] = { "8890", "91B1" }, /* Teletex service basic mode */
+ [22] = { "8890", "91B2" }, /* International interworking for
+ * Videotex */
+ [23] = { "8890", "91B5" }, /* Telex */
+ [24] = { "8890", "91B8" }, /* Message Handling Systems
+ * in accordance with X.400 */
+ [25] = { "8890", "91C1" }, /* OSI application
+ * in accordance with X.200 */
+ [26] = { "9190A5", "9181" }, /* 7 kHz telephony */
+ [27] = { "9190A5", "916001" }, /* Video telephony, first connection */
+ [28] = { "8890", "916002" }, /* Video telephony, second connection */
+};
+
+/*
+ * helper functions
+ * ================
+ */
+
+/*
+ * emit unsupported parameter warning
+ */
+static inline void ignore_cstruct_param(struct cardstate *cs, _cstruct param,
+ char *msgname, char *paramname)
+{
+ if (param && *param)
+ dev_warn(cs->dev, "%s: ignoring unsupported parameter: %s\n",
+ msgname, paramname);
+}
+
+/*
+ * convert an IE from Gigaset hex string to ETSI binary representation
+ * including length byte
+ * return value: result length, -1 on error
+ */
+static int encode_ie(char *in, u8 *out, int maxlen)
+{
+ int l = 0;
+ while (*in) {
+ if (!isxdigit(in[0]) || !isxdigit(in[1]) || l >= maxlen)
+ return -1;
+ out[++l] = (hex_to_bin(in[0]) << 4) + hex_to_bin(in[1]);
+ in += 2;
+ }
+ out[0] = l;
+ return l;
+}
+
+/*
+ * convert an IE from ETSI binary representation including length byte
+ * to Gigaset hex string
+ */
+static void decode_ie(u8 *in, char *out)
+{
+ int i = *in;
+ while (i-- > 0) {
+ /* ToDo: conversion to upper case necessary? */
+ *out++ = toupper(hex_asc_hi(*++in));
+ *out++ = toupper(hex_asc_lo(*in));
+ }
+}
+
+/*
+ * retrieve application data structure for an application ID
+ */
+static inline struct gigaset_capi_appl *
+get_appl(struct gigaset_capi_ctr *iif, u16 appl)
+{
+ struct gigaset_capi_appl *ap;
+
+ list_for_each_entry(ap, &iif->appls, ctrlist)
+ if (ap->id == appl)
+ return ap;
+ return NULL;
+}
+
+/*
+ * dump CAPI message to kernel messages for debugging
+ */
+static inline void dump_cmsg(enum debuglevel level, const char *tag, _cmsg *p)
+{
+#ifdef CONFIG_GIGASET_DEBUG
+ /* dump at most 20 messages in 20 secs */
+ static DEFINE_RATELIMIT_STATE(msg_dump_ratelimit, 20 * HZ, 20);
+ _cdebbuf *cdb;
+
+ if (!(gigaset_debuglevel & level))
+ return;
+ if (!___ratelimit(&msg_dump_ratelimit, tag))
+ return;
+
+ cdb = capi_cmsg2str(p);
+ if (cdb) {
+ gig_dbg(level, "%s: [%d] %s", tag, p->ApplId, cdb->buf);
+ cdebbuf_free(cdb);
+ } else {
+ gig_dbg(level, "%s: [%d] %s", tag, p->ApplId,
+ capi_cmd2str(p->Command, p->Subcommand));
+ }
+#endif
+}
+
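+/*
+ * dump a raw CAPI message to kernel messages for debugging
+ */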
+static inline void dump_rawmsg(enum debuglevel level, const char *tag,
+ unsigned char *data)
+{
+#ifdef CONFIG_GIGASET_DEBUG
+ char *dbgline;
+ int i, l;
+
+ if (!(gigaset_debuglevel & level))
+ return;
+
+ l = CAPIMSG_LEN(data);
+ if (l < 12) {
+ gig_dbg(level, "%s: ??? LEN=%04d", tag, l);
+ return;
+ }
+ gig_dbg(level, "%s: 0x%02x:0x%02x: ID=%03d #0x%04x LEN=%04d NCCI=0x%x",
+ tag, CAPIMSG_COMMAND(data), CAPIMSG_SUBCOMMAND(data),
+ CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l,
+ CAPIMSG_CONTROL(data));
+ l -= 12;
+ if (l <= 0)
+ return;
+ if (l > 64)
+ l = 64; /* arbitrary limit */
+ dbgline = kmalloc_array(3, l, GFP_ATOMIC);
+ if (!dbgline)
+ return;
+ for (i = 0; i < l; i++) {
+ dbgline[3 * i] = hex_asc_hi(data[12 + i]);
+ dbgline[3 * i + 1] = hex_asc_lo(data[12 + i]);
+ dbgline[3 * i + 2] = ' ';
+ }
+ dbgline[3 * l - 1] = '\0';
+ gig_dbg(level, " %s", dbgline);
+ kfree(dbgline);
+ if (CAPIMSG_COMMAND(data) == CAPI_DATA_B3 &&
+ (CAPIMSG_SUBCOMMAND(data) == CAPI_REQ ||
+ CAPIMSG_SUBCOMMAND(data) == CAPI_IND)) {
+ l = CAPIMSG_DATALEN(data);
+ gig_dbg(level, " DataLength=%d", l);
+ if (l <= 0 || !(gigaset_debuglevel & DEBUG_LLDATA))
+ return;
+ if (l > 64)
+ l = 64; /* arbitrary limit */
+ dbgline = kmalloc_array(3, l, GFP_ATOMIC);
+ if (!dbgline)
+ return;
+ data += CAPIMSG_LEN(data);
+ for (i = 0; i < l; i++) {
+ dbgline[3 * i] = hex_asc_hi(data[i]);
+ dbgline[3 * i + 1] = hex_asc_lo(data[i]);
+ dbgline[3 * i + 2] = ' ';
+ }
+ dbgline[3 * l - 1] = '\0';
+ gig_dbg(level, " %s", dbgline);
+ kfree(dbgline);
+ }
+#endif
+}
+
+/*
+ * format CAPI IE as string
+ */
+
+#ifdef CONFIG_GIGASET_DEBUG
+static const char *format_ie(const char *ie)
+{
+ static char result[3 * MAX_FMT_IE_LEN];
+ int len, count;
+ char *pout = result;
+
+ if (!ie)
+ return "NULL";
+
+ count = len = ie[0];
+ if (count > MAX_FMT_IE_LEN)
+ count = MAX_FMT_IE_LEN - 1;
+ while (count--) {
+ *pout++ = hex_asc_hi(*++ie);
+ *pout++ = hex_asc_lo(*ie);
+ *pout++ = ' ';
+ }
+ if (len > MAX_FMT_IE_LEN) {
+ *pout++ = '.';
+ *pout++ = '.';
+ *pout++ = '.';
+ }
+ *--pout = 0;
+ return result;
+}
+#endif
+
+/*
+ * emit DATA_B3_CONF message
+ */
+static void send_data_b3_conf(struct cardstate *cs, struct capi_ctr *ctr,
+ u16 appl, u16 msgid, int channel,
+ u16 handle, u16 info)
+{
+ struct sk_buff *cskb;
+ u8 *msg;
+
+ cskb = alloc_skb(CAPI_DATA_B3_CONF_LEN, GFP_ATOMIC);
+ if (!cskb) {
+ dev_err(cs->dev, "%s: out of memory\n", __func__);
+ return;
+ }
+ /* frequent message, avoid _cmsg overhead */
+ msg = __skb_put(cskb, CAPI_DATA_B3_CONF_LEN);
+ CAPIMSG_SETLEN(msg, CAPI_DATA_B3_CONF_LEN);
+ CAPIMSG_SETAPPID(msg, appl);
+ CAPIMSG_SETCOMMAND(msg, CAPI_DATA_B3);
+ CAPIMSG_SETSUBCOMMAND(msg, CAPI_CONF);
+ CAPIMSG_SETMSGID(msg, msgid);
+ CAPIMSG_SETCONTROLLER(msg, ctr->cnr);
+ CAPIMSG_SETPLCI_PART(msg, channel);
+ CAPIMSG_SETNCCI_PART(msg, 1);
+ CAPIMSG_SETHANDLE_CONF(msg, handle);
+ CAPIMSG_SETINFO_CONF(msg, info);
+
+ /* emit message */
+ dump_rawmsg(DEBUG_MCMD, __func__, msg);
+ capi_ctr_handle_message(ctr, appl, cskb);
+}
+
+
+/*
+ * driver interface functions
+ * ==========================
+ */
+
+/**
+ * gigaset_skb_sent() - acknowledge transmission of outgoing skb
+ * @bcs: B channel descriptor structure.
+ * @dskb: sent data.
+ *
+ * Called by hardware module {bas,ser,usb}_gigaset when the data in a
+ * skb has been successfully sent, for signalling completion to the LL.
+ */
+void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *dskb)
+{
+ struct cardstate *cs = bcs->cs;
+ struct gigaset_capi_ctr *iif = cs->iif;
+ struct gigaset_capi_appl *ap = bcs->ap;
+ unsigned char *req = skb_mac_header(dskb);
+ u16 flags;
+
+ /* update statistics */
+ ++bcs->trans_up;
+
+ if (!ap) {
+ gig_dbg(DEBUG_MCMD, "%s: application gone", __func__);
+ return;
+ }
+
+ /* don't send further B3 messages if disconnected */
+ if (bcs->apconnstate < APCONN_ACTIVE) {
+ gig_dbg(DEBUG_MCMD, "%s: disconnected", __func__);
+ return;
+ }
+
+ /*
+ * send DATA_B3_CONF if "delivery confirmation" bit was set in request;
+ * otherwise it has already been sent by do_data_b3_req()
+ */
+ flags = CAPIMSG_FLAGS(req);
+ if (flags & CAPI_FLAGS_DELIVERY_CONFIRMATION)
+ send_data_b3_conf(cs, &iif->ctr, ap->id, CAPIMSG_MSGID(req),
+ bcs->channel + 1, CAPIMSG_HANDLE_REQ(req),
+ (flags & ~CAPI_FLAGS_DELIVERY_CONFIRMATION) ?
+ CapiFlagsNotSupportedByProtocol :
+ CAPI_NOERROR);
+}
+EXPORT_SYMBOL_GPL(gigaset_skb_sent);
+
+/**
+ * gigaset_skb_rcvd() - pass received skb to LL
+ * @bcs: B channel descriptor structure.
+ * @skb: received data.
+ *
+ * Called by hardware module {bas,ser,usb}_gigaset when user data has
+ * been successfully received, for passing to the LL.
+ * Warning: skb must not be accessed anymore!
+ */
+void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb)
+{
+ struct cardstate *cs = bcs->cs;
+ struct gigaset_capi_ctr *iif = cs->iif;
+ struct gigaset_capi_appl *ap = bcs->ap;
+ int len = skb->len;
+
+ /* update statistics */
+ bcs->trans_down++;
+
+ if (!ap) {
+ gig_dbg(DEBUG_MCMD, "%s: application gone", __func__);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ /* don't send further B3 messages if disconnected */
+ if (bcs->apconnstate < APCONN_ACTIVE) {
+ gig_dbg(DEBUG_MCMD, "%s: disconnected", __func__);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ /*
+ * prepend DATA_B3_IND message to payload
+ * Parameters: NCCI = 1, all others 0/unused
+ * frequent message, avoid _cmsg overhead
+ */
+ skb_push(skb, CAPI_DATA_B3_REQ_LEN);
+ CAPIMSG_SETLEN(skb->data, CAPI_DATA_B3_REQ_LEN);
+ CAPIMSG_SETAPPID(skb->data, ap->id);
+ CAPIMSG_SETCOMMAND(skb->data, CAPI_DATA_B3);
+ CAPIMSG_SETSUBCOMMAND(skb->data, CAPI_IND);
+ CAPIMSG_SETMSGID(skb->data, ap->nextMessageNumber++);
+ CAPIMSG_SETCONTROLLER(skb->data, iif->ctr.cnr);
+ CAPIMSG_SETPLCI_PART(skb->data, bcs->channel + 1);
+ CAPIMSG_SETNCCI_PART(skb->data, 1);
+ /* Data parameter not used */
+ CAPIMSG_SETDATALEN(skb->data, len);
+ /* Data handle parameter not used */
+ CAPIMSG_SETFLAGS(skb->data, 0);
+ /* Data64 parameter not present */
+
+ /* emit message */
+ dump_rawmsg(DEBUG_MCMD, __func__, skb->data);
+ capi_ctr_handle_message(&iif->ctr, ap->id, skb);
+}
+EXPORT_SYMBOL_GPL(gigaset_skb_rcvd);
+
+/**
+ * gigaset_isdn_rcv_err() - signal receive error
+ * @bcs: B channel descriptor structure.
+ *
+ * Called by hardware module {bas,ser,usb}_gigaset when a receive error
+ * has occurred, for signalling to the LL.
+ */
+void gigaset_isdn_rcv_err(struct bc_state *bcs)
+{
+ /* if currently ignoring packets, just count down */
+ if (bcs->ignore) {
+ bcs->ignore--;
+ return;
+ }
+
+ /* update statistics */
+ bcs->corrupted++;
+
+ /* ToDo: signal error -> LL */
+}
+EXPORT_SYMBOL_GPL(gigaset_isdn_rcv_err);
+
+/**
+ * gigaset_isdn_icall() - signal incoming call
+ * @at_state: connection state structure.
+ *
+ * Called by main module at tasklet level to notify the LL that an incoming
+ * call has been received. @at_state contains the parameters of the call.
+ *
+ * Return value: call disposition (ICALL_*)
+ */
+int gigaset_isdn_icall(struct at_state_t *at_state)
+{
+ struct cardstate *cs = at_state->cs;
+ struct bc_state *bcs = at_state->bcs;
+ struct gigaset_capi_ctr *iif = cs->iif;
+ struct gigaset_capi_appl *ap;
+ u32 actCIPmask;
+ struct sk_buff *skb;
+ unsigned int msgsize;
+ unsigned long flags;
+ int i;
+
+ /*
+ * ToDo: signal calls without a free B channel, too
+ * (requires a u8 handle for the at_state structure that can
+ * be stored in the PLCI and used in the CONNECT_RESP message
+ * handler to retrieve it)
+ */
+ if (!bcs)
+ return ICALL_IGNORE;
+
+ /* prepare CONNECT_IND message, using B channel number as PLCI */
+ capi_cmsg_header(&iif->hcmsg, 0, CAPI_CONNECT, CAPI_IND, 0,
+ iif->ctr.cnr | ((bcs->channel + 1) << 8));
+
+ /* minimum size, all structs empty */
+ msgsize = CAPI_CONNECT_IND_BASELEN;
+
+ /* Bearer Capability (mandatory) */
+ if (at_state->str_var[STR_ZBC]) {
+ /* pass on BC from Gigaset */
+ if (encode_ie(at_state->str_var[STR_ZBC], iif->bc_buf,
+ MAX_BC_OCTETS) < 0) {
+ dev_warn(cs->dev, "RING ignored - bad BC %s\n",
+ at_state->str_var[STR_ZBC]);
+ return ICALL_IGNORE;
+ }
+
+ /* look up corresponding CIP value */
+ iif->hcmsg.CIPValue = 0; /* default if nothing found */
+ for (i = 0; i < ARRAY_SIZE(cip2bchlc); i++)
+ if (cip2bchlc[i].bc != NULL &&
+ cip2bchlc[i].hlc == NULL &&
+ !strcmp(cip2bchlc[i].bc,
+ at_state->str_var[STR_ZBC])) {
+ iif->hcmsg.CIPValue = i;
+ break;
+ }
+ } else {
+ /* no BC (internal call): assume CIP 1 (speech, A-law) */
+ iif->hcmsg.CIPValue = 1;
+ encode_ie(cip2bchlc[1].bc, iif->bc_buf, MAX_BC_OCTETS);
+ }
+ iif->hcmsg.BC = iif->bc_buf;
+ msgsize += iif->hcmsg.BC[0];
+
+ /* High Layer Compatibility (optional) */
+ if (at_state->str_var[STR_ZHLC]) {
+ /* pass on HLC from Gigaset */
+ if (encode_ie(at_state->str_var[STR_ZHLC], iif->hlc_buf,
+ MAX_HLC_OCTETS) < 0) {
+ dev_warn(cs->dev, "RING ignored - bad HLC %s\n",
+ at_state->str_var[STR_ZHLC]);
+ return ICALL_IGNORE;
+ }
+ iif->hcmsg.HLC = iif->hlc_buf;
+ msgsize += iif->hcmsg.HLC[0];
+
+ /* look up corresponding CIP value */
+ /* keep BC based CIP value if none found */
+ if (at_state->str_var[STR_ZBC])
+ for (i = 0; i < ARRAY_SIZE(cip2bchlc); i++)
+ if (cip2bchlc[i].hlc != NULL &&
+ !strcmp(cip2bchlc[i].hlc,
+ at_state->str_var[STR_ZHLC]) &&
+ !strcmp(cip2bchlc[i].bc,
+ at_state->str_var[STR_ZBC])) {
+ iif->hcmsg.CIPValue = i;
+ break;
+ }
+ }
+
+ /* Called Party Number (optional) */
+ if (at_state->str_var[STR_ZCPN]) {
+ i = strlen(at_state->str_var[STR_ZCPN]);
+ if (i > MAX_NUMBER_DIGITS) {
+ dev_warn(cs->dev, "RING ignored - bad number %s\n",
+ at_state->str_var[STR_ZCPN]);
+ return ICALL_IGNORE;
+ }
+ iif->cdpty_buf[0] = i + 1;
+ iif->cdpty_buf[1] = 0x80; /* type / numbering plan unknown */
+ memcpy(iif->cdpty_buf + 2, at_state->str_var[STR_ZCPN], i);
+ iif->hcmsg.CalledPartyNumber = iif->cdpty_buf;
+ msgsize += iif->hcmsg.CalledPartyNumber[0];
+ }
+
+ /* Calling Party Number (optional) */
+ if (at_state->str_var[STR_NMBR]) {
+ i = strlen(at_state->str_var[STR_NMBR]);
+ if (i > MAX_NUMBER_DIGITS) {
+ dev_warn(cs->dev, "RING ignored - bad number %s\n",
+ at_state->str_var[STR_NMBR]);
+ return ICALL_IGNORE;
+ }
+ iif->cgpty_buf[0] = i + 2;
+ iif->cgpty_buf[1] = 0x00; /* type / numbering plan unknown */
+ iif->cgpty_buf[2] = 0x80; /* pres. allowed, not screened */
+ memcpy(iif->cgpty_buf + 3, at_state->str_var[STR_NMBR], i);
+ iif->hcmsg.CallingPartyNumber = iif->cgpty_buf;
+ msgsize += iif->hcmsg.CallingPartyNumber[0];
+ }
+
+ /* remaining parameters (not supported, always left NULL):
+ * - CalledPartySubaddress
+ * - CallingPartySubaddress
+ * - AdditionalInfo
+ * - BChannelinformation
+ * - Keypadfacility
+ * - Useruserdata
+ * - Facilitydataarray
+ */
+
+ gig_dbg(DEBUG_CMD, "icall: PLCI %x CIP %d BC %s",
+ iif->hcmsg.adr.adrPLCI, iif->hcmsg.CIPValue,
+ format_ie(iif->hcmsg.BC));
+ gig_dbg(DEBUG_CMD, "icall: HLC %s",
+ format_ie(iif->hcmsg.HLC));
+ gig_dbg(DEBUG_CMD, "icall: CgPty %s",
+ format_ie(iif->hcmsg.CallingPartyNumber));
+ gig_dbg(DEBUG_CMD, "icall: CdPty %s",
+ format_ie(iif->hcmsg.CalledPartyNumber));
+
+ /* scan application list for matching listeners */
+ spin_lock_irqsave(&bcs->aplock, flags);
+ if (bcs->ap != NULL || bcs->apconnstate != APCONN_NONE) {
+ dev_warn(cs->dev, "%s: channel not properly cleared (%p/%d)\n",
+ __func__, bcs->ap, bcs->apconnstate);
+ bcs->ap = NULL;
+ bcs->apconnstate = APCONN_NONE;
+ }
+ spin_unlock_irqrestore(&bcs->aplock, flags);
+ actCIPmask = 1 | (1 << iif->hcmsg.CIPValue);
+ list_for_each_entry(ap, &iif->appls, ctrlist)
+ if (actCIPmask & ap->listenCIPmask) {
+ /* build CONNECT_IND message for this application */
+ iif->hcmsg.ApplId = ap->id;
+ iif->hcmsg.Messagenumber = ap->nextMessageNumber++;
+
+ skb = alloc_skb(msgsize, GFP_ATOMIC);
+ if (!skb) {
+ dev_err(cs->dev, "%s: out of memory\n",
+ __func__);
+ break;
+ }
+ if (capi_cmsg2message(&iif->hcmsg,
+ __skb_put(skb, msgsize))) {
+ dev_err(cs->dev, "%s: message parser failure\n",
+ __func__);
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
+
+ /* add to listeners on this B channel, update state */
+ spin_lock_irqsave(&bcs->aplock, flags);
+ ap->bcnext = bcs->ap;
+ bcs->ap = ap;
+ bcs->chstate |= CHS_NOTIFY_LL;
+ bcs->apconnstate = APCONN_SETUP;
+ spin_unlock_irqrestore(&bcs->aplock, flags);
+
+ /* emit message */
+ capi_ctr_handle_message(&iif->ctr, ap->id, skb);
+ }
+
+ /*
+ * Return "accept" if any listeners.
+ * Gigaset will send ALERTING.
+ * There doesn't seem to be a way to avoid this.
+ */
+ return bcs->ap ? ICALL_ACCEPT : ICALL_IGNORE;
+}
+
+/*
+ * send a DISCONNECT_IND message to an application
+ * does not sleep, clobbers the controller's hcmsg structure
+ */
+static void send_disconnect_ind(struct bc_state *bcs,
+ struct gigaset_capi_appl *ap, u16 reason)
+{
+ struct cardstate *cs = bcs->cs;
+ struct gigaset_capi_ctr *iif = cs->iif;
+ struct sk_buff *skb;
+
+ if (bcs->apconnstate == APCONN_NONE)
+ return;
+
+ capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_DISCONNECT, CAPI_IND,
+ ap->nextMessageNumber++,
+ iif->ctr.cnr | ((bcs->channel + 1) << 8));
+ iif->hcmsg.Reason = reason;
+ skb = alloc_skb(CAPI_DISCONNECT_IND_LEN, GFP_ATOMIC);
+ if (!skb) {
+ dev_err(cs->dev, "%s: out of memory\n", __func__);
+ return;
+ }
+ if (capi_cmsg2message(&iif->hcmsg,
+ __skb_put(skb, CAPI_DISCONNECT_IND_LEN))) {
+ dev_err(cs->dev, "%s: message parser failure\n", __func__);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
+ capi_ctr_handle_message(&iif->ctr, ap->id, skb);
+}
+
+/*
+ * send a DISCONNECT_B3_IND message to an application
+ * Parameters: NCCI = 1, NCPI empty, Reason_B3 = 0
+ * does not sleep, clobbers the controller's hcmsg structure
+ */
+static void send_disconnect_b3_ind(struct bc_state *bcs,
+ struct gigaset_capi_appl *ap)
+{
+ struct cardstate *cs = bcs->cs;
+ struct gigaset_capi_ctr *iif = cs->iif;
+ struct sk_buff *skb;
+
+ /* nothing to do if no logical connection active */
+ if (bcs->apconnstate < APCONN_ACTIVE)
+ return;
+ bcs->apconnstate = APCONN_SETUP;
+
+ capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_DISCONNECT_B3, CAPI_IND,
+ ap->nextMessageNumber++,
+ iif->ctr.cnr | ((bcs->channel + 1) << 8) | (1 << 16));
+ skb = alloc_skb(CAPI_DISCONNECT_B3_IND_BASELEN, GFP_ATOMIC);
+ if (!skb) {
+ dev_err(cs->dev, "%s: out of memory\n", __func__);
+ return;
+ }
+ if (capi_cmsg2message(&iif->hcmsg,
+ __skb_put(skb, CAPI_DISCONNECT_B3_IND_BASELEN))) {
+ dev_err(cs->dev, "%s: message parser failure\n", __func__);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
+ capi_ctr_handle_message(&iif->ctr, ap->id, skb);
+}
+
+/**
+ * gigaset_isdn_connD() - signal D channel connect
+ * @bcs: B channel descriptor structure.
+ *
+ * Called by main module at tasklet level to notify the LL that the D channel
+ * connection has been established.
+ */
+void gigaset_isdn_connD(struct bc_state *bcs)
+{
+ struct cardstate *cs = bcs->cs;
+ struct gigaset_capi_ctr *iif = cs->iif;
+ struct gigaset_capi_appl *ap;
+ struct sk_buff *skb;
+ unsigned int msgsize;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bcs->aplock, flags);
+ ap = bcs->ap;
+ if (!ap) {
+ spin_unlock_irqrestore(&bcs->aplock, flags);
+ gig_dbg(DEBUG_CMD, "%s: application gone", __func__);
+ return;
+ }
+ if (bcs->apconnstate == APCONN_NONE) {
+ spin_unlock_irqrestore(&bcs->aplock, flags);
+ dev_warn(cs->dev, "%s: application %u not connected\n",
+ __func__, ap->id);
+ return;
+ }
+ spin_unlock_irqrestore(&bcs->aplock, flags);
+ while (ap->bcnext) {
+ /* this should never happen */
+ dev_warn(cs->dev, "%s: dropping extra application %u\n",
+ __func__, ap->bcnext->id);
+ send_disconnect_ind(bcs, ap->bcnext,
+ CapiCallGivenToOtherApplication);
+ ap->bcnext = ap->bcnext->bcnext;
+ }
+
+ /* prepare CONNECT_ACTIVE_IND message
+ * Note: LLC not supported by device
+ */
+ capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_CONNECT_ACTIVE, CAPI_IND,
+ ap->nextMessageNumber++,
+ iif->ctr.cnr | ((bcs->channel + 1) << 8));
+
+ /* minimum size, all structs empty */
+ msgsize = CAPI_CONNECT_ACTIVE_IND_BASELEN;
+
+ /* ToDo: set parameter: Connected number
+ * (requires ev-layer state machine extension to collect
+ * ZCON device reply)
+ */
+
+ /* build and emit CONNECT_ACTIVE_IND message */
+ skb = alloc_skb(msgsize, GFP_ATOMIC);
+ if (!skb) {
+ dev_err(cs->dev, "%s: out of memory\n", __func__);
+ return;
+ }
+ if (capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize))) {
+ dev_err(cs->dev, "%s: message parser failure\n", __func__);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
+ capi_ctr_handle_message(&iif->ctr, ap->id, skb);
+}
+
+/**
+ * gigaset_isdn_hupD() - signal D channel hangup
+ * @bcs: B channel descriptor structure.
+ *
+ * Called by main module at tasklet level to notify the LL that the D channel
+ * connection has been shut down.
+ */
+void gigaset_isdn_hupD(struct bc_state *bcs)
+{
+ struct gigaset_capi_appl *ap;
+ unsigned long flags;
+
+ /*
+ * ToDo: pass on reason code reported by device
+ * (requires ev-layer state machine extension to collect
+ * ZCAU device reply)
+ */
+ spin_lock_irqsave(&bcs->aplock, flags);
+ while (bcs->ap != NULL) {
+ ap = bcs->ap;
+ bcs->ap = ap->bcnext;
+ spin_unlock_irqrestore(&bcs->aplock, flags);
+ send_disconnect_b3_ind(bcs, ap);
+ send_disconnect_ind(bcs, ap, 0);
+ spin_lock_irqsave(&bcs->aplock, flags);
+ }
+ bcs->apconnstate = APCONN_NONE;
+ spin_unlock_irqrestore(&bcs->aplock, flags);
+}
+
+/**
+ * gigaset_isdn_connB() - signal B channel connect
+ * @bcs: B channel descriptor structure.
+ *
+ * Called by main module at tasklet level to notify the LL that the B channel
+ * connection has been established.
+ */
+void gigaset_isdn_connB(struct bc_state *bcs)
+{
+ struct cardstate *cs = bcs->cs;
+ struct gigaset_capi_ctr *iif = cs->iif;
+ struct gigaset_capi_appl *ap;
+ struct sk_buff *skb;
+ unsigned long flags;
+ unsigned int msgsize;
+ u8 command;
+
+ spin_lock_irqsave(&bcs->aplock, flags);
+ ap = bcs->ap;
+ if (!ap) {
+ spin_unlock_irqrestore(&bcs->aplock, flags);
+ gig_dbg(DEBUG_CMD, "%s: application gone", __func__);
+ return;
+ }
+ if (!bcs->apconnstate) {
+ spin_unlock_irqrestore(&bcs->aplock, flags);
+ dev_warn(cs->dev, "%s: application %u not connected\n",
+ __func__, ap->id);
+ return;
+ }
+
+ /*
+ * emit CONNECT_B3_ACTIVE_IND if we already got CONNECT_B3_REQ;
+ * otherwise we have to emit CONNECT_B3_IND first, and follow up with
+ * CONNECT_B3_ACTIVE_IND in reply to CONNECT_B3_RESP
+ * Parameters in both cases always: NCCI = 1, NCPI empty
+ */
+ if (bcs->apconnstate >= APCONN_ACTIVE) {
+ command = CAPI_CONNECT_B3_ACTIVE;
+ msgsize = CAPI_CONNECT_B3_ACTIVE_IND_BASELEN;
+ } else {
+ command = CAPI_CONNECT_B3;
+ msgsize = CAPI_CONNECT_B3_IND_BASELEN;
+ }
+ bcs->apconnstate = APCONN_ACTIVE;
+
+ spin_unlock_irqrestore(&bcs->aplock, flags);
+
+ while (ap->bcnext) {
+ /* this should never happen */
+ dev_warn(cs->dev, "%s: dropping extra application %u\n",
+ __func__, ap->bcnext->id);
+ send_disconnect_ind(bcs, ap->bcnext,
+ CapiCallGivenToOtherApplication);
+ ap->bcnext = ap->bcnext->bcnext;
+ }
+
+ capi_cmsg_header(&iif->hcmsg, ap->id, command, CAPI_IND,
+ ap->nextMessageNumber++,
+ iif->ctr.cnr | ((bcs->channel + 1) << 8) | (1 << 16));
+ skb = alloc_skb(msgsize, GFP_ATOMIC);
+ if (!skb) {
+ dev_err(cs->dev, "%s: out of memory\n", __func__);
+ return;
+ }
+ if (capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize))) {
+ dev_err(cs->dev, "%s: message parser failure\n", __func__);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
+ capi_ctr_handle_message(&iif->ctr, ap->id, skb);
+}
+
+/**
+ * gigaset_isdn_hupB() - signal B channel hangup
+ * @bcs: B channel descriptor structure.
+ *
+ * Called by main module to notify the LL that the B channel connection has
+ * been shut down.
+ */
+void gigaset_isdn_hupB(struct bc_state *bcs)
+{
+ struct gigaset_capi_appl *ap = bcs->ap;
+
+	/* ToDo: ensure ordering of DISCONNECT_B3_IND and DISCONNECT_IND? */
+
+ if (!ap) {
+ gig_dbg(DEBUG_CMD, "%s: application gone", __func__);
+ return;
+ }
+
+ send_disconnect_b3_ind(bcs, ap);
+}
+
+/**
+ * gigaset_isdn_start() - signal device availability
+ * @cs: device descriptor structure.
+ *
+ * Called by main module to notify the LL that the device is available for
+ * use.
+ */
+void gigaset_isdn_start(struct cardstate *cs)
+{
+ struct gigaset_capi_ctr *iif = cs->iif;
+
+ /* fill profile data: manufacturer name */
+ strcpy(iif->ctr.manu, "Siemens");
+ /* CAPI and device version */
+ iif->ctr.version.majorversion = 2; /* CAPI 2.0 */
+ iif->ctr.version.minorversion = 0;
+ /* ToDo: check/assert cs->gotfwver? */
+ iif->ctr.version.majormanuversion = cs->fwver[0];
+ iif->ctr.version.minormanuversion = cs->fwver[1];
+ /* number of B channels supported */
+ iif->ctr.profile.nbchannel = cs->channels;
+ /* global options: internal controller, supplementary services */
+ iif->ctr.profile.goptions = 0x11;
+ /* B1 protocols: 64 kbit/s HDLC or transparent */
+ iif->ctr.profile.support1 = 0x03;
+ /* B2 protocols: transparent only */
+ /* ToDo: X.75 SLP ? */
+ iif->ctr.profile.support2 = 0x02;
+ /* B3 protocols: transparent only */
+ iif->ctr.profile.support3 = 0x01;
+ /* no serial number */
+ strcpy(iif->ctr.serial, "0");
+ capi_ctr_ready(&iif->ctr);
+}
+
+/**
+ * gigaset_isdn_stop() - signal device unavailability
+ * @cs: device descriptor structure.
+ *
+ * Called by main module to notify the LL that the device is no longer
+ * available for use.
+ */
+void gigaset_isdn_stop(struct cardstate *cs)
+{
+	struct gigaset_capi_ctr *iif = cs->iif;
+
+	capi_ctr_down(&iif->ctr);
+}
+
+/*
+ * kernel CAPI callback methods
+ * ============================
+ */
+
+/*
+ * register CAPI application
+ */
+static void gigaset_register_appl(struct capi_ctr *ctr, u16 appl,
+ capi_register_params *rp)
+{
+ struct gigaset_capi_ctr *iif
+ = container_of(ctr, struct gigaset_capi_ctr, ctr);
+ struct cardstate *cs = ctr->driverdata;
+ struct gigaset_capi_appl *ap;
+
+ gig_dbg(DEBUG_CMD, "%s [%u] l3cnt=%u blkcnt=%u blklen=%u",
+ __func__, appl, rp->level3cnt, rp->datablkcnt, rp->datablklen);
+
+ list_for_each_entry(ap, &iif->appls, ctrlist)
+ if (ap->id == appl) {
+ dev_notice(cs->dev,
+ "application %u already registered\n", appl);
+ return;
+ }
+
+ ap = kzalloc(sizeof(*ap), GFP_KERNEL);
+ if (!ap) {
+ dev_err(cs->dev, "%s: out of memory\n", __func__);
+ return;
+ }
+ ap->id = appl;
+ ap->rp = *rp;
+
+ list_add(&ap->ctrlist, &iif->appls);
+ dev_info(cs->dev, "application %u registered\n", ap->id);
+}
+
+/*
+ * remove CAPI application from channel
+ * helper function to keep indentation levels down and stay within 80 columns
+ */
+
+static inline void remove_appl_from_channel(struct bc_state *bcs,
+ struct gigaset_capi_appl *ap)
+{
+ struct cardstate *cs = bcs->cs;
+ struct gigaset_capi_appl *bcap;
+ unsigned long flags;
+ int prevconnstate;
+
+ spin_lock_irqsave(&bcs->aplock, flags);
+ bcap = bcs->ap;
+ if (bcap == NULL) {
+ spin_unlock_irqrestore(&bcs->aplock, flags);
+ return;
+ }
+
+ /* check first application on channel */
+ if (bcap == ap) {
+ bcs->ap = ap->bcnext;
+ if (bcs->ap != NULL) {
+ spin_unlock_irqrestore(&bcs->aplock, flags);
+ return;
+ }
+
+ /* none left, clear channel state */
+ prevconnstate = bcs->apconnstate;
+ bcs->apconnstate = APCONN_NONE;
+ spin_unlock_irqrestore(&bcs->aplock, flags);
+
+ if (prevconnstate == APCONN_ACTIVE) {
+ dev_notice(cs->dev, "%s: hanging up channel %u\n",
+ __func__, bcs->channel);
+ gigaset_add_event(cs, &bcs->at_state,
+ EV_HUP, NULL, 0, NULL);
+ gigaset_schedule_event(cs);
+ }
+ return;
+ }
+
+ /* check remaining list */
+ do {
+ if (bcap->bcnext == ap) {
+ bcap->bcnext = bcap->bcnext->bcnext;
+ spin_unlock_irqrestore(&bcs->aplock, flags);
+ return;
+ }
+ bcap = bcap->bcnext;
+ } while (bcap != NULL);
+ spin_unlock_irqrestore(&bcs->aplock, flags);
+}
+
+/*
+ * release CAPI application
+ */
+static void gigaset_release_appl(struct capi_ctr *ctr, u16 appl)
+{
+ struct gigaset_capi_ctr *iif
+ = container_of(ctr, struct gigaset_capi_ctr, ctr);
+ struct cardstate *cs = iif->ctr.driverdata;
+ struct gigaset_capi_appl *ap, *tmp;
+ unsigned ch;
+
+ gig_dbg(DEBUG_CMD, "%s [%u]", __func__, appl);
+
+ list_for_each_entry_safe(ap, tmp, &iif->appls, ctrlist)
+ if (ap->id == appl) {
+ /* remove from any channels */
+ for (ch = 0; ch < cs->channels; ch++)
+ remove_appl_from_channel(&cs->bcs[ch], ap);
+
+ /* remove from registration list */
+ list_del(&ap->ctrlist);
+ kfree(ap);
+ dev_info(cs->dev, "application %u released\n", appl);
+ }
+}
+
+/*
+ * =====================================================================
+ * outgoing CAPI message handler
+ * =====================================================================
+ */
+
+/*
+ * helper function: emit reply message with given Info value
+ */
+static void send_conf(struct gigaset_capi_ctr *iif,
+ struct gigaset_capi_appl *ap,
+ struct sk_buff *skb,
+ u16 info)
+{
+ struct cardstate *cs = iif->ctr.driverdata;
+
+ /*
+	 * _CONF replies carry only the NCCI and Info parameters,
+	 * so they always fit into the _REQ message skb
+ */
+ capi_cmsg_answer(&iif->acmsg);
+ iif->acmsg.Info = info;
+ if (capi_cmsg2message(&iif->acmsg, skb->data)) {
+ dev_err(cs->dev, "%s: message parser failure\n", __func__);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ __skb_trim(skb, CAPI_STDCONF_LEN);
+ dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
+ capi_ctr_handle_message(&iif->ctr, ap->id, skb);
+}
+
+/*
+ * process FACILITY_REQ message
+ */
+static void do_facility_req(struct gigaset_capi_ctr *iif,
+ struct gigaset_capi_appl *ap,
+ struct sk_buff *skb)
+{
+ struct cardstate *cs = iif->ctr.driverdata;
+ _cmsg *cmsg = &iif->acmsg;
+ struct sk_buff *cskb;
+ u8 *pparam;
+ unsigned int msgsize = CAPI_FACILITY_CONF_BASELEN;
+ u16 function, info;
+ static u8 confparam[10]; /* max. 9 octets + length byte */
+
+ /* decode message */
+ if (capi_message2cmsg(cmsg, skb->data)) {
+ dev_err(cs->dev, "%s: message parser failure\n", __func__);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ dump_cmsg(DEBUG_CMD, __func__, cmsg);
+
+ /*
+	 * the Facility Request Parameter is not decoded by capi_message2cmsg();
+	 * its encoding depends on the Facility Selector
+ */
+ switch (cmsg->FacilitySelector) {
+ case CAPI_FACILITY_DTMF: /* ToDo */
+ info = CapiFacilityNotSupported;
+ confparam[0] = 2; /* length */
+ /* DTMF information: Unknown DTMF request */
+ capimsg_setu16(confparam, 1, 2);
+ break;
+
+ case CAPI_FACILITY_V42BIS: /* not supported */
+ info = CapiFacilityNotSupported;
+ confparam[0] = 2; /* length */
+ /* V.42 bis information: not available */
+ capimsg_setu16(confparam, 1, 1);
+ break;
+
+ case CAPI_FACILITY_SUPPSVC:
+ /* decode Function parameter */
+ pparam = cmsg->FacilityRequestParameter;
+ if (pparam == NULL || pparam[0] < 2) {
+ dev_notice(cs->dev, "%s: %s missing\n", "FACILITY_REQ",
+ "Facility Request Parameter");
+ send_conf(iif, ap, skb, CapiIllMessageParmCoding);
+ return;
+ }
+ function = CAPIMSG_U16(pparam, 1);
+ switch (function) {
+ case CAPI_SUPPSVC_GETSUPPORTED:
+ info = CapiSuccess;
+ /* Supplementary Service specific parameter */
+ confparam[3] = 6; /* length */
+ /* Supplementary services info: Success */
+ capimsg_setu16(confparam, 4, CapiSuccess);
+ /* Supported Services: none */
+ capimsg_setu32(confparam, 6, 0);
+ break;
+ case CAPI_SUPPSVC_LISTEN:
+ if (pparam[0] < 7 || pparam[3] < 4) {
+ dev_notice(cs->dev, "%s: %s missing\n",
+ "FACILITY_REQ", "Notification Mask");
+ send_conf(iif, ap, skb,
+ CapiIllMessageParmCoding);
+ return;
+ }
+ if (CAPIMSG_U32(pparam, 4) != 0) {
+ dev_notice(cs->dev,
+ "%s: unsupported supplementary service notification mask 0x%x\n",
+ "FACILITY_REQ", CAPIMSG_U32(pparam, 4));
+ info = CapiFacilitySpecificFunctionNotSupported;
+ confparam[3] = 2; /* length */
+ capimsg_setu16(confparam, 4,
+ CapiSupplementaryServiceNotSupported);
+ break;
+ }
+ info = CapiSuccess;
+ confparam[3] = 2; /* length */
+ capimsg_setu16(confparam, 4, CapiSuccess);
+ break;
+
+ /* ToDo: add supported services */
+
+ default:
+ dev_notice(cs->dev,
+ "%s: unsupported supplementary service function 0x%04x\n",
+ "FACILITY_REQ", function);
+ info = CapiFacilitySpecificFunctionNotSupported;
+ /* Supplementary Service specific parameter */
+ confparam[3] = 2; /* length */
+ /* Supplementary services info: not supported */
+ capimsg_setu16(confparam, 4,
+ CapiSupplementaryServiceNotSupported);
+ }
+
+ /* Facility confirmation parameter */
+ confparam[0] = confparam[3] + 3; /* total length */
+ /* Function: copy from _REQ message */
+ capimsg_setu16(confparam, 1, function);
+ /* Supplementary Service specific parameter already set above */
+ break;
+
+ case CAPI_FACILITY_WAKEUP: /* ToDo */
+ info = CapiFacilityNotSupported;
+ confparam[0] = 2; /* length */
+ /* Number of accepted awake request parameters: 0 */
+ capimsg_setu16(confparam, 1, 0);
+ break;
+
+ default:
+ info = CapiFacilityNotSupported;
+ confparam[0] = 0; /* empty struct */
+ }
+
+ /* send FACILITY_CONF with given Info and confirmation parameter */
+ dev_kfree_skb_any(skb);
+ capi_cmsg_answer(cmsg);
+ cmsg->Info = info;
+ cmsg->FacilityConfirmationParameter = confparam;
+ msgsize += confparam[0]; /* length */
+ cskb = alloc_skb(msgsize, GFP_ATOMIC);
+ if (!cskb) {
+ dev_err(cs->dev, "%s: out of memory\n", __func__);
+ return;
+ }
+ if (capi_cmsg2message(cmsg, __skb_put(cskb, msgsize))) {
+ dev_err(cs->dev, "%s: message parser failure\n", __func__);
+ dev_kfree_skb_any(cskb);
+ return;
+ }
+ dump_cmsg(DEBUG_CMD, __func__, cmsg);
+ capi_ctr_handle_message(&iif->ctr, ap->id, cskb);
+}
+
+/*
+ * process LISTEN_REQ message
+ * just store the masks in the application data structure
+ */
+static void do_listen_req(struct gigaset_capi_ctr *iif,
+ struct gigaset_capi_appl *ap,
+ struct sk_buff *skb)
+{
+ struct cardstate *cs = iif->ctr.driverdata;
+
+ /* decode message */
+ if (capi_message2cmsg(&iif->acmsg, skb->data)) {
+ dev_err(cs->dev, "%s: message parser failure\n", __func__);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
+
+ /* store listening parameters */
+ ap->listenInfoMask = iif->acmsg.InfoMask;
+ ap->listenCIPmask = iif->acmsg.CIPmask;
+ send_conf(iif, ap, skb, CapiSuccess);
+}
+
+/*
+ * process ALERT_REQ message
+ * nothing to do, Gigaset always alerts anyway
+ */
+static void do_alert_req(struct gigaset_capi_ctr *iif,
+ struct gigaset_capi_appl *ap,
+ struct sk_buff *skb)
+{
+ struct cardstate *cs = iif->ctr.driverdata;
+
+ /* decode message */
+ if (capi_message2cmsg(&iif->acmsg, skb->data)) {
+ dev_err(cs->dev, "%s: message parser failure\n", __func__);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
+ send_conf(iif, ap, skb, CapiAlertAlreadySent);
+}
+
+/*
+ * process CONNECT_REQ message
+ * allocate a B channel, prepare dial commands, queue a DIAL event,
+ * emit CONNECT_CONF reply
+ */
+static void do_connect_req(struct gigaset_capi_ctr *iif,
+ struct gigaset_capi_appl *ap,
+ struct sk_buff *skb)
+{
+ struct cardstate *cs = iif->ctr.driverdata;
+ _cmsg *cmsg = &iif->acmsg;
+ struct bc_state *bcs;
+ char **commands;
+ char *s;
+ u8 *pp;
+ unsigned long flags;
+ int i, l, lbc, lhlc;
+ u16 info;
+
+ /* decode message */
+ if (capi_message2cmsg(cmsg, skb->data)) {
+ dev_err(cs->dev, "%s: message parser failure\n", __func__);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ dump_cmsg(DEBUG_CMD, __func__, cmsg);
+
+ /* get free B channel & construct PLCI */
+ bcs = gigaset_get_free_channel(cs);
+ if (!bcs) {
+ dev_notice(cs->dev, "%s: no B channel available\n",
+ "CONNECT_REQ");
+ send_conf(iif, ap, skb, CapiNoPlciAvailable);
+ return;
+ }
+ spin_lock_irqsave(&bcs->aplock, flags);
+ if (bcs->ap != NULL || bcs->apconnstate != APCONN_NONE)
+ dev_warn(cs->dev, "%s: channel not properly cleared (%p/%d)\n",
+ __func__, bcs->ap, bcs->apconnstate);
+ ap->bcnext = NULL;
+ bcs->ap = ap;
+ bcs->apconnstate = APCONN_SETUP;
+ spin_unlock_irqrestore(&bcs->aplock, flags);
+
+ bcs->rx_bufsize = ap->rp.datablklen;
+ dev_kfree_skb(bcs->rx_skb);
+ gigaset_new_rx_skb(bcs);
+ cmsg->adr.adrPLCI |= (bcs->channel + 1) << 8;
+
+ /* build command table */
+ commands = kcalloc(AT_NUM, sizeof(*commands), GFP_KERNEL);
+ if (!commands)
+ goto oom;
+
+ /* encode parameter: Called party number */
+ pp = cmsg->CalledPartyNumber;
+ if (pp == NULL || *pp == 0) {
+ dev_notice(cs->dev, "%s: %s missing\n",
+ "CONNECT_REQ", "Called party number");
+ info = CapiIllMessageParmCoding;
+ goto error;
+ }
+ l = *pp++;
+ /* check type of number/numbering plan byte */
+ switch (*pp) {
+ case 0x80: /* unknown type / unknown numbering plan */
+ case 0x81: /* unknown type / ISDN/Telephony numbering plan */
+ break;
+ default: /* others: warn about potential misinterpretation */
+ dev_notice(cs->dev, "%s: %s type/plan 0x%02x unsupported\n",
+ "CONNECT_REQ", "Called party number", *pp);
+ }
+ pp++;
+ l--;
+ /* translate "**" internal call prefix to CTP value */
+ if (l >= 2 && pp[0] == '*' && pp[1] == '*') {
+ s = "^SCTP=0\r";
+ pp += 2;
+ l -= 2;
+ } else {
+ s = "^SCTP=1\r";
+ }
+ commands[AT_TYPE] = kstrdup(s, GFP_KERNEL);
+ if (!commands[AT_TYPE])
+ goto oom;
+ commands[AT_DIAL] = kmalloc(l + 3, GFP_KERNEL);
+ if (!commands[AT_DIAL])
+ goto oom;
+ snprintf(commands[AT_DIAL], l + 3, "D%.*s\r", l, pp);
+
+ /* encode parameter: Calling party number */
+ pp = cmsg->CallingPartyNumber;
+ if (pp != NULL && *pp > 0) {
+ l = *pp++;
+
+ /* check type of number/numbering plan byte */
+ /* ToDo: allow for/handle Ext=1? */
+ switch (*pp) {
+ case 0x00: /* unknown type / unknown numbering plan */
+ case 0x01: /* unknown type / ISDN/Telephony num. plan */
+ break;
+ default:
+ dev_notice(cs->dev,
+ "%s: %s type/plan 0x%02x unsupported\n",
+ "CONNECT_REQ", "Calling party number", *pp);
+ }
+ pp++;
+ l--;
+
+ /* check presentation indicator */
+ if (!l) {
+ dev_notice(cs->dev, "%s: %s IE truncated\n",
+ "CONNECT_REQ", "Calling party number");
+ info = CapiIllMessageParmCoding;
+ goto error;
+ }
+ switch (*pp & 0xfc) { /* ignore Screening indicator */
+ case 0x80: /* Presentation allowed */
+ s = "^SCLIP=1\r";
+ break;
+ case 0xa0: /* Presentation restricted */
+ s = "^SCLIP=0\r";
+ break;
+ default:
+ dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
+ "CONNECT_REQ",
+ "Presentation/Screening indicator",
+ *pp);
+ s = "^SCLIP=1\r";
+ }
+ commands[AT_CLIP] = kstrdup(s, GFP_KERNEL);
+ if (!commands[AT_CLIP])
+ goto oom;
+ pp++;
+ l--;
+
+ if (l) {
+ /* number */
+ commands[AT_MSN] = kmalloc(l + 8, GFP_KERNEL);
+ if (!commands[AT_MSN])
+ goto oom;
+			snprintf(commands[AT_MSN], l + 8, "^SMSN=%.*s\r", l, pp);
+ }
+ }
+
+ /* check parameter: CIP Value */
+ if (cmsg->CIPValue >= ARRAY_SIZE(cip2bchlc) ||
+ (cmsg->CIPValue > 0 && cip2bchlc[cmsg->CIPValue].bc == NULL)) {
+ dev_notice(cs->dev, "%s: unknown CIP value %d\n",
+ "CONNECT_REQ", cmsg->CIPValue);
+ info = CapiCipValueUnknown;
+ goto error;
+ }
+
+ /*
+ * check/encode parameters: BC & HLC
+ * must be encoded together as device doesn't accept HLC separately
+ * explicit parameters override values derived from CIP
+ */
+
+ /* determine lengths */
+ if (cmsg->BC && cmsg->BC[0]) /* BC specified explicitly */
+ lbc = 2 * cmsg->BC[0];
+ else if (cip2bchlc[cmsg->CIPValue].bc) /* BC derived from CIP */
+ lbc = strlen(cip2bchlc[cmsg->CIPValue].bc);
+ else /* no BC */
+ lbc = 0;
+ if (cmsg->HLC && cmsg->HLC[0]) /* HLC specified explicitly */
+ lhlc = 2 * cmsg->HLC[0];
+ else if (cip2bchlc[cmsg->CIPValue].hlc) /* HLC derived from CIP */
+ lhlc = strlen(cip2bchlc[cmsg->CIPValue].hlc);
+ else /* no HLC */
+ lhlc = 0;
+
+ if (lbc) {
+ /* have BC: allocate and assemble command string */
+ l = lbc + 7; /* "^SBC=" + value + "\r" + null byte */
+ if (lhlc)
+ l += lhlc + 7; /* ";^SHLC=" + value */
+ commands[AT_BC] = kmalloc(l, GFP_KERNEL);
+ if (!commands[AT_BC])
+ goto oom;
+ strcpy(commands[AT_BC], "^SBC=");
+ if (cmsg->BC && cmsg->BC[0]) /* BC specified explicitly */
+ decode_ie(cmsg->BC, commands[AT_BC] + 5);
+ else /* BC derived from CIP */
+ strcpy(commands[AT_BC] + 5,
+ cip2bchlc[cmsg->CIPValue].bc);
+ if (lhlc) {
+ strcpy(commands[AT_BC] + lbc + 5, ";^SHLC=");
+ if (cmsg->HLC && cmsg->HLC[0])
+ /* HLC specified explicitly */
+ decode_ie(cmsg->HLC,
+ commands[AT_BC] + lbc + 12);
+ else /* HLC derived from CIP */
+ strcpy(commands[AT_BC] + lbc + 12,
+ cip2bchlc[cmsg->CIPValue].hlc);
+ }
+ strcpy(commands[AT_BC] + l - 2, "\r");
+ } else {
+ /* no BC */
+ if (lhlc) {
+ dev_notice(cs->dev, "%s: cannot set HLC without BC\n",
+ "CONNECT_REQ");
+ info = CapiIllMessageParmCoding; /* ? */
+ goto error;
+ }
+ }
+
+ /* check/encode parameter: B Protocol */
+ if (cmsg->BProtocol == CAPI_DEFAULT) {
+ bcs->proto2 = L2_HDLC;
+ dev_warn(cs->dev,
+ "B2 Protocol X.75 SLP unsupported, using Transparent\n");
+ } else {
+ switch (cmsg->B1protocol) {
+ case 0:
+ bcs->proto2 = L2_HDLC;
+ break;
+ case 1:
+ bcs->proto2 = L2_VOICE;
+ break;
+ default:
+ dev_warn(cs->dev,
+ "B1 Protocol %u unsupported, using Transparent\n",
+ cmsg->B1protocol);
+ bcs->proto2 = L2_VOICE;
+ }
+ if (cmsg->B2protocol != 1)
+ dev_warn(cs->dev,
+ "B2 Protocol %u unsupported, using Transparent\n",
+ cmsg->B2protocol);
+ if (cmsg->B3protocol != 0)
+ dev_warn(cs->dev,
+ "B3 Protocol %u unsupported, using Transparent\n",
+ cmsg->B3protocol);
+ ignore_cstruct_param(cs, cmsg->B1configuration,
+ "CONNECT_REQ", "B1 Configuration");
+ ignore_cstruct_param(cs, cmsg->B2configuration,
+ "CONNECT_REQ", "B2 Configuration");
+ ignore_cstruct_param(cs, cmsg->B3configuration,
+ "CONNECT_REQ", "B3 Configuration");
+ }
+ commands[AT_PROTO] = kmalloc(9, GFP_KERNEL);
+ if (!commands[AT_PROTO])
+ goto oom;
+ snprintf(commands[AT_PROTO], 9, "^SBPR=%u\r", bcs->proto2);
+
+ /* ToDo: check/encode remaining parameters */
+ ignore_cstruct_param(cs, cmsg->CalledPartySubaddress,
+ "CONNECT_REQ", "Called pty subaddr");
+ ignore_cstruct_param(cs, cmsg->CallingPartySubaddress,
+ "CONNECT_REQ", "Calling pty subaddr");
+ ignore_cstruct_param(cs, cmsg->LLC,
+ "CONNECT_REQ", "LLC");
+ if (cmsg->AdditionalInfo != CAPI_DEFAULT) {
+ ignore_cstruct_param(cs, cmsg->BChannelinformation,
+ "CONNECT_REQ", "B Channel Information");
+ ignore_cstruct_param(cs, cmsg->Keypadfacility,
+ "CONNECT_REQ", "Keypad Facility");
+ ignore_cstruct_param(cs, cmsg->Useruserdata,
+ "CONNECT_REQ", "User-User Data");
+ ignore_cstruct_param(cs, cmsg->Facilitydataarray,
+ "CONNECT_REQ", "Facility Data Array");
+ }
+
+ /* encode parameter: B channel to use */
+ commands[AT_ISO] = kmalloc(9, GFP_KERNEL);
+ if (!commands[AT_ISO])
+ goto oom;
+ snprintf(commands[AT_ISO], 9, "^SISO=%u\r",
+ (unsigned) bcs->channel + 1);
+
+ /* queue & schedule EV_DIAL event */
+ if (!gigaset_add_event(cs, &bcs->at_state, EV_DIAL, commands,
+ bcs->at_state.seq_index, NULL)) {
+ info = CAPI_MSGOSRESOURCEERR;
+ goto error;
+ }
+ gigaset_schedule_event(cs);
+ send_conf(iif, ap, skb, CapiSuccess);
+ return;
+
+oom:
+ dev_err(cs->dev, "%s: out of memory\n", __func__);
+ info = CAPI_MSGOSRESOURCEERR;
+error:
+ if (commands)
+ for (i = 0; i < AT_NUM; i++)
+ kfree(commands[i]);
+ kfree(commands);
+ gigaset_free_channel(bcs);
+ send_conf(iif, ap, skb, info);
+}
+
+/*
+ * process CONNECT_RESP message
+ * checks protocol parameters and queues an ACCEPT or HUP event
+ */
+static void do_connect_resp(struct gigaset_capi_ctr *iif,
+ struct gigaset_capi_appl *ap,
+ struct sk_buff *skb)
+{
+ struct cardstate *cs = iif->ctr.driverdata;
+ _cmsg *cmsg = &iif->acmsg;
+ struct bc_state *bcs;
+ struct gigaset_capi_appl *oap;
+ unsigned long flags;
+ int channel;
+
+ /* decode message */
+ if (capi_message2cmsg(cmsg, skb->data)) {
+ dev_err(cs->dev, "%s: message parser failure\n", __func__);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ dump_cmsg(DEBUG_CMD, __func__, cmsg);
+ dev_kfree_skb_any(skb);
+
+ /* extract and check channel number from PLCI */
+ channel = (cmsg->adr.adrPLCI >> 8) & 0xff;
+ if (!channel || channel > cs->channels) {
+ dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
+ "CONNECT_RESP", "PLCI", cmsg->adr.adrPLCI);
+ return;
+ }
+ bcs = cs->bcs + channel - 1;
+
+ switch (cmsg->Reject) {
+ case 0: /* Accept */
+ /* drop all competing applications, keep only this one */
+ spin_lock_irqsave(&bcs->aplock, flags);
+ while (bcs->ap != NULL) {
+ oap = bcs->ap;
+ bcs->ap = oap->bcnext;
+ if (oap != ap) {
+ spin_unlock_irqrestore(&bcs->aplock, flags);
+ send_disconnect_ind(bcs, oap,
+ CapiCallGivenToOtherApplication);
+ spin_lock_irqsave(&bcs->aplock, flags);
+ }
+ }
+ ap->bcnext = NULL;
+ bcs->ap = ap;
+ spin_unlock_irqrestore(&bcs->aplock, flags);
+
+ bcs->rx_bufsize = ap->rp.datablklen;
+ dev_kfree_skb(bcs->rx_skb);
+ gigaset_new_rx_skb(bcs);
+ bcs->chstate |= CHS_NOTIFY_LL;
+
+ /* check/encode B channel protocol */
+ if (cmsg->BProtocol == CAPI_DEFAULT) {
+ bcs->proto2 = L2_HDLC;
+ dev_warn(cs->dev,
+ "B2 Protocol X.75 SLP unsupported, using Transparent\n");
+ } else {
+ switch (cmsg->B1protocol) {
+ case 0:
+ bcs->proto2 = L2_HDLC;
+ break;
+ case 1:
+ bcs->proto2 = L2_VOICE;
+ break;
+ default:
+ dev_warn(cs->dev,
+ "B1 Protocol %u unsupported, using Transparent\n",
+ cmsg->B1protocol);
+ bcs->proto2 = L2_VOICE;
+ }
+ if (cmsg->B2protocol != 1)
+ dev_warn(cs->dev,
+ "B2 Protocol %u unsupported, using Transparent\n",
+ cmsg->B2protocol);
+ if (cmsg->B3protocol != 0)
+ dev_warn(cs->dev,
+ "B3 Protocol %u unsupported, using Transparent\n",
+ cmsg->B3protocol);
+ ignore_cstruct_param(cs, cmsg->B1configuration,
+ "CONNECT_RESP", "B1 Configuration");
+ ignore_cstruct_param(cs, cmsg->B2configuration,
+ "CONNECT_RESP", "B2 Configuration");
+ ignore_cstruct_param(cs, cmsg->B3configuration,
+ "CONNECT_RESP", "B3 Configuration");
+ }
+
+ /* ToDo: check/encode remaining parameters */
+ ignore_cstruct_param(cs, cmsg->ConnectedNumber,
+ "CONNECT_RESP", "Connected Number");
+ ignore_cstruct_param(cs, cmsg->ConnectedSubaddress,
+ "CONNECT_RESP", "Connected Subaddress");
+ ignore_cstruct_param(cs, cmsg->LLC,
+ "CONNECT_RESP", "LLC");
+ if (cmsg->AdditionalInfo != CAPI_DEFAULT) {
+ ignore_cstruct_param(cs, cmsg->BChannelinformation,
+ "CONNECT_RESP", "BChannel Information");
+ ignore_cstruct_param(cs, cmsg->Keypadfacility,
+ "CONNECT_RESP", "Keypad Facility");
+ ignore_cstruct_param(cs, cmsg->Useruserdata,
+ "CONNECT_RESP", "User-User Data");
+ ignore_cstruct_param(cs, cmsg->Facilitydataarray,
+ "CONNECT_RESP", "Facility Data Array");
+ }
+
+ /* Accept call */
+ if (!gigaset_add_event(cs, &cs->bcs[channel - 1].at_state,
+ EV_ACCEPT, NULL, 0, NULL))
+ return;
+ gigaset_schedule_event(cs);
+ return;
+
+ case 1: /* Ignore */
+ /* send DISCONNECT_IND to this application */
+ send_disconnect_ind(bcs, ap, 0);
+
+ /* remove it from the list of listening apps */
+ spin_lock_irqsave(&bcs->aplock, flags);
+ if (bcs->ap == ap) {
+ bcs->ap = ap->bcnext;
+ if (bcs->ap == NULL) {
+ /* last one: stop ev-layer hupD notifications */
+ bcs->apconnstate = APCONN_NONE;
+ bcs->chstate &= ~CHS_NOTIFY_LL;
+ }
+ spin_unlock_irqrestore(&bcs->aplock, flags);
+ return;
+ }
+ for (oap = bcs->ap; oap != NULL; oap = oap->bcnext) {
+ if (oap->bcnext == ap) {
+ oap->bcnext = oap->bcnext->bcnext;
+ spin_unlock_irqrestore(&bcs->aplock, flags);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&bcs->aplock, flags);
+ dev_err(cs->dev, "%s: application %u not found\n",
+ __func__, ap->id);
+ return;
+
+ default: /* Reject */
+ /* drop all competing applications, keep only this one */
+ spin_lock_irqsave(&bcs->aplock, flags);
+ while (bcs->ap != NULL) {
+ oap = bcs->ap;
+ bcs->ap = oap->bcnext;
+ if (oap != ap) {
+ spin_unlock_irqrestore(&bcs->aplock, flags);
+ send_disconnect_ind(bcs, oap,
+ CapiCallGivenToOtherApplication);
+ spin_lock_irqsave(&bcs->aplock, flags);
+ }
+ }
+ ap->bcnext = NULL;
+ bcs->ap = ap;
+ spin_unlock_irqrestore(&bcs->aplock, flags);
+
+ /* reject call - will trigger DISCONNECT_IND for this app */
+ dev_info(cs->dev, "%s: Reject=%x\n",
+ "CONNECT_RESP", cmsg->Reject);
+ if (!gigaset_add_event(cs, &cs->bcs[channel - 1].at_state,
+ EV_HUP, NULL, 0, NULL))
+ return;
+ gigaset_schedule_event(cs);
+ return;
+ }
+}
+
+/*
+ * process CONNECT_B3_REQ message
+ * build NCCI and emit CONNECT_B3_CONF reply
+ */
+static void do_connect_b3_req(struct gigaset_capi_ctr *iif,
+ struct gigaset_capi_appl *ap,
+ struct sk_buff *skb)
+{
+ struct cardstate *cs = iif->ctr.driverdata;
+ _cmsg *cmsg = &iif->acmsg;
+ struct bc_state *bcs;
+ int channel;
+
+ /* decode message */
+ if (capi_message2cmsg(cmsg, skb->data)) {
+ dev_err(cs->dev, "%s: message parser failure\n", __func__);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ dump_cmsg(DEBUG_CMD, __func__, cmsg);
+
+ /* extract and check channel number from PLCI */
+ channel = (cmsg->adr.adrPLCI >> 8) & 0xff;
+ if (!channel || channel > cs->channels) {
+ dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
+ "CONNECT_B3_REQ", "PLCI", cmsg->adr.adrPLCI);
+ send_conf(iif, ap, skb, CapiIllContrPlciNcci);
+ return;
+ }
+ bcs = &cs->bcs[channel - 1];
+
+ /* mark logical connection active */
+ bcs->apconnstate = APCONN_ACTIVE;
+
+ /* build NCCI: always 1 (one B3 connection only) */
+ cmsg->adr.adrNCCI |= 1 << 16;
+
+ /* NCPI parameter: not applicable for B3 Transparent */
+ ignore_cstruct_param(cs, cmsg->NCPI, "CONNECT_B3_REQ", "NCPI");
+ send_conf(iif, ap, skb,
+ (cmsg->NCPI && cmsg->NCPI[0]) ?
+ CapiNcpiNotSupportedByProtocol : CapiSuccess);
+}
+
+/*
+ * process CONNECT_B3_RESP message
+ * Depending on the Reject parameter, either emit CONNECT_B3_ACTIVE_IND
+ * or queue EV_HUP and emit DISCONNECT_B3_IND.
+ * The emitted message is always shorter than the received one,
+ * so the skb can be reused.
+ */
+static void do_connect_b3_resp(struct gigaset_capi_ctr *iif,
+ struct gigaset_capi_appl *ap,
+ struct sk_buff *skb)
+{
+ struct cardstate *cs = iif->ctr.driverdata;
+ _cmsg *cmsg = &iif->acmsg;
+ struct bc_state *bcs;
+ int channel;
+ unsigned int msgsize;
+ u8 command;
+
+ /* decode message */
+ if (capi_message2cmsg(cmsg, skb->data)) {
+ dev_err(cs->dev, "%s: message parser failure\n", __func__);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ dump_cmsg(DEBUG_CMD, __func__, cmsg);
+
+ /* extract and check channel number and NCCI */
+ channel = (cmsg->adr.adrNCCI >> 8) & 0xff;
+ if (!channel || channel > cs->channels ||
+ ((cmsg->adr.adrNCCI >> 16) & 0xffff) != 1) {
+ dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
+ "CONNECT_B3_RESP", "NCCI", cmsg->adr.adrNCCI);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ bcs = &cs->bcs[channel - 1];
+
+ if (cmsg->Reject) {
+ /* Reject: clear B3 connect received flag */
+ bcs->apconnstate = APCONN_SETUP;
+
+ /* trigger hangup, causing eventual DISCONNECT_IND */
+ if (!gigaset_add_event(cs, &bcs->at_state,
+ EV_HUP, NULL, 0, NULL)) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ gigaset_schedule_event(cs);
+
+ /* emit DISCONNECT_B3_IND */
+ command = CAPI_DISCONNECT_B3;
+ msgsize = CAPI_DISCONNECT_B3_IND_BASELEN;
+ } else {
+ /*
+ * Accept: emit CONNECT_B3_ACTIVE_IND immediately, as
+ * we only send CONNECT_B3_IND if the B channel is up
+ */
+ command = CAPI_CONNECT_B3_ACTIVE;
+ msgsize = CAPI_CONNECT_B3_ACTIVE_IND_BASELEN;
+ }
+ capi_cmsg_header(cmsg, ap->id, command, CAPI_IND,
+ ap->nextMessageNumber++, cmsg->adr.adrNCCI);
+ __skb_trim(skb, msgsize);
+ if (capi_cmsg2message(cmsg, skb->data)) {
+ dev_err(cs->dev, "%s: message parser failure\n", __func__);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ dump_cmsg(DEBUG_CMD, __func__, cmsg);
+ capi_ctr_handle_message(&iif->ctr, ap->id, skb);
+}
+
+/*
+ * process DISCONNECT_REQ message
+ * schedule EV_HUP and emit DISCONNECT_B3_IND if necessary,
+ * emit DISCONNECT_CONF reply
+ */
+static void do_disconnect_req(struct gigaset_capi_ctr *iif,
+ struct gigaset_capi_appl *ap,
+ struct sk_buff *skb)
+{
+ struct cardstate *cs = iif->ctr.driverdata;
+ _cmsg *cmsg = &iif->acmsg;
+ struct bc_state *bcs;
+ _cmsg *b3cmsg;
+ struct sk_buff *b3skb;
+ int channel;
+
+ /* decode message */
+ if (capi_message2cmsg(cmsg, skb->data)) {
+ dev_err(cs->dev, "%s: message parser failure\n", __func__);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ dump_cmsg(DEBUG_CMD, __func__, cmsg);
+
+ /* extract and check channel number from PLCI */
+ channel = (cmsg->adr.adrPLCI >> 8) & 0xff;
+ if (!channel || channel > cs->channels) {
+ dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
+ "DISCONNECT_REQ", "PLCI", cmsg->adr.adrPLCI);
+ send_conf(iif, ap, skb, CapiIllContrPlciNcci);
+ return;
+ }
+ bcs = cs->bcs + channel - 1;
+
+ /* ToDo: process parameter: Additional info */
+ if (cmsg->AdditionalInfo != CAPI_DEFAULT) {
+ ignore_cstruct_param(cs, cmsg->BChannelinformation,
+ "DISCONNECT_REQ", "B Channel Information");
+ ignore_cstruct_param(cs, cmsg->Keypadfacility,
+ "DISCONNECT_REQ", "Keypad Facility");
+ ignore_cstruct_param(cs, cmsg->Useruserdata,
+ "DISCONNECT_REQ", "User-User Data");
+ ignore_cstruct_param(cs, cmsg->Facilitydataarray,
+ "DISCONNECT_REQ", "Facility Data Array");
+ }
+
+ /* skip if DISCONNECT_IND already sent */
+ if (!bcs->apconnstate)
+ return;
+
+ /* check for active logical connection */
+ if (bcs->apconnstate >= APCONN_ACTIVE) {
+ /* clear it */
+ bcs->apconnstate = APCONN_SETUP;
+
+ /*
+ * emit DISCONNECT_B3_IND with cause 0x3301
+ * use separate cmsg structure, as the content of iif->acmsg
+ * is still needed for creating the _CONF message
+ */
+ b3cmsg = kmalloc(sizeof(*b3cmsg), GFP_KERNEL);
+ if (!b3cmsg) {
+ dev_err(cs->dev, "%s: out of memory\n", __func__);
+ send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
+ return;
+ }
+ capi_cmsg_header(b3cmsg, ap->id, CAPI_DISCONNECT_B3, CAPI_IND,
+ ap->nextMessageNumber++,
+ cmsg->adr.adrPLCI | (1 << 16));
+ b3cmsg->Reason_B3 = CapiProtocolErrorLayer1;
+ b3skb = alloc_skb(CAPI_DISCONNECT_B3_IND_BASELEN, GFP_KERNEL);
+ if (b3skb == NULL) {
+ dev_err(cs->dev, "%s: out of memory\n", __func__);
+ send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
+ kfree(b3cmsg);
+ return;
+ }
+ if (capi_cmsg2message(b3cmsg,
+ __skb_put(b3skb, CAPI_DISCONNECT_B3_IND_BASELEN))) {
+ dev_err(cs->dev, "%s: message parser failure\n",
+ __func__);
+ kfree(b3cmsg);
+ dev_kfree_skb_any(b3skb);
+ return;
+ }
+ dump_cmsg(DEBUG_CMD, __func__, b3cmsg);
+ kfree(b3cmsg);
+ capi_ctr_handle_message(&iif->ctr, ap->id, b3skb);
+ }
+
+ /* trigger hangup, causing eventual DISCONNECT_IND */
+ if (!gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL)) {
+ send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
+ return;
+ }
+ gigaset_schedule_event(cs);
+
+ /* emit reply */
+ send_conf(iif, ap, skb, CapiSuccess);
+}
+
+/*
+ * process DISCONNECT_B3_REQ message
+ * schedule EV_HUP and emit DISCONNECT_B3_CONF reply
+ */
+static void do_disconnect_b3_req(struct gigaset_capi_ctr *iif,
+ struct gigaset_capi_appl *ap,
+ struct sk_buff *skb)
+{
+ struct cardstate *cs = iif->ctr.driverdata;
+ _cmsg *cmsg = &iif->acmsg;
+ struct bc_state *bcs;
+ int channel;
+
+ /* decode message */
+ if (capi_message2cmsg(cmsg, skb->data)) {
+ dev_err(cs->dev, "%s: message parser failure\n", __func__);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ dump_cmsg(DEBUG_CMD, __func__, cmsg);
+
+ /* extract and check channel number and NCCI */
+ channel = (cmsg->adr.adrNCCI >> 8) & 0xff;
+ if (!channel || channel > cs->channels ||
+ ((cmsg->adr.adrNCCI >> 16) & 0xffff) != 1) {
+ dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
+ "DISCONNECT_B3_REQ", "NCCI", cmsg->adr.adrNCCI);
+ send_conf(iif, ap, skb, CapiIllContrPlciNcci);
+ return;
+ }
+ bcs = &cs->bcs[channel - 1];
+
+ /* reject if logical connection not active */
+ if (bcs->apconnstate < APCONN_ACTIVE) {
+ send_conf(iif, ap, skb,
+ CapiMessageNotSupportedInCurrentState);
+ return;
+ }
+
+ /* trigger hangup, causing eventual DISCONNECT_B3_IND */
+ if (!gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL)) {
+ send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
+ return;
+ }
+ gigaset_schedule_event(cs);
+
+ /* NCPI parameter: not applicable for B3 Transparent */
+ ignore_cstruct_param(cs, cmsg->NCPI,
+ "DISCONNECT_B3_REQ", "NCPI");
+ send_conf(iif, ap, skb,
+ (cmsg->NCPI && cmsg->NCPI[0]) ?
+ CapiNcpiNotSupportedByProtocol : CapiSuccess);
+}
+
+/*
+ * process DATA_B3_REQ message
+ */
+static void do_data_b3_req(struct gigaset_capi_ctr *iif,
+ struct gigaset_capi_appl *ap,
+ struct sk_buff *skb)
+{
+ struct cardstate *cs = iif->ctr.driverdata;
+ struct bc_state *bcs;
+ int channel = CAPIMSG_PLCI_PART(skb->data);
+ u16 ncci = CAPIMSG_NCCI_PART(skb->data);
+ u16 msglen = CAPIMSG_LEN(skb->data);
+ u16 datalen = CAPIMSG_DATALEN(skb->data);
+ u16 flags = CAPIMSG_FLAGS(skb->data);
+ u16 msgid = CAPIMSG_MSGID(skb->data);
+ u16 handle = CAPIMSG_HANDLE_REQ(skb->data);
+
+ /* frequent message, avoid _cmsg overhead */
+ dump_rawmsg(DEBUG_MCMD, __func__, skb->data);
+
+ /* check parameters */
+ if (channel == 0 || channel > cs->channels || ncci != 1) {
+ dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
+ "DATA_B3_REQ", "NCCI", CAPIMSG_NCCI(skb->data));
+ send_conf(iif, ap, skb, CapiIllContrPlciNcci);
+ return;
+ }
+ bcs = &cs->bcs[channel - 1];
+ if (msglen != CAPI_DATA_B3_REQ_LEN && msglen != CAPI_DATA_B3_REQ_LEN64)
+ dev_notice(cs->dev, "%s: unexpected length %d\n",
+ "DATA_B3_REQ", msglen);
+ if (msglen + datalen != skb->len)
+ dev_notice(cs->dev, "%s: length mismatch (%d+%d!=%d)\n",
+ "DATA_B3_REQ", msglen, datalen, skb->len);
+ if (msglen + datalen > skb->len) {
+ /* message too short for announced data length */
+ send_conf(iif, ap, skb, CapiIllMessageParmCoding); /* ? */
+ return;
+ }
+ if (flags & CAPI_FLAGS_RESERVED) {
+ dev_notice(cs->dev, "%s: reserved flags set (%x)\n",
+ "DATA_B3_REQ", flags);
+ send_conf(iif, ap, skb, CapiIllMessageParmCoding);
+ return;
+ }
+
+ /* reject if logical connection not active */
+ if (bcs->apconnstate < APCONN_ACTIVE) {
+ send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState);
+ return;
+ }
+
+ /* pull CAPI message into link layer header */
+ skb_reset_mac_header(skb);
+ skb->mac_len = msglen;
+ skb_pull(skb, msglen);
+
+ /* pass to device-specific module */
+ if (cs->ops->send_skb(bcs, skb) < 0) {
+ send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
+ return;
+ }
+
+ /*
+	 * DATA_B3_CONF will be sent by gigaset_skb_sent() only if the
+	 * "delivery confirmation" bit is set; otherwise we have to send it now
+ */
+ if (!(flags & CAPI_FLAGS_DELIVERY_CONFIRMATION))
+ send_data_b3_conf(cs, &iif->ctr, ap->id, msgid, channel, handle,
+ flags ? CapiFlagsNotSupportedByProtocol
+ : CAPI_NOERROR);
+}
+
+/*
+ * process RESET_B3_REQ message
+ * just always reply "not supported by current protocol"
+ */
+static void do_reset_b3_req(struct gigaset_capi_ctr *iif,
+ struct gigaset_capi_appl *ap,
+ struct sk_buff *skb)
+{
+ struct cardstate *cs = iif->ctr.driverdata;
+
+ /* decode message */
+ if (capi_message2cmsg(&iif->acmsg, skb->data)) {
+ dev_err(cs->dev, "%s: message parser failure\n", __func__);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
+ send_conf(iif, ap, skb,
+ CapiResetProcedureNotSupportedByCurrentProtocol);
+}
+
+/*
+ * unsupported CAPI message handler
+ */
+static void do_unsupported(struct gigaset_capi_ctr *iif,
+ struct gigaset_capi_appl *ap,
+ struct sk_buff *skb)
+{
+ struct cardstate *cs = iif->ctr.driverdata;
+
+ /* decode message */
+ if (capi_message2cmsg(&iif->acmsg, skb->data)) {
+ dev_err(cs->dev, "%s: message parser failure\n", __func__);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
+ send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState);
+}
+
+/*
+ * CAPI message handler: no-op
+ */
+static void do_nothing(struct gigaset_capi_ctr *iif,
+ struct gigaset_capi_appl *ap,
+ struct sk_buff *skb)
+{
+ struct cardstate *cs = iif->ctr.driverdata;
+
+ /* decode message */
+ if (capi_message2cmsg(&iif->acmsg, skb->data)) {
+ dev_err(cs->dev, "%s: message parser failure\n", __func__);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
+ dev_kfree_skb_any(skb);
+}
+
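+/*
+ * process DATA_B3_RESP message
+ * nothing to do beyond logging, just discard the message
+ */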
+static void do_data_b3_resp(struct gigaset_capi_ctr *iif,
+ struct gigaset_capi_appl *ap,
+ struct sk_buff *skb)
+{
+ dump_rawmsg(DEBUG_MCMD, __func__, skb->data);
+ dev_kfree_skb_any(skb);
+}
+
+/* table of outgoing CAPI message handlers with lookup function */
+typedef void (*capi_send_handler_t)(struct gigaset_capi_ctr *,
+ struct gigaset_capi_appl *,
+ struct sk_buff *);
+
+static struct {
+ u16 cmd;
+ capi_send_handler_t handler;
+} capi_send_handler_table[] = {
+ /* most frequent messages first for faster lookup */
+ { CAPI_DATA_B3_REQ, do_data_b3_req },
+ { CAPI_DATA_B3_RESP, do_data_b3_resp },
+
+ { CAPI_ALERT_REQ, do_alert_req },
+ { CAPI_CONNECT_ACTIVE_RESP, do_nothing },
+ { CAPI_CONNECT_B3_ACTIVE_RESP, do_nothing },
+ { CAPI_CONNECT_B3_REQ, do_connect_b3_req },
+ { CAPI_CONNECT_B3_RESP, do_connect_b3_resp },
+ { CAPI_CONNECT_B3_T90_ACTIVE_RESP, do_nothing },
+ { CAPI_CONNECT_REQ, do_connect_req },
+ { CAPI_CONNECT_RESP, do_connect_resp },
+ { CAPI_DISCONNECT_B3_REQ, do_disconnect_b3_req },
+ { CAPI_DISCONNECT_B3_RESP, do_nothing },
+ { CAPI_DISCONNECT_REQ, do_disconnect_req },
+ { CAPI_DISCONNECT_RESP, do_nothing },
+ { CAPI_FACILITY_REQ, do_facility_req },
+ { CAPI_FACILITY_RESP, do_nothing },
+ { CAPI_LISTEN_REQ, do_listen_req },
+ { CAPI_SELECT_B_PROTOCOL_REQ, do_unsupported },
+ { CAPI_RESET_B3_REQ, do_reset_b3_req },
+ { CAPI_RESET_B3_RESP, do_nothing },
+
+ /*
+ * ToDo: support overlap sending (requires ev-layer state
+ * machine extension to generate additional ATD commands)
+ */
+ { CAPI_INFO_REQ, do_unsupported },
+ { CAPI_INFO_RESP, do_nothing },
+
+ /*
+ * ToDo: what's the proper response for these?
+ */
+ { CAPI_MANUFACTURER_REQ, do_nothing },
+ { CAPI_MANUFACTURER_RESP, do_nothing },
+};
+
+/* look up handler */
+static inline capi_send_handler_t lookup_capi_send_handler(const u16 cmd)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(capi_send_handler_table); i++)
+ if (capi_send_handler_table[i].cmd == cmd)
+ return capi_send_handler_table[i].handler;
+ return NULL;
+}
+
+/**
+ * gigaset_send_message() - accept a CAPI message from an application
+ * @ctr: controller descriptor structure.
+ * @skb: CAPI message.
+ *
+ * Return value: CAPI error code
+ * Note: capidrv (and probably others, too) only uses the return value to
+ * decide whether it has to free the skb (only if result != CAPI_NOERROR (0))
+ */
+static u16 gigaset_send_message(struct capi_ctr *ctr, struct sk_buff *skb)
+{
+ struct gigaset_capi_ctr *iif
+ = container_of(ctr, struct gigaset_capi_ctr, ctr);
+ struct cardstate *cs = ctr->driverdata;
+ struct gigaset_capi_appl *ap;
+ capi_send_handler_t handler;
+
+ /* can only handle linear sk_buffs */
+ if (skb_linearize(skb) < 0) {
+ dev_warn(cs->dev, "%s: skb_linearize failed\n", __func__);
+ return CAPI_MSGOSRESOURCEERR;
+ }
+
+ /* retrieve application data structure */
+ ap = get_appl(iif, CAPIMSG_APPID(skb->data));
+ if (!ap) {
+ dev_notice(cs->dev, "%s: application %u not registered\n",
+ __func__, CAPIMSG_APPID(skb->data));
+ return CAPI_ILLAPPNR;
+ }
+
+ /* look up command */
+ handler = lookup_capi_send_handler(CAPIMSG_CMD(skb->data));
+ if (!handler) {
+ /* unknown/unsupported message type */
+ if (printk_ratelimit())
+ dev_notice(cs->dev, "%s: unsupported message %u\n",
+ __func__, CAPIMSG_CMD(skb->data));
+ return CAPI_ILLCMDORSUBCMDORMSGTOSMALL;
+ }
+
+ /* serialize */
+ if (atomic_add_return(1, &iif->sendqlen) > 1) {
+ /* queue behind other messages */
+ skb_queue_tail(&iif->sendqueue, skb);
+ return CAPI_NOERROR;
+ }
+
+ /* process message */
+ handler(iif, ap, skb);
+
+ /* process other messages arrived in the meantime */
+ while (atomic_sub_return(1, &iif->sendqlen) > 0) {
+ skb = skb_dequeue(&iif->sendqueue);
+ if (!skb) {
+ /* should never happen */
+ dev_err(cs->dev, "%s: send queue empty\n", __func__);
+ continue;
+ }
+ ap = get_appl(iif, CAPIMSG_APPID(skb->data));
+ if (!ap) {
+ /* could that happen? */
+ dev_warn(cs->dev, "%s: application %u vanished\n",
+ __func__, CAPIMSG_APPID(skb->data));
+ continue;
+ }
+ handler = lookup_capi_send_handler(CAPIMSG_CMD(skb->data));
+ if (!handler) {
+ /* should never happen */
+ dev_err(cs->dev, "%s: handler %x vanished\n",
+ __func__, CAPIMSG_CMD(skb->data));
+ continue;
+ }
+ handler(iif, ap, skb);
+ }
+
+ return CAPI_NOERROR;
+}
+
+/**
+ * gigaset_procinfo() - build single line description for controller
+ * @ctr: controller descriptor structure.
+ *
+ * Return value: pointer to generated string (null terminated)
+ */
+static char *gigaset_procinfo(struct capi_ctr *ctr)
+{
+ return ctr->name; /* ToDo: more? */
+}
+
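+/*
+ * seq_file show function for the controller's CAPI proc entry
+ * dumps device, mode and per-channel state
+ */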
+static int gigaset_proc_show(struct seq_file *m, void *v)
+{
+ struct capi_ctr *ctr = m->private;
+ struct cardstate *cs = ctr->driverdata;
+ char *s;
+ int i;
+
+ seq_printf(m, "%-16s %s\n", "name", ctr->name);
+ seq_printf(m, "%-16s %s %s\n", "dev",
+ dev_driver_string(cs->dev), dev_name(cs->dev));
+ seq_printf(m, "%-16s %d\n", "id", cs->myid);
+ if (cs->gotfwver)
+ seq_printf(m, "%-16s %d.%d.%d.%d\n", "firmware",
+ cs->fwver[0], cs->fwver[1], cs->fwver[2], cs->fwver[3]);
+ seq_printf(m, "%-16s %d\n", "channels", cs->channels);
+ seq_printf(m, "%-16s %s\n", "onechannel", cs->onechannel ? "yes" : "no");
+
+ switch (cs->mode) {
+ case M_UNKNOWN:
+ s = "unknown";
+ break;
+ case M_CONFIG:
+ s = "config";
+ break;
+ case M_UNIMODEM:
+ s = "Unimodem";
+ break;
+ case M_CID:
+ s = "CID";
+ break;
+ default:
+ s = "??";
+ }
+ seq_printf(m, "%-16s %s\n", "mode", s);
+
+ switch (cs->mstate) {
+ case MS_UNINITIALIZED:
+ s = "uninitialized";
+ break;
+ case MS_INIT:
+ s = "init";
+ break;
+ case MS_LOCKED:
+ s = "locked";
+ break;
+ case MS_SHUTDOWN:
+ s = "shutdown";
+ break;
+ case MS_RECOVER:
+ s = "recover";
+ break;
+ case MS_READY:
+ s = "ready";
+ break;
+ default:
+ s = "??";
+ }
+ seq_printf(m, "%-16s %s\n", "mstate", s);
+
+ seq_printf(m, "%-16s %s\n", "running", cs->running ? "yes" : "no");
+ seq_printf(m, "%-16s %s\n", "connected", cs->connected ? "yes" : "no");
+ seq_printf(m, "%-16s %s\n", "isdn_up", cs->isdn_up ? "yes" : "no");
+ seq_printf(m, "%-16s %s\n", "cidmode", cs->cidmode ? "yes" : "no");
+
+ for (i = 0; i < cs->channels; i++) {
+ seq_printf(m, "[%d]%-13s %d\n", i, "corrupted",
+ cs->bcs[i].corrupted);
+ seq_printf(m, "[%d]%-13s %d\n", i, "trans_down",
+ cs->bcs[i].trans_down);
+ seq_printf(m, "[%d]%-13s %d\n", i, "trans_up",
+ cs->bcs[i].trans_up);
+ seq_printf(m, "[%d]%-13s %d\n", i, "chstate",
+ cs->bcs[i].chstate);
+ switch (cs->bcs[i].proto2) {
+ case L2_BITSYNC:
+ s = "bitsync";
+ break;
+ case L2_HDLC:
+ s = "HDLC";
+ break;
+ case L2_VOICE:
+ s = "voice";
+ break;
+ default:
+ s = "??";
+ }
+ seq_printf(m, "[%d]%-13s %s\n", i, "proto2", s);
+ }
+ return 0;
+}
+
+/**
+ * gigaset_isdn_regdev() - register device to LL
+ * @cs: device descriptor structure.
+ * @isdnid: device name.
+ *
+ * Return value: 0 on success, error code < 0 on failure
+ */
+int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
+{
+ struct gigaset_capi_ctr *iif;
+ int rc;
+
+ iif = kzalloc(sizeof(*iif), GFP_KERNEL);
+ if (!iif) {
+ pr_err("%s: out of memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* prepare controller structure */
+ iif->ctr.owner = THIS_MODULE;
+ iif->ctr.driverdata = cs;
+ strncpy(iif->ctr.name, isdnid, sizeof(iif->ctr.name) - 1);
+ iif->ctr.driver_name = "gigaset";
+ iif->ctr.load_firmware = NULL;
+ iif->ctr.reset_ctr = NULL;
+ iif->ctr.register_appl = gigaset_register_appl;
+ iif->ctr.release_appl = gigaset_release_appl;
+ iif->ctr.send_message = gigaset_send_message;
+ iif->ctr.procinfo = gigaset_procinfo;
+	iif->ctr.proc_show = gigaset_proc_show;
+ INIT_LIST_HEAD(&iif->appls);
+ skb_queue_head_init(&iif->sendqueue);
+ atomic_set(&iif->sendqlen, 0);
+
+ /* register controller with CAPI */
+ rc = attach_capi_ctr(&iif->ctr);
+ if (rc) {
+ pr_err("attach_capi_ctr failed (%d)\n", rc);
+ kfree(iif);
+ return rc;
+ }
+
+ cs->iif = iif;
+ cs->hw_hdr_len = CAPI_DATA_B3_REQ_LEN;
+ return 0;
+}
+
+/**
+ * gigaset_isdn_unregdev() - unregister device from LL
+ * @cs: device descriptor structure.
+ */
+void gigaset_isdn_unregdev(struct cardstate *cs)
+{
+ struct gigaset_capi_ctr *iif = cs->iif;
+
+ detach_capi_ctr(&iif->ctr);
+ kfree(iif);
+ cs->iif = NULL;
+}
+
+static struct capi_driver capi_driver_gigaset = {
+ .name = "gigaset",
+ .revision = "1.0",
+};
+
+/**
+ * gigaset_isdn_regdrv() - register driver to LL
+ */
+void gigaset_isdn_regdrv(void)
+{
+ pr_info("Kernel CAPI interface\n");
+ register_capi_driver(&capi_driver_gigaset);
+}
+
+/**
+ * gigaset_isdn_unregdrv() - unregister driver from LL
+ */
+void gigaset_isdn_unregdrv(void)
+{
+ unregister_capi_driver(&capi_driver_gigaset);
+}
diff --git a/drivers/staging/isdn/gigaset/common.c b/drivers/staging/isdn/gigaset/common.c
new file mode 100644
index 000000000000..3bb8092858ab
--- /dev/null
+++ b/drivers/staging/isdn/gigaset/common.c
@@ -0,0 +1,1153 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Common code used by all variants of the driver
+ *
+ * Copyright (c) 2001 by Stefan Eilers,
+ * Hansjoerg Lipp <hjlipp@web.de>,
+ * Tilman Schmidt <tilman@imap.cc>.
+ *
+ * =====================================================================
+ * =====================================================================
+ */
+
+#include "gigaset.h"
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+/* Version Information */
+#define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers"
+#define DRIVER_DESC "Driver for Gigaset 307x"
+
+#ifdef CONFIG_GIGASET_DEBUG
+#define DRIVER_DESC_DEBUG " (debug build)"
+#else
+#define DRIVER_DESC_DEBUG ""
+#endif
+
+/* Module parameters */
+int gigaset_debuglevel;
+EXPORT_SYMBOL_GPL(gigaset_debuglevel);
+module_param_named(debug, gigaset_debuglevel, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "debug level");
+
+/* driver state flags */
+#define VALID_MINOR 0x01
+#define VALID_ID 0x02
+
+/**
+ * gigaset_dbg_buffer() - dump data in ASCII and hex for debugging
+ * @level: debugging level.
+ * @msg: message prefix.
+ * @len: number of bytes to dump.
+ * @buf: data to dump.
+ *
+ * If the current debugging level includes one of the bits set in @level,
+ * @len bytes starting at @buf are logged to dmesg at KERN_DEBUG prio,
+ * prefixed by the text @msg.
+ */
+void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
+ size_t len, const unsigned char *buf)
+{
+ unsigned char outbuf[80];
+ unsigned char c;
+ size_t space = sizeof outbuf - 1;
+ unsigned char *out = outbuf;
+ size_t numin = len;
+
+ while (numin--) {
+ c = *buf++;
+ if (c == '~' || c == '^' || c == '\\') {
+ if (!space--)
+ break;
+ *out++ = '\\';
+ }
+ if (c & 0x80) {
+ if (!space--)
+ break;
+ *out++ = '~';
+ c ^= 0x80;
+ }
+ if (c < 0x20 || c == 0x7f) {
+ if (!space--)
+ break;
+ *out++ = '^';
+ c ^= 0x40;
+ }
+ if (!space--)
+ break;
+ *out++ = c;
+ }
+ *out = 0;
+
+ gig_dbg(level, "%s (%u bytes): %s", msg, (unsigned) len, outbuf);
+}
+EXPORT_SYMBOL_GPL(gigaset_dbg_buffer);
+
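+/*
+ * set the modem control lines to @flags and, if @delay is nonzero,
+ * sleep for approximately @delay milliseconds afterwards
+ */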
+static int setflags(struct cardstate *cs, unsigned flags, unsigned delay)
+{
+ int r;
+
+ r = cs->ops->set_modem_ctrl(cs, cs->control_state, flags);
+ cs->control_state = flags;
+ if (r < 0)
+ return r;
+
+ if (delay) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(delay * HZ / 1000);
+ }
+
+ return 0;
+}
+
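+/*
+ * switch the device into configuration mode by toggling the DTR and RTS
+ * modem control lines in a fixed pattern with intervening delays
+ */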
+int gigaset_enterconfigmode(struct cardstate *cs)
+{
+ int i, r;
+
+ cs->control_state = TIOCM_RTS;
+
+ r = setflags(cs, TIOCM_DTR, 200);
+ if (r < 0)
+ goto error;
+ r = setflags(cs, 0, 200);
+ if (r < 0)
+ goto error;
+ for (i = 0; i < 5; ++i) {
+ r = setflags(cs, TIOCM_RTS, 100);
+ if (r < 0)
+ goto error;
+ r = setflags(cs, 0, 100);
+ if (r < 0)
+ goto error;
+ }
+ r = setflags(cs, TIOCM_RTS | TIOCM_DTR, 800);
+ if (r < 0)
+ goto error;
+
+ return 0;
+
+error:
+ dev_err(cs->dev, "error %d on setuartbits\n", -r);
+ cs->control_state = TIOCM_RTS | TIOCM_DTR;
+ cs->ops->set_modem_ctrl(cs, 0, TIOCM_RTS | TIOCM_DTR);
+
+ return -1;
+}
+
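+/*
+ * decrement the timeout counter of an AT state structure
+ * queues an EV_TIMEOUT event and returns 1 when it expires, 0 otherwise
+ */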
+static int test_timeout(struct at_state_t *at_state)
+{
+ if (!at_state->timer_expires)
+ return 0;
+
+ if (--at_state->timer_expires) {
+ gig_dbg(DEBUG_MCMD, "decreased timer of %p to %lu",
+ at_state, at_state->timer_expires);
+ return 0;
+ }
+
+ gigaset_add_event(at_state->cs, at_state, EV_TIMEOUT, NULL,
+ at_state->timer_index, NULL);
+ return 1;
+}
+
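+/*
+ * periodic timer callback
+ * checks all AT state structures for expired timeouts and, while the
+ * device is running, re-arms itself and schedules the event tasklet
+ * if any timeout fired
+ */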
+static void timer_tick(struct timer_list *t)
+{
+ struct cardstate *cs = from_timer(cs, t, timer);
+ unsigned long flags;
+ unsigned channel;
+ struct at_state_t *at_state;
+ int timeout = 0;
+
+ spin_lock_irqsave(&cs->lock, flags);
+
+ for (channel = 0; channel < cs->channels; ++channel)
+ if (test_timeout(&cs->bcs[channel].at_state))
+ timeout = 1;
+
+ if (test_timeout(&cs->at_state))
+ timeout = 1;
+
+ list_for_each_entry(at_state, &cs->temp_at_states, list)
+ if (test_timeout(at_state))
+ timeout = 1;
+
+ if (cs->running) {
+ mod_timer(&cs->timer, jiffies + msecs_to_jiffies(GIG_TICK));
+ if (timeout) {
+ gig_dbg(DEBUG_EVENT, "scheduling timeout");
+ tasklet_schedule(&cs->event_tasklet);
+ }
+ }
+
+ spin_unlock_irqrestore(&cs->lock, flags);
+}
+
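+/*
+ * try to reserve a specific B channel
+ * returns 0 on success or -EBUSY if the channel is already in use
+ */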
+int gigaset_get_channel(struct bc_state *bcs)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bcs->cs->lock, flags);
+ if (bcs->use_count || !try_module_get(bcs->cs->driver->owner)) {
+ gig_dbg(DEBUG_CHANNEL, "could not allocate channel %d",
+ bcs->channel);
+ spin_unlock_irqrestore(&bcs->cs->lock, flags);
+ return -EBUSY;
+ }
+ ++bcs->use_count;
+ bcs->busy = 1;
+ gig_dbg(DEBUG_CHANNEL, "allocated channel %d", bcs->channel);
+ spin_unlock_irqrestore(&bcs->cs->lock, flags);
+ return 0;
+}
+
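+/*
+ * find and reserve an unused B channel
+ * returns its bc_state structure, or NULL if none is available
+ */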
+struct bc_state *gigaset_get_free_channel(struct cardstate *cs)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&cs->lock, flags);
+ if (!try_module_get(cs->driver->owner)) {
+ gig_dbg(DEBUG_CHANNEL,
+ "could not get module for allocating channel");
+ spin_unlock_irqrestore(&cs->lock, flags);
+ return NULL;
+ }
+ for (i = 0; i < cs->channels; ++i)
+ if (!cs->bcs[i].use_count) {
+ ++cs->bcs[i].use_count;
+ cs->bcs[i].busy = 1;
+ spin_unlock_irqrestore(&cs->lock, flags);
+ gig_dbg(DEBUG_CHANNEL, "allocated channel %d", i);
+ return cs->bcs + i;
+ }
+ module_put(cs->driver->owner);
+ spin_unlock_irqrestore(&cs->lock, flags);
+ gig_dbg(DEBUG_CHANNEL, "no free channel");
+ return NULL;
+}
+
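+/*
+ * release a B channel reserved by gigaset_get_channel() or
+ * gigaset_get_free_channel()
+ */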
+void gigaset_free_channel(struct bc_state *bcs)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bcs->cs->lock, flags);
+ if (!bcs->busy) {
+ gig_dbg(DEBUG_CHANNEL, "could not free channel %d",
+ bcs->channel);
+ spin_unlock_irqrestore(&bcs->cs->lock, flags);
+ return;
+ }
+ --bcs->use_count;
+ bcs->busy = 0;
+ module_put(bcs->cs->driver->owner);
+ gig_dbg(DEBUG_CHANNEL, "freed channel %d", bcs->channel);
+ spin_unlock_irqrestore(&bcs->cs->lock, flags);
+}
+
+int gigaset_get_channels(struct cardstate *cs)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&cs->lock, flags);
+ for (i = 0; i < cs->channels; ++i)
+ if (cs->bcs[i].use_count) {
+ spin_unlock_irqrestore(&cs->lock, flags);
+ gig_dbg(DEBUG_CHANNEL,
+ "could not allocate all channels");
+ return -EBUSY;
+ }
+ for (i = 0; i < cs->channels; ++i)
+ ++cs->bcs[i].use_count;
+ spin_unlock_irqrestore(&cs->lock, flags);
+
+ gig_dbg(DEBUG_CHANNEL, "allocated all channels");
+
+ return 0;
+}
+
+void gigaset_free_channels(struct cardstate *cs)
+{
+ unsigned long flags;
+ int i;
+
+ gig_dbg(DEBUG_CHANNEL, "unblocking all channels");
+ spin_lock_irqsave(&cs->lock, flags);
+ for (i = 0; i < cs->channels; ++i)
+ --cs->bcs[i].use_count;
+ spin_unlock_irqrestore(&cs->lock, flags);
+}
+
+void gigaset_block_channels(struct cardstate *cs)
+{
+ unsigned long flags;
+ int i;
+
+ gig_dbg(DEBUG_CHANNEL, "blocking all channels");
+ spin_lock_irqsave(&cs->lock, flags);
+ for (i = 0; i < cs->channels; ++i)
+ ++cs->bcs[i].use_count;
+ spin_unlock_irqrestore(&cs->lock, flags);
+}
+
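+/*
+ * drain the device's event queue, freeing any data attached to the
+ * discarded events
+ */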
+static void clear_events(struct cardstate *cs)
+{
+ struct event_t *ev;
+ unsigned head, tail;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cs->ev_lock, flags);
+
+ head = cs->ev_head;
+ tail = cs->ev_tail;
+
+ while (tail != head) {
+ ev = cs->events + head;
+ kfree(ev->ptr);
+ head = (head + 1) % MAX_EVENTS;
+ }
+
+ cs->ev_head = tail;
+
+ spin_unlock_irqrestore(&cs->ev_lock, flags);
+}
+
+/**
+ * gigaset_add_event() - add event to device event queue
+ * @cs: device descriptor structure.
+ * @at_state: connection state structure.
+ * @type: event type.
+ * @ptr: pointer parameter for event.
+ * @parameter: integer parameter for event.
+ * @arg: pointer parameter for event.
+ *
+ * Allocate an event queue entry from the device's event queue, and set it up
+ * with the parameters given.
+ *
+ * Return value: pointer to the queued event, or NULL if the event queue is full
+ */
+struct event_t *gigaset_add_event(struct cardstate *cs,
+ struct at_state_t *at_state, int type,
+ void *ptr, int parameter, void *arg)
+{
+ unsigned long flags;
+ unsigned next, tail;
+ struct event_t *event = NULL;
+
+ gig_dbg(DEBUG_EVENT, "queueing event %d", type);
+
+ spin_lock_irqsave(&cs->ev_lock, flags);
+
+ tail = cs->ev_tail;
+ next = (tail + 1) % MAX_EVENTS;
+ if (unlikely(next == cs->ev_head))
+ dev_err(cs->dev, "event queue full\n");
+ else {
+ event = cs->events + tail;
+ event->type = type;
+ event->at_state = at_state;
+ event->cid = -1;
+ event->ptr = ptr;
+ event->arg = arg;
+ event->parameter = parameter;
+ cs->ev_tail = next;
+ }
+
+ spin_unlock_irqrestore(&cs->ev_lock, flags);
+
+ return event;
+}
+EXPORT_SYMBOL_GPL(gigaset_add_event);
+
+static void clear_at_state(struct at_state_t *at_state)
+{
+ int i;
+
+ for (i = 0; i < STR_NUM; ++i) {
+ kfree(at_state->str_var[i]);
+ at_state->str_var[i] = NULL;
+ }
+}
+
+static void dealloc_temp_at_states(struct cardstate *cs)
+{
+ struct at_state_t *cur, *next;
+
+ list_for_each_entry_safe(cur, next, &cs->temp_at_states, list) {
+ list_del(&cur->list);
+ clear_at_state(cur);
+ kfree(cur);
+ }
+}
+
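+/*
+ * free all resources attached to a B channel state structure
+ */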
+static void gigaset_freebcs(struct bc_state *bcs)
+{
+ int i;
+
+ gig_dbg(DEBUG_INIT, "freeing bcs[%d]->hw", bcs->channel);
+ bcs->cs->ops->freebcshw(bcs);
+
+ gig_dbg(DEBUG_INIT, "clearing bcs[%d]->at_state", bcs->channel);
+ clear_at_state(&bcs->at_state);
+ gig_dbg(DEBUG_INIT, "freeing bcs[%d]->skb", bcs->channel);
+ dev_kfree_skb(bcs->rx_skb);
+ bcs->rx_skb = NULL;
+
+ for (i = 0; i < AT_NUM; ++i) {
+ kfree(bcs->commands[i]);
+ bcs->commands[i] = NULL;
+ }
+}
+
+static struct cardstate *alloc_cs(struct gigaset_driver *drv)
+{
+ unsigned long flags;
+ unsigned i;
+ struct cardstate *cs;
+ struct cardstate *ret = NULL;
+
+ spin_lock_irqsave(&drv->lock, flags);
+ if (drv->blocked)
+ goto exit;
+ for (i = 0; i < drv->minors; ++i) {
+ cs = drv->cs + i;
+ if (!(cs->flags & VALID_MINOR)) {
+ cs->flags = VALID_MINOR;
+ ret = cs;
+ break;
+ }
+ }
+exit:
+ spin_unlock_irqrestore(&drv->lock, flags);
+ return ret;
+}
+
+static void free_cs(struct cardstate *cs)
+{
+ cs->flags = 0;
+}
+
+static void make_valid(struct cardstate *cs, unsigned mask)
+{
+ unsigned long flags;
+ struct gigaset_driver *drv = cs->driver;
+ spin_lock_irqsave(&drv->lock, flags);
+ cs->flags |= mask;
+ spin_unlock_irqrestore(&drv->lock, flags);
+}
+
+static void make_invalid(struct cardstate *cs, unsigned mask)
+{
+ unsigned long flags;
+ struct gigaset_driver *drv = cs->driver;
+ spin_lock_irqsave(&drv->lock, flags);
+ cs->flags &= ~mask;
+ spin_unlock_irqrestore(&drv->lock, flags);
+}
+
+/**
+ * gigaset_freecs() - free all associated resources of a device
+ * @cs: device descriptor structure.
+ *
+ * Stops all tasklets and timers, unregisters the device from all
+ * subsystems it was registered to, deallocates the device structure
+ * @cs and all structures referenced from it.
+ * Operations on the device should be stopped before calling this.
+ */
+void gigaset_freecs(struct cardstate *cs)
+{
+ int i;
+ unsigned long flags;
+
+ if (!cs)
+ return;
+
+ mutex_lock(&cs->mutex);
+
+ spin_lock_irqsave(&cs->lock, flags);
+ cs->running = 0;
+ spin_unlock_irqrestore(&cs->lock, flags); /* event handler and timer are
+ not rescheduled below */
+
+ tasklet_kill(&cs->event_tasklet);
+ del_timer_sync(&cs->timer);
+
+ switch (cs->cs_init) {
+ default:
+ /* clear B channel structures */
+ for (i = 0; i < cs->channels; ++i) {
+ gig_dbg(DEBUG_INIT, "clearing bcs[%d]", i);
+ gigaset_freebcs(cs->bcs + i);
+ }
+
+ /* clear device sysfs */
+ gigaset_free_dev_sysfs(cs);
+
+ gigaset_if_free(cs);
+
+ gig_dbg(DEBUG_INIT, "clearing hw");
+ cs->ops->freecshw(cs);
+
+ /* fall through */
+ case 2: /* error in initcshw */
+ /* Deregister from LL */
+ make_invalid(cs, VALID_ID);
+ gigaset_isdn_unregdev(cs);
+
+ /* fall through */
+ case 1: /* error when registering to LL */
+ gig_dbg(DEBUG_INIT, "clearing at_state");
+ clear_at_state(&cs->at_state);
+ dealloc_temp_at_states(cs);
+ clear_events(cs);
+ tty_port_destroy(&cs->port);
+
+ /* fall through */
+ case 0: /* error in basic setup */
+ gig_dbg(DEBUG_INIT, "freeing inbuf");
+ kfree(cs->inbuf);
+ kfree(cs->bcs);
+ }
+
+ mutex_unlock(&cs->mutex);
+ free_cs(cs);
+}
+EXPORT_SYMBOL_GPL(gigaset_freecs);
+
+void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs,
+ struct cardstate *cs, int cid)
+{
+ int i;
+
+ INIT_LIST_HEAD(&at_state->list);
+ at_state->waiting = 0;
+ at_state->getstring = 0;
+ at_state->pending_commands = 0;
+ at_state->timer_expires = 0;
+ at_state->timer_active = 0;
+ at_state->timer_index = 0;
+ at_state->seq_index = 0;
+ at_state->ConState = 0;
+ for (i = 0; i < STR_NUM; ++i)
+ at_state->str_var[i] = NULL;
+ at_state->int_var[VAR_ZDLE] = 0;
+ at_state->int_var[VAR_ZCTP] = -1;
+ at_state->int_var[VAR_ZSAU] = ZSAU_NULL;
+ at_state->cs = cs;
+ at_state->bcs = bcs;
+ at_state->cid = cid;
+ if (!cid)
+ at_state->replystruct = cs->tabnocid;
+ else
+ at_state->replystruct = cs->tabcid;
+}
+
+
+static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct cardstate *cs)
+/* inbuf->read must be allocated before! */
+{
+ inbuf->head = 0;
+ inbuf->tail = 0;
+ inbuf->cs = cs;
+ inbuf->inputstate = INS_command;
+}
+
+/**
+ * gigaset_fill_inbuf() - append received data to input buffer
+ * @inbuf: buffer structure.
+ * @src: received data.
+ * @numbytes: number of bytes received.
+ *
+ * Return value: !=0 if some data was appended
+ */
+int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src,
+ unsigned numbytes)
+{
+ unsigned n, head, tail, bytesleft;
+
+ gig_dbg(DEBUG_INTR, "received %u bytes", numbytes);
+
+ if (!numbytes)
+ return 0;
+
+ bytesleft = numbytes;
+ tail = inbuf->tail;
+ head = inbuf->head;
+ gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
+
+ while (bytesleft) {
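+ /* determine the largest contiguous free area in the ring buffer,
+ * keeping one byte unused so head == tail always means "empty" */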
+ if (head > tail)
+ n = head - 1 - tail;
+ else if (head == 0)
+ n = (RBUFSIZE - 1) - tail;
+ else
+ n = RBUFSIZE - tail;
+ if (!n) {
+ dev_err(inbuf->cs->dev,
+ "buffer overflow (%u bytes lost)\n",
+ bytesleft);
+ break;
+ }
+ if (n > bytesleft)
+ n = bytesleft;
+ memcpy(inbuf->data + tail, src, n);
+ bytesleft -= n;
+ tail = (tail + n) % RBUFSIZE;
+ src += n;
+ }
+ gig_dbg(DEBUG_INTR, "setting tail to %u", tail);
+ inbuf->tail = tail;
+ return numbytes != bytesleft;
+}
+EXPORT_SYMBOL_GPL(gigaset_fill_inbuf);
+
+/* Initialize the b-channel structure */
+static int gigaset_initbcs(struct bc_state *bcs, struct cardstate *cs,
+ int channel)
+{
+ int i;
+
+ bcs->tx_skb = NULL;
+
+ skb_queue_head_init(&bcs->squeue);
+
+ bcs->corrupted = 0;
+ bcs->trans_down = 0;
+ bcs->trans_up = 0;
+
+ gig_dbg(DEBUG_INIT, "setting up bcs[%d]->at_state", channel);
+ gigaset_at_init(&bcs->at_state, bcs, cs, -1);
+
+#ifdef CONFIG_GIGASET_DEBUG
+ bcs->emptycount = 0;
+#endif
+
+ bcs->rx_bufsize = 0;
+ bcs->rx_skb = NULL;
+ bcs->rx_fcs = PPP_INITFCS;
+ bcs->inputstate = 0;
+ bcs->channel = channel;
+ bcs->cs = cs;
+
+ bcs->chstate = 0;
+ bcs->use_count = 1;
+ bcs->busy = 0;
+ bcs->ignore = cs->ignoreframes;
+
+ for (i = 0; i < AT_NUM; ++i)
+ bcs->commands[i] = NULL;
+
+ spin_lock_init(&bcs->aplock);
+ bcs->ap = NULL;
+ bcs->apconnstate = 0;
+
+ gig_dbg(DEBUG_INIT, " setting up bcs[%d]->hw", channel);
+ return cs->ops->initbcshw(bcs);
+}
+
+/**
+ * gigaset_initcs() - initialize device structure
+ * @drv: hardware driver the device belongs to
+ * @channels: number of B channels supported by device
+ * @onechannel: !=0 if B channel data and AT commands share one
+ * communication channel (M10x),
+ * ==0 if B channels have separate communication channels (base)
+ * @ignoreframes: number of frames to ignore after setting up B channel
+ * @cidmode: !=0: start in CallID mode
+ * @modulename: name of driver module for LL registration
+ *
+ * Allocate and initialize a cardstate structure for the Gigaset driver.
+ * Calls the hardware-dependent gigaset_initcshw() function and the
+ * B channel initialization function gigaset_initbcs() for each B channel.
+ *
+ * Return value:
+ * pointer to cardstate structure on success, NULL on failure
+ */
+struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
+ int onechannel, int ignoreframes,
+ int cidmode, const char *modulename)
+{
+ struct cardstate *cs;
+ unsigned long flags;
+ int i;
+
+ gig_dbg(DEBUG_INIT, "allocating cs");
+ cs = alloc_cs(drv);
+ if (!cs) {
+ pr_err("maximum number of devices exceeded\n");
+ return NULL;
+ }
+
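+ /* cs_init records how far initialization has progressed;
+ * gigaset_freecs() switches on it to undo exactly the completed steps */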
+ cs->cs_init = 0;
+ cs->channels = channels;
+ cs->onechannel = onechannel;
+ cs->ignoreframes = ignoreframes;
+ INIT_LIST_HEAD(&cs->temp_at_states);
+ cs->running = 0;
+ timer_setup(&cs->timer, timer_tick, 0);
+ spin_lock_init(&cs->ev_lock);
+ cs->ev_tail = 0;
+ cs->ev_head = 0;
+
+ tasklet_init(&cs->event_tasklet, gigaset_handle_event,
+ (unsigned long) cs);
+ tty_port_init(&cs->port);
+ cs->commands_pending = 0;
+ cs->cur_at_seq = 0;
+ cs->gotfwver = -1;
+ cs->dev = NULL;
+ cs->tty_dev = NULL;
+ cs->cidmode = cidmode != 0;
+ cs->tabnocid = gigaset_tab_nocid;
+ cs->tabcid = gigaset_tab_cid;
+
+ init_waitqueue_head(&cs->waitqueue);
+ cs->waiting = 0;
+
+ cs->mode = M_UNKNOWN;
+ cs->mstate = MS_UNINITIALIZED;
+
+ cs->bcs = kmalloc_array(channels, sizeof(struct bc_state), GFP_KERNEL);
+ cs->inbuf = kmalloc(sizeof(struct inbuf_t), GFP_KERNEL);
+ if (!cs->bcs || !cs->inbuf) {
+ pr_err("out of memory\n");
+ goto error;
+ }
+ ++cs->cs_init;
+
+ gig_dbg(DEBUG_INIT, "setting up at_state");
+ spin_lock_init(&cs->lock);
+ gigaset_at_init(&cs->at_state, NULL, cs, 0);
+ cs->dle = 0;
+ cs->cbytes = 0;
+
+ gig_dbg(DEBUG_INIT, "setting up inbuf");
+ gigaset_inbuf_init(cs->inbuf, cs);
+
+ cs->connected = 0;
+ cs->isdn_up = 0;
+
+ gig_dbg(DEBUG_INIT, "setting up cmdbuf");
+ cs->cmdbuf = cs->lastcmdbuf = NULL;
+ spin_lock_init(&cs->cmdlock);
+ cs->curlen = 0;
+ cs->cmdbytes = 0;
+
+ gig_dbg(DEBUG_INIT, "setting up iif");
+ if (gigaset_isdn_regdev(cs, modulename) < 0) {
+ pr_err("error registering ISDN device\n");
+ goto error;
+ }
+
+ make_valid(cs, VALID_ID);
+ ++cs->cs_init;
+ gig_dbg(DEBUG_INIT, "setting up hw");
+ if (cs->ops->initcshw(cs) < 0)
+ goto error;
+
+ ++cs->cs_init;
+
+ /* set up character device */
+ gigaset_if_init(cs);
+
+ /* set up device sysfs */
+ gigaset_init_dev_sysfs(cs);
+
+ /* set up channel data structures */
+ for (i = 0; i < channels; ++i) {
+ gig_dbg(DEBUG_INIT, "setting up bcs[%d]", i);
+ if (gigaset_initbcs(cs->bcs + i, cs, i) < 0) {
+ pr_err("could not allocate channel %d data\n", i);
+ goto error;
+ }
+ }
+
+ spin_lock_irqsave(&cs->lock, flags);
+ cs->running = 1;
+ spin_unlock_irqrestore(&cs->lock, flags);
+ cs->timer.expires = jiffies + msecs_to_jiffies(GIG_TICK);
+ add_timer(&cs->timer);
+
+ gig_dbg(DEBUG_INIT, "cs initialized");
+ return cs;
+
+error:
+ gig_dbg(DEBUG_INIT, "failed");
+ gigaset_freecs(cs);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(gigaset_initcs);
+
+/* ReInitialize the b-channel structure on hangup */
+void gigaset_bcs_reinit(struct bc_state *bcs)
+{
+ struct sk_buff *skb;
+ struct cardstate *cs = bcs->cs;
+ unsigned long flags;
+
+ while ((skb = skb_dequeue(&bcs->squeue)) != NULL)
+ dev_kfree_skb(skb);
+
+ spin_lock_irqsave(&cs->lock, flags);
+ clear_at_state(&bcs->at_state);
+ bcs->at_state.ConState = 0;
+ bcs->at_state.timer_active = 0;
+ bcs->at_state.timer_expires = 0;
+ bcs->at_state.cid = -1; /* No CID defined */
+ spin_unlock_irqrestore(&cs->lock, flags);
+
+ bcs->inputstate = 0;
+
+#ifdef CONFIG_GIGASET_DEBUG
+ bcs->emptycount = 0;
+#endif
+
+ bcs->rx_fcs = PPP_INITFCS;
+ bcs->chstate = 0;
+
+ bcs->ignore = cs->ignoreframes;
+ dev_kfree_skb(bcs->rx_skb);
+ bcs->rx_skb = NULL;
+
+ cs->ops->reinitbcshw(bcs);
+}
+
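+/* Reset the device state after a stop or shutdown: discard queued AT
+ * commands, reinitialize the AT state and input buffer, and rebuild the
+ * B channel structures.
+ */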
+static void cleanup_cs(struct cardstate *cs)
+{
+ struct cmdbuf_t *cb, *tcb;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cs->lock, flags);
+
+ cs->mode = M_UNKNOWN;
+ cs->mstate = MS_UNINITIALIZED;
+
+ clear_at_state(&cs->at_state);
+ dealloc_temp_at_states(cs);
+ gigaset_at_init(&cs->at_state, NULL, cs, 0);
+
+ cs->inbuf->inputstate = INS_command;
+ cs->inbuf->head = 0;
+ cs->inbuf->tail = 0;
+
+ cb = cs->cmdbuf;
+ while (cb) {
+ tcb = cb;
+ cb = cb->next;
+ kfree(tcb);
+ }
+ cs->cmdbuf = cs->lastcmdbuf = NULL;
+ cs->curlen = 0;
+ cs->cmdbytes = 0;
+ cs->gotfwver = -1;
+ cs->dle = 0;
+ cs->cur_at_seq = 0;
+ cs->commands_pending = 0;
+ cs->cbytes = 0;
+
+ spin_unlock_irqrestore(&cs->lock, flags);
+
+ for (i = 0; i < cs->channels; ++i) {
+ gigaset_freebcs(cs->bcs + i);
+ if (gigaset_initbcs(cs->bcs + i, cs, i) < 0)
+ pr_err("could not allocate channel %d data\n", i);
+ }
+
+ if (cs->waiting) {
+ cs->cmd_result = -ENODEV;
+ cs->waiting = 0;
+ wake_up_interruptible(&cs->waitqueue);
+ }
+}
+
+
+/**
+ * gigaset_start() - start device operations
+ * @cs: device descriptor structure.
+ *
+ * Prepares the device for use by setting up communication parameters,
+ * scheduling an EV_START event to initiate device initialization, and
+ * waiting for completion of the initialization.
+ *
+ * Return value:
+ * 0 on success, error code < 0 on failure
+ */
+int gigaset_start(struct cardstate *cs)
+{
+ unsigned long flags;
+
+ if (mutex_lock_interruptible(&cs->mutex))
+ return -EBUSY;
+
+ spin_lock_irqsave(&cs->lock, flags);
+ cs->connected = 1;
+ spin_unlock_irqrestore(&cs->lock, flags);
+
+ if (cs->mstate != MS_LOCKED) {
+ cs->ops->set_modem_ctrl(cs, 0, TIOCM_DTR | TIOCM_RTS);
+ cs->ops->baud_rate(cs, B115200);
+ cs->ops->set_line_ctrl(cs, CS8);
+ cs->control_state = TIOCM_DTR | TIOCM_RTS;
+ }
+
+ cs->waiting = 1;
+
+ if (!gigaset_add_event(cs, &cs->at_state, EV_START, NULL, 0, NULL)) {
+ cs->waiting = 0;
+ goto error;
+ }
+ gigaset_schedule_event(cs);
+
+ wait_event(cs->waitqueue, !cs->waiting);
+
+ mutex_unlock(&cs->mutex);
+ return 0;
+
+error:
+ mutex_unlock(&cs->mutex);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(gigaset_start);
+
+/**
+ * gigaset_shutdown() - shut down device operations
+ * @cs: device descriptor structure.
+ *
+ * Deactivates the device by scheduling an EV_SHUTDOWN event and
+ * waiting for completion of the shutdown.
+ *
+ * Return value:
+ * 0 - success, -ENODEV - error (no device associated)
+ */
+int gigaset_shutdown(struct cardstate *cs)
+{
+ mutex_lock(&cs->mutex);
+
+ if (!(cs->flags & VALID_MINOR)) {
+ mutex_unlock(&cs->mutex);
+ return -ENODEV;
+ }
+
+ cs->waiting = 1;
+
+ if (!gigaset_add_event(cs, &cs->at_state, EV_SHUTDOWN, NULL, 0, NULL))
+ goto exit;
+ gigaset_schedule_event(cs);
+
+ wait_event(cs->waitqueue, !cs->waiting);
+
+ cleanup_cs(cs);
+
+exit:
+ mutex_unlock(&cs->mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gigaset_shutdown);
+
+/**
+ * gigaset_stop() - stop device operations
+ * @cs: device descriptor structure.
+ *
+ * Stops operations on the device by scheduling an EV_STOP event and
+ * waiting for completion of the shutdown.
+ */
+void gigaset_stop(struct cardstate *cs)
+{
+ mutex_lock(&cs->mutex);
+
+ cs->waiting = 1;
+
+ if (!gigaset_add_event(cs, &cs->at_state, EV_STOP, NULL, 0, NULL))
+ goto exit;
+ gigaset_schedule_event(cs);
+
+ wait_event(cs->waitqueue, !cs->waiting);
+
+ cleanup_cs(cs);
+
+exit:
+ mutex_unlock(&cs->mutex);
+}
+EXPORT_SYMBOL_GPL(gigaset_stop);
+
+static LIST_HEAD(drivers);
+static DEFINE_SPINLOCK(driver_lock);
+
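+/* look up a registered device by its LL id (entries flagged VALID_ID only) */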
+struct cardstate *gigaset_get_cs_by_id(int id)
+{
+ unsigned long flags;
+ struct cardstate *ret = NULL;
+ struct cardstate *cs;
+ struct gigaset_driver *drv;
+ unsigned i;
+
+ spin_lock_irqsave(&driver_lock, flags);
+ list_for_each_entry(drv, &drivers, list) {
+ spin_lock(&drv->lock);
+ for (i = 0; i < drv->minors; ++i) {
+ cs = drv->cs + i;
+ if ((cs->flags & VALID_ID) && cs->myid == id) {
+ ret = cs;
+ break;
+ }
+ }
+ spin_unlock(&drv->lock);
+ if (ret)
+ break;
+ }
+ spin_unlock_irqrestore(&driver_lock, flags);
+ return ret;
+}
+
+static struct cardstate *gigaset_get_cs_by_minor(unsigned minor)
+{
+ unsigned long flags;
+ struct cardstate *ret = NULL;
+ struct gigaset_driver *drv;
+ unsigned index;
+
+ spin_lock_irqsave(&driver_lock, flags);
+ list_for_each_entry(drv, &drivers, list) {
+ if (minor < drv->minor || minor >= drv->minor + drv->minors)
+ continue;
+ index = minor - drv->minor;
+ spin_lock(&drv->lock);
+ if (drv->cs[index].flags & VALID_MINOR)
+ ret = drv->cs + index;
+ spin_unlock(&drv->lock);
+ if (ret)
+ break;
+ }
+ spin_unlock_irqrestore(&driver_lock, flags);
+ return ret;
+}
+
+struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty)
+{
+ return gigaset_get_cs_by_minor(tty->index + tty->driver->minor_start);
+}
+
+/**
+ * gigaset_freedriver() - free all associated resources of a driver
+ * @drv: driver descriptor structure.
+ *
+ * Unregisters the driver from the system and deallocates the driver
+ * structure @drv and all structures referenced from it.
+ * All devices should be shut down before calling this.
+ */
+void gigaset_freedriver(struct gigaset_driver *drv)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&driver_lock, flags);
+ list_del(&drv->list);
+ spin_unlock_irqrestore(&driver_lock, flags);
+
+ gigaset_if_freedriver(drv);
+
+ kfree(drv->cs);
+ kfree(drv);
+}
+EXPORT_SYMBOL_GPL(gigaset_freedriver);
+
+/**
+ * gigaset_initdriver() - initialize driver structure
+ * @minor: First minor number
+ * @minors: Number of minors this driver can handle
+ * @procname: Name of the driver
+ * @devname: Name of the device files (prefix without minor number)
+ * @ops: table of device-specific operations
+ * @owner: module owning the driver (used for reference counting)
+ *
+ * Allocate and initialize gigaset_driver structure. Initialize interface.
+ *
+ * Return value:
+ * Pointer to the gigaset_driver structure on success, NULL on failure.
+ */
+struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors,
+ const char *procname,
+ const char *devname,
+ const struct gigaset_ops *ops,
+ struct module *owner)
+{
+ struct gigaset_driver *drv;
+ unsigned long flags;
+ unsigned i;
+
+ drv = kmalloc(sizeof *drv, GFP_KERNEL);
+ if (!drv)
+ return NULL;
+
+ drv->have_tty = 0;
+ drv->minor = minor;
+ drv->minors = minors;
+ spin_lock_init(&drv->lock);
+ drv->blocked = 0;
+ drv->ops = ops;
+ drv->owner = owner;
+ INIT_LIST_HEAD(&drv->list);
+
+ drv->cs = kmalloc_array(minors, sizeof(*drv->cs), GFP_KERNEL);
+ if (!drv->cs)
+ goto error;
+
+ for (i = 0; i < minors; ++i) {
+ drv->cs[i].flags = 0;
+ drv->cs[i].driver = drv;
+ drv->cs[i].ops = drv->ops;
+ drv->cs[i].minor_index = i;
+ mutex_init(&drv->cs[i].mutex);
+ }
+
+ gigaset_if_initdriver(drv, procname, devname);
+
+ spin_lock_irqsave(&driver_lock, flags);
+ list_add(&drv->list, &drivers);
+ spin_unlock_irqrestore(&driver_lock, flags);
+
+ return drv;
+
+error:
+ kfree(drv);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(gigaset_initdriver);
+
+/**
+ * gigaset_blockdriver() - block driver
+ * @drv: driver descriptor structure.
+ *
+ * Prevents the driver from attaching new devices, in preparation for
+ * deregistration.
+ */
+void gigaset_blockdriver(struct gigaset_driver *drv)
+{
+ drv->blocked = 1;
+}
+EXPORT_SYMBOL_GPL(gigaset_blockdriver);
+
+static int __init gigaset_init_module(void)
+{
+ /* in accordance with the principle of least astonishment,
+ * setting the 'debug' parameter to 1 activates a sensible
+ * set of default debug levels
+ */
+ if (gigaset_debuglevel == 1)
+ gigaset_debuglevel = DEBUG_DEFAULT;
+
+ pr_info(DRIVER_DESC DRIVER_DESC_DEBUG "\n");
+ gigaset_isdn_regdrv();
+ return 0;
+}
+
+static void __exit gigaset_exit_module(void)
+{
+ gigaset_isdn_unregdrv();
+}
+
+module_init(gigaset_init_module);
+module_exit(gigaset_exit_module);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/isdn/gigaset/dummyll.c b/drivers/staging/isdn/gigaset/dummyll.c
new file mode 100644
index 000000000000..4b9637e5da6e
--- /dev/null
+++ b/drivers/staging/isdn/gigaset/dummyll.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Dummy LL interface for the Gigaset driver
+ *
+ * Copyright (c) 2009 by Tilman Schmidt <tilman@imap.cc>.
+ *
+ * =====================================================================
+ * =====================================================================
+ */
+
+#include <linux/export.h>
+#include "gigaset.h"
+
+void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
+{
+}
+EXPORT_SYMBOL_GPL(gigaset_skb_sent);
+
+void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb)
+{
+}
+EXPORT_SYMBOL_GPL(gigaset_skb_rcvd);
+
+void gigaset_isdn_rcv_err(struct bc_state *bcs)
+{
+}
+EXPORT_SYMBOL_GPL(gigaset_isdn_rcv_err);
+
+int gigaset_isdn_icall(struct at_state_t *at_state)
+{
+ return ICALL_IGNORE;
+}
+
+void gigaset_isdn_connD(struct bc_state *bcs)
+{
+}
+
+void gigaset_isdn_hupD(struct bc_state *bcs)
+{
+}
+
+void gigaset_isdn_connB(struct bc_state *bcs)
+{
+}
+
+void gigaset_isdn_hupB(struct bc_state *bcs)
+{
+}
+
+void gigaset_isdn_start(struct cardstate *cs)
+{
+}
+
+void gigaset_isdn_stop(struct cardstate *cs)
+{
+}
+
+int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
+{
+ return 0;
+}
+
+void gigaset_isdn_unregdev(struct cardstate *cs)
+{
+}
+
+void gigaset_isdn_regdrv(void)
+{
+ pr_info("no ISDN subsystem interface\n");
+}
+
+void gigaset_isdn_unregdrv(void)
+{
+}
diff --git a/drivers/staging/isdn/gigaset/ev-layer.c b/drivers/staging/isdn/gigaset/ev-layer.c
new file mode 100644
index 000000000000..f8bb1869c600
--- /dev/null
+++ b/drivers/staging/isdn/gigaset/ev-layer.c
@@ -0,0 +1,1910 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Stuff used by all variants of the driver
+ *
+ * Copyright (c) 2001 by Stefan Eilers,
+ * Hansjoerg Lipp <hjlipp@web.de>,
+ * Tilman Schmidt <tilman@imap.cc>.
+ *
+ * =====================================================================
+ * =====================================================================
+ */
+
+#include <linux/export.h>
+#include "gigaset.h"
+
+/* ========================================================== */
+/* bit masks for pending commands */
+#define PC_DIAL 0x001
+#define PC_HUP 0x002
+#define PC_INIT 0x004
+#define PC_DLE0 0x008
+#define PC_DLE1 0x010
+#define PC_SHUTDOWN 0x020
+#define PC_ACCEPT 0x040
+#define PC_CID 0x080
+#define PC_NOCID 0x100
+#define PC_CIDMODE 0x200
+#define PC_UMMODE 0x400
+
+/* types of modem responses */
+#define RT_NOTHING 0
+#define RT_ZSAU 1
+#define RT_RING 2
+#define RT_NUMBER 3
+#define RT_STRING 4
+#define RT_ZCAU 6
+
+/* Possible ASCII responses */
+#define RSP_OK 0
+#define RSP_ERROR 1
+#define RSP_ZGCI 3
+#define RSP_RING 4
+#define RSP_ZVLS 5
+#define RSP_ZCAU 6
+
+/* responses with values to store in at_state */
+/* - numeric */
+#define RSP_VAR 100
+#define RSP_ZSAU (RSP_VAR + VAR_ZSAU)
+#define RSP_ZDLE (RSP_VAR + VAR_ZDLE)
+#define RSP_ZCTP (RSP_VAR + VAR_ZCTP)
+/* - string */
+#define RSP_STR (RSP_VAR + VAR_NUM)
+#define RSP_NMBR (RSP_STR + STR_NMBR)
+#define RSP_ZCPN (RSP_STR + STR_ZCPN)
+#define RSP_ZCON (RSP_STR + STR_ZCON)
+#define RSP_ZBC (RSP_STR + STR_ZBC)
+#define RSP_ZHLC (RSP_STR + STR_ZHLC)
+
+#define RSP_WRONG_CID -2 /* unknown cid in cmd */
+#define RSP_INVAL -6 /* invalid response */
+#define RSP_NODEV -9 /* device not connected */
+
+#define RSP_NONE -19
+#define RSP_STRING -20
+#define RSP_NULL -21
+#define RSP_INIT -27
+#define RSP_ANY -26
+#define RSP_LAST -28
+
+/* actions for process_response */
+#define ACT_NOTHING 0
+#define ACT_SETDLE1 1
+#define ACT_SETDLE0 2
+#define ACT_FAILINIT 3
+#define ACT_HUPMODEM 4
+#define ACT_CONFIGMODE 5
+#define ACT_INIT 6
+#define ACT_DLE0 7
+#define ACT_DLE1 8
+#define ACT_FAILDLE0 9
+#define ACT_FAILDLE1 10
+#define ACT_RING 11
+#define ACT_CID 12
+#define ACT_FAILCID 13
+#define ACT_SDOWN 14
+#define ACT_FAILSDOWN 15
+#define ACT_DEBUG 16
+#define ACT_WARN 17
+#define ACT_DIALING 18
+#define ACT_ABORTDIAL 19
+#define ACT_DISCONNECT 20
+#define ACT_CONNECT 21
+#define ACT_REMOTEREJECT 22
+#define ACT_CONNTIMEOUT 23
+#define ACT_REMOTEHUP 24
+#define ACT_ABORTHUP 25
+#define ACT_ICALL 26
+#define ACT_ACCEPTED 27
+#define ACT_ABORTACCEPT 28
+#define ACT_TIMEOUT 29
+#define ACT_GETSTRING 30
+#define ACT_SETVER 31
+#define ACT_FAILVER 32
+#define ACT_GOTVER 33
+#define ACT_TEST 34
+#define ACT_ERROR 35
+#define ACT_ABORTCID 36
+#define ACT_ZCAU 37
+#define ACT_NOTIFY_BC_DOWN 38
+#define ACT_NOTIFY_BC_UP 39
+#define ACT_DIAL 40
+#define ACT_ACCEPT 41
+#define ACT_HUP 43
+#define ACT_IF_LOCK 44
+#define ACT_START 45
+#define ACT_STOP 46
+#define ACT_FAKEDLE0 47
+#define ACT_FAKEHUP 48
+#define ACT_FAKESDOWN 49
+#define ACT_SHUTDOWN 50
+#define ACT_PROC_CIDMODE 51
+#define ACT_UMODESET 52
+#define ACT_FAILUMODE 53
+#define ACT_CMODESET 54
+#define ACT_FAILCMODE 55
+#define ACT_IF_VER 56
+#define ACT_CMD 100
+
+/* at command sequences */
+#define SEQ_NONE 0
+#define SEQ_INIT 100
+#define SEQ_DLE0 200
+#define SEQ_DLE1 250
+#define SEQ_CID 300
+#define SEQ_NOCID 350
+#define SEQ_HUP 400
+#define SEQ_DIAL 600
+#define SEQ_ACCEPT 720
+#define SEQ_SHUTDOWN 500
+#define SEQ_CIDMODE 10
+#define SEQ_UMMODE 11
+
+
+/* 100: init, 200: dle0, 250:dle1, 300: get cid (dial), 350: "hup" (no cid),
+ * 400: hup, 500: reset, 600: dial, 700: ring */
+struct reply_t gigaset_tab_nocid[] =
+{
+/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout,
+ * action, command */
+
+/* initialize device, set cid mode if possible */
+ {RSP_INIT, -1, -1, SEQ_INIT, 100, 1, {ACT_TIMEOUT} },
+
+ {EV_TIMEOUT, 100, 100, -1, 101, 3, {0}, "Z\r"},
+ {RSP_OK, 101, 103, -1, 120, 5, {ACT_GETSTRING},
+ "+GMR\r"},
+
+ {EV_TIMEOUT, 101, 101, -1, 102, 5, {0}, "Z\r"},
+ {RSP_ERROR, 101, 101, -1, 102, 5, {0}, "Z\r"},
+
+ {EV_TIMEOUT, 102, 102, -1, 108, 5, {ACT_SETDLE1},
+ "^SDLE=0\r"},
+ {RSP_OK, 108, 108, -1, 104, -1},
+ {RSP_ZDLE, 104, 104, 0, 103, 5, {0}, "Z\r"},
+ {EV_TIMEOUT, 104, 104, -1, 0, 0, {ACT_FAILINIT} },
+ {RSP_ERROR, 108, 108, -1, 0, 0, {ACT_FAILINIT} },
+
+ {EV_TIMEOUT, 108, 108, -1, 105, 2, {ACT_SETDLE0,
+ ACT_HUPMODEM,
+ ACT_TIMEOUT} },
+ {EV_TIMEOUT, 105, 105, -1, 103, 5, {0}, "Z\r"},
+
+ {RSP_ERROR, 102, 102, -1, 107, 5, {0}, "^GETPRE\r"},
+ {RSP_OK, 107, 107, -1, 0, 0, {ACT_CONFIGMODE} },
+ {RSP_ERROR, 107, 107, -1, 0, 0, {ACT_FAILINIT} },
+ {EV_TIMEOUT, 107, 107, -1, 0, 0, {ACT_FAILINIT} },
+
+ {RSP_ERROR, 103, 103, -1, 0, 0, {ACT_FAILINIT} },
+ {EV_TIMEOUT, 103, 103, -1, 0, 0, {ACT_FAILINIT} },
+
+ {RSP_STRING, 120, 120, -1, 121, -1, {ACT_SETVER} },
+
+ {EV_TIMEOUT, 120, 121, -1, 0, 0, {ACT_FAILVER,
+ ACT_INIT} },
+ {RSP_ERROR, 120, 121, -1, 0, 0, {ACT_FAILVER,
+ ACT_INIT} },
+ {RSP_OK, 121, 121, -1, 0, 0, {ACT_GOTVER,
+ ACT_INIT} },
+ {RSP_NONE, 121, 121, -1, 120, 0, {ACT_GETSTRING} },
+
+/* leave dle mode */
+ {RSP_INIT, 0, 0, SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"},
+ {RSP_OK, 201, 201, -1, 202, -1},
+ {RSP_ZDLE, 202, 202, 0, 0, 0, {ACT_DLE0} },
+ {RSP_NODEV, 200, 249, -1, 0, 0, {ACT_FAKEDLE0} },
+ {RSP_ERROR, 200, 249, -1, 0, 0, {ACT_FAILDLE0} },
+ {EV_TIMEOUT, 200, 249, -1, 0, 0, {ACT_FAILDLE0} },
+
+/* enter dle mode */
+ {RSP_INIT, 0, 0, SEQ_DLE1, 251, 5, {0}, "^SDLE=1\r"},
+ {RSP_OK, 251, 251, -1, 252, -1},
+ {RSP_ZDLE, 252, 252, 1, 0, 0, {ACT_DLE1} },
+ {RSP_ERROR, 250, 299, -1, 0, 0, {ACT_FAILDLE1} },
+ {EV_TIMEOUT, 250, 299, -1, 0, 0, {ACT_FAILDLE1} },
+
+/* incoming call */
+ {RSP_RING, -1, -1, -1, -1, -1, {ACT_RING} },
+
+/* get cid */
+ {RSP_INIT, 0, 0, SEQ_CID, 301, 5, {0}, "^SGCI?\r"},
+ {RSP_OK, 301, 301, -1, 302, -1},
+ {RSP_ZGCI, 302, 302, -1, 0, 0, {ACT_CID} },
+ {RSP_ERROR, 301, 349, -1, 0, 0, {ACT_FAILCID} },
+ {EV_TIMEOUT, 301, 349, -1, 0, 0, {ACT_FAILCID} },
+
+/* enter cid mode */
+ {RSP_INIT, 0, 0, SEQ_CIDMODE, 150, 5, {0}, "^SGCI=1\r"},
+ {RSP_OK, 150, 150, -1, 0, 0, {ACT_CMODESET} },
+ {RSP_ERROR, 150, 150, -1, 0, 0, {ACT_FAILCMODE} },
+ {EV_TIMEOUT, 150, 150, -1, 0, 0, {ACT_FAILCMODE} },
+
+/* leave cid mode */
+ {RSP_INIT, 0, 0, SEQ_UMMODE, 160, 5, {0}, "Z\r"},
+ {RSP_OK, 160, 160, -1, 0, 0, {ACT_UMODESET} },
+ {RSP_ERROR, 160, 160, -1, 0, 0, {ACT_FAILUMODE} },
+ {EV_TIMEOUT, 160, 160, -1, 0, 0, {ACT_FAILUMODE} },
+
+/* abort getting cid */
+ {RSP_INIT, 0, 0, SEQ_NOCID, 0, 0, {ACT_ABORTCID} },
+
+/* reset */
+ {RSP_INIT, 0, 0, SEQ_SHUTDOWN, 504, 5, {0}, "Z\r"},
+ {RSP_OK, 504, 504, -1, 0, 0, {ACT_SDOWN} },
+ {RSP_ERROR, 501, 599, -1, 0, 0, {ACT_FAILSDOWN} },
+ {EV_TIMEOUT, 501, 599, -1, 0, 0, {ACT_FAILSDOWN} },
+ {RSP_NODEV, 501, 599, -1, 0, 0, {ACT_FAKESDOWN} },
+
+ {EV_PROC_CIDMODE, -1, -1, -1, -1, -1, {ACT_PROC_CIDMODE} },
+ {EV_IF_LOCK, -1, -1, -1, -1, -1, {ACT_IF_LOCK} },
+ {EV_IF_VER, -1, -1, -1, -1, -1, {ACT_IF_VER} },
+ {EV_START, -1, -1, -1, -1, -1, {ACT_START} },
+ {EV_STOP, -1, -1, -1, -1, -1, {ACT_STOP} },
+ {EV_SHUTDOWN, -1, -1, -1, -1, -1, {ACT_SHUTDOWN} },
+
+/* misc. */
+ {RSP_ERROR, -1, -1, -1, -1, -1, {ACT_ERROR} },
+ {RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} },
+ {RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} },
+ {RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} },
+ {RSP_LAST}
+};
+
+/* 600: start dialing, 650: dial in progress, 800: connection is up, 700: ring,
+ * 400: hup, 750: accepted icall */
+struct reply_t gigaset_tab_cid[] =
+{
+/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout,
+ * action, command */
+
+/* dial */
+ {EV_DIAL, -1, -1, -1, -1, -1, {ACT_DIAL} },
+ {RSP_INIT, 0, 0, SEQ_DIAL, 601, 5, {ACT_CMD + AT_BC} },
+ {RSP_OK, 601, 601, -1, 603, 5, {ACT_CMD + AT_PROTO} },
+ {RSP_OK, 603, 603, -1, 604, 5, {ACT_CMD + AT_TYPE} },
+ {RSP_OK, 604, 604, -1, 605, 5, {ACT_CMD + AT_MSN} },
+ {RSP_NULL, 605, 605, -1, 606, 5, {ACT_CMD + AT_CLIP} },
+ {RSP_OK, 605, 605, -1, 606, 5, {ACT_CMD + AT_CLIP} },
+ {RSP_NULL, 606, 606, -1, 607, 5, {ACT_CMD + AT_ISO} },
+ {RSP_OK, 606, 606, -1, 607, 5, {ACT_CMD + AT_ISO} },
+ {RSP_OK, 607, 607, -1, 608, 5, {0}, "+VLS=17\r"},
+ {RSP_OK, 608, 608, -1, 609, -1},
+ {RSP_ZSAU, 609, 609, ZSAU_PROCEEDING, 610, 5, {ACT_CMD + AT_DIAL} },
+ {RSP_OK, 610, 610, -1, 650, 0, {ACT_DIALING} },
+
+ {RSP_ERROR, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} },
+ {EV_TIMEOUT, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} },
+
+/* optional dialing responses */
+ {EV_BC_OPEN, 650, 650, -1, 651, -1},
+ {RSP_ZVLS, 609, 651, 17, -1, -1, {ACT_DEBUG} },
+ {RSP_ZCTP, 610, 651, -1, -1, -1, {ACT_DEBUG} },
+ {RSP_ZCPN, 610, 651, -1, -1, -1, {ACT_DEBUG} },
+ {RSP_ZSAU, 650, 651, ZSAU_CALL_DELIVERED, -1, -1, {ACT_DEBUG} },
+
+/* connect */
+ {RSP_ZSAU, 650, 650, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT} },
+ {RSP_ZSAU, 651, 651, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT,
+ ACT_NOTIFY_BC_UP} },
+ {RSP_ZSAU, 750, 750, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT} },
+ {RSP_ZSAU, 751, 751, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT,
+ ACT_NOTIFY_BC_UP} },
+ {EV_BC_OPEN, 800, 800, -1, 800, -1, {ACT_NOTIFY_BC_UP} },
+
+/* remote hangup */
+ {RSP_ZSAU, 650, 651, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT} },
+ {RSP_ZSAU, 750, 751, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} },
+ {RSP_ZSAU, 800, 800, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} },
+
+/* hangup */
+ {EV_HUP, -1, -1, -1, -1, -1, {ACT_HUP} },
+ {RSP_INIT, -1, -1, SEQ_HUP, 401, 5, {0}, "+VLS=0\r"},
+ {RSP_OK, 401, 401, -1, 402, 5},
+ {RSP_ZVLS, 402, 402, 0, 403, 5},
+ {RSP_ZSAU, 403, 403, ZSAU_DISCONNECT_REQ, -1, -1, {ACT_DEBUG} },
+ {RSP_ZSAU, 403, 403, ZSAU_NULL, 0, 0, {ACT_DISCONNECT} },
+ {RSP_NODEV, 401, 403, -1, 0, 0, {ACT_FAKEHUP} },
+ {RSP_ERROR, 401, 401, -1, 0, 0, {ACT_ABORTHUP} },
+ {EV_TIMEOUT, 401, 403, -1, 0, 0, {ACT_ABORTHUP} },
+
+ {EV_BC_CLOSED, 0, 0, -1, 0, -1, {ACT_NOTIFY_BC_DOWN} },
+
+/* ring */
+ {RSP_ZBC, 700, 700, -1, -1, -1, {0} },
+ {RSP_ZHLC, 700, 700, -1, -1, -1, {0} },
+ {RSP_NMBR, 700, 700, -1, -1, -1, {0} },
+ {RSP_ZCPN, 700, 700, -1, -1, -1, {0} },
+ {RSP_ZCTP, 700, 700, -1, -1, -1, {0} },
+ {EV_TIMEOUT, 700, 700, -1, 720, 720, {ACT_ICALL} },
+ {EV_BC_CLOSED, 720, 720, -1, 0, -1, {ACT_NOTIFY_BC_DOWN} },
+
+/*accept icall*/
+ {EV_ACCEPT, -1, -1, -1, -1, -1, {ACT_ACCEPT} },
+ {RSP_INIT, 720, 720, SEQ_ACCEPT, 721, 5, {ACT_CMD + AT_PROTO} },
+ {RSP_OK, 721, 721, -1, 722, 5, {ACT_CMD + AT_ISO} },
+ {RSP_OK, 722, 722, -1, 723, 5, {0}, "+VLS=17\r"},
+ {RSP_OK, 723, 723, -1, 724, 5, {0} },
+ {RSP_ZVLS, 724, 724, 17, 750, 50, {ACT_ACCEPTED} },
+ {RSP_ERROR, 721, 729, -1, 0, 0, {ACT_ABORTACCEPT} },
+ {EV_TIMEOUT, 721, 729, -1, 0, 0, {ACT_ABORTACCEPT} },
+ {RSP_ZSAU, 700, 729, ZSAU_NULL, 0, 0, {ACT_ABORTACCEPT} },
+ {RSP_ZSAU, 700, 729, ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT} },
+ {RSP_ZSAU, 700, 729, ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT} },
+
+ {EV_BC_OPEN, 750, 750, -1, 751, -1},
+ {EV_TIMEOUT, 750, 751, -1, 0, 0, {ACT_CONNTIMEOUT} },
+
+/* B channel closed (general case) */
+ {EV_BC_CLOSED, -1, -1, -1, -1, -1, {ACT_NOTIFY_BC_DOWN} },
+
+/* misc. */
+ {RSP_ZCON, -1, -1, -1, -1, -1, {ACT_DEBUG} },
+ {RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} },
+ {RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} },
+ {RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} },
+ {RSP_LAST}
+};
+
+
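+/* table mapping modem response keywords to response codes and the type of
+ * parameter that may follow them */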
+static const struct resp_type_t {
+ char *response;
+ int resp_code;
+ int type;
+}
+resp_type[] =
+{
+ {"OK", RSP_OK, RT_NOTHING},
+ {"ERROR", RSP_ERROR, RT_NOTHING},
+ {"ZSAU", RSP_ZSAU, RT_ZSAU},
+ {"ZCAU", RSP_ZCAU, RT_ZCAU},
+ {"RING", RSP_RING, RT_RING},
+ {"ZGCI", RSP_ZGCI, RT_NUMBER},
+ {"ZVLS", RSP_ZVLS, RT_NUMBER},
+ {"ZCTP", RSP_ZCTP, RT_NUMBER},
+ {"ZDLE", RSP_ZDLE, RT_NUMBER},
+ {"ZHLC", RSP_ZHLC, RT_STRING},
+ {"ZBC", RSP_ZBC, RT_STRING},
+ {"NMBR", RSP_NMBR, RT_STRING},
+ {"ZCPN", RSP_ZCPN, RT_STRING},
+ {"ZCON", RSP_ZCON, RT_STRING},
+ {NULL, 0, 0}
+};
+
+static const struct zsau_resp_t {
+ char *str;
+ int code;
+}
+zsau_resp[] =
+{
+ {"OUTGOING_CALL_PROCEEDING", ZSAU_PROCEEDING},
+ {"CALL_DELIVERED", ZSAU_CALL_DELIVERED},
+ {"ACTIVE", ZSAU_ACTIVE},
+ {"DISCONNECT_IND", ZSAU_DISCONNECT_IND},
+ {"NULL", ZSAU_NULL},
+ {"DISCONNECT_REQ", ZSAU_DISCONNECT_REQ},
+ {NULL, ZSAU_UNKNOWN}
+};
+
+/* check for and remove fixed string prefix
+ * If s starts with prefix terminated by a non-alphanumeric character,
+ * return pointer to the first character after that, otherwise return NULL.
+ */
+static char *skip_prefix(char *s, const char *prefix)
+{
+ while (*prefix)
+ if (*s++ != *prefix++)
+ return NULL;
+ if (isalnum(*s))
+ return NULL;
+ return s;
+}
+
+/* queue event with CID */
+static void add_cid_event(struct cardstate *cs, int cid, int type,
+ void *ptr, int parameter)
+{
+ unsigned long flags;
+ unsigned next, tail;
+ struct event_t *event;
+
+ gig_dbg(DEBUG_EVENT, "queueing event %d for cid %d", type, cid);
+
+ spin_lock_irqsave(&cs->ev_lock, flags);
+
+ tail = cs->ev_tail;
+ next = (tail + 1) % MAX_EVENTS;
+ if (unlikely(next == cs->ev_head)) {
+ dev_err(cs->dev, "event queue full\n");
+ kfree(ptr);
+ } else {
+ event = cs->events + tail;
+ event->type = type;
+ event->cid = cid;
+ event->ptr = ptr;
+ event->arg = NULL;
+ event->parameter = parameter;
+ event->at_state = NULL;
+ cs->ev_tail = next;
+ }
+
+ spin_unlock_irqrestore(&cs->ev_lock, flags);
+}
+
+/**
+ * gigaset_handle_modem_response() - process received modem response
+ * @cs: device descriptor structure.
+ *
+ * Called by asyncdata/isocdata if a block of data received from the
+ * device must be processed as a modem command response. The data is
+ * already in the cs structure.
+ */
+void gigaset_handle_modem_response(struct cardstate *cs)
+{
+ char *eoc, *psep, *ptr;
+ const struct resp_type_t *rt;
+ const struct zsau_resp_t *zr;
+ int cid, parameter;
+ u8 type, value;
+
+ if (!cs->cbytes) {
+ /* ignore additional LFs/CRs (M10x config mode or cx100) */
+ gig_dbg(DEBUG_MCMD, "skipped EOL [%02X]", cs->respdata[0]);
+ return;
+ }
+ cs->respdata[cs->cbytes] = 0;
+
+ if (cs->at_state.getstring) {
+ /* state machine wants next line verbatim */
+ cs->at_state.getstring = 0;
+ ptr = kstrdup(cs->respdata, GFP_ATOMIC);
+ gig_dbg(DEBUG_EVENT, "string==%s", ptr ? ptr : "NULL");
+ add_cid_event(cs, 0, RSP_STRING, ptr, 0);
+ return;
+ }
+
+ /* look up response type */
+ for (rt = resp_type; rt->response; ++rt) {
+ eoc = skip_prefix(cs->respdata, rt->response);
+ if (eoc)
+ break;
+ }
+ if (!rt->response) {
+ add_cid_event(cs, 0, RSP_NONE, NULL, 0);
+ gig_dbg(DEBUG_EVENT, "unknown modem response: '%s'\n",
+ cs->respdata);
+ return;
+ }
+
+ /* check for CID */
+ psep = strrchr(cs->respdata, ';');
+ if (psep &&
+ !kstrtoint(psep + 1, 10, &cid) &&
+ cid >= 1 && cid <= 65535) {
+ /* valid CID: chop it off */
+ *psep = 0;
+ } else {
+ /* no valid CID: leave unchanged */
+ cid = 0;
+ }
+
+ gig_dbg(DEBUG_EVENT, "CMD received: %s", cs->respdata);
+ if (cid)
+ gig_dbg(DEBUG_EVENT, "CID: %d", cid);
+
+ switch (rt->type) {
+ case RT_NOTHING:
+ /* check parameter separator */
+ if (*eoc)
+ goto bad_param; /* extra parameter */
+
+ add_cid_event(cs, cid, rt->resp_code, NULL, 0);
+ break;
+
+ case RT_RING:
+ /* check parameter separator */
+ if (!*eoc)
+ eoc = NULL; /* no parameter */
+ else if (*eoc++ != ',')
+ goto bad_param;
+
+ add_cid_event(cs, 0, rt->resp_code, NULL, cid);
+
+ /* process parameters as individual responses */
+ while (eoc) {
+ /* look up parameter type */
+ psep = NULL;
+ for (rt = resp_type; rt->response; ++rt) {
+ psep = skip_prefix(eoc, rt->response);
+ if (psep)
+ break;
+ }
+
+ /* all legal parameters are of type RT_STRING */
+ if (!psep || rt->type != RT_STRING) {
+ dev_warn(cs->dev,
+ "illegal RING parameter: '%s'\n",
+ eoc);
+ return;
+ }
+
+ /* skip parameter value separator */
+ if (*psep++ != '=')
+ goto bad_param;
+
+ /* look up end of parameter */
+ eoc = strchr(psep, ',');
+ if (eoc)
+ *eoc++ = 0;
+
+ /* retrieve parameter value */
+ ptr = kstrdup(psep, GFP_ATOMIC);
+
+ /* queue event */
+ add_cid_event(cs, cid, rt->resp_code, ptr, 0);
+ }
+ break;
+
+ case RT_ZSAU:
+ /* check parameter separator */
+ if (!*eoc) {
+ /* no parameter */
+ add_cid_event(cs, cid, rt->resp_code, NULL, ZSAU_NONE);
+ break;
+ }
+ if (*eoc++ != '=')
+ goto bad_param;
+
+ /* look up parameter value */
+ for (zr = zsau_resp; zr->str; ++zr)
+ if (!strcmp(eoc, zr->str))
+ break;
+ if (!zr->str)
+ goto bad_param;
+
+ add_cid_event(cs, cid, rt->resp_code, NULL, zr->code);
+ break;
+
+ case RT_STRING:
+ /* check parameter separator */
+ if (*eoc++ != '=')
+ goto bad_param;
+
+ /* retrieve parameter value */
+ ptr = kstrdup(eoc, GFP_ATOMIC);
+
+ /* queue event */
+ add_cid_event(cs, cid, rt->resp_code, ptr, 0);
+ break;
+
+ case RT_ZCAU:
+ /* check parameter separators */
+ if (*eoc++ != '=')
+ goto bad_param;
+ psep = strchr(eoc, ',');
+ if (!psep)
+ goto bad_param;
+ *psep++ = 0;
+
+ /* decode parameter values */
+ if (kstrtou8(eoc, 16, &type) || kstrtou8(psep, 16, &value)) {
+ *--psep = ',';
+ goto bad_param;
+ }
+ parameter = (type << 8) | value;
+
+ add_cid_event(cs, cid, rt->resp_code, NULL, parameter);
+ break;
+
+ case RT_NUMBER:
+ /* check parameter separator */
+ if (*eoc++ != '=')
+ goto bad_param;
+
+ /* decode parameter value */
+ if (kstrtoint(eoc, 10, &parameter))
+ goto bad_param;
+
+ /* special case ZDLE: set flag before queueing event */
+ if (rt->resp_code == RSP_ZDLE)
+ cs->dle = parameter;
+
+ add_cid_event(cs, cid, rt->resp_code, NULL, parameter);
+ break;
+
+bad_param:
+ /* parameter unexpected, incomplete or malformed */
+ dev_warn(cs->dev, "bad parameter in response '%s'\n",
+ cs->respdata);
+ add_cid_event(cs, cid, rt->resp_code, NULL, -1);
+ break;
+
+ default:
+ dev_err(cs->dev, "%s: internal error on '%s'\n",
+ __func__, cs->respdata);
+ }
+}
+EXPORT_SYMBOL_GPL(gigaset_handle_modem_response);
+
+/* disconnect_nobc
+ * process closing of connection associated with given AT state structure
+ * without B channel
+ */
+static void disconnect_nobc(struct at_state_t **at_state_p,
+ struct cardstate *cs)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cs->lock, flags);
+ ++(*at_state_p)->seq_index;
+
+ /* revert to selected idle mode */
+ if (!cs->cidmode) {
+ cs->at_state.pending_commands |= PC_UMMODE;
+ gig_dbg(DEBUG_EVENT, "Scheduling PC_UMMODE");
+ cs->commands_pending = 1;
+ }
+
+ /* check for and deallocate temporary AT state */
+ if (!list_empty(&(*at_state_p)->list)) {
+ list_del(&(*at_state_p)->list);
+ kfree(*at_state_p);
+ *at_state_p = NULL;
+ }
+
+ spin_unlock_irqrestore(&cs->lock, flags);
+}
+
+/* disconnect_bc
+ * process closing of connection associated with given AT state structure
+ * and B channel
+ */
+static void disconnect_bc(struct at_state_t *at_state,
+ struct cardstate *cs, struct bc_state *bcs)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cs->lock, flags);
+ ++at_state->seq_index;
+
+ /* revert to selected idle mode */
+ if (!cs->cidmode) {
+ cs->at_state.pending_commands |= PC_UMMODE;
+ gig_dbg(DEBUG_EVENT, "Scheduling PC_UMMODE");
+ cs->commands_pending = 1;
+ }
+ spin_unlock_irqrestore(&cs->lock, flags);
+
+ /* invoke hardware specific handler */
+ cs->ops->close_bchannel(bcs);
+
+ /* notify LL */
+ if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) {
+ bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL);
+ gigaset_isdn_hupD(bcs);
+ }
+}
+
+/* get_free_channel
+ * get a free AT state structure: either one of those associated with the
+ * B channels of the Gigaset device, or if none of those is available,
+ * a newly allocated one with bcs=NULL
+ * The structure should be freed by calling disconnect_nobc() after use.
+ */
+static inline struct at_state_t *get_free_channel(struct cardstate *cs,
+ int cid)
+/* cids: >0: siemens-cid
+ * 0: without cid
+ * -1: no cid assigned yet
+ */
+{
+ unsigned long flags;
+ int i;
+ struct at_state_t *ret;
+
+ for (i = 0; i < cs->channels; ++i)
+ if (gigaset_get_channel(cs->bcs + i) >= 0) {
+ ret = &cs->bcs[i].at_state;
+ ret->cid = cid;
+ return ret;
+ }
+
+ spin_lock_irqsave(&cs->lock, flags);
+ ret = kmalloc(sizeof(struct at_state_t), GFP_ATOMIC);
+ if (ret) {
+ gigaset_at_init(ret, NULL, cs, cid);
+ list_add(&ret->list, &cs->temp_at_states);
+ }
+ spin_unlock_irqrestore(&cs->lock, flags);
+ return ret;
+}
+
+static void init_failed(struct cardstate *cs, int mode)
+{
+ int i;
+ struct at_state_t *at_state;
+
+ cs->at_state.pending_commands &= ~PC_INIT;
+ cs->mode = mode;
+ cs->mstate = MS_UNINITIALIZED;
+ gigaset_free_channels(cs);
+ for (i = 0; i < cs->channels; ++i) {
+ at_state = &cs->bcs[i].at_state;
+ if (at_state->pending_commands & PC_CID) {
+ at_state->pending_commands &= ~PC_CID;
+ at_state->pending_commands |= PC_NOCID;
+ cs->commands_pending = 1;
+ }
+ }
+}
+
+static void schedule_init(struct cardstate *cs, int state)
+{
+ if (cs->at_state.pending_commands & PC_INIT) {
+ gig_dbg(DEBUG_EVENT, "not scheduling PC_INIT again");
+ return;
+ }
+ cs->mstate = state;
+ cs->mode = M_UNKNOWN;
+ gigaset_block_channels(cs);
+ cs->at_state.pending_commands |= PC_INIT;
+ gig_dbg(DEBUG_EVENT, "Scheduling PC_INIT");
+ cs->commands_pending = 1;
+}
+
+/* send an AT command
+ * adding the "AT" prefix, cid and DLE encapsulation as appropriate
+ */
+static void send_command(struct cardstate *cs, const char *cmd,
+ struct at_state_t *at_state)
+{
+ int cid = at_state->cid;
+ struct cmdbuf_t *cb;
+ size_t buflen;
+
+ buflen = strlen(cmd) + 12; /* DLE ( A T 1 2 3 4 5 <cmd> DLE ) \0 */
+ cb = kmalloc(sizeof(struct cmdbuf_t) + buflen, GFP_ATOMIC);
+ if (!cb) {
+ dev_err(cs->dev, "%s: out of memory\n", __func__);
+ return;
+ }
+ if (cid > 0 && cid <= 65535)
+ cb->len = snprintf(cb->buf, buflen,
+ cs->dle ? "\020(AT%d%s\020)" : "AT%d%s",
+ cid, cmd);
+ else
+ cb->len = snprintf(cb->buf, buflen,
+ cs->dle ? "\020(AT%s\020)" : "AT%s",
+ cmd);
+ cb->offset = 0;
+ cb->next = NULL;
+ cb->wake_tasklet = NULL;
+ cs->ops->write_cmd(cs, cb);
+}
+
+static struct at_state_t *at_state_from_cid(struct cardstate *cs, int cid)
+{
+ struct at_state_t *at_state;
+ int i;
+ unsigned long flags;
+
+ if (cid == 0)
+ return &cs->at_state;
+
+ for (i = 0; i < cs->channels; ++i)
+ if (cid == cs->bcs[i].at_state.cid)
+ return &cs->bcs[i].at_state;
+
+ spin_lock_irqsave(&cs->lock, flags);
+
+ list_for_each_entry(at_state, &cs->temp_at_states, list)
+ if (cid == at_state->cid) {
+ spin_unlock_irqrestore(&cs->lock, flags);
+ return at_state;
+ }
+
+ spin_unlock_irqrestore(&cs->lock, flags);
+
+ return NULL;
+}
+
+static void bchannel_down(struct bc_state *bcs)
+{
+ if (bcs->chstate & CHS_B_UP) {
+ bcs->chstate &= ~CHS_B_UP;
+ gigaset_isdn_hupB(bcs);
+ }
+
+ if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) {
+ bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL);
+ gigaset_isdn_hupD(bcs);
+ }
+
+ gigaset_free_channel(bcs);
+
+ gigaset_bcs_reinit(bcs);
+}
+
+static void bchannel_up(struct bc_state *bcs)
+{
+ if (bcs->chstate & CHS_B_UP) {
+ dev_notice(bcs->cs->dev, "%s: B channel already up\n",
+ __func__);
+ return;
+ }
+
+ bcs->chstate |= CHS_B_UP;
+ gigaset_isdn_connB(bcs);
+}
+
+static void start_dial(struct at_state_t *at_state, void *data,
+ unsigned seq_index)
+{
+ struct bc_state *bcs = at_state->bcs;
+ struct cardstate *cs = at_state->cs;
+ char **commands = data;
+ unsigned long flags;
+ int i;
+
+ bcs->chstate |= CHS_NOTIFY_LL;
+
+ spin_lock_irqsave(&cs->lock, flags);
+ if (at_state->seq_index != seq_index) {
+ spin_unlock_irqrestore(&cs->lock, flags);
+ goto error;
+ }
+ spin_unlock_irqrestore(&cs->lock, flags);
+
+ for (i = 0; i < AT_NUM; ++i) {
+ kfree(bcs->commands[i]);
+ bcs->commands[i] = commands[i];
+ }
+
+ at_state->pending_commands |= PC_CID;
+ gig_dbg(DEBUG_EVENT, "Scheduling PC_CID");
+ cs->commands_pending = 1;
+ return;
+
+error:
+ for (i = 0; i < AT_NUM; ++i) {
+ kfree(commands[i]);
+ commands[i] = NULL;
+ }
+ at_state->pending_commands |= PC_NOCID;
+ gig_dbg(DEBUG_EVENT, "Scheduling PC_NOCID");
+ cs->commands_pending = 1;
+ return;
+}
+
+static void start_accept(struct at_state_t *at_state)
+{
+ struct cardstate *cs = at_state->cs;
+ struct bc_state *bcs = at_state->bcs;
+ int i;
+
+ for (i = 0; i < AT_NUM; ++i) {
+ kfree(bcs->commands[i]);
+ bcs->commands[i] = NULL;
+ }
+
+ bcs->commands[AT_PROTO] = kmalloc(9, GFP_ATOMIC);
+ bcs->commands[AT_ISO] = kmalloc(9, GFP_ATOMIC);
+ if (!bcs->commands[AT_PROTO] || !bcs->commands[AT_ISO]) {
+ dev_err(at_state->cs->dev, "out of memory\n");
+ /* error reset */
+ at_state->pending_commands |= PC_HUP;
+ gig_dbg(DEBUG_EVENT, "Scheduling PC_HUP");
+ cs->commands_pending = 1;
+ return;
+ }
+
+ snprintf(bcs->commands[AT_PROTO], 9, "^SBPR=%u\r", bcs->proto2);
+ snprintf(bcs->commands[AT_ISO], 9, "^SISO=%u\r", bcs->channel + 1);
+
+ at_state->pending_commands |= PC_ACCEPT;
+ gig_dbg(DEBUG_EVENT, "Scheduling PC_ACCEPT");
+ cs->commands_pending = 1;
+}
+
+static void do_start(struct cardstate *cs)
+{
+ gigaset_free_channels(cs);
+
+ if (cs->mstate != MS_LOCKED)
+ schedule_init(cs, MS_INIT);
+
+ cs->isdn_up = 1;
+ gigaset_isdn_start(cs);
+
+ cs->waiting = 0;
+ wake_up(&cs->waitqueue);
+}
+
+static void finish_shutdown(struct cardstate *cs)
+{
+ if (cs->mstate != MS_LOCKED) {
+ cs->mstate = MS_UNINITIALIZED;
+ cs->mode = M_UNKNOWN;
+ }
+
+ /* Tell the LL that the device is not available. */
+ if (cs->isdn_up) {
+ cs->isdn_up = 0;
+ gigaset_isdn_stop(cs);
+ }
+
+ /* The rest is done by cleanup_cs() in process context. */
+
+ cs->cmd_result = -ENODEV;
+ cs->waiting = 0;
+ wake_up(&cs->waitqueue);
+}
+
+static void do_shutdown(struct cardstate *cs)
+{
+ gigaset_block_channels(cs);
+
+ if (cs->mstate == MS_READY) {
+ cs->mstate = MS_SHUTDOWN;
+ cs->at_state.pending_commands |= PC_SHUTDOWN;
+ gig_dbg(DEBUG_EVENT, "Scheduling PC_SHUTDOWN");
+ cs->commands_pending = 1;
+ } else
+ finish_shutdown(cs);
+}
+
+static void do_stop(struct cardstate *cs)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cs->lock, flags);
+ cs->connected = 0;
+ spin_unlock_irqrestore(&cs->lock, flags);
+
+ do_shutdown(cs);
+}
+
+/* Entering cid mode or getting a cid failed:
+ * try to initialize the device and try again.
+ *
+ * channel >= 0: getting cid for the channel failed
+ * channel < 0: entering cid mode failed
+ *
+ * returns 0 on success, <0 on failure
+ */
+static int reinit_and_retry(struct cardstate *cs, int channel)
+{
+ int i;
+
+ if (--cs->retry_count <= 0)
+ return -EFAULT;
+
+ for (i = 0; i < cs->channels; ++i)
+ if (cs->bcs[i].at_state.cid > 0)
+ return -EBUSY;
+
+ if (channel < 0)
+ dev_warn(cs->dev,
+ "Could not enter cid mode. Reinit device and try again.\n");
+ else {
+ dev_warn(cs->dev,
+ "Could not get a call id. Reinit device and try again.\n");
+ cs->bcs[channel].at_state.pending_commands |= PC_CID;
+ }
+ schedule_init(cs, MS_INIT);
+ return 0;
+}
+
+static int at_state_invalid(struct cardstate *cs,
+ struct at_state_t *test_ptr)
+{
+ unsigned long flags;
+ unsigned channel;
+ struct at_state_t *at_state;
+ int retval = 0;
+
+ spin_lock_irqsave(&cs->lock, flags);
+
+ if (test_ptr == &cs->at_state)
+ goto exit;
+
+ list_for_each_entry(at_state, &cs->temp_at_states, list)
+ if (at_state == test_ptr)
+ goto exit;
+
+ for (channel = 0; channel < cs->channels; ++channel)
+ if (&cs->bcs[channel].at_state == test_ptr)
+ goto exit;
+
+ retval = 1;
+exit:
+ spin_unlock_irqrestore(&cs->lock, flags);
+ return retval;
+}
+
+static void handle_icall(struct cardstate *cs, struct bc_state *bcs,
+ struct at_state_t *at_state)
+{
+ int retval;
+
+ retval = gigaset_isdn_icall(at_state);
+ switch (retval) {
+ case ICALL_ACCEPT:
+ break;
+ default:
+ dev_err(cs->dev, "internal error: disposition=%d\n", retval);
+ /* fall through */
+ case ICALL_IGNORE:
+ case ICALL_REJECT:
+ /* hang up actively
+ * Device doc says that would reject the call.
+ * In fact it doesn't.
+ */
+ at_state->pending_commands |= PC_HUP;
+ cs->commands_pending = 1;
+ break;
+ }
+}
+
+static int do_lock(struct cardstate *cs)
+{
+ int mode;
+ int i;
+
+ switch (cs->mstate) {
+ case MS_UNINITIALIZED:
+ case MS_READY:
+ if (cs->cur_at_seq || !list_empty(&cs->temp_at_states) ||
+ cs->at_state.pending_commands)
+ return -EBUSY;
+
+ for (i = 0; i < cs->channels; ++i)
+ if (cs->bcs[i].at_state.pending_commands)
+ return -EBUSY;
+
+ if (gigaset_get_channels(cs) < 0)
+ return -EBUSY;
+
+ break;
+ case MS_LOCKED:
+ break;
+ default:
+ return -EBUSY;
+ }
+
+ mode = cs->mode;
+ cs->mstate = MS_LOCKED;
+ cs->mode = M_UNKNOWN;
+
+ return mode;
+}
+
+static int do_unlock(struct cardstate *cs)
+{
+ if (cs->mstate != MS_LOCKED)
+ return -EINVAL;
+
+ cs->mstate = MS_UNINITIALIZED;
+ cs->mode = M_UNKNOWN;
+ gigaset_free_channels(cs);
+ if (cs->connected)
+ schedule_init(cs, MS_INIT);
+
+ return 0;
+}
+
+static void do_action(int action, struct cardstate *cs,
+ struct bc_state *bcs,
+ struct at_state_t **p_at_state, char **pp_command,
+ int *p_genresp, int *p_resp_code,
+ struct event_t *ev)
+{
+ struct at_state_t *at_state = *p_at_state;
+ struct bc_state *bcs2;
+ unsigned long flags;
+
+ int channel;
+
+ unsigned char *s, *e;
+ int i;
+ unsigned long val;
+
+ switch (action) {
+ case ACT_NOTHING:
+ break;
+ case ACT_TIMEOUT:
+ at_state->waiting = 1;
+ break;
+ case ACT_INIT:
+ cs->at_state.pending_commands &= ~PC_INIT;
+ cs->cur_at_seq = SEQ_NONE;
+ cs->mode = M_UNIMODEM;
+ spin_lock_irqsave(&cs->lock, flags);
+ if (!cs->cidmode) {
+ spin_unlock_irqrestore(&cs->lock, flags);
+ gigaset_free_channels(cs);
+ cs->mstate = MS_READY;
+ break;
+ }
+ spin_unlock_irqrestore(&cs->lock, flags);
+ cs->at_state.pending_commands |= PC_CIDMODE;
+ gig_dbg(DEBUG_EVENT, "Scheduling PC_CIDMODE");
+ cs->commands_pending = 1;
+ break;
+ case ACT_FAILINIT:
+ dev_warn(cs->dev, "Could not initialize the device.\n");
+ cs->dle = 0;
+ init_failed(cs, M_UNKNOWN);
+ cs->cur_at_seq = SEQ_NONE;
+ break;
+ case ACT_CONFIGMODE:
+ init_failed(cs, M_CONFIG);
+ cs->cur_at_seq = SEQ_NONE;
+ break;
+ case ACT_SETDLE1:
+ cs->dle = 1;
+ /* cs->inbuf[0].inputstate |= INS_command | INS_DLE_command; */
+ cs->inbuf[0].inputstate &=
+ ~(INS_command | INS_DLE_command);
+ break;
+ case ACT_SETDLE0:
+ cs->dle = 0;
+ cs->inbuf[0].inputstate =
+ (cs->inbuf[0].inputstate & ~INS_DLE_command)
+ | INS_command;
+ break;
+ case ACT_CMODESET:
+ if (cs->mstate == MS_INIT || cs->mstate == MS_RECOVER) {
+ gigaset_free_channels(cs);
+ cs->mstate = MS_READY;
+ }
+ cs->mode = M_CID;
+ cs->cur_at_seq = SEQ_NONE;
+ break;
+ case ACT_UMODESET:
+ cs->mode = M_UNIMODEM;
+ cs->cur_at_seq = SEQ_NONE;
+ break;
+ case ACT_FAILCMODE:
+ cs->cur_at_seq = SEQ_NONE;
+ if (cs->mstate == MS_INIT || cs->mstate == MS_RECOVER) {
+ init_failed(cs, M_UNKNOWN);
+ break;
+ }
+ if (reinit_and_retry(cs, -1) < 0)
+ schedule_init(cs, MS_RECOVER);
+ break;
+ case ACT_FAILUMODE:
+ cs->cur_at_seq = SEQ_NONE;
+ schedule_init(cs, MS_RECOVER);
+ break;
+ case ACT_HUPMODEM:
+ /* send "+++" (hangup in unimodem mode) */
+ if (cs->connected) {
+ struct cmdbuf_t *cb;
+
+ cb = kmalloc(sizeof(struct cmdbuf_t) + 3, GFP_ATOMIC);
+ if (!cb) {
+ dev_err(cs->dev, "%s: out of memory\n",
+ __func__);
+ return;
+ }
+ memcpy(cb->buf, "+++", 3);
+ cb->len = 3;
+ cb->offset = 0;
+ cb->next = NULL;
+ cb->wake_tasklet = NULL;
+ cs->ops->write_cmd(cs, cb);
+ }
+ break;
+ case ACT_RING:
+ /* get fresh AT state structure for new CID */
+ at_state = get_free_channel(cs, ev->parameter);
+ if (!at_state) {
+ dev_warn(cs->dev,
+ "RING ignored: could not allocate channel structure\n");
+ break;
+ }
+
+ /* initialize AT state structure
+ * note that bcs may be NULL if no B channel is free
+ */
+ at_state->ConState = 700;
+ for (i = 0; i < STR_NUM; ++i) {
+ kfree(at_state->str_var[i]);
+ at_state->str_var[i] = NULL;
+ }
+ at_state->int_var[VAR_ZCTP] = -1;
+
+ spin_lock_irqsave(&cs->lock, flags);
+ at_state->timer_expires = RING_TIMEOUT;
+ at_state->timer_active = 1;
+ spin_unlock_irqrestore(&cs->lock, flags);
+ break;
+ case ACT_ICALL:
+ handle_icall(cs, bcs, at_state);
+ break;
+ case ACT_FAILSDOWN:
+ dev_warn(cs->dev, "Could not shut down the device.\n");
+ /* fall through */
+ case ACT_FAKESDOWN:
+ case ACT_SDOWN:
+ cs->cur_at_seq = SEQ_NONE;
+ finish_shutdown(cs);
+ break;
+ case ACT_CONNECT:
+ if (cs->onechannel) {
+ at_state->pending_commands |= PC_DLE1;
+ cs->commands_pending = 1;
+ break;
+ }
+ bcs->chstate |= CHS_D_UP;
+ gigaset_isdn_connD(bcs);
+ cs->ops->init_bchannel(bcs);
+ break;
+ case ACT_DLE1:
+ cs->cur_at_seq = SEQ_NONE;
+ bcs = cs->bcs + cs->curchannel;
+
+ bcs->chstate |= CHS_D_UP;
+ gigaset_isdn_connD(bcs);
+ cs->ops->init_bchannel(bcs);
+ break;
+ case ACT_FAKEHUP:
+ at_state->int_var[VAR_ZSAU] = ZSAU_NULL;
+ /* fall through */
+ case ACT_DISCONNECT:
+ cs->cur_at_seq = SEQ_NONE;
+ at_state->cid = -1;
+ if (!bcs) {
+ disconnect_nobc(p_at_state, cs);
+ } else if (cs->onechannel && cs->dle) {
+ /* Check for other open channels not needed:
+ * DLE only used for M10x with one B channel.
+ */
+ at_state->pending_commands |= PC_DLE0;
+ cs->commands_pending = 1;
+ } else {
+ disconnect_bc(at_state, cs, bcs);
+ }
+ break;
+ case ACT_FAKEDLE0:
+ at_state->int_var[VAR_ZDLE] = 0;
+ cs->dle = 0;
+ /* fall through */
+ case ACT_DLE0:
+ cs->cur_at_seq = SEQ_NONE;
+ bcs2 = cs->bcs + cs->curchannel;
+ disconnect_bc(&bcs2->at_state, cs, bcs2);
+ break;
+ case ACT_ABORTHUP:
+ cs->cur_at_seq = SEQ_NONE;
+ dev_warn(cs->dev, "Could not hang up.\n");
+ at_state->cid = -1;
+ if (!bcs)
+ disconnect_nobc(p_at_state, cs);
+ else if (cs->onechannel)
+ at_state->pending_commands |= PC_DLE0;
+ else
+ disconnect_bc(at_state, cs, bcs);
+ schedule_init(cs, MS_RECOVER);
+ break;
+ case ACT_FAILDLE0:
+ cs->cur_at_seq = SEQ_NONE;
+ dev_warn(cs->dev, "Error leaving DLE mode.\n");
+ cs->dle = 0;
+ bcs2 = cs->bcs + cs->curchannel;
+ disconnect_bc(&bcs2->at_state, cs, bcs2);
+ schedule_init(cs, MS_RECOVER);
+ break;
+ case ACT_FAILDLE1:
+ cs->cur_at_seq = SEQ_NONE;
+ dev_warn(cs->dev,
+ "Could not enter DLE mode. Trying to hang up.\n");
+ channel = cs->curchannel;
+ cs->bcs[channel].at_state.pending_commands |= PC_HUP;
+ cs->commands_pending = 1;
+ break;
+
+ case ACT_CID: /* got cid; start dialing */
+ cs->cur_at_seq = SEQ_NONE;
+ channel = cs->curchannel;
+ if (ev->parameter > 0 && ev->parameter <= 65535) {
+ cs->bcs[channel].at_state.cid = ev->parameter;
+ cs->bcs[channel].at_state.pending_commands |=
+ PC_DIAL;
+ cs->commands_pending = 1;
+ break;
+ }
+ /* fall through - bad cid */
+ case ACT_FAILCID:
+ cs->cur_at_seq = SEQ_NONE;
+ channel = cs->curchannel;
+ if (reinit_and_retry(cs, channel) < 0) {
+ dev_warn(cs->dev,
+ "Could not get a call ID. Cannot dial.\n");
+ bcs2 = cs->bcs + channel;
+ disconnect_bc(&bcs2->at_state, cs, bcs2);
+ }
+ break;
+ case ACT_ABORTCID:
+ cs->cur_at_seq = SEQ_NONE;
+ bcs2 = cs->bcs + cs->curchannel;
+ disconnect_bc(&bcs2->at_state, cs, bcs2);
+ break;
+
+ case ACT_DIALING:
+ case ACT_ACCEPTED:
+ cs->cur_at_seq = SEQ_NONE;
+ break;
+
+ case ACT_ABORTACCEPT: /* hangup/error/timeout during ICALL processing */
+ if (bcs)
+ disconnect_bc(at_state, cs, bcs);
+ else
+ disconnect_nobc(p_at_state, cs);
+ break;
+
+ case ACT_ABORTDIAL: /* error/timeout during dial preparation */
+ cs->cur_at_seq = SEQ_NONE;
+ at_state->pending_commands |= PC_HUP;
+ cs->commands_pending = 1;
+ break;
+
+ case ACT_REMOTEREJECT: /* DISCONNECT_IND after dialling */
+ case ACT_CONNTIMEOUT: /* timeout waiting for ZSAU=ACTIVE */
+ case ACT_REMOTEHUP: /* DISCONNECT_IND with established connection */
+ at_state->pending_commands |= PC_HUP;
+ cs->commands_pending = 1;
+ break;
+ case ACT_GETSTRING: /* warning: RING, ZDLE, ...
+ are not handled properly anymore */
+ at_state->getstring = 1;
+ break;
+ case ACT_SETVER:
+ if (!ev->ptr) {
+ *p_genresp = 1;
+ *p_resp_code = RSP_ERROR;
+ break;
+ }
+ s = ev->ptr;
+
+ if (!strcmp(s, "OK")) {
+ /* OK without version string: assume old response */
+ *p_genresp = 1;
+ *p_resp_code = RSP_NONE;
+ break;
+ }
+
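+ /* parse the version string "a.b.c.d" into cs->fwver[0..3];
+ * any malformed component aborts the loop and yields an error reply */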
+ for (i = 0; i < 4; ++i) {
+ val = simple_strtoul(s, (char **) &e, 10);
+ if (val > INT_MAX || e == s)
+ break;
+ if (i == 3) {
+ if (*e)
+ break;
+ } else if (*e != '.')
+ break;
+ else
+ s = e + 1;
+ cs->fwver[i] = val;
+ }
+ if (i != 4) {
+ *p_genresp = 1;
+ *p_resp_code = RSP_ERROR;
+ break;
+ }
+ cs->gotfwver = 0;
+ break;
+ case ACT_GOTVER:
+ if (cs->gotfwver == 0) {
+ cs->gotfwver = 1;
+ gig_dbg(DEBUG_EVENT,
+ "firmware version %02d.%03d.%02d.%02d",
+ cs->fwver[0], cs->fwver[1],
+ cs->fwver[2], cs->fwver[3]);
+ break;
+ }
+ /* fall through */
+ case ACT_FAILVER:
+ cs->gotfwver = -1;
+ dev_err(cs->dev, "could not read firmware version.\n");
+ break;
+ case ACT_ERROR:
+ gig_dbg(DEBUG_ANY, "%s: ERROR response in ConState %d",
+ __func__, at_state->ConState);
+ cs->cur_at_seq = SEQ_NONE;
+ break;
+ case ACT_DEBUG:
+ gig_dbg(DEBUG_ANY, "%s: resp_code %d in ConState %d",
+ __func__, ev->type, at_state->ConState);
+ break;
+ case ACT_WARN:
+ dev_warn(cs->dev, "%s: resp_code %d in ConState %d!\n",
+ __func__, ev->type, at_state->ConState);
+ break;
+ case ACT_ZCAU:
+ dev_warn(cs->dev, "cause code %04x in connection state %d.\n",
+ ev->parameter, at_state->ConState);
+ break;
+
+ /* events from the LL */
+
+ case ACT_DIAL:
+ if (!ev->ptr) {
+ *p_genresp = 1;
+ *p_resp_code = RSP_ERROR;
+ break;
+ }
+ start_dial(at_state, ev->ptr, ev->parameter);
+ break;
+ case ACT_ACCEPT:
+ start_accept(at_state);
+ break;
+ case ACT_HUP:
+ at_state->pending_commands |= PC_HUP;
+ gig_dbg(DEBUG_EVENT, "Scheduling PC_HUP");
+ cs->commands_pending = 1;
+ break;
+
+ /* hotplug events */
+
+ case ACT_STOP:
+ do_stop(cs);
+ break;
+ case ACT_START:
+ do_start(cs);
+ break;
+
+ /* events from the interface */
+
+ case ACT_IF_LOCK:
+ cs->cmd_result = ev->parameter ? do_lock(cs) : do_unlock(cs);
+ cs->waiting = 0;
+ wake_up(&cs->waitqueue);
+ break;
+ case ACT_IF_VER:
+ if (ev->parameter != 0)
+ cs->cmd_result = -EINVAL;
+ else if (cs->gotfwver != 1) {
+ cs->cmd_result = -ENOENT;
+ } else {
+ memcpy(ev->arg, cs->fwver, sizeof cs->fwver);
+ cs->cmd_result = 0;
+ }
+ cs->waiting = 0;
+ wake_up(&cs->waitqueue);
+ break;
+
+ /* events from the proc file system */
+
+ case ACT_PROC_CIDMODE:
+ spin_lock_irqsave(&cs->lock, flags);
+ if (ev->parameter != cs->cidmode) {
+ cs->cidmode = ev->parameter;
+ if (ev->parameter) {
+ cs->at_state.pending_commands |= PC_CIDMODE;
+ gig_dbg(DEBUG_EVENT, "Scheduling PC_CIDMODE");
+ } else {
+ cs->at_state.pending_commands |= PC_UMMODE;
+ gig_dbg(DEBUG_EVENT, "Scheduling PC_UMMODE");
+ }
+ cs->commands_pending = 1;
+ }
+ spin_unlock_irqrestore(&cs->lock, flags);
+ cs->waiting = 0;
+ wake_up(&cs->waitqueue);
+ break;
+
+ /* events from the hardware drivers */
+
+ case ACT_NOTIFY_BC_DOWN:
+ bchannel_down(bcs);
+ break;
+ case ACT_NOTIFY_BC_UP:
+ bchannel_up(bcs);
+ break;
+ case ACT_SHUTDOWN:
+ do_shutdown(cs);
+ break;
+
+
+ default:
+ if (action >= ACT_CMD && action < ACT_CMD + AT_NUM) {
+ *pp_command = at_state->bcs->commands[action - ACT_CMD];
+ if (!*pp_command) {
+ *p_genresp = 1;
+ *p_resp_code = RSP_NULL;
+ }
+ } else
+ dev_err(cs->dev, "%s: action==%d!\n", __func__, action);
+ }
+}
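The ACT_SETVER handler above parses a firmware version reply of the form "NN.NNN.NN.NN" into the four fwver[] fields, rejecting anything that is not exactly four dot-separated decimal numbers. A minimal stand-alone sketch of that parsing loop (user-space, illustrative only; the INT_MAX overflow guard is omitted for brevity, and parse_fwver is a hypothetical helper name, not part of the driver):

#include <stdio.h>
#include <stdlib.h>

static int parse_fwver(const char *s, unsigned fwver[4])
{
	char *e;
	int i;

	for (i = 0; i < 4; ++i) {
		unsigned long val = strtoul(s, &e, 10);

		if (e == s)			/* no digits at this position */
			return -1;
		if (i == 3) {
			if (*e)			/* trailing junk after last field */
				return -1;
		} else if (*e != '.') {		/* fields must be dot-separated */
			return -1;
		} else {
			s = e + 1;		/* continue after the dot */
		}
		fwver[i] = val;
	}
	return 0;
}

int main(void)
{
	unsigned v[4];

	if (parse_fwver("02.140.06.01", v) == 0)
		printf("firmware version %02u.%03u.%02u.%02u\n",
		       v[0], v[1], v[2], v[3]);
	return 0;
}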
+
+/* State machine to do the calling and hangup procedure */
+static void process_event(struct cardstate *cs, struct event_t *ev)
+{
+ struct bc_state *bcs;
+ char *p_command = NULL;
+ struct reply_t *rep;
+ int rcode;
+ int genresp = 0;
+ int resp_code = RSP_ERROR;
+ struct at_state_t *at_state;
+ int index;
+ int curact;
+ unsigned long flags;
+
+ if (ev->cid >= 0) {
+ at_state = at_state_from_cid(cs, ev->cid);
+ if (!at_state) {
+ gig_dbg(DEBUG_EVENT, "event %d for invalid cid %d",
+ ev->type, ev->cid);
+ gigaset_add_event(cs, &cs->at_state, RSP_WRONG_CID,
+ NULL, 0, NULL);
+ return;
+ }
+ } else {
+ at_state = ev->at_state;
+ if (at_state_invalid(cs, at_state)) {
+ gig_dbg(DEBUG_EVENT, "event for invalid at_state %p",
+ at_state);
+ return;
+ }
+ }
+
+ gig_dbg(DEBUG_EVENT, "connection state %d, event %d",
+ at_state->ConState, ev->type);
+
+ bcs = at_state->bcs;
+
+ /* Set the pointer to the reply table */
+ rep = at_state->replystruct;
+
+ spin_lock_irqsave(&cs->lock, flags);
+ if (ev->type == EV_TIMEOUT) {
+ if (ev->parameter != at_state->timer_index
+ || !at_state->timer_active) {
+ ev->type = RSP_NONE; /* old timeout */
+ gig_dbg(DEBUG_EVENT, "old timeout");
+ } else {
+ if (at_state->waiting)
+ gig_dbg(DEBUG_EVENT, "stopped waiting");
+ else
+ gig_dbg(DEBUG_EVENT, "timeout occurred");
+ }
+ }
+ spin_unlock_irqrestore(&cs->lock, flags);
+
+ /* if the response belongs to a variable in at_state->int_var[VAR_XXXX]
+ or at_state->str_var[STR_XXXX], set it */
+ if (ev->type >= RSP_VAR && ev->type < RSP_VAR + VAR_NUM) {
+ index = ev->type - RSP_VAR;
+ at_state->int_var[index] = ev->parameter;
+ } else if (ev->type >= RSP_STR && ev->type < RSP_STR + STR_NUM) {
+ index = ev->type - RSP_STR;
+ kfree(at_state->str_var[index]);
+ at_state->str_var[index] = ev->ptr;
+ ev->ptr = NULL; /* prevent process_events() from
+ deallocating ptr */
+ }
+
+ if (ev->type == EV_TIMEOUT || ev->type == RSP_STRING)
+ at_state->getstring = 0;
+
+ /* Search for the reply table row matching the modem response and the
+ current connection state */
+ for (;; rep++) {
+ rcode = rep->resp_code;
+ if (rcode == RSP_LAST) {
+ /* found nothing...*/
+ dev_warn(cs->dev, "%s: rcode=RSP_LAST: "
+ "resp_code %d in ConState %d!\n",
+ __func__, ev->type, at_state->ConState);
+ return;
+ }
+ if ((rcode == RSP_ANY || rcode == ev->type)
+ && ((int) at_state->ConState >= rep->min_ConState)
+ && (rep->max_ConState < 0
+ || (int) at_state->ConState <= rep->max_ConState)
+ && (rep->parameter < 0 || rep->parameter == ev->parameter))
+ break;
+ }
+
+ p_command = rep->command;
+
+ at_state->waiting = 0;
+ for (curact = 0; curact < MAXACT; ++curact) {
+ /* The row tells us what we should do ..
+ */
+ do_action(rep->action[curact], cs, bcs, &at_state, &p_command,
+ &genresp, &resp_code, ev);
+ if (!at_state)
+ /* at_state destroyed by disconnect */
+ return;
+ }
+
+ /* Move to the next connection state as given by the matched table row */
+ if (rep->new_ConState >= 0)
+ at_state->ConState = rep->new_ConState;
+
+ if (genresp) {
+ spin_lock_irqsave(&cs->lock, flags);
+ at_state->timer_expires = 0;
+ at_state->timer_active = 0;
+ spin_unlock_irqrestore(&cs->lock, flags);
+ gigaset_add_event(cs, at_state, resp_code, NULL, 0, NULL);
+ } else {
+ /* Send command to modem if not NULL... */
+ if (p_command) {
+ if (cs->connected)
+ send_command(cs, p_command, at_state);
+ else
+ gigaset_add_event(cs, at_state, RSP_NODEV,
+ NULL, 0, NULL);
+ }
+
+ spin_lock_irqsave(&cs->lock, flags);
+ if (!rep->timeout) {
+ at_state->timer_expires = 0;
+ at_state->timer_active = 0;
+ } else if (rep->timeout > 0) { /* new timeout */
+ at_state->timer_expires = rep->timeout * 10;
+ at_state->timer_active = 1;
+ ++at_state->timer_index;
+ }
+ spin_unlock_irqrestore(&cs->lock, flags);
+ }
+}
+
+static void schedule_sequence(struct cardstate *cs,
+ struct at_state_t *at_state, int sequence)
+{
+ cs->cur_at_seq = sequence;
+ gigaset_add_event(cs, at_state, RSP_INIT, NULL, sequence, NULL);
+}
+
+static void process_command_flags(struct cardstate *cs)
+{
+ struct at_state_t *at_state = NULL;
+ struct bc_state *bcs;
+ int i;
+ int sequence;
+ unsigned long flags;
+
+ cs->commands_pending = 0;
+
+ if (cs->cur_at_seq) {
+ gig_dbg(DEBUG_EVENT, "not searching scheduled commands: busy");
+ return;
+ }
+
+ gig_dbg(DEBUG_EVENT, "searching scheduled commands");
+
+ sequence = SEQ_NONE;
+
+ /* clear pending_commands and hangup channels on shutdown */
+ if (cs->at_state.pending_commands & PC_SHUTDOWN) {
+ cs->at_state.pending_commands &= ~PC_CIDMODE;
+ for (i = 0; i < cs->channels; ++i) {
+ bcs = cs->bcs + i;
+ at_state = &bcs->at_state;
+ at_state->pending_commands &=
+ ~(PC_DLE1 | PC_ACCEPT | PC_DIAL);
+ if (at_state->cid > 0)
+ at_state->pending_commands |= PC_HUP;
+ if (at_state->pending_commands & PC_CID) {
+ at_state->pending_commands |= PC_NOCID;
+ at_state->pending_commands &= ~PC_CID;
+ }
+ }
+ }
+
+ /* clear pending_commands and hangup channels on reset */
+ if (cs->at_state.pending_commands & PC_INIT) {
+ cs->at_state.pending_commands &= ~PC_CIDMODE;
+ for (i = 0; i < cs->channels; ++i) {
+ bcs = cs->bcs + i;
+ at_state = &bcs->at_state;
+ at_state->pending_commands &=
+ ~(PC_DLE1 | PC_ACCEPT | PC_DIAL);
+ if (at_state->cid > 0)
+ at_state->pending_commands |= PC_HUP;
+ if (cs->mstate == MS_RECOVER) {
+ if (at_state->pending_commands & PC_CID) {
+ at_state->pending_commands |= PC_NOCID;
+ at_state->pending_commands &= ~PC_CID;
+ }
+ }
+ }
+ }
+
+ /* only switch back to unimodem mode if no commands are pending and
+ * no channels are up */
+ spin_lock_irqsave(&cs->lock, flags);
+ if (cs->at_state.pending_commands == PC_UMMODE
+ && !cs->cidmode
+ && list_empty(&cs->temp_at_states)
+ && cs->mode == M_CID) {
+ sequence = SEQ_UMMODE;
+ at_state = &cs->at_state;
+ for (i = 0; i < cs->channels; ++i) {
+ bcs = cs->bcs + i;
+ if (bcs->at_state.pending_commands ||
+ bcs->at_state.cid > 0) {
+ sequence = SEQ_NONE;
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&cs->lock, flags);
+ cs->at_state.pending_commands &= ~PC_UMMODE;
+ if (sequence != SEQ_NONE) {
+ schedule_sequence(cs, at_state, sequence);
+ return;
+ }
+
+ for (i = 0; i < cs->channels; ++i) {
+ bcs = cs->bcs + i;
+ if (bcs->at_state.pending_commands & PC_HUP) {
+ if (cs->dle) {
+ cs->curchannel = bcs->channel;
+ schedule_sequence(cs, &cs->at_state, SEQ_DLE0);
+ return;
+ }
+ bcs->at_state.pending_commands &= ~PC_HUP;
+ if (bcs->at_state.pending_commands & PC_CID) {
+ /* not yet dialing: PC_NOCID is sufficient */
+ bcs->at_state.pending_commands |= PC_NOCID;
+ bcs->at_state.pending_commands &= ~PC_CID;
+ } else {
+ schedule_sequence(cs, &bcs->at_state, SEQ_HUP);
+ return;
+ }
+ }
+ if (bcs->at_state.pending_commands & PC_NOCID) {
+ bcs->at_state.pending_commands &= ~PC_NOCID;
+ cs->curchannel = bcs->channel;
+ schedule_sequence(cs, &cs->at_state, SEQ_NOCID);
+ return;
+ } else if (bcs->at_state.pending_commands & PC_DLE0) {
+ bcs->at_state.pending_commands &= ~PC_DLE0;
+ cs->curchannel = bcs->channel;
+ schedule_sequence(cs, &cs->at_state, SEQ_DLE0);
+ return;
+ }
+ }
+
+ list_for_each_entry(at_state, &cs->temp_at_states, list)
+ if (at_state->pending_commands & PC_HUP) {
+ at_state->pending_commands &= ~PC_HUP;
+ schedule_sequence(cs, at_state, SEQ_HUP);
+ return;
+ }
+
+ if (cs->at_state.pending_commands & PC_INIT) {
+ cs->at_state.pending_commands &= ~PC_INIT;
+ cs->dle = 0;
+ cs->inbuf->inputstate = INS_command;
+ schedule_sequence(cs, &cs->at_state, SEQ_INIT);
+ return;
+ }
+ if (cs->at_state.pending_commands & PC_SHUTDOWN) {
+ cs->at_state.pending_commands &= ~PC_SHUTDOWN;
+ schedule_sequence(cs, &cs->at_state, SEQ_SHUTDOWN);
+ return;
+ }
+ if (cs->at_state.pending_commands & PC_CIDMODE) {
+ cs->at_state.pending_commands &= ~PC_CIDMODE;
+ if (cs->mode == M_UNIMODEM) {
+ cs->retry_count = 1;
+ schedule_sequence(cs, &cs->at_state, SEQ_CIDMODE);
+ return;
+ }
+ }
+
+ for (i = 0; i < cs->channels; ++i) {
+ bcs = cs->bcs + i;
+ if (bcs->at_state.pending_commands & PC_DLE1) {
+ bcs->at_state.pending_commands &= ~PC_DLE1;
+ cs->curchannel = bcs->channel;
+ schedule_sequence(cs, &cs->at_state, SEQ_DLE1);
+ return;
+ }
+ if (bcs->at_state.pending_commands & PC_ACCEPT) {
+ bcs->at_state.pending_commands &= ~PC_ACCEPT;
+ schedule_sequence(cs, &bcs->at_state, SEQ_ACCEPT);
+ return;
+ }
+ if (bcs->at_state.pending_commands & PC_DIAL) {
+ bcs->at_state.pending_commands &= ~PC_DIAL;
+ schedule_sequence(cs, &bcs->at_state, SEQ_DIAL);
+ return;
+ }
+ if (bcs->at_state.pending_commands & PC_CID) {
+ switch (cs->mode) {
+ case M_UNIMODEM:
+ cs->at_state.pending_commands |= PC_CIDMODE;
+ gig_dbg(DEBUG_EVENT, "Scheduling PC_CIDMODE");
+ cs->commands_pending = 1;
+ return;
+ case M_UNKNOWN:
+ schedule_init(cs, MS_INIT);
+ return;
+ }
+ bcs->at_state.pending_commands &= ~PC_CID;
+ cs->curchannel = bcs->channel;
+ cs->retry_count = 2;
+ schedule_sequence(cs, &cs->at_state, SEQ_CID);
+ return;
+ }
+ }
+}
+
+static void process_events(struct cardstate *cs)
+{
+ struct event_t *ev;
+ unsigned head, tail;
+ int i;
+ int check_flags = 0;
+ int was_busy;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cs->ev_lock, flags);
+ head = cs->ev_head;
+
+ for (i = 0; i < 2 * MAX_EVENTS; ++i) {
+ tail = cs->ev_tail;
+ if (tail == head) {
+ if (!check_flags && !cs->commands_pending)
+ break;
+ check_flags = 0;
+ spin_unlock_irqrestore(&cs->ev_lock, flags);
+ process_command_flags(cs);
+ spin_lock_irqsave(&cs->ev_lock, flags);
+ tail = cs->ev_tail;
+ if (tail == head) {
+ if (!cs->commands_pending)
+ break;
+ continue;
+ }
+ }
+
+ ev = cs->events + head;
+ was_busy = cs->cur_at_seq != SEQ_NONE;
+ spin_unlock_irqrestore(&cs->ev_lock, flags);
+ process_event(cs, ev);
+ spin_lock_irqsave(&cs->ev_lock, flags);
+ kfree(ev->ptr);
+ ev->ptr = NULL;
+ if (was_busy && cs->cur_at_seq == SEQ_NONE)
+ check_flags = 1;
+
+ head = (head + 1) % MAX_EVENTS;
+ cs->ev_head = head;
+ }
+
+ spin_unlock_irqrestore(&cs->ev_lock, flags);
+
+ if (i == 2 * MAX_EVENTS) {
+ dev_err(cs->dev,
+ "infinite loop in process_events; aborting.\n");
+ }
+}
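process_events() above drains a fixed-size ring of MAX_EVENTS entries addressed by ev_head and ev_tail, re-checking pending command flags whenever the queue runs empty and capping the loop at 2 * MAX_EVENTS iterations as a safety net. A minimal sketch of the same head/tail discipline (locking, the command-flag rescan and the loop cap are omitted; the producer side shown here is an assumption added for self-containedness, the driver's real producer being gigaset_add_event(), implemented outside this file):

#define MAX_EVENTS 64

struct simple_event { int type; };

static struct simple_event events[MAX_EVENTS];
static unsigned ev_head, ev_tail;	/* queue is empty when head == tail */

static int queue_event(int type)
{
	unsigned next = (ev_tail + 1) % MAX_EVENTS;

	if (next == ev_head)		/* full: one slot is kept unused */
		return -1;
	events[ev_tail].type = type;
	ev_tail = next;
	return 0;
}

static void drain_events(void (*handle)(struct simple_event *))
{
	while (ev_head != ev_tail) {
		handle(&events[ev_head]);
		ev_head = (ev_head + 1) % MAX_EVENTS;
	}
}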
+
+/* tasklet scheduled on any event received from the Gigaset device
+ * parameter:
+ * data ISDN controller state structure
+ */
+void gigaset_handle_event(unsigned long data)
+{
+ struct cardstate *cs = (struct cardstate *) data;
+
+ /* handle incoming data on control/common channel */
+ if (cs->inbuf->head != cs->inbuf->tail) {
+ gig_dbg(DEBUG_INTR, "processing new data");
+ cs->ops->handle_input(cs->inbuf);
+ }
+
+ process_events(cs);
+}
diff --git a/drivers/staging/isdn/gigaset/gigaset.h b/drivers/staging/isdn/gigaset/gigaset.h
new file mode 100644
index 000000000000..0ecc2b5ea553
--- /dev/null
+++ b/drivers/staging/isdn/gigaset/gigaset.h
@@ -0,0 +1,827 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Siemens Gigaset 307x driver
+ * Common header file for all connection variants
+ *
+ * Written by Stefan Eilers
+ * and Hansjoerg Lipp <hjlipp@web.de>
+ *
+ * =====================================================================
+ * =====================================================================
+ */
+
+#ifndef GIGASET_H
+#define GIGASET_H
+
+/* define global prefix for pr_ macros in linux/kernel.h */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ppp_defs.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+
+#define GIG_VERSION {0, 5, 0, 0}
+#define GIG_COMPAT {0, 4, 0, 0}
+
+#define MAX_REC_PARAMS 10 /* Max. number of params in response string */
+#define MAX_RESP_SIZE 511 /* Max. size of a response string */
+
+#define MAX_EVENTS 64 /* size of event queue */
+
+#define RBUFSIZE 8192
+
+#define GIG_TICK 100 /* in milliseconds */
+
+/* timeout values (unit: 1 sec) */
+#define INIT_TIMEOUT 1
+
+/* timeout values (unit: 0.1 sec) */
+#define RING_TIMEOUT 3 /* for additional parameters to RING */
+#define BAS_TIMEOUT 20 /* for response to Base USB ops */
+#define ATRDY_TIMEOUT 3 /* for HD_READY_SEND_ATDATA */
+
+#define BAS_RETRY 3 /* max. retries for base USB ops */
+
+#define MAXACT 3
+
+extern int gigaset_debuglevel; /* "needs" cast to (enum debuglevel) */
+
+/* debug flags, combine by adding/bitwise OR */
+enum debuglevel {
+ DEBUG_INTR = 0x00008, /* interrupt processing */
+ DEBUG_CMD = 0x00020, /* sent/received LL commands */
+ DEBUG_STREAM = 0x00040, /* application data stream I/O events */
+ DEBUG_STREAM_DUMP = 0x00080, /* application data stream content */
+ DEBUG_LLDATA = 0x00100, /* sent/received LL data */
+ DEBUG_EVENT = 0x00200, /* event processing */
+ DEBUG_HDLC = 0x00800, /* M10x HDLC processing */
+ DEBUG_CHANNEL = 0x01000, /* channel allocation/deallocation */
+ DEBUG_TRANSCMD = 0x02000, /* AT-COMMANDS+RESPONSES */
+ DEBUG_MCMD = 0x04000, /* COMMANDS THAT ARE SENT VERY OFTEN */
+ DEBUG_INIT = 0x08000, /* (de)allocation+initialization of data
+ structures */
+ DEBUG_SUSPEND = 0x10000, /* suspend/resume processing */
+ DEBUG_OUTPUT = 0x20000, /* output to device */
+ DEBUG_ISO = 0x40000, /* isochronous transfers */
+ DEBUG_IF = 0x80000, /* character device operations */
+ DEBUG_USBREQ = 0x100000, /* USB communication (except payload
+ data) */
+ DEBUG_LOCKCMD = 0x200000, /* AT commands and responses when
+ MS_LOCKED */
+
+ DEBUG_ANY = 0x3fffff, /* print message if any of the others is
+ activated */
+};
+
+#ifdef CONFIG_GIGASET_DEBUG
+
+#define gig_dbg(level, format, arg...) \
+ do { \
+ if (unlikely(((enum debuglevel)gigaset_debuglevel) & (level))) \
+ printk(KERN_DEBUG KBUILD_MODNAME ": " format "\n", \
+ ## arg); \
+ } while (0)
+#define DEBUG_DEFAULT (DEBUG_TRANSCMD | DEBUG_CMD | DEBUG_USBREQ)
+
+#else
+
+#define gig_dbg(level, format, arg...) do {} while (0)
+#define DEBUG_DEFAULT 0
+
+#endif
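The debuglevel values above are bit flags, so several message classes can be enabled at once: gig_dbg() prints only when its level intersects the current gigaset_debuglevel mask, and compiles away entirely without CONFIG_GIGASET_DEBUG. Illustrative usage only (state and type are placeholder variables, and the mask is normally set via the driver's module parameter rather than assigned in code):

/* trace AT commands/responses and event processing at the same time */
gigaset_debuglevel = DEBUG_TRANSCMD | DEBUG_EVENT;

/* emitted only while DEBUG_EVENT is set in gigaset_debuglevel */
gig_dbg(DEBUG_EVENT, "connection state %d, event %d", state, type);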
+
+void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
+ size_t len, const unsigned char *buf);
+
+/* connection state */
+#define ZSAU_NONE 0
+#define ZSAU_PROCEEDING 1
+#define ZSAU_CALL_DELIVERED 2
+#define ZSAU_ACTIVE 3
+#define ZSAU_DISCONNECT_IND 4
+#define ZSAU_NULL 5
+#define ZSAU_DISCONNECT_REQ 6
+#define ZSAU_UNKNOWN -1
+
+/* USB control transfer requests */
+#define OUT_VENDOR_REQ (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT)
+#define IN_VENDOR_REQ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT)
+
+/* interrupt pipe messages */
+#define HD_B1_FLOW_CONTROL 0x80
+#define HD_B2_FLOW_CONTROL 0x81
+#define HD_RECEIVEATDATA_ACK (0x35) /* 3070 */
+#define HD_READY_SEND_ATDATA (0x36) /* 3070 */
+#define HD_OPEN_ATCHANNEL_ACK (0x37) /* 3070 */
+#define HD_CLOSE_ATCHANNEL_ACK (0x38) /* 3070 */
+#define HD_DEVICE_INIT_OK (0x11) /* ISurf USB + 3070 */
+#define HD_OPEN_B1CHANNEL_ACK (0x51) /* ISurf USB + 3070 */
+#define HD_OPEN_B2CHANNEL_ACK (0x52) /* ISurf USB + 3070 */
+#define HD_CLOSE_B1CHANNEL_ACK (0x53) /* ISurf USB + 3070 */
+#define HD_CLOSE_B2CHANNEL_ACK (0x54) /* ISurf USB + 3070 */
+#define HD_SUSPEND_END (0x61) /* ISurf USB */
+#define HD_RESET_INTERRUPT_PIPE_ACK (0xFF) /* ISurf USB + 3070 */
+
+/* control requests */
+#define HD_OPEN_B1CHANNEL (0x23) /* ISurf USB + 3070 */
+#define HD_CLOSE_B1CHANNEL (0x24) /* ISurf USB + 3070 */
+#define HD_OPEN_B2CHANNEL (0x25) /* ISurf USB + 3070 */
+#define HD_CLOSE_B2CHANNEL (0x26) /* ISurf USB + 3070 */
+#define HD_RESET_INTERRUPT_PIPE (0x27) /* ISurf USB + 3070 */
+#define HD_DEVICE_INIT_ACK (0x34) /* ISurf USB + 3070 */
+#define HD_WRITE_ATMESSAGE (0x12) /* 3070 */
+#define HD_READ_ATMESSAGE (0x13) /* 3070 */
+#define HD_OPEN_ATCHANNEL (0x28) /* 3070 */
+#define HD_CLOSE_ATCHANNEL (0x29) /* 3070 */
+
+/* number of B channels supported by base driver */
+#define BAS_CHANNELS 2
+
+/* USB frames for isochronous transfer */
+#define BAS_FRAMETIME 1 /* number of milliseconds between frames */
+#define BAS_NUMFRAMES 8 /* number of frames per URB */
+#define BAS_MAXFRAME 16 /* allocated bytes per frame */
+#define BAS_NORMFRAME 8 /* send size without flow control */
+#define BAS_HIGHFRAME 10 /* " " with positive flow control */
+#define BAS_LOWFRAME 5 /* " " with negative flow control */
+#define BAS_CORRFRAMES 4 /* flow control multiplier */
+
+#define BAS_INBUFSIZE (BAS_MAXFRAME * BAS_NUMFRAMES) /* size of isoc in buf
+ * per URB */
+#define BAS_OUTBUFSIZE 4096 /* size of common isoc out buffer */
+#define BAS_OUTBUFPAD BAS_MAXFRAME /* size of pad area for isoc out buf */
+
+#define BAS_INURBS 3
+#define BAS_OUTURBS 3
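For orientation, these sizes line up with the 64 kbit/s ISDN B-channel rate: at one isochronous frame per millisecond (BAS_FRAMETIME) and BAS_NORMFRAME = 8 data bytes per frame, the nominal throughput is 8 bytes/ms = 8000 bytes/s = 64 kbit/s. BAS_HIGHFRAME (10 bytes) and BAS_LOWFRAME (5 bytes) let flow control temporarily run the output faster or slower than that, and BAS_MAXFRAME (16 bytes) bounds the per-frame allocation.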
+
+/* variable commands in struct bc_state */
+#define AT_ISO 0
+#define AT_DIAL 1
+#define AT_MSN 2
+#define AT_BC 3
+#define AT_PROTO 4
+#define AT_TYPE 5
+#define AT_CLIP 6
+/* total number */
+#define AT_NUM 7
+
+/* variables in struct at_state_t */
+/* - numeric */
+#define VAR_ZSAU 0
+#define VAR_ZDLE 1
+#define VAR_ZCTP 2
+/* total number */
+#define VAR_NUM 3
+/* - string */
+#define STR_NMBR 0
+#define STR_ZCPN 1
+#define STR_ZCON 2
+#define STR_ZBC 3
+#define STR_ZHLC 4
+/* total number */
+#define STR_NUM 5
+
+/* event types */
+#define EV_TIMEOUT -105
+#define EV_IF_VER -106
+#define EV_PROC_CIDMODE -107
+#define EV_SHUTDOWN -108
+#define EV_START -110
+#define EV_STOP -111
+#define EV_IF_LOCK -112
+#define EV_ACCEPT -114
+#define EV_DIAL -115
+#define EV_HUP -116
+#define EV_BC_OPEN -117
+#define EV_BC_CLOSED -118
+
+/* input state */
+#define INS_command 0x0001 /* receiving messages (not payload data) */
+#define INS_DLE_char 0x0002 /* DLE flag received (in DLE mode) */
+#define INS_byte_stuff 0x0004
+#define INS_have_data 0x0008
+#define INS_DLE_command 0x0020 /* DLE message start (<DLE> X) received */
+#define INS_flag_hunt 0x0040
+
+/* channel state */
+#define CHS_D_UP 0x01
+#define CHS_B_UP 0x02
+#define CHS_NOTIFY_LL 0x04
+
+#define ICALL_REJECT 0
+#define ICALL_ACCEPT 1
+#define ICALL_IGNORE 2
+
+/* device state */
+#define MS_UNINITIALIZED 0
+#define MS_INIT 1
+#define MS_LOCKED 2
+#define MS_SHUTDOWN 3
+#define MS_RECOVER 4
+#define MS_READY 5
+
+/* mode */
+#define M_UNKNOWN 0
+#define M_CONFIG 1
+#define M_UNIMODEM 2
+#define M_CID 3
+
+/* start mode */
+#define SM_LOCKED 0
+#define SM_ISDN 1 /* default */
+
+/* layer 2 protocols (AT^SBPR=...) */
+#define L2_BITSYNC 0
+#define L2_HDLC 1
+#define L2_VOICE 2
+
+struct gigaset_ops;
+struct gigaset_driver;
+
+struct usb_cardstate;
+struct ser_cardstate;
+struct bas_cardstate;
+
+struct bc_state;
+struct usb_bc_state;
+struct ser_bc_state;
+struct bas_bc_state;
+
+struct reply_t {
+ int resp_code; /* RSP_XXXX */
+ int min_ConState; /* <0 => ignore */
+ int max_ConState; /* <0 => ignore */
+ int parameter; /* e.g. ZSAU_XXXX <0: ignore*/
+ int new_ConState; /* <0 => ignore */
+ int timeout; /* >0 => *HZ; <=0 => TOUT_XXXX*/
+ int action[MAXACT]; /* ACT_XXXX */
+ char *command; /* NULL==none */
+};
+
+extern struct reply_t gigaset_tab_cid[];
+extern struct reply_t gigaset_tab_nocid[];
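process_event() in ev-layer.c walks one of these tables row by row and uses the first entry whose response code, connection-state window and parameter all match; RSP_LAST terminates the table and RSP_ANY matches every response. A sketch of that lookup, restated as a helper (find_reply is a hypothetical name, not part of the driver):

static struct reply_t *find_reply(struct reply_t *tab, int resp_code,
				  int con_state, int parameter)
{
	struct reply_t *rep;

	for (rep = tab; rep->resp_code != RSP_LAST; rep++) {
		if (rep->resp_code != RSP_ANY && rep->resp_code != resp_code)
			continue;		/* response code mismatch */
		if (con_state < rep->min_ConState)
			continue;		/* below the state window */
		if (rep->max_ConState >= 0 && con_state > rep->max_ConState)
			continue;		/* above the state window */
		if (rep->parameter >= 0 && rep->parameter != parameter)
			continue;		/* parameter filter mismatch */
		return rep;
	}
	return NULL;				/* no matching row */
}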
+
+struct inbuf_t {
+ struct cardstate *cs;
+ int inputstate;
+ int head, tail;
+ unsigned char data[RBUFSIZE];
+};
+
+/* isochronous write buffer structure
+ * circular buffer with pad area for extraction of complete USB frames
+ * - data[read..nextread-1] is valid data already submitted to the USB subsystem
+ * - data[nextread..write-1] is valid data yet to be sent
+ * - data[write] is the next byte to write to
+ * - in byte-oriented L2 protocols, it is completely free
+ * - in bit-oriented L2 protocols, it may contain a partial byte of valid data
+ * - data[write+1..read-1] is free
+ * - wbits is the number of valid data bits in data[write], starting at the LSB
+ * - writesem is the semaphore for writing to the buffer:
+ * if writesem <= 0, data[write..read-1] is currently being written to
+ * - idle contains the byte value to repeat when the end of valid data is
+ * reached; if nextread==write (buffer contains no data to send), either the
+ * BAS_OUTBUFPAD bytes immediately before data[write] (if
+ * write>=BAS_OUTBUFPAD) or those of the pad area (if write<BAS_OUTBUFPAD)
+ * are also filled with that value
+ */
+struct isowbuf_t {
+ int read;
+ int nextread;
+ int write;
+ atomic_t writesem;
+ int wbits;
+ unsigned char data[BAS_OUTBUFSIZE + BAS_OUTBUFPAD];
+ unsigned char idle;
+};
+
+/* isochronous write URB context structure
+ * data to be stored along with the URB and retrieved when it is returned
+ * as completed by the USB subsystem
+ * - urb: pointer to the URB itself
+ * - bcs: pointer to the B Channel control structure
+ * - limit: end of write buffer area covered by this URB
+ * - status: URB completion status
+ */
+struct isow_urbctx_t {
+ struct urb *urb;
+ struct bc_state *bcs;
+ int limit;
+ int status;
+};
+
+/* AT state structure
+ * data associated with the state of an ISDN connection, whether or not
+ * it is currently assigned a B channel
+ */
+struct at_state_t {
+ struct list_head list;
+ int waiting;
+ int getstring;
+ unsigned timer_index;
+ unsigned long timer_expires;
+ int timer_active;
+ unsigned int ConState; /* State of connection */
+ struct reply_t *replystruct;
+ int cid;
+ int int_var[VAR_NUM]; /* see VAR_XXXX */
+ char *str_var[STR_NUM]; /* see STR_XXXX */
+ unsigned pending_commands; /* see PC_XXXX */
+ unsigned seq_index;
+
+ struct cardstate *cs;
+ struct bc_state *bcs;
+};
+
+struct event_t {
+ int type;
+ void *ptr, *arg;
+ int parameter;
+ int cid;
+ struct at_state_t *at_state;
+};
+
+/* This structure holds all information about a B channel in use */
+struct bc_state {
+ struct sk_buff *tx_skb; /* Current transfer buffer to modem */
+ struct sk_buff_head squeue; /* B-Channel send Queue */
+
+ /* Variables for debugging */
+ int corrupted; /* Counter of corrupted packets */
+ int trans_down; /* Counter of packets (downstream) */
+ int trans_up; /* Counter of packets (upstream) */
+
+ struct at_state_t at_state;
+
+ /* receive buffer */
+ unsigned rx_bufsize; /* max size accepted by application */
+ struct sk_buff *rx_skb;
+ __u16 rx_fcs;
+ int inputstate; /* see INS_XXXX */
+
+ int channel;
+
+ struct cardstate *cs;
+
+ unsigned chstate; /* bitmap (CHS_*) */
+ int ignore;
+ unsigned proto2; /* layer 2 protocol (L2_*) */
+ char *commands[AT_NUM]; /* see AT_XXXX */
+
+#ifdef CONFIG_GIGASET_DEBUG
+ int emptycount;
+#endif
+ int busy;
+ int use_count;
+
+ /* private data of hardware drivers */
+ union {
+ struct ser_bc_state *ser; /* serial hardware driver */
+ struct usb_bc_state *usb; /* usb hardware driver (m105) */
+ struct bas_bc_state *bas; /* usb hardware driver (base) */
+ } hw;
+
+ void *ap; /* associated LL application */
+ int apconnstate; /* LL application connection state */
+ spinlock_t aplock;
+};
+
+struct cardstate {
+ struct gigaset_driver *driver;
+ unsigned minor_index;
+ struct device *dev;
+ struct device *tty_dev;
+ unsigned flags;
+
+ const struct gigaset_ops *ops;
+
+ /* Stuff to handle communication */
+ wait_queue_head_t waitqueue;
+ int waiting;
+ int mode; /* see M_XXXX */
+ int mstate; /* Modem state: see MS_XXXX */
+ /* only changed by the event layer */
+ int cmd_result;
+
+ int channels;
+ struct bc_state *bcs; /* Array of struct bc_state */
+
+ int onechannel; /* data and commands transmitted in one
+ stream (M10x) */
+
+ spinlock_t lock;
+ struct at_state_t at_state; /* at_state_t for cid == 0 */
+ struct list_head temp_at_states;/* list of temporary "struct
+ at_state_t"s without B channel */
+
+ struct inbuf_t *inbuf;
+
+ struct cmdbuf_t *cmdbuf, *lastcmdbuf;
+ spinlock_t cmdlock;
+ unsigned curlen, cmdbytes;
+
+ struct tty_port port;
+ struct tasklet_struct if_wake_tasklet;
+ unsigned control_state;
+
+ unsigned fwver[4];
+ int gotfwver;
+
+ unsigned running; /* !=0 if events are handled */
+ unsigned connected; /* !=0 if hardware is connected */
+ unsigned isdn_up; /* !=0 after gigaset_isdn_start() */
+
+ unsigned cidmode;
+
+ int myid; /* id for communication with LL */
+ void *iif; /* LL interface structure */
+ unsigned short hw_hdr_len; /* headroom needed in data skbs */
+
+ struct reply_t *tabnocid;
+ struct reply_t *tabcid;
+ int cs_init;
+ int ignoreframes; /* frames to ignore after setting up the
+ B channel */
+ struct mutex mutex; /* locks this structure:
+ * connected is not changed,
+ * hardware_up is not changed,
+ * MState is not changed to or from
+ * MS_LOCKED */
+
+ struct timer_list timer;
+ int retry_count;
+ int dle; /* !=0 if DLE mode is active
+ (ZDLE=1 received -- M10x only) */
+ int cur_at_seq; /* sequence of AT commands being
+ processed */
+ int curchannel; /* channel those commands are meant
+ for */
+ int commands_pending; /* flag(s) in xxx.commands_pending have
+ been set */
+ struct tasklet_struct
+ event_tasklet; /* tasklet for serializing AT commands.
+ * Scheduled
+ * -> for modem responses (and
+ * incoming data for M10x)
+ * -> on timeout
+ * -> after setting bits in
+ * xxx.at_state.pending_command
+ * (e.g. command from LL) */
+ struct tasklet_struct
+ write_tasklet; /* tasklet for serial output
+ * (not used in base driver) */
+
+ /* event queue */
+ struct event_t events[MAX_EVENTS];
+ unsigned ev_tail, ev_head;
+ spinlock_t ev_lock;
+
+ /* current modem response */
+ unsigned char respdata[MAX_RESP_SIZE + 1];
+ unsigned cbytes;
+
+ /* private data of hardware drivers */
+ union {
+ struct usb_cardstate *usb; /* USB hardware driver (m105) */
+ struct ser_cardstate *ser; /* serial hardware driver */
+ struct bas_cardstate *bas; /* USB hardware driver (base) */
+ } hw;
+};
+
+struct gigaset_driver {
+ struct list_head list;
+ spinlock_t lock; /* locks minor tables and blocked */
+ struct tty_driver *tty;
+ unsigned have_tty;
+ unsigned minor;
+ unsigned minors;
+ struct cardstate *cs;
+ int blocked;
+
+ const struct gigaset_ops *ops;
+ struct module *owner;
+};
+
+struct cmdbuf_t {
+ struct cmdbuf_t *next, *prev;
+ int len, offset;
+ struct tasklet_struct *wake_tasklet;
+ unsigned char buf[0];
+};
+
+struct bas_bc_state {
+ /* isochronous output state */
+ int running;
+ atomic_t corrbytes;
+ spinlock_t isooutlock;
+ struct isow_urbctx_t isoouturbs[BAS_OUTURBS];
+ struct isow_urbctx_t *isooutdone, *isooutfree, *isooutovfl;
+ struct isowbuf_t *isooutbuf;
+ unsigned numsub; /* submitted URB counter
+ (for diagnostic messages only) */
+ struct tasklet_struct sent_tasklet;
+
+ /* isochronous input state */
+ spinlock_t isoinlock;
+ struct urb *isoinurbs[BAS_INURBS];
+ unsigned char isoinbuf[BAS_INBUFSIZE * BAS_INURBS];
+ struct urb *isoindone; /* completed isoc read URB */
+ int isoinstatus; /* status of completed URB */
+ int loststatus; /* status of dropped URB */
+ unsigned isoinlost; /* number of bytes lost */
+ /* state of bit unstuffing algorithm
+ (in addition to BC_state.inputstate) */
+ unsigned seqlen; /* number of '1' bits not yet
+ unstuffed */
+ unsigned inbyte, inbits; /* collected bits for next byte */
+ /* statistics */
+ unsigned goodbytes; /* bytes correctly received */
+ unsigned alignerrs; /* frames with incomplete byte at end */
+ unsigned fcserrs; /* FCS errors */
+ unsigned frameerrs; /* framing errors */
+ unsigned giants; /* long frames */
+ unsigned runts; /* short frames */
+ unsigned aborts; /* HDLC aborts */
+ unsigned shared0s; /* '0' bits shared between flags */
+ unsigned stolen0s; /* '0' stuff bits also serving as
+ leading flag bits */
+ struct tasklet_struct rcvd_tasklet;
+};
+
+struct gigaset_ops {
+ /* Called from ev-layer.c/interface.c for sending AT commands to the
+ device */
+ int (*write_cmd)(struct cardstate *cs, struct cmdbuf_t *cb);
+
+ /* Called from interface.c for additional device control */
+ int (*write_room)(struct cardstate *cs);
+ int (*chars_in_buffer)(struct cardstate *cs);
+ int (*brkchars)(struct cardstate *cs, const unsigned char buf[6]);
+
+ /* Called from ev-layer.c after setting up connection
+ * Should call gigaset_bchannel_up(), when finished. */
+ int (*init_bchannel)(struct bc_state *bcs);
+
+ /* Called from ev-layer.c after hanging up
+ * Should call gigaset_bchannel_down(), when finished. */
+ int (*close_bchannel)(struct bc_state *bcs);
+
+ /* Called by gigaset_initcs() for setting up bcs->hw.xxx */
+ int (*initbcshw)(struct bc_state *bcs);
+
+ /* Called by gigaset_freecs() for freeing bcs->hw.xxx */
+ void (*freebcshw)(struct bc_state *bcs);
+
+ /* Called by gigaset_bchannel_down() for resetting bcs->hw.xxx */
+ void (*reinitbcshw)(struct bc_state *bcs);
+
+ /* Called by gigaset_initcs() for setting up cs->hw.xxx */
+ int (*initcshw)(struct cardstate *cs);
+
+ /* Called by gigaset_freecs() for freeing cs->hw.xxx */
+ void (*freecshw)(struct cardstate *cs);
+
+ /* Called from common.c/interface.c for additional serial port
+ control */
+ int (*set_modem_ctrl)(struct cardstate *cs, unsigned old_state,
+ unsigned new_state);
+ int (*baud_rate)(struct cardstate *cs, unsigned cflag);
+ int (*set_line_ctrl)(struct cardstate *cs, unsigned cflag);
+
+ /* Called from LL interface to put an skb into the send-queue.
+ * After sending is completed, gigaset_skb_sent() must be called
+ * with the skb's link layer header preserved. */
+ int (*send_skb)(struct bc_state *bcs, struct sk_buff *skb);
+
+ /* Called from ev-layer.c to process a block of data
+ * received through the common/control channel. */
+ void (*handle_input)(struct inbuf_t *inbuf);
+
+};
+
+/* = Common structures and definitions =======================================
+ */
+
+/* Parser states for DLE-Event:
+ * <DLE-EVENT>: <DLE_FLAG> "X" <EVENT> <DLE_FLAG> "."
+ * <DLE_FLAG>: 0x10
+ * <EVENT>: ((a-z)* | (A-Z)* | (0-10)*)+
+ */
+#define DLE_FLAG 0x10
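Per the grammar above, a DLE event is framed as <0x10> 'X' <event text> <0x10> '.'. A small illustrative scanner for that framing (sketch only; the driver's real parser additionally has to deal with DLE byte stuffing inside payload data, and scan_dle_event is a hypothetical name):

static int scan_dle_event(const unsigned char *buf, int len,
			  char *out, int outsize)
{
	int i, n = 0;

	if (len < 4 || buf[0] != DLE_FLAG || buf[1] != 'X')
		return -1;			/* not the start of a DLE event */
	for (i = 2; i + 1 < len; i++) {
		if (buf[i] == DLE_FLAG && buf[i + 1] == '.') {
			out[n] = '\0';
			return n;		/* complete event: name length */
		}
		if (n < outsize - 1)
			out[n++] = buf[i];	/* collect the event text */
	}
	return -1;				/* terminating <DLE> "." not seen yet */
}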
+
+/* ===========================================================================
+ * Functions implemented in asyncdata.c
+ */
+
+/* Called from LL interface to put an skb into the send queue. */
+int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb);
+
+/* Called from ev-layer.c to process a block of data
+ * received through the common/control channel. */
+void gigaset_m10x_input(struct inbuf_t *inbuf);
+
+/* ===========================================================================
+ * Functions implemented in isocdata.c
+ */
+
+/* Called from LL interface to put an skb into the send queue. */
+int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb);
+
+/* Called from ev-layer.c to process a block of data
+ * received through the common/control channel. */
+void gigaset_isoc_input(struct inbuf_t *inbuf);
+
+/* Called from bas-gigaset.c to process a block of data
+ * received through the isochronous channel */
+void gigaset_isoc_receive(unsigned char *src, unsigned count,
+ struct bc_state *bcs);
+
+/* Called from bas-gigaset.c to put a block of data
+ * into the isochronous output buffer */
+int gigaset_isoc_buildframe(struct bc_state *bcs, unsigned char *in, int len);
+
+/* Called from bas-gigaset.c to initialize the isochronous output buffer */
+void gigaset_isowbuf_init(struct isowbuf_t *iwb, unsigned char idle);
+
+/* Called from bas-gigaset.c to retrieve a block of bytes for sending */
+int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size);
+
+/* ===========================================================================
+ * Functions implemented in LL interface
+ */
+
+/* Called from common.c for setting up/shutting down with the ISDN subsystem */
+void gigaset_isdn_regdrv(void);
+void gigaset_isdn_unregdrv(void);
+int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid);
+void gigaset_isdn_unregdev(struct cardstate *cs);
+
+/* Called from hardware module to indicate completion of an skb */
+void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb);
+void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb);
+void gigaset_isdn_rcv_err(struct bc_state *bcs);
+
+/* Called from common.c/ev-layer.c to indicate events relevant to the LL */
+void gigaset_isdn_start(struct cardstate *cs);
+void gigaset_isdn_stop(struct cardstate *cs);
+int gigaset_isdn_icall(struct at_state_t *at_state);
+void gigaset_isdn_connD(struct bc_state *bcs);
+void gigaset_isdn_hupD(struct bc_state *bcs);
+void gigaset_isdn_connB(struct bc_state *bcs);
+void gigaset_isdn_hupB(struct bc_state *bcs);
+
+/* ===========================================================================
+ * Functions implemented in ev-layer.c
+ */
+
+/* tasklet called from common.c to process queued events */
+void gigaset_handle_event(unsigned long data);
+
+/* called from isocdata.c / asyncdata.c
+ * when a complete modem response line has been received */
+void gigaset_handle_modem_response(struct cardstate *cs);
+
+/* ===========================================================================
+ * Functions implemented in proc.c
+ */
+
+/* initialize sysfs for device */
+void gigaset_init_dev_sysfs(struct cardstate *cs);
+void gigaset_free_dev_sysfs(struct cardstate *cs);
+
+/* ===========================================================================
+ * Functions implemented in common.c/gigaset.h
+ */
+
+void gigaset_bcs_reinit(struct bc_state *bcs);
+void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs,
+ struct cardstate *cs, int cid);
+int gigaset_get_channel(struct bc_state *bcs);
+struct bc_state *gigaset_get_free_channel(struct cardstate *cs);
+void gigaset_free_channel(struct bc_state *bcs);
+int gigaset_get_channels(struct cardstate *cs);
+void gigaset_free_channels(struct cardstate *cs);
+void gigaset_block_channels(struct cardstate *cs);
+
+/* Allocate and initialize driver structure. */
+struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors,
+ const char *procname,
+ const char *devname,
+ const struct gigaset_ops *ops,
+ struct module *owner);
+
+/* Deallocate driver structure. */
+void gigaset_freedriver(struct gigaset_driver *drv);
+
+struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty);
+struct cardstate *gigaset_get_cs_by_id(int id);
+void gigaset_blockdriver(struct gigaset_driver *drv);
+
+/* Allocate and initialize card state. Calls hardware dependent
+ gigaset_init[b]cs(). */
+struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
+ int onechannel, int ignoreframes,
+ int cidmode, const char *modulename);
+
+/* Free card state. Calls hardware dependent gigaset_free[b]cs(). */
+void gigaset_freecs(struct cardstate *cs);
+
+/* Tell common.c that hardware and driver are ready. */
+int gigaset_start(struct cardstate *cs);
+
+/* Tell common.c that the device is not present any more. */
+void gigaset_stop(struct cardstate *cs);
+
+/* Tell common.c that the driver is being unloaded. */
+int gigaset_shutdown(struct cardstate *cs);
+
+/* Append event to the queue.
+ * Returns NULL on failure or a pointer to the event on success.
+ * ptr must be kmalloc()ed (and not be freed by the caller).
+ */
+struct event_t *gigaset_add_event(struct cardstate *cs,
+ struct at_state_t *at_state, int type,
+ void *ptr, int parameter, void *arg);
+
+/* Called on CONFIG1 command from frontend. */
+int gigaset_enterconfigmode(struct cardstate *cs);
+
+/* cs->lock must not be locked */
+static inline void gigaset_schedule_event(struct cardstate *cs)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&cs->lock, flags);
+ if (cs->running)
+ tasklet_schedule(&cs->event_tasklet);
+ spin_unlock_irqrestore(&cs->lock, flags);
+}
+
+/* Tell common.c that B channel has been closed. */
+/* cs->lock must not be locked */
+static inline void gigaset_bchannel_down(struct bc_state *bcs)
+{
+ gigaset_add_event(bcs->cs, &bcs->at_state, EV_BC_CLOSED, NULL, 0, NULL);
+ gigaset_schedule_event(bcs->cs);
+}
+
+/* Tell common.c that B channel has been opened. */
+/* cs->lock must not be locked */
+static inline void gigaset_bchannel_up(struct bc_state *bcs)
+{
+ gigaset_add_event(bcs->cs, &bcs->at_state, EV_BC_OPEN, NULL, 0, NULL);
+ gigaset_schedule_event(bcs->cs);
+}
+
+/* set up next receive skb for data mode */
+static inline struct sk_buff *gigaset_new_rx_skb(struct bc_state *bcs)
+{
+ struct cardstate *cs = bcs->cs;
+ unsigned short hw_hdr_len = cs->hw_hdr_len;
+
+ if (bcs->ignore) {
+ bcs->rx_skb = NULL;
+ } else {
+ bcs->rx_skb = dev_alloc_skb(bcs->rx_bufsize + hw_hdr_len);
+ if (bcs->rx_skb == NULL)
+ dev_warn(cs->dev, "could not allocate skb\n");
+ else
+ skb_reserve(bcs->rx_skb, hw_hdr_len);
+ }
+ return bcs->rx_skb;
+}
+
+/* append received bytes to inbuf */
+int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src,
+ unsigned numbytes);
+
+/* ===========================================================================
+ * Functions implemented in interface.c
+ */
+
+/* initialize interface */
+void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname,
+ const char *devname);
+/* release interface */
+void gigaset_if_freedriver(struct gigaset_driver *drv);
+/* add minor */
+void gigaset_if_init(struct cardstate *cs);
+/* remove minor */
+void gigaset_if_free(struct cardstate *cs);
+/* device received data */
+void gigaset_if_receive(struct cardstate *cs,
+ unsigned char *buffer, size_t len);
+
+#endif
diff --git a/drivers/staging/isdn/gigaset/interface.c b/drivers/staging/isdn/gigaset/interface.c
new file mode 100644
index 000000000000..17fa615a8c68
--- /dev/null
+++ b/drivers/staging/isdn/gigaset/interface.c
@@ -0,0 +1,613 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * interface to user space for the gigaset driver
+ *
+ * Copyright (c) 2004 by Hansjoerg Lipp <hjlipp@web.de>
+ *
+ * =====================================================================
+ * =====================================================================
+ */
+
+#include "gigaset.h"
+#include <linux/gigaset_dev.h>
+#include <linux/tty_flip.h>
+#include <linux/module.h>
+
+/*** our ioctls ***/
+
+static int if_lock(struct cardstate *cs, int *arg)
+{
+ int cmd = *arg;
+
+ gig_dbg(DEBUG_IF, "%u: if_lock (%d)", cs->minor_index, cmd);
+
+ if (cmd > 1)
+ return -EINVAL;
+
+ if (cmd < 0) {
+ *arg = cs->mstate == MS_LOCKED;
+ return 0;
+ }
+
+ if (!cmd && cs->mstate == MS_LOCKED && cs->connected) {
+ cs->ops->set_modem_ctrl(cs, 0, TIOCM_DTR | TIOCM_RTS);
+ cs->ops->baud_rate(cs, B115200);
+ cs->ops->set_line_ctrl(cs, CS8);
+ cs->control_state = TIOCM_DTR | TIOCM_RTS;
+ }
+
+ cs->waiting = 1;
+ if (!gigaset_add_event(cs, &cs->at_state, EV_IF_LOCK,
+ NULL, cmd, NULL)) {
+ cs->waiting = 0;
+ return -ENOMEM;
+ }
+ gigaset_schedule_event(cs);
+
+ wait_event(cs->waitqueue, !cs->waiting);
+
+ if (cs->cmd_result >= 0) {
+ *arg = cs->cmd_result;
+ return 0;
+ }
+
+ return cs->cmd_result;
+}
+
+static int if_version(struct cardstate *cs, unsigned arg[4])
+{
+ static const unsigned version[4] = GIG_VERSION;
+ static const unsigned compat[4] = GIG_COMPAT;
+ unsigned cmd = arg[0];
+
+ gig_dbg(DEBUG_IF, "%u: if_version (%d)", cs->minor_index, cmd);
+
+ switch (cmd) {
+ case GIGVER_DRIVER:
+ memcpy(arg, version, sizeof version);
+ return 0;
+ case GIGVER_COMPAT:
+ memcpy(arg, compat, sizeof compat);
+ return 0;
+ case GIGVER_FWBASE:
+ cs->waiting = 1;
+ if (!gigaset_add_event(cs, &cs->at_state, EV_IF_VER,
+ NULL, 0, arg)) {
+ cs->waiting = 0;
+ return -ENOMEM;
+ }
+ gigaset_schedule_event(cs);
+
+ wait_event(cs->waitqueue, !cs->waiting);
+
+ if (cs->cmd_result >= 0)
+ return 0;
+
+ return cs->cmd_result;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int if_config(struct cardstate *cs, int *arg)
+{
+ gig_dbg(DEBUG_IF, "%u: if_config (%d)", cs->minor_index, *arg);
+
+ if (*arg != 1)
+ return -EINVAL;
+
+ if (cs->mstate != MS_LOCKED)
+ return -EBUSY;
+
+ if (!cs->connected) {
+ pr_err("%s: not connected\n", __func__);
+ return -ENODEV;
+ }
+
+ *arg = 0;
+ return gigaset_enterconfigmode(cs);
+}
+
+/*** the terminal driver ***/
+
+static int if_open(struct tty_struct *tty, struct file *filp)
+{
+ struct cardstate *cs;
+
+ gig_dbg(DEBUG_IF, "%d+%d: %s()",
+ tty->driver->minor_start, tty->index, __func__);
+
+ cs = gigaset_get_cs_by_tty(tty);
+ if (!cs || !try_module_get(cs->driver->owner))
+ return -ENODEV;
+
+ if (mutex_lock_interruptible(&cs->mutex)) {
+ module_put(cs->driver->owner);
+ return -ERESTARTSYS;
+ }
+ tty->driver_data = cs;
+
+ ++cs->port.count;
+
+ if (cs->port.count == 1) {
+ tty_port_tty_set(&cs->port, tty);
+ cs->port.low_latency = 1;
+ }
+
+ mutex_unlock(&cs->mutex);
+ return 0;
+}
+
+static void if_close(struct tty_struct *tty, struct file *filp)
+{
+ struct cardstate *cs = tty->driver_data;
+
+ if (!cs) { /* happens if we didn't find cs in open */
+ gig_dbg(DEBUG_IF, "%s: no cardstate", __func__);
+ return;
+ }
+
+ gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
+
+ mutex_lock(&cs->mutex);
+
+ if (!cs->connected)
+ gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
+ else if (!cs->port.count)
+ dev_warn(cs->dev, "%s: device not opened\n", __func__);
+ else if (!--cs->port.count)
+ tty_port_tty_set(&cs->port, NULL);
+
+ mutex_unlock(&cs->mutex);
+
+ module_put(cs->driver->owner);
+}
+
+static int if_ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg)
+{
+ struct cardstate *cs = tty->driver_data;
+ int retval = -ENODEV;
+ int int_arg;
+ unsigned char buf[6];
+ unsigned version[4];
+
+ gig_dbg(DEBUG_IF, "%u: %s(0x%x)", cs->minor_index, __func__, cmd);
+
+ if (mutex_lock_interruptible(&cs->mutex))
+ return -ERESTARTSYS;
+
+ if (!cs->connected) {
+ gig_dbg(DEBUG_IF, "not connected");
+ retval = -ENODEV;
+ } else {
+ retval = 0;
+ switch (cmd) {
+ case GIGASET_REDIR:
+ retval = get_user(int_arg, (int __user *) arg);
+ if (retval >= 0)
+ retval = if_lock(cs, &int_arg);
+ if (retval >= 0)
+ retval = put_user(int_arg, (int __user *) arg);
+ break;
+ case GIGASET_CONFIG:
+ retval = get_user(int_arg, (int __user *) arg);
+ if (retval >= 0)
+ retval = if_config(cs, &int_arg);
+ if (retval >= 0)
+ retval = put_user(int_arg, (int __user *) arg);
+ break;
+ case GIGASET_BRKCHARS:
+ retval = copy_from_user(&buf,
+ (const unsigned char __user *) arg, 6)
+ ? -EFAULT : 0;
+ if (retval >= 0) {
+ gigaset_dbg_buffer(DEBUG_IF, "GIGASET_BRKCHARS",
+ 6, buf);
+ retval = cs->ops->brkchars(cs, buf);
+ }
+ break;
+ case GIGASET_VERSION:
+ retval = copy_from_user(version,
+ (unsigned __user *) arg, sizeof version)
+ ? -EFAULT : 0;
+ if (retval >= 0)
+ retval = if_version(cs, version);
+ if (retval >= 0)
+ retval = copy_to_user((unsigned __user *) arg,
+ version, sizeof version)
+ ? -EFAULT : 0;
+ break;
+ default:
+ gig_dbg(DEBUG_IF, "%s: arg not supported - 0x%04x",
+ __func__, cmd);
+ retval = -ENOIOCTLCMD;
+ }
+ }
+
+ mutex_unlock(&cs->mutex);
+
+ return retval;
+}
+
+#ifdef CONFIG_COMPAT
+static long if_compat_ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg)
+{
+ return if_ioctl(tty, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static int if_tiocmget(struct tty_struct *tty)
+{
+ struct cardstate *cs = tty->driver_data;
+ int retval;
+
+ gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
+
+ if (mutex_lock_interruptible(&cs->mutex))
+ return -ERESTARTSYS;
+
+ retval = cs->control_state & (TIOCM_RTS | TIOCM_DTR);
+
+ mutex_unlock(&cs->mutex);
+
+ return retval;
+}
+
+static int if_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+{
+ struct cardstate *cs = tty->driver_data;
+ int retval;
+ unsigned mc;
+
+ gig_dbg(DEBUG_IF, "%u: %s(0x%x, 0x%x)",
+ cs->minor_index, __func__, set, clear);
+
+ if (mutex_lock_interruptible(&cs->mutex))
+ return -ERESTARTSYS;
+
+ if (!cs->connected) {
+ gig_dbg(DEBUG_IF, "not connected");
+ retval = -ENODEV;
+ } else {
+ mc = (cs->control_state | set) & ~clear & (TIOCM_RTS | TIOCM_DTR);
+ retval = cs->ops->set_modem_ctrl(cs, cs->control_state, mc);
+ cs->control_state = mc;
+ }
+
+ mutex_unlock(&cs->mutex);
+
+ return retval;
+}
+
+static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+ struct cardstate *cs = tty->driver_data;
+ struct cmdbuf_t *cb;
+ int retval;
+
+ gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
+
+ if (mutex_lock_interruptible(&cs->mutex))
+ return -ERESTARTSYS;
+
+ if (!cs->connected) {
+ gig_dbg(DEBUG_IF, "not connected");
+ retval = -ENODEV;
+ goto done;
+ }
+ if (cs->mstate != MS_LOCKED) {
+ dev_warn(cs->dev, "can't write to unlocked device\n");
+ retval = -EBUSY;
+ goto done;
+ }
+ if (count <= 0) {
+ /* nothing to do */
+ retval = 0;
+ goto done;
+ }
+
+ cb = kmalloc(sizeof(struct cmdbuf_t) + count, GFP_KERNEL);
+ if (!cb) {
+ dev_err(cs->dev, "%s: out of memory\n", __func__);
+ retval = -ENOMEM;
+ goto done;
+ }
+
+ memcpy(cb->buf, buf, count);
+ cb->len = count;
+ cb->offset = 0;
+ cb->next = NULL;
+ cb->wake_tasklet = &cs->if_wake_tasklet;
+ retval = cs->ops->write_cmd(cs, cb);
+done:
+ mutex_unlock(&cs->mutex);
+ return retval;
+}
+
+static int if_write_room(struct tty_struct *tty)
+{
+ struct cardstate *cs = tty->driver_data;
+ int retval;
+
+ gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
+
+ if (mutex_lock_interruptible(&cs->mutex))
+ return -ERESTARTSYS;
+
+ if (!cs->connected) {
+ gig_dbg(DEBUG_IF, "not connected");
+ retval = -ENODEV;
+ } else if (cs->mstate != MS_LOCKED) {
+ dev_warn(cs->dev, "can't write to unlocked device\n");
+ retval = -EBUSY;
+ } else
+ retval = cs->ops->write_room(cs);
+
+ mutex_unlock(&cs->mutex);
+
+ return retval;
+}
+
+static int if_chars_in_buffer(struct tty_struct *tty)
+{
+ struct cardstate *cs = tty->driver_data;
+ int retval = 0;
+
+ gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
+
+ mutex_lock(&cs->mutex);
+
+ if (!cs->connected)
+ gig_dbg(DEBUG_IF, "not connected");
+ else if (cs->mstate != MS_LOCKED)
+ dev_warn(cs->dev, "can't write to unlocked device\n");
+ else
+ retval = cs->ops->chars_in_buffer(cs);
+
+ mutex_unlock(&cs->mutex);
+
+ return retval;
+}
+
+static void if_throttle(struct tty_struct *tty)
+{
+ struct cardstate *cs = tty->driver_data;
+
+ gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
+
+ mutex_lock(&cs->mutex);
+
+ if (!cs->connected)
+ gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
+ else
+ gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
+
+ mutex_unlock(&cs->mutex);
+}
+
+static void if_unthrottle(struct tty_struct *tty)
+{
+ struct cardstate *cs = tty->driver_data;
+
+ gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
+
+ mutex_lock(&cs->mutex);
+
+ if (!cs->connected)
+ gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
+ else
+ gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
+
+ mutex_unlock(&cs->mutex);
+}
+
+static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
+{
+ struct cardstate *cs = tty->driver_data;
+ unsigned int iflag;
+ unsigned int cflag;
+ unsigned int old_cflag;
+ unsigned int control_state, new_state;
+
+ gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
+
+ mutex_lock(&cs->mutex);
+
+ if (!cs->connected) {
+ gig_dbg(DEBUG_IF, "not connected");
+ goto out;
+ }
+
+ iflag = tty->termios.c_iflag;
+ cflag = tty->termios.c_cflag;
+ old_cflag = old ? old->c_cflag : cflag;
+ gig_dbg(DEBUG_IF, "%u: iflag %x cflag %x old %x",
+ cs->minor_index, iflag, cflag, old_cflag);
+
+ /* get a local copy of the current port settings */
+ control_state = cs->control_state;
+
+ /*
+ * Update baud rate.
+ * Do not attempt to cache old rates and skip settings,
+ * disconnects screw such tricks up completely.
+ * Premature optimization is the root of all evil.
+ */
+
+ /* reassert DTR and (maybe) RTS on transition from B0 */
+ if ((old_cflag & CBAUD) == B0) {
+ new_state = control_state | TIOCM_DTR;
+ /* don't set RTS if using hardware flow control */
+ if (!(old_cflag & CRTSCTS))
+ new_state |= TIOCM_RTS;
+ gig_dbg(DEBUG_IF, "%u: from B0 - set DTR%s",
+ cs->minor_index,
+ (new_state & TIOCM_RTS) ? " only" : "/RTS");
+ cs->ops->set_modem_ctrl(cs, control_state, new_state);
+ control_state = new_state;
+ }
+
+ cs->ops->baud_rate(cs, cflag & CBAUD);
+
+ if ((cflag & CBAUD) == B0) {
+ /* Drop RTS and DTR */
+ gig_dbg(DEBUG_IF, "%u: to B0 - drop DTR/RTS", cs->minor_index);
+ new_state = control_state & ~(TIOCM_DTR | TIOCM_RTS);
+ cs->ops->set_modem_ctrl(cs, control_state, new_state);
+ control_state = new_state;
+ }
+
+ /*
+ * Update line control register (LCR)
+ */
+
+ cs->ops->set_line_ctrl(cs, cflag);
+
+ /* save off the modified port settings */
+ cs->control_state = control_state;
+
+out:
+ mutex_unlock(&cs->mutex);
+}
+
+static const struct tty_operations if_ops = {
+ .open = if_open,
+ .close = if_close,
+ .ioctl = if_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = if_compat_ioctl,
+#endif
+ .write = if_write,
+ .write_room = if_write_room,
+ .chars_in_buffer = if_chars_in_buffer,
+ .set_termios = if_set_termios,
+ .throttle = if_throttle,
+ .unthrottle = if_unthrottle,
+ .tiocmget = if_tiocmget,
+ .tiocmset = if_tiocmset,
+};
+
+
+/* wakeup tasklet for the write operation */
+static void if_wake(unsigned long data)
+{
+ struct cardstate *cs = (struct cardstate *)data;
+
+ tty_port_tty_wakeup(&cs->port);
+}
+
+/*** interface to common ***/
+
+void gigaset_if_init(struct cardstate *cs)
+{
+ struct gigaset_driver *drv;
+
+ drv = cs->driver;
+ if (!drv->have_tty)
+ return;
+
+ tasklet_init(&cs->if_wake_tasklet, if_wake, (unsigned long) cs);
+
+ mutex_lock(&cs->mutex);
+ cs->tty_dev = tty_port_register_device(&cs->port, drv->tty,
+ cs->minor_index, NULL);
+
+ if (!IS_ERR(cs->tty_dev))
+ dev_set_drvdata(cs->tty_dev, cs);
+ else {
+ pr_warning("could not register device to the tty subsystem\n");
+ cs->tty_dev = NULL;
+ }
+ mutex_unlock(&cs->mutex);
+}
+
+void gigaset_if_free(struct cardstate *cs)
+{
+ struct gigaset_driver *drv;
+
+ drv = cs->driver;
+ if (!drv->have_tty)
+ return;
+
+ tasklet_disable(&cs->if_wake_tasklet);
+ tasklet_kill(&cs->if_wake_tasklet);
+ cs->tty_dev = NULL;
+ tty_unregister_device(drv->tty, cs->minor_index);
+}
+
+/**
+ * gigaset_if_receive() - pass a received block of data to the tty device
+ * @cs: device descriptor structure.
+ * @buffer: received data.
+ * @len: number of bytes received.
+ *
+ * Called by asyncdata/isocdata if a block of data received from the
+ * device must be sent to userspace through the ttyG* device.
+ */
+void gigaset_if_receive(struct cardstate *cs,
+ unsigned char *buffer, size_t len)
+{
+ tty_insert_flip_string(&cs->port, buffer, len);
+ tty_flip_buffer_push(&cs->port);
+}
+EXPORT_SYMBOL_GPL(gigaset_if_receive);
+
+/* gigaset_if_initdriver
+ * Initialize tty interface.
+ * parameters:
+ * drv Driver
+ * procname Name of the driver (e.g. for /proc/tty/drivers)
+ * devname Name of the device files (prefix without minor number)
+ */
+void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname,
+ const char *devname)
+{
+ int ret;
+ struct tty_driver *tty;
+
+ drv->have_tty = 0;
+
+ drv->tty = tty = alloc_tty_driver(drv->minors);
+ if (tty == NULL)
+ goto enomem;
+
+ tty->type = TTY_DRIVER_TYPE_SERIAL;
+ tty->subtype = SERIAL_TYPE_NORMAL;
+ tty->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+
+ tty->driver_name = procname;
+ tty->name = devname;
+ tty->minor_start = drv->minor;
+
+ tty->init_termios = tty_std_termios;
+ tty->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ tty_set_operations(tty, &if_ops);
+
+ ret = tty_register_driver(tty);
+ if (ret < 0) {
+ pr_err("error %d registering tty driver\n", ret);
+ goto error;
+ }
+ gig_dbg(DEBUG_IF, "tty driver initialized");
+ drv->have_tty = 1;
+ return;
+
+enomem:
+ pr_err("out of memory\n");
+error:
+ if (drv->tty)
+ put_tty_driver(drv->tty);
+}
+
+void gigaset_if_freedriver(struct gigaset_driver *drv)
+{
+ if (!drv->have_tty)
+ return;
+
+ drv->have_tty = 0;
+ tty_unregister_driver(drv->tty);
+ put_tty_driver(drv->tty);
+}
diff --git a/drivers/staging/isdn/gigaset/isocdata.c b/drivers/staging/isdn/gigaset/isocdata.c
new file mode 100644
index 000000000000..3ecf6e33ed15
--- /dev/null
+++ b/drivers/staging/isdn/gigaset/isocdata.c
@@ -0,0 +1,1006 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Common data handling layer for bas_gigaset
+ *
+ * Copyright (c) 2005 by Tilman Schmidt <tilman@imap.cc>,
+ * Hansjoerg Lipp <hjlipp@web.de>.
+ *
+ * =====================================================================
+ * =====================================================================
+ */
+
+#include "gigaset.h"
+#include <linux/crc-ccitt.h>
+#include <linux/bitrev.h>
+
+/* access methods for isowbuf_t */
+/* ============================ */
+
+/* initialize buffer structure
+ */
+void gigaset_isowbuf_init(struct isowbuf_t *iwb, unsigned char idle)
+{
+ iwb->read = 0;
+ iwb->nextread = 0;
+ iwb->write = 0;
+ atomic_set(&iwb->writesem, 1);
+ iwb->wbits = 0;
+ iwb->idle = idle;
+ memset(iwb->data + BAS_OUTBUFSIZE, idle, BAS_OUTBUFPAD);
+}
+
+/* compute number of bytes which can be appended to buffer
+ * so that there is still room to append a maximum frame of flags
+ */
+static inline int isowbuf_freebytes(struct isowbuf_t *iwb)
+{
+ int read, write, freebytes;
+
+ read = iwb->read;
+ write = iwb->write;
+ freebytes = read - write;
+ if (freebytes > 0) {
+ /* no wraparound: need padding space within regular area */
+ return freebytes - BAS_OUTBUFPAD;
+ } else if (read < BAS_OUTBUFPAD) {
+ /* wraparound: can use space up to end of regular area */
+ return BAS_OUTBUFSIZE - write;
+ } else {
+ /* following the wraparound yields more space */
+ return freebytes + BAS_OUTBUFSIZE - BAS_OUTBUFPAD;
+ }
+}
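With BAS_OUTBUFSIZE = 4096 and BAS_OUTBUFPAD = 16, the three cases above work out as follows (illustrative numbers): read = 1000, write = 100 leaves 1000 - 100 - 16 = 884 appendable bytes; read = 4, write = 2000 leaves 4096 - 2000 = 2096, since writing may only continue to the end of the regular area while the read position is still inside the pad-sized prefix; read = 100, write = 2000 leaves (100 - 2000) + 4096 - 16 = 2180, i.e. wrapping around yields more room than stopping at the buffer end.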
+
+/* start writing
+ * acquire the write semaphore
+ * return 0 if acquired, <0 if busy
+ */
+static inline int isowbuf_startwrite(struct isowbuf_t *iwb)
+{
+ if (!atomic_dec_and_test(&iwb->writesem)) {
+ atomic_inc(&iwb->writesem);
+ gig_dbg(DEBUG_ISO, "%s: couldn't acquire iso write semaphore",
+ __func__);
+ return -EBUSY;
+ }
+ gig_dbg(DEBUG_ISO,
+ "%s: acquired iso write semaphore, data[write]=%02x, nbits=%d",
+ __func__, iwb->data[iwb->write], iwb->wbits);
+ return 0;
+}
+
+/* finish writing
+ * release the write semaphore
+ * returns the current write position
+ */
+static inline int isowbuf_donewrite(struct isowbuf_t *iwb)
+{
+ int write = iwb->write;
+ atomic_inc(&iwb->writesem);
+ return write;
+}
+
+/* append bits to buffer without any checks
+ * - data contains bits to append, starting at LSB
+ * - nbits is number of bits to append (0..24)
+ * must be called with the write semaphore held
+ * If more than nbits bits are set in data, the extraneous bits are set in the
+ * buffer too, but the write position is only advanced by nbits.
+ */
+static inline void isowbuf_putbits(struct isowbuf_t *iwb, u32 data, int nbits)
+{
+ int write = iwb->write;
+ data <<= iwb->wbits;
+ data |= iwb->data[write];
+ nbits += iwb->wbits;
+ while (nbits >= 8) {
+ iwb->data[write++] = data & 0xff;
+ write %= BAS_OUTBUFSIZE;
+ data >>= 8;
+ nbits -= 8;
+ }
+ iwb->wbits = nbits;
+ iwb->data[write] = data & 0xff;
+ iwb->write = write;
+}
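isowbuf_putbits() packs bits LSB-first: new bits are shifted up past the wbits bits already collected in data[write], complete bytes are flushed into the buffer, and the leftover partial byte stays in data[write]. A stand-alone user-space restatement of that arithmetic (illustrative only, without the circular-buffer wraparound):

#include <stdio.h>
#include <stdint.h>

static unsigned char buf[16];
static int pos, wbits;

static void putbits(uint32_t data, int nbits)
{
	data <<= wbits;			/* align new bits above the partial byte */
	data |= buf[pos];		/* merge with bits already collected */
	nbits += wbits;
	while (nbits >= 8) {		/* flush complete bytes */
		buf[pos++] = data & 0xff;
		data >>= 8;
		nbits -= 8;
	}
	wbits = nbits;			/* remember the leftover bit count */
	buf[pos] = data & 0xff;		/* keep the partial byte in place */
}

int main(void)
{
	putbits(0x7e, 8);		/* one HDLC flag byte */
	putbits(0x3, 2);		/* two more bits, left as a partial byte */
	printf("pos=%d wbits=%d buf[0]=%02x buf[1]=%02x\n",
	       pos, wbits, buf[0], buf[1]);
	return 0;			/* prints: pos=1 wbits=2 buf[0]=7e buf[1]=03 */
}

Note that the original additionally keeps the write position inside BAS_OUTBUFSIZE via the modulo after each flushed byte; the sketch drops that for clarity.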
+
+/* put final flag on HDLC bitstream
+ * also sets the idle fill byte to the correspondingly shifted flag pattern
+ * must be called with the write semaphore held
+ */
+static inline void isowbuf_putflag(struct isowbuf_t *iwb)
+{
+ int write;
+
+ /* add two flags, thus reliably covering one byte */
+ isowbuf_putbits(iwb, 0x7e7e, 8);
+ /* recover the idle flag byte */
+ write = iwb->write;
+ iwb->idle = iwb->data[write];
+ gig_dbg(DEBUG_ISO, "idle fill byte %02x", iwb->idle);
+ /* mask extraneous bits in buffer */
+ iwb->data[write] &= (1 << iwb->wbits) - 1;
+}
+
+/* retrieve a block of bytes for sending
+ * The requested number of bytes is provided as a contiguous block.
+ * If necessary, the frame is filled to the requested number of bytes
+ * with the idle value.
+ * returns offset to frame, < 0 on busy or error
+ */
+int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
+{
+ int read, write, limit, src, dst;
+ unsigned char pbyte;
+
+ read = iwb->nextread;
+ write = iwb->write;
+ if (likely(read == write)) {
+ /* return idle frame */
+ return read < BAS_OUTBUFPAD ?
+ BAS_OUTBUFSIZE : read - BAS_OUTBUFPAD;
+ }
+
+ limit = read + size;
+ gig_dbg(DEBUG_STREAM, "%s: read=%d write=%d limit=%d",
+ __func__, read, write, limit);
+#ifdef CONFIG_GIGASET_DEBUG
+ if (unlikely(size < 0 || size > BAS_OUTBUFPAD)) {
+ pr_err("invalid size %d\n", size);
+ return -EINVAL;
+ }
+#endif
+
+ if (read < write) {
+ /* no wraparound in valid data */
+ if (limit >= write) {
+ /* append idle frame */
+ if (isowbuf_startwrite(iwb) < 0)
+ return -EBUSY;
+ /* write position could have changed */
+ write = iwb->write;
+ if (limit >= write) {
+ pbyte = iwb->data[write]; /* save
+ partial byte */
+ limit = write + BAS_OUTBUFPAD;
+ gig_dbg(DEBUG_STREAM,
+ "%s: filling %d->%d with %02x",
+ __func__, write, limit, iwb->idle);
+ if (write + BAS_OUTBUFPAD < BAS_OUTBUFSIZE)
+ memset(iwb->data + write, iwb->idle,
+ BAS_OUTBUFPAD);
+ else {
+ /* wraparound, fill entire pad area */
+ memset(iwb->data + write, iwb->idle,
+ BAS_OUTBUFSIZE + BAS_OUTBUFPAD
+ - write);
+ limit = 0;
+ }
+ gig_dbg(DEBUG_STREAM,
+ "%s: restoring %02x at %d",
+ __func__, pbyte, limit);
+ iwb->data[limit] = pbyte; /* restore
+ partial byte */
+ iwb->write = limit;
+ }
+ isowbuf_donewrite(iwb);
+ }
+ } else {
+ /* valid data wraparound */
+ if (limit >= BAS_OUTBUFSIZE) {
+ /* copy wrapped part into pad area */
+ src = 0;
+ dst = BAS_OUTBUFSIZE;
+ while (dst < limit && src < write)
+ iwb->data[dst++] = iwb->data[src++];
+ if (dst <= limit) {
+ /* fill pad area with idle byte */
+ memset(iwb->data + dst, iwb->idle,
+ BAS_OUTBUFSIZE + BAS_OUTBUFPAD - dst);
+ }
+ limit = src;
+ }
+ }
+ iwb->nextread = limit;
+ return read;
+}
+
+/* dump_bytes
+ * write hex bytes to syslog for debugging
+ */
+static inline void dump_bytes(enum debuglevel level, const char *tag,
+ unsigned char *bytes, int count)
+{
+#ifdef CONFIG_GIGASET_DEBUG
+ unsigned char c;
+ static char dbgline[3 * 32 + 1];
+ int i = 0;
+
+ if (!(gigaset_debuglevel & level))
+ return;
+
+ while (count-- > 0) {
+ if (i > sizeof(dbgline) - 4) {
+ dbgline[i] = '\0';
+ gig_dbg(level, "%s:%s", tag, dbgline);
+ i = 0;
+ }
+ c = *bytes++;
+ dbgline[i] = (i && !(i % 12)) ? '-' : ' ';
+ i++;
+ dbgline[i++] = hex_asc_hi(c);
+ dbgline[i++] = hex_asc_lo(c);
+ }
+ dbgline[i] = '\0';
+ gig_dbg(level, "%s:%s", tag, dbgline);
+#endif
+}
+
+/*============================================================================*/
+
+/* bytewise HDLC bitstuffing via table lookup
+ * lookup table: 5 subtables for 0..4 preceding consecutive '1' bits
+ * index: 256*(number of preceding '1' bits) + (next byte to stuff)
+ * value: bit 9.. 0 = result bits
+ * bit 12..10 = number of trailing '1' bits in result
+ * bit 14..13 = number of bits added by stuffing
+ */
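+/*
+ * Worked example (editorial illustration): stufftab[0 * 256 + 0x1f] == 0x201f,
+ * i.e. result bits 0x01f (the five '1's of the input followed by a stuffed
+ * '0' and the remaining '0' bits), 0 trailing '1' bits, and 1 bit added by
+ * stuffing.
+ */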
+static const u16 stufftab[5 * 256] = {
+/* previous 1s = 0: */
+ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
+ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x201f,
+ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
+ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x205f,
+ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
+ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x209f,
+ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
+ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20df,
+ 0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x048f,
+ 0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x0497, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x251f,
+ 0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x04a7, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x04af,
+ 0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x04b7, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x255f,
+ 0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x08c7, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x08cf,
+ 0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x08d7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x299f,
+ 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x0cef,
+ 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x2ddf,
+
+/* previous 1s = 1: */
+ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x200f,
+ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x202f,
+ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x204f,
+ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x206f,
+ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x208f,
+ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x20af,
+ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x20cf,
+ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20ef,
+ 0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x250f,
+ 0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x0497, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x252f,
+ 0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x04a7, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x254f,
+ 0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x04b7, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x256f,
+ 0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x08c7, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x298f,
+ 0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x08d7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x29af,
+ 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dcf,
+ 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x31ef,
+
+/* previous 1s = 2: */
+ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x2007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x2017,
+ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x2027, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x2037,
+ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x2047, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x2057,
+ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x2067, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x2077,
+ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x2087, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x2097,
+ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x20a7, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x20b7,
+ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x20c7, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x20d7,
+ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x20e7, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20f7,
+ 0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x2507, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x2517,
+ 0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x2527, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x2537,
+ 0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x2547, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x2557,
+ 0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x2567, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x2577,
+ 0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x2987, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x2997,
+ 0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x29a7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x29b7,
+ 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dc7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dd7,
+ 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x31e7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x41f7,
+
+/* previous 1s = 3: */
+ 0x0000, 0x0001, 0x0002, 0x2003, 0x0004, 0x0005, 0x0006, 0x200b, 0x0008, 0x0009, 0x000a, 0x2013, 0x000c, 0x000d, 0x000e, 0x201b,
+ 0x0010, 0x0011, 0x0012, 0x2023, 0x0014, 0x0015, 0x0016, 0x202b, 0x0018, 0x0019, 0x001a, 0x2033, 0x001c, 0x001d, 0x001e, 0x203b,
+ 0x0020, 0x0021, 0x0022, 0x2043, 0x0024, 0x0025, 0x0026, 0x204b, 0x0028, 0x0029, 0x002a, 0x2053, 0x002c, 0x002d, 0x002e, 0x205b,
+ 0x0030, 0x0031, 0x0032, 0x2063, 0x0034, 0x0035, 0x0036, 0x206b, 0x0038, 0x0039, 0x003a, 0x2073, 0x003c, 0x003d, 0x203e, 0x207b,
+ 0x0040, 0x0041, 0x0042, 0x2083, 0x0044, 0x0045, 0x0046, 0x208b, 0x0048, 0x0049, 0x004a, 0x2093, 0x004c, 0x004d, 0x004e, 0x209b,
+ 0x0050, 0x0051, 0x0052, 0x20a3, 0x0054, 0x0055, 0x0056, 0x20ab, 0x0058, 0x0059, 0x005a, 0x20b3, 0x005c, 0x005d, 0x005e, 0x20bb,
+ 0x0060, 0x0061, 0x0062, 0x20c3, 0x0064, 0x0065, 0x0066, 0x20cb, 0x0068, 0x0069, 0x006a, 0x20d3, 0x006c, 0x006d, 0x006e, 0x20db,
+ 0x0070, 0x0071, 0x0072, 0x20e3, 0x0074, 0x0075, 0x0076, 0x20eb, 0x0078, 0x0079, 0x007a, 0x20f3, 0x207c, 0x207d, 0x20be, 0x40fb,
+ 0x0480, 0x0481, 0x0482, 0x2503, 0x0484, 0x0485, 0x0486, 0x250b, 0x0488, 0x0489, 0x048a, 0x2513, 0x048c, 0x048d, 0x048e, 0x251b,
+ 0x0490, 0x0491, 0x0492, 0x2523, 0x0494, 0x0495, 0x0496, 0x252b, 0x0498, 0x0499, 0x049a, 0x2533, 0x049c, 0x049d, 0x049e, 0x253b,
+ 0x04a0, 0x04a1, 0x04a2, 0x2543, 0x04a4, 0x04a5, 0x04a6, 0x254b, 0x04a8, 0x04a9, 0x04aa, 0x2553, 0x04ac, 0x04ad, 0x04ae, 0x255b,
+ 0x04b0, 0x04b1, 0x04b2, 0x2563, 0x04b4, 0x04b5, 0x04b6, 0x256b, 0x04b8, 0x04b9, 0x04ba, 0x2573, 0x04bc, 0x04bd, 0x253e, 0x257b,
+ 0x08c0, 0x08c1, 0x08c2, 0x2983, 0x08c4, 0x08c5, 0x08c6, 0x298b, 0x08c8, 0x08c9, 0x08ca, 0x2993, 0x08cc, 0x08cd, 0x08ce, 0x299b,
+ 0x08d0, 0x08d1, 0x08d2, 0x29a3, 0x08d4, 0x08d5, 0x08d6, 0x29ab, 0x08d8, 0x08d9, 0x08da, 0x29b3, 0x08dc, 0x08dd, 0x08de, 0x29bb,
+ 0x0ce0, 0x0ce1, 0x0ce2, 0x2dc3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dcb, 0x0ce8, 0x0ce9, 0x0cea, 0x2dd3, 0x0cec, 0x0ced, 0x0cee, 0x2ddb,
+ 0x10f0, 0x10f1, 0x10f2, 0x31e3, 0x10f4, 0x10f5, 0x10f6, 0x31eb, 0x20f8, 0x20f9, 0x20fa, 0x41f3, 0x257c, 0x257d, 0x29be, 0x46fb,
+
+/* previous 1s = 4: */
+ 0x0000, 0x2001, 0x0002, 0x2005, 0x0004, 0x2009, 0x0006, 0x200d, 0x0008, 0x2011, 0x000a, 0x2015, 0x000c, 0x2019, 0x000e, 0x201d,
+ 0x0010, 0x2021, 0x0012, 0x2025, 0x0014, 0x2029, 0x0016, 0x202d, 0x0018, 0x2031, 0x001a, 0x2035, 0x001c, 0x2039, 0x001e, 0x203d,
+ 0x0020, 0x2041, 0x0022, 0x2045, 0x0024, 0x2049, 0x0026, 0x204d, 0x0028, 0x2051, 0x002a, 0x2055, 0x002c, 0x2059, 0x002e, 0x205d,
+ 0x0030, 0x2061, 0x0032, 0x2065, 0x0034, 0x2069, 0x0036, 0x206d, 0x0038, 0x2071, 0x003a, 0x2075, 0x003c, 0x2079, 0x203e, 0x407d,
+ 0x0040, 0x2081, 0x0042, 0x2085, 0x0044, 0x2089, 0x0046, 0x208d, 0x0048, 0x2091, 0x004a, 0x2095, 0x004c, 0x2099, 0x004e, 0x209d,
+ 0x0050, 0x20a1, 0x0052, 0x20a5, 0x0054, 0x20a9, 0x0056, 0x20ad, 0x0058, 0x20b1, 0x005a, 0x20b5, 0x005c, 0x20b9, 0x005e, 0x20bd,
+ 0x0060, 0x20c1, 0x0062, 0x20c5, 0x0064, 0x20c9, 0x0066, 0x20cd, 0x0068, 0x20d1, 0x006a, 0x20d5, 0x006c, 0x20d9, 0x006e, 0x20dd,
+ 0x0070, 0x20e1, 0x0072, 0x20e5, 0x0074, 0x20e9, 0x0076, 0x20ed, 0x0078, 0x20f1, 0x007a, 0x20f5, 0x207c, 0x40f9, 0x20be, 0x417d,
+ 0x0480, 0x2501, 0x0482, 0x2505, 0x0484, 0x2509, 0x0486, 0x250d, 0x0488, 0x2511, 0x048a, 0x2515, 0x048c, 0x2519, 0x048e, 0x251d,
+ 0x0490, 0x2521, 0x0492, 0x2525, 0x0494, 0x2529, 0x0496, 0x252d, 0x0498, 0x2531, 0x049a, 0x2535, 0x049c, 0x2539, 0x049e, 0x253d,
+ 0x04a0, 0x2541, 0x04a2, 0x2545, 0x04a4, 0x2549, 0x04a6, 0x254d, 0x04a8, 0x2551, 0x04aa, 0x2555, 0x04ac, 0x2559, 0x04ae, 0x255d,
+ 0x04b0, 0x2561, 0x04b2, 0x2565, 0x04b4, 0x2569, 0x04b6, 0x256d, 0x04b8, 0x2571, 0x04ba, 0x2575, 0x04bc, 0x2579, 0x253e, 0x467d,
+ 0x08c0, 0x2981, 0x08c2, 0x2985, 0x08c4, 0x2989, 0x08c6, 0x298d, 0x08c8, 0x2991, 0x08ca, 0x2995, 0x08cc, 0x2999, 0x08ce, 0x299d,
+ 0x08d0, 0x29a1, 0x08d2, 0x29a5, 0x08d4, 0x29a9, 0x08d6, 0x29ad, 0x08d8, 0x29b1, 0x08da, 0x29b5, 0x08dc, 0x29b9, 0x08de, 0x29bd,
+ 0x0ce0, 0x2dc1, 0x0ce2, 0x2dc5, 0x0ce4, 0x2dc9, 0x0ce6, 0x2dcd, 0x0ce8, 0x2dd1, 0x0cea, 0x2dd5, 0x0cec, 0x2dd9, 0x0cee, 0x2ddd,
+ 0x10f0, 0x31e1, 0x10f2, 0x31e5, 0x10f4, 0x31e9, 0x10f6, 0x31ed, 0x20f8, 0x41f1, 0x20fa, 0x41f5, 0x257c, 0x46f9, 0x29be, 0x4b7d
+};
+
+/* hdlc_bitstuff_byte
+ * perform HDLC bitstuffing for one input byte (8 bits, LSB first)
+ * parameters:
+ * cin input byte
+ * ones number of trailing '1' bits in result before this step
+ * iwb pointer to output buffer structure
+ * (write semaphore must be held)
+ * return value:
+ * number of trailing '1' bits in result after this step
+ */
+
+static inline int hdlc_bitstuff_byte(struct isowbuf_t *iwb, unsigned char cin,
+ int ones)
+{
+ u16 stuff;
+ int shiftinc, newones;
+
+ /* get stuffing information for input byte
+ * value: bit 9.. 0 = result bits
+ * bit 12..10 = number of trailing '1' bits in result
+ * bit 14..13 = number of bits added by stuffing
+ */
+ stuff = stufftab[256 * ones + cin];
+ shiftinc = (stuff >> 13) & 3;
+ newones = (stuff >> 10) & 7;
+ stuff &= 0x3ff;
+
+ /* append stuffed byte to output stream */
+ isowbuf_putbits(iwb, stuff, 8 + shiftinc);
+ return newones;
+}
+
+/* hdlc_buildframe
+ * Perform HDLC framing with bitstuffing on a byte buffer
+ * The input buffer is regarded as a sequence of bits, starting with the least
+ * significant bit of the first byte and ending with the most significant bit
+ * of the last byte. A 16 bit FCS is appended as defined by RFC 1662.
+ * Whenever five consecutive '1' bits appear in the resulting bit sequence, a
+ * '0' bit is inserted after them.
+ * The resulting bit string and a closing flag pattern (PPP_FLAG, '01111110')
+ * are appended to the output buffer starting at the given bit position, which
+ * is assumed to already contain a leading flag.
+ * The output buffer must have sufficient room; count + count/5 + 6 free
+ * bytes are enough, and their availability is verified before writing.
+ * parameters:
+ * in input buffer
+ * count number of bytes in input buffer
+ * iwb pointer to output buffer structure
+ * (write semaphore must be held)
+ * return value:
+ * position of end of packet in output buffer on success,
+ * -EAGAIN if write semaphore busy or buffer full
+ */
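+/*
+ * Worked example of the buffer space check (editorial illustration): for
+ * count = 100 input bytes, at least 100 + 100/5 + 6 = 126 free bytes must
+ * be available before framing starts (data, worst-case stuffing, FCS and
+ * closing flag).
+ */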
+
+static inline int hdlc_buildframe(struct isowbuf_t *iwb,
+ unsigned char *in, int count)
+{
+ int ones;
+ u16 fcs;
+ int end;
+ unsigned char c;
+
+ if (isowbuf_freebytes(iwb) < count + count / 5 + 6 ||
+ isowbuf_startwrite(iwb) < 0) {
+ gig_dbg(DEBUG_ISO, "%s: %d bytes free -> -EAGAIN",
+ __func__, isowbuf_freebytes(iwb));
+ return -EAGAIN;
+ }
+
+ dump_bytes(DEBUG_STREAM_DUMP, "snd data", in, count);
+
+ /* bitstuff and checksum input data */
+ fcs = PPP_INITFCS;
+ ones = 0;
+ while (count-- > 0) {
+ c = *in++;
+ ones = hdlc_bitstuff_byte(iwb, c, ones);
+ fcs = crc_ccitt_byte(fcs, c);
+ }
+
+ /* bitstuff and append FCS
+ * (complemented, least significant byte first) */
+ fcs ^= 0xffff;
+ ones = hdlc_bitstuff_byte(iwb, fcs & 0x00ff, ones);
+ ones = hdlc_bitstuff_byte(iwb, (fcs >> 8) & 0x00ff, ones);
+
+ /* put closing flag and repeat byte for flag idle */
+ isowbuf_putflag(iwb);
+ end = isowbuf_donewrite(iwb);
+ return end;
+}
+
+/* trans_buildframe
+ * Append a block of 'transparent' data to the output buffer,
+ * reversing the bit order of each byte.
+ * The output buffer must have sufficient room; count free bytes in the
+ * buffer are enough, and their availability is verified before writing.
+ * parameters:
+ * in input buffer
+ * count number of bytes in input buffer
+ * iwb pointer to output buffer structure
+ * (write semaphore must be held)
+ * return value:
+ * position of end of packet in output buffer on success,
+ * -EAGAIN if write semaphore busy or buffer full
+ */
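+/*
+ * Editorial note: bitrev8() from <linux/bitrev.h> reverses the bit order
+ * within a byte, e.g. bitrev8(0x01) == 0x80 and bitrev8(0x7e) == 0x7e.
+ */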
+
+static inline int trans_buildframe(struct isowbuf_t *iwb,
+ unsigned char *in, int count)
+{
+ int write;
+ unsigned char c;
+
+ if (unlikely(count <= 0))
+ return iwb->write;
+
+ if (isowbuf_freebytes(iwb) < count ||
+ isowbuf_startwrite(iwb) < 0) {
+ gig_dbg(DEBUG_ISO, "can't put %d bytes", count);
+ return -EAGAIN;
+ }
+
+ gig_dbg(DEBUG_STREAM, "put %d bytes", count);
+ dump_bytes(DEBUG_STREAM_DUMP, "snd data", in, count);
+
+ write = iwb->write;
+ do {
+ c = bitrev8(*in++);
+ iwb->data[write++] = c;
+ write %= BAS_OUTBUFSIZE;
+ } while (--count > 0);
+ iwb->write = write;
+ iwb->idle = c;
+
+ return isowbuf_donewrite(iwb);
+}
+
+int gigaset_isoc_buildframe(struct bc_state *bcs, unsigned char *in, int len)
+{
+ int result;
+
+ switch (bcs->proto2) {
+ case L2_HDLC:
+ result = hdlc_buildframe(bcs->hw.bas->isooutbuf, in, len);
+ gig_dbg(DEBUG_ISO, "%s: %d bytes HDLC -> %d",
+ __func__, len, result);
+ break;
+ default: /* assume transparent */
+ result = trans_buildframe(bcs->hw.bas->isooutbuf, in, len);
+ gig_dbg(DEBUG_ISO, "%s: %d bytes trans -> %d",
+ __func__, len, result);
+ }
+ return result;
+}
+
+/* hdlc_putbyte
+ * append byte c to current skb of B channel structure *bcs, updating fcs
+ */
+static inline void hdlc_putbyte(unsigned char c, struct bc_state *bcs)
+{
+ bcs->rx_fcs = crc_ccitt_byte(bcs->rx_fcs, c);
+ if (bcs->rx_skb == NULL)
+ /* skipping */
+ return;
+ if (bcs->rx_skb->len >= bcs->rx_bufsize) {
+ dev_warn(bcs->cs->dev, "received oversized packet discarded\n");
+ bcs->hw.bas->giants++;
+ dev_kfree_skb_any(bcs->rx_skb);
+ bcs->rx_skb = NULL;
+ return;
+ }
+ __skb_put_u8(bcs->rx_skb, c);
+}
+
+/* hdlc_flush
+ * drop partial HDLC data packet
+ */
+static inline void hdlc_flush(struct bc_state *bcs)
+{
+ /* clear skb or allocate new if not skipping */
+ if (bcs->rx_skb != NULL)
+ skb_trim(bcs->rx_skb, 0);
+ else
+ gigaset_new_rx_skb(bcs);
+
+ /* reset packet state */
+ bcs->rx_fcs = PPP_INITFCS;
+}
+
+/* hdlc_done
+ * process completed HDLC data packet
+ */
+static inline void hdlc_done(struct bc_state *bcs)
+{
+ struct cardstate *cs = bcs->cs;
+ struct sk_buff *procskb;
+ unsigned int len;
+
+ if (unlikely(bcs->ignore)) {
+ bcs->ignore--;
+ hdlc_flush(bcs);
+ return;
+ }
+ procskb = bcs->rx_skb;
+ if (procskb == NULL) {
+ /* previous error */
+ gig_dbg(DEBUG_ISO, "%s: skb=NULL", __func__);
+ gigaset_isdn_rcv_err(bcs);
+ } else if (procskb->len < 2) {
+ dev_notice(cs->dev, "received short frame (%d octets)\n",
+ procskb->len);
+ bcs->hw.bas->runts++;
+ dev_kfree_skb_any(procskb);
+ gigaset_isdn_rcv_err(bcs);
+ } else if (bcs->rx_fcs != PPP_GOODFCS) {
+ dev_notice(cs->dev, "frame check error\n");
+ bcs->hw.bas->fcserrs++;
+ dev_kfree_skb_any(procskb);
+ gigaset_isdn_rcv_err(bcs);
+ } else {
+ len = procskb->len;
+ __skb_trim(procskb, len -= 2); /* subtract FCS */
+ gig_dbg(DEBUG_ISO, "%s: good frame (%d octets)", __func__, len);
+ dump_bytes(DEBUG_STREAM_DUMP,
+ "rcv data", procskb->data, len);
+ bcs->hw.bas->goodbytes += len;
+ gigaset_skb_rcvd(bcs, procskb);
+ }
+ gigaset_new_rx_skb(bcs);
+ bcs->rx_fcs = PPP_INITFCS;
+}
+
+/* hdlc_frag
+ * drop HDLC data packet with non-integral last byte
+ */
+static inline void hdlc_frag(struct bc_state *bcs, unsigned inbits)
+{
+ if (unlikely(bcs->ignore)) {
+ bcs->ignore--;
+ hdlc_flush(bcs);
+ return;
+ }
+
+ dev_notice(bcs->cs->dev, "received partial byte (%d bits)\n", inbits);
+ bcs->hw.bas->alignerrs++;
+ gigaset_isdn_rcv_err(bcs);
+ __skb_trim(bcs->rx_skb, 0);
+ bcs->rx_fcs = PPP_INITFCS;
+}
+
+/* bit counts lookup table for HDLC bit unstuffing
+ * index: input byte
+ * value: bit 0..3 = number of consecutive '1' bits starting from LSB
+ * bit 4..6 = number of consecutive '1' bits starting from MSB
+ * (replacing 8 by 7 to make it fit; the algorithm won't care)
+ * bit 7 set if there are 5 or more "interior" consecutive '1' bits
+ */
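+/*
+ * Worked examples (editorial illustration): bitcounts[0x1f] == 0x05 (five
+ * consecutive '1's from the LSB, none from the MSB), bitcounts[0xf8] == 0x50
+ * (five consecutive '1's from the MSB), bitcounts[0x3e] == 0x80 (five
+ * "interior" consecutive '1' bits).
+ */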
+static const unsigned char bitcounts[256] = {
+ 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
+ 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x05,
+ 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
+ 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x80, 0x06,
+ 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
+ 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x05,
+ 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
+ 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x80, 0x81, 0x80, 0x07,
+ 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x14,
+ 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x15,
+ 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x14,
+ 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x90, 0x16,
+ 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x23, 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x24,
+ 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x23, 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x25,
+ 0x30, 0x31, 0x30, 0x32, 0x30, 0x31, 0x30, 0x33, 0x30, 0x31, 0x30, 0x32, 0x30, 0x31, 0x30, 0x34,
+ 0x40, 0x41, 0x40, 0x42, 0x40, 0x41, 0x40, 0x43, 0x50, 0x51, 0x50, 0x52, 0x60, 0x61, 0x70, 0x78
+};
+
+/* hdlc_unpack
+ * perform HDLC frame processing (bit unstuffing, flag detection, FCS
+ * calculation) on a sequence of received data bytes (8 bits each, LSB first)
+ * pass on successfully received, complete frames as SKBs via gigaset_skb_rcvd
+ * notify of errors via gigaset_isdn_rcv_err
+ * tally frames, errors etc. in BC structure counters
+ * parameters:
+ * src received data
+ * count number of received bytes
+ * bcs receiving B channel structure
+ */
+static inline void hdlc_unpack(unsigned char *src, unsigned count,
+ struct bc_state *bcs)
+{
+ struct bas_bc_state *ubc = bcs->hw.bas;
+ int inputstate;
+ unsigned seqlen, inbyte, inbits;
+
+ /* load previous state:
+ * inputstate = set of flag bits:
+ * - INS_flag_hunt: no complete opening flag received since connection
+ * setup or last abort
+ * - INS_have_data: at least one complete data byte received since last
+ * flag
+ * seqlen = number of consecutive '1' bits in last 7 input stream bits
+ * (0..7)
+ * inbyte = accumulated partial data byte (if !INS_flag_hunt)
+ * inbits = number of valid bits in inbyte, starting at LSB (0..6)
+ */
+ inputstate = bcs->inputstate;
+ seqlen = ubc->seqlen;
+ inbyte = ubc->inbyte;
+ inbits = ubc->inbits;
+
+ /* bit unstuffing, one byte at a time
+ * Take your time to understand this; it's straightforward but tedious.
+ * The "bitcounts" lookup table is used to speed up the counting of
+ * leading and trailing '1' bits.
+ */
+ while (count--) {
+ unsigned char c = *src++;
+ unsigned char tabentry = bitcounts[c];
+ unsigned lead1 = tabentry & 0x0f;
+ unsigned trail1 = (tabentry >> 4) & 0x0f;
+
+ seqlen += lead1;
+
+ if (unlikely(inputstate & INS_flag_hunt)) {
+ if (c == PPP_FLAG) {
+ /* flag-in-one */
+ inputstate &= ~(INS_flag_hunt | INS_have_data);
+ inbyte = 0;
+ inbits = 0;
+ } else if (seqlen == 6 && trail1 != 7) {
+ /* flag completed & not followed by abort */
+ inputstate &= ~(INS_flag_hunt | INS_have_data);
+ inbyte = c >> (lead1 + 1);
+ inbits = 7 - lead1;
+ if (trail1 >= 8) {
+ /* interior stuffing:
+ * omitting the MSB handles most cases,
+ * correct the incorrectly handled
+ * cases individually */
+ inbits--;
+ switch (c) {
+ case 0xbe:
+ inbyte = 0x3f;
+ break;
+ }
+ }
+ }
+ /* else: continue flag-hunting */
+ } else if (likely(seqlen < 5 && trail1 < 7)) {
+ /* streamlined case: 8 data bits, no stuffing */
+ inbyte |= c << inbits;
+ hdlc_putbyte(inbyte & 0xff, bcs);
+ inputstate |= INS_have_data;
+ inbyte >>= 8;
+ /* inbits unchanged */
+ } else if (likely(seqlen == 6 && inbits == 7 - lead1 &&
+ trail1 + 1 == inbits &&
+ !(inputstate & INS_have_data))) {
+ /* streamlined case: flag idle - state unchanged */
+ } else if (unlikely(seqlen > 6)) {
+ /* abort sequence */
+ ubc->aborts++;
+ hdlc_flush(bcs);
+ inputstate |= INS_flag_hunt;
+ } else if (seqlen == 6) {
+ /* closing flag, including (6 - lead1) '1's
+ * and one '0' from inbits */
+ if (inbits > 7 - lead1) {
+ hdlc_frag(bcs, inbits + lead1 - 7);
+ inputstate &= ~INS_have_data;
+ } else {
+ if (inbits < 7 - lead1)
+ ubc->stolen0s++;
+ if (inputstate & INS_have_data) {
+ hdlc_done(bcs);
+ inputstate &= ~INS_have_data;
+ }
+ }
+
+ if (c == PPP_FLAG) {
+ /* complete flag, LSB overlaps preceding flag */
+ ubc->shared0s++;
+ inbits = 0;
+ inbyte = 0;
+ } else if (trail1 != 7) {
+ /* remaining bits */
+ inbyte = c >> (lead1 + 1);
+ inbits = 7 - lead1;
+ if (trail1 >= 8) {
+ /* interior stuffing:
+ * omitting the MSB handles most cases,
+ * correct the incorrectly handled
+ * cases individually */
+ inbits--;
+ switch (c) {
+ case 0xbe:
+ inbyte = 0x3f;
+ break;
+ }
+ }
+ } else {
+ /* abort sequence follows,
+ * skb already empty anyway */
+ ubc->aborts++;
+ inputstate |= INS_flag_hunt;
+ }
+ } else { /* (seqlen < 6) && (seqlen == 5 || trail1 >= 7) */
+
+ if (c == PPP_FLAG) {
+ /* complete flag */
+ if (seqlen == 5)
+ ubc->stolen0s++;
+ if (inbits) {
+ hdlc_frag(bcs, inbits);
+ inbits = 0;
+ inbyte = 0;
+ } else if (inputstate & INS_have_data)
+ hdlc_done(bcs);
+ inputstate &= ~INS_have_data;
+ } else if (trail1 == 7) {
+ /* abort sequence */
+ ubc->aborts++;
+ hdlc_flush(bcs);
+ inputstate |= INS_flag_hunt;
+ } else {
+ /* stuffed data */
+ if (trail1 < 7) { /* => seqlen == 5 */
+ /* stuff bit at position lead1,
+ * no interior stuffing */
+ unsigned char mask = (1 << lead1) - 1;
+ c = (c & mask) | ((c & ~mask) >> 1);
+ inbyte |= c << inbits;
+ inbits += 7;
+ } else if (seqlen < 5) { /* trail1 >= 8 */
+ /* interior stuffing:
+ * omitting the MSB handles most cases,
+ * correct the incorrectly handled
+ * cases individually */
+ switch (c) {
+ case 0xbe:
+ c = 0x7e;
+ break;
+ }
+ inbyte |= c << inbits;
+ inbits += 7;
+ } else { /* seqlen == 5 && trail1 >= 8 */
+
+ /* stuff bit at lead1 *and* interior
+ * stuffing -- unstuff individually */
+ switch (c) {
+ case 0x7d:
+ c = 0x3f;
+ break;
+ case 0xbe:
+ c = 0x3f;
+ break;
+ case 0x3e:
+ c = 0x1f;
+ break;
+ case 0x7c:
+ c = 0x3e;
+ break;
+ }
+ inbyte |= c << inbits;
+ inbits += 6;
+ }
+ if (inbits >= 8) {
+ inbits -= 8;
+ hdlc_putbyte(inbyte & 0xff, bcs);
+ inputstate |= INS_have_data;
+ inbyte >>= 8;
+ }
+ }
+ }
+ seqlen = trail1 & 7;
+ }
+
+ /* save new state */
+ bcs->inputstate = inputstate;
+ ubc->seqlen = seqlen;
+ ubc->inbyte = inbyte;
+ ubc->inbits = inbits;
+}
+
+/* trans_receive
+ * pass on received USB frame transparently as SKB via gigaset_skb_rcvd
+ * reverse the bit order of each byte
+ * tally frames, errors etc. in BC structure counters
+ * parameters:
+ * src received data
+ * count number of received bytes
+ * bcs receiving B channel structure
+ */
+static inline void trans_receive(unsigned char *src, unsigned count,
+ struct bc_state *bcs)
+{
+ struct sk_buff *skb;
+ int dobytes;
+ unsigned char *dst;
+
+ if (unlikely(bcs->ignore)) {
+ bcs->ignore--;
+ return;
+ }
+ skb = bcs->rx_skb;
+ if (skb == NULL) {
+ skb = gigaset_new_rx_skb(bcs);
+ if (skb == NULL)
+ return;
+ }
+ dobytes = bcs->rx_bufsize - skb->len;
+ while (count > 0) {
+ dst = skb_put(skb, count < dobytes ? count : dobytes);
+ while (count > 0 && dobytes > 0) {
+ *dst++ = bitrev8(*src++);
+ count--;
+ dobytes--;
+ }
+ if (dobytes == 0) {
+ dump_bytes(DEBUG_STREAM_DUMP,
+ "rcv data", skb->data, skb->len);
+ bcs->hw.bas->goodbytes += skb->len;
+ gigaset_skb_rcvd(bcs, skb);
+ skb = gigaset_new_rx_skb(bcs);
+ if (skb == NULL)
+ return;
+ dobytes = bcs->rx_bufsize;
+ }
+ }
+}
+
+void gigaset_isoc_receive(unsigned char *src, unsigned count,
+ struct bc_state *bcs)
+{
+ switch (bcs->proto2) {
+ case L2_HDLC:
+ hdlc_unpack(src, count, bcs);
+ break;
+ default: /* assume transparent */
+ trans_receive(src, count, bcs);
+ }
+}
+
+/* == data input =========================================================== */
+
+/* process a block of received bytes in command mode (mstate != MS_LOCKED)
+ * Append received bytes to the command response buffer and forward them
+ * line by line to the response handler.
+ * Note: Received lines may be terminated by CR, LF, or CR LF, which will be
+ * removed before passing the line to the response handler.
+ */
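+/*
+ * Example (editorial illustration): for an incoming sequence "OK\r\n", the
+ * response handler is invoked when the '\r' is seen; respdata[0] then holds
+ * '\r', so the subsequent '\n' is collapsed instead of producing an empty
+ * second line.
+ */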
+static void cmd_loop(unsigned char *src, int numbytes, struct inbuf_t *inbuf)
+{
+ struct cardstate *cs = inbuf->cs;
+ unsigned cbytes = cs->cbytes;
+ unsigned char c;
+
+ while (numbytes--) {
+ c = *src++;
+ switch (c) {
+ case '\n':
+ if (cbytes == 0 && cs->respdata[0] == '\r') {
+ /* collapse LF with preceding CR */
+ cs->respdata[0] = 0;
+ break;
+ }
+ /* fall through */
+ case '\r':
+ /* end of message line, pass to response handler */
+ if (cbytes >= MAX_RESP_SIZE) {
+ dev_warn(cs->dev, "response too large (%d)\n",
+ cbytes);
+ cbytes = MAX_RESP_SIZE;
+ }
+ cs->cbytes = cbytes;
+ gigaset_dbg_buffer(DEBUG_TRANSCMD, "received response",
+ cbytes, cs->respdata);
+ gigaset_handle_modem_response(cs);
+ cbytes = 0;
+
+ /* store EOL byte for CRLF collapsing */
+ cs->respdata[0] = c;
+ break;
+ default:
+ /* append to line buffer if possible */
+ if (cbytes < MAX_RESP_SIZE)
+ cs->respdata[cbytes] = c;
+ cbytes++;
+ }
+ }
+
+ /* save state */
+ cs->cbytes = cbytes;
+}
+
+
+/* process a block of data received through the control channel
+ */
+void gigaset_isoc_input(struct inbuf_t *inbuf)
+{
+ struct cardstate *cs = inbuf->cs;
+ unsigned tail, head, numbytes;
+ unsigned char *src;
+
+ head = inbuf->head;
+ while (head != (tail = inbuf->tail)) {
+ gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
+ if (head > tail)
+ tail = RBUFSIZE;
+ src = inbuf->data + head;
+ numbytes = tail - head;
+ gig_dbg(DEBUG_INTR, "processing %u bytes", numbytes);
+
+ if (cs->mstate == MS_LOCKED) {
+ gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response",
+ numbytes, src);
+ gigaset_if_receive(inbuf->cs, src, numbytes);
+ } else {
+ cmd_loop(src, numbytes, inbuf);
+ }
+
+ head += numbytes;
+ if (head == RBUFSIZE)
+ head = 0;
+ gig_dbg(DEBUG_INTR, "setting head to %u", head);
+ inbuf->head = head;
+ }
+}
+
+
+/* == data output ========================================================== */
+
+/**
+ * gigaset_isoc_send_skb() - queue an skb for sending
+ * @bcs: B channel descriptor structure.
+ * @skb: data to send.
+ *
+ * Called by LL to queue an skb for sending, and start transmission if
+ * necessary.
+ * Once the payload data has been transmitted completely, gigaset_skb_sent()
+ * will be called with the skb's link layer header preserved.
+ *
+ * Return value:
+ * number of bytes accepted for sending (skb->len) if ok,
+ * error code < 0 (e.g. -ENODEV) on error
+ */
+int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb)
+{
+ int len = skb->len;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bcs->cs->lock, flags);
+ if (!bcs->cs->connected) {
+ spin_unlock_irqrestore(&bcs->cs->lock, flags);
+ return -ENODEV;
+ }
+
+ skb_queue_tail(&bcs->squeue, skb);
+ gig_dbg(DEBUG_ISO, "%s: skb queued, qlen=%d",
+ __func__, skb_queue_len(&bcs->squeue));
+
+ /* tasklet submits URB if necessary */
+ tasklet_schedule(&bcs->hw.bas->sent_tasklet);
+ spin_unlock_irqrestore(&bcs->cs->lock, flags);
+
+ return len; /* ok so far */
+}
diff --git a/drivers/staging/isdn/gigaset/proc.c b/drivers/staging/isdn/gigaset/proc.c
new file mode 100644
index 000000000000..8914439a4237
--- /dev/null
+++ b/drivers/staging/isdn/gigaset/proc.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Stuff used by all variants of the driver
+ *
+ * Copyright (c) 2001 by Stefan Eilers,
+ * Hansjoerg Lipp <hjlipp@web.de>,
+ * Tilman Schmidt <tilman@imap.cc>.
+ *
+ * =====================================================================
+ * =====================================================================
+ */
+
+#include "gigaset.h"
+
+static ssize_t show_cidmode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cardstate *cs = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", cs->cidmode);
+}
+
+static ssize_t set_cidmode(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cardstate *cs = dev_get_drvdata(dev);
+ long int value;
+ char *end;
+
+ value = simple_strtol(buf, &end, 0);
+ while (*end)
+ if (!isspace(*end++))
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&cs->mutex))
+ return -ERESTARTSYS;
+
+ cs->waiting = 1;
+ if (!gigaset_add_event(cs, &cs->at_state, EV_PROC_CIDMODE,
+ NULL, value, NULL)) {
+ cs->waiting = 0;
+ mutex_unlock(&cs->mutex);
+ return -ENOMEM;
+ }
+ gigaset_schedule_event(cs);
+
+ wait_event(cs->waitqueue, !cs->waiting);
+
+ mutex_unlock(&cs->mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(cidmode, S_IRUGO | S_IWUSR, show_cidmode, set_cidmode);
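+
+/*
+ * Illustrative usage from user space (editorial note; the device name
+ * depends on the driver variant and minor number, e.g. ttyGU0 for
+ * usb_gigaset):
+ * echo 1 > /sys/class/tty/ttyGU0/cidmode
+ */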
+
+/* free sysfs for device */
+void gigaset_free_dev_sysfs(struct cardstate *cs)
+{
+ if (!cs->tty_dev)
+ return;
+
+ gig_dbg(DEBUG_INIT, "removing sysfs entries");
+ device_remove_file(cs->tty_dev, &dev_attr_cidmode);
+}
+
+/* initialize sysfs for device */
+void gigaset_init_dev_sysfs(struct cardstate *cs)
+{
+ if (!cs->tty_dev)
+ return;
+
+ gig_dbg(DEBUG_INIT, "setting up sysfs");
+ if (device_create_file(cs->tty_dev, &dev_attr_cidmode))
+ pr_err("could not create sysfs attribute\n");
+}
diff --git a/drivers/staging/isdn/gigaset/ser-gigaset.c b/drivers/staging/isdn/gigaset/ser-gigaset.c
new file mode 100644
index 000000000000..5587e9e7fc73
--- /dev/null
+++ b/drivers/staging/isdn/gigaset/ser-gigaset.c
@@ -0,0 +1,796 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* This is the serial hardware link layer (HLL) for the Gigaset 307x isdn
+ * DECT base (aka Sinus 45 isdn) using the RS232 DECT data module M101,
+ * written as a line discipline.
+ *
+ * =====================================================================
+ * =====================================================================
+ */
+
+#include "gigaset.h"
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/completion.h>
+
+/* Version Information */
+#define DRIVER_AUTHOR "Tilman Schmidt"
+#define DRIVER_DESC "Serial Driver for Gigaset 307x using Siemens M101"
+
+#define GIGASET_MINORS 1
+#define GIGASET_MINOR 0
+#define GIGASET_MODULENAME "ser_gigaset"
+#define GIGASET_DEVNAME "ttyGS"
+
+/* length limit according to Siemens 3070usb-protokoll.doc ch. 2.1 */
+#define IF_WRITEBUF 264
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_LDISC(N_GIGASET_M101);
+
+static int startmode = SM_ISDN;
+module_param(startmode, int, S_IRUGO);
+MODULE_PARM_DESC(startmode, "initial operation mode");
+static int cidmode = 1;
+module_param(cidmode, int, S_IRUGO);
+MODULE_PARM_DESC(cidmode, "stay in CID mode when idle");
+
+static struct gigaset_driver *driver;
+
+struct ser_cardstate {
+ struct platform_device dev;
+ struct tty_struct *tty;
+ atomic_t refcnt;
+ struct completion dead_cmp;
+};
+
+static struct platform_driver device_driver = {
+ .driver = {
+ .name = GIGASET_MODULENAME,
+ },
+};
+
+static void flush_send_queue(struct cardstate *);
+
+/* transmit data from current open skb
+ * result: number of bytes sent or error code < 0
+ */
+static int write_modem(struct cardstate *cs)
+{
+ struct tty_struct *tty = cs->hw.ser->tty;
+ struct bc_state *bcs = &cs->bcs[0]; /* only one channel */
+ struct sk_buff *skb = bcs->tx_skb;
+ int sent = -EOPNOTSUPP;
+
+ WARN_ON(!tty || !tty->ops || !skb);
+
+ if (!skb->len) {
+ dev_kfree_skb_any(skb);
+ bcs->tx_skb = NULL;
+ return -EINVAL;
+ }
+
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ if (tty->ops->write)
+ sent = tty->ops->write(tty, skb->data, skb->len);
+ gig_dbg(DEBUG_OUTPUT, "write_modem: sent %d", sent);
+ if (sent < 0) {
+ /* error */
+ flush_send_queue(cs);
+ return sent;
+ }
+ skb_pull(skb, sent);
+ if (!skb->len) {
+ /* skb sent completely */
+ gigaset_skb_sent(bcs, skb);
+
+ gig_dbg(DEBUG_INTR, "kfree skb (Adr: %lx)!",
+ (unsigned long) skb);
+ dev_kfree_skb_any(skb);
+ bcs->tx_skb = NULL;
+ }
+ return sent;
+}
+
+/*
+ * transmit first queued command buffer
+ * result: number of bytes sent or error code < 0
+ */
+static int send_cb(struct cardstate *cs)
+{
+ struct tty_struct *tty = cs->hw.ser->tty;
+ struct cmdbuf_t *cb, *tcb;
+ unsigned long flags;
+ int sent = 0;
+
+ WARN_ON(!tty || !tty->ops);
+
+ cb = cs->cmdbuf;
+ if (!cb)
+ return 0; /* nothing to do */
+
+ if (cb->len) {
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ sent = tty->ops->write(tty, cb->buf + cb->offset, cb->len);
+ if (sent < 0) {
+ /* error */
+ gig_dbg(DEBUG_OUTPUT, "send_cb: write error %d", sent);
+ flush_send_queue(cs);
+ return sent;
+ }
+ cb->offset += sent;
+ cb->len -= sent;
+ gig_dbg(DEBUG_OUTPUT, "send_cb: sent %d, left %u, queued %u",
+ sent, cb->len, cs->cmdbytes);
+ }
+
+ while (cb && !cb->len) {
+ spin_lock_irqsave(&cs->cmdlock, flags);
+ cs->cmdbytes -= cs->curlen;
+ tcb = cb;
+ cs->cmdbuf = cb = cb->next;
+ if (cb) {
+ cb->prev = NULL;
+ cs->curlen = cb->len;
+ } else {
+ cs->lastcmdbuf = NULL;
+ cs->curlen = 0;
+ }
+ spin_unlock_irqrestore(&cs->cmdlock, flags);
+
+ if (tcb->wake_tasklet)
+ tasklet_schedule(tcb->wake_tasklet);
+ kfree(tcb);
+ }
+ return sent;
+}
+
+/*
+ * send queue tasklet
+ * If there is already a skb opened, put data to the transfer buffer
+ * by calling "write_modem".
+ * Otherwise take a new skb out of the queue.
+ */
+static void gigaset_modem_fill(unsigned long data)
+{
+ struct cardstate *cs = (struct cardstate *) data;
+ struct bc_state *bcs;
+ struct sk_buff *nextskb;
+ int sent = 0;
+
+ if (!cs) {
+ gig_dbg(DEBUG_OUTPUT, "%s: no cardstate", __func__);
+ return;
+ }
+ bcs = cs->bcs;
+ if (!bcs) {
+ gig_dbg(DEBUG_OUTPUT, "%s: no B channel state", __func__);
+ return;
+ }
+ if (!bcs->tx_skb) {
+ /* no skb is being sent; send command if any */
+ sent = send_cb(cs);
+ gig_dbg(DEBUG_OUTPUT, "%s: send_cb -> %d", __func__, sent);
+ if (sent)
+ /* something sent or error */
+ return;
+
+ /* no command to send; get skb */
+ nextskb = skb_dequeue(&bcs->squeue);
+ if (!nextskb)
+ /* no skb either, nothing to do */
+ return;
+ bcs->tx_skb = nextskb;
+
+ gig_dbg(DEBUG_INTR, "Dequeued skb (Adr: %lx)",
+ (unsigned long) bcs->tx_skb);
+ }
+
+ /* send skb */
+ gig_dbg(DEBUG_OUTPUT, "%s: tx_skb", __func__);
+ if (write_modem(cs) < 0)
+ gig_dbg(DEBUG_OUTPUT, "%s: write_modem failed", __func__);
+}
+
+/*
+ * throw away all data queued for sending
+ */
+static void flush_send_queue(struct cardstate *cs)
+{
+ struct sk_buff *skb;
+ struct cmdbuf_t *cb;
+ unsigned long flags;
+
+ /* command queue */
+ spin_lock_irqsave(&cs->cmdlock, flags);
+ while ((cb = cs->cmdbuf) != NULL) {
+ cs->cmdbuf = cb->next;
+ if (cb->wake_tasklet)
+ tasklet_schedule(cb->wake_tasklet);
+ kfree(cb);
+ }
+ cs->cmdbuf = cs->lastcmdbuf = NULL;
+ cs->cmdbytes = cs->curlen = 0;
+ spin_unlock_irqrestore(&cs->cmdlock, flags);
+
+ /* data queue */
+ if (cs->bcs->tx_skb)
+ dev_kfree_skb_any(cs->bcs->tx_skb);
+ while ((skb = skb_dequeue(&cs->bcs->squeue)) != NULL)
+ dev_kfree_skb_any(skb);
+}
+
+
+/* Gigaset Driver Interface */
+/* ======================== */
+
+/*
+ * queue an AT command string for transmission to the Gigaset device
+ * parameters:
+ * cs controller state structure
+ * cb command buffer structure holding the string to send, its length,
+ * and an optional tasklet to run when transmission is complete
+ * return value:
+ * number of bytes queued, or error code < 0
+ */
+static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb)
+{
+ unsigned long flags;
+
+ gigaset_dbg_buffer(cs->mstate != MS_LOCKED ?
+ DEBUG_TRANSCMD : DEBUG_LOCKCMD,
+ "CMD Transmit", cb->len, cb->buf);
+
+ spin_lock_irqsave(&cs->cmdlock, flags);
+ cb->prev = cs->lastcmdbuf;
+ if (cs->lastcmdbuf)
+ cs->lastcmdbuf->next = cb;
+ else {
+ cs->cmdbuf = cb;
+ cs->curlen = cb->len;
+ }
+ cs->cmdbytes += cb->len;
+ cs->lastcmdbuf = cb;
+ spin_unlock_irqrestore(&cs->cmdlock, flags);
+
+ spin_lock_irqsave(&cs->lock, flags);
+ if (cs->connected)
+ tasklet_schedule(&cs->write_tasklet);
+ spin_unlock_irqrestore(&cs->lock, flags);
+ return cb->len;
+}
+
+/*
+ * tty_driver.write_room interface routine
+ * return number of characters the driver will accept to be written
+ * parameter:
+ * controller state structure
+ * return value:
+ * number of characters
+ */
+static int gigaset_write_room(struct cardstate *cs)
+{
+ unsigned bytes;
+
+ bytes = cs->cmdbytes;
+ return bytes < IF_WRITEBUF ? IF_WRITEBUF - bytes : 0;
+}
+
+/*
+ * tty_driver.chars_in_buffer interface routine
+ * return number of characters waiting to be sent
+ * parameter:
+ * controller state structure
+ * return value:
+ * number of characters
+ */
+static int gigaset_chars_in_buffer(struct cardstate *cs)
+{
+ return cs->cmdbytes;
+}
+
+/*
+ * implementation of ioctl(GIGASET_BRKCHARS)
+ * parameter:
+ * controller state structure
+ * return value:
+ * -EINVAL (unimplemented function)
+ */
+static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
+{
+ /* not implemented */
+ return -EINVAL;
+}
+
+/*
+ * Open B channel
+ * Called by "do_action" in ev-layer.c
+ */
+static int gigaset_init_bchannel(struct bc_state *bcs)
+{
+ /* nothing to do for M10x */
+ gigaset_bchannel_up(bcs);
+ return 0;
+}
+
+/*
+ * Close B channel
+ * Called by "do_action" in ev-layer.c
+ */
+static int gigaset_close_bchannel(struct bc_state *bcs)
+{
+ /* nothing to do for M10x */
+ gigaset_bchannel_down(bcs);
+ return 0;
+}
+
+/*
+ * Set up B channel structure
+ * This is called by "gigaset_initcs" in common.c
+ */
+static int gigaset_initbcshw(struct bc_state *bcs)
+{
+ /* unused */
+ bcs->hw.ser = NULL;
+ return 0;
+}
+
+/*
+ * Free B channel structure
+ * Called by "gigaset_freebcs" in common.c
+ */
+static void gigaset_freebcshw(struct bc_state *bcs)
+{
+ /* unused */
+}
+
+/*
+ * Reinitialize B channel structure
+ * This is called by "bcs_reinit" in common.c
+ */
+static void gigaset_reinitbcshw(struct bc_state *bcs)
+{
+ /* nothing to do for M10x */
+}
+
+/*
+ * Free hardware specific device data
+ * This will be called by "gigaset_freecs" in common.c
+ */
+static void gigaset_freecshw(struct cardstate *cs)
+{
+ tasklet_kill(&cs->write_tasklet);
+ if (!cs->hw.ser)
+ return;
+ platform_device_unregister(&cs->hw.ser->dev);
+}
+
+static void gigaset_device_release(struct device *dev)
+{
+ kfree(container_of(dev, struct ser_cardstate, dev.dev));
+}
+
+/*
+ * Set up hardware specific device data
+ * This is called by "gigaset_initcs" in common.c
+ */
+static int gigaset_initcshw(struct cardstate *cs)
+{
+ int rc;
+ struct ser_cardstate *scs;
+
+ scs = kzalloc(sizeof(struct ser_cardstate), GFP_KERNEL);
+ if (!scs) {
+ pr_err("out of memory\n");
+ return -ENOMEM;
+ }
+ cs->hw.ser = scs;
+
+ cs->hw.ser->dev.name = GIGASET_MODULENAME;
+ cs->hw.ser->dev.id = cs->minor_index;
+ cs->hw.ser->dev.dev.release = gigaset_device_release;
+ rc = platform_device_register(&cs->hw.ser->dev);
+ if (rc != 0) {
+ pr_err("error %d registering platform device\n", rc);
+ kfree(cs->hw.ser);
+ cs->hw.ser = NULL;
+ return rc;
+ }
+
+ tasklet_init(&cs->write_tasklet,
+ gigaset_modem_fill, (unsigned long) cs);
+ return 0;
+}
+
+/*
+ * set modem control lines
+ * Parameters:
+ * card state structure
+ * modem control line state ([TIOCM_DTR]|[TIOCM_RTS])
+ * Called by "gigaset_start" and "gigaset_enterconfigmode" in common.c
+ * and by "if_lock" and "if_termios" in interface.c
+ */
+static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
+ unsigned new_state)
+{
+ struct tty_struct *tty = cs->hw.ser->tty;
+ unsigned int set, clear;
+
+ WARN_ON(!tty || !tty->ops);
+ /* tiocmset is an optional tty driver method */
+ if (!tty->ops->tiocmset)
+ return -EINVAL;
+ set = new_state & ~old_state;
+ clear = old_state & ~new_state;
+ if (!set && !clear)
+ return 0;
+ gig_dbg(DEBUG_IF, "tiocmset set %x clear %x", set, clear);
+ return tty->ops->tiocmset(tty, set, clear);
+}
+
+static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
+{
+ return -EINVAL;
+}
+
+static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
+{
+ return -EINVAL;
+}
+
+static const struct gigaset_ops ops = {
+ .write_cmd = gigaset_write_cmd,
+ .write_room = gigaset_write_room,
+ .chars_in_buffer = gigaset_chars_in_buffer,
+ .brkchars = gigaset_brkchars,
+ .init_bchannel = gigaset_init_bchannel,
+ .close_bchannel = gigaset_close_bchannel,
+ .initbcshw = gigaset_initbcshw,
+ .freebcshw = gigaset_freebcshw,
+ .reinitbcshw = gigaset_reinitbcshw,
+ .initcshw = gigaset_initcshw,
+ .freecshw = gigaset_freecshw,
+ .set_modem_ctrl = gigaset_set_modem_ctrl,
+ .baud_rate = gigaset_baud_rate,
+ .set_line_ctrl = gigaset_set_line_ctrl,
+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
+};
+
+
+/* Line Discipline Interface */
+/* ========================= */
+
+/* helper functions for cardstate refcounting */
+static struct cardstate *cs_get(struct tty_struct *tty)
+{
+ struct cardstate *cs = tty->disc_data;
+
+ if (!cs || !cs->hw.ser) {
+ gig_dbg(DEBUG_ANY, "%s: no cardstate", __func__);
+ return NULL;
+ }
+ atomic_inc(&cs->hw.ser->refcnt);
+ return cs;
+}
+
+static void cs_put(struct cardstate *cs)
+{
+ if (atomic_dec_and_test(&cs->hw.ser->refcnt))
+ complete(&cs->hw.ser->dead_cmp);
+}
+
+/*
+ * Called by the tty driver when the line discipline is pushed onto the tty.
+ * Called in process context.
+ */
+static int
+gigaset_tty_open(struct tty_struct *tty)
+{
+ struct cardstate *cs;
+ int rc;
+
+ gig_dbg(DEBUG_INIT, "Starting HLL for Gigaset M101");
+
+ pr_info(DRIVER_DESC "\n");
+
+ if (!driver) {
+ pr_err("%s: no driver structure\n", __func__);
+ return -ENODEV;
+ }
+
+ /* allocate memory for our device state and initialize it */
+ cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
+ if (!cs) {
+ rc = -ENODEV;
+ goto error;
+ }
+
+ cs->dev = &cs->hw.ser->dev.dev;
+ cs->hw.ser->tty = tty;
+ atomic_set(&cs->hw.ser->refcnt, 1);
+ init_completion(&cs->hw.ser->dead_cmp);
+ tty->disc_data = cs;
+
+ /* Set the amount of data we're willing to receive per call
+ * from the hardware driver to half of the input buffer size
+ * to leave some reserve.
+ * Note: We don't do flow control towards the hardware driver.
+ * If more data is received than will fit into the input buffer,
+ * it will be dropped and an error will be logged. This should
+ * never happen as the device is slow and the buffer size ample.
+ */
+ tty->receive_room = RBUFSIZE/2;
+
+ /* OK.. Initialization of the datastructures and the HW is done.. Now
+ * startup system and notify the LL that we are ready to run
+ */
+ if (startmode == SM_LOCKED)
+ cs->mstate = MS_LOCKED;
+ rc = gigaset_start(cs);
+ if (rc < 0) {
+ tasklet_kill(&cs->write_tasklet);
+ goto error;
+ }
+
+ gig_dbg(DEBUG_INIT, "Startup of HLL done");
+ return 0;
+
+error:
+ gig_dbg(DEBUG_INIT, "Startup of HLL failed");
+ tty->disc_data = NULL;
+ gigaset_freecs(cs);
+ return rc;
+}
+
+/*
+ * Called by the tty driver when the line discipline is removed.
+ * Called from process context.
+ */
+static void
+gigaset_tty_close(struct tty_struct *tty)
+{
+ struct cardstate *cs = tty->disc_data;
+
+ gig_dbg(DEBUG_INIT, "Stopping HLL for Gigaset M101");
+
+ if (!cs) {
+ gig_dbg(DEBUG_INIT, "%s: no cardstate", __func__);
+ return;
+ }
+
+ /* prevent other callers from entering ldisc methods */
+ tty->disc_data = NULL;
+
+ if (!cs->hw.ser)
+ pr_err("%s: no hw cardstate\n", __func__);
+ else {
+ /* wait for running methods to finish */
+ if (!atomic_dec_and_test(&cs->hw.ser->refcnt))
+ wait_for_completion(&cs->hw.ser->dead_cmp);
+ }
+
+ /* stop operations */
+ gigaset_stop(cs);
+ tasklet_kill(&cs->write_tasklet);
+ flush_send_queue(cs);
+ cs->dev = NULL;
+ gigaset_freecs(cs);
+
+ gig_dbg(DEBUG_INIT, "Shutdown of HLL done");
+}
+
+/*
+ * Called by the tty driver when the tty line is hung up.
+ * Wait for I/O to driver to complete and unregister ISDN device.
+ * This is already done by the close routine, so just call that.
+ * Called from process context.
+ */
+static int gigaset_tty_hangup(struct tty_struct *tty)
+{
+ gigaset_tty_close(tty);
+ return 0;
+}
+
+/*
+ * Ioctl on the tty.
+ * Called in process context only.
+ * May be re-entered by multiple ioctl calling threads.
+ */
+static int
+gigaset_tty_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct cardstate *cs = cs_get(tty);
+ int rc, val;
+ int __user *p = (int __user *)arg;
+
+ if (!cs)
+ return -ENXIO;
+
+ switch (cmd) {
+
+ case FIONREAD:
+ /* unused, always return zero */
+ val = 0;
+ rc = put_user(val, p);
+ break;
+
+ case TCFLSH:
+ /* flush our buffers and the serial port's buffer */
+ switch (arg) {
+ case TCIFLUSH:
+ /* no own input buffer to flush */
+ break;
+ case TCIOFLUSH:
+ case TCOFLUSH:
+ flush_send_queue(cs);
+ break;
+ }
+ /* fall through */
+
+ default:
+ /* pass through to underlying serial device */
+ rc = n_tty_ioctl_helper(tty, file, cmd, arg);
+ break;
+ }
+ cs_put(cs);
+ return rc;
+}
+
+/*
+ * Called by the tty driver when a block of data has been received.
+ * Will not be re-entered while running but other ldisc functions
+ * may be called in parallel.
+ * Can be called from hard interrupt level as well as soft interrupt
+ * level or mainline.
+ * Parameters:
+ * tty tty structure
+ * buf buffer containing received characters
+ * cflags buffer containing error flags for received characters (ignored)
+ * count number of received characters
+ */
+static void
+gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
+ char *cflags, int count)
+{
+ struct cardstate *cs = cs_get(tty);
+ unsigned tail, head, n;
+ struct inbuf_t *inbuf;
+
+ if (!cs)
+ return;
+ inbuf = cs->inbuf;
+ if (!inbuf) {
+ dev_err(cs->dev, "%s: no inbuf\n", __func__);
+ cs_put(cs);
+ return;
+ }
+
+ tail = inbuf->tail;
+ head = inbuf->head;
+ gig_dbg(DEBUG_INTR, "buffer state: %u -> %u, receive %u bytes",
+ head, tail, count);
+
+ if (head <= tail) {
+ /* possible buffer wraparound */
+ n = min_t(unsigned, count, RBUFSIZE - tail);
+ memcpy(inbuf->data + tail, buf, n);
+ tail = (tail + n) % RBUFSIZE;
+ buf += n;
+ count -= n;
+ }
+
+ if (count > 0) {
+ /* tail < head and some data left */
+ n = head - tail - 1;
+ if (count > n) {
+ dev_err(cs->dev,
+ "inbuf overflow, discarding %d bytes\n",
+ count - n);
+ count = n;
+ }
+ memcpy(inbuf->data + tail, buf, count);
+ tail += count;
+ }
+
+ gig_dbg(DEBUG_INTR, "setting tail to %u", tail);
+ inbuf->tail = tail;
+
+ /* Everything was received .. Push data into handler */
+ gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
+ gigaset_schedule_event(cs);
+ cs_put(cs);
+}
+
+/*
+ * Called by the tty driver when there's room for more data to send.
+ */
+static void
+gigaset_tty_wakeup(struct tty_struct *tty)
+{
+ struct cardstate *cs = cs_get(tty);
+
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ if (!cs)
+ return;
+ tasklet_schedule(&cs->write_tasklet);
+ cs_put(cs);
+}
+
+static struct tty_ldisc_ops gigaset_ldisc = {
+ .owner = THIS_MODULE,
+ .magic = TTY_LDISC_MAGIC,
+ .name = "ser_gigaset",
+ .open = gigaset_tty_open,
+ .close = gigaset_tty_close,
+ .hangup = gigaset_tty_hangup,
+ .ioctl = gigaset_tty_ioctl,
+ .receive_buf = gigaset_tty_receive,
+ .write_wakeup = gigaset_tty_wakeup,
+};
+
+
+/* Initialization / Shutdown */
+/* ========================= */
+
+static int __init ser_gigaset_init(void)
+{
+ int rc;
+
+ gig_dbg(DEBUG_INIT, "%s", __func__);
+ rc = platform_driver_register(&device_driver);
+ if (rc != 0) {
+ pr_err("error %d registering platform driver\n", rc);
+ return rc;
+ }
+
+ /* allocate memory for our driver state and initialize it */
+ driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
+ GIGASET_MODULENAME, GIGASET_DEVNAME,
+ &ops, THIS_MODULE);
+ if (!driver) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ rc = tty_register_ldisc(N_GIGASET_M101, &gigaset_ldisc);
+ if (rc != 0) {
+ pr_err("error %d registering line discipline\n", rc);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ if (driver) {
+ gigaset_freedriver(driver);
+ driver = NULL;
+ }
+ platform_driver_unregister(&device_driver);
+ return rc;
+}
+
+static void __exit ser_gigaset_exit(void)
+{
+ int rc;
+
+ gig_dbg(DEBUG_INIT, "%s", __func__);
+
+ if (driver) {
+ gigaset_freedriver(driver);
+ driver = NULL;
+ }
+
+ rc = tty_unregister_ldisc(N_GIGASET_M101);
+ if (rc != 0)
+ pr_err("error %d unregistering line discipline\n", rc);
+
+ platform_driver_unregister(&device_driver);
+}
+
+module_init(ser_gigaset_init);
+module_exit(ser_gigaset_exit);
diff --git a/drivers/staging/isdn/gigaset/usb-gigaset.c b/drivers/staging/isdn/gigaset/usb-gigaset.c
new file mode 100644
index 000000000000..1b9b43659bdf
--- /dev/null
+++ b/drivers/staging/isdn/gigaset/usb-gigaset.c
@@ -0,0 +1,946 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * USB driver for Gigaset 307x directly or using M105 Data.
+ *
+ * Copyright (c) 2001 by Stefan Eilers
+ * and Hansjoerg Lipp <hjlipp@web.de>.
+ *
+ * This driver was derived from the USB skeleton driver by
+ * Greg Kroah-Hartman <greg@kroah.com>
+ *
+ * =====================================================================
+ * =====================================================================
+ */
+
+#include "gigaset.h"
+#include <linux/usb.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+/* Version Information */
+#define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Stefan Eilers"
+#define DRIVER_DESC "USB Driver for Gigaset 307x using M105"
+
+/* Module parameters */
+
+static int startmode = SM_ISDN;
+static int cidmode = 1;
+
+module_param(startmode, int, S_IRUGO);
+module_param(cidmode, int, S_IRUGO);
+MODULE_PARM_DESC(startmode, "start in isdn4linux mode");
+MODULE_PARM_DESC(cidmode, "Call-ID mode");
+
+#define GIGASET_MINORS 1
+#define GIGASET_MINOR 8
+#define GIGASET_MODULENAME "usb_gigaset"
+#define GIGASET_DEVNAME "ttyGU"
+
+/* length limit according to Siemens 3070usb-protokoll.doc ch. 2.1 */
+#define IF_WRITEBUF 264
+
+/* Values for the Gigaset M105 Data */
+#define USB_M105_VENDOR_ID 0x0681
+#define USB_M105_PRODUCT_ID 0x0009
+
+/* table of devices that work with this driver */
+static const struct usb_device_id gigaset_table[] = {
+ { USB_DEVICE(USB_M105_VENDOR_ID, USB_M105_PRODUCT_ID) },
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, gigaset_table);
+
+/*
+ * Control requests (empty fields: 00)
+ *
+ * RT|RQ|VALUE|INDEX|LEN |DATA
+ * In:
+ * C1 08 01
+ * Get flags (1 byte). Bits: 0=dtr,1=rts,3-7:?
+ * C1 0F ll ll
+ * Get device information/status (llll: 0x200 and 0x40 seen).
+ * Real size: I only saw MIN(llll,0x64).
+ * Contents: seems to be always the same...
+ * offset 0x00: Length of this structure (0x64) (len: 1,2,3 bytes)
+ * offset 0x3c: String (16 bit chars): "MCCI USB Serial V2.0"
+ * rest: ?
+ * Out:
+ * 41 11
+ * Initialize/reset device ?
+ * 41 00 xx 00
+ * ? (xx=00 or 01; 01 on start, 00 on close)
+ * 41 07 vv mm
+ * Set/clear flags vv=value, mm=mask (see RQ 08)
+ * 41 12 xx
+ * Used before the following configuration requests are issued
+ * (with xx=0x0f). I've seen other values<0xf, though.
+ * 41 01 xx xx
+ * Set baud rate. xxxx=ceil(0x384000/rate)=trunc(0x383fff/rate)+1.
+ * 41 03 ps bb
+ * Set byte size and parity. p: 0x20=even,0x10=odd,0x00=no parity
+ * [ 0x30: m, 0x40: s ]
+ * [s: 0: 1 stop bit; 1: 1.5; 2: 2]
+ * bb: bits/byte (seen 7 and 8)
+ * 41 13 -- -- -- -- 10 00 ww 00 00 00 xx 00 00 00 yy 00 00 00 zz 00 00 00
+ * ??
+ * Initialization: 01, 40, 00, 00
+ * Open device: 00 40, 00, 00
+ * yy and zz seem to be equal, either 0x00 or 0x0a
+ * (ww,xx) pairs seen: (00,00), (00,40), (01,40), (09,80), (19,80)
+ * 41 19 -- -- -- -- 06 00 00 00 00 xx 11 13
+ * Used after every "configuration sequence" (RQ 12, RQs 01/03/13).
+ * xx is usually 0x00 but was 0x7e before starting data transfer
+ * in unimodem mode. So, this might be an array of characters that
+ * need special treatment ("commit all buffered data"?), 11=^Q, 13=^S.
+ *
+ * Unimodem mode: use "modprobe ppp_async flag_time=0" as the device _needs_ two
+ * flags per packet.
+ */
+
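+/*
+ * Illustration only (not part of the M105 protocol code, helper name is
+ * made up): the divisor sent with baud-rate request 0x01 follows the
+ * formula documented above.
+ */
+#if 0	/* example, never compiled */
+static u16 m105_baud_divisor(u32 rate)
+{
+ /* trunc(0x383fff / rate) + 1 == ceil(0x384000 / rate) */
+ return 0x383fff / rate + 1; /* 115200 -> 0x20, 9600 -> 0x180 */
+}
+#endif
+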
+/* functions called if a device of this driver is connected/disconnected */
+static int gigaset_probe(struct usb_interface *interface,
+ const struct usb_device_id *id);
+static void gigaset_disconnect(struct usb_interface *interface);
+
+/* functions called before/after suspend */
+static int gigaset_suspend(struct usb_interface *intf, pm_message_t message);
+static int gigaset_resume(struct usb_interface *intf);
+static int gigaset_pre_reset(struct usb_interface *intf);
+
+static struct gigaset_driver *driver;
+
+/* usb specific object needed to register this driver with the usb subsystem */
+static struct usb_driver gigaset_usb_driver = {
+ .name = GIGASET_MODULENAME,
+ .probe = gigaset_probe,
+ .disconnect = gigaset_disconnect,
+ .id_table = gigaset_table,
+ .suspend = gigaset_suspend,
+ .resume = gigaset_resume,
+ .reset_resume = gigaset_resume,
+ .pre_reset = gigaset_pre_reset,
+ .post_reset = gigaset_resume,
+ .disable_hub_initiated_lpm = 1,
+};
+
+struct usb_cardstate {
+ struct usb_device *udev; /* usb device pointer */
+ struct usb_interface *interface; /* interface for this device */
+ int busy; /* bulk output in progress */
+
+ /* Output buffer */
+ unsigned char *bulk_out_buffer;
+ int bulk_out_size;
+ int bulk_out_epnum;
+ struct urb *bulk_out_urb;
+
+ /* Input buffer */
+ unsigned char *rcvbuf;
+ int rcvbuf_size;
+ struct urb *read_urb;
+
+ char bchars[6]; /* for request 0x19 */
+};
+
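+/*
+ * Map the TIOCM_DTR/TIOCM_RTS bits to the flag layout used by control
+ * requests 0x07/0x08 (bit 0 = DTR, bit 1 = RTS, see the notes above).
+ */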
+static inline unsigned tiocm_to_gigaset(unsigned state)
+{
+ return ((state & TIOCM_DTR) ? 1 : 0) | ((state & TIOCM_RTS) ? 2 : 0);
+}
+
+static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
+ unsigned new_state)
+{
+ struct usb_device *udev = cs->hw.usb->udev;
+ unsigned mask, val;
+ int r;
+
+ mask = tiocm_to_gigaset(old_state ^ new_state);
+ val = tiocm_to_gigaset(new_state);
+
+ gig_dbg(DEBUG_USBREQ, "set flags 0x%02x with mask 0x%02x", val, mask);
+ r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 7, 0x41,
+ (val & 0xff) | ((mask & 0xff) << 8), 0,
+ NULL, 0, 2000 /* timeout? */);
+ if (r < 0)
+ return r;
+ return 0;
+}
+
+/*
+ * Set M105 configuration value
+ * using undocumented device commands reverse engineered from USB traces
+ * of the Siemens Windows driver
+ */
+static int set_value(struct cardstate *cs, u8 req, u16 val)
+{
+ struct usb_device *udev = cs->hw.usb->udev;
+ int r, r2;
+
+ gig_dbg(DEBUG_USBREQ, "request %02x (%04x)",
+ (unsigned)req, (unsigned)val);
+ r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x12, 0x41,
+ 0xf /*?*/, 0, NULL, 0, 2000 /*?*/);
+ /* no idea what this does */
+ if (r < 0) {
+ dev_err(&udev->dev, "error %d on request 0x12\n", -r);
+ return r;
+ }
+
+ r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), req, 0x41,
+ val, 0, NULL, 0, 2000 /*?*/);
+ if (r < 0)
+ dev_err(&udev->dev, "error %d on request 0x%02x\n",
+ -r, (unsigned)req);
+
+ r2 = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
+ 0, 0, cs->hw.usb->bchars, 6, 2000 /*?*/);
+ if (r2 < 0)
+ dev_err(&udev->dev, "error %d on request 0x19\n", -r2);
+
+ return r < 0 ? r : (r2 < 0 ? r2 : 0);
+}
+
+/*
+ * set the baud rate on the internal serial adapter
+ * using the undocumented parameter setting command
+ */
+static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
+{
+ u16 val;
+ u32 rate;
+
+ cflag &= CBAUD;
+
+ switch (cflag) {
+ case B300: rate = 300; break;
+ case B600: rate = 600; break;
+ case B1200: rate = 1200; break;
+ case B2400: rate = 2400; break;
+ case B4800: rate = 4800; break;
+ case B9600: rate = 9600; break;
+ case B19200: rate = 19200; break;
+ case B38400: rate = 38400; break;
+ case B57600: rate = 57600; break;
+ case B115200: rate = 115200; break;
+ default:
+ rate = 9600;
+ dev_err(cs->dev, "unsupported baudrate request 0x%x,"
+ " using default of B9600\n", cflag);
+ }
+
+ val = 0x383fff / rate + 1;
+
+ return set_value(cs, 1, val);
+}
+
+/*
+ * set the line format on the internal serial adapter
+ * using the undocumented parameter setting command
+ */
+static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
+{
+ u16 val = 0;
+
+ /* set the parity */
+ if (cflag & PARENB)
+ val |= (cflag & PARODD) ? 0x10 : 0x20;
+
+ /* set the number of data bits */
+ switch (cflag & CSIZE) {
+ case CS5:
+ val |= 5 << 8; break;
+ case CS6:
+ val |= 6 << 8; break;
+ case CS7:
+ val |= 7 << 8; break;
+ case CS8:
+ val |= 8 << 8; break;
+ default:
+ dev_err(cs->dev, "CSIZE was not CS5-CS8, using default of 8\n");
+ val |= 8 << 8;
+ break;
+ }
+
+ /* set the number of stop bits */
+ if (cflag & CSTOPB) {
+ if ((cflag & CSIZE) == CS5)
+ val |= 1; /* 1.5 stop bits */
+ else
+ val |= 2; /* 2 stop bits */
+ }
+
+ return set_value(cs, 3, val);
+}
+
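+/*
+ * For reference (derived from the request 0x03 encoding above): 8N1 maps to
+ * value 0x0800, 7E1 to 0x0720 and 8N2 to 0x0802; CS5 with CSTOPB selects
+ * 1.5 stop bits (low bits = 1).
+ */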
+
+/*============================================================================*/
+static int gigaset_init_bchannel(struct bc_state *bcs)
+{
+ /* nothing to do for M10x */
+ gigaset_bchannel_up(bcs);
+ return 0;
+}
+
+static int gigaset_close_bchannel(struct bc_state *bcs)
+{
+ /* nothing to do for M10x */
+ gigaset_bchannel_down(bcs);
+ return 0;
+}
+
+static int write_modem(struct cardstate *cs);
+static int send_cb(struct cardstate *cs);
+
+
+/* Write tasklet handler: Continue sending current skb, or send command, or
+ * start sending an skb from the send queue.
+ */
+static void gigaset_modem_fill(unsigned long data)
+{
+ struct cardstate *cs = (struct cardstate *) data;
+ struct bc_state *bcs = &cs->bcs[0]; /* only one channel */
+
+ gig_dbg(DEBUG_OUTPUT, "modem_fill");
+
+ if (cs->hw.usb->busy) {
+ gig_dbg(DEBUG_OUTPUT, "modem_fill: busy");
+ return;
+ }
+
+again:
+ if (!bcs->tx_skb) { /* no skb is being sent */
+ if (cs->cmdbuf) { /* commands to send? */
+ gig_dbg(DEBUG_OUTPUT, "modem_fill: cb");
+ if (send_cb(cs) < 0) {
+ gig_dbg(DEBUG_OUTPUT,
+ "modem_fill: send_cb failed");
+ goto again; /* no callback will be called! */
+ }
+ return;
+ }
+
+ /* skbs to send? */
+ bcs->tx_skb = skb_dequeue(&bcs->squeue);
+ if (!bcs->tx_skb)
+ return;
+
+ gig_dbg(DEBUG_INTR, "Dequeued skb (Adr: %lx)!",
+ (unsigned long) bcs->tx_skb);
+ }
+
+ gig_dbg(DEBUG_OUTPUT, "modem_fill: tx_skb");
+ if (write_modem(cs) < 0) {
+ gig_dbg(DEBUG_OUTPUT, "modem_fill: write_modem failed");
+ goto again; /* no callback will be called! */
+ }
+}
+
+/*
+ * Interrupt Input URB completion routine
+ */
+static void gigaset_read_int_callback(struct urb *urb)
+{
+ struct cardstate *cs = urb->context;
+ struct inbuf_t *inbuf = cs->inbuf;
+ int status = urb->status;
+ int r;
+ unsigned numbytes;
+ unsigned char *src;
+ unsigned long flags;
+
+ if (!status) {
+ numbytes = urb->actual_length;
+
+ if (numbytes) {
+ src = cs->hw.usb->rcvbuf;
+ if (unlikely(*src))
+ dev_warn(cs->dev,
+ "%s: There was no leading 0, but 0x%02x!\n",
+ __func__, (unsigned) *src);
+ ++src; /* skip leading 0x00 */
+ --numbytes;
+ if (gigaset_fill_inbuf(inbuf, src, numbytes)) {
+ gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
+ gigaset_schedule_event(inbuf->cs);
+ }
+ } else
+ gig_dbg(DEBUG_INTR, "Received zero block length");
+ } else {
+ /* The urb might have been killed. */
+ gig_dbg(DEBUG_ANY, "%s - nonzero status received: %d",
+ __func__, status);
+ if (status == -ENOENT || status == -ESHUTDOWN)
+ /* killed or endpoint shutdown: don't resubmit */
+ return;
+ }
+
+ /* resubmit URB */
+ spin_lock_irqsave(&cs->lock, flags);
+ if (!cs->connected) {
+ spin_unlock_irqrestore(&cs->lock, flags);
+ pr_err("%s: disconnected\n", __func__);
+ return;
+ }
+ r = usb_submit_urb(urb, GFP_ATOMIC);
+ spin_unlock_irqrestore(&cs->lock, flags);
+ if (r)
+ dev_err(cs->dev, "error %d resubmitting URB\n", -r);
+}
+
+
+/* This callback routine is called when data was transmitted to the device. */
+static void gigaset_write_bulk_callback(struct urb *urb)
+{
+ struct cardstate *cs = urb->context;
+ int status = urb->status;
+ unsigned long flags;
+
+ switch (status) {
+ case 0: /* normal completion */
+ break;
+ case -ENOENT: /* killed */
+ gig_dbg(DEBUG_ANY, "%s: killed", __func__);
+ cs->hw.usb->busy = 0;
+ return;
+ default:
+ dev_err(cs->dev, "bulk transfer failed (status %d)\n",
+ -status);
+ /* That's all we can do. Communication problems
+ are handled by timeouts or network protocols. */
+ }
+
+ spin_lock_irqsave(&cs->lock, flags);
+ if (!cs->connected) {
+ pr_err("%s: disconnected\n", __func__);
+ } else {
+ cs->hw.usb->busy = 0;
+ tasklet_schedule(&cs->write_tasklet);
+ }
+ spin_unlock_irqrestore(&cs->lock, flags);
+}
+
+static int send_cb(struct cardstate *cs)
+{
+ struct cmdbuf_t *cb = cs->cmdbuf;
+ unsigned long flags;
+ int count;
+ int status = -ENOENT;
+ struct usb_cardstate *ucs = cs->hw.usb;
+
+ do {
+ if (!cb->len) {
+ spin_lock_irqsave(&cs->cmdlock, flags);
+ cs->cmdbytes -= cs->curlen;
+ gig_dbg(DEBUG_OUTPUT, "send_cb: sent %u bytes, %u left",
+ cs->curlen, cs->cmdbytes);
+ cs->cmdbuf = cb->next;
+ if (cs->cmdbuf) {
+ cs->cmdbuf->prev = NULL;
+ cs->curlen = cs->cmdbuf->len;
+ } else {
+ cs->lastcmdbuf = NULL;
+ cs->curlen = 0;
+ }
+ spin_unlock_irqrestore(&cs->cmdlock, flags);
+
+ if (cb->wake_tasklet)
+ tasklet_schedule(cb->wake_tasklet);
+ kfree(cb);
+
+ cb = cs->cmdbuf;
+ }
+
+ if (cb) {
+ count = min(cb->len, ucs->bulk_out_size);
+ gig_dbg(DEBUG_OUTPUT, "send_cb: send %d bytes", count);
+
+ usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev,
+ usb_sndbulkpipe(ucs->udev,
+ ucs->bulk_out_epnum),
+ cb->buf + cb->offset, count,
+ gigaset_write_bulk_callback, cs);
+
+ cb->offset += count;
+ cb->len -= count;
+ ucs->busy = 1;
+
+ spin_lock_irqsave(&cs->lock, flags);
+ status = cs->connected ?
+ usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC) :
+ -ENODEV;
+ spin_unlock_irqrestore(&cs->lock, flags);
+
+ if (status) {
+ ucs->busy = 0;
+ dev_err(cs->dev,
+ "could not submit urb (error %d)\n",
+ -status);
+ cb->len = 0; /* skip urb => remove cb+wakeup
+ in next loop cycle */
+ }
+ }
+ } while (cb && status); /* next command on error */
+
+ return status;
+}
+
+/* Send command to device. */
+static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb)
+{
+ unsigned long flags;
+ int len;
+
+ gigaset_dbg_buffer(cs->mstate != MS_LOCKED ?
+ DEBUG_TRANSCMD : DEBUG_LOCKCMD,
+ "CMD Transmit", cb->len, cb->buf);
+
+ spin_lock_irqsave(&cs->cmdlock, flags);
+ cb->prev = cs->lastcmdbuf;
+ if (cs->lastcmdbuf)
+ cs->lastcmdbuf->next = cb;
+ else {
+ cs->cmdbuf = cb;
+ cs->curlen = cb->len;
+ }
+ cs->cmdbytes += cb->len;
+ cs->lastcmdbuf = cb;
+ spin_unlock_irqrestore(&cs->cmdlock, flags);
+
+ spin_lock_irqsave(&cs->lock, flags);
+ len = cb->len;
+ if (cs->connected)
+ tasklet_schedule(&cs->write_tasklet);
+ spin_unlock_irqrestore(&cs->lock, flags);
+ return len;
+}
+
+static int gigaset_write_room(struct cardstate *cs)
+{
+ unsigned bytes;
+
+ bytes = cs->cmdbytes;
+ return bytes < IF_WRITEBUF ? IF_WRITEBUF - bytes : 0;
+}
+
+static int gigaset_chars_in_buffer(struct cardstate *cs)
+{
+ return cs->cmdbytes;
+}
+
+/*
+ * set the break characters on the internal serial adapter
+ * using undocumented device commands reverse engineered from USB traces
+ * of the Siemens Windows driver
+ */
+static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
+{
+ struct usb_device *udev = cs->hw.usb->udev;
+
+ gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
+ memcpy(cs->hw.usb->bchars, buf, 6);
+ return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
+ 0, 0, &buf, 6, 2000);
+}
+
+static void gigaset_freebcshw(struct bc_state *bcs)
+{
+ /* unused */
+}
+
+/* Initialize the b-channel structure */
+static int gigaset_initbcshw(struct bc_state *bcs)
+{
+ /* unused */
+ bcs->hw.usb = NULL;
+ return 0;
+}
+
+static void gigaset_reinitbcshw(struct bc_state *bcs)
+{
+ /* nothing to do for M10x */
+}
+
+static void gigaset_freecshw(struct cardstate *cs)
+{
+ tasklet_kill(&cs->write_tasklet);
+ kfree(cs->hw.usb);
+}
+
+static int gigaset_initcshw(struct cardstate *cs)
+{
+ struct usb_cardstate *ucs;
+
+ cs->hw.usb = ucs =
+ kmalloc(sizeof(struct usb_cardstate), GFP_KERNEL);
+ if (!ucs) {
+ pr_err("out of memory\n");
+ return -ENOMEM;
+ }
+
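+ /* default "break characters" for request 0x19: XON (0x11) / XOFF (0x13) */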
+ ucs->bchars[0] = 0;
+ ucs->bchars[1] = 0;
+ ucs->bchars[2] = 0;
+ ucs->bchars[3] = 0;
+ ucs->bchars[4] = 0x11;
+ ucs->bchars[5] = 0x13;
+ ucs->bulk_out_buffer = NULL;
+ ucs->bulk_out_urb = NULL;
+ ucs->read_urb = NULL;
+ tasklet_init(&cs->write_tasklet,
+ gigaset_modem_fill, (unsigned long) cs);
+
+ return 0;
+}
+
+/* Send data from current skb to the device. */
+static int write_modem(struct cardstate *cs)
+{
+ int ret = 0;
+ int count;
+ struct bc_state *bcs = &cs->bcs[0]; /* only one channel */
+ struct usb_cardstate *ucs = cs->hw.usb;
+ unsigned long flags;
+
+ gig_dbg(DEBUG_OUTPUT, "len: %d...", bcs->tx_skb->len);
+
+ if (!bcs->tx_skb->len) {
+ dev_kfree_skb_any(bcs->tx_skb);
+ bcs->tx_skb = NULL;
+ return -EINVAL;
+ }
+
+ /* Copy data to bulk out buffer and transmit data */
+ count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size);
+ skb_copy_from_linear_data(bcs->tx_skb, ucs->bulk_out_buffer, count);
+ skb_pull(bcs->tx_skb, count);
+ ucs->busy = 1;
+ gig_dbg(DEBUG_OUTPUT, "write_modem: send %d bytes", count);
+
+ spin_lock_irqsave(&cs->lock, flags);
+ if (cs->connected) {
+ usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev,
+ usb_sndbulkpipe(ucs->udev,
+ ucs->bulk_out_epnum),
+ ucs->bulk_out_buffer, count,
+ gigaset_write_bulk_callback, cs);
+ ret = usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC);
+ } else {
+ ret = -ENODEV;
+ }
+ spin_unlock_irqrestore(&cs->lock, flags);
+
+ if (ret) {
+ dev_err(cs->dev, "could not submit urb (error %d)\n", -ret);
+ ucs->busy = 0;
+ }
+
+ if (!bcs->tx_skb->len) {
+ /* skb sent completely */
+ gigaset_skb_sent(bcs, bcs->tx_skb);
+
+ gig_dbg(DEBUG_INTR, "kfree skb (Adr: %lx)!",
+ (unsigned long) bcs->tx_skb);
+ dev_kfree_skb_any(bcs->tx_skb);
+ bcs->tx_skb = NULL;
+ }
+
+ return ret;
+}
+
+static int gigaset_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ int retval;
+ struct usb_device *udev = interface_to_usbdev(interface);
+ struct usb_host_interface *hostif = interface->cur_altsetting;
+ struct cardstate *cs = NULL;
+ struct usb_cardstate *ucs = NULL;
+ struct usb_endpoint_descriptor *endpoint;
+ int buffer_size;
+
+ gig_dbg(DEBUG_ANY, "%s: Check if device matches ...", __func__);
+
+ /* See if the device offered us matches what we can accept */
+ if ((le16_to_cpu(udev->descriptor.idVendor) != USB_M105_VENDOR_ID) ||
+ (le16_to_cpu(udev->descriptor.idProduct) != USB_M105_PRODUCT_ID)) {
+ gig_dbg(DEBUG_ANY, "device ID (0x%x, 0x%x) not for me - skip",
+ le16_to_cpu(udev->descriptor.idVendor),
+ le16_to_cpu(udev->descriptor.idProduct));
+ return -ENODEV;
+ }
+ if (hostif->desc.bInterfaceNumber != 0) {
+ gig_dbg(DEBUG_ANY, "interface %d not for me - skip",
+ hostif->desc.bInterfaceNumber);
+ return -ENODEV;
+ }
+ if (hostif->desc.bAlternateSetting != 0) {
+ dev_notice(&udev->dev, "unsupported altsetting %d - skip",
+ hostif->desc.bAlternateSetting);
+ return -ENODEV;
+ }
+ if (hostif->desc.bInterfaceClass != 255) {
+ dev_notice(&udev->dev, "unsupported interface class %d - skip",
+ hostif->desc.bInterfaceClass);
+ return -ENODEV;
+ }
+
+ dev_info(&udev->dev, "%s: Device matched ... !\n", __func__);
+
+ /* allocate memory for our device state and initialize it */
+ cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
+ if (!cs)
+ return -ENODEV;
+ ucs = cs->hw.usb;
+
+ /* save off device structure ptrs for later use */
+ usb_get_dev(udev);
+ ucs->udev = udev;
+ ucs->interface = interface;
+ cs->dev = &interface->dev;
+
+ /* save address of controller structure */
+ usb_set_intfdata(interface, cs);
+
+ endpoint = &hostif->endpoint[0].desc;
+
+ buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
+ ucs->bulk_out_size = buffer_size;
+ ucs->bulk_out_epnum = usb_endpoint_num(endpoint);
+ ucs->bulk_out_buffer = kmalloc(buffer_size, GFP_KERNEL);
+ if (!ucs->bulk_out_buffer) {
+ dev_err(cs->dev, "Couldn't allocate bulk_out_buffer\n");
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ ucs->bulk_out_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!ucs->bulk_out_urb) {
+ dev_err(cs->dev, "Couldn't allocate bulk_out_urb\n");
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ endpoint = &hostif->endpoint[1].desc;
+
+ ucs->busy = 0;
+
+ ucs->read_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!ucs->read_urb) {
+ dev_err(cs->dev, "No free urbs available\n");
+ retval = -ENOMEM;
+ goto error;
+ }
+ buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
+ ucs->rcvbuf_size = buffer_size;
+ ucs->rcvbuf = kmalloc(buffer_size, GFP_KERNEL);
+ if (!ucs->rcvbuf) {
+ dev_err(cs->dev, "Couldn't allocate rcvbuf\n");
+ retval = -ENOMEM;
+ goto error;
+ }
+ /* Fill the interrupt urb and send it to the core */
+ usb_fill_int_urb(ucs->read_urb, udev,
+ usb_rcvintpipe(udev, usb_endpoint_num(endpoint)),
+ ucs->rcvbuf, buffer_size,
+ gigaset_read_int_callback,
+ cs, endpoint->bInterval);
+
+ retval = usb_submit_urb(ucs->read_urb, GFP_KERNEL);
+ if (retval) {
+ dev_err(cs->dev, "Could not submit URB (error %d)\n", -retval);
+ goto error;
+ }
+
+ /* tell common part that the device is ready */
+ if (startmode == SM_LOCKED)
+ cs->mstate = MS_LOCKED;
+
+ retval = gigaset_start(cs);
+ if (retval < 0) {
+ tasklet_kill(&cs->write_tasklet);
+ goto error;
+ }
+ return 0;
+
+error:
+ usb_kill_urb(ucs->read_urb);
+ kfree(ucs->bulk_out_buffer);
+ usb_free_urb(ucs->bulk_out_urb);
+ kfree(ucs->rcvbuf);
+ usb_free_urb(ucs->read_urb);
+ usb_set_intfdata(interface, NULL);
+ ucs->read_urb = ucs->bulk_out_urb = NULL;
+ ucs->rcvbuf = ucs->bulk_out_buffer = NULL;
+ usb_put_dev(ucs->udev);
+ ucs->udev = NULL;
+ ucs->interface = NULL;
+ gigaset_freecs(cs);
+ return retval;
+}
+
+static void gigaset_disconnect(struct usb_interface *interface)
+{
+ struct cardstate *cs;
+ struct usb_cardstate *ucs;
+
+ cs = usb_get_intfdata(interface);
+ ucs = cs->hw.usb;
+
+ dev_info(cs->dev, "disconnecting Gigaset USB adapter\n");
+
+ usb_kill_urb(ucs->read_urb);
+
+ gigaset_stop(cs);
+
+ usb_set_intfdata(interface, NULL);
+ tasklet_kill(&cs->write_tasklet);
+
+ usb_kill_urb(ucs->bulk_out_urb);
+
+ kfree(ucs->bulk_out_buffer);
+ usb_free_urb(ucs->bulk_out_urb);
+ kfree(ucs->rcvbuf);
+ usb_free_urb(ucs->read_urb);
+ ucs->read_urb = ucs->bulk_out_urb = NULL;
+ ucs->rcvbuf = ucs->bulk_out_buffer = NULL;
+
+ usb_put_dev(ucs->udev);
+ ucs->interface = NULL;
+ ucs->udev = NULL;
+ cs->dev = NULL;
+ gigaset_freecs(cs);
+}
+
+/* gigaset_suspend
+ * This function is called before the USB connection is suspended or reset.
+ */
+static int gigaset_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct cardstate *cs = usb_get_intfdata(intf);
+
+ /* stop activity */
+ cs->connected = 0; /* prevent rescheduling */
+ usb_kill_urb(cs->hw.usb->read_urb);
+ tasklet_kill(&cs->write_tasklet);
+ usb_kill_urb(cs->hw.usb->bulk_out_urb);
+
+ gig_dbg(DEBUG_SUSPEND, "suspend complete");
+ return 0;
+}
+
+/* gigaset_resume
+ * This function is called after the USB connection has been resumed or reset.
+ */
+static int gigaset_resume(struct usb_interface *intf)
+{
+ struct cardstate *cs = usb_get_intfdata(intf);
+ int rc;
+
+ /* resubmit interrupt URB */
+ cs->connected = 1;
+ rc = usb_submit_urb(cs->hw.usb->read_urb, GFP_KERNEL);
+ if (rc) {
+ dev_err(cs->dev, "Could not submit read URB (error %d)\n", -rc);
+ return rc;
+ }
+
+ gig_dbg(DEBUG_SUSPEND, "resume complete");
+ return 0;
+}
+
+/* gigaset_pre_reset
+ * This function is called before the USB connection is reset.
+ */
+static int gigaset_pre_reset(struct usb_interface *intf)
+{
+ /* same as suspend */
+ return gigaset_suspend(intf, PMSG_ON);
+}
+
+static const struct gigaset_ops ops = {
+ .write_cmd = gigaset_write_cmd,
+ .write_room = gigaset_write_room,
+ .chars_in_buffer = gigaset_chars_in_buffer,
+ .brkchars = gigaset_brkchars,
+ .init_bchannel = gigaset_init_bchannel,
+ .close_bchannel = gigaset_close_bchannel,
+ .initbcshw = gigaset_initbcshw,
+ .freebcshw = gigaset_freebcshw,
+ .reinitbcshw = gigaset_reinitbcshw,
+ .initcshw = gigaset_initcshw,
+ .freecshw = gigaset_freecshw,
+ .set_modem_ctrl = gigaset_set_modem_ctrl,
+ .baud_rate = gigaset_baud_rate,
+ .set_line_ctrl = gigaset_set_line_ctrl,
+ .send_skb = gigaset_m10x_send_skb,
+ .handle_input = gigaset_m10x_input,
+};
+
+/*
+ * This function is called while kernel-module is loaded
+ */
+static int __init usb_gigaset_init(void)
+{
+ int result;
+
+ /* allocate memory for our driver state and initialize it */
+ driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
+ GIGASET_MODULENAME, GIGASET_DEVNAME,
+ &ops, THIS_MODULE);
+ if (driver == NULL) {
+ result = -ENOMEM;
+ goto error;
+ }
+
+ /* register this driver with the USB subsystem */
+ result = usb_register(&gigaset_usb_driver);
+ if (result < 0) {
+ pr_err("error %d registering USB driver\n", -result);
+ goto error;
+ }
+
+ pr_info(DRIVER_DESC "\n");
+ return 0;
+
+error:
+ if (driver)
+ gigaset_freedriver(driver);
+ driver = NULL;
+ return result;
+}
+
+/*
+ * This function is called while unloading the kernel-module
+ */
+static void __exit usb_gigaset_exit(void)
+{
+ int i;
+
+ gigaset_blockdriver(driver); /* => probe will fail
+ * => no gigaset_start any more
+ */
+
+ /* stop all connected devices */
+ for (i = 0; i < driver->minors; i++)
+ gigaset_shutdown(driver->cs + i);
+
+ /* from now on, no isdn callback should be possible */
+
+ /* deregister this driver with the USB subsystem */
+ usb_deregister(&gigaset_usb_driver);
+ /* this will call the disconnect-callback */
+ /* from now on, no disconnect/probe callback should be running */
+
+ gigaset_freedriver(driver);
+ driver = NULL;
+}
+
+
+module_init(usb_gigaset_init);
+module_exit(usb_gigaset_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/isdn/hysdn/Kconfig b/drivers/staging/isdn/hysdn/Kconfig
new file mode 100644
index 000000000000..1971ef850c9a
--- /dev/null
+++ b/drivers/staging/isdn/hysdn/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config HYSDN
+ tristate "Hypercope HYSDN cards (Champ, Ergo, Metro) support (module only)"
+ depends on m && PROC_FS && PCI
+ help
+ Say Y here if you have one of Hypercope's active PCI ISDN cards
+ Champ, Ergo and Metro. You will then get a module called hysdn.
+ Please read the file <file:Documentation/isdn/README.hysdn> for more
+ information.
+
+config HYSDN_CAPI
+ bool "HYSDN CAPI 2.0 support"
+ depends on HYSDN && ISDN_CAPI
+ help
+ Say Y here if you want to use Hypercope's CAPI 2.0 interface.
diff --git a/drivers/staging/isdn/hysdn/Makefile b/drivers/staging/isdn/hysdn/Makefile
new file mode 100644
index 000000000000..e01f17f22ebb
--- /dev/null
+++ b/drivers/staging/isdn/hysdn/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Makefile for the hysdn ISDN device driver
+
+# Each configuration option enables a list of files.
+
+obj-$(CONFIG_HYSDN) += hysdn.o
+
+# Multipart objects.
+
+hysdn-y := hysdn_procconf.o hysdn_proclog.o boardergo.o \
+ hysdn_boot.o hysdn_sched.o hysdn_net.o hysdn_init.o
+hysdn-$(CONFIG_HYSDN_CAPI) += hycapi.o
diff --git a/drivers/staging/isdn/hysdn/boardergo.c b/drivers/staging/isdn/hysdn/boardergo.c
new file mode 100644
index 000000000000..2aa2a0e08247
--- /dev/null
+++ b/drivers/staging/isdn/hysdn/boardergo.c
@@ -0,0 +1,445 @@
+/* $Id: boardergo.c,v 1.5.6.7 2001/11/06 21:58:19 kai Exp $
+ *
+ * Linux driver for HYSDN cards, specific routines for ergo type boards.
+ *
+ * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
+ * Copyright 1999 by Werner Cornelius (werner@titro.de)
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * As all Linux-supported cards (Champ2, Ergo and Metro2/4) use the same
+ * DPRAM interface and layout with only minor differences, all related
+ * stuff is done here, not in separate modules.
+ *
+ */
+
+#include <linux/signal.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+
+#include "hysdn_defs.h"
+#include "boardergo.h"
+
+#define byteout(addr, val) outb(val, addr)
+#define bytein(addr) inb(addr)
+
+/***************************************************/
+/* The cards interrupt handler. Called from system */
+/***************************************************/
+static irqreturn_t
+ergo_interrupt(int intno, void *dev_id)
+{
+ hysdn_card *card = dev_id; /* parameter from irq */
+ tErgDpram *dpr;
+ unsigned long flags;
+ unsigned char volatile b;
+
+ if (!card)
+ return IRQ_NONE; /* error -> spurious interrupt */
+ if (!card->irq_enabled)
+ return IRQ_NONE; /* other device interrupting or irq switched off */
+
+ spin_lock_irqsave(&card->hysdn_lock, flags); /* no further irqs allowed */
+
+ if (!(bytein(card->iobase + PCI9050_INTR_REG) & PCI9050_INTR_REG_STAT1)) {
+ spin_unlock_irqrestore(&card->hysdn_lock, flags); /* restore old state */
+ return IRQ_NONE; /* no interrupt requested by E1 */
+ }
+ /* clear any pending ints on the board */
+ dpr = card->dpram;
+ b = dpr->ToPcInt; /* clear for ergo */
+ b |= dpr->ToPcIntMetro; /* same for metro */
+ b |= dpr->ToHyInt; /* and for champ */
+
+ /* start kernel task immediately after leaving all interrupts */
+ if (!card->hw_lock)
+ schedule_work(&card->irq_queue);
+ spin_unlock_irqrestore(&card->hysdn_lock, flags);
+ return IRQ_HANDLED;
+} /* ergo_interrupt */
+
+/******************************************************************************/
+/* ergo_irq_bh will be called as part of the kernel clearing its shared work */
+/* queue sometime after a call to schedule_work has been made passing our */
+/* work_struct. This task is the only one handling data transfer from or to */
+/* the card after booting. The task may be queued from everywhere */
+/* (interrupts included). */
+/******************************************************************************/
+static void
+ergo_irq_bh(struct work_struct *ugli_api)
+{
+ hysdn_card *card = container_of(ugli_api, hysdn_card, irq_queue);
+ tErgDpram *dpr;
+ int again;
+ unsigned long flags;
+
+ if (card->state != CARD_STATE_RUN)
+ return; /* invalid call */
+
+ dpr = card->dpram; /* point to DPRAM */
+
+ spin_lock_irqsave(&card->hysdn_lock, flags);
+ if (card->hw_lock) {
+ spin_unlock_irqrestore(&card->hysdn_lock, flags); /* hardware currently unavailable */
+ return;
+ }
+ card->hw_lock = 1; /* we now lock the hardware */
+
+ do {
+ again = 0; /* assume loop not to be repeated */
+
+ if (!dpr->ToHyFlag) {
+ /* we are able to send a buffer */
+
+ if (hysdn_sched_tx(card, dpr->ToHyBuf, &dpr->ToHySize, &dpr->ToHyChannel,
+ ERG_TO_HY_BUF_SIZE)) {
+ dpr->ToHyFlag = 1; /* enable tx */
+ again = 1; /* restart loop */
+ }
+ } /* we are able to send a buffer */
+ if (dpr->ToPcFlag) {
+ /* a message has arrived for us, handle it */
+
+ if (hysdn_sched_rx(card, dpr->ToPcBuf, dpr->ToPcSize, dpr->ToPcChannel)) {
+ dpr->ToPcFlag = 0; /* we worked the data */
+ again = 1; /* restart loop */
+ }
+ } /* a message has arrived for us */
+ if (again) {
+ dpr->ToHyInt = 1;
+ dpr->ToPcInt = 1; /* interrupt to E1 for all cards */
+ } else
+ card->hw_lock = 0; /* free hardware again */
+ } while (again); /* until nothing more to do */
+
+ spin_unlock_irqrestore(&card->hysdn_lock, flags);
+} /* ergo_irq_bh */
+
+
+/*********************************************************/
+/* stop the card (hardware reset) and disable interrupts */
+/*********************************************************/
+static void
+ergo_stopcard(hysdn_card *card)
+{
+ unsigned long flags;
+ unsigned char val;
+
+ hysdn_net_release(card); /* first release the net device if existing */
+#ifdef CONFIG_HYSDN_CAPI
+ hycapi_capi_stop(card);
+#endif /* CONFIG_HYSDN_CAPI */
+ spin_lock_irqsave(&card->hysdn_lock, flags);
+ val = bytein(card->iobase + PCI9050_INTR_REG); /* get actual value */
+ val &= ~(PCI9050_INTR_REG_ENPCI | PCI9050_INTR_REG_EN1); /* mask irq */
+ byteout(card->iobase + PCI9050_INTR_REG, val);
+ card->irq_enabled = 0;
+ byteout(card->iobase + PCI9050_USER_IO, PCI9050_E1_RESET); /* reset E1 processor */
+ card->state = CARD_STATE_UNUSED;
+ card->err_log_state = ERRLOG_STATE_OFF; /* currently no log active */
+
+ spin_unlock_irqrestore(&card->hysdn_lock, flags);
+} /* ergo_stopcard */
+
+/**************************************************************************/
+/* enable or disable the cards error log. The event is queued if possible */
+/**************************************************************************/
+static void
+ergo_set_errlog_state(hysdn_card *card, int on)
+{
+ unsigned long flags;
+
+ if (card->state != CARD_STATE_RUN) {
+ card->err_log_state = ERRLOG_STATE_OFF; /* must be off */
+ return;
+ }
+ spin_lock_irqsave(&card->hysdn_lock, flags);
+
+ if (((card->err_log_state == ERRLOG_STATE_OFF) && !on) ||
+ ((card->err_log_state == ERRLOG_STATE_ON) && on)) {
+ spin_unlock_irqrestore(&card->hysdn_lock, flags);
+ return; /* nothing to do */
+ }
+ if (on)
+ card->err_log_state = ERRLOG_STATE_START; /* request start */
+ else
+ card->err_log_state = ERRLOG_STATE_STOP; /* request stop */
+
+ spin_unlock_irqrestore(&card->hysdn_lock, flags);
+ schedule_work(&card->irq_queue);
+} /* ergo_set_errlog_state */
+
+/******************************************/
+/* test the cards RAM and return 0 if ok. */
+/******************************************/
+static const char TestText[36] = "This Message is filler, why read it";
+
+static int
+ergo_testram(hysdn_card *card)
+{
+ tErgDpram *dpr = card->dpram;
+
+ memset(dpr->TrapTable, 0, sizeof(dpr->TrapTable)); /* clear all Traps */
+ dpr->ToHyInt = 1; /* E1 INTR state forced */
+
+ memcpy(&dpr->ToHyBuf[ERG_TO_HY_BUF_SIZE - sizeof(TestText)], TestText,
+ sizeof(TestText));
+ if (memcmp(&dpr->ToHyBuf[ERG_TO_HY_BUF_SIZE - sizeof(TestText)], TestText,
+ sizeof(TestText)))
+ return (-1);
+
+ memcpy(&dpr->ToPcBuf[ERG_TO_PC_BUF_SIZE - sizeof(TestText)], TestText,
+ sizeof(TestText));
+ if (memcmp(&dpr->ToPcBuf[ERG_TO_PC_BUF_SIZE - sizeof(TestText)], TestText,
+ sizeof(TestText)))
+ return (-1);
+
+ return (0);
+} /* ergo_testram */
+
+/*****************************************************************************/
+/* this function is intended to write stage 1 boot image to the cards buffer */
+/* this is done in two steps. First the 1024 hi-words are written (offs=0), */
+/* then the 1024 lo-words are written. The remaining DPRAM is cleared, the */
+/* PCI-write-buffers flushed and the card is taken out of reset. */
+/* The function then waits for a reaction of the E1 processor or a timeout. */
+/* Negative return values are interpreted as errors. */
+/*****************************************************************************/
+static int
+ergo_writebootimg(struct HYSDN_CARD *card, unsigned char *buf,
+ unsigned long offs)
+{
+ unsigned char *dst;
+ tErgDpram *dpram;
+ int cnt = (BOOT_IMG_SIZE >> 2); /* number of words to move and swap (byte order!) */
+
+ if (card->debug_flags & LOG_POF_CARD)
+ hysdn_addlog(card, "ERGO: write bootldr offs=0x%lx ", offs);
+
+ dst = card->dpram; /* pointer to start of DPRAM */
+ dst += (offs + ERG_DPRAM_FILL_SIZE); /* offset in the DPRAM */
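+ /*
+ * Each pass stores 1024 byte-swapped 16-bit words into either the
+ * high (offs = 0) or low (offs = 2) half of consecutive DPRAM
+ * longwords; two passes fill the complete boot image area.
+ */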
+ while (cnt--) {
+ *dst++ = *(buf + 1); /* high byte */
+ *dst++ = *buf; /* low byte */
+ dst += 2; /* point to next longword */
+ buf += 2; /* buffer only filled with words */
+ }
+
+ /* if low words (offs = 2) have been written, clear the rest of the DPRAM, */
+ /* flush the PCI-write-buffer and take the E1 out of reset */
+ if (offs) {
+ memset(card->dpram, 0, ERG_DPRAM_FILL_SIZE); /* fill the DPRAM still not cleared */
+ dpram = card->dpram; /* get pointer to dpram structure */
+ dpram->ToHyNoDpramErrLog = 0xFF; /* write a dpram register */
+ while (!dpram->ToHyNoDpramErrLog); /* reread volatile register to flush PCI */
+
+ byteout(card->iobase + PCI9050_USER_IO, PCI9050_E1_RUN); /* start E1 processor */
+ /* the interrupts are still masked */
+
+ msleep_interruptible(20); /* Timeout 20ms */
+
+ if (((tDpramBootSpooler *) card->dpram)->Len != DPRAM_SPOOLER_DATA_SIZE) {
+ if (card->debug_flags & LOG_POF_CARD)
+ hysdn_addlog(card, "ERGO: write bootldr no answer");
+ return (-ERR_BOOTIMG_FAIL);
+ }
+ } /* start_boot_img */
+ return (0); /* successful */
+} /* ergo_writebootimg */
+
+/********************************************************************************/
+/* ergo_writebootseq writes the buffer containing len bytes to the E1 processor */
+/* using the boot spool mechanism. If everything works fine 0 is returned. In */
+/* case of errors a negative error value is returned. */
+/********************************************************************************/
+static int
+ergo_writebootseq(struct HYSDN_CARD *card, unsigned char *buf, int len)
+{
+ tDpramBootSpooler *sp = (tDpramBootSpooler *) card->dpram;
+ unsigned char *dst;
+ unsigned char buflen;
+ int nr_write;
+ unsigned char tmp_rdptr;
+ unsigned char wr_mirror;
+ int i;
+
+ if (card->debug_flags & LOG_POF_CARD)
+ hysdn_addlog(card, "ERGO: write boot seq len=%d ", len);
+
+ dst = sp->Data; /* point to data in spool structure */
+ buflen = sp->Len; /* maximum len of spooled data */
+ wr_mirror = sp->WrPtr; /* only once read */
+
+ /* try until all bytes written or error */
+ i = 0x1000; /* timeout value */
+ while (len) {
+
+ /* first determine the number of bytes that may be buffered */
+ do {
+ tmp_rdptr = sp->RdPtr; /* first read the pointer */
+ i--; /* decrement timeout */
+ } while (i && (tmp_rdptr != sp->RdPtr)); /* wait for stable pointer */
+
+ if (!i) {
+ if (card->debug_flags & LOG_POF_CARD)
+ hysdn_addlog(card, "ERGO: write boot seq timeout");
+ return (-ERR_BOOTSEQ_FAIL); /* value not stable -> timeout */
+ }
+ if ((nr_write = tmp_rdptr - wr_mirror - 1) < 0)
+ nr_write += buflen; /* now we got number of free bytes - 1 in buffer */
+
+ if (!nr_write)
+ continue; /* no free bytes in buffer */
+
+ if (nr_write > len)
+ nr_write = len; /* limit if last few bytes */
+ i = 0x1000; /* reset timeout value */
+
+ /* now we know how many bytes we may put in the buffer */
+ len -= nr_write; /* we can safely adjust len before output */
+ while (nr_write--) {
+ *(dst + wr_mirror) = *buf++; /* output one byte */
+ if (++wr_mirror >= buflen)
+ wr_mirror = 0;
+ sp->WrPtr = wr_mirror; /* announce the next byte to E1 */
+ } /* while (nr_write) */
+
+ } /* while (len) */
+ return (0);
+} /* ergo_writebootseq */
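+
+/*
+ * For reference, the free-space computation above is the usual ring-buffer
+ * arithmetic keeping one slot unused: with buflen = 16, RdPtr = 3 and
+ * WrPtr = 10, nr_write = 3 - 10 - 1 = -8, corrected to 8 writable bytes.
+ */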
+
+/***********************************************************************************/
+/* ergo_waitpofready waits for a maximum of 10 seconds for the completion of the */
+/* boot process. If the process has been successful, 0 is returned; otherwise a */
+/* negative error code is returned. */
+/***********************************************************************************/
+static int
+ergo_waitpofready(struct HYSDN_CARD *card)
+{
+ tErgDpram *dpr = card->dpram; /* pointer to DPRAM structure */
+ int timecnt = 10000 / 50; /* timeout is 10 secs max. */
+ unsigned long flags;
+ int msg_size;
+ int i;
+
+ if (card->debug_flags & LOG_POF_CARD)
+ hysdn_addlog(card, "ERGO: waiting for pof ready");
+ while (timecnt--) {
+ /* wait until timeout */
+
+ if (dpr->ToPcFlag) {
+ /* data has arrived */
+
+ if ((dpr->ToPcChannel != CHAN_SYSTEM) ||
+ (dpr->ToPcSize < MIN_RDY_MSG_SIZE) ||
+ (dpr->ToPcSize > MAX_RDY_MSG_SIZE) ||
+ ((*(unsigned long *) dpr->ToPcBuf) != RDY_MAGIC))
+ break; /* an error occurred */
+
+ /* Check for additional data delivered during SysReady */
+ msg_size = dpr->ToPcSize - RDY_MAGIC_SIZE;
+ if (msg_size > 0)
+ if (EvalSysrTokData(card, dpr->ToPcBuf + RDY_MAGIC_SIZE, msg_size))
+ break;
+
+ if (card->debug_flags & LOG_POF_RECORD)
+ hysdn_addlog(card, "ERGO: pof boot success");
+ spin_lock_irqsave(&card->hysdn_lock, flags);
+
+ card->state = CARD_STATE_RUN; /* now card is running */
+ /* enable the cards interrupt */
+ byteout(card->iobase + PCI9050_INTR_REG,
+ bytein(card->iobase + PCI9050_INTR_REG) |
+ (PCI9050_INTR_REG_ENPCI | PCI9050_INTR_REG_EN1));
+ card->irq_enabled = 1; /* we are ready to receive interrupts */
+
+ dpr->ToPcFlag = 0; /* reset data indicator */
+ dpr->ToHyInt = 1;
+ dpr->ToPcInt = 1; /* interrupt to E1 for all cards */
+
+ spin_unlock_irqrestore(&card->hysdn_lock, flags);
+ if ((hynet_enable & (1 << card->myid))
+ && (i = hysdn_net_create(card)))
+ {
+ ergo_stopcard(card);
+ card->state = CARD_STATE_BOOTERR;
+ return (i);
+ }
+#ifdef CONFIG_HYSDN_CAPI
+ if ((i = hycapi_capi_create(card))) {
+ printk(KERN_WARNING "HYSDN: failed to create capi-interface.\n");
+ }
+#endif /* CONFIG_HYSDN_CAPI */
+ return (0); /* success */
+ } /* data has arrived */
+ msleep_interruptible(50); /* Timeout 50ms */
+ } /* wait until timeout */
+
+ if (card->debug_flags & LOG_POF_CARD)
+ hysdn_addlog(card, "ERGO: pof boot ready timeout");
+ return (-ERR_POF_TIMEOUT);
+} /* ergo_waitpofready */
+
+
+
+/************************************************************************************/
+/* release the cards hardware. Before releasing do a interrupt disable and hardware */
+/* reset. Also unmap dpram. */
+/* Use only during module release. */
+/************************************************************************************/
+static void
+ergo_releasehardware(hysdn_card *card)
+{
+ ergo_stopcard(card); /* first stop the card if not already done */
+ free_irq(card->irq, card); /* release interrupt */
+ release_region(card->iobase + PCI9050_INTR_REG, 1); /* release all io ports */
+ release_region(card->iobase + PCI9050_USER_IO, 1);
+ iounmap(card->dpram);
+ card->dpram = NULL; /* release shared mem */
+} /* ergo_releasehardware */
+
+
+/*********************************************************************************/
+/* acquire the needed hardware ports and map dpram. If an error occurs a nonzero */
+/* value is returned. */
+/* Use only during module init. */
+/*********************************************************************************/
+int
+ergo_inithardware(hysdn_card *card)
+{
+ if (!request_region(card->iobase + PCI9050_INTR_REG, 1, "HYSDN"))
+ return (-1);
+ if (!request_region(card->iobase + PCI9050_USER_IO, 1, "HYSDN")) {
+ release_region(card->iobase + PCI9050_INTR_REG, 1);
+ return (-1); /* ports already in use */
+ }
+ card->memend = card->membase + ERG_DPRAM_PAGE_SIZE - 1;
+ if (!(card->dpram = ioremap(card->membase, ERG_DPRAM_PAGE_SIZE))) {
+ release_region(card->iobase + PCI9050_INTR_REG, 1);
+ release_region(card->iobase + PCI9050_USER_IO, 1);
+ return (-1);
+ }
+
+ ergo_stopcard(card); /* disable interrupts */
+ if (request_irq(card->irq, ergo_interrupt, IRQF_SHARED, "HYSDN", card)) {
+ ergo_releasehardware(card); /* return the acquired hardware */
+ return (-1);
+ }
+ /* success, now setup the function pointers */
+ card->stopcard = ergo_stopcard;
+ card->releasehardware = ergo_releasehardware;
+ card->testram = ergo_testram;
+ card->writebootimg = ergo_writebootimg;
+ card->writebootseq = ergo_writebootseq;
+ card->waitpofready = ergo_waitpofready;
+ card->set_errlog_state = ergo_set_errlog_state;
+ INIT_WORK(&card->irq_queue, ergo_irq_bh);
+ spin_lock_init(&card->hysdn_lock);
+
+ return (0);
+} /* ergo_inithardware */
diff --git a/drivers/staging/isdn/hysdn/boardergo.h b/drivers/staging/isdn/hysdn/boardergo.h
new file mode 100644
index 000000000000..e99bd81c4034
--- /dev/null
+++ b/drivers/staging/isdn/hysdn/boardergo.h
@@ -0,0 +1,100 @@
+/* $Id: boardergo.h,v 1.2.6.1 2001/09/23 22:24:54 kai Exp $
+ *
+ * Linux driver for HYSDN cards, definitions for ergo type boards (buffers..).
+ *
+ * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
+ * Copyright 1999 by Werner Cornelius (werner@titro.de)
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+
+/************************************************/
+/* defines for the dual port memory of the card */
+/************************************************/
+#define ERG_DPRAM_PAGE_SIZE 0x2000 /* DPRAM occupies an 8K page */
+#define BOOT_IMG_SIZE 4096
+#define ERG_DPRAM_FILL_SIZE (ERG_DPRAM_PAGE_SIZE - BOOT_IMG_SIZE)
+
+#define ERG_TO_HY_BUF_SIZE 0x0E00 /* 3584 bytes buffer size to card */
+#define ERG_TO_PC_BUF_SIZE 0x0E00 /* 3584 bytes to PC, too */
+
+/* following DPRAM layout copied from OS2-driver boarderg.h */
+typedef struct ErgDpram_tag {
+ /*0000 */ unsigned char ToHyBuf[ERG_TO_HY_BUF_SIZE];
+ /*0E00 */ unsigned char ToPcBuf[ERG_TO_PC_BUF_SIZE];
+
+ /*1C00 */ unsigned char bSoftUart[SIZE_RSV_SOFT_UART];
+ /* size 0x1B0 */
+
+ /*1DB0 *//* tErrLogEntry */ unsigned char volatile ErrLogMsg[64];
+ /* size 64 bytes */
+ /*1DB0 unsigned long ulErrType; */
+ /*1DB4 unsigned long ulErrSubtype; */
+ /*1DB8 unsigned long ucTextSize; */
+ /*1DB9 unsigned long ucText[ERRLOG_TEXT_SIZE]; *//* ASCIIZ of len ucTextSize-1 */
+ /*1DF0 */
+
+ /*1DF0 */ unsigned short volatile ToHyChannel;
+ /*1DF2 */ unsigned short volatile ToHySize;
+ /*1DF4 */ unsigned char volatile ToHyFlag;
+ /* !=0: msg for Hy waiting */
+ /*1DF5 */ unsigned char volatile ToPcFlag;
+ /* !=0: msg for PC waiting */
+ /*1DF6 */ unsigned short volatile ToPcChannel;
+ /*1DF8 */ unsigned short volatile ToPcSize;
+ /*1DFA */ unsigned char bRes1DBA[0x1E00 - 0x1DFA];
+ /* 6 bytes */
+
+ /*1E00 */ unsigned char bRestOfEntryTbl[0x1F00 - 0x1E00];
+ /*1F00 */ unsigned long TrapTable[62];
+ /*1FF8 */ unsigned char bRes1FF8[0x1FFB - 0x1FF8];
+ /* low part of reset vector */
+ /*1FFB */ unsigned char ToPcIntMetro;
+ /* notes:
+ * - metro has 32-bit boot ram - accessing
+ * ToPcInt and ToHyInt would be the same;
+ * so we moved ToPcInt to 1FFB.
+ * Because on the PC side both vars are
+ * read-only (resetting on int from E1 to PC),
+ * we can read both vars on both cards
+ * without destroying anything.
+ * - 1FFB is the high byte of the reset vector,
+ * so E1 side should NOT change this byte
+ * when writing!
+ */
+ /*1FFC */ unsigned char volatile ToHyNoDpramErrLog;
+ /* note: ToHyNoDpramErrLog is used to inform
+ * boot loader, not to use DPRAM based
+ * ErrLog; when DOS driver is rewritten
+ * this becomes obsolete
+ */
+ /*1FFD */ unsigned char bRes1FFD;
+ /*1FFE */ unsigned char ToPcInt;
+ /* E1_intclear; on CHAMP2: E1_intset */
+ /*1FFF */ unsigned char ToHyInt;
+ /* E1_intset; on CHAMP2: E1_intclear */
+} tErgDpram;
+
+/**********************************************/
+/* PCI9050 controller local register offsets: */
+/* copied from boarderg.c */
+/**********************************************/
+#define PCI9050_INTR_REG 0x4C /* Interrupt register */
+#define PCI9050_USER_IO 0x51 /* User I/O register */
+
+/* bitmask for PCI9050_INTR_REG: */
+#define PCI9050_INTR_REG_EN1 0x01 /* 1= enable (def.), 0= disable */
+#define PCI9050_INTR_REG_POL1 0x02 /* 1= active high (def.), 0= active low */
+#define PCI9050_INTR_REG_STAT1 0x04 /* 1= intr. active, 0= intr. not active (def.) */
+#define PCI9050_INTR_REG_ENPCI 0x40 /* 1= PCI interrupts enable (def.) */
+
+/* bitmask for PCI9050_USER_IO: */
+#define PCI9050_USER_IO_EN3 0x02 /* 1= disable , 0= enable (def.) */
+#define PCI9050_USER_IO_DIR3 0x04 /* 1= output (def.), 0= input */
+#define PCI9050_USER_IO_DAT3 0x08 /* 1= high (def.) , 0= low */
+
+#define PCI9050_E1_RESET (PCI9050_USER_IO_DIR3) /* 0x04 */
+#define PCI9050_E1_RUN (PCI9050_USER_IO_DAT3 | PCI9050_USER_IO_DIR3) /* 0x0C */
diff --git a/drivers/staging/isdn/hysdn/hycapi.c b/drivers/staging/isdn/hysdn/hycapi.c
new file mode 100644
index 000000000000..a2c15cd7bf67
--- /dev/null
+++ b/drivers/staging/isdn/hysdn/hycapi.c
@@ -0,0 +1,785 @@
+/* $Id: hycapi.c,v 1.8.6.4 2001/09/23 22:24:54 kai Exp $
+ *
+ * Linux driver for HYSDN cards, CAPI2.0-Interface.
+ *
+ * Author Ulrich Albrecht <u.albrecht@hypercope.de> for Hypercope GmbH
+ * Copyright 2000 by Hypercope GmbH
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/signal.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+
+#define VER_DRIVER 0
+#define VER_CARDTYPE 1
+#define VER_HWID 2
+#define VER_SERIAL 3
+#define VER_OPTION 4
+#define VER_PROTO 5
+#define VER_PROFILE 6
+#define VER_CAPI 7
+
+#include "hysdn_defs.h"
+#include <linux/kernelcapi.h>
+
+static char hycapi_revision[] = "$Revision: 1.8.6.4 $";
+
+unsigned int hycapi_enable = 0xffffffff;
+module_param(hycapi_enable, uint, 0);
+
+typedef struct _hycapi_appl {
+ unsigned int ctrl_mask;
+ capi_register_params rp;
+ struct sk_buff *listen_req[CAPI_MAXCONTR];
+} hycapi_appl;
+
+static hycapi_appl hycapi_applications[CAPI_MAXAPPL];
+
+static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
+
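+/*
+ * Check whether application app_id is registered on controller ctrl_no.
+ * Returns 1 if it is, 0 if it is not, and -1 on an invalid id/controller.
+ */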
+static inline int _hycapi_appCheck(int app_id, int ctrl_no)
+{
+ if ((ctrl_no <= 0) || (ctrl_no > CAPI_MAXCONTR) || (app_id <= 0) ||
+ (app_id > CAPI_MAXAPPL))
+ {
+ printk(KERN_ERR "HYCAPI: Invalid request app_id %d for controller %d", app_id, ctrl_no);
+ return -1;
+ }
+ return ((hycapi_applications[app_id - 1].ctrl_mask & (1 << (ctrl_no-1))) != 0);
+}
+
+/******************************
+Kernel-Capi callback reset_ctr
+******************************/
+
+static void
+hycapi_reset_ctr(struct capi_ctr *ctrl)
+{
+ hycapictrl_info *cinfo = ctrl->driverdata;
+
+#ifdef HYCAPI_PRINTFNAMES
+ printk(KERN_NOTICE "HYCAPI hycapi_reset_ctr\n");
+#endif
+ capilib_release(&cinfo->ncci_head);
+ capi_ctr_down(ctrl);
+}
+
+/******************************
+Kernel-Capi callback remove_ctr
+******************************/
+
+static void
+hycapi_remove_ctr(struct capi_ctr *ctrl)
+{
+ int i;
+ hycapictrl_info *cinfo = NULL;
+ hysdn_card *card = NULL;
+#ifdef HYCAPI_PRINTFNAMES
+ printk(KERN_NOTICE "HYCAPI hycapi_remove_ctr\n");
+#endif
+ cinfo = (hycapictrl_info *)(ctrl->driverdata);
+ if (!cinfo) {
+ printk(KERN_ERR "No hycapictrl_info set!");
+ return;
+ }
+ card = cinfo->card;
+ capi_ctr_suspend_output(ctrl);
+ for (i = 0; i < CAPI_MAXAPPL; i++) {
+ if (hycapi_applications[i].listen_req[ctrl->cnr - 1]) {
+ kfree_skb(hycapi_applications[i].listen_req[ctrl->cnr - 1]);
+ hycapi_applications[i].listen_req[ctrl->cnr - 1] = NULL;
+ }
+ }
+ detach_capi_ctr(ctrl);
+ ctrl->driverdata = NULL;
+ kfree(card->hyctrlinfo);
+
+
+ card->hyctrlinfo = NULL;
+}
+
+/***********************************************************
+
+Queue a CAPI-message to the controller.
+
+***********************************************************/
+
+static void
+hycapi_sendmsg_internal(struct capi_ctr *ctrl, struct sk_buff *skb)
+{
+ hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
+ hysdn_card *card = cinfo->card;
+
+ spin_lock_irq(&cinfo->lock);
+#ifdef HYCAPI_PRINTFNAMES
+ printk(KERN_NOTICE "hycapi_send_message\n");
+#endif
+ cinfo->skbs[cinfo->in_idx++] = skb; /* add to buffer list */
+ if (cinfo->in_idx >= HYSDN_MAX_CAPI_SKB)
+ cinfo->in_idx = 0; /* wrap around */
+ cinfo->sk_count++; /* adjust counter */
+ if (cinfo->sk_count >= HYSDN_MAX_CAPI_SKB) {
+ /* inform upper layers we're full */
+ printk(KERN_ERR "HYSDN Card%d: CAPI-buffer overrun!\n",
+ card->myid);
+ capi_ctr_suspend_output(ctrl);
+ }
+ cinfo->tx_skb = skb;
+ spin_unlock_irq(&cinfo->lock);
+ schedule_work(&card->irq_queue);
+}
+
+/***********************************************************
+hycapi_register_internal
+
+Send down the CAPI_REGISTER-Command to the controller.
+This function will also be used if the adapter has been rebooted to
+re-register any applications in the private list.
+
+************************************************************/
+
+static void
+hycapi_register_internal(struct capi_ctr *ctrl, __u16 appl,
+ capi_register_params *rp)
+{
+ char ExtFeatureDefaults[] = "49 /0/0/0/0,*/1,*/2,*/3,*/4,*/5,*/6,*/7,*/8,*/9,*";
+ hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
+ hysdn_card *card = cinfo->card;
+ struct sk_buff *skb;
+ __u16 len;
+ __u8 _command = 0xa0, _subcommand = 0x80;
+ __u16 MessageNumber = 0x0000;
+ __u16 MessageBufferSize = 0;
+ int slen = strlen(ExtFeatureDefaults);
+#ifdef HYCAPI_PRINTFNAMES
+ printk(KERN_NOTICE "hycapi_register_appl\n");
+#endif
+ MessageBufferSize = rp->level3cnt * rp->datablkcnt * rp->datablklen;
+
+ len = CAPI_MSG_BASELEN + 8 + slen + 1;
+ if (!(skb = alloc_skb(len, GFP_ATOMIC))) {
+ printk(KERN_ERR "HYSDN card%d: memory squeeze in hycapi_register_appl\n",
+ card->myid);
+ return;
+ }
+ skb_put_data(skb, &len, sizeof(__u16));
+ skb_put_data(skb, &appl, sizeof(__u16));
+ skb_put_data(skb, &_command, sizeof(__u8));
+ skb_put_data(skb, &_subcommand, sizeof(__u8));
+ skb_put_data(skb, &MessageNumber, sizeof(__u16));
+ skb_put_data(skb, &MessageBufferSize, sizeof(__u16));
+ skb_put_data(skb, &(rp->level3cnt), sizeof(__u16));
+ skb_put_data(skb, &(rp->datablkcnt), sizeof(__u16));
+ skb_put_data(skb, &(rp->datablklen), sizeof(__u16));
+ skb_put_data(skb, ExtFeatureDefaults, slen);
+ hycapi_applications[appl - 1].ctrl_mask |= (1 << (ctrl->cnr - 1));
+ hycapi_send_message(ctrl, skb);
+}
+
+/************************************************************
+hycapi_restart_internal
+
+After an adapter has been rebooted, re-register all applications and
+send a LISTEN_REQ (if there has been one).
+
+*************************************************************/
+
+static void hycapi_restart_internal(struct capi_ctr *ctrl)
+{
+ int i;
+ struct sk_buff *skb;
+#ifdef HYCAPI_PRINTFNAMES
+ printk(KERN_WARNING "HYSDN: hycapi_restart_internal");
+#endif
+ for (i = 0; i < CAPI_MAXAPPL; i++) {
+ if (_hycapi_appCheck(i + 1, ctrl->cnr) == 1) {
+ hycapi_register_internal(ctrl, i + 1,
+ &hycapi_applications[i].rp);
+ if (hycapi_applications[i].listen_req[ctrl->cnr - 1]) {
+ skb = skb_copy(hycapi_applications[i].listen_req[ctrl->cnr - 1], GFP_ATOMIC);
+ hycapi_sendmsg_internal(ctrl, skb);
+ }
+ }
+ }
+}
+
+/*************************************************************
+Register an application.
+Error-checking is done for CAPI-compliance.
+
+The application is recorded in the internal list.
+*************************************************************/
+
+static void
+hycapi_register_appl(struct capi_ctr *ctrl, __u16 appl,
+ capi_register_params *rp)
+{
+ int MaxLogicalConnections = 0, MaxBDataBlocks = 0, MaxBDataLen = 0;
+ hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
+ hysdn_card *card = cinfo->card;
+ int chk = _hycapi_appCheck(appl, ctrl->cnr);
+ if (chk < 0) {
+ return;
+ }
+ if (chk == 1) {
+ printk(KERN_INFO "HYSDN: apl %d already registered\n", appl);
+ return;
+ }
+ MaxBDataBlocks = rp->datablkcnt > CAPI_MAXDATAWINDOW ? CAPI_MAXDATAWINDOW : rp->datablkcnt;
+ rp->datablkcnt = MaxBDataBlocks;
+ MaxBDataLen = rp->datablklen < 1024 ? 1024 : rp->datablklen;
+ rp->datablklen = MaxBDataLen;
+
+ MaxLogicalConnections = rp->level3cnt;
+ if (MaxLogicalConnections < 0) {
+ MaxLogicalConnections = card->bchans * -MaxLogicalConnections;
+ }
+ if (MaxLogicalConnections == 0) {
+ MaxLogicalConnections = card->bchans;
+ }
+
+ rp->level3cnt = MaxLogicalConnections;
+ memcpy(&hycapi_applications[appl - 1].rp,
+ rp, sizeof(capi_register_params));
+}
+
+/*********************************************************************
+
+hycapi_release_internal
+
+Send down a CAPI_RELEASE to the controller.
+*********************************************************************/
+
+static void hycapi_release_internal(struct capi_ctr *ctrl, __u16 appl)
+{
+ hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
+ hysdn_card *card = cinfo->card;
+ struct sk_buff *skb;
+ __u16 len;
+ __u8 _command = 0xa1, _subcommand = 0x80;
+ __u16 MessageNumber = 0x0000;
+
+ capilib_release_appl(&cinfo->ncci_head, appl);
+
+#ifdef HYCAPI_PRINTFNAMES
+ printk(KERN_NOTICE "hycapi_release_appl\n");
+#endif
+ len = CAPI_MSG_BASELEN;
+ if (!(skb = alloc_skb(len, GFP_ATOMIC))) {
+ printk(KERN_ERR "HYSDN card%d: memory squeeze in hycapi_register_appl\n",
+ card->myid);
+ return;
+ }
+ skb_put_data(skb, &len, sizeof(__u16));
+ skb_put_data(skb, &appl, sizeof(__u16));
+ skb_put_data(skb, &_command, sizeof(__u8));
+ skb_put_data(skb, &_subcommand, sizeof(__u8));
+ skb_put_data(skb, &MessageNumber, sizeof(__u16));
+ hycapi_send_message(ctrl, skb);
+ hycapi_applications[appl - 1].ctrl_mask &= ~(1 << (ctrl->cnr - 1));
+}
+
+/******************************************************************
+hycapi_release_appl
+
+Release the application from the internal list and remove its
+registration at controller level.
+******************************************************************/
+
+static void
+hycapi_release_appl(struct capi_ctr *ctrl, __u16 appl)
+{
+ int chk;
+
+ chk = _hycapi_appCheck(appl, ctrl->cnr);
+ if (chk < 0) {
+ printk(KERN_ERR "HYCAPI: Releasing invalid appl %d on controller %d\n", appl, ctrl->cnr);
+ return;
+ }
+ if (hycapi_applications[appl - 1].listen_req[ctrl->cnr - 1]) {
+ kfree_skb(hycapi_applications[appl - 1].listen_req[ctrl->cnr - 1]);
+ hycapi_applications[appl - 1].listen_req[ctrl->cnr - 1] = NULL;
+ }
+ if (chk == 1)
+ {
+ hycapi_release_internal(ctrl, appl);
+ }
+}
+
+
+/**************************************************************
+Kill a single controller.
+**************************************************************/
+
+int hycapi_capi_release(hysdn_card *card)
+{
+ hycapictrl_info *cinfo = card->hyctrlinfo;
+ struct capi_ctr *ctrl;
+#ifdef HYCAPI_PRINTFNAMES
+ printk(KERN_NOTICE "hycapi_capi_release\n");
+#endif
+ if (cinfo) {
+ ctrl = &cinfo->capi_ctrl;
+ hycapi_remove_ctr(ctrl);
+ }
+ return 0;
+}
+
+/**************************************************************
+hycapi_capi_stop
+
+Stop CAPI-Output on a card. (e.g. during reboot)
+***************************************************************/
+
+int hycapi_capi_stop(hysdn_card *card)
+{
+ hycapictrl_info *cinfo = card->hyctrlinfo;
+ struct capi_ctr *ctrl;
+#ifdef HYCAPI_PRINTFNAMES
+ printk(KERN_NOTICE "hycapi_capi_stop\n");
+#endif
+ if (cinfo) {
+ ctrl = &cinfo->capi_ctrl;
+/* ctrl->suspend_output(ctrl); */
+ capi_ctr_down(ctrl);
+ }
+ return 0;
+}
+
+/***************************************************************
+hycapi_send_message
+
+Send a message to the controller.
+
+Messages are parsed for their Command/Subcommand-type, and appropriate
+actions are performed.
+
+Note that we have to muck around with a 64Bit-DATA_REQ as there are
+firmware-releases that do not check the MsgLen-Indication!
+
+***************************************************************/
+
+static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
+{
+ __u16 appl_id;
+ int _len, _len2;
+ __u8 msghead[64];
+ hycapictrl_info *cinfo = ctrl->driverdata;
+ u16 retval = CAPI_NOERROR;
+
+ appl_id = CAPIMSG_APPID(skb->data);
+ switch (_hycapi_appCheck(appl_id, ctrl->cnr))
+ {
+ case 0:
+/* printk(KERN_INFO "Need to register\n"); */
+ hycapi_register_internal(ctrl,
+ appl_id,
+ &(hycapi_applications[appl_id - 1].rp));
+ break;
+ case 1:
+ break;
+ default:
+ printk(KERN_ERR "HYCAPI: Controller mixup!\n");
+ retval = CAPI_ILLAPPNR;
+ goto out;
+ }
+ switch (CAPIMSG_CMD(skb->data)) {
+ case CAPI_DISCONNECT_B3_RESP:
+ capilib_free_ncci(&cinfo->ncci_head, appl_id,
+ CAPIMSG_NCCI(skb->data));
+ break;
+ case CAPI_DATA_B3_REQ:
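+ /*
+ * Strip the 64-bit data pointer from oversized DATA_B3_REQ
+ * headers: the first 22 header bytes are moved directly in
+ * front of the payload and the message length is set to 22,
+ * as expected by firmware that ignores the MsgLen field.
+ */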
+ _len = CAPIMSG_LEN(skb->data);
+ if (_len > 22) {
+ _len2 = _len - 22;
+ skb_copy_from_linear_data(skb, msghead, 22);
+ skb_copy_to_linear_data_offset(skb, _len2,
+ msghead, 22);
+ skb_pull(skb, _len2);
+ CAPIMSG_SETLEN(skb->data, 22);
+ retval = capilib_data_b3_req(&cinfo->ncci_head,
+ CAPIMSG_APPID(skb->data),
+ CAPIMSG_NCCI(skb->data),
+ CAPIMSG_MSGID(skb->data));
+ }
+ break;
+ case CAPI_LISTEN_REQ:
+ if (hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1])
+ {
+ kfree_skb(hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1]);
+ hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1] = NULL;
+ }
+ if (!(hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1] = skb_copy(skb, GFP_ATOMIC)))
+ {
+ printk(KERN_ERR "HYSDN: memory squeeze in private_listen\n");
+ }
+ break;
+ default:
+ break;
+ }
+out:
+ if (retval == CAPI_NOERROR)
+ hycapi_sendmsg_internal(ctrl, skb);
+ else
+ dev_kfree_skb_any(skb);
+
+ return retval;
+}
+
+static int hycapi_proc_show(struct seq_file *m, void *v)
+{
+ struct capi_ctr *ctrl = m->private;
+ hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
+ hysdn_card *card = cinfo->card;
+ char *s;
+
+ seq_printf(m, "%-16s %s\n", "name", cinfo->cardname);
+ seq_printf(m, "%-16s 0x%x\n", "io", card->iobase);
+ seq_printf(m, "%-16s %d\n", "irq", card->irq);
+
+ switch (card->brdtype) {
+ case BD_PCCARD: s = "HYSDN Hycard"; break;
+ case BD_ERGO: s = "HYSDN Ergo2"; break;
+ case BD_METRO: s = "HYSDN Metro4"; break;
+ case BD_CHAMP2: s = "HYSDN Champ2"; break;
+ case BD_PLEXUS: s = "HYSDN Plexus30"; break;
+ default: s = "???"; break;
+ }
+ seq_printf(m, "%-16s %s\n", "type", s);
+ if ((s = cinfo->version[VER_DRIVER]) != NULL)
+ seq_printf(m, "%-16s %s\n", "ver_driver", s);
+ if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
+ seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
+ if ((s = cinfo->version[VER_SERIAL]) != NULL)
+ seq_printf(m, "%-16s %s\n", "ver_serial", s);
+
+ seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
+
+ return 0;
+}
+
+/**************************************************************
+hycapi_load_firmware
+
+This does NOT load any firmware; the callback is only provided because
+registration at the CAPI interface requires it.
+
+**************************************************************/
+
+static int hycapi_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
+{
+#ifdef HYCAPI_PRINTFNAMES
+ printk(KERN_NOTICE "hycapi_load_firmware\n");
+#endif
+ return 0;
+}
+
+
+static char *hycapi_procinfo(struct capi_ctr *ctrl)
+{
+ hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
+#ifdef HYCAPI_PRINTFNAMES
+ printk(KERN_NOTICE "%s\n", __func__);
+#endif
+ if (!cinfo)
+ return "";
+ sprintf(cinfo->infobuf, "%s %s 0x%x %d %s",
+ cinfo->cardname[0] ? cinfo->cardname : "-",
+ cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
+ cinfo->card ? cinfo->card->iobase : 0x0,
+ cinfo->card ? cinfo->card->irq : 0,
+ hycapi_revision
+ );
+ return cinfo->infobuf;
+}
+
+/******************************************************************
+hycapi_rx_capipkt
+
+Receive a capi-message.
+
+All DATA_B3_IND messages are converted to the 64-bit-extension compatible format.
+New NCCIs are created if necessary.
+*******************************************************************/
+
+void
+hycapi_rx_capipkt(hysdn_card *card, unsigned char *buf, unsigned short len)
+{
+ struct sk_buff *skb;
+ hycapictrl_info *cinfo = card->hyctrlinfo;
+ struct capi_ctr *ctrl;
+ __u16 ApplId;
+ __u16 MsgLen, info;
+ __u16 len2, CapiCmd;
+ __u32 CP64[2] = {0, 0};
+#ifdef HYCAPI_PRINTFNAMES
+ printk(KERN_NOTICE "hycapi_rx_capipkt\n");
+#endif
+ if (!cinfo) {
+ return;
+ }
+ ctrl = &cinfo->capi_ctrl;
+ if (len < CAPI_MSG_BASELEN) {
+ printk(KERN_ERR "HYSDN Card%d: invalid CAPI-message, length %d!\n",
+ card->myid, len);
+ return;
+ }
+ MsgLen = CAPIMSG_LEN(buf);
+ ApplId = CAPIMSG_APPID(buf);
+ CapiCmd = CAPIMSG_CMD(buf);
+
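+ /* a short (non-64-bit) DATA_B3_IND header is padded to the 30-byte
+    extended layout by inserting a zeroed 64-bit data pointer */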
+ if ((CapiCmd == CAPI_DATA_B3_IND) && (MsgLen < 30)) {
+ len2 = len + (30 - MsgLen);
+ if (!(skb = alloc_skb(len2, GFP_ATOMIC))) {
+ printk(KERN_ERR "HYSDN Card%d: incoming packet dropped\n",
+ card->myid);
+ return;
+ }
+ skb_put_data(skb, buf, MsgLen);
+ skb_put_data(skb, CP64, 2 * sizeof(__u32));
+ skb_put_data(skb, buf + MsgLen, len - MsgLen);
+ CAPIMSG_SETLEN(skb->data, 30);
+ } else {
+ if (!(skb = alloc_skb(len, GFP_ATOMIC))) {
+ printk(KERN_ERR "HYSDN Card%d: incoming packet dropped\n",
+ card->myid);
+ return;
+ }
+ skb_put_data(skb, buf, len);
+ }
+ switch (CAPIMSG_CMD(skb->data))
+ {
+ case CAPI_CONNECT_B3_CONF:
+/* Check info-field for error-indication: */
+ info = CAPIMSG_U16(skb->data, 12);
+ switch (info)
+ {
+ case 0:
+ capilib_new_ncci(&cinfo->ncci_head, ApplId, CAPIMSG_NCCI(skb->data),
+ hycapi_applications[ApplId - 1].rp.datablkcnt);
+
+ break;
+ case 0x0001:
+ printk(KERN_ERR "HYSDN Card%d: NCPI not supported by current "
+ "protocol. NCPI ignored.\n", card->myid);
+ break;
+ case 0x2001:
+ printk(KERN_ERR "HYSDN Card%d: Message not supported in"
+ " current state\n", card->myid);
+ break;
+ case 0x2002:
+ printk(KERN_ERR "HYSDN Card%d: invalid PLCI\n", card->myid);
+ break;
+ case 0x2004:
+ printk(KERN_ERR "HYSDN Card%d: out of NCCI\n", card->myid);
+ break;
+ case 0x3008:
+ printk(KERN_ERR "HYSDN Card%d: NCPI not supported\n",
+ card->myid);
+ break;
+ default:
+ printk(KERN_ERR "HYSDN Card%d: Info in CONNECT_B3_CONF: %d\n",
+ card->myid, info);
+ break;
+ }
+ break;
+ case CAPI_CONNECT_B3_IND:
+ capilib_new_ncci(&cinfo->ncci_head, ApplId,
+ CAPIMSG_NCCI(skb->data),
+ hycapi_applications[ApplId - 1].rp.datablkcnt);
+ break;
+ case CAPI_DATA_B3_CONF:
+ capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
+ CAPIMSG_NCCI(skb->data),
+ CAPIMSG_MSGID(skb->data));
+ break;
+ default:
+ break;
+ }
+ capi_ctr_handle_message(ctrl, ApplId, skb);
+}
+
+/******************************************************************
+hycapi_tx_capiack
+
+Internally acknowledge a msg sent. This will remove the msg from the
+internal queue.
+
+*******************************************************************/
+
+void hycapi_tx_capiack(hysdn_card *card)
+{
+ hycapictrl_info *cinfo = card->hyctrlinfo;
+#ifdef HYCAPI_PRINTFNAMES
+ printk(KERN_NOTICE "hycapi_tx_capiack\n");
+#endif
+ if (!cinfo) {
+ return;
+ }
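+ /* free the acknowledged skb, advance the ring's read index and
+    resume CAPI output if the queue had been full */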
+ spin_lock_irq(&cinfo->lock);
+ kfree_skb(cinfo->skbs[cinfo->out_idx]); /* free skb */
+ cinfo->skbs[cinfo->out_idx++] = NULL;
+ if (cinfo->out_idx >= HYSDN_MAX_CAPI_SKB)
+ cinfo->out_idx = 0; /* wrap around */
+
+ if (cinfo->sk_count-- == HYSDN_MAX_CAPI_SKB) /* dec usage count */
+ capi_ctr_resume_output(&cinfo->capi_ctrl);
+ spin_unlock_irq(&cinfo->lock);
+}
+
+/***************************************************************
+hycapi_tx_capiget(hysdn_card *card)
+
+This is called when polling for messages to SEND.
+
+****************************************************************/
+
+struct sk_buff *
+hycapi_tx_capiget(hysdn_card *card)
+{
+ hycapictrl_info *cinfo = card->hyctrlinfo;
+ if (!cinfo) {
+ return (struct sk_buff *)NULL;
+ }
+ if (!cinfo->sk_count)
+ return (struct sk_buff *)NULL; /* nothing available */
+
+ return (cinfo->skbs[cinfo->out_idx]); /* next packet to send */
+}
+
+
+/**********************************************************
+int hycapi_init()
+
+attach the capi-driver to the kernel-capi.
+
+***********************************************************/
+
+int hycapi_init(void)
+{
+ int i;
+ for (i = 0; i < CAPI_MAXAPPL; i++) {
+ memset(&(hycapi_applications[i]), 0, sizeof(hycapi_appl));
+ }
+ return (0);
+}
+
+/**************************************************************
+hycapi_cleanup(void)
+
+detach the capi-driver from the kernel-capi. This should eventually
+free some more resources; nothing is done here yet.
+**************************************************************/
+
+void
+hycapi_cleanup(void)
+{
+}
+
+/********************************************************************
+hycapi_capi_create(hysdn_card *card)
+
+Attach the card with its capi-ctrl.
+*********************************************************************/
+
+static void hycapi_fill_profile(hysdn_card *card)
+{
+ hycapictrl_info *cinfo = NULL;
+ struct capi_ctr *ctrl = NULL;
+ cinfo = card->hyctrlinfo;
+ if (!cinfo) return;
+ ctrl = &cinfo->capi_ctrl;
+ strcpy(ctrl->manu, "Hypercope");
+ ctrl->version.majorversion = 2;
+ ctrl->version.minorversion = 0;
+ ctrl->version.majormanuversion = 3;
+ ctrl->version.minormanuversion = 2;
+ ctrl->profile.ncontroller = card->myid;
+ ctrl->profile.nbchannel = card->bchans;
+ ctrl->profile.goptions = GLOBAL_OPTION_INTERNAL_CONTROLLER |
+ GLOBAL_OPTION_B_CHANNEL_OPERATION;
+ ctrl->profile.support1 = B1_PROT_64KBIT_HDLC |
+ (card->faxchans ? B1_PROT_T30 : 0) |
+ B1_PROT_64KBIT_TRANSPARENT;
+ ctrl->profile.support2 = B2_PROT_ISO7776 |
+ (card->faxchans ? B2_PROT_T30 : 0) |
+ B2_PROT_TRANSPARENT;
+ ctrl->profile.support3 = B3_PROT_TRANSPARENT |
+ B3_PROT_T90NL |
+ (card->faxchans ? B3_PROT_T30 : 0) |
+ (card->faxchans ? B3_PROT_T30EXT : 0) |
+ B3_PROT_ISO8208;
+}
+
+int
+hycapi_capi_create(hysdn_card *card)
+{
+ hycapictrl_info *cinfo = NULL;
+ struct capi_ctr *ctrl = NULL;
+ int retval;
+#ifdef HYCAPI_PRINTFNAMES
+ printk(KERN_NOTICE "hycapi_capi_create\n");
+#endif
+ if ((hycapi_enable & (1 << card->myid)) == 0) {
+ return 1;
+ }
+ if (!card->hyctrlinfo) {
+ cinfo = kzalloc(sizeof(hycapictrl_info), GFP_ATOMIC);
+ if (!cinfo) {
+ printk(KERN_WARNING "HYSDN: no memory for capi-ctrl.\n");
+ return -ENOMEM;
+ }
+ card->hyctrlinfo = cinfo;
+ cinfo->card = card;
+ spin_lock_init(&cinfo->lock);
+ INIT_LIST_HEAD(&cinfo->ncci_head);
+
+ switch (card->brdtype) {
+ case BD_PCCARD: strcpy(cinfo->cardname, "HYSDN Hycard"); break;
+ case BD_ERGO: strcpy(cinfo->cardname, "HYSDN Ergo2"); break;
+ case BD_METRO: strcpy(cinfo->cardname, "HYSDN Metro4"); break;
+ case BD_CHAMP2: strcpy(cinfo->cardname, "HYSDN Champ2"); break;
+ case BD_PLEXUS: strcpy(cinfo->cardname, "HYSDN Plexus30"); break;
+ default: strcpy(cinfo->cardname, "HYSDN ???"); break;
+ }
+
+ ctrl = &cinfo->capi_ctrl;
+ ctrl->driver_name = "hycapi";
+ ctrl->driverdata = cinfo;
+ ctrl->register_appl = hycapi_register_appl;
+ ctrl->release_appl = hycapi_release_appl;
+ ctrl->send_message = hycapi_send_message;
+ ctrl->load_firmware = hycapi_load_firmware;
+ ctrl->reset_ctr = hycapi_reset_ctr;
+ ctrl->procinfo = hycapi_procinfo;
+ ctrl->proc_show = hycapi_proc_show;
+ strcpy(ctrl->name, cinfo->cardname);
+ ctrl->owner = THIS_MODULE;
+
+ retval = attach_capi_ctr(ctrl);
+ if (retval) {
+ printk(KERN_ERR "hycapi: attach controller failed.\n");
+ return -EBUSY;
+ }
+ /* fill in the blanks: */
+ hycapi_fill_profile(card);
+ capi_ctr_ready(ctrl);
+ } else {
+ /* resume output on stopped ctrl */
+ ctrl = &card->hyctrlinfo->capi_ctrl;
+ hycapi_fill_profile(card);
+ capi_ctr_ready(ctrl);
+ hycapi_restart_internal(ctrl);
+/* ctrl->resume_output(ctrl); */
+ }
+ return 0;
+}
diff --git a/drivers/staging/isdn/hysdn/hysdn_boot.c b/drivers/staging/isdn/hysdn/hysdn_boot.c
new file mode 100644
index 000000000000..ba177c3a621b
--- /dev/null
+++ b/drivers/staging/isdn/hysdn/hysdn_boot.c
@@ -0,0 +1,400 @@
+/* $Id: hysdn_boot.c,v 1.4.6.4 2001/09/23 22:24:54 kai Exp $
+ *
+ * Linux driver for HYSDN cards
+ * specific routines for booting and pof handling
+ *
+ * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
+ * Copyright 1999 by Werner Cornelius (werner@titro.de)
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "hysdn_defs.h"
+#include "hysdn_pof.h"
+
+/********************************/
+/* defines for pof read handler */
+/********************************/
+#define POF_READ_FILE_HEAD 0
+#define POF_READ_TAG_HEAD 1
+#define POF_READ_TAG_DATA 2
+
+/************************************************************/
+/* definition of boot specific data area. This data is only */
+/* needed during boot and so allocated dynamically. */
+/************************************************************/
+struct boot_data {
+ unsigned short Cryptor; /* for use with Decrypt function */
+ unsigned short Nrecs; /* records remaining in file */
+ unsigned char pof_state;/* actual state of read handler */
+ unsigned char is_crypted;/* card data is crypted */
+ int BufSize; /* actual number of bytes buffered */
+ int last_error; /* last occurred error */
+ unsigned short pof_recid;/* actual pof recid */
+ unsigned long pof_reclen;/* total length of pof record data */
+ unsigned long pof_recoffset;/* actual offset inside pof record */
+ union {
+ unsigned char BootBuf[BOOT_BUF_SIZE];/* buffer as byte count */
+ tPofRecHdr PofRecHdr; /* header for actual record/chunk */
+ tPofFileHdr PofFileHdr; /* header from POF file */
+ tPofTimeStamp PofTime; /* time information */
+ } buf;
+};
+
+/*****************************************************/
+/* start decryption of successive POF file chunks. */
+/* */
+/* to be called at start of POF file reading, */
+/* before starting any decryption on any POF record. */
+/*****************************************************/
+static void
+StartDecryption(struct boot_data *boot)
+{
+ boot->Cryptor = CRYPT_STARTTERM;
+} /* StartDecryption */
+
+
+/***************************************************************/
+/* decrypt complete BootBuf */
+/* NOTE: decryption must be applied to all boot tags or to none - */
+/* to HI and LO boot loader and (all) seq tags, because */
+/* global Cryptor is started for whole POF. */
+/***************************************************************/
+static void
+DecryptBuf(struct boot_data *boot, int cnt)
+{
+ unsigned char *bufp = boot->buf.BootBuf;
+
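+ /* step the Cryptor word like a Galois LFSR (shift right, XOR with
+    CRYPT_FEEDTERM when the dropped bit was set) and XOR its low byte
+    onto every buffer byte */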
+ while (cnt--) {
+ boot->Cryptor = (boot->Cryptor >> 1) ^ ((boot->Cryptor & 1U) ? CRYPT_FEEDTERM : 0);
+ *bufp++ ^= (unsigned char)boot->Cryptor;
+ }
+} /* DecryptBuf */
+
+/********************************************************************************/
+/* pof_handle_data executes the required actions dependent on the active record */
+/* id. If successful, 0 is returned; a negative value indicates an error.      */
+/********************************************************************************/
+static int
+pof_handle_data(hysdn_card *card, int datlen)
+{
+ struct boot_data *boot = card->boot; /* pointer to boot specific data */
+ long l;
+ unsigned char *imgp;
+ int img_len;
+
+ /* handle the different record types */
+ switch (boot->pof_recid) {
+
+ case TAG_TIMESTMP:
+ if (card->debug_flags & LOG_POF_RECORD)
+ hysdn_addlog(card, "POF created %s", boot->buf.PofTime.DateTimeText);
+ break;
+
+ case TAG_CBOOTDTA:
+ DecryptBuf(boot, datlen); /* we need to decrypt the buffer */
+ /* fall through */
+ case TAG_BOOTDTA:
+ if (card->debug_flags & LOG_POF_RECORD)
+ hysdn_addlog(card, "POF got %s len=%d offs=0x%lx",
+ (boot->pof_recid == TAG_CBOOTDTA) ? "CBOOTDATA" : "BOOTDTA",
+ datlen, boot->pof_recoffset);
+
+ if (boot->pof_reclen != POF_BOOT_LOADER_TOTAL_SIZE) {
+ boot->last_error = EPOF_BAD_IMG_SIZE; /* invalid length */
+ return (boot->last_error);
+ }
+ imgp = boot->buf.BootBuf; /* start of buffer */
+ img_len = datlen; /* maximum length to transfer */
+
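+ /* bytes that lie before the boot loader code offset inside the
+    current 16 KB page are padding and are skipped */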
+ l = POF_BOOT_LOADER_OFF_IN_PAGE -
+ (boot->pof_recoffset & (POF_BOOT_LOADER_PAGE_SIZE - 1));
+ if (l > 0) {
+ /* buffer needs to be truncated */
+ imgp += l; /* advance pointer */
+ img_len -= l; /* adjust len */
+ }
+ /* at this point no special handling for data wrapping over buffer */
+ /* is necessary, because the boot image always will be adjusted to */
+ /* match a page boundary inside the buffer. */
+ /* The buffer for the boot image on the card is filled in 2 cycles */
+ /* first the 1024 hi-words are put in the buffer, then the low 1024 */
+ /* word are handled in the same way with different offset. */
+
+ if (img_len > 0) {
+ /* data available for copy */
+ if ((boot->last_error =
+ card->writebootimg(card, imgp,
+ (boot->pof_recoffset > POF_BOOT_LOADER_PAGE_SIZE) ? 2 : 0)) < 0)
+ return (boot->last_error);
+ }
+ break; /* end of case boot image hi/lo */
+
+ case TAG_CABSDATA:
+ DecryptBuf(boot, datlen); /* we need to decrypt the buffer */
+ /* fall through */
+ case TAG_ABSDATA:
+ if (card->debug_flags & LOG_POF_RECORD)
+ hysdn_addlog(card, "POF got %s len=%d offs=0x%lx",
+ (boot->pof_recid == TAG_CABSDATA) ? "CABSDATA" : "ABSDATA",
+ datlen, boot->pof_recoffset);
+
+ if ((boot->last_error = card->writebootseq(card, boot->buf.BootBuf, datlen)) < 0)
+ return (boot->last_error); /* error writing data */
+
+ if (boot->pof_recoffset + datlen >= boot->pof_reclen)
+ return (card->waitpofready(card)); /* data completely spooled, wait for ready */
+
+ break; /* end of case boot seq data */
+
+ default:
+ if (card->debug_flags & LOG_POF_RECORD)
+ hysdn_addlog(card, "POF got data(id=0x%lx) len=%d offs=0x%lx", boot->pof_recid,
+ datlen, boot->pof_recoffset);
+
+ break; /* simply skip record */
+ } /* switch boot->pof_recid */
+
+ return (0);
+} /* pof_handle_data */
+
+
+/******************************************************************************/
+/* pof_write_buffer is called when the buffer has been filled with the needed */
+/* number of data bytes. The number delivered is additionally supplied for */
+/* verification. The function handles the data and returns the needed number  */
+/* of bytes for the next action. If the returned value is 0 or less, an error */
+/* occurred and booting must be aborted.                                       */
+/******************************************************************************/
+int
+pof_write_buffer(hysdn_card *card, int datlen)
+{
+ struct boot_data *boot = card->boot; /* pointer to boot specific data */
+
+ if (!boot)
+ return (-EFAULT); /* invalid call */
+ if (boot->last_error < 0)
+ return (boot->last_error); /* repeated error */
+
+ if (card->debug_flags & LOG_POF_WRITE)
+ hysdn_addlog(card, "POF write: got %d bytes ", datlen);
+
+ switch (boot->pof_state) {
+ case POF_READ_FILE_HEAD:
+ if (card->debug_flags & LOG_POF_WRITE)
+ hysdn_addlog(card, "POF write: checking file header");
+
+ if (datlen != sizeof(tPofFileHdr)) {
+ boot->last_error = -EPOF_INTERNAL;
+ break;
+ }
+ if (boot->buf.PofFileHdr.Magic != TAGFILEMAGIC) {
+ boot->last_error = -EPOF_BAD_MAGIC;
+ break;
+ }
+ /* Setup the new state and vars */
+ boot->Nrecs = (unsigned short)(boot->buf.PofFileHdr.N_PofRecs); /* limited to 65535 */
+ boot->pof_state = POF_READ_TAG_HEAD; /* now start with single tags */
+ boot->last_error = sizeof(tPofRecHdr); /* new length */
+ break;
+
+ case POF_READ_TAG_HEAD:
+ if (card->debug_flags & LOG_POF_WRITE)
+ hysdn_addlog(card, "POF write: checking tag header");
+
+ if (datlen != sizeof(tPofRecHdr)) {
+ boot->last_error = -EPOF_INTERNAL;
+ break;
+ }
+ boot->pof_recid = boot->buf.PofRecHdr.PofRecId; /* actual pof recid */
+ boot->pof_reclen = boot->buf.PofRecHdr.PofRecDataLen; /* total length */
+ boot->pof_recoffset = 0; /* no starting offset */
+
+ if (card->debug_flags & LOG_POF_RECORD)
+ hysdn_addlog(card, "POF: got record id=0x%lx length=%ld ",
+ boot->pof_recid, boot->pof_reclen);
+
+ boot->pof_state = POF_READ_TAG_DATA; /* now start with tag data */
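+ /* a non-negative last_error doubles as the byte count requested from the caller for the next write */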
+ if (boot->pof_reclen < BOOT_BUF_SIZE)
+ boot->last_error = boot->pof_reclen; /* limit size */
+ else
+ boot->last_error = BOOT_BUF_SIZE; /* maximum */
+
+ if (!boot->last_error) { /* no data inside record */
+ boot->pof_state = POF_READ_TAG_HEAD; /* now start with single tags */
+ boot->last_error = sizeof(tPofRecHdr); /* new length */
+ }
+ break;
+
+ case POF_READ_TAG_DATA:
+ if (card->debug_flags & LOG_POF_WRITE)
+ hysdn_addlog(card, "POF write: getting tag data");
+
+ if (datlen != boot->last_error) {
+ boot->last_error = -EPOF_INTERNAL;
+ break;
+ }
+ if ((boot->last_error = pof_handle_data(card, datlen)) < 0)
+ return (boot->last_error); /* an error occurred */
+ boot->pof_recoffset += datlen;
+ if (boot->pof_recoffset >= boot->pof_reclen) {
+ boot->pof_state = POF_READ_TAG_HEAD; /* now start with single tags */
+ boot->last_error = sizeof(tPofRecHdr); /* new length */
+ } else {
+ if (boot->pof_reclen - boot->pof_recoffset < BOOT_BUF_SIZE)
+ boot->last_error = boot->pof_reclen - boot->pof_recoffset; /* limit size */
+ else
+ boot->last_error = BOOT_BUF_SIZE; /* maximum */
+ }
+ break;
+
+ default:
+ boot->last_error = -EPOF_INTERNAL; /* unknown state */
+ break;
+ } /* switch (boot->pof_state) */
+
+ return (boot->last_error);
+} /* pof_write_buffer */
+
+
+/*******************************************************************************/
+/* pof_write_open is called when an open for boot on the cardlog device occurs. */
+/* The function returns the needed number of bytes for the next operation. If */
+/* the returned number is less than or equal to 0, an error specified by this  */
+/* code occurred. Additionally the pointer to the buffer data area is set on    */
+/* success.                                                                     */
+/*******************************************************************************/
+int
+pof_write_open(hysdn_card *card, unsigned char **bufp)
+{
+ struct boot_data *boot; /* pointer to boot specific data */
+
+ if (card->boot) {
+ if (card->debug_flags & LOG_POF_OPEN)
+ hysdn_addlog(card, "POF open: already opened for boot");
+ return (-ERR_ALREADY_BOOT); /* boot already active */
+ }
+ /* error no mem available */
+ if (!(boot = kzalloc(sizeof(struct boot_data), GFP_KERNEL))) {
+ if (card->debug_flags & LOG_MEM_ERR)
+ hysdn_addlog(card, "POF open: unable to allocate mem");
+ return (-EFAULT);
+ }
+ card->boot = boot;
+ card->state = CARD_STATE_BOOTING;
+
+ card->stopcard(card); /* first stop the card */
+ if (card->testram(card)) {
+ if (card->debug_flags & LOG_POF_OPEN)
+ hysdn_addlog(card, "POF open: DPRAM test failure");
+ boot->last_error = -ERR_BOARD_DPRAM;
+ card->state = CARD_STATE_BOOTERR; /* show boot error */
+ return (boot->last_error);
+ }
+ boot->BufSize = 0; /* Buffer is empty */
+ boot->pof_state = POF_READ_FILE_HEAD; /* read file header */
+ StartDecryption(boot); /* if POF File should be encrypted */
+
+ if (card->debug_flags & LOG_POF_OPEN)
+ hysdn_addlog(card, "POF open: success");
+
+ *bufp = boot->buf.BootBuf; /* point to buffer */
+ return (sizeof(tPofFileHdr));
+} /* pof_write_open */
+
+/********************************************************************************/
+/* pof_write_close is called when a close of boot on the cardlog device occurs. */
+/* The return value must be 0 if everything has happened as desired. */
+/********************************************************************************/
+int
+pof_write_close(hysdn_card *card)
+{
+ struct boot_data *boot = card->boot; /* pointer to boot specific data */
+
+ if (!boot)
+ return (-EFAULT); /* invalid call */
+
+ card->boot = NULL; /* no boot active */
+ kfree(boot);
+
+ if (card->state == CARD_STATE_RUN)
+ card->set_errlog_state(card, 1); /* activate error log */
+
+ if (card->debug_flags & LOG_POF_OPEN)
+ hysdn_addlog(card, "POF close: success");
+
+ return (0);
+} /* pof_write_close */
+
+/*********************************************************************************/
+/* EvalSysrTokData checks additional records delivered with the Sysready Message */
+/* when POF has been booted. A return value of 0 is used if no error occurred. */
+/*********************************************************************************/
+int
+EvalSysrTokData(hysdn_card *card, unsigned char *cp, int len)
+{
+ u_char *p;
+ u_char crc;
+
+ if (card->debug_flags & LOG_POF_RECORD)
+ hysdn_addlog(card, "SysReady Token data length %d", len);
+
+ if (len < 2) {
+ hysdn_addlog(card, "SysReady Token Data to short");
+ return (1);
+ }
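+ /* checksum: rotate the running value left (wrapping the top bit) and
+    add each data byte; the complemented result must match the trailing
+    CRC byte */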
+ for (p = cp, crc = 0; p < (cp + len - 2); p++)
+ if ((crc & 0x80))
+ crc = (((u_char) (crc << 1)) + 1) + *p;
+ else
+ crc = ((u_char) (crc << 1)) + *p;
+ crc = ~crc;
+ if (crc != *(cp + len - 1)) {
+ hysdn_addlog(card, "SysReady Token Data invalid CRC");
+ return (1);
+ }
+ len--; /* don't check CRC byte */
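+ /* tokens are TLV coded: one id byte, one length byte, then the
+    payload; SYSR_TOK_END terminates the stream */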
+ while (len > 0) {
+
+ if (*cp == SYSR_TOK_END)
+ return (0); /* End of Token stream */
+
+ if (len < (*(cp + 1) + 2)) {
+ hysdn_addlog(card, "token 0x%x invalid length %d", *cp, *(cp + 1));
+ return (1);
+ }
+ switch (*cp) {
+ case SYSR_TOK_B_CHAN: /* 1 */
+ if (*(cp + 1) != 1)
+ return (1); /* length invalid */
+ card->bchans = *(cp + 2);
+ break;
+
+ case SYSR_TOK_FAX_CHAN: /* 2 */
+ if (*(cp + 1) != 1)
+ return (1); /* length invalid */
+ card->faxchans = *(cp + 2);
+ break;
+
+ case SYSR_TOK_MAC_ADDR: /* 3 */
+ if (*(cp + 1) != 6)
+ return (1); /* length invalid */
+ memcpy(card->mac_addr, cp + 2, 6);
+ break;
+
+ default:
+ hysdn_addlog(card, "unknown token 0x%02x length %d", *cp, *(cp + 1));
+ break;
+ }
+ len -= (*(cp + 1) + 2); /* adjust len */
+ cp += (*(cp + 1) + 2); /* and pointer */
+ }
+
+ hysdn_addlog(card, "no end token found");
+ return (1);
+} /* EvalSysrTokData */
diff --git a/drivers/staging/isdn/hysdn/hysdn_defs.h b/drivers/staging/isdn/hysdn/hysdn_defs.h
new file mode 100644
index 000000000000..cdac46a21692
--- /dev/null
+++ b/drivers/staging/isdn/hysdn/hysdn_defs.h
@@ -0,0 +1,282 @@
+/* $Id: hysdn_defs.h,v 1.5.6.3 2001/09/23 22:24:54 kai Exp $
+ *
+ * Linux driver for HYSDN cards
+ * global definitions and exported vars and functions.
+ *
+ * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
+ * Copyright 1999 by Werner Cornelius (werner@titro.de)
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#ifndef HYSDN_DEFS_H
+#define HYSDN_DEFS_H
+
+#include <linux/hysdn_if.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+
+#include "ince1pc.h"
+
+#ifdef CONFIG_HYSDN_CAPI
+#include <linux/capi.h>
+#include <linux/isdn/capicmd.h>
+#include <linux/isdn/capiutil.h>
+#include <linux/isdn/capilli.h>
+
+/***************************/
+/* CAPI-Profile values. */
+/***************************/
+
+#define GLOBAL_OPTION_INTERNAL_CONTROLLER 0x0001
+#define GLOBAL_OPTION_EXTERNAL_CONTROLLER 0x0002
+#define GLOBAL_OPTION_HANDSET 0x0004
+#define GLOBAL_OPTION_DTMF 0x0008
+#define GLOBAL_OPTION_SUPPL_SERVICES 0x0010
+#define GLOBAL_OPTION_CHANNEL_ALLOCATION 0x0020
+#define GLOBAL_OPTION_B_CHANNEL_OPERATION 0x0040
+
+#define B1_PROT_64KBIT_HDLC 0x0001
+#define B1_PROT_64KBIT_TRANSPARENT 0x0002
+#define B1_PROT_V110_ASYNCH 0x0004
+#define B1_PROT_V110_SYNCH 0x0008
+#define B1_PROT_T30 0x0010
+#define B1_PROT_64KBIT_INV_HDLC 0x0020
+#define B1_PROT_56KBIT_TRANSPARENT 0x0040
+
+#define B2_PROT_ISO7776 0x0001
+#define B2_PROT_TRANSPARENT 0x0002
+#define B2_PROT_SDLC 0x0004
+#define B2_PROT_LAPD 0x0008
+#define B2_PROT_T30 0x0010
+#define B2_PROT_PPP 0x0020
+#define B2_PROT_TRANSPARENT_IGNORE_B1_FRAMING_ERRORS 0x0040
+
+#define B3_PROT_TRANSPARENT 0x0001
+#define B3_PROT_T90NL 0x0002
+#define B3_PROT_ISO8208 0x0004
+#define B3_PROT_X25_DCE 0x0008
+#define B3_PROT_T30 0x0010
+#define B3_PROT_T30EXT 0x0020
+
+#define HYSDN_MAXVERSION 8
+
+/* Number of sendbuffers in CAPI-queue */
+#define HYSDN_MAX_CAPI_SKB 20
+
+#endif /* CONFIG_HYSDN_CAPI*/
+
+/************************************************/
+/* constants and bits for debugging/log outputs */
+/************************************************/
+#define LOG_MAX_LINELEN 120
+#define DEB_OUT_SYSLOG 0x80000000 /* output to syslog instead of proc fs */
+#define LOG_MEM_ERR 0x00000001 /* log memory errors like kmalloc failure */
+#define LOG_POF_OPEN 0x00000010 /* log pof open and close activities */
+#define LOG_POF_RECORD 0x00000020 /* log pof record parser */
+#define LOG_POF_WRITE 0x00000040 /* log detailed pof write operation */
+#define LOG_POF_CARD 0x00000080 /* log pof related card functions */
+#define LOG_CNF_LINE 0x00000100 /* all conf lines are put to procfs */
+#define LOG_CNF_DATA 0x00000200 /* non comment conf lines are shown with channel */
+#define LOG_CNF_MISC 0x00000400 /* additional conf line debug outputs */
+#define LOG_SCHED_ASYN 0x00001000 /* debug schedulers async tx routines */
+#define LOG_PROC_OPEN 0x00100000 /* open and close from procfs are logged */
+#define LOG_PROC_ALL 0x00200000 /* all actions from procfs are logged */
+#define LOG_NET_INIT 0x00010000 /* network init and deinit logging */
+
+#define DEF_DEB_FLAGS 0x7fff000f /* everything is logged to procfs */
+
+/**********************************/
+/* proc filesystem name constants */
+/**********************************/
+#define PROC_SUBDIR_NAME "hysdn"
+#define PROC_CONF_BASENAME "cardconf"
+#define PROC_LOG_BASENAME "cardlog"
+
+/***********************************/
+/* PCI 32 bit parms for IO and MEM */
+/***********************************/
+#define PCI_REG_PLX_MEM_BASE 0
+#define PCI_REG_PLX_IO_BASE 1
+#define PCI_REG_MEMORY_BASE 3
+
+/**************/
+/* card types */
+/**************/
+#define BD_NONE 0U
+#define BD_PERFORMANCE 1U
+#define BD_VALUE 2U
+#define BD_PCCARD 3U
+#define BD_ERGO 4U
+#define BD_METRO 5U
+#define BD_CHAMP2 6U
+#define BD_PLEXUS 7U
+
+/******************************************************/
+/* defined states for cards shown by reading cardconf */
+/******************************************************/
+#define CARD_STATE_UNUSED 0 /* never been used or booted */
+#define CARD_STATE_BOOTING 1 /* booting is in progress */
+#define CARD_STATE_BOOTERR 2 /* a previous boot was aborted */
+#define CARD_STATE_RUN 3 /* card is active */
+
+/*******************************/
+/* defines for error_log_state */
+/*******************************/
+#define ERRLOG_STATE_OFF 0 /* error log is switched off, nothing to do */
+#define ERRLOG_STATE_ON 1 /* error log is switched on, wait for data */
+#define ERRLOG_STATE_START 2 /* start error logging */
+#define ERRLOG_STATE_STOP 3 /* stop error logging */
+
+/*******************************/
+/* data structure for one card */
+/*******************************/
+typedef struct HYSDN_CARD {
+
+ /* general variables for the cards */
+ int myid; /* own driver card id */
+ unsigned char bus; /* pci bus the card is connected to */
+ unsigned char devfn; /* slot+function bit encoded */
+ unsigned short subsysid;/* PCI subsystem id */
+ unsigned char brdtype; /* type of card */
+ unsigned int bchans; /* number of available B-channels */
+ unsigned int faxchans; /* number of available fax-channels */
+ unsigned char mac_addr[6];/* MAC Address read from card */
+ unsigned int irq; /* interrupt number */
+ unsigned int iobase; /* IO-port base address */
+ unsigned long plxbase; /* PLX memory base */
+ unsigned long membase; /* DPRAM memory base */
+ unsigned long memend; /* DPRAM memory end */
+ void *dpram; /* mapped dpram */
+ int state; /* actual state of card -> CARD_STATE_** */
+ struct HYSDN_CARD *next; /* pointer to next card */
+
+ /* data areas for the /proc file system */
+ void *proclog; /* pointer to proclog filesystem specific data */
+ void *procconf; /* pointer to procconf filesystem specific data */
+
+ /* debugging and logging */
+ unsigned char err_log_state;/* actual error log state of the card */
+ unsigned long debug_flags;/* tells what should be debugged and where */
+ void (*set_errlog_state) (struct HYSDN_CARD *, int);
+
+ /* interrupt handler + interrupt synchronisation */
+ struct work_struct irq_queue; /* interrupt task queue */
+ unsigned char volatile irq_enabled;/* interrupt enabled if != 0 */
+ unsigned char volatile hw_lock;/* hardware is currently locked -> no access */
+
+ /* boot process */
+ void *boot; /* pointer to boot private data */
+ int (*writebootimg) (struct HYSDN_CARD *, unsigned char *, unsigned long);
+ int (*writebootseq) (struct HYSDN_CARD *, unsigned char *, int);
+ int (*waitpofready) (struct HYSDN_CARD *);
+ int (*testram) (struct HYSDN_CARD *);
+
+ /* scheduler for data transfer (only async parts) */
+ unsigned char async_data[256];/* async data to be sent (normally for config) */
+ unsigned short volatile async_len;/* length of data to send */
+ unsigned short volatile async_channel;/* channel number for async transfer */
+ int volatile async_busy; /* flag != 0 sending in progress */
+ int volatile net_tx_busy; /* a network packet tx is in progress */
+
+ /* network interface */
+ void *netif; /* pointer to network structure */
+
+ /* init and deinit stopcard for booting, too */
+ void (*stopcard) (struct HYSDN_CARD *);
+ void (*releasehardware) (struct HYSDN_CARD *);
+
+ spinlock_t hysdn_lock;
+#ifdef CONFIG_HYSDN_CAPI
+ struct hycapictrl_info {
+ char cardname[32];
+ spinlock_t lock;
+ int versionlen;
+ char versionbuf[1024];
+ char *version[HYSDN_MAXVERSION];
+
+ char infobuf[128]; /* for function procinfo */
+
+ struct HYSDN_CARD *card;
+ struct capi_ctr capi_ctrl;
+ struct sk_buff *skbs[HYSDN_MAX_CAPI_SKB];
+ int in_idx, out_idx; /* indexes to buffer ring */
+ int sk_count; /* number of buffers currently in ring */
+ struct sk_buff *tx_skb; /* buffer for tx operation */
+
+ struct list_head ncci_head;
+ } *hyctrlinfo;
+#endif /* CONFIG_HYSDN_CAPI */
+} hysdn_card;
+
+#ifdef CONFIG_HYSDN_CAPI
+typedef struct hycapictrl_info hycapictrl_info;
+#endif /* CONFIG_HYSDN_CAPI */
+
+
+/*****************/
+/* exported vars */
+/*****************/
+extern hysdn_card *card_root; /* pointer to first card */
+
+
+
+/*************************/
+/* im/exported functions */
+/*************************/
+
+/* hysdn_procconf.c */
+extern int hysdn_procconf_init(void); /* init proc config filesys */
+extern void hysdn_procconf_release(void); /* deinit proc config filesys */
+
+/* hysdn_proclog.c */
+extern int hysdn_proclog_init(hysdn_card *); /* init proc log entry */
+extern void hysdn_proclog_release(hysdn_card *); /* deinit proc log entry */
+extern void hysdn_addlog(hysdn_card *, char *, ...); /* output data to log */
+extern void hysdn_card_errlog(hysdn_card *, tErrLogEntry *, int); /* output card log */
+
+/* boardergo.c */
+extern int ergo_inithardware(hysdn_card *card); /* get hardware -> module init */
+
+/* hysdn_boot.c */
+extern int pof_write_close(hysdn_card *); /* close proc file after writing pof */
+extern int pof_write_open(hysdn_card *, unsigned char **); /* open proc file for writing pof */
+extern int pof_write_buffer(hysdn_card *, int); /* write boot data to card */
+extern int EvalSysrTokData(hysdn_card *, unsigned char *, int); /* Check Sysready Token Data */
+
+/* hysdn_sched.c */
+extern int hysdn_sched_tx(hysdn_card *, unsigned char *,
+ unsigned short volatile *, unsigned short volatile *,
+ unsigned short);
+extern int hysdn_sched_rx(hysdn_card *, unsigned char *, unsigned short,
+ unsigned short);
+extern int hysdn_tx_cfgline(hysdn_card *, unsigned char *,
+ unsigned short); /* send one cfg line */
+
+/* hysdn_net.c */
+extern unsigned int hynet_enable;
+extern int hysdn_net_create(hysdn_card *); /* create a new net device */
+extern int hysdn_net_release(hysdn_card *); /* delete the device */
+extern char *hysdn_net_getname(hysdn_card *); /* get name of net interface */
+extern void hysdn_tx_netack(hysdn_card *); /* acknowledge a packet tx */
+extern struct sk_buff *hysdn_tx_netget(hysdn_card *); /* get next network packet */
+extern void hysdn_rx_netpkt(hysdn_card *, unsigned char *,
+ unsigned short); /* rxed packet from network */
+
+#ifdef CONFIG_HYSDN_CAPI
+extern unsigned int hycapi_enable;
+extern int hycapi_capi_create(hysdn_card *); /* create a new capi device */
+extern int hycapi_capi_release(hysdn_card *); /* delete the device */
+extern int hycapi_capi_stop(hysdn_card *card); /* suspend */
+extern void hycapi_rx_capipkt(hysdn_card *card, unsigned char *buf,
+ unsigned short len);
+extern void hycapi_tx_capiack(hysdn_card *card);
+extern struct sk_buff *hycapi_tx_capiget(hysdn_card *card);
+extern int hycapi_init(void);
+extern void hycapi_cleanup(void);
+#endif /* CONFIG_HYSDN_CAPI */
+
+#endif /* HYSDN_DEFS_H */
diff --git a/drivers/staging/isdn/hysdn/hysdn_init.c b/drivers/staging/isdn/hysdn/hysdn_init.c
new file mode 100644
index 000000000000..0db2f7506250
--- /dev/null
+++ b/drivers/staging/isdn/hysdn/hysdn_init.c
@@ -0,0 +1,213 @@
+/* $Id: hysdn_init.c,v 1.6.6.6 2001/09/23 22:24:54 kai Exp $
+ *
+ * Linux driver for HYSDN cards, init functions.
+ *
+ * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
+ * Copyright 1999 by Werner Cornelius (werner@titro.de)
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+
+#include "hysdn_defs.h"
+
+static struct pci_device_id hysdn_pci_tbl[] = {
+ { PCI_VENDOR_ID_HYPERCOPE, PCI_DEVICE_ID_HYPERCOPE_PLX,
+ PCI_ANY_ID, PCI_SUBDEVICE_ID_HYPERCOPE_METRO, 0, 0, BD_METRO },
+ { PCI_VENDOR_ID_HYPERCOPE, PCI_DEVICE_ID_HYPERCOPE_PLX,
+ PCI_ANY_ID, PCI_SUBDEVICE_ID_HYPERCOPE_CHAMP2, 0, 0, BD_CHAMP2 },
+ { PCI_VENDOR_ID_HYPERCOPE, PCI_DEVICE_ID_HYPERCOPE_PLX,
+ PCI_ANY_ID, PCI_SUBDEVICE_ID_HYPERCOPE_ERGO, 0, 0, BD_ERGO },
+ { PCI_VENDOR_ID_HYPERCOPE, PCI_DEVICE_ID_HYPERCOPE_PLX,
+ PCI_ANY_ID, PCI_SUBDEVICE_ID_HYPERCOPE_OLD_ERGO, 0, 0, BD_ERGO },
+
+ { } /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, hysdn_pci_tbl);
+MODULE_DESCRIPTION("ISDN4Linux: Driver for HYSDN cards");
+MODULE_AUTHOR("Werner Cornelius");
+MODULE_LICENSE("GPL");
+
+static int cardmax; /* number of found cards */
+hysdn_card *card_root = NULL; /* pointer to first card */
+static hysdn_card *card_last = NULL; /* pointer to last card */
+
+
+/****************************************************************************/
+/* The module startup and shutdown code. Only compiled when used as module. */
+/* Using the driver as module is always advisable, because the booting */
+/* image becomes smaller and the driver code is only loaded when needed. */
+/* Additionally newer versions may be activated without rebooting. */
+/****************************************************************************/
+
+/****************************************************************************/
+/* init_module is called once when the module is loaded to do all necessary */
+/* things like autodetect... */
+/* If the return value of this function is 0 the init has been successful */
+/* and the module is added to the list in /proc/modules, otherwise an error */
+/* is assumed and the module will not be kept in memory. */
+/****************************************************************************/
+
+static int hysdn_pci_init_one(struct pci_dev *akt_pcidev,
+ const struct pci_device_id *ent)
+{
+ hysdn_card *card;
+ int rc;
+
+ rc = pci_enable_device(akt_pcidev);
+ if (rc)
+ return rc;
+
+ if (!(card = kzalloc(sizeof(hysdn_card), GFP_KERNEL))) {
+ printk(KERN_ERR "HYSDN: unable to alloc device mem \n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ card->myid = cardmax; /* set own id */
+ card->bus = akt_pcidev->bus->number;
+ card->devfn = akt_pcidev->devfn; /* slot + function */
+ card->subsysid = akt_pcidev->subsystem_device;
+ card->irq = akt_pcidev->irq;
+ card->iobase = pci_resource_start(akt_pcidev, PCI_REG_PLX_IO_BASE);
+ card->plxbase = pci_resource_start(akt_pcidev, PCI_REG_PLX_MEM_BASE);
+ card->membase = pci_resource_start(akt_pcidev, PCI_REG_MEMORY_BASE);
+ card->brdtype = BD_NONE; /* unknown */
+ card->debug_flags = DEF_DEB_FLAGS; /* set default debug */
+ card->faxchans = 0; /* default no fax channels */
+ card->bchans = 2; /* and 2 b-channels */
+ card->brdtype = ent->driver_data;
+
+ if (ergo_inithardware(card)) {
+ printk(KERN_WARNING "HYSDN: card at io 0x%04x already in use\n", card->iobase);
+ rc = -EBUSY;
+ goto err_out_card;
+ }
+
+ cardmax++;
+ card->next = NULL; /*end of chain */
+ if (card_last)
+ card_last->next = card; /* pointer to next card */
+ else
+ card_root = card;
+ card_last = card; /* new chain end */
+
+ pci_set_drvdata(akt_pcidev, card);
+ return 0;
+
+err_out_card:
+ kfree(card);
+err_out:
+ pci_disable_device(akt_pcidev);
+ return rc;
+}
+
+static void hysdn_pci_remove_one(struct pci_dev *akt_pcidev)
+{
+ hysdn_card *card = pci_get_drvdata(akt_pcidev);
+
+ pci_set_drvdata(akt_pcidev, NULL);
+
+ if (card->stopcard)
+ card->stopcard(card);
+
+#ifdef CONFIG_HYSDN_CAPI
+ hycapi_capi_release(card);
+#endif
+
+ if (card->releasehardware)
+ card->releasehardware(card); /* free all hardware resources */
+
+ if (card == card_root) {
+ card_root = card_root->next;
+ if (!card_root)
+ card_last = NULL;
+ } else {
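+ /* walk the chain, unlink the card and leave card_last pointing at the tail */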
+ hysdn_card *tmp = card_root;
+ while (tmp) {
+ if (tmp->next == card)
+ tmp->next = card->next;
+ card_last = tmp;
+ tmp = tmp->next;
+ }
+ }
+
+ kfree(card);
+ pci_disable_device(akt_pcidev);
+}
+
+static struct pci_driver hysdn_pci_driver = {
+ .name = "hysdn",
+ .id_table = hysdn_pci_tbl,
+ .probe = hysdn_pci_init_one,
+ .remove = hysdn_pci_remove_one,
+};
+
+static int hysdn_have_procfs;
+
+static int __init
+hysdn_init(void)
+{
+ int rc;
+
+ printk(KERN_NOTICE "HYSDN: module loaded\n");
+
+ rc = pci_register_driver(&hysdn_pci_driver);
+ if (rc)
+ return rc;
+
+ printk(KERN_INFO "HYSDN: %d card(s) found.\n", cardmax);
+
+ if (!hysdn_procconf_init())
+ hysdn_have_procfs = 1;
+
+#ifdef CONFIG_HYSDN_CAPI
+ if (cardmax > 0) {
+ if (hycapi_init()) {
+ printk(KERN_ERR "HYCAPI: init failed\n");
+
+ if (hysdn_have_procfs)
+ hysdn_procconf_release();
+
+ pci_unregister_driver(&hysdn_pci_driver);
+ return -ESPIPE;
+ }
+ }
+#endif /* CONFIG_HYSDN_CAPI */
+
+ return 0; /* no error */
+} /* init_module */
+
+
+/***********************************************************************/
+/* cleanup_module is called when the module is released by the kernel. */
+/* The routine is only called if init_module has been successful and */
+/* the module counter has a value of 0. Otherwise this function will */
+/* not be called. This function must release all resources still allo- */
+/* cated as after the return from this function the module code will */
+/* be removed from memory. */
+/***********************************************************************/
+static void __exit
+hysdn_exit(void)
+{
+ if (hysdn_have_procfs)
+ hysdn_procconf_release();
+
+ pci_unregister_driver(&hysdn_pci_driver);
+
+#ifdef CONFIG_HYSDN_CAPI
+ hycapi_cleanup();
+#endif /* CONFIG_HYSDN_CAPI */
+
+ printk(KERN_NOTICE "HYSDN: module unloaded\n");
+} /* cleanup_module */
+
+module_init(hysdn_init);
+module_exit(hysdn_exit);
diff --git a/drivers/staging/isdn/hysdn/hysdn_net.c b/drivers/staging/isdn/hysdn/hysdn_net.c
new file mode 100644
index 000000000000..bea37ae30ebb
--- /dev/null
+++ b/drivers/staging/isdn/hysdn/hysdn_net.c
@@ -0,0 +1,330 @@
+/* $Id: hysdn_net.c,v 1.8.6.4 2001/09/23 22:24:54 kai Exp $
+ *
+ * Linux driver for HYSDN cards, net (ethernet type) handling routines.
+ *
+ * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
+ * Copyright 1999 by Werner Cornelius (werner@titro.de)
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * This net module has been inspired by the skeleton driver from
+ * Donald Becker (becker@CESDIS.gsfc.nasa.gov)
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/inetdevice.h>
+
+#include "hysdn_defs.h"
+
+unsigned int hynet_enable = 0xffffffff;
+module_param(hynet_enable, uint, 0);
+
+#define MAX_SKB_BUFFERS 20 /* number of buffers for keeping TX-data */
+
+/****************************************************************************/
+/* structure containing the complete private network data of a card. It is  */
+/* allocated together with the net_device and is reached via netdev_priv(). */
+/****************************************************************************/
+struct net_local {
+ /* Tx control lock. This protects the transmit buffer ring
+ * state along with the "tx full" state of the driver. This
+ * means all netif_queue flow control actions are protected
+ * by this lock as well.
+ */
+ struct net_device *dev;
+ spinlock_t lock;
+ struct sk_buff *skbs[MAX_SKB_BUFFERS]; /* pointers to tx-skbs */
+ int in_idx, out_idx; /* indexes to buffer ring */
+ int sk_count; /* number of buffers currently in ring */
+}; /* net_local */
+
+
+
+/*********************************************************************/
+/* Open/initialize the board. This is called (in the current kernel) */
+/* sometime after booting when the 'ifconfig' program is run. */
+/* This routine should set everything up anew at each open, even */
+/* registers that "should" only need to be set once at boot, so that */
+/* there is a non-reboot way to recover if something goes wrong. */
+/*********************************************************************/
+static int
+net_open(struct net_device *dev)
+{
+ struct in_device *in_dev;
+ hysdn_card *card = dev->ml_priv;
+ int i;
+
+ netif_start_queue(dev); /* start tx-queueing */
+
+ /* Fill in the MAC-level header (if not already set) */
+ if (!card->mac_addr[0]) {
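+ /* no MAC stored on the card: build one from 0xfc filler bytes and
+    the interface's primary IPv4 address */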
+ for (i = 0; i < ETH_ALEN; i++)
+ dev->dev_addr[i] = 0xfc;
+ if ((in_dev = dev->ip_ptr) != NULL) {
+ const struct in_ifaddr *ifa;
+
+ rcu_read_lock();
+ ifa = rcu_dereference(in_dev->ifa_list);
+ if (ifa != NULL)
+ memcpy(dev->dev_addr + (ETH_ALEN - sizeof(ifa->ifa_local)), &ifa->ifa_local, sizeof(ifa->ifa_local));
+ rcu_read_unlock();
+ }
+ } else
+ memcpy(dev->dev_addr, card->mac_addr, ETH_ALEN);
+
+ return (0);
+} /* net_open */
+
+/*******************************************/
+/* flush the currently occupied tx-buffers */
+/* must only be called when device closed */
+/*******************************************/
+static void
+flush_tx_buffers(struct net_local *nl)
+{
+
+ while (nl->sk_count) {
+ dev_kfree_skb(nl->skbs[nl->out_idx++]); /* free skb */
+ if (nl->out_idx >= MAX_SKB_BUFFERS)
+ nl->out_idx = 0; /* wrap around */
+ nl->sk_count--;
+ }
+} /* flush_tx_buffers */
+
+
+/*********************************************************************/
+/* close/deactivate the device. The device is not removed, but only */
+/* deactivated. */
+/*********************************************************************/
+static int
+net_close(struct net_device *dev)
+{
+
+ netif_stop_queue(dev); /* disable queueing */
+
+ flush_tx_buffers(netdev_priv(dev));
+
+ return (0); /* success */
+} /* net_close */
+
+/************************************/
+/* send a packet on this interface. */
+/* new style for kernel >= 2.3.33 */
+/************************************/
+static netdev_tx_t
+net_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+
+ spin_lock_irq(&lp->lock);
+
+ lp->skbs[lp->in_idx++] = skb; /* add to buffer list */
+ if (lp->in_idx >= MAX_SKB_BUFFERS)
+ lp->in_idx = 0; /* wrap around */
+ lp->sk_count++; /* adjust counter */
+ netif_trans_update(dev);
+
+ /* If we just used up the very last entry in the
+ * TX ring on this device, tell the queueing
+ * layer to send no more.
+ */
+ if (lp->sk_count >= MAX_SKB_BUFFERS)
+ netif_stop_queue(dev);
+
+ /* When the TX completion hw interrupt arrives, this
+ * is when the transmit statistics are updated.
+ */
+
+ spin_unlock_irq(&lp->lock);
+
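+ /* kick the card's work queue while the ring is almost empty; with
+    more packets queued a transfer should already be in progress */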
+ if (lp->sk_count <= 3) {
+ schedule_work(&((hysdn_card *) dev->ml_priv)->irq_queue);
+ }
+ return NETDEV_TX_OK; /* success */
+} /* net_send_packet */
+
+
+
+/***********************************************************************/
+/* acknowledge a packet send. The network layer will be informed about */
+/* completion */
+/***********************************************************************/
+void
+hysdn_tx_netack(hysdn_card *card)
+{
+ struct net_device *dev = card->netif;
+ struct net_local *lp;
+
+ if (!dev)
+ return; /* non existing device */
+ lp = netdev_priv(dev);
+
+
+ if (!lp->sk_count)
+ return; /* error condition */
+
+ lp->dev->stats.tx_packets++;
+ lp->dev->stats.tx_bytes += lp->skbs[lp->out_idx]->len;
+
+ dev_kfree_skb(lp->skbs[lp->out_idx++]); /* free skb */
+ if (lp->out_idx >= MAX_SKB_BUFFERS)
+ lp->out_idx = 0; /* wrap around */
+
+ if (lp->sk_count-- == MAX_SKB_BUFFERS) /* dec usage count */
+ netif_start_queue(dev);
+} /* hysdn_tx_netack */
+
+/*****************************************************/
+/* we got a packet from the network, go and queue it */
+/*****************************************************/
+void
+hysdn_rx_netpkt(hysdn_card *card, unsigned char *buf, unsigned short len)
+{
+ struct net_device *dev = card->netif;
+ struct sk_buff *skb;
+
+ if (!dev)
+ return; /* non existing device */
+
+ dev->stats.rx_bytes += len;
+
+ skb = dev_alloc_skb(len);
+ if (skb == NULL) {
+ printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
+ dev->name);
+ dev->stats.rx_dropped++;
+ return;
+ }
+ /* copy the data */
+ skb_put_data(skb, buf, len);
+
+ /* determine the used protocol */
+ skb->protocol = eth_type_trans(skb, dev);
+
+ dev->stats.rx_packets++; /* adjust packet count */
+
+ netif_rx(skb);
+} /* hysdn_rx_netpkt */
+
+/*****************************************************/
+/* return the pointer to a network packet to be send */
+/*****************************************************/
+struct sk_buff *
+hysdn_tx_netget(hysdn_card *card)
+{
+ struct net_device *dev = card->netif;
+ struct net_local *lp;
+
+ if (!dev)
+ return (NULL); /* non existing device */
+ lp = netdev_priv(dev);
+
+ if (!lp->sk_count)
+ return (NULL); /* nothing available */
+
+ return (lp->skbs[lp->out_idx]); /* next packet to send */
+} /* hysdn_tx_netget */
+
+static const struct net_device_ops hysdn_netdev_ops = {
+ .ndo_open = net_open,
+ .ndo_stop = net_close,
+ .ndo_start_xmit = net_send_packet,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+
+/*****************************************************************************/
+/* hysdn_net_create creates a new net device for the given card. If a device */
+/* already exists, it will be deleted and a new one created. The return value */
+/* 0 announces success, else a negative error code will be returned. */
+/*****************************************************************************/
+int
+hysdn_net_create(hysdn_card *card)
+{
+ struct net_device *dev;
+ int i;
+ struct net_local *lp;
+
+ if (!card) {
+ printk(KERN_WARNING "No card-pt in hysdn_net_create!\n");
+ return (-ENOMEM);
+ }
+ hysdn_net_release(card); /* release an existing net device */
+
+ dev = alloc_etherdev(sizeof(struct net_local));
+ if (!dev) {
+ printk(KERN_WARNING "HYSDN: unable to allocate mem\n");
+ return (-ENOMEM);
+ }
+
+ lp = netdev_priv(dev);
+ lp->dev = dev;
+
+ dev->netdev_ops = &hysdn_netdev_ops;
+ spin_lock_init(&((struct net_local *) dev)->lock);
+
+ /* initialise necessary or informing fields */
+ dev->base_addr = card->iobase; /* IO address */
+ dev->irq = card->irq; /* irq */
+
+ dev->netdev_ops = &hysdn_netdev_ops;
+ if ((i = register_netdev(dev))) {
+ printk(KERN_WARNING "HYSDN: unable to create network device\n");
+ free_netdev(dev);
+ return (i);
+ }
+ dev->ml_priv = card; /* remember pointer to own data structure */
+ card->netif = dev; /* setup the local pointer */
+
+ if (card->debug_flags & LOG_NET_INIT)
+ hysdn_addlog(card, "network device created");
+ return (0); /* and return success */
+} /* hysdn_net_create */
+
+/***************************************************************************/
+/* hysdn_net_release deletes the net device for the given card. The return */
+/* value 0 announces success, else a negative error code will be returned. */
+/***************************************************************************/
+int
+hysdn_net_release(hysdn_card *card)
+{
+ struct net_device *dev = card->netif;
+
+ if (!dev)
+ return (0); /* non existing */
+
+ card->netif = NULL; /* clear out pointer */
+ net_close(dev);
+
+ flush_tx_buffers(netdev_priv(dev)); /* empty buffers */
+
+ unregister_netdev(dev); /* release the device */
+ free_netdev(dev); /* release the memory allocated */
+ if (card->debug_flags & LOG_NET_INIT)
+ hysdn_addlog(card, "network device deleted");
+
+ return (0); /* always successful */
+} /* hysdn_net_release */
+
+/*****************************************************************************/
+/* hysdn_net_getname returns a pointer to the name of the network interface. */
+/* If the interface does not exist, a "-" is returned. */
+/*****************************************************************************/
+char *
+hysdn_net_getname(hysdn_card *card)
+{
+ struct net_device *dev = card->netif;
+
+ if (!dev)
+ return ("-"); /* non existing */
+
+ return (dev->name);
+} /* hysdn_net_getname */
diff --git a/drivers/staging/isdn/hysdn/hysdn_pof.h b/drivers/staging/isdn/hysdn/hysdn_pof.h
new file mode 100644
index 000000000000..f63f5fa59d7e
--- /dev/null
+++ b/drivers/staging/isdn/hysdn/hysdn_pof.h
@@ -0,0 +1,78 @@
+/* $Id: hysdn_pof.h,v 1.2.6.1 2001/09/23 22:24:54 kai Exp $
+ *
+ * Linux driver for HYSDN cards, definitions used for handling pof-files.
+ *
+ * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
+ * Copyright 1999 by Werner Cornelius (werner@titro.de)
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+/************************/
+/* POF specific defines */
+/************************/
+#define BOOT_BUF_SIZE 0x1000 /* =4096, maybe moved to other h file */
+#define CRYPT_FEEDTERM 0x8142
+#define CRYPT_STARTTERM 0x81a5
+/* max. timeout time in seconds
+ * from end of booting to POF is ready
+ */
+#define POF_READY_TIME_OUT_SEC 10
+
+/**********************************/
+/* defines for 1.stage boot image */
+/**********************************/
+
+/* the POF file record containing the boot loader image
+ * has 2 pages a 16KB:
+ * 1. page contains the high 16-bit part of the 32-bit E1 words
+ * 2. page contains the low 16-bit part of the 32-bit E1 words
+ *
+ * In each 16KB page we assume the start of the boot loader code
+ * in the highest 2KB part (at offset 0x3800);
+ * the rest (0x0000..0x37FF) is assumed to contain 0 bytes.
+ */
+
+#define POF_BOOT_LOADER_PAGE_SIZE 0x4000 /* =16384U */
+#define POF_BOOT_LOADER_TOTAL_SIZE (2U * POF_BOOT_LOADER_PAGE_SIZE)
+
+#define POF_BOOT_LOADER_CODE_SIZE 0x0800 /* =2KB =2048U */
+
+/* offset in boot page, where loader code may start */
+/* =0x3800= 14336U */
+#define POF_BOOT_LOADER_OFF_IN_PAGE (POF_BOOT_LOADER_PAGE_SIZE-POF_BOOT_LOADER_CODE_SIZE)
+
+
+/*--------------------------------------POF file record structs------------*/
+typedef struct PofFileHdr_tag { /* Pof file header */
+ /*00 */ unsigned long Magic __attribute__((packed));
+ /*04 */ unsigned long N_PofRecs __attribute__((packed));
+/*08 */
+} tPofFileHdr;
+
+typedef struct PofRecHdr_tag { /* Pof record header */
+ /*00 */ unsigned short PofRecId __attribute__((packed));
+ /*02 */ unsigned long PofRecDataLen __attribute__((packed));
+/*06 */
+} tPofRecHdr;
+
+typedef struct PofTimeStamp_tag {
+ /*00 */ unsigned long UnixTime __attribute__((packed));
+ /*04 */ unsigned char DateTimeText[0x28];
+ /* =40 */
+/*2C */
+} tPofTimeStamp;
+
+/* tPofFileHdr.Magic value: */
+#define TAGFILEMAGIC 0x464F501AUL
+/* tPofRecHdr.PofRecId values: */
+#define TAG_ABSDATA 0x1000 /* abs. data */
+#define TAG_BOOTDTA 0x1001 /* boot data */
+#define TAG_COMMENT 0x0020
+#define TAG_SYSCALL 0x0021
+#define TAG_FLOWCTRL 0x0022
+#define TAG_TIMESTMP 0x0010 /* date/time stamp of version */
+#define TAG_CABSDATA 0x1100 /* crypted abs. data */
+#define TAG_CBOOTDTA 0x1101 /* crypted boot data */
diff --git a/drivers/staging/isdn/hysdn/hysdn_procconf.c b/drivers/staging/isdn/hysdn/hysdn_procconf.c
new file mode 100644
index 000000000000..73079213ec94
--- /dev/null
+++ b/drivers/staging/isdn/hysdn/hysdn_procconf.c
@@ -0,0 +1,411 @@
+/* $Id: hysdn_procconf.c,v 1.8.6.4 2001/09/23 22:24:54 kai Exp $
+ *
+ * Linux driver for HYSDN cards, /proc/net filesystem dir and conf functions.
+ *
+ * written by Werner Cornelius (werner@titro.de) for Hypercope GmbH
+ *
+ * Copyright 1999 by Werner Cornelius (werner@titro.de)
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#include <linux/cred.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <net/net_namespace.h>
+
+#include "hysdn_defs.h"
+
+static DEFINE_MUTEX(hysdn_conf_mutex);
+
+#define INFO_OUT_LEN 80 /* length of info line including lf */
+
+/********************************************************/
+/* defines and data structure for conf write operations */
+/********************************************************/
+#define CONF_STATE_DETECT 0 /* waiting for detect */
+#define CONF_STATE_CONF 1 /* writing config data */
+#define CONF_STATE_POF 2 /* writing pof data */
+#define CONF_LINE_LEN 255 /* 255 chars max */
+
+struct conf_writedata {
+ hysdn_card *card; /* card the device is connected to */
+ int buf_size; /* actual number of bytes in the buffer */
+ int needed_size; /* needed size when reading pof */
+ int state; /* actual interface states from above constants */
+ unsigned char conf_line[CONF_LINE_LEN]; /* buffered conf line */
+ unsigned short channel; /* active channel number */
+ unsigned char *pof_buffer; /* buffer when writing pof */
+};
+
+/***********************************************************************/
+/* process_line parses one config line and transfers it to the card if */
+/* necessary. */
+/* if the return value is negative an error occurred. */
+/***********************************************************************/
+static int
+process_line(struct conf_writedata *cnf)
+{
+ unsigned char *cp = cnf->conf_line;
+ int i;
+
+ if (cnf->card->debug_flags & LOG_CNF_LINE)
+ hysdn_addlog(cnf->card, "conf line: %s", cp);
+
+ if (*cp == '-') { /* option */
+ cp++; /* point to option char */
+
+ if (*cp++ != 'c')
+ return (0); /* option unknown or used */
+ i = 0; /* start value for channel */
+ while ((*cp <= '9') && (*cp >= '0'))
+ i = i * 10 + *cp++ - '0'; /* get decimal number */
+ if (i > 65535) {
+ if (cnf->card->debug_flags & LOG_CNF_MISC)
+ hysdn_addlog(cnf->card, "conf channel invalid %d", i);
+ return (-ERR_INV_CHAN); /* invalid channel */
+ }
+ cnf->channel = i & 0xFFFF; /* set new channel number */
+ return (0); /* success */
+ } /* option */
+ if (*cp == '*') { /* line to send */
+ if (cnf->card->debug_flags & LOG_CNF_DATA)
+ hysdn_addlog(cnf->card, "conf chan=%d %s", cnf->channel, cp);
+ return (hysdn_tx_cfgline(cnf->card, cnf->conf_line + 1,
+ cnf->channel)); /* send the line without * */
+ } /* line to send */
+ return (0);
+} /* process_line */
+
+/***********************************/
+/* conf file operations and tables */
+/***********************************/
+
+/****************************************************/
+/* write conf file -> boot or send cfg line to card */
+/****************************************************/
+static ssize_t
+hysdn_conf_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
+{
+ struct conf_writedata *cnf;
+ int i;
+ unsigned char ch, *cp;
+
+ if (!count)
+ return (0); /* nothing to handle */
+
+ if (!(cnf = file->private_data))
+ return (-EFAULT); /* should never happen */
+
+ if (cnf->state == CONF_STATE_DETECT) { /* auto detect cnf or pof data */
+ if (copy_from_user(&ch, buf, 1)) /* get first char for detect */
+ return (-EFAULT);
+
+ if (ch == 0x1A) {
+ /* we detected a pof file */
+ if ((cnf->needed_size = pof_write_open(cnf->card, &cnf->pof_buffer)) <= 0)
+ return (cnf->needed_size); /* an error occurred -> exit */
+ cnf->buf_size = 0; /* buffer is empty */
+ cnf->state = CONF_STATE_POF; /* new state */
+ } else {
+ /* conf data has been detected */
+ cnf->buf_size = 0; /* buffer is empty */
+ cnf->state = CONF_STATE_CONF; /* requested conf data write */
+ if (cnf->card->state != CARD_STATE_RUN)
+ return (-ERR_NOT_BOOTED);
+ cnf->conf_line[CONF_LINE_LEN - 1] = 0; /* limit string length */
+ cnf->channel = 4098; /* default channel for output */
+ }
+ } /* state was auto detect */
+ if (cnf->state == CONF_STATE_POF) { /* pof write active */
+ i = cnf->needed_size - cnf->buf_size; /* bytes still missing for write */
+ if (i <= 0)
+ return (-EINVAL); /* size error handling pof */
+
+ if (i < count)
+ count = i; /* limit requested number of bytes */
+ if (copy_from_user(cnf->pof_buffer + cnf->buf_size, buf, count))
+ return (-EFAULT); /* error while copying */
+ cnf->buf_size += count;
+
+ if (cnf->needed_size == cnf->buf_size) {
+ cnf->needed_size = pof_write_buffer(cnf->card, cnf->buf_size); /* write data */
+ if (cnf->needed_size <= 0) {
+ cnf->card->state = CARD_STATE_BOOTERR; /* show boot error */
+ return (cnf->needed_size); /* an error occurred */
+ }
+ cnf->buf_size = 0; /* buffer is empty again */
+ }
+ }
+ /* pof write active */
+ else { /* conf write active */
+
+ if (cnf->card->state != CARD_STATE_RUN) {
+ if (cnf->card->debug_flags & LOG_CNF_MISC)
+ hysdn_addlog(cnf->card, "cnf write denied -> not booted");
+ return (-ERR_NOT_BOOTED);
+ }
+ i = (CONF_LINE_LEN - 1) - cnf->buf_size; /* bytes available in buffer */
+ if (i > 0) {
+ /* copy remaining bytes into buffer */
+
+ if (count > i)
+ count = i; /* limit transfer */
+ if (copy_from_user(cnf->conf_line + cnf->buf_size, buf, count))
+ return (-EFAULT); /* error while copying */
+
+ i = count; /* number of chars in buffer */
+ cp = cnf->conf_line + cnf->buf_size;
+ while (i) {
+ /* search for end of line */
+ if ((*cp < ' ') && (*cp != 9))
+ break; /* end of line found */
+ cp++;
+ i--;
+ } /* search for end of line */
+
+ if (i) {
+ /* delimiter found */
+ *cp++ = 0; /* string termination */
+ count -= (i - 1); /* subtract remaining bytes from count */
+ while ((i) && (*cp < ' ') && (*cp != 9)) {
+ i--; /* discard next char */
+ count++; /* mark as read */
+ cp++; /* next char */
+ }
+ cnf->buf_size = 0; /* buffer is empty after transfer */
+ if ((i = process_line(cnf)) < 0) /* handle the line */
+ count = i; /* return the error */
+ }
+ /* delimiter found */
+ else {
+ cnf->buf_size += count; /* add chars to string */
+ if (cnf->buf_size >= CONF_LINE_LEN - 1) {
+ if (cnf->card->debug_flags & LOG_CNF_MISC)
+ hysdn_addlog(cnf->card, "cnf line too long %d chars pos %d", cnf->buf_size, count);
+ return (-ERR_CONF_LONG);
+ }
+ } /* not delimited */
+
+ }
+ /* copy remaining bytes into buffer */
+ else {
+ if (cnf->card->debug_flags & LOG_CNF_MISC)
+ hysdn_addlog(cnf->card, "cnf line too long");
+ return (-ERR_CONF_LONG);
+ }
+ } /* conf write active */
+
+ return (count);
+} /* hysdn_conf_write */
+
+/*******************************************/
+/* read conf file -> output card info data */
+/*******************************************/
+static ssize_t
+hysdn_conf_read(struct file *file, char __user *buf, size_t count, loff_t *off)
+{
+ char *cp;
+
+ if (!(file->f_mode & FMODE_READ))
+ return -EPERM; /* no permission to read */
+
+ if (!(cp = file->private_data))
+ return -EFAULT; /* should never happen */
+
+ return simple_read_from_buffer(buf, count, off, cp, strlen(cp));
+} /* hysdn_conf_read */
+
+/******************/
+/* open conf file */
+/******************/
+static int
+hysdn_conf_open(struct inode *ino, struct file *filep)
+{
+ hysdn_card *card;
+ struct conf_writedata *cnf;
+ char *cp, *tmp;
+
+ /* now search the addressed card */
+ mutex_lock(&hysdn_conf_mutex);
+ card = PDE_DATA(ino);
+ if (card->debug_flags & (LOG_PROC_OPEN | LOG_PROC_ALL))
+ hysdn_addlog(card, "config open for uid=%d gid=%d mode=0x%x",
+ filep->f_cred->fsuid, filep->f_cred->fsgid,
+ filep->f_mode);
+
+ if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
+ /* write only access -> write boot file or conf line */
+
+ if (!(cnf = kmalloc(sizeof(struct conf_writedata), GFP_KERNEL))) {
+ mutex_unlock(&hysdn_conf_mutex);
+ return (-EFAULT);
+ }
+ cnf->card = card;
+ cnf->buf_size = 0; /* nothing buffered */
+ cnf->state = CONF_STATE_DETECT; /* start auto detect */
+ filep->private_data = cnf;
+
+ } else if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
+ /* read access -> output card info data */
+
+ if (!(tmp = kmalloc(INFO_OUT_LEN * 2 + 2, GFP_KERNEL))) {
+ mutex_unlock(&hysdn_conf_mutex);
+ return (-EFAULT); /* out of memory */
+ }
+ filep->private_data = tmp; /* start of string */
+
+ /* first output a headline */
+ sprintf(tmp, "id bus slot type irq iobase dp-mem b-chans fax-chans state device");
+ cp = tmp; /* start of string */
+ while (*cp)
+ cp++;
+ while (((cp - tmp) % (INFO_OUT_LEN + 1)) != INFO_OUT_LEN)
+ *cp++ = ' ';
+ *cp++ = '\n';
+
+ /* and now the data */
+ sprintf(cp, "%d %3d %4d %4d %3d 0x%04x 0x%08lx %7d %9d %3d %s",
+ card->myid,
+ card->bus,
+ PCI_SLOT(card->devfn),
+ card->brdtype,
+ card->irq,
+ card->iobase,
+ card->membase,
+ card->bchans,
+ card->faxchans,
+ card->state,
+ hysdn_net_getname(card));
+ while (*cp)
+ cp++;
+ while (((cp - tmp) % (INFO_OUT_LEN + 1)) != INFO_OUT_LEN)
+ *cp++ = ' ';
+ *cp++ = '\n';
+ *cp = 0; /* end of string */
+ } else { /* simultaneous read/write access forbidden ! */
+ mutex_unlock(&hysdn_conf_mutex);
+ return (-EPERM); /* no permission this time */
+ }
+ mutex_unlock(&hysdn_conf_mutex);
+ return nonseekable_open(ino, filep);
+} /* hysdn_conf_open */
+
+/***************************/
+/* close a config file. */
+/***************************/
+static int
+hysdn_conf_close(struct inode *ino, struct file *filep)
+{
+ hysdn_card *card;
+ struct conf_writedata *cnf;
+ int retval = 0;
+
+ mutex_lock(&hysdn_conf_mutex);
+ card = PDE_DATA(ino);
+ if (card->debug_flags & (LOG_PROC_OPEN | LOG_PROC_ALL))
+ hysdn_addlog(card, "config close for uid=%d gid=%d mode=0x%x",
+ filep->f_cred->fsuid, filep->f_cred->fsgid,
+ filep->f_mode);
+
+ if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
+ /* write only access -> write boot file or conf line */
+ if (filep->private_data) {
+ cnf = filep->private_data;
+
+ if (cnf->state == CONF_STATE_POF)
+ retval = pof_write_close(cnf->card); /* close the pof write */
+ kfree(filep->private_data); /* free allocated memory for buffer */
+
+ } /* handle write private data */
+ } else if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
+ /* read access -> output card info data */
+
+ kfree(filep->private_data); /* release memory */
+ }
+ mutex_unlock(&hysdn_conf_mutex);
+ return (retval);
+} /* hysdn_conf_close */
+
+/******************************************************/
+/* table for conf filesystem functions defined above. */
+/******************************************************/
+static const struct file_operations conf_fops =
+{
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = hysdn_conf_read,
+ .write = hysdn_conf_write,
+ .open = hysdn_conf_open,
+ .release = hysdn_conf_close,
+};
+
+/*****************************/
+/* hysdn subdir in /proc/net */
+/*****************************/
+struct proc_dir_entry *hysdn_proc_entry = NULL;
+
+/*******************************************************************************/
+/* hysdn_procconf_init is called when the module is loaded and after the cards */
+/* have been detected. The needed proc dir and card config files are created. */
+/* The log init is called last. */
+/*******************************************************************************/
+int
+hysdn_procconf_init(void)
+{
+ hysdn_card *card;
+ unsigned char conf_name[20];
+
+ hysdn_proc_entry = proc_mkdir(PROC_SUBDIR_NAME, init_net.proc_net);
+ if (!hysdn_proc_entry) {
+ printk(KERN_ERR "HYSDN: unable to create hysdn subdir\n");
+ return (-1);
+ }
+ card = card_root; /* point to first card */
+ while (card) {
+
+ sprintf(conf_name, "%s%d", PROC_CONF_BASENAME, card->myid);
+ if ((card->procconf = (void *) proc_create_data(conf_name,
+ S_IFREG | S_IRUGO | S_IWUSR,
+ hysdn_proc_entry,
+ &conf_fops,
+ card)) != NULL) {
+ hysdn_proclog_init(card); /* init the log file entry */
+ }
+ card = card->next; /* next entry */
+ }
+
+ printk(KERN_NOTICE "HYSDN: procfs initialised\n");
+ return (0);
+} /* hysdn_procconf_init */
+
+/*************************************************************************************/
+/* hysdn_procconf_release is called when the module is unloaded and before the cards' */
+/* resources are released. The module counter is assumed to be 0! */
+/*************************************************************************************/
+void
+hysdn_procconf_release(void)
+{
+ hysdn_card *card;
+ unsigned char conf_name[20];
+
+ card = card_root; /* start with first card */
+ while (card) {
+
+ sprintf(conf_name, "%s%d", PROC_CONF_BASENAME, card->myid);
+ if (card->procconf)
+ remove_proc_entry(conf_name, hysdn_proc_entry);
+
+		hysdn_proclog_release(card);	/* release the log file entry */
+
+ card = card->next; /* point to next card */
+ }
+
+ remove_proc_entry(PROC_SUBDIR_NAME, init_net.proc_net);
+}
diff --git a/drivers/staging/isdn/hysdn/hysdn_proclog.c b/drivers/staging/isdn/hysdn/hysdn_proclog.c
new file mode 100644
index 000000000000..6e898b90e86e
--- /dev/null
+++ b/drivers/staging/isdn/hysdn/hysdn_proclog.c
@@ -0,0 +1,357 @@
+/* $Id: hysdn_proclog.c,v 1.9.6.3 2001/09/23 22:24:54 kai Exp $
+ *
+ * Linux driver for HYSDN cards, /proc/net filesystem log functions.
+ *
+ * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
+ * Copyright 1999 by Werner Cornelius (werner@titro.de)
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/kernel.h>
+
+#include "hysdn_defs.h"
+
+/* the proc subdir for the interface is defined in the procconf module */
+extern struct proc_dir_entry *hysdn_proc_entry;
+
+static DEFINE_MUTEX(hysdn_log_mutex);
+static void put_log_buffer(hysdn_card *card, char *cp);
+
+/*************************************************/
+/* structure keeping ascii log for device output */
+/*************************************************/
+struct log_data {
+ struct log_data *next;
+ unsigned long usage_cnt;/* number of files still to work */
+ void *proc_ctrl; /* pointer to own control procdata structure */
+ char log_start[2]; /* log string start (final len aligned by size) */
+};
+
+/**********************************************/
+/* structure holding proc entries for one card */
+/**********************************************/
+struct procdata {
+ struct proc_dir_entry *log; /* log entry */
+ char log_name[15]; /* log filename */
+ struct log_data *log_head, *log_tail; /* head and tail for queue */
+ int if_used; /* open count for interface */
+ unsigned char logtmp[LOG_MAX_LINELEN];
+ wait_queue_head_t rd_queue;
+};
+
+
+/**********************************************/
+/* log function for cards error log interface */
+/**********************************************/
+void
+hysdn_card_errlog(hysdn_card *card, tErrLogEntry *logp, int maxsize)
+{
+ char buf[ERRLOG_TEXT_SIZE + 40];
+
+ sprintf(buf, "LOG 0x%08lX 0x%08lX : %s\n", logp->ulErrType, logp->ulErrSubtype, logp->ucText);
+ put_log_buffer(card, buf); /* output the string */
+} /* hysdn_card_errlog */
+
+/***************************************************/
+/* Log function using format specifiers for output */
+/***************************************************/
+void
+hysdn_addlog(hysdn_card *card, char *fmt, ...)
+{
+ struct procdata *pd = card->proclog;
+ char *cp;
+ va_list args;
+
+ if (!pd)
+		return;		/* log structure non-existent */
+
+ cp = pd->logtmp;
+ cp += sprintf(cp, "HYSDN: card %d ", card->myid);
+
+ va_start(args, fmt);
+ cp += vsprintf(cp, fmt, args);
+ va_end(args);
+ *cp++ = '\n';
+ *cp = 0;
+
+ if (card->debug_flags & DEB_OUT_SYSLOG)
+ printk(KERN_INFO "%s", pd->logtmp);
+ else
+ put_log_buffer(card, pd->logtmp);
+
+} /* hysdn_addlog */
+
+/********************************************/
+/* put a log buffer into the log queue.     */
+/* This buffer will be kept until all files */
+/* opened for read have received the contents. */
+/* Flushes buffers no longer in use.        */
+/********************************************/
+static void
+put_log_buffer(hysdn_card *card, char *cp)
+{
+ struct log_data *ib;
+ struct procdata *pd = card->proclog;
+ unsigned long flags;
+
+ if (!pd)
+ return;
+ if (!cp)
+ return;
+ if (!*cp)
+ return;
+ if (pd->if_used <= 0)
+ return; /* no open file for read */
+
+ if (!(ib = kmalloc(sizeof(struct log_data) + strlen(cp), GFP_ATOMIC)))
+ return; /* no memory */
+ strcpy(ib->log_start, cp); /* set output string */
+ ib->next = NULL;
+ ib->proc_ctrl = pd; /* point to own control structure */
+ spin_lock_irqsave(&card->hysdn_lock, flags);
+ ib->usage_cnt = pd->if_used;
+ if (!pd->log_head)
+ pd->log_head = ib; /* new head */
+ else
+ pd->log_tail->next = ib; /* follows existing messages */
+ pd->log_tail = ib; /* new tail */
+
+	/* delete old entries */
+ while (pd->log_head->next) {
+ if ((pd->log_head->usage_cnt <= 0) &&
+ (pd->log_head->next->usage_cnt <= 0)) {
+ ib = pd->log_head;
+ pd->log_head = pd->log_head->next;
+ kfree(ib);
+ } else {
+ break;
+ }
+ } /* pd->log_head->next */
+
+ spin_unlock_irqrestore(&card->hysdn_lock, flags);
+
+ wake_up_interruptible(&(pd->rd_queue)); /* announce new entry */
+} /* put_log_buffer */
+
+
+/******************************/
+/* file operations and tables */
+/******************************/
+
+/****************************************/
+/* write log file -> set log level bits */
+/****************************************/
+static ssize_t
+hysdn_log_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
+{
+ int rc;
+ hysdn_card *card = file->private_data;
+
+ rc = kstrtoul_from_user(buf, count, 0, &card->debug_flags);
+ if (rc < 0)
+ return rc;
+ hysdn_addlog(card, "debug set to 0x%lx", card->debug_flags);
+ return (count);
+} /* hysdn_log_write */
+
+/******************/
+/* read log file */
+/******************/
+static ssize_t
+hysdn_log_read(struct file *file, char __user *buf, size_t count, loff_t *off)
+{
+ struct log_data *inf;
+ int len;
+ hysdn_card *card = PDE_DATA(file_inode(file));
+
+ if (!(inf = *((struct log_data **) file->private_data))) {
+ struct procdata *pd = card->proclog;
+ if (file->f_flags & O_NONBLOCK)
+ return (-EAGAIN);
+
+ wait_event_interruptible(pd->rd_queue, (inf =
+ *((struct log_data **) file->private_data)));
+ }
+ if (!inf)
+ return (0);
+
+ inf->usage_cnt--; /* new usage count */
+ file->private_data = &inf->next; /* next structure */
+ if ((len = strlen(inf->log_start)) <= count) {
+ if (copy_to_user(buf, inf->log_start, len))
+ return -EFAULT;
+ *off += len;
+ return (len);
+ }
+ return (0);
+} /* hysdn_log_read */
+
+/******************/
+/* open log file */
+/******************/
+static int
+hysdn_log_open(struct inode *ino, struct file *filep)
+{
+ hysdn_card *card = PDE_DATA(ino);
+
+ mutex_lock(&hysdn_log_mutex);
+ if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
+ /* write only access -> write log level only */
+ filep->private_data = card; /* remember our own card */
+ } else if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
+ struct procdata *pd = card->proclog;
+ unsigned long flags;
+
+ /* read access -> log/debug read */
+ spin_lock_irqsave(&card->hysdn_lock, flags);
+ pd->if_used++;
+ if (pd->log_head)
+ filep->private_data = &pd->log_tail->next;
+ else
+ filep->private_data = &pd->log_head;
+ spin_unlock_irqrestore(&card->hysdn_lock, flags);
+ } else { /* simultaneous read/write access forbidden ! */
+ mutex_unlock(&hysdn_log_mutex);
+ return (-EPERM); /* no permission this time */
+ }
+ mutex_unlock(&hysdn_log_mutex);
+ return nonseekable_open(ino, filep);
+} /* hysdn_log_open */
+
+/*******************************************************************************/
+/* close a cardlog file. If the file has been opened for write only, the */
+/* debug level has already been written and nothing more is to be done. */
+/* Otherwise the file is handled as log output. In this case the interface usage */
+/* count is decremented and all buffers are notified of the close. If this file */
+/* was the last one to be closed, all buffers are freed. */
+/*******************************************************************************/
+static int
+hysdn_log_close(struct inode *ino, struct file *filep)
+{
+ struct log_data *inf;
+ struct procdata *pd;
+ hysdn_card *card;
+ int retval = 0;
+
+ mutex_lock(&hysdn_log_mutex);
+ if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
+ /* write only access -> write debug level written */
+ retval = 0; /* success */
+ } else {
+ /* read access -> log/debug read, mark one further file as closed */
+
+ inf = *((struct log_data **) filep->private_data); /* get first log entry */
+ if (inf)
+ pd = (struct procdata *) inf->proc_ctrl; /* still entries there */
+ else {
+ /* no info available -> search card */
+ card = PDE_DATA(file_inode(filep));
+ pd = card->proclog; /* pointer to procfs log */
+ }
+ if (pd)
+ pd->if_used--; /* decrement interface usage count by one */
+
+ while (inf) {
+ inf->usage_cnt--; /* decrement usage count for buffers */
+ inf = inf->next;
+ }
+
+ if (pd)
+ if (pd->if_used <= 0) /* delete buffers if last file closed */
+ while (pd->log_head) {
+ inf = pd->log_head;
+ pd->log_head = pd->log_head->next;
+ kfree(inf);
+ }
+ } /* read access */
+ mutex_unlock(&hysdn_log_mutex);
+
+ return (retval);
+} /* hysdn_log_close */
+
+/*************************************************/
+/* select/poll routine to be able using select() */
+/*************************************************/
+static __poll_t
+hysdn_log_poll(struct file *file, poll_table *wait)
+{
+ __poll_t mask = 0;
+ hysdn_card *card = PDE_DATA(file_inode(file));
+ struct procdata *pd = card->proclog;
+
+ if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE)
+ return (mask); /* no polling for write supported */
+
+ poll_wait(file, &(pd->rd_queue), wait);
+
+ if (*((struct log_data **) file->private_data))
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+ return mask;
+} /* hysdn_log_poll */
+
+/*****************************************************/
+/* table for log filesystem functions defined above. */
+/*****************************************************/
+static const struct file_operations log_fops =
+{
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = hysdn_log_read,
+ .write = hysdn_log_write,
+ .poll = hysdn_log_poll,
+ .open = hysdn_log_open,
+ .release = hysdn_log_close,
+};
+
+
+/***********************************************************************************/
+/* hysdn_proclog_init is called when the module is loaded after creating the cards' */
+/* conf files. */
+/***********************************************************************************/
+int
+hysdn_proclog_init(hysdn_card *card)
+{
+ struct procdata *pd;
+
+ /* create a cardlog proc entry */
+
+ if ((pd = kzalloc(sizeof(struct procdata), GFP_KERNEL)) != NULL) {
+ sprintf(pd->log_name, "%s%d", PROC_LOG_BASENAME, card->myid);
+ pd->log = proc_create_data(pd->log_name,
+ S_IFREG | S_IRUGO | S_IWUSR, hysdn_proc_entry,
+ &log_fops, card);
+
+ init_waitqueue_head(&(pd->rd_queue));
+
+ card->proclog = (void *) pd; /* remember procfs structure */
+ }
+ return (0);
+} /* hysdn_proclog_init */
+
+/************************************************************************************/
+/* hysdn_proclog_release is called when the module is unloaded and before the cards' */
+/* conf files are released. */
+/* The module counter is assumed to be 0! */
+/************************************************************************************/
+void
+hysdn_proclog_release(hysdn_card *card)
+{
+ struct procdata *pd;
+
+ if ((pd = (struct procdata *) card->proclog) != NULL) {
+ if (pd->log)
+ remove_proc_entry(pd->log_name, hysdn_proc_entry);
+ kfree(pd); /* release memory */
+ card->proclog = NULL;
+ }
+} /* hysdn_proclog_release */
diff --git a/drivers/staging/isdn/hysdn/hysdn_sched.c b/drivers/staging/isdn/hysdn/hysdn_sched.c
new file mode 100644
index 000000000000..31d7c1415543
--- /dev/null
+++ b/drivers/staging/isdn/hysdn/hysdn_sched.c
@@ -0,0 +1,197 @@
+/* $Id: hysdn_sched.c,v 1.5.6.4 2001/11/06 21:58:19 kai Exp $
+ *
+ * Linux driver for HYSDN cards
+ * scheduler routines for handling exchange card <-> pc.
+ *
+ * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
+ * Copyright 1999 by Werner Cornelius (werner@titro.de)
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#include <linux/signal.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+
+#include "hysdn_defs.h"
+
+/*****************************************************************************/
+/* hysdn_sched_rx is called from the card's handler to announce that new data */
+/* is available from the card. The routine has to handle the data and return */
+/* a nonzero code if the data could be processed (or even thrown away); if */
+/* no room to buffer the data is available, a zero return tells the card */
+/* to keep the data until later. */
+/*****************************************************************************/
+int
+hysdn_sched_rx(hysdn_card *card, unsigned char *buf, unsigned short len,
+ unsigned short chan)
+{
+
+ switch (chan) {
+ case CHAN_NDIS_DATA:
+ if (hynet_enable & (1 << card->myid)) {
+ /* give packet to network handler */
+ hysdn_rx_netpkt(card, buf, len);
+ }
+ break;
+
+ case CHAN_ERRLOG:
+ hysdn_card_errlog(card, (tErrLogEntry *) buf, len);
+ if (card->err_log_state == ERRLOG_STATE_ON)
+ card->err_log_state = ERRLOG_STATE_START; /* start new fetch */
+ break;
+#ifdef CONFIG_HYSDN_CAPI
+ case CHAN_CAPI:
+/* give packet to CAPI handler */
+ if (hycapi_enable & (1 << card->myid)) {
+ hycapi_rx_capipkt(card, buf, len);
+ }
+ break;
+#endif /* CONFIG_HYSDN_CAPI */
+ default:
+ printk(KERN_INFO "irq message channel %d len %d unhandled \n", chan, len);
+ break;
+
+ } /* switch rx channel */
+
+ return (1); /* always handled */
+} /* hysdn_sched_rx */
+
+/*****************************************************************************/
+/* hysdn_sched_tx is called from the card's handler to announce that there is */
+/* room in the tx-buffer to the card and data may be sent if needed. */
+/* If the routine wants to send data it must fill buf, len and chan with the */
+/* appropriate data and return a nonzero value. A zero return means no new */
+/* data needs to be sent. maxlen specifies the buffer size available for */
+/* sending. */
+/*****************************************************************************/
+int
+hysdn_sched_tx(hysdn_card *card, unsigned char *buf,
+ unsigned short volatile *len, unsigned short volatile *chan,
+ unsigned short maxlen)
+{
+ struct sk_buff *skb;
+
+ if (card->net_tx_busy) {
+ card->net_tx_busy = 0; /* reset flag */
+ hysdn_tx_netack(card); /* acknowledge packet send */
+ } /* a network packet has completely been transferred */
+ /* first of all async requests are handled */
+ if (card->async_busy) {
+ if (card->async_len <= maxlen) {
+ memcpy(buf, card->async_data, card->async_len);
+ *len = card->async_len;
+ *chan = card->async_channel;
+ card->async_busy = 0; /* reset request */
+ return (1);
+ }
+ card->async_busy = 0; /* in case of length error */
+ } /* async request */
+ if ((card->err_log_state == ERRLOG_STATE_START) &&
+ (maxlen >= ERRLOG_CMD_REQ_SIZE)) {
+ strcpy(buf, ERRLOG_CMD_REQ); /* copy the command */
+ *len = ERRLOG_CMD_REQ_SIZE; /* buffer length */
+ *chan = CHAN_ERRLOG; /* and channel */
+ card->err_log_state = ERRLOG_STATE_ON; /* new state is on */
+		return (1);	/* tell that data should be sent */
+ } /* error log start and able to send */
+ if ((card->err_log_state == ERRLOG_STATE_STOP) &&
+ (maxlen >= ERRLOG_CMD_STOP_SIZE)) {
+ strcpy(buf, ERRLOG_CMD_STOP); /* copy the command */
+ *len = ERRLOG_CMD_STOP_SIZE; /* buffer length */
+ *chan = CHAN_ERRLOG; /* and channel */
+ card->err_log_state = ERRLOG_STATE_OFF; /* new state is off */
+		return (1);	/* tell that data should be sent */
+	}			/* error log stop and able to send */
+ /* now handle network interface packets */
+ if ((hynet_enable & (1 << card->myid)) &&
+ (skb = hysdn_tx_netget(card)) != NULL)
+ {
+ if (skb->len <= maxlen) {
+ /* copy the packet to the buffer */
+ skb_copy_from_linear_data(skb, buf, skb->len);
+ *len = skb->len;
+ *chan = CHAN_NDIS_DATA;
+ card->net_tx_busy = 1; /* we are busy sending network data */
+ return (1); /* go and send the data */
+ } else
+			hysdn_tx_netack(card);	/* acknowledge packet -> throw away */
+ } /* send a network packet if available */
+#ifdef CONFIG_HYSDN_CAPI
+ if (((hycapi_enable & (1 << card->myid))) &&
+ ((skb = hycapi_tx_capiget(card)) != NULL))
+ {
+ if (skb->len <= maxlen) {
+ skb_copy_from_linear_data(skb, buf, skb->len);
+ *len = skb->len;
+ *chan = CHAN_CAPI;
+ hycapi_tx_capiack(card);
+ return (1); /* go and send the data */
+ }
+ }
+#endif /* CONFIG_HYSDN_CAPI */
+ return (0); /* nothing to send */
+} /* hysdn_sched_tx */
+
+
+/*****************************************************************************/
+/* send one config line to the card and return 0 if successful, otherwise a */
+/* negative error code. */
+/* The function works with timeouts perhaps not giving the greatest speed */
+/* sending the line, but this should be meaningless because only some lines */
+/* are to be sent and this happens very seldom. */
+/*****************************************************************************/
+int
+hysdn_tx_cfgline(hysdn_card *card, unsigned char *line, unsigned short chan)
+{
+	int cnt = 50;		/* timeout intervals */
+ unsigned long flags;
+
+ if (card->debug_flags & LOG_SCHED_ASYN)
+ hysdn_addlog(card, "async tx-cfg chan=%d len=%d", chan, strlen(line) + 1);
+
+ while (card->async_busy) {
+
+ if (card->debug_flags & LOG_SCHED_ASYN)
+ hysdn_addlog(card, "async tx-cfg delayed");
+
+ msleep_interruptible(20); /* Timeout 20ms */
+ if (!--cnt)
+ return (-ERR_ASYNC_TIME); /* timed out */
+ } /* wait for buffer to become free */
+
+ spin_lock_irqsave(&card->hysdn_lock, flags);
+ strcpy(card->async_data, line);
+ card->async_len = strlen(line) + 1;
+ card->async_channel = chan;
+ card->async_busy = 1; /* request transfer */
+
+ /* now queue the task */
+ schedule_work(&card->irq_queue);
+ spin_unlock_irqrestore(&card->hysdn_lock, flags);
+
+ if (card->debug_flags & LOG_SCHED_ASYN)
+ hysdn_addlog(card, "async tx-cfg data queued");
+
+ cnt++; /* short delay */
+
+ while (card->async_busy) {
+
+ if (card->debug_flags & LOG_SCHED_ASYN)
+ hysdn_addlog(card, "async tx-cfg waiting for tx-ready");
+
+ msleep_interruptible(20); /* Timeout 20ms */
+ if (!--cnt)
+ return (-ERR_ASYNC_TIME); /* timed out */
+ } /* wait for buffer to become free again */
+
+ if (card->debug_flags & LOG_SCHED_ASYN)
+ hysdn_addlog(card, "async tx-cfg data send");
+
+	return (0);		/* line sent correctly */
+} /* hysdn_tx_cfgline */
diff --git a/drivers/staging/isdn/hysdn/ince1pc.h b/drivers/staging/isdn/hysdn/ince1pc.h
new file mode 100644
index 000000000000..cab68361de65
--- /dev/null
+++ b/drivers/staging/isdn/hysdn/ince1pc.h
@@ -0,0 +1,134 @@
+/*
+ * Linux driver for HYSDN cards
+ * common definitions for both sides of the bus:
+ * - conventions both spoolers must know
+ * - channel numbers agreed upon
+ *
+ * Author M. Steinkopf
+ * Copyright 1999 by M. Steinkopf
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#ifndef __INCE1PC_H__
+#define __INCE1PC_H__
+
+/* basic scalar definitions have the same meaning,
+ * but their declaration location depends on the environment
+ */
+
+/*--------------------------------------channel numbers---------------------*/
+#define CHAN_SYSTEM 0x0001 /* system channel (spooler to spooler) */
+#define CHAN_ERRLOG 0x0005 /* error logger */
+#define CHAN_CAPI 0x0064 /* CAPI interface */
+#define CHAN_NDIS_DATA 0x1001 /* NDIS data transfer */
+
+/*--------------------------------------POF ready msg-----------------------*/
+/* NOTE: after booting POF sends system ready message to PC: */
+#define RDY_MAGIC 0x52535953UL /* 'SYSR' reversed */
+#define RDY_MAGIC_SIZE 4 /* size in bytes */
+
+#define MAX_N_TOK_BYTES 255
+
+#define MIN_RDY_MSG_SIZE RDY_MAGIC_SIZE
+#define MAX_RDY_MSG_SIZE (RDY_MAGIC_SIZE + MAX_N_TOK_BYTES)
+
+#define SYSR_TOK_END 0
+#define SYSR_TOK_B_CHAN 1 /* nr. of B-Channels; DataLen=1; def: 2 */
+#define SYSR_TOK_FAX_CHAN 2 /* nr. of FAX Channels; DataLen=1; def: 0 */
+#define SYSR_TOK_MAC_ADDR 3 /* MAC-Address; DataLen=6; def: auto */
+#define SYSR_TOK_ESC 255 /* undefined data size yet */
+/* default values, if not corrected by token: */
+#define SYSR_TOK_B_CHAN_DEF 2 /* assume 2 B-Channels */
+#define SYSR_TOK_FAX_CHAN_DEF 1 /* assume 1 FAX Channel */
+
+/* syntax of new SYSR token stream:
+ * channel: CHAN_SYSTEM
+ * msgsize: MIN_RDY_MSG_SIZE <= x <= MAX_RDY_MSG_SIZE
+ * RDY_MAGIC_SIZE <= x <= (RDY_MAGIC_SIZE+MAX_N_TOK_BYTES)
+ * msg : 0 1 2 3 {4 5 6 ..}
+ * S Y S R MAX_N_TOK_BYTES bytes of TokenStream
+ *
+ * TokenStream := empty
+ * | {NonEndTokenChunk} EndToken RotlCRC
+ * NonEndTokenChunk:= NonEndTokenId DataLen [Data]
+ * NonEndTokenId := 0x01 .. 0xFE 1 BYTE
+ * DataLen := 0x00 .. 0xFF 1 BYTE
+ * Data := DataLen bytes
+ * EndToken := 0x00
+ * RotlCRC := special 1 byte CRC over all NonEndTokenChunk bytes
+ *                          see RotlCRC algorithm
+ *
+ * RotlCRC algorithm:
+ * ucSum= 0 1 unsigned char
+ * for all NonEndTokenChunk bytes:
+ * ROTL(ucSum,1) rotate left by 1
+ *      ucSum += Char;               add current byte with wrap around
+ * RotlCRC= ~ucSum; invert all bits for result
+ *
+ * note:
+ * - for 16-bit FIFO add padding 0 byte to achieve even token data bytes!
+ */
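A minimal C sketch of the RotlCRC computation described above; the function name is hypothetical and the 8-bit rotate is written out explicitly. It covers all NonEndTokenChunk bytes and inverts the sum once at the end, as the comment specifies.

/* Sketch only: RotlCRC over the NonEndTokenChunk bytes of a SYSR message. */
static unsigned char sysr_rotl_crc(const unsigned char *buf, unsigned int len)
{
	unsigned char sum = 0;
	unsigned int i;

	for (i = 0; i < len; i++) {
		sum = (unsigned char)((sum << 1) | (sum >> 7));	/* ROTL(sum, 1) */
		sum += buf[i];					/* add with wrap around */
	}
	return (unsigned char)~sum;				/* invert all bits for result */
}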
+
+/*--------------------------------------error logger------------------------*/
+/* note: pof needs final 0 ! */
+#define ERRLOG_CMD_REQ "ERRLOG ON"
+#define ERRLOG_CMD_REQ_SIZE 10 /* with final 0 byte ! */
+#define ERRLOG_CMD_STOP "ERRLOG OFF"
+#define ERRLOG_CMD_STOP_SIZE 11 /* with final 0 byte ! */
+
+#define ERRLOG_ENTRY_SIZE 64 /* sizeof(tErrLogEntry) */
+ /* remaining text size = 55 */
+#define ERRLOG_TEXT_SIZE (ERRLOG_ENTRY_SIZE - 2 * 4 - 1)
+
+typedef struct ErrLogEntry_tag {
+
+ /*00 */ unsigned long ulErrType;
+
+ /*04 */ unsigned long ulErrSubtype;
+
+ /*08 */ unsigned char ucTextSize;
+
+ /*09 */ unsigned char ucText[ERRLOG_TEXT_SIZE];
+ /* ASCIIZ of len ucTextSize-1 */
+
+/*40 */
+} tErrLogEntry;
+
+
+#if defined(__TURBOC__)
+#if sizeof(tErrLogEntry) != ERRLOG_ENTRY_SIZE
+#error size of tErrLogEntry != ERRLOG_ENTRY_SIZE
+#endif /* */
+#endif /* */
+
+/*--------------------------------------DPRAM boot spooler------------------*/
+/* this is the structure used between the pc and the
+ * hyperstone to exchange boot data
+ */
+#define DPRAM_SPOOLER_DATA_SIZE 0x20
+typedef struct DpramBootSpooler_tag {
+
+ /*00 */ unsigned char Len;
+
+ /*01 */ volatile unsigned char RdPtr;
+
+ /*02 */ unsigned char WrPtr;
+
+ /*03 */ unsigned char Data[DPRAM_SPOOLER_DATA_SIZE];
+
+/*23 */
+} tDpramBootSpooler;
+
+
+#define DPRAM_SPOOLER_MIN_SIZE 5 /* Len+RdPtr+Wrptr+2*data */
+#define DPRAM_SPOOLER_DEF_SIZE 0x23 /* current default size */
+
+/*--------------------------------------HYCARD/ERGO DPRAM SoftUart----------*/
+/* at DPRAM offset 0x1C00: */
+#define SIZE_RSV_SOFT_UART 0x1B0 /* 432 bytes reserved for SoftUart */
+
+
+#endif /* __INCE1PC_H__ */
diff --git a/drivers/staging/kpc2000/Kconfig b/drivers/staging/kpc2000/Kconfig
index fb5922928f47..3bb2efd511c4 100644
--- a/drivers/staging/kpc2000/Kconfig
+++ b/drivers/staging/kpc2000/Kconfig
@@ -2,7 +2,9 @@
config KPC2000
bool "Daktronics KPC Device support"
+ select MFD_CORE
depends on PCI
+ depends on UIO
help
Select this if you wish to use the Daktronics KPC PCI devices
diff --git a/drivers/staging/kpc2000/kpc_dma/fileops.c b/drivers/staging/kpc2000/kpc_dma/fileops.c
index 5741d2b49a7d..616658709bd9 100644
--- a/drivers/staging/kpc2000/kpc_dma/fileops.c
+++ b/drivers/staging/kpc2000/kpc_dma/fileops.c
@@ -8,7 +8,7 @@
#include <linux/errno.h> /* error codes */
#include <linux/types.h> /* size_t */
#include <linux/cdev.h>
-#include <asm/uaccess.h> /* copy_*_user */
+#include <linux/uaccess.h> /* copy_*_user */
#include <linux/aio.h> /* aio stuff */
#include <linux/highmem.h>
#include <linux/pagemap.h>
@@ -116,13 +116,11 @@ int kpc_dma_transfer(struct dev_private_data *priv, struct kiocb *kcb, unsigned
if (desc_needed >= ldev->desc_pool_cnt){
dev_warn(&priv->ldev->pldev->dev, " mapped_entry_count = %d num_descrs_needed = %d num_descrs_avail = %d TOO MANY to ever complete!\n", acd->mapped_entry_count, desc_needed, num_descrs_avail);
rv = -EAGAIN;
- unlock_engine(ldev);
goto err_descr_too_many;
}
if (desc_needed > num_descrs_avail){
dev_warn(&priv->ldev->pldev->dev, " mapped_entry_count = %d num_descrs_needed = %d num_descrs_avail = %d Too many to complete right now.\n", acd->mapped_entry_count, desc_needed, num_descrs_avail);
rv = -EMSGSIZE;
- unlock_engine(ldev);
goto err_descr_too_many;
}
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index f77f5eee7fc2..534d85d6c5e3 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -20,15 +20,19 @@ menuconfig STAGING_MEDIA
if STAGING_MEDIA && MEDIA_SUPPORT
# Please keep them in alphabetic order
+source "drivers/staging/media/allegro-dvt/Kconfig"
+
source "drivers/staging/media/bcm2048/Kconfig"
source "drivers/staging/media/davinci_vpfe/Kconfig"
+source "drivers/staging/media/hantro/Kconfig"
+
source "drivers/staging/media/imx/Kconfig"
-source "drivers/staging/media/omap4iss/Kconfig"
+source "drivers/staging/media/meson/vdec/Kconfig"
-source "drivers/staging/media/rockchip/vpu/Kconfig"
+source "drivers/staging/media/omap4iss/Kconfig"
source "drivers/staging/media/sunxi/Kconfig"
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 99218bfc997f..c486298194da 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -1,10 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VIDEO_ALLEGRO_DVT) += allegro-dvt/
obj-$(CONFIG_I2C_BCM2048) += bcm2048/
obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx/
obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/
+obj-$(CONFIG_VIDEO_MESON_VDEC) += meson/vdec/
obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
obj-$(CONFIG_VIDEO_SUNXI) += sunxi/
obj-$(CONFIG_TEGRA_VDE) += tegra-vde/
-obj-$(CONFIG_VIDEO_ROCKCHIP_VPU) += rockchip/vpu/
+obj-$(CONFIG_VIDEO_HANTRO) += hantro/
obj-$(CONFIG_VIDEO_IPU3_IMGU) += ipu3/
obj-$(CONFIG_SOC_CAMERA) += soc_camera/
diff --git a/drivers/staging/media/allegro-dvt/Kconfig b/drivers/staging/media/allegro-dvt/Kconfig
new file mode 100644
index 000000000000..6b7107d9995c
--- /dev/null
+++ b/drivers/staging/media/allegro-dvt/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+config VIDEO_ALLEGRO_DVT
+ tristate "Allegro DVT Video IP Core"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ select V4L2_MEM2MEM_DEV
+ select VIDEOBUF2_DMA_CONTIG
+ select REGMAP
+ select REGMAP_MMIO
+ help
+ Support for the encoder video IP core by Allegro DVT. This core is
+ found for example on the Xilinx ZynqMP SoC in the EV family and is
+ called VCU in the reference manual.
+
+ To compile this driver as a module, choose M here: the module
+ will be called allegro.
diff --git a/drivers/staging/media/allegro-dvt/Makefile b/drivers/staging/media/allegro-dvt/Makefile
new file mode 100644
index 000000000000..80817160815c
--- /dev/null
+++ b/drivers/staging/media/allegro-dvt/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+allegro-objs := allegro-core.o nal-h264.o
+
+obj-$(CONFIG_VIDEO_ALLEGRO_DVT) += allegro.o
diff --git a/drivers/staging/media/allegro-dvt/TODO b/drivers/staging/media/allegro-dvt/TODO
new file mode 100644
index 000000000000..99e19be0e45a
--- /dev/null
+++ b/drivers/staging/media/allegro-dvt/TODO
@@ -0,0 +1,4 @@
+TODO:
+
+- This driver is waiting for the stateful encoder spec and corresponding
+ v4l2-compliance tests to be finalized.
diff --git a/drivers/staging/media/allegro-dvt/allegro-core.c b/drivers/staging/media/allegro-dvt/allegro-core.c
new file mode 100644
index 000000000000..f050c7347fd5
--- /dev/null
+++ b/drivers/staging/media/allegro-dvt/allegro-core.c
@@ -0,0 +1,3014 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Pengutronix, Michael Tretter <kernel@pengutronix.de>
+ *
+ * Allegro DVT video encoder driver
+ */
+
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "nal-h264.h"
+
+/*
+ * Support up to 4k video streams. The hardware actually supports higher
+ * resolutions, which are specified in PG252 June 6, 2018 (H.264/H.265 Video
+ * Codec Unit v1.1) Chapter 3.
+ */
+#define ALLEGRO_WIDTH_MIN 128
+#define ALLEGRO_WIDTH_DEFAULT 1920
+#define ALLEGRO_WIDTH_MAX 3840
+#define ALLEGRO_HEIGHT_MIN 64
+#define ALLEGRO_HEIGHT_DEFAULT 1080
+#define ALLEGRO_HEIGHT_MAX 2160
+
+#define ALLEGRO_GOP_SIZE_DEFAULT 25
+#define ALLEGRO_GOP_SIZE_MAX 1000
+
+/*
+ * MCU Control Registers
+ *
+ * The Zynq UltraScale+ Devices Register Reference documents the registers
+ * with an offset of 0x9000, which equals the size of the SRAM and one page
+ * gap. The driver handles SRAM and registers separately and, therefore, is
+ * oblivious of the offset.
+ */
+#define AL5_MCU_RESET 0x0000
+#define AL5_MCU_RESET_SOFT BIT(0)
+#define AL5_MCU_RESET_REGS BIT(1)
+#define AL5_MCU_RESET_MODE 0x0004
+#define AL5_MCU_RESET_MODE_SLEEP BIT(0)
+#define AL5_MCU_RESET_MODE_HALT BIT(1)
+#define AL5_MCU_STA 0x0008
+#define AL5_MCU_STA_SLEEP BIT(0)
+#define AL5_MCU_WAKEUP 0x000c
+
+#define AL5_ICACHE_ADDR_OFFSET_MSB 0x0010
+#define AL5_ICACHE_ADDR_OFFSET_LSB 0x0014
+#define AL5_DCACHE_ADDR_OFFSET_MSB 0x0018
+#define AL5_DCACHE_ADDR_OFFSET_LSB 0x001c
+
+#define AL5_MCU_INTERRUPT 0x0100
+#define AL5_ITC_CPU_IRQ_MSK 0x0104
+#define AL5_ITC_CPU_IRQ_CLR 0x0108
+#define AL5_ITC_CPU_IRQ_STA 0x010C
+#define AL5_ITC_CPU_IRQ_STA_TRIGGERED BIT(0)
+
+#define AXI_ADDR_OFFSET_IP 0x0208
+
+/*
+ * The MCU accesses the system memory with a 2G offset compared to CPU
+ * physical addresses.
+ */
+#define MCU_CACHE_OFFSET SZ_2G
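As a hedged sketch of what this offset implies when buffers are handed to the MCU: a CPU physical (DMA) address below 2 GiB becomes an MCU address by setting the 2 GiB bit. The helper name is made up for illustration; the driver may well do this inline.

/*
 * Sketch only: derive the MCU's view of a buffer address.  Assumes the
 * buffer lies below 2 GiB so that lower_32_bits() loses nothing and
 * adding or OR-ing MCU_CACHE_OFFSET are equivalent.
 */
static inline u32 mcu_view_of_addr(dma_addr_t paddr)
{
	return lower_32_bits(paddr) | MCU_CACHE_OFFSET;
}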
+
+/*
+ * The driver needs to reserve some space at the beginning of capture buffers,
+ * because it needs to write SPS/PPS NAL units. The encoder writes the actual
+ * frame data after the offset.
+ */
+#define ENCODER_STREAM_OFFSET SZ_64
+
+#define SIZE_MACROBLOCK 16
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+struct allegro_buffer {
+ void *vaddr;
+ dma_addr_t paddr;
+ size_t size;
+ struct list_head head;
+};
+
+struct allegro_channel;
+
+struct allegro_mbox {
+ unsigned int head;
+ unsigned int tail;
+ unsigned int data;
+ size_t size;
+ /* protect mailbox from simultaneous accesses */
+ struct mutex lock;
+};
+
+struct allegro_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device video_dev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct platform_device *plat_dev;
+
+ /* mutex protecting vb2_queue structure */
+ struct mutex lock;
+
+ struct regmap *regmap;
+ struct regmap *sram;
+
+ struct allegro_buffer firmware;
+ struct allegro_buffer suballocator;
+
+ struct completion init_complete;
+
+ /* The mailbox interface */
+ struct allegro_mbox mbox_command;
+ struct allegro_mbox mbox_status;
+
+ /*
+	 * The downstream driver limits the number of users to 64, thus I can use
+ * a bitfield for the user_ids that are in use. See also user_id in
+ * struct allegro_channel.
+ */
+ unsigned long channel_user_ids;
+ struct list_head channels;
+};
+
+static struct regmap_config allegro_regmap_config = {
+ .name = "regmap",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0xfff,
+ .cache_type = REGCACHE_NONE,
+};
+
+static struct regmap_config allegro_sram_config = {
+ .name = "sram",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x7fff,
+ .cache_type = REGCACHE_NONE,
+};
+
+enum allegro_state {
+ ALLEGRO_STATE_ENCODING,
+ ALLEGRO_STATE_DRAIN,
+ ALLEGRO_STATE_WAIT_FOR_BUFFER,
+ ALLEGRO_STATE_STOPPED,
+};
+
+#define fh_to_channel(__fh) container_of(__fh, struct allegro_channel, fh)
+
+struct allegro_channel {
+ struct allegro_dev *dev;
+ struct v4l2_fh fh;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ unsigned int width;
+ unsigned int height;
+ unsigned int stride;
+
+ enum v4l2_colorspace colorspace;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
+ enum v4l2_xfer_func xfer_func;
+
+ u32 pixelformat;
+ unsigned int sizeimage_raw;
+ unsigned int osequence;
+
+ u32 codec;
+ enum v4l2_mpeg_video_h264_profile profile;
+ enum v4l2_mpeg_video_h264_level level;
+ unsigned int sizeimage_encoded;
+ unsigned int csequence;
+
+ enum v4l2_mpeg_video_bitrate_mode bitrate_mode;
+ unsigned int bitrate;
+ unsigned int bitrate_peak;
+ unsigned int cpb_size;
+ unsigned int gop_size;
+
+ struct v4l2_ctrl *mpeg_video_h264_profile;
+ struct v4l2_ctrl *mpeg_video_h264_level;
+ struct v4l2_ctrl *mpeg_video_bitrate_mode;
+ struct v4l2_ctrl *mpeg_video_bitrate;
+ struct v4l2_ctrl *mpeg_video_bitrate_peak;
+ struct v4l2_ctrl *mpeg_video_cpb_size;
+ struct v4l2_ctrl *mpeg_video_gop_size;
+
+ /* user_id is used to identify the channel during CREATE_CHANNEL */
+	/* not sure what to set here or if this is actually required */
+ int user_id;
+ /* channel_id is set by the mcu and used by all later commands */
+ int mcu_channel_id;
+
+ struct list_head buffers_reference;
+ struct list_head buffers_intermediate;
+
+ struct list_head list;
+ struct completion completion;
+
+ unsigned int error;
+ enum allegro_state state;
+};
+
+static inline int
+allegro_set_state(struct allegro_channel *channel, enum allegro_state state)
+{
+ channel->state = state;
+
+ return 0;
+}
+
+static inline enum allegro_state
+allegro_get_state(struct allegro_channel *channel)
+{
+ return channel->state;
+}
+
+struct fw_info {
+ unsigned int id;
+ unsigned int id_codec;
+ char *version;
+ unsigned int mailbox_cmd;
+ unsigned int mailbox_status;
+ size_t mailbox_size;
+ size_t suballocator_size;
+};
+
+static const struct fw_info supported_firmware[] = {
+ {
+ .id = 18296,
+ .id_codec = 96272,
+ .version = "v2018.2",
+ .mailbox_cmd = 0x7800,
+ .mailbox_status = 0x7c00,
+ .mailbox_size = 0x400 - 0x8,
+ .suballocator_size = SZ_16M,
+ },
+};
+
+enum mcu_msg_type {
+ MCU_MSG_TYPE_INIT = 0x0000,
+ MCU_MSG_TYPE_CREATE_CHANNEL = 0x0005,
+ MCU_MSG_TYPE_DESTROY_CHANNEL = 0x0006,
+ MCU_MSG_TYPE_ENCODE_FRAME = 0x0007,
+ MCU_MSG_TYPE_PUT_STREAM_BUFFER = 0x0012,
+ MCU_MSG_TYPE_PUSH_BUFFER_INTERMEDIATE = 0x000e,
+ MCU_MSG_TYPE_PUSH_BUFFER_REFERENCE = 0x000f,
+};
+
+static const char *msg_type_name(enum mcu_msg_type type)
+{
+ static char buf[9];
+
+ switch (type) {
+ case MCU_MSG_TYPE_INIT:
+ return "INIT";
+ case MCU_MSG_TYPE_CREATE_CHANNEL:
+ return "CREATE_CHANNEL";
+ case MCU_MSG_TYPE_DESTROY_CHANNEL:
+ return "DESTROY_CHANNEL";
+ case MCU_MSG_TYPE_ENCODE_FRAME:
+ return "ENCODE_FRAME";
+ case MCU_MSG_TYPE_PUT_STREAM_BUFFER:
+ return "PUT_STREAM_BUFFER";
+ case MCU_MSG_TYPE_PUSH_BUFFER_INTERMEDIATE:
+ return "PUSH_BUFFER_INTERMEDIATE";
+ case MCU_MSG_TYPE_PUSH_BUFFER_REFERENCE:
+ return "PUSH_BUFFER_REFERENCE";
+ default:
+ snprintf(buf, sizeof(buf), "(0x%04x)", type);
+ return buf;
+ }
+}
+
+struct mcu_msg_header {
+ u16 length; /* length of the body in bytes */
+ u16 type;
+} __attribute__ ((__packed__));
+
+struct mcu_msg_init_request {
+ struct mcu_msg_header header;
+	u32 reserved0;		/* maybe an unused channel id */
+ u32 suballoc_dma;
+ u32 suballoc_size;
+ s32 l2_cache[3];
+} __attribute__ ((__packed__));
+
+struct mcu_msg_init_response {
+ struct mcu_msg_header header;
+ u32 reserved0;
+} __attribute__ ((__packed__));
+
+struct mcu_msg_create_channel {
+ struct mcu_msg_header header;
+ u32 user_id;
+ u16 width;
+ u16 height;
+ u32 format;
+ u32 colorspace;
+ u32 src_mode;
+ u8 profile;
+ u16 constraint_set_flags;
+ s8 codec;
+ u16 level;
+ u16 tier;
+ u32 sps_param;
+ u32 pps_param;
+
+ u32 enc_option;
+#define AL_OPT_WPP BIT(0)
+#define AL_OPT_TILE BIT(1)
+#define AL_OPT_LF BIT(2)
+#define AL_OPT_LF_X_SLICE BIT(3)
+#define AL_OPT_LF_X_TILE BIT(4)
+#define AL_OPT_SCL_LST BIT(5)
+#define AL_OPT_CONST_INTRA_PRED BIT(6)
+#define AL_OPT_QP_TAB_RELATIVE BIT(7)
+#define AL_OPT_FIX_PREDICTOR BIT(8)
+#define AL_OPT_CUSTOM_LDA BIT(9)
+#define AL_OPT_ENABLE_AUTO_QP BIT(10)
+#define AL_OPT_ADAPT_AUTO_QP BIT(11)
+#define AL_OPT_TRANSFO_SKIP BIT(13)
+#define AL_OPT_FORCE_REC BIT(15)
+#define AL_OPT_FORCE_MV_OUT BIT(16)
+#define AL_OPT_FORCE_MV_CLIP BIT(17)
+#define AL_OPT_LOWLAT_SYNC BIT(18)
+#define AL_OPT_LOWLAT_INT BIT(19)
+#define AL_OPT_RDO_COST_MODE BIT(20)
+
+ s8 beta_offset;
+ s8 tc_offset;
+ u16 reserved10;
+ u32 unknown11;
+ u32 unknown12;
+ u16 num_slices;
+ u16 prefetch_auto;
+ u32 prefetch_mem_offset;
+ u32 prefetch_mem_size;
+ u16 clip_hrz_range;
+ u16 clip_vrt_range;
+ u16 me_range[4];
+ u8 max_cu_size;
+ u8 min_cu_size;
+ u8 max_tu_size;
+ u8 min_tu_size;
+ u8 max_transfo_depth_inter;
+ u8 max_transfo_depth_intra;
+ u16 reserved20;
+ u32 entropy_mode;
+ u32 wp_mode;
+
+ /* rate control param */
+ u32 rate_control_mode;
+ u32 initial_rem_delay;
+ u32 cpb_size;
+ u16 framerate;
+ u16 clk_ratio;
+ u32 target_bitrate;
+ u32 max_bitrate;
+ u16 initial_qp;
+ u16 min_qp;
+ u16 max_qp;
+ s16 ip_delta;
+ s16 pb_delta;
+ u16 golden_ref;
+ u16 golden_delta;
+ u16 golden_ref_frequency;
+ u32 rate_control_option;
+
+ /* gop param */
+ u32 gop_ctrl_mode;
+ u32 freq_ird;
+ u32 freq_lt;
+ u32 gdr_mode;
+ u32 gop_length;
+ u32 unknown39;
+
+ u32 subframe_latency;
+ u32 lda_control_mode;
+} __attribute__ ((__packed__));
+
+struct mcu_msg_create_channel_response {
+ struct mcu_msg_header header;
+ u32 channel_id;
+ u32 user_id;
+ u32 options;
+ u32 num_core;
+ u32 pps_param;
+ u32 int_buffers_count;
+ u32 int_buffers_size;
+ u32 rec_buffers_count;
+ u32 rec_buffers_size;
+ u32 reserved;
+ u32 error_code;
+} __attribute__ ((__packed__));
+
+struct mcu_msg_destroy_channel {
+ struct mcu_msg_header header;
+ u32 channel_id;
+} __attribute__ ((__packed__));
+
+struct mcu_msg_destroy_channel_response {
+ struct mcu_msg_header header;
+ u32 channel_id;
+} __attribute__ ((__packed__));
+
+struct mcu_msg_push_buffers_internal_buffer {
+ u32 dma_addr;
+ u32 mcu_addr;
+ u32 size;
+} __attribute__ ((__packed__));
+
+struct mcu_msg_push_buffers_internal {
+ struct mcu_msg_header header;
+ u32 channel_id;
+ struct mcu_msg_push_buffers_internal_buffer buffer[0];
+} __attribute__ ((__packed__));
+
+struct mcu_msg_put_stream_buffer {
+ struct mcu_msg_header header;
+ u32 channel_id;
+ u32 dma_addr;
+ u32 mcu_addr;
+ u32 size;
+ u32 offset;
+ u64 stream_id;
+} __attribute__ ((__packed__));
+
+struct mcu_msg_encode_frame {
+ struct mcu_msg_header header;
+ u32 channel_id;
+ u32 reserved;
+
+ u32 encoding_options;
+#define AL_OPT_USE_QP_TABLE BIT(0)
+#define AL_OPT_FORCE_LOAD BIT(1)
+#define AL_OPT_USE_L2 BIT(2)
+#define AL_OPT_DISABLE_INTRA BIT(3)
+#define AL_OPT_DEPENDENT_SLICES BIT(4)
+
+ s16 pps_qp;
+ u16 padding;
+ u64 user_param;
+ u64 src_handle;
+
+ u32 request_options;
+#define AL_OPT_SCENE_CHANGE BIT(0)
+#define AL_OPT_RESTART_GOP BIT(1)
+#define AL_OPT_USE_LONG_TERM BIT(2)
+#define AL_OPT_UPDATE_PARAMS BIT(3)
+
+ /* u32 scene_change_delay (optional) */
+ /* rate control param (optional) */
+ /* gop param (optional) */
+ u32 src_y;
+ u32 src_uv;
+ u32 stride;
+ u32 ep2;
+ u64 ep2_v;
+} __attribute__ ((__packed__));
+
+struct mcu_msg_encode_frame_response {
+ struct mcu_msg_header header;
+ u32 channel_id;
+ u64 stream_id; /* see mcu_msg_put_stream_buffer */
+ u64 user_param; /* see mcu_msg_encode_frame */
+ u64 src_handle; /* see mcu_msg_encode_frame */
+ u16 skip;
+ u16 is_ref;
+ u32 initial_removal_delay;
+ u32 dpb_output_delay;
+ u32 size;
+ u32 frame_tag_size;
+ s32 stuffing;
+ s32 filler;
+ u16 num_column;
+ u16 num_row;
+ u16 qp;
+ u8 num_ref_idx_l0;
+ u8 num_ref_idx_l1;
+ u32 partition_table_offset;
+ s32 partition_table_size;
+ u32 sum_complex;
+ s32 tile_width[4];
+ s32 tile_height[22];
+ u32 error_code;
+
+ u32 slice_type;
+#define AL_ENC_SLICE_TYPE_B 0
+#define AL_ENC_SLICE_TYPE_P 1
+#define AL_ENC_SLICE_TYPE_I 2
+
+ u32 pic_struct;
+ u8 is_idr;
+ u8 is_first_slice;
+ u8 is_last_slice;
+ u8 reserved;
+ u16 pps_qp;
+ u16 reserved1;
+ u32 reserved2;
+} __attribute__ ((__packed__));
+
+union mcu_msg_response {
+ struct mcu_msg_header header;
+ struct mcu_msg_init_response init;
+ struct mcu_msg_create_channel_response create_channel;
+ struct mcu_msg_destroy_channel_response destroy_channel;
+ struct mcu_msg_encode_frame_response encode_frame;
+};
+
+/* Helper functions for channel and user operations */
+
+static unsigned long allegro_next_user_id(struct allegro_dev *dev)
+{
+ if (dev->channel_user_ids == ~0UL)
+ return -EBUSY;
+
+ return ffz(dev->channel_user_ids);
+}
+
+static struct allegro_channel *
+allegro_find_channel_by_user_id(struct allegro_dev *dev,
+ unsigned int user_id)
+{
+ struct allegro_channel *channel;
+
+ list_for_each_entry(channel, &dev->channels, list) {
+ if (channel->user_id == user_id)
+ return channel;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static struct allegro_channel *
+allegro_find_channel_by_channel_id(struct allegro_dev *dev,
+ unsigned int channel_id)
+{
+ struct allegro_channel *channel;
+
+ list_for_each_entry(channel, &dev->channels, list) {
+ if (channel->mcu_channel_id == channel_id)
+ return channel;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static inline bool channel_exists(struct allegro_channel *channel)
+{
+ return channel->mcu_channel_id != -1;
+}
+
+static unsigned int estimate_stream_size(unsigned int width,
+ unsigned int height)
+{
+ unsigned int offset = ENCODER_STREAM_OFFSET;
+ unsigned int num_blocks = DIV_ROUND_UP(width, SIZE_MACROBLOCK) *
+ DIV_ROUND_UP(height, SIZE_MACROBLOCK);
+ unsigned int pcm_size = SZ_256;
+ unsigned int partition_table = SZ_256;
+
+ return round_up(offset + num_blocks * pcm_size + partition_table, 32);
+}
+
+static enum v4l2_mpeg_video_h264_level
+select_minimum_h264_level(unsigned int width, unsigned int height)
+{
+ unsigned int pic_width_in_mb = DIV_ROUND_UP(width, SIZE_MACROBLOCK);
+ unsigned int frame_height_in_mb = DIV_ROUND_UP(height, SIZE_MACROBLOCK);
+ unsigned int frame_size_in_mb = pic_width_in_mb * frame_height_in_mb;
+ enum v4l2_mpeg_video_h264_level level = V4L2_MPEG_VIDEO_H264_LEVEL_4_0;
+
+ /*
+	 * The level limits are specified in Rec. ITU-T H.264 Annex A.3.1 and
+	 * also include limits on bit rate and CPB size. Only approximate
+	 * the levels using the frame size.
+ *
+ * Level 5.1 allows up to 4k video resolution.
+ */
+ if (frame_size_in_mb <= 99)
+ level = V4L2_MPEG_VIDEO_H264_LEVEL_1_0;
+ else if (frame_size_in_mb <= 396)
+ level = V4L2_MPEG_VIDEO_H264_LEVEL_1_1;
+ else if (frame_size_in_mb <= 792)
+ level = V4L2_MPEG_VIDEO_H264_LEVEL_2_1;
+ else if (frame_size_in_mb <= 1620)
+ level = V4L2_MPEG_VIDEO_H264_LEVEL_2_2;
+ else if (frame_size_in_mb <= 3600)
+ level = V4L2_MPEG_VIDEO_H264_LEVEL_3_1;
+ else if (frame_size_in_mb <= 5120)
+ level = V4L2_MPEG_VIDEO_H264_LEVEL_3_2;
+ else if (frame_size_in_mb <= 8192)
+ level = V4L2_MPEG_VIDEO_H264_LEVEL_4_0;
+ else if (frame_size_in_mb <= 8704)
+ level = V4L2_MPEG_VIDEO_H264_LEVEL_4_2;
+ else if (frame_size_in_mb <= 22080)
+ level = V4L2_MPEG_VIDEO_H264_LEVEL_5_0;
+ else
+ level = V4L2_MPEG_VIDEO_H264_LEVEL_5_1;
+
+ return level;
+}
+
+static unsigned int maximum_bitrate(enum v4l2_mpeg_video_h264_level level)
+{
+ switch (level) {
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
+ return 64000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
+ return 128000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
+ return 192000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
+ return 384000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
+ return 768000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
+ return 2000000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
+ return 4000000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
+ return 4000000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
+ return 10000000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
+ return 14000000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
+ return 20000000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
+ return 20000000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
+ return 50000000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
+ return 50000000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_0:
+ return 135000000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
+ default:
+ return 240000000;
+ }
+}
+
+static unsigned int maximum_cpb_size(enum v4l2_mpeg_video_h264_level level)
+{
+ switch (level) {
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
+ return 175;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
+ return 350;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
+ return 500;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
+ return 1000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
+ return 2000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
+ return 2000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
+ return 4000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
+ return 4000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
+ return 10000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
+ return 14000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
+ return 20000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
+ return 25000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
+ return 62500;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
+ return 62500;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_0:
+ return 135000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
+ default:
+ return 240000;
+ }
+}
+
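+/* The firmware binaries are identified by their size. */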
+static const struct fw_info *
+allegro_get_firmware_info(struct allegro_dev *dev,
+ const struct firmware *fw,
+ const struct firmware *fw_codec)
+{
+ int i;
+ unsigned int id = fw->size;
+ unsigned int id_codec = fw_codec->size;
+
+ for (i = 0; i < ARRAY_SIZE(supported_firmware); i++)
+ if (supported_firmware[i].id == id &&
+ supported_firmware[i].id_codec == id_codec)
+ return &supported_firmware[i];
+
+ return NULL;
+}
+
+/*
+ * Buffers that are used internally by the MCU.
+ */
+
+static int allegro_alloc_buffer(struct allegro_dev *dev,
+ struct allegro_buffer *buffer, size_t size)
+{
+ buffer->vaddr = dma_alloc_coherent(&dev->plat_dev->dev, size,
+ &buffer->paddr, GFP_KERNEL);
+ if (!buffer->vaddr)
+ return -ENOMEM;
+ buffer->size = size;
+
+ return 0;
+}
+
+static void allegro_free_buffer(struct allegro_dev *dev,
+ struct allegro_buffer *buffer)
+{
+ if (buffer->vaddr) {
+ dma_free_coherent(&dev->plat_dev->dev, buffer->size,
+ buffer->vaddr, buffer->paddr);
+ buffer->vaddr = NULL;
+ buffer->size = 0;
+ }
+}
+
+/*
+ * Mailbox interface to send messages to the MCU.
+ */
+
+static int allegro_mbox_init(struct allegro_dev *dev,
+ struct allegro_mbox *mbox,
+ unsigned int base, size_t size)
+{
+ if (!mbox)
+ return -EINVAL;
+
+ mbox->head = base;
+ mbox->tail = base + 0x4;
+ mbox->data = base + 0x8;
+ mbox->size = size;
+ mutex_init(&mbox->lock);
+
+ regmap_write(dev->sram, mbox->head, 0);
+ regmap_write(dev->sram, mbox->tail, 0);
+
+ return 0;
+}
+
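+/*
+ * The mailbox is accessed in 32-bit words; message sizes are assumed to be a
+ * multiple of 4 bytes.
+ */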
+static int allegro_mbox_write(struct allegro_dev *dev,
+ struct allegro_mbox *mbox, void *src, size_t size)
+{
+ struct mcu_msg_header *header = src;
+ unsigned int tail;
+ size_t size_no_wrap;
+ int err = 0;
+
+ if (!src)
+ return -EINVAL;
+
+ if (size > mbox->size) {
+ v4l2_err(&dev->v4l2_dev,
+ "message (%zu bytes) to large for mailbox (%zu bytes)\n",
+ size, mbox->size);
+ return -EINVAL;
+ }
+
+ if (header->length != size - sizeof(*header)) {
+ v4l2_err(&dev->v4l2_dev,
+ "invalid message length: %u bytes (expected %zu bytes)\n",
+ header->length, size - sizeof(*header));
+ return -EINVAL;
+ }
+
+ v4l2_dbg(2, debug, &dev->v4l2_dev,
+ "write command message: type %s, body length %d\n",
+ msg_type_name(header->type), header->length);
+
+ mutex_lock(&mbox->lock);
+ regmap_read(dev->sram, mbox->tail, &tail);
+ if (tail > mbox->size) {
+ v4l2_err(&dev->v4l2_dev,
+ "invalid tail (0x%x): must be smaller than mailbox size (0x%zx)\n",
+ tail, mbox->size);
+ err = -EIO;
+ goto out;
+ }
+ size_no_wrap = min(size, mbox->size - (size_t)tail);
+ regmap_bulk_write(dev->sram, mbox->data + tail, src, size_no_wrap / 4);
+ regmap_bulk_write(dev->sram, mbox->data,
+ src + size_no_wrap, (size - size_no_wrap) / 4);
+ regmap_write(dev->sram, mbox->tail, (tail + size) % mbox->size);
+
+out:
+ mutex_unlock(&mbox->lock);
+
+ return err;
+}
+
+static ssize_t allegro_mbox_read(struct allegro_dev *dev,
+ struct allegro_mbox *mbox,
+ void *dst, size_t nbyte)
+{
+ struct mcu_msg_header *header;
+ unsigned int head;
+ ssize_t size;
+ size_t body_no_wrap;
+
+ regmap_read(dev->sram, mbox->head, &head);
+ if (head > mbox->size) {
+ v4l2_err(&dev->v4l2_dev,
+ "invalid head (0x%x): must be smaller than mailbox size (0x%zx)\n",
+ head, mbox->size);
+ return -EIO;
+ }
+
+ /* Assume that the header does not wrap. */
+ regmap_bulk_read(dev->sram, mbox->data + head,
+ dst, sizeof(*header) / 4);
+ header = dst;
+ size = header->length + sizeof(*header);
+ if (size > mbox->size || size & 0x3) {
+ v4l2_err(&dev->v4l2_dev,
+ "invalid message length: %zu bytes (maximum %zu bytes)\n",
+ header->length + sizeof(*header), mbox->size);
+ return -EIO;
+ }
+ if (size > nbyte) {
+ v4l2_err(&dev->v4l2_dev,
+ "destination buffer too small: %zu bytes (need %zu bytes)\n",
+ nbyte, size);
+ return -EINVAL;
+ }
+
+ /*
+ * The message might wrap within the mailbox. If the message does not
+ * wrap, the first read will read the entire message, otherwise the
+ * first read will read message until the end of the mailbox and the
+ * second read will read the remaining bytes from the beginning of the
+ * mailbox.
+ *
+ * Skip the header, as it was already read to get the size of the body.
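+ *
+ * For example, if head == mbox->size - sizeof(*header), the header is
+ * read from the very end of the mailbox and the complete body is read
+ * from its beginning.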
+ */
+ body_no_wrap = min((size_t)header->length,
+ (size_t)(mbox->size - (head + sizeof(*header))));
+ regmap_bulk_read(dev->sram, mbox->data + head + sizeof(*header),
+ dst + sizeof(*header), body_no_wrap / 4);
+ regmap_bulk_read(dev->sram, mbox->data,
+ dst + sizeof(*header) + body_no_wrap,
+ (header->length - body_no_wrap) / 4);
+
+ regmap_write(dev->sram, mbox->head, (head + size) % mbox->size);
+
+ v4l2_dbg(2, debug, &dev->v4l2_dev,
+ "read status message: type %s, body length %d\n",
+ msg_type_name(header->type), header->length);
+
+ return size;
+}
+
+static void allegro_mcu_interrupt(struct allegro_dev *dev)
+{
+ regmap_write(dev->regmap, AL5_MCU_INTERRUPT, BIT(0));
+}
+
+static void allegro_mcu_send_init(struct allegro_dev *dev,
+ dma_addr_t suballoc_dma, size_t suballoc_size)
+{
+ struct mcu_msg_init_request msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ msg.header.type = MCU_MSG_TYPE_INIT;
+ msg.header.length = sizeof(msg) - sizeof(msg.header);
+
+ msg.suballoc_dma = lower_32_bits(suballoc_dma) | MCU_CACHE_OFFSET;
+ msg.suballoc_size = suballoc_size;
+
+ /* disable L2 cache */
+ msg.l2_cache[0] = -1;
+ msg.l2_cache[1] = -1;
+ msg.l2_cache[2] = -1;
+
+ allegro_mbox_write(dev, &dev->mbox_command, &msg, sizeof(msg));
+ allegro_mcu_interrupt(dev);
+}
+
+static u32 v4l2_pixelformat_to_mcu_format(u32 pixelformat)
+{
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ /* AL_420_8BITS: 0x100 -> NV12, 0x88 -> 8 bit */
+ return 0x100 | 0x88;
+ default:
+ return -EINVAL;
+ }
+}
+
+static u32 v4l2_colorspace_to_mcu_colorspace(enum v4l2_colorspace colorspace)
+{
+ switch (colorspace) {
+ case V4L2_COLORSPACE_REC709:
+ return 2;
+ case V4L2_COLORSPACE_SMPTE170M:
+ return 3;
+ case V4L2_COLORSPACE_SMPTE240M:
+ return 4;
+ case V4L2_COLORSPACE_SRGB:
+ return 7;
+ default:
+ /* UNKNOWN */
+ return 0;
+ }
+}
+
+static s8 v4l2_pixelformat_to_mcu_codec(u32 pixelformat)
+{
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_H264:
+ default:
+ return 1;
+ }
+}
+
+static u8 v4l2_profile_to_mcu_profile(enum v4l2_mpeg_video_h264_profile profile)
+{
+ switch (profile) {
+ case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
+ default:
+ return 66;
+ }
+}
+
+static u16 v4l2_level_to_mcu_level(enum v4l2_mpeg_video_h264_level level)
+{
+ switch (level) {
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
+ return 10;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
+ return 11;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
+ return 12;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
+ return 13;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
+ return 20;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
+ return 21;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
+ return 22;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
+ return 30;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
+ return 31;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
+ return 32;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
+ return 40;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
+ return 41;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
+ return 42;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_0:
+ return 50;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
+ default:
+ return 51;
+ }
+}
+
+static u32
+v4l2_bitrate_mode_to_mcu_mode(enum v4l2_mpeg_video_bitrate_mode mode)
+{
+ switch (mode) {
+ case V4L2_MPEG_VIDEO_BITRATE_MODE_VBR:
+ return 2;
+ case V4L2_MPEG_VIDEO_BITRATE_MODE_CBR:
+ default:
+ return 1;
+ }
+}
+
+static int allegro_mcu_send_create_channel(struct allegro_dev *dev,
+ struct allegro_channel *channel)
+{
+ struct mcu_msg_create_channel msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ msg.header.type = MCU_MSG_TYPE_CREATE_CHANNEL;
+ msg.header.length = sizeof(msg) - sizeof(msg.header);
+
+ msg.user_id = channel->user_id;
+ msg.width = channel->width;
+ msg.height = channel->height;
+ msg.format = v4l2_pixelformat_to_mcu_format(channel->pixelformat);
+ msg.colorspace = v4l2_colorspace_to_mcu_colorspace(channel->colorspace);
+ msg.src_mode = 0x0;
+ msg.profile = v4l2_profile_to_mcu_profile(channel->profile);
+ msg.constraint_set_flags = BIT(1);
+ msg.codec = v4l2_pixelformat_to_mcu_codec(channel->codec);
+ msg.level = v4l2_level_to_mcu_level(channel->level);
+ msg.tier = 0;
+ msg.sps_param = BIT(20) | 0x4a;
+ msg.pps_param = BIT(2);
+ msg.enc_option = AL_OPT_RDO_COST_MODE | AL_OPT_LF_X_TILE |
+ AL_OPT_LF_X_SLICE | AL_OPT_LF;
+ msg.beta_offset = -1;
+ msg.tc_offset = -1;
+ msg.num_slices = 1;
+ msg.me_range[0] = 8;
+ msg.me_range[1] = 8;
+ msg.me_range[2] = 16;
+ msg.me_range[3] = 16;
+ msg.max_cu_size = ilog2(SIZE_MACROBLOCK);
+ msg.min_cu_size = ilog2(8);
+ msg.max_tu_size = 2;
+ msg.min_tu_size = 2;
+ msg.max_transfo_depth_intra = 1;
+ msg.max_transfo_depth_inter = 1;
+
+ msg.rate_control_mode =
+ v4l2_bitrate_mode_to_mcu_mode(channel->bitrate_mode);
+ /* Must be in the range (0, cpb_size] in 90 kHz units. Use the maximum value. */
+ msg.initial_rem_delay =
+ ((channel->cpb_size * 1000) / channel->bitrate_peak) * 90000;
+ /* Encoder expects cpb_size in units of a 90 kHz clock. */
+ msg.cpb_size =
+ ((channel->cpb_size * 1000) / channel->bitrate_peak) * 90000;
+ msg.framerate = 25;
+ msg.clk_ratio = 1000;
+ msg.target_bitrate = channel->bitrate;
+ msg.max_bitrate = channel->bitrate_peak;
+ msg.initial_qp = 25;
+ msg.min_qp = 10;
+ msg.max_qp = 51;
+ msg.ip_delta = -1;
+ msg.pb_delta = -1;
+ msg.golden_ref = 0;
+ msg.golden_delta = 2;
+ msg.golden_ref_frequency = 10;
+ msg.rate_control_option = 0x00000000;
+
+ msg.gop_ctrl_mode = 0x00000000;
+ msg.freq_ird = 0x7fffffff;
+ msg.freq_lt = 0;
+ msg.gdr_mode = 0x00000000;
+ msg.gop_length = channel->gop_size;
+ msg.subframe_latency = 0x00000000;
+ msg.lda_control_mode = 0x700d0000;
+
+ allegro_mbox_write(dev, &dev->mbox_command, &msg, sizeof(msg));
+ allegro_mcu_interrupt(dev);
+
+ return 0;
+}
+
+static int allegro_mcu_send_destroy_channel(struct allegro_dev *dev,
+ struct allegro_channel *channel)
+{
+ struct mcu_msg_destroy_channel msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ msg.header.type = MCU_MSG_TYPE_DESTROY_CHANNEL;
+ msg.header.length = sizeof(msg) - sizeof(msg.header);
+
+ msg.channel_id = channel->mcu_channel_id;
+
+ allegro_mbox_write(dev, &dev->mbox_command, &msg, sizeof(msg));
+ allegro_mcu_interrupt(dev);
+
+ return 0;
+}
+
+static int allegro_mcu_send_put_stream_buffer(struct allegro_dev *dev,
+ struct allegro_channel *channel,
+ dma_addr_t paddr,
+ unsigned long size)
+{
+ struct mcu_msg_put_stream_buffer msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ msg.header.type = MCU_MSG_TYPE_PUT_STREAM_BUFFER;
+ msg.header.length = sizeof(msg) - sizeof(msg.header);
+
+ msg.channel_id = channel->mcu_channel_id;
+ msg.dma_addr = paddr;
+ msg.mcu_addr = paddr | MCU_CACHE_OFFSET;
+ msg.size = size;
+ msg.offset = ENCODER_STREAM_OFFSET;
+ msg.stream_id = 0; /* copied to mcu_msg_encode_frame_response */
+
+ allegro_mbox_write(dev, &dev->mbox_command, &msg, sizeof(msg));
+ allegro_mcu_interrupt(dev);
+
+ return 0;
+}
+
+static int allegro_mcu_send_encode_frame(struct allegro_dev *dev,
+ struct allegro_channel *channel,
+ dma_addr_t src_y, dma_addr_t src_uv)
+{
+ struct mcu_msg_encode_frame msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ msg.header.type = MCU_MSG_TYPE_ENCODE_FRAME;
+ msg.header.length = sizeof(msg) - sizeof(msg.header);
+
+ msg.channel_id = channel->mcu_channel_id;
+ msg.encoding_options = AL_OPT_FORCE_LOAD;
+ msg.pps_qp = 26; /* QP values are relative to 26 */
+ msg.user_param = 0; /* copied to mcu_msg_encode_frame_response */
+ msg.src_handle = 0; /* copied to mcu_msg_encode_frame_response */
+ msg.src_y = src_y;
+ msg.src_uv = src_uv;
+ msg.stride = channel->stride;
+ msg.ep2 = 0x0;
+ msg.ep2_v = msg.ep2 | MCU_CACHE_OFFSET;
+
+ allegro_mbox_write(dev, &dev->mbox_command, &msg, sizeof(msg));
+ allegro_mcu_interrupt(dev);
+
+ return 0;
+}
+
+static int allegro_mcu_wait_for_init_timeout(struct allegro_dev *dev,
+ unsigned long timeout_ms)
+{
+ unsigned long tmo;
+
+ tmo = wait_for_completion_timeout(&dev->init_complete,
+ msecs_to_jiffies(timeout_ms));
+ if (tmo == 0)
+ return -ETIMEDOUT;
+
+ reinit_completion(&dev->init_complete);
+ return 0;
+}
+
+static int allegro_mcu_push_buffer_internal(struct allegro_channel *channel,
+ enum mcu_msg_type type)
+{
+ struct allegro_dev *dev = channel->dev;
+ struct mcu_msg_push_buffers_internal *msg;
+ struct mcu_msg_push_buffers_internal_buffer *buffer;
+ unsigned int num_buffers = 0;
+ size_t size;
+ struct allegro_buffer *al_buffer;
+ struct list_head *list;
+ int err;
+
+ switch (type) {
+ case MCU_MSG_TYPE_PUSH_BUFFER_REFERENCE:
+ list = &channel->buffers_reference;
+ break;
+ case MCU_MSG_TYPE_PUSH_BUFFER_INTERMEDIATE:
+ list = &channel->buffers_intermediate;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ list_for_each_entry(al_buffer, list, head)
+ num_buffers++;
+ size = struct_size(msg, buffer, num_buffers);
+
+ msg = kmalloc(size, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->header.length = size - sizeof(msg->header);
+ msg->header.type = type;
+ msg->channel_id = channel->mcu_channel_id;
+
+ buffer = msg->buffer;
+ list_for_each_entry(al_buffer, list, head) {
+ buffer->dma_addr = lower_32_bits(al_buffer->paddr);
+ buffer->mcu_addr =
+ lower_32_bits(al_buffer->paddr) | MCU_CACHE_OFFSET;
+ buffer->size = al_buffer->size;
+ buffer++;
+ }
+
+ err = allegro_mbox_write(dev, &dev->mbox_command, msg, size);
+ if (err)
+ goto out;
+ allegro_mcu_interrupt(dev);
+
+out:
+ kfree(msg);
+ return err;
+}
+
+static int allegro_mcu_push_buffer_intermediate(struct allegro_channel *channel)
+{
+ enum mcu_msg_type type = MCU_MSG_TYPE_PUSH_BUFFER_INTERMEDIATE;
+
+ return allegro_mcu_push_buffer_internal(channel, type);
+}
+
+static int allegro_mcu_push_buffer_reference(struct allegro_channel *channel)
+{
+ enum mcu_msg_type type = MCU_MSG_TYPE_PUSH_BUFFER_REFERENCE;
+
+ return allegro_mcu_push_buffer_internal(channel, type);
+}
+
+static int allocate_buffers_internal(struct allegro_channel *channel,
+ struct list_head *list,
+ size_t n, size_t size)
+{
+ struct allegro_dev *dev = channel->dev;
+ unsigned int i;
+ int err;
+ struct allegro_buffer *buffer, *tmp;
+
+ for (i = 0; i < n; i++) {
+ buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+ err = -ENOMEM;
+ goto err;
+ }
+ INIT_LIST_HEAD(&buffer->head);
+
+ err = allegro_alloc_buffer(dev, buffer, size);
+ if (err)
+ goto err;
+ list_add(&buffer->head, list);
+ }
+
+ return 0;
+
+err:
+ list_for_each_entry_safe(buffer, tmp, list, head) {
+ list_del(&buffer->head);
+ allegro_free_buffer(dev, buffer);
+ kfree(buffer);
+ }
+ return err;
+}
+
+static void destroy_buffers_internal(struct allegro_channel *channel,
+ struct list_head *list)
+{
+ struct allegro_dev *dev = channel->dev;
+ struct allegro_buffer *buffer, *tmp;
+
+ list_for_each_entry_safe(buffer, tmp, list, head) {
+ list_del(&buffer->head);
+ allegro_free_buffer(dev, buffer);
+ kfree(buffer);
+ }
+}
+
+static void destroy_reference_buffers(struct allegro_channel *channel)
+{
+ return destroy_buffers_internal(channel, &channel->buffers_reference);
+}
+
+static void destroy_intermediate_buffers(struct allegro_channel *channel)
+{
+ return destroy_buffers_internal(channel,
+ &channel->buffers_intermediate);
+}
+
+static int allocate_intermediate_buffers(struct allegro_channel *channel,
+ size_t n, size_t size)
+{
+ return allocate_buffers_internal(channel,
+ &channel->buffers_intermediate,
+ n, size);
+}
+
+static int allocate_reference_buffers(struct allegro_channel *channel,
+ size_t n, size_t size)
+{
+ return allocate_buffers_internal(channel,
+ &channel->buffers_reference,
+ n, PAGE_ALIGN(size));
+}
+
+static ssize_t allegro_h264_write_sps(struct allegro_channel *channel,
+ void *dest, size_t n)
+{
+ struct allegro_dev *dev = channel->dev;
+ struct nal_h264_sps *sps;
+ ssize_t size;
+ unsigned int size_mb = SIZE_MACROBLOCK;
+ /* Calculation of crop units in Rec. ITU-T H.264 (04/2017) p. 76 */
+ unsigned int crop_unit_x = 2;
+ unsigned int crop_unit_y = 2;
+
+ sps = kzalloc(sizeof(*sps), GFP_KERNEL);
+ if (!sps)
+ return -ENOMEM;
+
+ sps->profile_idc = nal_h264_profile_from_v4l2(channel->profile);
+ sps->constraint_set0_flag = 0;
+ sps->constraint_set1_flag = 1;
+ sps->constraint_set2_flag = 0;
+ sps->constraint_set3_flag = 0;
+ sps->constraint_set4_flag = 0;
+ sps->constraint_set5_flag = 0;
+ sps->level_idc = nal_h264_level_from_v4l2(channel->level);
+ sps->seq_parameter_set_id = 0;
+ sps->log2_max_frame_num_minus4 = 0;
+ sps->pic_order_cnt_type = 0;
+ sps->log2_max_pic_order_cnt_lsb_minus4 = 6;
+ sps->max_num_ref_frames = 3;
+ sps->gaps_in_frame_num_value_allowed_flag = 0;
+ sps->pic_width_in_mbs_minus1 =
+ DIV_ROUND_UP(channel->width, size_mb) - 1;
+ sps->pic_height_in_map_units_minus1 =
+ DIV_ROUND_UP(channel->height, size_mb) - 1;
+ sps->frame_mbs_only_flag = 1;
+ sps->mb_adaptive_frame_field_flag = 0;
+ sps->direct_8x8_inference_flag = 1;
+ sps->frame_cropping_flag =
+ (channel->width % size_mb) || (channel->height % size_mb);
+ if (sps->frame_cropping_flag) {
+ sps->crop_left = 0;
+ sps->crop_right = (round_up(channel->width, size_mb) - channel->width) / crop_unit_x;
+ sps->crop_top = 0;
+ sps->crop_bottom = (round_up(channel->height, size_mb) - channel->height) / crop_unit_y;
+ }
+ sps->vui_parameters_present_flag = 1;
+ sps->vui.aspect_ratio_info_present_flag = 0;
+ sps->vui.overscan_info_present_flag = 0;
+ sps->vui.video_signal_type_present_flag = 1;
+ sps->vui.video_format = 1;
+ sps->vui.video_full_range_flag = 0;
+ sps->vui.colour_description_present_flag = 1;
+ sps->vui.colour_primaries = 5;
+ sps->vui.transfer_characteristics = 5;
+ sps->vui.matrix_coefficients = 5;
+ sps->vui.chroma_loc_info_present_flag = 1;
+ sps->vui.chroma_sample_loc_type_top_field = 0;
+ sps->vui.chroma_sample_loc_type_bottom_field = 0;
+ sps->vui.timing_info_present_flag = 1;
+ sps->vui.num_units_in_tick = 1;
+ sps->vui.time_scale = 50;
+ sps->vui.fixed_frame_rate_flag = 1;
+ sps->vui.nal_hrd_parameters_present_flag = 0;
+ sps->vui.vcl_hrd_parameters_present_flag = 1;
+ sps->vui.vcl_hrd_parameters.cpb_cnt_minus1 = 0;
+ sps->vui.vcl_hrd_parameters.bit_rate_scale = 0;
+ sps->vui.vcl_hrd_parameters.cpb_size_scale = 1;
+ /* See Rec. ITU-T H.264 (04/2017) p. 410 E-53 */
+ sps->vui.vcl_hrd_parameters.bit_rate_value_minus1[0] =
+ channel->bitrate_peak / (1 << (6 + sps->vui.vcl_hrd_parameters.bit_rate_scale)) - 1;
+ /* See Rec. ITU-T H.264 (04/2017) p. 410 E-54 */
+ sps->vui.vcl_hrd_parameters.cpb_size_value_minus1[0] =
+ (channel->cpb_size * 1000) / (1 << (4 + sps->vui.vcl_hrd_parameters.cpb_size_scale)) - 1;
+ sps->vui.vcl_hrd_parameters.cbr_flag[0] = 1;
+ sps->vui.vcl_hrd_parameters.initial_cpb_removal_delay_length_minus1 = 31;
+ sps->vui.vcl_hrd_parameters.cpb_removal_delay_length_minus1 = 31;
+ sps->vui.vcl_hrd_parameters.dpb_output_delay_length_minus1 = 31;
+ sps->vui.vcl_hrd_parameters.time_offset_length = 0;
+ sps->vui.low_delay_hrd_flag = 0;
+ sps->vui.pic_struct_present_flag = 1;
+ sps->vui.bitstream_restriction_flag = 0;
+
+ size = nal_h264_write_sps(&dev->plat_dev->dev, dest, n, sps);
+
+ kfree(sps);
+
+ return size;
+}
+
+static ssize_t allegro_h264_write_pps(struct allegro_channel *channel,
+ void *dest, size_t n)
+{
+ struct allegro_dev *dev = channel->dev;
+ struct nal_h264_pps *pps;
+ ssize_t size;
+
+ pps = kzalloc(sizeof(*pps), GFP_KERNEL);
+ if (!pps)
+ return -ENOMEM;
+
+ pps->pic_parameter_set_id = 0;
+ pps->seq_parameter_set_id = 0;
+ pps->entropy_coding_mode_flag = 0;
+ pps->bottom_field_pic_order_in_frame_present_flag = 0;
+ pps->num_slice_groups_minus1 = 0;
+ pps->num_ref_idx_l0_default_active_minus1 = 2;
+ pps->num_ref_idx_l1_default_active_minus1 = 2;
+ pps->weighted_pred_flag = 0;
+ pps->weighted_bipred_idc = 0;
+ pps->pic_init_qp_minus26 = 0;
+ pps->pic_init_qs_minus26 = 0;
+ pps->chroma_qp_index_offset = 0;
+ pps->deblocking_filter_control_present_flag = 1;
+ pps->constrained_intra_pred_flag = 0;
+ pps->redundant_pic_cnt_present_flag = 0;
+ pps->transform_8x8_mode_flag = 0;
+ pps->pic_scaling_matrix_present_flag = 0;
+ pps->second_chroma_qp_index_offset = 0;
+
+ size = nal_h264_write_pps(&dev->plat_dev->dev, dest, n, pps);
+
+ kfree(pps);
+
+ return size;
+}
+
+static bool allegro_channel_is_at_eos(struct allegro_channel *channel)
+{
+ bool is_at_eos = false;
+
+ switch (allegro_get_state(channel)) {
+ case ALLEGRO_STATE_STOPPED:
+ is_at_eos = true;
+ break;
+ case ALLEGRO_STATE_DRAIN:
+ case ALLEGRO_STATE_WAIT_FOR_BUFFER:
+ if (v4l2_m2m_num_src_bufs_ready(channel->fh.m2m_ctx) == 0)
+ is_at_eos = true;
+ break;
+ default:
+ break;
+ }
+
+ return is_at_eos;
+}
+
+static void allegro_channel_buf_done(struct allegro_channel *channel,
+ struct vb2_v4l2_buffer *buf,
+ enum vb2_buffer_state state)
+{
+ const struct v4l2_event eos_event = {
+ .type = V4L2_EVENT_EOS
+ };
+
+ if (allegro_channel_is_at_eos(channel)) {
+ buf->flags |= V4L2_BUF_FLAG_LAST;
+ v4l2_event_queue_fh(&channel->fh, &eos_event);
+
+ allegro_set_state(channel, ALLEGRO_STATE_STOPPED);
+ }
+
+ v4l2_m2m_buf_done(buf, state);
+}
+
+static void allegro_channel_finish_frame(struct allegro_channel *channel,
+ struct mcu_msg_encode_frame_response *msg)
+{
+ struct allegro_dev *dev = channel->dev;
+ struct vb2_v4l2_buffer *src_buf;
+ struct vb2_v4l2_buffer *dst_buf;
+ struct {
+ u32 offset;
+ u32 size;
+ } *partition;
+ enum vb2_buffer_state state = VB2_BUF_STATE_ERROR;
+ char *curr;
+ ssize_t len;
+ ssize_t free;
+
+ src_buf = v4l2_m2m_src_buf_remove(channel->fh.m2m_ctx);
+
+ dst_buf = v4l2_m2m_dst_buf_remove(channel->fh.m2m_ctx);
+ dst_buf->sequence = channel->csequence++;
+
+ if (msg->error_code) {
+ v4l2_err(&dev->v4l2_dev,
+ "channel %d: error while encoding frame: %x\n",
+ channel->mcu_channel_id, msg->error_code);
+ goto err;
+ }
+
+ if (msg->partition_table_size != 1) {
+ v4l2_warn(&dev->v4l2_dev,
+ "channel %d: only handling first partition table entry (%d entries)\n",
+ channel->mcu_channel_id, msg->partition_table_size);
+ }
+
+ if (msg->partition_table_offset +
+ msg->partition_table_size * sizeof(*partition) >
+ vb2_plane_size(&dst_buf->vb2_buf, 0)) {
+ v4l2_err(&dev->v4l2_dev,
+ "channel %d: partition table outside of dst_buf\n",
+ channel->mcu_channel_id);
+ goto err;
+ }
+
+ partition =
+ vb2_plane_vaddr(&dst_buf->vb2_buf, 0) + msg->partition_table_offset;
+ if (partition->offset + partition->size >
+ vb2_plane_size(&dst_buf->vb2_buf, 0)) {
+ v4l2_err(&dev->v4l2_dev,
+ "channel %d: encoded frame is outside of dst_buf (offset 0x%x, size 0x%x)\n",
+ channel->mcu_channel_id, partition->offset,
+ partition->size);
+ goto err;
+ }
+
+ v4l2_dbg(2, debug, &dev->v4l2_dev,
+ "channel %d: encoded frame of size %d is at offset 0x%x\n",
+ channel->mcu_channel_id, partition->size, partition->offset);
+
+ /*
+ * The payload must include the data before the partition offset,
+ * because we will put the sps and pps data there.
+ */
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
+ partition->offset + partition->size);
+
+ curr = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
+ free = partition->offset;
+ if (msg->is_idr) {
+ len = allegro_h264_write_sps(channel, curr, free);
+ if (len < 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "not enough space for sequence parameter set: %zd left\n",
+ free);
+ goto err;
+ }
+ curr += len;
+ free -= len;
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "channel %d: wrote %zd byte SPS nal unit\n",
+ channel->mcu_channel_id, len);
+ }
+
+ if (msg->slice_type == AL_ENC_SLICE_TYPE_I) {
+ len = allegro_h264_write_pps(channel, curr, free);
+ if (len < 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "not enough space for picture parameter set: %zd left\n",
+ free);
+ goto err;
+ }
+ curr += len;
+ free -= len;
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "channel %d: wrote %zd byte PPS nal unit\n",
+ channel->mcu_channel_id, len);
+ }
+
+ len = nal_h264_write_filler(&dev->plat_dev->dev, curr, free);
+ if (len < 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "failed to write %zd filler data\n", free);
+ goto err;
+ }
+ curr += len;
+ free -= len;
+ v4l2_dbg(2, debug, &dev->v4l2_dev,
+ "channel %d: wrote %zd bytes filler nal unit\n",
+ channel->mcu_channel_id, len);
+
+ if (free != 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "non-VCL NAL units do not fill space until VCL NAL unit: %zd bytes left\n",
+ free);
+ goto err;
+ }
+
+ state = VB2_BUF_STATE_DONE;
+
+ v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, false);
+ if (msg->is_idr)
+ dst_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
+ else
+ dst_buf->flags |= V4L2_BUF_FLAG_PFRAME;
+
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "channel %d: encoded frame #%03d (%s%s, %d bytes)\n",
+ channel->mcu_channel_id,
+ dst_buf->sequence,
+ msg->is_idr ? "IDR, " : "",
+ msg->slice_type == AL_ENC_SLICE_TYPE_I ? "I slice" :
+ msg->slice_type == AL_ENC_SLICE_TYPE_P ? "P slice" : "unknown",
+ partition->size);
+
+err:
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+
+ allegro_channel_buf_done(channel, dst_buf, state);
+
+ v4l2_m2m_job_finish(dev->m2m_dev, channel->fh.m2m_ctx);
+}
+
+static int allegro_handle_init(struct allegro_dev *dev,
+ struct mcu_msg_init_response *msg)
+{
+ complete(&dev->init_complete);
+
+ return 0;
+}
+
+static int
+allegro_handle_create_channel(struct allegro_dev *dev,
+ struct mcu_msg_create_channel_response *msg)
+{
+ struct allegro_channel *channel;
+ int err = 0;
+
+ channel = allegro_find_channel_by_user_id(dev, msg->user_id);
+ if (IS_ERR(channel)) {
+ v4l2_warn(&dev->v4l2_dev,
+ "received %s for unknown user %d\n",
+ msg_type_name(msg->header.type),
+ msg->user_id);
+ return -EINVAL;
+ }
+
+ if (msg->error_code) {
+ v4l2_err(&dev->v4l2_dev,
+ "user %d: mcu failed to create channel: error %x\n",
+ channel->user_id, msg->error_code);
+ err = -EIO;
+ goto out;
+ }
+
+ channel->mcu_channel_id = msg->channel_id;
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "user %d: channel has channel id %d\n",
+ channel->user_id, channel->mcu_channel_id);
+
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "channel %d: intermediate buffers: %d x %d bytes\n",
+ channel->mcu_channel_id,
+ msg->int_buffers_count, msg->int_buffers_size);
+ err = allocate_intermediate_buffers(channel, msg->int_buffers_count,
+ msg->int_buffers_size);
+ if (err) {
+ v4l2_err(&dev->v4l2_dev,
+ "channel %d: failed to allocate intermediate buffers\n",
+ channel->mcu_channel_id);
+ goto out;
+ }
+ err = allegro_mcu_push_buffer_intermediate(channel);
+ if (err)
+ goto out;
+
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "channel %d: reference buffers: %d x %d bytes\n",
+ channel->mcu_channel_id,
+ msg->rec_buffers_count, msg->rec_buffers_size);
+ err = allocate_reference_buffers(channel, msg->rec_buffers_count,
+ msg->rec_buffers_size);
+ if (err) {
+ v4l2_err(&dev->v4l2_dev,
+ "channel %d: failed to allocate reference buffers\n",
+ channel->mcu_channel_id);
+ goto out;
+ }
+ err = allegro_mcu_push_buffer_reference(channel);
+ if (err)
+ goto out;
+
+out:
+ channel->error = err;
+ complete(&channel->completion);
+
+ /* Handled successfully, error is passed via channel->error */
+ return 0;
+}
+
+static int
+allegro_handle_destroy_channel(struct allegro_dev *dev,
+ struct mcu_msg_destroy_channel_response *msg)
+{
+ struct allegro_channel *channel;
+
+ channel = allegro_find_channel_by_channel_id(dev, msg->channel_id);
+ if (IS_ERR(channel)) {
+ v4l2_err(&dev->v4l2_dev,
+ "received %s for unknown channel %d\n",
+ msg_type_name(msg->header.type),
+ msg->channel_id);
+ return -EINVAL;
+ }
+
+ v4l2_dbg(2, debug, &dev->v4l2_dev,
+ "user %d: vcu destroyed channel %d\n",
+ channel->user_id, channel->mcu_channel_id);
+ complete(&channel->completion);
+
+ return 0;
+}
+
+static int
+allegro_handle_encode_frame(struct allegro_dev *dev,
+ struct mcu_msg_encode_frame_response *msg)
+{
+ struct allegro_channel *channel;
+
+ channel = allegro_find_channel_by_channel_id(dev, msg->channel_id);
+ if (IS_ERR(channel)) {
+ v4l2_err(&dev->v4l2_dev,
+ "received %s for unknown channel %d\n",
+ msg_type_name(msg->header.type),
+ msg->channel_id);
+ return -EINVAL;
+ }
+
+ allegro_channel_finish_frame(channel, msg);
+
+ return 0;
+}
+
+static int allegro_receive_message(struct allegro_dev *dev)
+{
+ union mcu_msg_response *msg;
+ ssize_t size;
+ int err = 0;
+
+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ size = allegro_mbox_read(dev, &dev->mbox_status, msg, sizeof(*msg));
+ if (size < sizeof(msg->header)) {
+ v4l2_err(&dev->v4l2_dev,
+ "invalid mbox message (%zd): must be at least %zu\n",
+ size, sizeof(msg->header));
+ err = -EINVAL;
+ goto out;
+ }
+
+ switch (msg->header.type) {
+ case MCU_MSG_TYPE_INIT:
+ err = allegro_handle_init(dev, &msg->init);
+ break;
+ case MCU_MSG_TYPE_CREATE_CHANNEL:
+ err = allegro_handle_create_channel(dev, &msg->create_channel);
+ break;
+ case MCU_MSG_TYPE_DESTROY_CHANNEL:
+ err = allegro_handle_destroy_channel(dev,
+ &msg->destroy_channel);
+ break;
+ case MCU_MSG_TYPE_ENCODE_FRAME:
+ err = allegro_handle_encode_frame(dev, &msg->encode_frame);
+ break;
+ default:
+ v4l2_warn(&dev->v4l2_dev,
+ "%s: unknown message %s\n",
+ __func__, msg_type_name(msg->header.type));
+ err = -EINVAL;
+ break;
+ }
+
+out:
+ kfree(msg);
+
+ return err;
+}
+
+static irqreturn_t allegro_hardirq(int irq, void *data)
+{
+ struct allegro_dev *dev = data;
+ unsigned int status;
+
+ regmap_read(dev->regmap, AL5_ITC_CPU_IRQ_STA, &status);
+ if (!(status & AL5_ITC_CPU_IRQ_STA_TRIGGERED))
+ return IRQ_NONE;
+
+ regmap_write(dev->regmap, AL5_ITC_CPU_IRQ_CLR, status);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t allegro_irq_thread(int irq, void *data)
+{
+ struct allegro_dev *dev = data;
+
+ allegro_receive_message(dev);
+
+ return IRQ_HANDLED;
+}
+
+static void allegro_copy_firmware(struct allegro_dev *dev,
+ const u8 * const buf, size_t size)
+{
+ int err = 0;
+
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "copy mcu firmware (%zu B) to SRAM\n", size);
+ err = regmap_bulk_write(dev->sram, 0x0, buf, size / 4);
+ if (err)
+ v4l2_err(&dev->v4l2_dev,
+ "failed to copy firmware: %d\n", err);
+}
+
+static void allegro_copy_fw_codec(struct allegro_dev *dev,
+ const u8 * const buf, size_t size)
+{
+ int err;
+ dma_addr_t icache_offset, dcache_offset;
+
+ /*
+ * The downstream driver allocates 600 KB for the codec firmware to have some
+ * extra space for "possible extensions." My tests were fine with
+ * allocating just enough memory for the actual firmware, but I am not
+ * sure that the firmware really does not use the remaining space.
+ */
+ err = allegro_alloc_buffer(dev, &dev->firmware, size);
+ if (err) {
+ v4l2_err(&dev->v4l2_dev,
+ "failed to allocate %zu bytes for firmware\n", size);
+ return;
+ }
+
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "copy codec firmware (%zd B) to phys %pad\n",
+ size, &dev->firmware.paddr);
+ memcpy(dev->firmware.vaddr, buf, size);
+
+ regmap_write(dev->regmap, AXI_ADDR_OFFSET_IP,
+ upper_32_bits(dev->firmware.paddr));
+
+ icache_offset = dev->firmware.paddr - MCU_CACHE_OFFSET;
+ v4l2_dbg(2, debug, &dev->v4l2_dev,
+ "icache_offset: msb = 0x%x, lsb = 0x%x\n",
+ upper_32_bits(icache_offset), lower_32_bits(icache_offset));
+ regmap_write(dev->regmap, AL5_ICACHE_ADDR_OFFSET_MSB,
+ upper_32_bits(icache_offset));
+ regmap_write(dev->regmap, AL5_ICACHE_ADDR_OFFSET_LSB,
+ lower_32_bits(icache_offset));
+
+ dcache_offset =
+ (dev->firmware.paddr & 0xffffffff00000000ULL) - MCU_CACHE_OFFSET;
+ v4l2_dbg(2, debug, &dev->v4l2_dev,
+ "dcache_offset: msb = 0x%x, lsb = 0x%x\n",
+ upper_32_bits(dcache_offset), lower_32_bits(dcache_offset));
+ regmap_write(dev->regmap, AL5_DCACHE_ADDR_OFFSET_MSB,
+ upper_32_bits(dcache_offset));
+ regmap_write(dev->regmap, AL5_DCACHE_ADDR_OFFSET_LSB,
+ lower_32_bits(dcache_offset));
+}
+
+static void allegro_free_fw_codec(struct allegro_dev *dev)
+{
+ allegro_free_buffer(dev, &dev->firmware);
+}
+
+/*
+ * Control functions for the MCU
+ */
+
+static int allegro_mcu_enable_interrupts(struct allegro_dev *dev)
+{
+ return regmap_write(dev->regmap, AL5_ITC_CPU_IRQ_MSK, BIT(0));
+}
+
+static int allegro_mcu_disable_interrupts(struct allegro_dev *dev)
+{
+ return regmap_write(dev->regmap, AL5_ITC_CPU_IRQ_MSK, 0);
+}
+
+static int allegro_mcu_wait_for_sleep(struct allegro_dev *dev)
+{
+ unsigned long timeout;
+ unsigned int status;
+
+ timeout = jiffies + msecs_to_jiffies(100);
+ while (regmap_read(dev->regmap, AL5_MCU_STA, &status) == 0 &&
+ status != AL5_MCU_STA_SLEEP) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ cpu_relax();
+ }
+
+ return 0;
+}
+
+static int allegro_mcu_start(struct allegro_dev *dev)
+{
+ unsigned long timeout;
+ unsigned int status;
+ int err;
+
+ err = regmap_write(dev->regmap, AL5_MCU_WAKEUP, BIT(0));
+ if (err)
+ return err;
+
+ timeout = jiffies + msecs_to_jiffies(100);
+ while (regmap_read(dev->regmap, AL5_MCU_STA, &status) == 0 &&
+ status == AL5_MCU_STA_SLEEP) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ cpu_relax();
+ }
+
+ err = regmap_write(dev->regmap, AL5_MCU_WAKEUP, 0);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int allegro_mcu_reset(struct allegro_dev *dev)
+{
+ int err;
+
+ err = regmap_write(dev->regmap,
+ AL5_MCU_RESET_MODE, AL5_MCU_RESET_MODE_SLEEP);
+ if (err < 0)
+ return err;
+
+ err = regmap_write(dev->regmap, AL5_MCU_RESET, AL5_MCU_RESET_SOFT);
+ if (err < 0)
+ return err;
+
+ return allegro_mcu_wait_for_sleep(dev);
+}
+
+static void allegro_destroy_channel(struct allegro_channel *channel)
+{
+ struct allegro_dev *dev = channel->dev;
+ unsigned long timeout;
+
+ if (channel_exists(channel)) {
+ reinit_completion(&channel->completion);
+ allegro_mcu_send_destroy_channel(dev, channel);
+ timeout = wait_for_completion_timeout(&channel->completion,
+ msecs_to_jiffies(5000));
+ if (timeout == 0)
+ v4l2_warn(&dev->v4l2_dev,
+ "channel %d: timeout while destroying\n",
+ channel->mcu_channel_id);
+
+ channel->mcu_channel_id = -1;
+ }
+
+ destroy_intermediate_buffers(channel);
+ destroy_reference_buffers(channel);
+
+ v4l2_ctrl_grab(channel->mpeg_video_h264_profile, false);
+ v4l2_ctrl_grab(channel->mpeg_video_h264_level, false);
+ v4l2_ctrl_grab(channel->mpeg_video_bitrate_mode, false);
+ v4l2_ctrl_grab(channel->mpeg_video_bitrate, false);
+ v4l2_ctrl_grab(channel->mpeg_video_bitrate_peak, false);
+ v4l2_ctrl_grab(channel->mpeg_video_cpb_size, false);
+ v4l2_ctrl_grab(channel->mpeg_video_gop_size, false);
+
+ if (channel->user_id != -1) {
+ clear_bit(channel->user_id, &dev->channel_user_ids);
+ channel->user_id = -1;
+ }
+}
+
+/*
+ * Create the MCU channel
+ *
+ * After the channel has been created, the picture size, format, colorspace
+ * and framerate are fixed. Also the codec, profile, bitrate, etc. cannot be
+ * changed anymore.
+ *
+ * The channel can be created only once. The MCU will accept source buffers
+ * and stream buffers only after a channel has been created.
+ */
+static int allegro_create_channel(struct allegro_channel *channel)
+{
+ struct allegro_dev *dev = channel->dev;
+ unsigned long timeout;
+ enum v4l2_mpeg_video_h264_level min_level;
+
+ if (channel_exists(channel)) {
+ v4l2_warn(&dev->v4l2_dev,
+ "channel already exists\n");
+ return 0;
+ }
+
+ channel->user_id = allegro_next_user_id(dev);
+ if (channel->user_id < 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "no free channels available\n");
+ return -EBUSY;
+ }
+ set_bit(channel->user_id, &dev->channel_user_ids);
+
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "user %d: creating channel (%4.4s, %dx%d@%d)\n",
+ channel->user_id,
+ (char *)&channel->codec, channel->width, channel->height, 25);
+
+ min_level = select_minimum_h264_level(channel->width, channel->height);
+ if (channel->level < min_level) {
+ v4l2_warn(&dev->v4l2_dev,
+ "user %d: selected Level %s too low: increasing to Level %s\n",
+ channel->user_id,
+ v4l2_ctrl_get_menu(V4L2_CID_MPEG_VIDEO_H264_LEVEL)[channel->level],
+ v4l2_ctrl_get_menu(V4L2_CID_MPEG_VIDEO_H264_LEVEL)[min_level]);
+ channel->level = min_level;
+ }
+
+ v4l2_ctrl_grab(channel->mpeg_video_h264_profile, true);
+ v4l2_ctrl_grab(channel->mpeg_video_h264_level, true);
+ v4l2_ctrl_grab(channel->mpeg_video_bitrate_mode, true);
+ v4l2_ctrl_grab(channel->mpeg_video_bitrate, true);
+ v4l2_ctrl_grab(channel->mpeg_video_bitrate_peak, true);
+ v4l2_ctrl_grab(channel->mpeg_video_cpb_size, true);
+ v4l2_ctrl_grab(channel->mpeg_video_gop_size, true);
+
+ reinit_completion(&channel->completion);
+ allegro_mcu_send_create_channel(dev, channel);
+ timeout = wait_for_completion_timeout(&channel->completion,
+ msecs_to_jiffies(5000));
+ if (timeout == 0)
+ channel->error = -ETIMEDOUT;
+ if (channel->error)
+ goto err;
+
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "channel %d: accepting buffers\n",
+ channel->mcu_channel_id);
+
+ return 0;
+
+err:
+ allegro_destroy_channel(channel);
+
+ return channel->error;
+}
+
+static void allegro_set_default_params(struct allegro_channel *channel)
+{
+ channel->width = ALLEGRO_WIDTH_DEFAULT;
+ channel->height = ALLEGRO_HEIGHT_DEFAULT;
+ channel->stride = round_up(channel->width, 32);
+
+ channel->colorspace = V4L2_COLORSPACE_REC709;
+ channel->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ channel->quantization = V4L2_QUANTIZATION_DEFAULT;
+ channel->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+ channel->pixelformat = V4L2_PIX_FMT_NV12;
+ channel->sizeimage_raw = channel->stride * channel->height * 3 / 2;
+
+ channel->codec = V4L2_PIX_FMT_H264;
+ channel->profile = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
+ channel->level =
+ select_minimum_h264_level(channel->width, channel->height);
+ channel->sizeimage_encoded =
+ estimate_stream_size(channel->width, channel->height);
+
+ channel->bitrate_mode = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR;
+ channel->bitrate = maximum_bitrate(channel->level);
+ channel->bitrate_peak = maximum_bitrate(channel->level);
+ channel->cpb_size = maximum_cpb_size(channel->level);
+ channel->gop_size = ALLEGRO_GOP_SIZE_DEFAULT;
+}
+
+static int allegro_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct allegro_channel *channel = vb2_get_drv_priv(vq);
+ struct allegro_dev *dev = channel->dev;
+
+ v4l2_dbg(2, debug, &dev->v4l2_dev,
+ "%s: queue setup[%s]: nplanes = %d\n",
+ V4L2_TYPE_IS_OUTPUT(vq->type) ? "output" : "capture",
+ *nplanes == 0 ? "REQBUFS" : "CREATE_BUFS", *nplanes);
+
+ if (*nplanes != 0) {
+ if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+ if (sizes[0] < channel->sizeimage_raw)
+ return -EINVAL;
+ } else {
+ if (sizes[0] < channel->sizeimage_encoded)
+ return -EINVAL;
+ }
+ } else {
+ *nplanes = 1;
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ sizes[0] = channel->sizeimage_raw;
+ else
+ sizes[0] = channel->sizeimage_encoded;
+ }
+
+ return 0;
+}
+
+static int allegro_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct allegro_channel *channel = vb2_get_drv_priv(vb->vb2_queue);
+ struct allegro_dev *dev = channel->dev;
+
+ if (allegro_get_state(channel) == ALLEGRO_STATE_DRAIN &&
+ V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type))
+ return -EBUSY;
+
+ if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE) {
+ v4l2_err(&dev->v4l2_dev,
+ "channel %d: unsupported field\n",
+ channel->mcu_channel_id);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void allegro_buf_queue(struct vb2_buffer *vb)
+{
+ struct allegro_channel *channel = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ if (allegro_get_state(channel) == ALLEGRO_STATE_WAIT_FOR_BUFFER &&
+ vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ allegro_channel_buf_done(channel, vbuf, VB2_BUF_STATE_DONE);
+ return;
+ }
+
+ v4l2_m2m_buf_queue(channel->fh.m2m_ctx, vbuf);
+}
+
+static int allegro_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct allegro_channel *channel = vb2_get_drv_priv(q);
+ struct allegro_dev *dev = channel->dev;
+
+ v4l2_dbg(2, debug, &dev->v4l2_dev,
+ "%s: start streaming\n",
+ V4L2_TYPE_IS_OUTPUT(q->type) ? "output" : "capture");
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ channel->osequence = 0;
+ allegro_set_state(channel, ALLEGRO_STATE_ENCODING);
+ } else if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ channel->csequence = 0;
+ }
+
+ return 0;
+}
+
+static void allegro_stop_streaming(struct vb2_queue *q)
+{
+ struct allegro_channel *channel = vb2_get_drv_priv(q);
+ struct allegro_dev *dev = channel->dev;
+ struct vb2_v4l2_buffer *buffer;
+
+ v4l2_dbg(2, debug, &dev->v4l2_dev,
+ "%s: stop streaming\n",
+ V4L2_TYPE_IS_OUTPUT(q->type) ? "output" : "capture");
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ allegro_set_state(channel, ALLEGRO_STATE_STOPPED);
+ while ((buffer = v4l2_m2m_src_buf_remove(channel->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buffer, VB2_BUF_STATE_ERROR);
+ } else if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ allegro_destroy_channel(channel);
+ while ((buffer = v4l2_m2m_dst_buf_remove(channel->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buffer, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static const struct vb2_ops allegro_queue_ops = {
+ .queue_setup = allegro_queue_setup,
+ .buf_prepare = allegro_buf_prepare,
+ .buf_queue = allegro_buf_queue,
+ .start_streaming = allegro_start_streaming,
+ .stop_streaming = allegro_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int allegro_queue_init(void *priv,
+ struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ int err;
+ struct allegro_channel *channel = priv;
+
+ src_vq->dev = &channel->dev->plat_dev->dev;
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_DMABUF | VB2_MMAP;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->drv_priv = channel;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->ops = &allegro_queue_ops;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->lock = &channel->dev->lock;
+ err = vb2_queue_init(src_vq);
+ if (err)
+ return err;
+
+ dst_vq->dev = &channel->dev->plat_dev->dev;
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_DMABUF | VB2_MMAP;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->drv_priv = channel;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->ops = &allegro_queue_ops;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->lock = &channel->dev->lock;
+ err = vb2_queue_init(dst_vq);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int allegro_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct allegro_channel *channel = container_of(ctrl->handler,
+ struct allegro_channel,
+ ctrl_handler);
+ struct allegro_dev *dev = channel->dev;
+
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "s_ctrl: %s = %d\n", v4l2_ctrl_get_name(ctrl->id), ctrl->val);
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ channel->level = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ channel->bitrate_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ channel->bitrate = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
+ channel->bitrate_peak = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE:
+ channel->cpb_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ channel->gop_size = ctrl->val;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops allegro_ctrl_ops = {
+ .s_ctrl = allegro_s_ctrl,
+};
+
+static int allegro_open(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct allegro_dev *dev = video_get_drvdata(vdev);
+ struct allegro_channel *channel = NULL;
+ struct v4l2_ctrl_handler *handler;
+ u64 mask;
+
+ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return -ENOMEM;
+
+ v4l2_fh_init(&channel->fh, vdev);
+ file->private_data = &channel->fh;
+ v4l2_fh_add(&channel->fh);
+
+ init_completion(&channel->completion);
+
+ channel->dev = dev;
+
+ allegro_set_default_params(channel);
+
+ handler = &channel->ctrl_handler;
+ v4l2_ctrl_handler_init(handler, 0);
+ channel->mpeg_video_h264_profile = v4l2_ctrl_new_std_menu(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE, 0x0,
+ V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE);
+ mask = 1 << V4L2_MPEG_VIDEO_H264_LEVEL_1B;
+ channel->mpeg_video_h264_level = v4l2_ctrl_new_std_menu(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ V4L2_MPEG_VIDEO_H264_LEVEL_5_1, mask,
+ V4L2_MPEG_VIDEO_H264_LEVEL_5_1);
+ channel->mpeg_video_bitrate_mode = v4l2_ctrl_new_std_menu(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, 0,
+ channel->bitrate_mode);
+ channel->mpeg_video_bitrate = v4l2_ctrl_new_std(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE,
+ 0, maximum_bitrate(V4L2_MPEG_VIDEO_H264_LEVEL_5_1),
+ 1, channel->bitrate);
+ channel->mpeg_video_bitrate_peak = v4l2_ctrl_new_std(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
+ 0, maximum_bitrate(V4L2_MPEG_VIDEO_H264_LEVEL_5_1),
+ 1, channel->bitrate_peak);
+ channel->mpeg_video_cpb_size = v4l2_ctrl_new_std(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE,
+ 0, maximum_cpb_size(V4L2_MPEG_VIDEO_H264_LEVEL_5_1),
+ 1, channel->cpb_size);
+ channel->mpeg_video_gop_size = v4l2_ctrl_new_std(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_GOP_SIZE,
+ 0, ALLEGRO_GOP_SIZE_MAX,
+ 1, channel->gop_size);
+ v4l2_ctrl_new_std(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
+ 1, 32,
+ 1, 1);
+ channel->fh.ctrl_handler = handler;
+
+ channel->mcu_channel_id = -1;
+ channel->user_id = -1;
+
+ INIT_LIST_HEAD(&channel->buffers_reference);
+ INIT_LIST_HEAD(&channel->buffers_intermediate);
+
+ list_add(&channel->list, &dev->channels);
+
+ channel->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, channel,
+ allegro_queue_init);
+
+ return 0;
+}
+
+static int allegro_release(struct file *file)
+{
+ struct allegro_channel *channel = fh_to_channel(file->private_data);
+
+ v4l2_m2m_ctx_release(channel->fh.m2m_ctx);
+
+ list_del(&channel->list);
+
+ v4l2_ctrl_handler_free(&channel->ctrl_handler);
+
+ v4l2_fh_del(&channel->fh);
+ v4l2_fh_exit(&channel->fh);
+
+ kfree(channel);
+
+ return 0;
+}
+
+static int allegro_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct allegro_dev *dev = video_get_drvdata(vdev);
+
+ strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
+ strscpy(cap->card, "Allegro DVT Video Encoder", sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(&dev->plat_dev->dev));
+
+ return 0;
+}
+
+static int allegro_enum_fmt_vid(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index)
+ return -EINVAL;
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ f->pixelformat = V4L2_PIX_FMT_NV12;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ f->pixelformat = V4L2_PIX_FMT_H264;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int allegro_g_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct allegro_channel *channel = fh_to_channel(fh);
+
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.width = channel->width;
+ f->fmt.pix.height = channel->height;
+
+ f->fmt.pix.colorspace = channel->colorspace;
+ f->fmt.pix.ycbcr_enc = channel->ycbcr_enc;
+ f->fmt.pix.quantization = channel->quantization;
+ f->fmt.pix.xfer_func = channel->xfer_func;
+
+ f->fmt.pix.pixelformat = channel->codec;
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage = channel->sizeimage_encoded;
+
+ return 0;
+}
+
+static int allegro_try_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+
+ f->fmt.pix.width = clamp_t(__u32, f->fmt.pix.width,
+ ALLEGRO_WIDTH_MIN, ALLEGRO_WIDTH_MAX);
+ f->fmt.pix.height = clamp_t(__u32, f->fmt.pix.height,
+ ALLEGRO_HEIGHT_MIN, ALLEGRO_HEIGHT_MAX);
+
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_H264;
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage =
+ estimate_stream_size(f->fmt.pix.width, f->fmt.pix.height);
+
+ return 0;
+}
+
+static int allegro_g_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct allegro_channel *channel = fh_to_channel(fh);
+
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+
+ f->fmt.pix.width = channel->width;
+ f->fmt.pix.height = channel->height;
+
+ f->fmt.pix.colorspace = channel->colorspace;
+ f->fmt.pix.ycbcr_enc = channel->ycbcr_enc;
+ f->fmt.pix.quantization = channel->quantization;
+ f->fmt.pix.xfer_func = channel->xfer_func;
+
+ f->fmt.pix.pixelformat = channel->pixelformat;
+ f->fmt.pix.bytesperline = channel->stride;
+ f->fmt.pix.sizeimage = channel->sizeimage_raw;
+
+ return 0;
+}
+
+static int allegro_try_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+
+ /*
+ * The firmware of the Allegro codec handles the padding internally
+ * and expects the visual frame size when configuring a channel.
+ * Therefore, unlike other encoder drivers, this driver does not round
+ * up the width and height to macroblock alignment and does not
+ * implement the selection api.
+ */
+ f->fmt.pix.width = clamp_t(__u32, f->fmt.pix.width,
+ ALLEGRO_WIDTH_MIN, ALLEGRO_WIDTH_MAX);
+ f->fmt.pix.height = clamp_t(__u32, f->fmt.pix.height,
+ ALLEGRO_HEIGHT_MIN, ALLEGRO_HEIGHT_MAX);
+
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_NV12;
+ f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 32);
+ f->fmt.pix.sizeimage =
+ f->fmt.pix.bytesperline * f->fmt.pix.height * 3 / 2;
+
+ return 0;
+}
+
+static int allegro_s_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct allegro_channel *channel = fh_to_channel(fh);
+ int err;
+
+ err = allegro_try_fmt_vid_out(file, fh, f);
+ if (err)
+ return err;
+
+ channel->width = f->fmt.pix.width;
+ channel->height = f->fmt.pix.height;
+ channel->stride = f->fmt.pix.bytesperline;
+ channel->sizeimage_raw = f->fmt.pix.sizeimage;
+
+ channel->colorspace = f->fmt.pix.colorspace;
+ channel->ycbcr_enc = f->fmt.pix.ycbcr_enc;
+ channel->quantization = f->fmt.pix.quantization;
+ channel->xfer_func = f->fmt.pix.xfer_func;
+
+ channel->level =
+ select_minimum_h264_level(channel->width, channel->height);
+ channel->sizeimage_encoded =
+ estimate_stream_size(channel->width, channel->height);
+
+ return 0;
+}
+
+static int allegro_channel_cmd_stop(struct allegro_channel *channel)
+{
+ struct allegro_dev *dev = channel->dev;
+ struct vb2_v4l2_buffer *dst_buf;
+
+ switch (allegro_get_state(channel)) {
+ case ALLEGRO_STATE_DRAIN:
+ case ALLEGRO_STATE_WAIT_FOR_BUFFER:
+ return -EBUSY;
+ case ALLEGRO_STATE_ENCODING:
+ allegro_set_state(channel, ALLEGRO_STATE_DRAIN);
+ break;
+ default:
+ return 0;
+ }
+
+ /* If there are output buffers, they must be encoded */
+ if (v4l2_m2m_num_src_bufs_ready(channel->fh.m2m_ctx) != 0) {
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "channel %d: CMD_STOP: continue encoding src buffers\n",
+ channel->mcu_channel_id);
+ return 0;
+ }
+
+ /* If there is a capture buffer, use it to signal EOS */
+ dst_buf = v4l2_m2m_dst_buf_remove(channel->fh.m2m_ctx);
+ if (dst_buf) {
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "channel %d: CMD_STOP: signaling EOS\n",
+ channel->mcu_channel_id);
+ allegro_channel_buf_done(channel, dst_buf, VB2_BUF_STATE_DONE);
+ return 0;
+ }
+
+ /*
+ * If there are no capture buffers, we need to wait for the next
+ * buffer to signal EOS.
+ */
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "channel %d: CMD_STOP: wait for CAPTURE buffer to signal EOS\n",
+ channel->mcu_channel_id);
+ allegro_set_state(channel, ALLEGRO_STATE_WAIT_FOR_BUFFER);
+
+ return 0;
+}
+
+static int allegro_channel_cmd_start(struct allegro_channel *channel)
+{
+ switch (allegro_get_state(channel)) {
+ case ALLEGRO_STATE_DRAIN:
+ case ALLEGRO_STATE_WAIT_FOR_BUFFER:
+ return -EBUSY;
+ case ALLEGRO_STATE_STOPPED:
+ allegro_set_state(channel, ALLEGRO_STATE_ENCODING);
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int allegro_encoder_cmd(struct file *file, void *fh,
+ struct v4l2_encoder_cmd *cmd)
+{
+ struct allegro_channel *channel = fh_to_channel(fh);
+ int err;
+
+ err = v4l2_m2m_ioctl_try_encoder_cmd(file, fh, cmd);
+ if (err)
+ return err;
+
+ switch (cmd->cmd) {
+ case V4L2_ENC_CMD_STOP:
+ err = allegro_channel_cmd_stop(channel);
+ break;
+ case V4L2_ENC_CMD_START:
+ err = allegro_channel_cmd_start(channel);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int allegro_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ switch (fsize->pixel_format) {
+ case V4L2_PIX_FMT_H264:
+ case V4L2_PIX_FMT_NV12:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (fsize->index)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ fsize->stepwise.min_width = ALLEGRO_WIDTH_MIN;
+ fsize->stepwise.max_width = ALLEGRO_WIDTH_MAX;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.min_height = ALLEGRO_HEIGHT_MIN;
+ fsize->stepwise.max_height = ALLEGRO_HEIGHT_MAX;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
+static int allegro_ioctl_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct v4l2_fh *fh = file->private_data;
+ struct allegro_channel *channel = fh_to_channel(fh);
+ int err;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ err = allegro_create_channel(channel);
+ if (err)
+ return err;
+ }
+
+ return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
+}
+
+static int allegro_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+ default:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ }
+}
+
+static const struct v4l2_ioctl_ops allegro_ioctl_ops = {
+ .vidioc_querycap = allegro_querycap,
+ .vidioc_enum_fmt_vid_cap = allegro_enum_fmt_vid,
+ .vidioc_enum_fmt_vid_out = allegro_enum_fmt_vid,
+ .vidioc_g_fmt_vid_cap = allegro_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = allegro_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = allegro_try_fmt_vid_cap,
+ .vidioc_g_fmt_vid_out = allegro_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = allegro_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = allegro_s_fmt_vid_out,
+
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+
+ .vidioc_streamon = allegro_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd,
+ .vidioc_encoder_cmd = allegro_encoder_cmd,
+ .vidioc_enum_framesizes = allegro_enum_framesizes,
+
+ .vidioc_subscribe_event = allegro_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct v4l2_file_operations allegro_fops = {
+ .owner = THIS_MODULE,
+ .open = allegro_open,
+ .release = allegro_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static int allegro_register_device(struct allegro_dev *dev)
+{
+ struct video_device *video_dev = &dev->video_dev;
+
+ strscpy(video_dev->name, "allegro", sizeof(video_dev->name));
+ video_dev->fops = &allegro_fops;
+ video_dev->ioctl_ops = &allegro_ioctl_ops;
+ video_dev->release = video_device_release_empty;
+ video_dev->lock = &dev->lock;
+ video_dev->v4l2_dev = &dev->v4l2_dev;
+ video_dev->vfl_dir = VFL_DIR_M2M;
+ video_dev->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+ video_set_drvdata(video_dev, dev);
+
+ return video_register_device(video_dev, VFL_TYPE_GRABBER, 0);
+}
+
+static void allegro_device_run(void *priv)
+{
+ struct allegro_channel *channel = priv;
+ struct allegro_dev *dev = channel->dev;
+ struct vb2_v4l2_buffer *src_buf;
+ struct vb2_v4l2_buffer *dst_buf;
+ dma_addr_t src_y;
+ dma_addr_t src_uv;
+ dma_addr_t dst_addr;
+ unsigned long dst_size;
+
+ dst_buf = v4l2_m2m_next_dst_buf(channel->fh.m2m_ctx);
+ dst_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+ dst_size = vb2_plane_size(&dst_buf->vb2_buf, 0);
+ allegro_mcu_send_put_stream_buffer(dev, channel, dst_addr, dst_size);
+
+ src_buf = v4l2_m2m_next_src_buf(channel->fh.m2m_ctx);
+ src_buf->sequence = channel->osequence++;
+
+ src_y = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ src_uv = src_y + (channel->stride * channel->height);
+ allegro_mcu_send_encode_frame(dev, channel, src_y, src_uv);
+}
+
+static const struct v4l2_m2m_ops allegro_m2m_ops = {
+ .device_run = allegro_device_run,
+};
+
+static int allegro_mcu_hw_init(struct allegro_dev *dev,
+ const struct fw_info *info)
+{
+ int err;
+
+ allegro_mbox_init(dev, &dev->mbox_command,
+ info->mailbox_cmd, info->mailbox_size);
+ allegro_mbox_init(dev, &dev->mbox_status,
+ info->mailbox_status, info->mailbox_size);
+
+ allegro_mcu_enable_interrupts(dev);
+
+ /* The mcu sends INIT after reset. */
+ allegro_mcu_start(dev);
+ err = allegro_mcu_wait_for_init_timeout(dev, 5000);
+ if (err < 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "mcu did not send INIT after reset\n");
+ err = -EIO;
+ goto err_disable_interrupts;
+ }
+
+ err = allegro_alloc_buffer(dev, &dev->suballocator,
+ info->suballocator_size);
+ if (err) {
+ v4l2_err(&dev->v4l2_dev,
+ "failed to allocate %zu bytes for suballocator\n",
+ info->suballocator_size);
+ goto err_reset_mcu;
+ }
+
+ allegro_mcu_send_init(dev, dev->suballocator.paddr,
+ dev->suballocator.size);
+ err = allegro_mcu_wait_for_init_timeout(dev, 5000);
+ if (err < 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "mcu failed to configure sub-allocator\n");
+ err = -EIO;
+ goto err_free_suballocator;
+ }
+
+ return 0;
+
+err_free_suballocator:
+ allegro_free_buffer(dev, &dev->suballocator);
+err_reset_mcu:
+ allegro_mcu_reset(dev);
+err_disable_interrupts:
+ allegro_mcu_disable_interrupts(dev);
+
+ return err;
+}
+
+static int allegro_mcu_hw_deinit(struct allegro_dev *dev)
+{
+ int err;
+
+ err = allegro_mcu_reset(dev);
+ if (err)
+ v4l2_warn(&dev->v4l2_dev,
+ "mcu failed to enter sleep state\n");
+
+ err = allegro_mcu_disable_interrupts(dev);
+ if (err)
+ v4l2_warn(&dev->v4l2_dev,
+ "failed to disable interrupts\n");
+
+ allegro_free_buffer(dev, &dev->suballocator);
+
+ return 0;
+}
+
+static void allegro_fw_callback(const struct firmware *fw, void *context)
+{
+ struct allegro_dev *dev = context;
+ const char *fw_codec_name = "al5e.fw";
+ const struct firmware *fw_codec;
+ int err;
+ const struct fw_info *info;
+
+ if (!fw)
+ return;
+
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "requesting codec firmware '%s'\n", fw_codec_name);
+ err = request_firmware(&fw_codec, fw_codec_name, &dev->plat_dev->dev);
+ if (err)
+ goto err_release_firmware;
+
+ info = allegro_get_firmware_info(dev, fw, fw_codec);
+ if (!info) {
+ v4l2_err(&dev->v4l2_dev, "firmware is not supported\n");
+ goto err_release_firmware_codec;
+ }
+
+ v4l2_info(&dev->v4l2_dev,
+ "using mcu firmware version '%s'\n", info->version);
+
+ /* Ensure that the mcu is sleeping at the reset vector */
+ err = allegro_mcu_reset(dev);
+ if (err) {
+ v4l2_err(&dev->v4l2_dev, "failed to reset mcu\n");
+ goto err_release_firmware_codec;
+ }
+
+ allegro_copy_firmware(dev, fw->data, fw->size);
+ allegro_copy_fw_codec(dev, fw_codec->data, fw_codec->size);
+
+ err = allegro_mcu_hw_init(dev, info);
+ if (err) {
+ v4l2_err(&dev->v4l2_dev, "failed to initialize mcu\n");
+ goto err_free_fw_codec;
+ }
+
+ dev->m2m_dev = v4l2_m2m_init(&allegro_m2m_ops);
+ if (IS_ERR(dev->m2m_dev)) {
+ v4l2_err(&dev->v4l2_dev, "failed to init mem2mem device\n");
+ goto err_mcu_hw_deinit;
+ }
+
+ err = allegro_register_device(dev);
+ if (err) {
+ v4l2_err(&dev->v4l2_dev, "failed to register video device\n");
+ goto err_m2m_release;
+ }
+
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "allegro codec registered as /dev/video%d\n",
+ dev->video_dev.num);
+
+ release_firmware(fw_codec);
+ release_firmware(fw);
+
+ return;
+
+err_m2m_release:
+ v4l2_m2m_release(dev->m2m_dev);
+ dev->m2m_dev = NULL;
+err_mcu_hw_deinit:
+ allegro_mcu_hw_deinit(dev);
+err_free_fw_codec:
+ allegro_free_fw_codec(dev);
+err_release_firmware_codec:
+ release_firmware(fw_codec);
+err_release_firmware:
+ release_firmware(fw);
+}
+
+static int allegro_firmware_request_nowait(struct allegro_dev *dev)
+{
+ const char *fw = "al5e_b.fw";
+
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "requesting firmware '%s'\n", fw);
+ return request_firmware_nowait(THIS_MODULE, true, fw,
+ &dev->plat_dev->dev, GFP_KERNEL, dev,
+ allegro_fw_callback);
+}
+
+static int allegro_probe(struct platform_device *pdev)
+{
+ struct allegro_dev *dev;
+ struct resource *res, *sram_res;
+ int ret;
+ int irq;
+ void __iomem *regs, *sram_regs;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+ dev->plat_dev = pdev;
+ init_completion(&dev->init_complete);
+ INIT_LIST_HEAD(&dev->channels);
+
+ mutex_init(&dev->lock);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ if (!res) {
+ dev_err(&pdev->dev,
+ "regs resource missing from device tree\n");
+ return -EINVAL;
+ }
+ regs = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res));
+ if (!regs) {
+ dev_err(&pdev->dev, "failed to map registers\n");
+ return -ENOMEM;
+ }
+ dev->regmap = devm_regmap_init_mmio(&pdev->dev, regs,
+ &allegro_regmap_config);
+ if (IS_ERR(dev->regmap)) {
+ dev_err(&pdev->dev, "failed to init regmap\n");
+ return PTR_ERR(dev->regmap);
+ }
+
+ sram_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
+ if (!sram_res) {
+ dev_err(&pdev->dev,
+ "sram resource missing from device tree\n");
+ return -EINVAL;
+ }
+ sram_regs = devm_ioremap_nocache(&pdev->dev,
+ sram_res->start,
+ resource_size(sram_res));
+ if (!sram_regs) {
+ dev_err(&pdev->dev, "failed to map sram\n");
+ return -ENOMEM;
+ }
+ dev->sram = devm_regmap_init_mmio(&pdev->dev, sram_regs,
+ &allegro_sram_config);
+ if (IS_ERR(dev->sram)) {
+ dev_err(&pdev->dev, "failed to init sram\n");
+ return PTR_ERR(dev->sram);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq resource\n");
+ return irq;
+ }
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ allegro_hardirq,
+ allegro_irq_thread,
+ IRQF_SHARED, dev_name(&pdev->dev), dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request irq: %d\n", ret);
+ return ret;
+ }
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, dev);
+
+ ret = allegro_firmware_request_nowait(dev);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "failed to request firmware: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int allegro_remove(struct platform_device *pdev)
+{
+ struct allegro_dev *dev = platform_get_drvdata(pdev);
+
+ video_unregister_device(&dev->video_dev);
+ if (dev->m2m_dev)
+ v4l2_m2m_release(dev->m2m_dev);
+ allegro_mcu_hw_deinit(dev);
+ allegro_free_fw_codec(dev);
+
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ return 0;
+}
+
+static const struct of_device_id allegro_dt_ids[] = {
+ { .compatible = "allegro,al5e-1.1" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, allegro_dt_ids);
+
+static struct platform_driver allegro_driver = {
+ .probe = allegro_probe,
+ .remove = allegro_remove,
+ .driver = {
+ .name = "allegro",
+ .of_match_table = of_match_ptr(allegro_dt_ids),
+ },
+};
+
+module_platform_driver(allegro_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michael Tretter <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("Allegro DVT encoder driver");
diff --git a/drivers/staging/media/allegro-dvt/nal-h264.c b/drivers/staging/media/allegro-dvt/nal-h264.c
new file mode 100644
index 000000000000..4e14b77851e1
--- /dev/null
+++ b/drivers/staging/media/allegro-dvt/nal-h264.c
@@ -0,0 +1,1001 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Pengutronix, Michael Tretter <kernel@pengutronix.de>
+ *
+ * Convert NAL units between raw byte sequence payloads (RBSP) and C structs
+ *
+ * The conversion is defined in "ITU-T Rec. H.264 (04/2017) Advanced video
+ * coding for generic audiovisual services". Decoder drivers may use the
+ * parser to parse RBSP from encoded streams and configure the hardware, if
+ * the hardware is not able to parse RBSP itself. Encoder drivers may use the
+ * generator to generate the RBSP for SPS/PPS nal units and add them to the
+ * encoded stream if the hardware does not generate the units.
+ */
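+
+/*
+ * A minimal usage sketch (for illustration only): an encoder driver that
+ * has to prepend parameter sets to the coded stream could fill a
+ * &struct nal_h264_sps and serialize it in front of the frame data, e.g.
+ *
+ *   struct nal_h264_sps sps = { ... };
+ *   ssize_t size = nal_h264_write_sps(dev, buffer, buffer_size, &sps);
+ *
+ * where a negative return value indicates an error, e.g. that the buffer
+ * was too small to hold the generated NAL unit.
+ */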
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/v4l2-controls.h>
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/log2.h>
+
+#include "nal-h264.h"
+
+/*
+ * See Rec. ITU-T H.264 (04/2017) Table 7-1 – NAL unit type codes, syntax
+ * element categories, and NAL unit type classes
+ */
+enum nal_unit_type {
+ SEQUENCE_PARAMETER_SET = 7,
+ PICTURE_PARAMETER_SET = 8,
+ FILLER_DATA = 12,
+};
+
+struct rbsp;
+
+struct nal_h264_ops {
+ int (*rbsp_bit)(struct rbsp *rbsp, int *val);
+ int (*rbsp_bits)(struct rbsp *rbsp, int n, unsigned int *val);
+ int (*rbsp_uev)(struct rbsp *rbsp, unsigned int *val);
+ int (*rbsp_sev)(struct rbsp *rbsp, int *val);
+};
+
+/**
+ * struct rbsp - State object for handling a raw byte sequence payload
+ * @data: pointer to the data of the rbsp
+ * @size: maximum size of the data of the rbsp
+ * @pos: current bit position inside the rbsp
+ * @num_consecutive_zeros: number of zeros before @pos
+ * @ops: per datatype functions for interacting with the rbsp
+ * @error: an error occurred while handling the rbsp
+ *
+ * This struct is passed around the various parsing functions and tracks the
+ * current position within the raw byte sequence payload.
+ *
+ * The @ops field separates the operation, i.e., reading/writing a value
+ * from/to that rbsp, from the structure of the NAL unit. This allows a
+ * single function to iterate the NAL unit, while @ops provides function
+ * pointers for handling each data type in the rbsp.
+ */
+struct rbsp {
+ u8 *data;
+ size_t size;
+ unsigned int pos;
+ unsigned int num_consecutive_zeros;
+ struct nal_h264_ops *ops;
+ int error;
+};
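+
+/*
+ * For illustration: with this separation, the same nal_h264_rbsp_sps() walk
+ * serializes an SPS when @ops points at the write implementation and parses
+ * one when it points at the read implementation; only the ops table passed
+ * to rbsp_init() differs.
+ */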
+
+static void rbsp_init(struct rbsp *rbsp, void *addr, size_t size,
+ struct nal_h264_ops *ops)
+{
+ if (!rbsp)
+ return;
+
+ rbsp->data = addr;
+ rbsp->size = size;
+ rbsp->pos = 0;
+ rbsp->ops = ops;
+ rbsp->error = 0;
+}
+
+/**
+ * nal_h264_profile_from_v4l2() - Get profile_idc for v4l2 h264 profile
+ * @profile: the profile as &enum v4l2_mpeg_video_h264_profile
+ *
+ * Convert the &enum v4l2_mpeg_video_h264_profile to profile_idc as specified
+ * in Rec. ITU-T H.264 (04/2017) A.2.
+ *
+ * Return: the profile_idc for the passed profile
+ */
+int nal_h264_profile_from_v4l2(enum v4l2_mpeg_video_h264_profile profile)
+{
+ switch (profile) {
+ case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
+ return 66;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
+ return 77;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED:
+ return 88;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
+ return 100;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * nal_h264_level_from_v4l2() - Get level_idc for v4l2 h264 level
+ * @level: the level as &enum v4l2_mpeg_video_h264_level
+ *
+ * Convert the &enum v4l2_mpeg_video_h264_level to level_idc as specified in
+ * Rec. ITU-T H.264 (04/2017) A.3.2.
+ *
+ * Return: the level_idc for the passed level
+ */
+int nal_h264_level_from_v4l2(enum v4l2_mpeg_video_h264_level level)
+{
+ switch (level) {
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
+ return 10;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
+ return 9;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
+ return 11;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
+ return 12;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
+ return 13;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
+ return 20;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
+ return 21;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
+ return 22;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
+ return 30;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
+ return 31;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
+ return 32;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
+ return 40;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
+ return 41;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
+ return 42;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_0:
+ return 50;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
+ return 51;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rbsp_read_bits(struct rbsp *rbsp, int n, unsigned int *value);
+static int rbsp_write_bits(struct rbsp *rbsp, int n, unsigned int value);
+
+/*
+ * When reading or writing, the emulation_prevention_three_byte is detected
+ * only once the two set bits of the 0x03 byte need to be inserted.
+ * Therefore, we are not actually adding the 0x03 byte as such, but the two
+ * set bits followed by the six zero bits of the next byte.
+ */
+#define EMULATION_PREVENTION_THREE_BYTE (0x3 << 6)
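+
+/*
+ * Worked example (for illustration): the payload bytes 0x00 0x00 0x01 would
+ * look like a start code, so the NAL unit carries 0x00 0x00 0x03 0x01
+ * instead and the reader drops the 0x03 again. As the insertion point is
+ * detected after 22 consecutive zero bits rather than on byte boundaries,
+ * the constant above holds the two set bits that complete the 0x03 byte
+ * plus the six zero bits that start the following byte.
+ */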
+
+static int add_emulation_prevention_three_byte(struct rbsp *rbsp)
+{
+ rbsp->num_consecutive_zeros = 0;
+ rbsp_write_bits(rbsp, 8, EMULATION_PREVENTION_THREE_BYTE);
+
+ return 0;
+}
+
+static int discard_emulation_prevention_three_byte(struct rbsp *rbsp)
+{
+ unsigned int tmp = 0;
+
+ rbsp->num_consecutive_zeros = 0;
+ rbsp_read_bits(rbsp, 8, &tmp);
+ if (tmp != EMULATION_PREVENTION_THREE_BYTE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static inline int rbsp_read_bit(struct rbsp *rbsp)
+{
+ int shift;
+ int ofs;
+ int bit;
+ int err;
+
+ if (rbsp->num_consecutive_zeros == 22) {
+ err = discard_emulation_prevention_three_byte(rbsp);
+ if (err)
+ return err;
+ }
+
+ shift = 7 - (rbsp->pos % 8);
+ ofs = rbsp->pos / 8;
+ if (ofs >= rbsp->size)
+ return -EINVAL;
+
+ bit = (rbsp->data[ofs] >> shift) & 1;
+
+ rbsp->pos++;
+
+ if (bit == 1 ||
+ (rbsp->num_consecutive_zeros < 7 && (rbsp->pos % 8 == 0)))
+ rbsp->num_consecutive_zeros = 0;
+ else
+ rbsp->num_consecutive_zeros++;
+
+ return bit;
+}
+
+static inline int rbsp_write_bit(struct rbsp *rbsp, bool value)
+{
+ int shift;
+ int ofs;
+
+ if (rbsp->num_consecutive_zeros == 22)
+ add_emulation_prevention_three_byte(rbsp);
+
+ shift = 7 - (rbsp->pos % 8);
+ ofs = rbsp->pos / 8;
+ if (ofs >= rbsp->size)
+ return -EINVAL;
+
+ rbsp->data[ofs] &= ~(1 << shift);
+ rbsp->data[ofs] |= value << shift;
+
+ rbsp->pos++;
+
+ if (value == 1 ||
+ (rbsp->num_consecutive_zeros < 7 && (rbsp->pos % 8 == 0))) {
+ rbsp->num_consecutive_zeros = 0;
+ } else {
+ rbsp->num_consecutive_zeros++;
+ }
+
+ return 0;
+}
+
+static inline int rbsp_read_bits(struct rbsp *rbsp, int n, unsigned int *value)
+{
+ int i;
+ int bit;
+ unsigned int tmp = 0;
+
+ if (n > 8 * sizeof(*value))
+ return -EINVAL;
+
+ for (i = n; i > 0; i--) {
+ bit = rbsp_read_bit(rbsp);
+ if (bit < 0)
+ return bit;
+ tmp |= bit << (i - 1);
+ }
+
+ if (value)
+ *value = tmp;
+
+ return 0;
+}
+
+static int rbsp_write_bits(struct rbsp *rbsp, int n, unsigned int value)
+{
+ int ret;
+
+ if (n > 8 * sizeof(value))
+ return -EINVAL;
+
+ while (n--) {
+ ret = rbsp_write_bit(rbsp, (value >> n) & 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
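+/*
+ * Unsigned Exp-Golomb coding as specified in Rec. ITU-T H.264 (04/2017)
+ * 9.1: a value v is coded as floor(log2(v + 1)) leading zero bits followed
+ * by the binary representation of v + 1. For example, v = 3 is coded as
+ * "00100" and v = 0 as the single bit "1".
+ */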
+static int rbsp_read_uev(struct rbsp *rbsp, unsigned int *value)
+{
+ int leading_zero_bits = 0;
+ unsigned int tmp = 0;
+ int ret;
+
+ while ((ret = rbsp_read_bit(rbsp)) == 0)
+ leading_zero_bits++;
+ if (ret < 0)
+ return ret;
+
+ if (leading_zero_bits > 0) {
+ ret = rbsp_read_bits(rbsp, leading_zero_bits, &tmp);
+ if (ret)
+ return ret;
+ }
+
+ if (value)
+ *value = (1 << leading_zero_bits) - 1 + tmp;
+
+ return 0;
+}
+
+static int rbsp_write_uev(struct rbsp *rbsp, unsigned int *value)
+{
+ int ret;
+ int leading_zero_bits;
+
+ if (!value)
+ return -EINVAL;
+
+ leading_zero_bits = ilog2(*value + 1);
+
+ ret = rbsp_write_bits(rbsp, leading_zero_bits, 0);
+ if (ret)
+ return ret;
+
+ return rbsp_write_bits(rbsp, leading_zero_bits + 1, *value + 1);
+}
+
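+/*
+ * Signed Exp-Golomb coding as specified in Rec. ITU-T H.264 (04/2017)
+ * 9.1.1: the unsigned code number k maps to the values 0, 1, -1, 2, -2, ...
+ * i.e., a positive value v is coded as k = 2 * v - 1 and a negative value
+ * as k = -2 * v.
+ */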
+static int rbsp_read_sev(struct rbsp *rbsp, int *value)
+{
+ int ret;
+ unsigned int tmp;
+
+ ret = rbsp_read_uev(rbsp, &tmp);
+ if (ret)
+ return ret;
+
+ if (value) {
+ if (tmp & 1)
+ *value = (tmp + 1) / 2;
+ else
+ *value = -(tmp / 2);
+ }
+
+ return 0;
+}
+
+static int rbsp_write_sev(struct rbsp *rbsp, int *value)
+{
+ unsigned int tmp;
+
+ if (!value)
+ return -EINVAL;
+
+ if (*value > 0)
+ tmp = 2 * (*value) - 1;
+ else
+ tmp = -2 * (*value);
+
+ return rbsp_write_uev(rbsp, &tmp);
+}
+
+static int __rbsp_write_bit(struct rbsp *rbsp, int *value)
+{
+ return rbsp_write_bit(rbsp, *value);
+}
+
+static int __rbsp_write_bits(struct rbsp *rbsp, int n, unsigned int *value)
+{
+ return rbsp_write_bits(rbsp, n, *value);
+}
+
+static struct nal_h264_ops write = {
+ .rbsp_bit = __rbsp_write_bit,
+ .rbsp_bits = __rbsp_write_bits,
+ .rbsp_uev = rbsp_write_uev,
+ .rbsp_sev = rbsp_write_sev,
+};
+
+static int __rbsp_read_bit(struct rbsp *rbsp, int *value)
+{
+ int tmp = rbsp_read_bit(rbsp);
+
+ if (tmp < 0)
+ return tmp;
+ *value = tmp;
+
+ return 0;
+}
+
+static struct nal_h264_ops read = {
+ .rbsp_bit = __rbsp_read_bit,
+ .rbsp_bits = rbsp_read_bits,
+ .rbsp_uev = rbsp_read_uev,
+ .rbsp_sev = rbsp_read_sev,
+};
+
+static inline void rbsp_bit(struct rbsp *rbsp, int *value)
+{
+ if (rbsp->error)
+ return;
+ rbsp->error = rbsp->ops->rbsp_bit(rbsp, value);
+}
+
+static inline void rbsp_bits(struct rbsp *rbsp, int n, int *value)
+{
+ if (rbsp->error)
+ return;
+ rbsp->error = rbsp->ops->rbsp_bits(rbsp, n, value);
+}
+
+static inline void rbsp_uev(struct rbsp *rbsp, unsigned int *value)
+{
+ if (rbsp->error)
+ return;
+ rbsp->error = rbsp->ops->rbsp_uev(rbsp, value);
+}
+
+static inline void rbsp_sev(struct rbsp *rbsp, int *value)
+{
+ if (rbsp->error)
+ return;
+ rbsp->error = rbsp->ops->rbsp_sev(rbsp, value);
+}
+
+static void nal_h264_rbsp_trailing_bits(struct rbsp *rbsp)
+{
+ unsigned int rbsp_stop_one_bit = 1;
+ unsigned int rbsp_alignment_zero_bit = 0;
+
+ rbsp_bit(rbsp, &rbsp_stop_one_bit);
+ rbsp_bits(rbsp, round_up(rbsp->pos, 8) - rbsp->pos,
+ &rbsp_alignment_zero_bit);
+}
+
+static void nal_h264_write_start_code_prefix(struct rbsp *rbsp)
+{
+ u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);
+ int i = 4;
+
+ if (DIV_ROUND_UP(rbsp->pos, 8) + i > rbsp->size) {
+ rbsp->error = -EINVAL;
+ return;
+ }
+
+ p[0] = 0x00;
+ p[1] = 0x00;
+ p[2] = 0x00;
+ p[3] = 0x01;
+
+ rbsp->pos += i * 8;
+}
+
+static void nal_h264_read_start_code_prefix(struct rbsp *rbsp)
+{
+ u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);
+ int i = 4;
+
+ if (DIV_ROUND_UP(rbsp->pos, 8) + i > rbsp->size) {
+ rbsp->error = -EINVAL;
+ return;
+ }
+
+ if (p[0] != 0x00 || p[1] != 0x00 || p[2] != 0x00 || p[3] != 0x01) {
+ rbsp->error = -EINVAL;
+ return;
+ }
+
+ rbsp->pos += i * 8;
+}
+
+static void nal_h264_write_filler_data(struct rbsp *rbsp)
+{
+ u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);
+ int i;
+
+ /* Keep 1 byte extra for terminating the NAL unit */
+ i = rbsp->size - DIV_ROUND_UP(rbsp->pos, 8) - 1;
+ memset(p, 0xff, i);
+ rbsp->pos += i * 8;
+}
+
+static void nal_h264_read_filler_data(struct rbsp *rbsp)
+{
+ u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);
+
+ while (*p == 0xff) {
+ if (DIV_ROUND_UP(rbsp->pos, 8) > rbsp->size) {
+ rbsp->error = -EINVAL;
+ return;
+ }
+
+ p++;
+ rbsp->pos += 8;
+ }
+}
+
+static void nal_h264_rbsp_hrd_parameters(struct rbsp *rbsp,
+ struct nal_h264_hrd_parameters *hrd)
+{
+ unsigned int i;
+
+ if (!hrd) {
+ rbsp->error = -EINVAL;
+ return;
+ }
+
+ rbsp_uev(rbsp, &hrd->cpb_cnt_minus1);
+ rbsp_bits(rbsp, 4, &hrd->bit_rate_scale);
+ rbsp_bits(rbsp, 4, &hrd->cpb_size_scale);
+
+ for (i = 0; i <= hrd->cpb_cnt_minus1; i++) {
+ rbsp_uev(rbsp, &hrd->bit_rate_value_minus1[i]);
+ rbsp_uev(rbsp, &hrd->cpb_size_value_minus1[i]);
+ rbsp_bit(rbsp, &hrd->cbr_flag[i]);
+ }
+
+ rbsp_bits(rbsp, 5, &hrd->initial_cpb_removal_delay_length_minus1);
+ rbsp_bits(rbsp, 5, &hrd->cpb_removal_delay_length_minus1);
+ rbsp_bits(rbsp, 5, &hrd->dpb_output_delay_length_minus1);
+ rbsp_bits(rbsp, 5, &hrd->time_offset_length);
+}
+
+static void nal_h264_rbsp_vui_parameters(struct rbsp *rbsp,
+ struct nal_h264_vui_parameters *vui)
+{
+ if (!vui) {
+ rbsp->error = -EINVAL;
+ return;
+ }
+
+ rbsp_bit(rbsp, &vui->aspect_ratio_info_present_flag);
+ if (vui->aspect_ratio_info_present_flag) {
+ rbsp_bits(rbsp, 8, &vui->aspect_ratio_idc);
+ if (vui->aspect_ratio_idc == 255) {
+ rbsp_bits(rbsp, 16, &vui->sar_width);
+ rbsp_bits(rbsp, 16, &vui->sar_height);
+ }
+ }
+
+ rbsp_bit(rbsp, &vui->overscan_info_present_flag);
+ if (vui->overscan_info_present_flag)
+ rbsp_bit(rbsp, &vui->overscan_appropriate_flag);
+
+ rbsp_bit(rbsp, &vui->video_signal_type_present_flag);
+ if (vui->video_signal_type_present_flag) {
+ rbsp_bits(rbsp, 3, &vui->video_format);
+ rbsp_bit(rbsp, &vui->video_full_range_flag);
+
+ rbsp_bit(rbsp, &vui->colour_description_present_flag);
+ if (vui->colour_description_present_flag) {
+ rbsp_bits(rbsp, 8, &vui->colour_primaries);
+ rbsp_bits(rbsp, 8, &vui->transfer_characteristics);
+ rbsp_bits(rbsp, 8, &vui->matrix_coefficients);
+ }
+ }
+
+ rbsp_bit(rbsp, &vui->chroma_loc_info_present_flag);
+ if (vui->chroma_loc_info_present_flag) {
+ rbsp_uev(rbsp, &vui->chroma_sample_loc_type_top_field);
+ rbsp_uev(rbsp, &vui->chroma_sample_loc_type_bottom_field);
+ }
+
+ rbsp_bit(rbsp, &vui->timing_info_present_flag);
+ if (vui->timing_info_present_flag) {
+ rbsp_bits(rbsp, 32, &vui->num_units_in_tick);
+ rbsp_bits(rbsp, 32, &vui->time_scale);
+ rbsp_bit(rbsp, &vui->fixed_frame_rate_flag);
+ }
+
+ rbsp_bit(rbsp, &vui->nal_hrd_parameters_present_flag);
+ if (vui->nal_hrd_parameters_present_flag)
+ nal_h264_rbsp_hrd_parameters(rbsp, &vui->nal_hrd_parameters);
+
+ rbsp_bit(rbsp, &vui->vcl_hrd_parameters_present_flag);
+ if (vui->vcl_hrd_parameters_present_flag)
+ nal_h264_rbsp_hrd_parameters(rbsp, &vui->vcl_hrd_parameters);
+
+ if (vui->nal_hrd_parameters_present_flag ||
+ vui->vcl_hrd_parameters_present_flag)
+ rbsp_bit(rbsp, &vui->low_delay_hrd_flag);
+
+ rbsp_bit(rbsp, &vui->pic_struct_present_flag);
+
+ rbsp_bit(rbsp, &vui->bitstream_restriction_flag);
+ if (vui->bitstream_restriction_flag) {
+ rbsp_bit(rbsp, &vui->motion_vectors_over_pic_boundaries_flag);
+ rbsp_uev(rbsp, &vui->max_bytes_per_pic_denom);
+ rbsp_uev(rbsp, &vui->max_bits_per_mb_denom);
+ rbsp_uev(rbsp, &vui->log2_max_mv_length_horizontal);
+ rbsp_uev(rbsp, &vui->log21_max_mv_length_vertical);
+ rbsp_uev(rbsp, &vui->max_num_reorder_frames);
+ rbsp_uev(rbsp, &vui->max_dec_frame_buffering);
+ }
+}
+
+static void nal_h264_rbsp_sps(struct rbsp *rbsp, struct nal_h264_sps *sps)
+{
+ unsigned int i;
+
+ if (!sps) {
+ rbsp->error = -EINVAL;
+ return;
+ }
+
+ rbsp_bits(rbsp, 8, &sps->profile_idc);
+ rbsp_bit(rbsp, &sps->constraint_set0_flag);
+ rbsp_bit(rbsp, &sps->constraint_set1_flag);
+ rbsp_bit(rbsp, &sps->constraint_set2_flag);
+ rbsp_bit(rbsp, &sps->constraint_set3_flag);
+ rbsp_bit(rbsp, &sps->constraint_set4_flag);
+ rbsp_bit(rbsp, &sps->constraint_set5_flag);
+ rbsp_bits(rbsp, 2, &sps->reserved_zero_2bits);
+ rbsp_bits(rbsp, 8, &sps->level_idc);
+
+ rbsp_uev(rbsp, &sps->seq_parameter_set_id);
+
+ if (sps->profile_idc == 100 || sps->profile_idc == 110 ||
+ sps->profile_idc == 122 || sps->profile_idc == 244 ||
+ sps->profile_idc == 44 || sps->profile_idc == 83 ||
+ sps->profile_idc == 86 || sps->profile_idc == 118 ||
+ sps->profile_idc == 128 || sps->profile_idc == 138 ||
+ sps->profile_idc == 139 || sps->profile_idc == 134 ||
+ sps->profile_idc == 135) {
+ rbsp_uev(rbsp, &sps->chroma_format_idc);
+
+ if (sps->chroma_format_idc == 3)
+ rbsp_bit(rbsp, &sps->separate_colour_plane_flag);
+ rbsp_uev(rbsp, &sps->bit_depth_luma_minus8);
+ rbsp_uev(rbsp, &sps->bit_depth_chroma_minus8);
+ rbsp_bit(rbsp, &sps->qpprime_y_zero_transform_bypass_flag);
+ rbsp_bit(rbsp, &sps->seq_scaling_matrix_present_flag);
+ if (sps->seq_scaling_matrix_present_flag)
+ rbsp->error = -EINVAL;
+ }
+
+ rbsp_uev(rbsp, &sps->log2_max_frame_num_minus4);
+
+ rbsp_uev(rbsp, &sps->pic_order_cnt_type);
+ switch (sps->pic_order_cnt_type) {
+ case 0:
+ rbsp_uev(rbsp, &sps->log2_max_pic_order_cnt_lsb_minus4);
+ break;
+ case 1:
+ rbsp_bit(rbsp, &sps->delta_pic_order_always_zero_flag);
+ rbsp_sev(rbsp, &sps->offset_for_non_ref_pic);
+ rbsp_sev(rbsp, &sps->offset_for_top_to_bottom_field);
+
+ rbsp_uev(rbsp, &sps->num_ref_frames_in_pic_order_cnt_cycle);
+ for (i = 0; i < sps->num_ref_frames_in_pic_order_cnt_cycle; i++)
+ rbsp_sev(rbsp, &sps->offset_for_ref_frame[i]);
+ break;
+ default:
+ rbsp->error = -EINVAL;
+ break;
+ }
+
+ rbsp_uev(rbsp, &sps->max_num_ref_frames);
+ rbsp_bit(rbsp, &sps->gaps_in_frame_num_value_allowed_flag);
+ rbsp_uev(rbsp, &sps->pic_width_in_mbs_minus1);
+ rbsp_uev(rbsp, &sps->pic_height_in_map_units_minus1);
+
+ rbsp_bit(rbsp, &sps->frame_mbs_only_flag);
+ if (!sps->frame_mbs_only_flag)
+ rbsp_bit(rbsp, &sps->mb_adaptive_frame_field_flag);
+
+ rbsp_bit(rbsp, &sps->direct_8x8_inference_flag);
+
+ rbsp_bit(rbsp, &sps->frame_cropping_flag);
+ if (sps->frame_cropping_flag) {
+ rbsp_uev(rbsp, &sps->crop_left);
+ rbsp_uev(rbsp, &sps->crop_right);
+ rbsp_uev(rbsp, &sps->crop_top);
+ rbsp_uev(rbsp, &sps->crop_bottom);
+ }
+
+ rbsp_bit(rbsp, &sps->vui_parameters_present_flag);
+ if (sps->vui_parameters_present_flag)
+ nal_h264_rbsp_vui_parameters(rbsp, &sps->vui);
+}
+
+static void nal_h264_rbsp_pps(struct rbsp *rbsp, struct nal_h264_pps *pps)
+{
+ int i;
+
+ rbsp_uev(rbsp, &pps->pic_parameter_set_id);
+ rbsp_uev(rbsp, &pps->seq_parameter_set_id);
+ rbsp_bit(rbsp, &pps->entropy_coding_mode_flag);
+ rbsp_bit(rbsp, &pps->bottom_field_pic_order_in_frame_present_flag);
+ rbsp_uev(rbsp, &pps->num_slice_groups_minus1);
+ if (pps->num_slice_groups_minus1 > 0) {
+ rbsp_uev(rbsp, &pps->slice_group_map_type);
+ switch (pps->slice_group_map_type) {
+ case 0:
+ for (i = 0; i < pps->num_slice_groups_minus1; i++)
+ rbsp_uev(rbsp, &pps->run_length_minus1[i]);
+ break;
+ case 2:
+ for (i = 0; i < pps->num_slice_groups_minus1; i++) {
+ rbsp_uev(rbsp, &pps->top_left[i]);
+ rbsp_uev(rbsp, &pps->bottom_right[i]);
+ }
+ break;
+ case 3: case 4: case 5:
+ rbsp_bit(rbsp, &pps->slice_group_change_direction_flag);
+ rbsp_uev(rbsp, &pps->slice_group_change_rate_minus1);
+ break;
+ case 6:
+ rbsp_uev(rbsp, &pps->pic_size_in_map_units_minus1);
+ for (i = 0; i < pps->pic_size_in_map_units_minus1; i++)
+ rbsp_bits(rbsp,
+ order_base_2(pps->num_slice_groups_minus1 + 1),
+ &pps->slice_group_id[i]);
+ break;
+ default:
+ break;
+ }
+ }
+ rbsp_uev(rbsp, &pps->num_ref_idx_l0_default_active_minus1);
+ rbsp_uev(rbsp, &pps->num_ref_idx_l1_default_active_minus1);
+ rbsp_bit(rbsp, &pps->weighted_pred_flag);
+ rbsp_bits(rbsp, 2, &pps->weighted_bipred_idc);
+ rbsp_sev(rbsp, &pps->pic_init_qp_minus26);
+ rbsp_sev(rbsp, &pps->pic_init_qs_minus26);
+ rbsp_sev(rbsp, &pps->chroma_qp_index_offset);
+ rbsp_bit(rbsp, &pps->deblocking_filter_control_present_flag);
+ rbsp_bit(rbsp, &pps->constrained_intra_pred_flag);
+ rbsp_bit(rbsp, &pps->redundant_pic_cnt_present_flag);
+ if (/* more_rbsp_data() */ false) {
+ rbsp_bit(rbsp, &pps->transform_8x8_mode_flag);
+ rbsp_bit(rbsp, &pps->pic_scaling_matrix_present_flag);
+ if (pps->pic_scaling_matrix_present_flag)
+ rbsp->error = -EINVAL;
+ rbsp_sev(rbsp, &pps->second_chroma_qp_index_offset);
+ }
+}
+
+/**
+ * nal_h264_write_sps() - Write SPS NAL unit into RBSP format
+ * @dev: device pointer
+ * @dest: the buffer that is filled with RBSP data
+ * @n: maximum size of @dest in bytes
+ * @sps: &struct nal_h264_sps to convert to RBSP
+ *
+ * Convert @sps to RBSP data and write it into @dest.
+ *
+ * The size of the SPS NAL unit is not known in advance and this function
+ * fails if @dest does not hold sufficient space for the SPS NAL unit.
+ *
+ * Return: number of bytes written to @dest or negative error code
+ */
+ssize_t nal_h264_write_sps(const struct device *dev,
+ void *dest, size_t n, struct nal_h264_sps *sps)
+{
+ struct rbsp rbsp;
+ unsigned int forbidden_zero_bit = 0;
+ unsigned int nal_ref_idc = 0;
+ unsigned int nal_unit_type = SEQUENCE_PARAMETER_SET;
+
+ if (!dest)
+ return -EINVAL;
+
+ rbsp_init(&rbsp, dest, n, &write);
+
+ nal_h264_write_start_code_prefix(&rbsp);
+
+ rbsp_bit(&rbsp, &forbidden_zero_bit);
+ rbsp_bits(&rbsp, 2, &nal_ref_idc);
+ rbsp_bits(&rbsp, 5, &nal_unit_type);
+
+ nal_h264_rbsp_sps(&rbsp, sps);
+
+ nal_h264_rbsp_trailing_bits(&rbsp);
+
+ if (rbsp.error)
+ return rbsp.error;
+
+ return DIV_ROUND_UP(rbsp.pos, 8);
+}
+EXPORT_SYMBOL_GPL(nal_h264_write_sps);
+
+/**
+ * nal_h264_read_sps() - Read SPS NAL unit from RBSP format
+ * @dev: device pointer
+ * @sps: the &struct nal_h264_sps to fill from the RBSP data
+ * @src: the buffer that contains the RBSP data
+ * @n: size of @src in bytes
+ *
+ * Read RBSP data from @src and use it to fill @sps.
+ *
+ * Return: number of bytes read from @src or negative error code
+ */
+ssize_t nal_h264_read_sps(const struct device *dev,
+ struct nal_h264_sps *sps, void *src, size_t n)
+{
+ struct rbsp rbsp;
+ unsigned int forbidden_zero_bit;
+ unsigned int nal_ref_idc;
+ unsigned int nal_unit_type;
+
+ if (!src)
+ return -EINVAL;
+
+ rbsp_init(&rbsp, src, n, &read);
+
+ nal_h264_read_start_code_prefix(&rbsp);
+
+ rbsp_bit(&rbsp, &forbidden_zero_bit);
+ rbsp_bits(&rbsp, 2, &nal_ref_idc);
+ rbsp_bits(&rbsp, 5, &nal_unit_type);
+
+ if (rbsp.error ||
+ forbidden_zero_bit != 0 ||
+ nal_ref_idc != 0 ||
+ nal_unit_type != SEQUENCE_PARAMETER_SET)
+ return -EINVAL;
+
+ nal_h264_rbsp_sps(&rbsp, sps);
+
+ nal_h264_rbsp_trailing_bits(&rbsp);
+
+ if (rbsp.error)
+ return rbsp.error;
+
+ return DIV_ROUND_UP(rbsp.pos, 8);
+}
+EXPORT_SYMBOL_GPL(nal_h264_read_sps);
+
+/**
+ * nal_h264_write_pps() - Write PPS NAL unit into RBSP format
+ * @dev: device pointer
+ * @dest: the buffer that is filled with RBSP data
+ * @n: maximum size of @dest in bytes
+ * @pps: &struct nal_h264_pps to convert to RBSP
+ *
+ * Convert @pps to RBSP data and write it into @dest.
+ *
+ * The size of the PPS NAL unit is not known in advance and this function
+ * fails if @dest does not hold sufficient space for the PPS NAL unit.
+ *
+ * Return: number of bytes written to @dest or negative error code
+ */
+ssize_t nal_h264_write_pps(const struct device *dev,
+ void *dest, size_t n, struct nal_h264_pps *pps)
+{
+ struct rbsp rbsp;
+ unsigned int forbidden_zero_bit = 0;
+ unsigned int nal_ref_idc = 0;
+ unsigned int nal_unit_type = PICTURE_PARAMETER_SET;
+
+ if (!dest)
+ return -EINVAL;
+
+ rbsp_init(&rbsp, dest, n, &write);
+
+ nal_h264_write_start_code_prefix(&rbsp);
+
+ /* NAL unit header */
+ rbsp_bit(&rbsp, &forbidden_zero_bit);
+ rbsp_bits(&rbsp, 2, &nal_ref_idc);
+ rbsp_bits(&rbsp, 5, &nal_unit_type);
+
+ nal_h264_rbsp_pps(&rbsp, pps);
+
+ nal_h264_rbsp_trailing_bits(&rbsp);
+
+ if (rbsp.error)
+ return rbsp.error;
+
+ return DIV_ROUND_UP(rbsp.pos, 8);
+}
+EXPORT_SYMBOL_GPL(nal_h264_write_pps);
+
+/**
+ * nal_h264_read_pps() - Read PPS NAL unit from RBSP format
+ * @dev: device pointer
+ * @pps: the &struct nal_h264_pps to fill from the RBSP data
+ * @src: the buffer that contains the RBSP data
+ * @n: size of @src in bytes
+ *
+ * Read RBSP data from @src and use it to fill @pps.
+ *
+ * Return: number of bytes read from @src or negative error code
+ */
+ssize_t nal_h264_read_pps(const struct device *dev,
+ struct nal_h264_pps *pps, void *src, size_t n)
+{
+ struct rbsp rbsp;
+
+ if (!src)
+ return -EINVAL;
+
+ rbsp_init(&rbsp, src, n, &read);
+
+ nal_h264_read_start_code_prefix(&rbsp);
+
+ /* NAL unit header */
+ rbsp.pos += 8;
+
+ nal_h264_rbsp_pps(&rbsp, pps);
+
+ nal_h264_rbsp_trailing_bits(&rbsp);
+
+ if (rbsp.error)
+ return rbsp.error;
+
+ return DIV_ROUND_UP(rbsp.pos, 8);
+}
+EXPORT_SYMBOL_GPL(nal_h264_read_pps);
+
+/**
+ * nal_h264_write_filler() - Write filler data RBSP
+ * @dev: device pointer
+ * @dest: buffer to fill with filler data
+ * @n: size of the buffer to fill with filler data
+ *
+ * Write a filler data RBSP to @dest with a size of @n bytes and return the
+ * number of written filler data bytes.
+ *
+ * Use this function to generate dummy data in an RBSP data stream that can be
+ * safely ignored by h264 decoders.
+ *
+ * The RBSP format of the filler data is specified in Rec. ITU-T H.264
+ * (04/2017) 7.3.2.7 Filler data RBSP syntax.
+ *
+ * Return: number of filler data bytes (including marker) or negative error
+ */
+ssize_t nal_h264_write_filler(const struct device *dev, void *dest, size_t n)
+{
+ struct rbsp rbsp;
+ unsigned int forbidden_zero_bit = 0;
+ unsigned int nal_ref_idc = 0;
+ unsigned int nal_unit_type = FILLER_DATA;
+
+ if (!dest)
+ return -EINVAL;
+
+ rbsp_init(&rbsp, dest, n, &write);
+
+ nal_h264_write_start_code_prefix(&rbsp);
+
+ rbsp_bit(&rbsp, &forbidden_zero_bit);
+ rbsp_bits(&rbsp, 2, &nal_ref_idc);
+ rbsp_bits(&rbsp, 5, &nal_unit_type);
+
+ nal_h264_write_filler_data(&rbsp);
+
+ nal_h264_rbsp_trailing_bits(&rbsp);
+
+ return DIV_ROUND_UP(rbsp.pos, 8);
+}
+EXPORT_SYMBOL_GPL(nal_h264_write_filler);
+
+/**
+ * nal_h264_read_filler() - Read filler data RBSP
+ * @dev: device pointer
+ * @src: buffer with RBSP data that is read
+ * @n: maximum size of src that shall be read
+ *
+ * Read a filler data RBSP from @src up to a maximum size of @n bytes and
+ * return the size of the filler data in bytes including the marker.
+ *
+ * This function is used to parse filler data and skip the respective bytes in
+ * the RBSP data.
+ *
+ * The RBSP format of the filler data is specified in Rec. ITU-T H.264
+ * (04/2017) 7.3.2.7 Filler data RBSP syntax.
+ *
+ * Return: number of filler data bytes (including marker) or negative error
+ */
+ssize_t nal_h264_read_filler(const struct device *dev, void *src, size_t n)
+{
+ struct rbsp rbsp;
+ unsigned int forbidden_zero_bit;
+ unsigned int nal_ref_idc;
+ unsigned int nal_unit_type;
+
+ if (!src)
+ return -EINVAL;
+
+ rbsp_init(&rbsp, src, n, &read);
+
+ nal_h264_read_start_code_prefix(&rbsp);
+
+ rbsp_bit(&rbsp, &forbidden_zero_bit);
+ rbsp_bits(&rbsp, 2, &nal_ref_idc);
+ rbsp_bits(&rbsp, 5, &nal_unit_type);
+
+ if (rbsp.error)
+ return rbsp.error;
+ if (forbidden_zero_bit != 0 ||
+ nal_ref_idc != 0 ||
+ nal_unit_type != FILLER_DATA)
+ return -EINVAL;
+
+ nal_h264_read_filler_data(&rbsp);
+ nal_h264_rbsp_trailing_bits(&rbsp);
+
+ if (rbsp.error)
+ return rbsp.error;
+
+ return DIV_ROUND_UP(rbsp.pos, 8);
+}
+EXPORT_SYMBOL_GPL(nal_h264_read_filler);
diff --git a/drivers/staging/media/allegro-dvt/nal-h264.h b/drivers/staging/media/allegro-dvt/nal-h264.h
new file mode 100644
index 000000000000..2ba7cbced7a5
--- /dev/null
+++ b/drivers/staging/media/allegro-dvt/nal-h264.h
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Pengutronix, Michael Tretter <kernel@pengutronix.de>
+ *
+ * Convert NAL units between raw byte sequence payloads (RBSP) and C structs.
+ */
+
+#ifndef __NAL_H264_H__
+#define __NAL_H264_H__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+/**
+ * struct nal_h264_hrd_parameters - HRD parameters
+ *
+ * C struct representation of the HRD parameters as defined by
+ * Rec. ITU-T H.264 (04/2017) E.1.2 HRD parameters syntax.
+ */
+struct nal_h264_hrd_parameters {
+ unsigned int cpb_cnt_minus1;
+ unsigned int bit_rate_scale;
+ unsigned int cpb_size_scale;
+ struct {
+ int bit_rate_value_minus1[16];
+ int cpb_size_value_minus1[16];
+ unsigned int cbr_flag[16];
+ };
+ unsigned int initial_cpb_removal_delay_length_minus1;
+ unsigned int cpb_removal_delay_length_minus1;
+ unsigned int dpb_output_delay_length_minus1;
+ unsigned int time_offset_length;
+};
+
+/**
+ * struct nal_h264_vui_parameters - VUI parameters
+ *
+ * C struct representation of the VUI parameters as defined by Rec. ITU-T
+ * H.264 (04/2017) E.1.1 VUI parameters syntax.
+ */
+struct nal_h264_vui_parameters {
+ unsigned int aspect_ratio_info_present_flag;
+ struct {
+ unsigned int aspect_ratio_idc;
+ unsigned int sar_width;
+ unsigned int sar_height;
+ };
+ unsigned int overscan_info_present_flag;
+ unsigned int overscan_appropriate_flag;
+ unsigned int video_signal_type_present_flag;
+ struct {
+ unsigned int video_format;
+ unsigned int video_full_range_flag;
+ unsigned int colour_description_present_flag;
+ struct {
+ unsigned int colour_primaries;
+ unsigned int transfer_characteristics;
+ unsigned int matrix_coefficients;
+ };
+ };
+ unsigned int chroma_loc_info_present_flag;
+ struct {
+ unsigned int chroma_sample_loc_type_top_field;
+ unsigned int chroma_sample_loc_type_bottom_field;
+ };
+ unsigned int timing_info_present_flag;
+ struct {
+ unsigned int num_units_in_tick;
+ unsigned int time_scale;
+ unsigned int fixed_frame_rate_flag;
+ };
+ unsigned int nal_hrd_parameters_present_flag;
+ struct nal_h264_hrd_parameters nal_hrd_parameters;
+ unsigned int vcl_hrd_parameters_present_flag;
+ struct nal_h264_hrd_parameters vcl_hrd_parameters;
+ unsigned int low_delay_hrd_flag;
+ unsigned int pic_struct_present_flag;
+ unsigned int bitstream_restriction_flag;
+ struct {
+ unsigned int motion_vectors_over_pic_boundaries_flag;
+ unsigned int max_bytes_per_pic_denom;
+ unsigned int max_bits_per_mb_denom;
+ unsigned int log2_max_mv_length_horizontal;
+ unsigned int log21_max_mv_length_vertical;
+ unsigned int max_num_reorder_frames;
+ unsigned int max_dec_frame_buffering;
+ };
+};
+
+/**
+ * struct nal_h264_sps - Sequence parameter set
+ *
+ * C struct representation of the sequence parameter set NAL unit as defined by
+ * Rec. ITU-T H.264 (04/2017) 7.3.2.1.1 Sequence parameter set data syntax.
+ */
+struct nal_h264_sps {
+ unsigned int profile_idc;
+ unsigned int constraint_set0_flag;
+ unsigned int constraint_set1_flag;
+ unsigned int constraint_set2_flag;
+ unsigned int constraint_set3_flag;
+ unsigned int constraint_set4_flag;
+ unsigned int constraint_set5_flag;
+ unsigned int reserved_zero_2bits;
+ unsigned int level_idc;
+ unsigned int seq_parameter_set_id;
+ struct {
+ unsigned int chroma_format_idc;
+ unsigned int separate_colour_plane_flag;
+ unsigned int bit_depth_luma_minus8;
+ unsigned int bit_depth_chroma_minus8;
+ unsigned int qpprime_y_zero_transform_bypass_flag;
+ unsigned int seq_scaling_matrix_present_flag;
+ };
+ unsigned int log2_max_frame_num_minus4;
+ unsigned int pic_order_cnt_type;
+ union {
+ unsigned int log2_max_pic_order_cnt_lsb_minus4;
+ struct {
+ unsigned int delta_pic_order_always_zero_flag;
+ int offset_for_non_ref_pic;
+ int offset_for_top_to_bottom_field;
+ unsigned int num_ref_frames_in_pic_order_cnt_cycle;
+ int offset_for_ref_frame[255];
+ };
+ };
+ unsigned int max_num_ref_frames;
+ unsigned int gaps_in_frame_num_value_allowed_flag;
+ unsigned int pic_width_in_mbs_minus1;
+ unsigned int pic_height_in_map_units_minus1;
+ unsigned int frame_mbs_only_flag;
+ unsigned int mb_adaptive_frame_field_flag;
+ unsigned int direct_8x8_inference_flag;
+ unsigned int frame_cropping_flag;
+ struct {
+ unsigned int crop_left;
+ unsigned int crop_right;
+ unsigned int crop_top;
+ unsigned int crop_bottom;
+ };
+ unsigned int vui_parameters_present_flag;
+ struct nal_h264_vui_parameters vui;
+};
+
+/**
+ * struct nal_h264_pps - Picture parameter set
+ *
+ * C struct representation of the picture parameter set NAL unit as defined by
+ * Rec. ITU-T H.264 (04/2017) 7.3.2.2 Picture parameter set RBSP syntax.
+ */
+struct nal_h264_pps {
+ unsigned int pic_parameter_set_id;
+ unsigned int seq_parameter_set_id;
+ unsigned int entropy_coding_mode_flag;
+ unsigned int bottom_field_pic_order_in_frame_present_flag;
+ unsigned int num_slice_groups_minus1;
+ unsigned int slice_group_map_type;
+ union {
+ unsigned int run_length_minus1[8];
+ struct {
+ unsigned int top_left[8];
+ unsigned int bottom_right[8];
+ };
+ struct {
+ unsigned int slice_group_change_direction_flag;
+ unsigned int slice_group_change_rate_minus1;
+ };
+ struct {
+ unsigned int pic_size_in_map_units_minus1;
+ unsigned int slice_group_id[8];
+ };
+ };
+ unsigned int num_ref_idx_l0_default_active_minus1;
+ unsigned int num_ref_idx_l1_default_active_minus1;
+ unsigned int weighted_pred_flag;
+ unsigned int weighted_bipred_idc;
+ int pic_init_qp_minus26;
+ int pic_init_qs_minus26;
+ int chroma_qp_index_offset;
+ unsigned int deblocking_filter_control_present_flag;
+ unsigned int constrained_intra_pred_flag;
+ unsigned int redundant_pic_cnt_present_flag;
+ struct {
+ unsigned int transform_8x8_mode_flag;
+ unsigned int pic_scaling_matrix_present_flag;
+ int second_chroma_qp_index_offset;
+ };
+};
+
+int nal_h264_profile_from_v4l2(enum v4l2_mpeg_video_h264_profile profile);
+int nal_h264_level_from_v4l2(enum v4l2_mpeg_video_h264_level level);
+
+ssize_t nal_h264_write_sps(const struct device *dev,
+ void *dest, size_t n, struct nal_h264_sps *sps);
+ssize_t nal_h264_read_sps(const struct device *dev,
+ struct nal_h264_sps *sps, void *src, size_t n);
+void nal_h264_print_sps(const struct device *dev, struct nal_h264_sps *sps);
+
+ssize_t nal_h264_write_pps(const struct device *dev,
+ void *dest, size_t n, struct nal_h264_pps *pps);
+ssize_t nal_h264_read_pps(const struct device *dev,
+ struct nal_h264_pps *pps, void *src, size_t n);
+void nal_h264_print_pps(const struct device *dev, struct nal_h264_pps *pps);
+
+ssize_t nal_h264_write_filler(const struct device *dev, void *dest, size_t n);
+ssize_t nal_h264_read_filler(const struct device *dev, void *src, size_t n);
+
+#endif /* __NAL_H264_H__ */
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index 09903ffb13ba..2c60a1fb6350 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -2310,11 +2310,6 @@ static int bcm2048_vidioc_querycap(struct file *file, void *priv,
strscpy(capability->card, BCM2048_DRIVER_CARD,
sizeof(capability->card));
snprintf(capability->bus_info, 32, "I2C: 0x%X", bdev->client->addr);
- capability->device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO |
- V4L2_CAP_HW_FREQ_SEEK;
- capability->capabilities = capability->device_caps |
- V4L2_CAP_DEVICE_CAPS;
-
return 0;
}
@@ -2570,6 +2565,8 @@ static const struct video_device bcm2048_viddev_template = {
.name = BCM2048_DRIVER_NAME,
.release = video_device_release_empty,
.ioctl_ops = &bcm2048_ioctl_ops,
+ .device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO |
+ V4L2_CAP_HW_FREQ_SEEK,
};
/*
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
index 30e2edc0cec5..52397ad0e3e2 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
@@ -1251,10 +1251,10 @@ static int ipipe_s_config(struct v4l2_subdev *sd, struct vpfe_ipipe_config *cfg)
struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
unsigned int i;
int rval = 0;
+ struct ipipe_module_params *params;
for (i = 0; i < ARRAY_SIZE(ipipe_modules); i++) {
const struct ipipe_module_if *module_if;
- struct ipipe_module_params *params;
void *from, *to;
size_t size;
@@ -1265,25 +1265,30 @@ static int ipipe_s_config(struct v4l2_subdev *sd, struct vpfe_ipipe_config *cfg)
from = *(void **)((void *)cfg + module_if->config_offset);
params = kmalloc(sizeof(*params), GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
to = (void *)params + module_if->param_offset;
size = module_if->param_size;
if (to && from && size) {
if (copy_from_user(to, (void __user *)from, size)) {
rval = -EFAULT;
- break;
+ goto error_free;
}
rval = module_if->set(ipipe, to);
if (rval)
- goto error;
+ goto error_free;
} else if (to && !from && size) {
rval = module_if->set(ipipe, NULL);
if (rval)
- goto error;
+ goto error_free;
}
kfree(params);
}
-error:
+ return rval;
+
+error_free:
+ kfree(params);
return rval;
}
@@ -1772,7 +1777,7 @@ vpfe_ipipe_init(struct vpfe_ipipe_device *ipipe, struct platform_device *pdev)
struct media_pad *pads = &ipipe->pads[0];
struct v4l2_subdev *sd = &ipipe->subdev;
struct media_entity *me = &sd->entity;
- struct resource *res, *memres;
+ struct resource *res, *res2, *memres;
res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
if (!res)
@@ -1786,11 +1791,11 @@ vpfe_ipipe_init(struct vpfe_ipipe_device *ipipe, struct platform_device *pdev)
if (!ipipe->base_addr)
goto error_release;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 6);
- if (!res)
+ res2 = platform_get_resource(pdev, IORESOURCE_MEM, 6);
+ if (!res2)
goto error_unmap;
- ipipe->isp5_base_addr = ioremap_nocache(res->start,
- resource_size(res));
+ ipipe->isp5_base_addr = ioremap_nocache(res2->start,
+ resource_size(res2));
if (!ipipe->isp5_base_addr)
goto error_unmap;
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif.c b/drivers/staging/media/davinci_vpfe/dm365_isif.c
index 46fd8184fc77..05a997f7aa5d 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_isif.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_isif.c
@@ -816,7 +816,7 @@ isif_config_dfc(struct vpfe_isif_device *isif, struct vpfe_isif_dfc *vdfc)
/* Correct whole line or partial */
if (vdfc->corr_whole_line)
- val |= 1 << ISIF_VDFC_CORR_WHOLE_LN_SHIFT;
+ val |= BIT(ISIF_VDFC_CORR_WHOLE_LN_SHIFT);
/* level shift value */
val |= (vdfc->def_level_shift & ISIF_VDFC_LEVEL_SHFT_MASK) <<
@@ -844,7 +844,7 @@ isif_config_dfc(struct vpfe_isif_device *isif, struct vpfe_isif_dfc *vdfc)
val = isif_read(isif->isif_cfg.base_addr, DFCMEMCTL);
/* set DFCMARST and set DFCMWR */
- val |= 1 << ISIF_DFCMEMCTL_DFCMARST_SHIFT;
+ val |= BIT(ISIF_DFCMEMCTL_DFCMARST_SHIFT);
val |= 1;
isif_write(isif->isif_cfg.base_addr, val, DFCMEMCTL);
@@ -875,7 +875,7 @@ isif_config_dfc(struct vpfe_isif_device *isif, struct vpfe_isif_dfc *vdfc)
}
val = isif_read(isif->isif_cfg.base_addr, DFCMEMCTL);
/* clear DFCMARST and set DFCMWR */
- val &= ~(1 << ISIF_DFCMEMCTL_DFCMARST_SHIFT);
+ val &= ~BIT(ISIF_DFCMEMCTL_DFCMARST_SHIFT);
val |= 1;
isif_write(isif->isif_cfg.base_addr, val, DFCMEMCTL);
@@ -1135,7 +1135,7 @@ static int isif_config_raw(struct v4l2_subdev *sd, int mode)
isif_write(isif->isif_cfg.base_addr, val, CGAMMAWD);
/* Configure DPCM compression settings */
if (params->v4l2_pix_fmt == V4L2_PIX_FMT_SGRBG10DPCM8) {
- val = 1 << ISIF_DPCM_EN_SHIFT;
+ val = BIT(ISIF_DPCM_EN_SHIFT);
val |= (params->dpcm_predictor &
ISIF_DPCM_PREDICTOR_MASK) << ISIF_DPCM_PREDICTOR_SHIFT;
}
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
index 57b93605bc58..9dc28ffe38d5 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
@@ -158,7 +158,7 @@ static irqreturn_t vpfe_isr(int irq, void *dev_id)
{
struct vpfe_device *vpfe_dev = dev_id;
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_isr\n");
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "%s\n", __func__);
vpfe_isif_buffer_isr(&vpfe_dev->vpfe_isif);
vpfe_resizer_buffer_isr(&vpfe_dev->vpfe_resizer);
return IRQ_HANDLED;
@@ -169,7 +169,7 @@ static irqreturn_t vpfe_vdint1_isr(int irq, void *dev_id)
{
struct vpfe_device *vpfe_dev = dev_id;
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_vdint1_isr\n");
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "%s\n", __func__);
vpfe_isif_vidint1_isr(&vpfe_dev->vpfe_isif);
return IRQ_HANDLED;
}
@@ -179,7 +179,7 @@ static irqreturn_t vpfe_imp_dma_isr(int irq, void *dev_id)
{
struct vpfe_device *vpfe_dev = dev_id;
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_imp_dma_isr\n");
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "%s\n", __func__);
vpfe_ipipeif_ss_buffer_isr(&vpfe_dev->vpfe_ipipeif);
vpfe_resizer_dma_isr(&vpfe_dev->vpfe_resizer);
return IRQ_HANDLED;
@@ -691,7 +691,7 @@ static int vpfe_remove(struct platform_device *pdev)
{
struct vpfe_device *vpfe_dev = platform_get_drvdata(pdev);
- v4l2_info(pdev->dev.driver, "vpfe_remove\n");
+ v4l2_info(pdev->dev.driver, "%s\n", __func__);
kzfree(vpfe_dev->sd);
vpfe_detach_irq(vpfe_dev);
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 510202a3b091..ab6bc452d9f6 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -419,6 +419,9 @@ static int vpfe_open(struct file *file)
/* If decoder is not initialized. initialize it */
if (!video->initialized && vpfe_update_pipe_state(video)) {
mutex_unlock(&video->lock);
+ v4l2_fh_del(&handle->vfh);
+ v4l2_fh_exit(&handle->vfh);
+ kfree(handle);
return -ENODEV;
}
/* Increment device users counter */
@@ -609,10 +612,6 @@ static int vpfe_querycap(struct file *file, void *priv,
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querycap\n");
- if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
- else
- cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;
strscpy(cap->driver, CAPTURE_DRV_NAME, sizeof(cap->driver));
@@ -1625,6 +1624,11 @@ int vpfe_video_register(struct vpfe_video_device *video,
video->video_dev.v4l2_dev = vdev;
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ video->video_dev.device_caps = V4L2_CAP_VIDEO_CAPTURE;
+ else
+ video->video_dev.device_caps = V4L2_CAP_VIDEO_OUTPUT;
+ video->video_dev.device_caps |= V4L2_CAP_STREAMING;
ret = video_register_device(&video->video_dev, VFL_TYPE_GRABBER, -1);
if (ret < 0)
pr_err("%s: could not register video device (%d)\n",
diff --git a/drivers/staging/media/hantro/Kconfig b/drivers/staging/media/hantro/Kconfig
new file mode 100644
index 000000000000..be133bbaa68a
--- /dev/null
+++ b/drivers/staging/media/hantro/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+config VIDEO_HANTRO
+ tristate "Hantro VPU driver"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on VIDEO_DEV && VIDEO_V4L2 && MEDIA_CONTROLLER
+ depends on MEDIA_CONTROLLER_REQUEST_API
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_VMALLOC
+ select V4L2_MEM2MEM_DEV
+ help
+ Support for the Hantro IP based Video Processing Unit present on
+ Rockchip SoCs, which accelerates video and image encoding and
+ decoding.
+ To compile this driver as a module, choose M here: the module
+ will be called hantro-vpu.
+
+config VIDEO_HANTRO_ROCKCHIP
+ bool "Hantro VPU Rockchip support"
+ depends on VIDEO_HANTRO
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ default y
+ help
+ Enable support for RK3288 and RK3399 SoCs.
diff --git a/drivers/staging/media/hantro/Makefile b/drivers/staging/media/hantro/Makefile
new file mode 100644
index 000000000000..1584acdbf4a3
--- /dev/null
+++ b/drivers/staging/media/hantro/Makefile
@@ -0,0 +1,15 @@
+obj-$(CONFIG_VIDEO_HANTRO) += hantro-vpu.o
+
+hantro-vpu-y += \
+ hantro_drv.o \
+ hantro_v4l2.o \
+ hantro_h1_jpeg_enc.o \
+ hantro_g1_mpeg2_dec.o \
+ rk3399_vpu_hw_jpeg_enc.o \
+ rk3399_vpu_hw_mpeg2_dec.o \
+ hantro_jpeg.o \
+ hantro_mpeg2.o
+
+hantro-vpu-$(CONFIG_VIDEO_HANTRO_ROCKCHIP) += \
+ rk3288_vpu_hw.o \
+ rk3399_vpu_hw.o
diff --git a/drivers/staging/media/rockchip/vpu/TODO b/drivers/staging/media/hantro/TODO
index fa0c94057007..fa0c94057007 100644
--- a/drivers/staging/media/rockchip/vpu/TODO
+++ b/drivers/staging/media/hantro/TODO
diff --git a/drivers/staging/media/hantro/hantro.h b/drivers/staging/media/hantro/hantro.h
new file mode 100644
index 000000000000..62dcca9ff19c
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro.h
@@ -0,0 +1,351 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ *
+ * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef HANTRO_H_
+#define HANTRO_H_
+
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <linux/wait.h>
+#include <linux/clk.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "hantro_hw.h"
+
+#define MPEG2_MB_DIM 16
+#define MPEG2_MB_WIDTH(w) DIV_ROUND_UP(w, MPEG2_MB_DIM)
+#define MPEG2_MB_HEIGHT(h) DIV_ROUND_UP(h, MPEG2_MB_DIM)
+
+#define JPEG_MB_DIM 16
+#define JPEG_MB_WIDTH(w) DIV_ROUND_UP(w, JPEG_MB_DIM)
+#define JPEG_MB_HEIGHT(h) DIV_ROUND_UP(h, JPEG_MB_DIM)
+
+struct hantro_ctx;
+struct hantro_codec_ops;
+
+#define HANTRO_JPEG_ENCODER BIT(0)
+#define HANTRO_ENCODERS 0x0000ffff
+
+#define HANTRO_MPEG2_DECODER BIT(16)
+#define HANTRO_DECODERS 0xffff0000
+
+/**
+ * struct hantro_irq - irq handler and name
+ *
+ * @name: irq name for device tree lookup
+ * @handler: interrupt handler
+ */
+struct hantro_irq {
+ const char *name;
+ irqreturn_t (*handler)(int irq, void *priv);
+};
+
+/**
+ * struct hantro_variant - information about VPU hardware variant
+ *
+ * @enc_offset: Offset from VPU base to encoder registers.
+ * @dec_offset: Offset from VPU base to decoder registers.
+ * @enc_fmts: Encoder formats.
+ * @num_enc_fmts: Number of encoder formats.
+ * @dec_fmts: Decoder formats.
+ * @num_dec_fmts: Number of decoder formats.
+ * @codec: Supported codecs
+ * @codec_ops: Codec ops.
+ * @init: Initialize hardware.
+ * @runtime_resume: reenable hardware after power gating
+ * @irqs: array of irq names and interrupt handlers
+ * @num_irqs: number of irqs in the array
+ * @clk_names: array of clock names
+ * @num_clocks: number of clocks in the array
+ * @reg_names: array of register range names
+ * @num_regs: number of register range names in the array
+ */
+struct hantro_variant {
+ unsigned int enc_offset;
+ unsigned int dec_offset;
+ const struct hantro_fmt *enc_fmts;
+ unsigned int num_enc_fmts;
+ const struct hantro_fmt *dec_fmts;
+ unsigned int num_dec_fmts;
+ unsigned int codec;
+ const struct hantro_codec_ops *codec_ops;
+ int (*init)(struct hantro_dev *vpu);
+ int (*runtime_resume)(struct hantro_dev *vpu);
+ const struct hantro_irq *irqs;
+ int num_irqs;
+ const char * const *clk_names;
+ int num_clocks;
+ const char * const *reg_names;
+ int num_regs;
+};
+
+/**
+ * enum hantro_codec_mode - codec operating mode.
+ * @HANTRO_MODE_NONE: No operating mode. Used for RAW video formats.
+ * @HANTRO_MODE_JPEG_ENC: JPEG encoder.
+ * @HANTRO_MODE_MPEG2_DEC: MPEG-2 decoder.
+ */
+enum hantro_codec_mode {
+ HANTRO_MODE_NONE = -1,
+ HANTRO_MODE_JPEG_ENC,
+ HANTRO_MODE_MPEG2_DEC,
+};
+
+/*
+ * struct hantro_ctrl - helper type to declare supported controls
+ * @id: V4L2 control ID (V4L2_CID_xxx)
+ * @codec: codec id this control belongs to (HANTRO_JPEG_ENCODER, etc.)
+ * @cfg: control configuration
+ */
+struct hantro_ctrl {
+ unsigned int id;
+ unsigned int codec;
+ struct v4l2_ctrl_config cfg;
+};
+
+/*
+ * struct hantro_func - Hantro VPU functionality
+ *
+ * @id: processing functionality ID (can be
+ * %MEDIA_ENT_F_PROC_VIDEO_ENCODER or
+ * %MEDIA_ENT_F_PROC_VIDEO_DECODER)
+ * @vdev: &struct video_device that exposes the encoder or
+ * decoder functionality
+ * @source_pad: &struct media_pad with the source pad.
+ * @sink: &struct media_entity pointer with the sink entity
+ * @sink_pad: &struct media_pad with the sink pad.
+ * @proc: &struct media_entity pointer with the M2M device itself.
+ * @proc_pads: &struct media_pad with the @proc pads.
+ * @intf_devnode: &struct media_intf devnode pointer with the interface
+ * that controls the M2M device.
+ *
+ * Contains everything needed to attach the video device to the media device.
+ */
+struct hantro_func {
+ unsigned int id;
+ struct video_device vdev;
+ struct media_pad source_pad;
+ struct media_entity sink;
+ struct media_pad sink_pad;
+ struct media_entity proc;
+ struct media_pad proc_pads[2];
+ struct media_intf_devnode *intf_devnode;
+};
+
+static inline struct hantro_func *
+hantro_vdev_to_func(struct video_device *vdev)
+{
+ return container_of(vdev, struct hantro_func, vdev);
+}
+
+/**
+ * struct hantro_dev - driver data
+ * @v4l2_dev: V4L2 device to register video devices for.
+ * @m2m_dev: mem2mem device associated to this device.
+ * @mdev: media device associated to this device.
+ * @encoder: encoder functionality.
+ * @decoder: decoder functionality.
+ * @pdev: Pointer to VPU platform device.
+ * @dev: Pointer to device for convenient logging using
+ * dev_ macros.
+ * @clocks: Array of clock handles.
+ * @reg_bases: Mapped addresses of VPU registers.
+ * @enc_base: Mapped address of VPU encoder register for convenience.
+ * @dec_base: Mapped address of VPU decoder register for convenience.
+ * @ctrl_base: Mapped address of VPU control block.
+ * @vpu_mutex: Mutex to synchronize V4L2 calls.
+ * @irqlock: Spinlock to synchronize access to data structures
+ * shared with interrupt handlers.
+ * @variant: Hardware variant-specific parameters.
+ * @watchdog_work: Delayed work for hardware timeout handling.
+ */
+struct hantro_dev {
+ struct v4l2_device v4l2_dev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct media_device mdev;
+ struct hantro_func *encoder;
+ struct hantro_func *decoder;
+ struct platform_device *pdev;
+ struct device *dev;
+ struct clk_bulk_data *clocks;
+ void __iomem **reg_bases;
+ void __iomem *enc_base;
+ void __iomem *dec_base;
+ void __iomem *ctrl_base;
+
+ struct mutex vpu_mutex; /* video_device lock */
+ spinlock_t irqlock;
+ const struct hantro_variant *variant;
+ struct delayed_work watchdog_work;
+};
+
+/**
+ * struct hantro_ctx - Context (instance) private data.
+ *
+ * @dev: VPU driver data to which the context belongs.
+ * @fh: V4L2 file handler.
+ *
+ * @sequence_cap: Sequence counter for capture queue
+ * @sequence_out: Sequence counter for output queue
+ *
+ * @vpu_src_fmt: Descriptor of active source format.
+ * @src_fmt: V4L2 pixel format of active source format.
+ * @vpu_dst_fmt: Descriptor of active destination format.
+ * @dst_fmt: V4L2 pixel format of active destination format.
+ *
+ * @ctrl_handler: Control handler used to register controls.
+ * @jpeg_quality: User-specified JPEG compression quality.
+ *
+ * @buf_finish: Buffer finish callback. It depends on whether the
+ * context is an encoder or a decoder, and is called
+ * right before v4l2_m2m_job_finish().
+ * @codec_ops: Set of operations related to codec mode.
+ * @jpeg_enc: JPEG-encoding context.
+ * @mpeg2_dec: MPEG-2-decoding context.
+ */
+struct hantro_ctx {
+ struct hantro_dev *dev;
+ struct v4l2_fh fh;
+
+ u32 sequence_cap;
+ u32 sequence_out;
+
+ const struct hantro_fmt *vpu_src_fmt;
+ struct v4l2_pix_format_mplane src_fmt;
+ const struct hantro_fmt *vpu_dst_fmt;
+ struct v4l2_pix_format_mplane dst_fmt;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ int jpeg_quality;
+
+ int (*buf_finish)(struct hantro_ctx *ctx,
+ struct vb2_buffer *buf,
+ unsigned int bytesused);
+
+ const struct hantro_codec_ops *codec_ops;
+
+ /* Specific for particular codec modes. */
+ union {
+ struct hantro_jpeg_enc_hw_ctx jpeg_enc;
+ struct hantro_mpeg2_dec_hw_ctx mpeg2_dec;
+ };
+};
+
+/**
+ * struct hantro_fmt - information about supported video formats.
+ * @name: Human readable name of the format.
+ * @fourcc: FourCC code of the format. See V4L2_PIX_FMT_*.
+ * @codec_mode: Codec mode related to this format. See
+ * enum hantro_codec_mode.
+ * @header_size: Optional header size. Currently used by JPEG encoder.
+ * @max_depth: Maximum depth, for bitstream formats
+ * @enc_fmt: Format identifier for encoder registers.
+ * @frmsize: Supported range of frame sizes (only for bitstream formats).
+ */
+struct hantro_fmt {
+ char *name;
+ u32 fourcc;
+ enum hantro_codec_mode codec_mode;
+ int header_size;
+ int max_depth;
+ enum hantro_enc_fmt enc_fmt;
+ struct v4l2_frmsize_stepwise frmsize;
+};
+
+/* Logging helpers */
+
+/**
+ * debug - Module parameter to control level of debugging messages.
+ *
+ * The level of debugging messages can be controlled by bits of the
+ * module parameter called "debug". The meaning of the particular
+ * bits is as follows:
+ *
+ * bit 0 - global information: mode, size, init, release
+ * bit 1 - each run start/result information
+ * bit 2 - contents of small controls from userspace
+ * bit 3 - contents of big controls from userspace
+ * bit 4 - detail fmt, ctrl, buffer q/dq information
+ * bit 5 - detail function enter/leave trace information
+ * bit 6 - register write/read information
+ */
+extern int hantro_debug;
+
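+/*
+ * For example, setting "debug" to 0x41 enables bit 0 (global information)
+ * and bit 6 (register write/read trace).
+ */
+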
+#define vpu_debug(level, fmt, args...) \
+ do { \
+ if (hantro_debug & BIT(level)) \
+ pr_info("%s:%d: " fmt, \
+ __func__, __LINE__, ##args); \
+ } while (0)
+
+#define vpu_err(fmt, args...) \
+ pr_err("%s:%d: " fmt, __func__, __LINE__, ##args)
+
+/* Structure access helpers. */
+static inline struct hantro_ctx *fh_to_ctx(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct hantro_ctx, fh);
+}
+
+/* Register accessors. */
+static inline void vepu_write_relaxed(struct hantro_dev *vpu,
+ u32 val, u32 reg)
+{
+ vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ writel_relaxed(val, vpu->enc_base + reg);
+}
+
+static inline void vepu_write(struct hantro_dev *vpu, u32 val, u32 reg)
+{
+ vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ writel(val, vpu->enc_base + reg);
+}
+
+static inline u32 vepu_read(struct hantro_dev *vpu, u32 reg)
+{
+ u32 val = readl(vpu->enc_base + reg);
+
+ vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ return val;
+}
+
+static inline void vdpu_write_relaxed(struct hantro_dev *vpu,
+ u32 val, u32 reg)
+{
+ vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ writel_relaxed(val, vpu->dec_base + reg);
+}
+
+static inline void vdpu_write(struct hantro_dev *vpu, u32 val, u32 reg)
+{
+ vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ writel(val, vpu->dec_base + reg);
+}
+
+static inline u32 vdpu_read(struct hantro_dev *vpu, u32 reg)
+{
+ u32 val = readl(vpu->dec_base + reg);
+
+ vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ return val;
+}
+
+bool hantro_is_encoder_ctx(const struct hantro_ctx *ctx);
+
+void *hantro_get_ctrl(struct hantro_ctx *ctx, u32 id);
+dma_addr_t hantro_get_ref(struct vb2_queue *q, u64 ts);
+
+#endif /* HANTRO_H_ */
diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
new file mode 100644
index 000000000000..c3665f0e87a2
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_drv.c
@@ -0,0 +1,876 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Collabora, Ltd.
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ *
+ * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "hantro_v4l2.h"
+#include "hantro.h"
+#include "hantro_hw.h"
+
+#define DRIVER_NAME "hantro-vpu"
+
+int hantro_debug;
+module_param_named(debug, hantro_debug, int, 0644);
+MODULE_PARM_DESC(debug,
+ "Debug level - higher value produces more verbose messages");
+
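+/*
+ * Look up a control on this context's handler and return a pointer to its
+ * current payload, or NULL if the control has not been registered.
+ */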
+void *hantro_get_ctrl(struct hantro_ctx *ctx, u32 id)
+{
+ struct v4l2_ctrl *ctrl;
+
+ ctrl = v4l2_ctrl_find(&ctx->ctrl_handler, id);
+ return ctrl ? ctrl->p_cur.p : NULL;
+}
+
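+/*
+ * Translate a reference buffer timestamp into the DMA address of the
+ * matching buffer on the given queue, or 0 if no buffer matches.
+ */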
+dma_addr_t hantro_get_ref(struct vb2_queue *q, u64 ts)
+{
+ struct vb2_buffer *buf;
+ int index;
+
+ index = vb2_find_timestamp(q, ts, 0);
+ if (index < 0)
+ return 0;
+ buf = vb2_get_buffer(q, index);
+ return vb2_dma_contig_plane_dma_addr(buf, 0);
+}
+
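+/*
+ * Final CAPTURE buffer layout for the JPEG encoder: a CPU-built header of
+ * header_size bytes, followed by the hardware-produced payload copied in
+ * from the bounce buffer.
+ */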
+static int
+hantro_enc_buf_finish(struct hantro_ctx *ctx, struct vb2_buffer *buf,
+ unsigned int bytesused)
+{
+ size_t avail_size;
+
+ avail_size = vb2_plane_size(buf, 0) - ctx->vpu_dst_fmt->header_size;
+ if (bytesused > avail_size)
+ return -EINVAL;
+ /*
+ * The bounce buffer is only for the JPEG encoder.
+ * TODO: Rework the JPEG encoder to eliminate the need
+ * for a bounce buffer.
+ */
+ if (ctx->jpeg_enc.bounce_buffer.cpu) {
+ memcpy(vb2_plane_vaddr(buf, 0) +
+ ctx->vpu_dst_fmt->header_size,
+ ctx->jpeg_enc.bounce_buffer.cpu, bytesused);
+ }
+ buf->planes[0].bytesused =
+ ctx->vpu_dst_fmt->header_size + bytesused;
+ return 0;
+}
+
+static int
+hantro_dec_buf_finish(struct hantro_ctx *ctx, struct vb2_buffer *buf,
+ unsigned int bytesused)
+{
+ /* For decoders, set bytesused to the size of the output picture. */
+ buf->planes[0].bytesused = ctx->dst_fmt.plane_fmt[0].sizeimage;
+ return 0;
+}
+
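+/*
+ * Common completion path for both successful and failed jobs: drop the
+ * runtime PM and clock references, dequeue the source and destination
+ * buffers, run the codec-specific buf_finish hook and hand both buffers
+ * back to vb2 before finishing the mem2mem job.
+ */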
+static void hantro_job_finish(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx,
+ unsigned int bytesused,
+ enum vb2_buffer_state result)
+{
+ struct vb2_v4l2_buffer *src, *dst;
+ int ret;
+
+ pm_runtime_mark_last_busy(vpu->dev);
+ pm_runtime_put_autosuspend(vpu->dev);
+ clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);
+
+ src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ if (WARN_ON(!src))
+ return;
+ if (WARN_ON(!dst))
+ return;
+
+ src->sequence = ctx->sequence_out++;
+ dst->sequence = ctx->sequence_cap++;
+
+ v4l2_m2m_buf_copy_metadata(src, dst, true);
+
+ ret = ctx->buf_finish(ctx, &dst->vb2_buf, bytesused);
+ if (ret)
+ result = VB2_BUF_STATE_ERROR;
+
+ v4l2_m2m_buf_done(src, result);
+ v4l2_m2m_buf_done(dst, result);
+
+ v4l2_m2m_job_finish(vpu->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+void hantro_irq_done(struct hantro_dev *vpu, unsigned int bytesused,
+ enum vb2_buffer_state result)
+{
+ struct hantro_ctx *ctx =
+ v4l2_m2m_get_curr_priv(vpu->m2m_dev);
+
+ /*
+ * If cancel_delayed_work() returns false, the watchdog timeout has
+ * already expired and the watchdog work is running; it will take
+ * care of finishing the job.
+ */
+ if (cancel_delayed_work(&vpu->watchdog_work))
+ hantro_job_finish(vpu, ctx, bytesused, result);
+}
+
+void hantro_watchdog(struct work_struct *work)
+{
+ struct hantro_dev *vpu;
+ struct hantro_ctx *ctx;
+
+ vpu = container_of(to_delayed_work(work),
+ struct hantro_dev, watchdog_work);
+ ctx = v4l2_m2m_get_curr_priv(vpu->m2m_dev);
+ if (ctx) {
+ vpu_err("frame processing timed out!\n");
+ ctx->codec_ops->reset(ctx);
+ hantro_job_finish(vpu, ctx, 0, VB2_BUF_STATE_ERROR);
+ }
+}
+
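+/*
+ * Called by the mem2mem framework with a ready job: enable clocks, take a
+ * runtime PM reference and hand control over to the codec-specific run hook.
+ */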
+static void device_run(void *priv)
+{
+ struct hantro_ctx *ctx = priv;
+ int ret;
+
+ ret = clk_bulk_enable(ctx->dev->variant->num_clocks, ctx->dev->clocks);
+ if (ret)
+ goto err_cancel_job;
+ ret = pm_runtime_get_sync(ctx->dev->dev);
+ if (ret < 0)
+ goto err_cancel_job;
+
+ ctx->codec_ops->run(ctx);
+ return;
+
+err_cancel_job:
+ hantro_job_finish(ctx->dev, ctx, 0, VB2_BUF_STATE_ERROR);
+}
+
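+/*
+ * The context does not carry an explicit encoder/decoder flag; its role is
+ * inferred from the buf_finish callback installed at open() time.
+ */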
+bool hantro_is_encoder_ctx(const struct hantro_ctx *ctx)
+{
+ return ctx->buf_finish == hantro_enc_buf_finish;
+}
+
+static struct v4l2_m2m_ops vpu_m2m_ops = {
+ .device_run = device_run,
+};
+
+static int
+queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
+{
+ struct hantro_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->ops = &hantro_queue_ops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+
+ /*
+ * Driver does mostly sequential access, so sacrifice TLB efficiency
+ * for faster allocation. Also, no CPU access on the source queue,
+ * so no kernel mapping needed.
+ */
+ src_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
+ DMA_ATTR_NO_KERNEL_MAPPING;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->dev->vpu_mutex;
+ src_vq->dev = ctx->dev->v4l2_dev.dev;
+ src_vq->supports_requests = true;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ /*
+ * When encoding, the CAPTURE queue doesn't need DMA memory,
+ * because the CPU assembles the final JPEG frames from the
+ * hardware-produced JPEG payload.
+ *
+ * The hardware instead writes its output to a DMA bounce buffer.
+ */
+ if (hantro_is_encoder_ctx(ctx)) {
+ dst_vq->mem_ops = &vb2_vmalloc_memops;
+ } else {
+ dst_vq->bidirectional = true;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
+ DMA_ATTR_NO_KERNEL_MAPPING;
+ }
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->ops = &hantro_queue_ops;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->dev->vpu_mutex;
+ dst_vq->dev = ctx->dev->v4l2_dev.dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int hantro_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct hantro_ctx *ctx;
+
+ ctx = container_of(ctrl->handler,
+ struct hantro_ctx, ctrl_handler);
+
+ vpu_debug(1, "s_ctrl: id = %d, val = %d\n", ctrl->id, ctrl->val);
+
+ switch (ctrl->id) {
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ ctx->jpeg_quality = ctrl->val;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops hantro_ctrl_ops = {
+ .s_ctrl = hantro_s_ctrl,
+};
+
+static struct hantro_ctrl controls[] = {
+ {
+ .id = V4L2_CID_JPEG_COMPRESSION_QUALITY,
+ .codec = HANTRO_JPEG_ENCODER,
+ .cfg = {
+ .min = 5,
+ .max = 100,
+ .step = 1,
+ .def = 50,
+ },
+ }, {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS,
+ .codec = HANTRO_MPEG2_DECODER,
+ .cfg = {
+ .elem_size = sizeof(struct v4l2_ctrl_mpeg2_slice_params),
+ },
+ }, {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION,
+ .codec = HANTRO_MPEG2_DECODER,
+ .cfg = {
+ .elem_size = sizeof(struct v4l2_ctrl_mpeg2_quantization),
+ },
+ },
+};
+
+static int hantro_ctrls_setup(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx,
+ int allowed_codecs)
+{
+ int i, num_ctrls = ARRAY_SIZE(controls);
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, num_ctrls);
+
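+ /*
+ * Controls with a zero elem_size are standard V4L2 controls; the
+ * others carry codec-specific payloads and are registered as custom
+ * controls from their v4l2_ctrl_config.
+ */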
+ for (i = 0; i < num_ctrls; i++) {
+ if (!(allowed_codecs & controls[i].codec))
+ continue;
+ if (!controls[i].cfg.elem_size) {
+ v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &hantro_ctrl_ops,
+ controls[i].id, controls[i].cfg.min,
+ controls[i].cfg.max,
+ controls[i].cfg.step,
+ controls[i].cfg.def);
+ } else {
+ controls[i].cfg.id = controls[i].id;
+ v4l2_ctrl_new_custom(&ctx->ctrl_handler,
+ &controls[i].cfg, NULL);
+ }
+
+ if (ctx->ctrl_handler.error) {
+ vpu_err("Adding control (%d) failed %d\n",
+ controls[i].id,
+ ctx->ctrl_handler.error);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ return ctx->ctrl_handler.error;
+ }
+ }
+ return v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+}
+
+/*
+ * V4L2 file operations.
+ */
+
+static int hantro_open(struct file *filp)
+{
+ struct hantro_dev *vpu = video_drvdata(filp);
+ struct video_device *vdev = video_devdata(filp);
+ struct hantro_func *func = hantro_vdev_to_func(vdev);
+ struct hantro_ctx *ctx;
+ int allowed_codecs, ret;
+
+ /*
+ * We do not need any extra locking here, because we operate only
+ * on local data, except for reading a few fields from dev, which
+ * do not change during the device's lifetime (guaranteed by the
+ * module reference taken at open() time), and V4L2 internal objects
+ * (such as vdev and ctx->fh), which have proper locking done in the
+ * respective helper functions used here.
+ */
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = vpu;
+ if (func->id == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
+ allowed_codecs = vpu->variant->codec & HANTRO_ENCODERS;
+ ctx->buf_finish = hantro_enc_buf_finish;
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx,
+ queue_init);
+ } else if (func->id == MEDIA_ENT_F_PROC_VIDEO_DECODER) {
+ allowed_codecs = vpu->variant->codec & HANTRO_DECODERS;
+ ctx->buf_finish = hantro_dec_buf_finish;
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx,
+ queue_init);
+ } else {
+ ctx->fh.m2m_ctx = ERR_PTR(-ENODEV);
+ }
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ kfree(ctx);
+ return ret;
+ }
+
+ v4l2_fh_init(&ctx->fh, vdev);
+ filp->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ hantro_reset_fmts(ctx);
+
+ ret = hantro_ctrls_setup(vpu, ctx, allowed_codecs);
+ if (ret) {
+ vpu_err("Failed to set up controls\n");
+ goto err_fh_free;
+ }
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+
+ return 0;
+
+err_fh_free:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ return ret;
+}
+
+static int hantro_release(struct file *filp)
+{
+ struct hantro_ctx *ctx =
+ container_of(filp->private_data, struct hantro_ctx, fh);
+
+ /*
+ * No need for extra locking because this was the last reference
+ * to this file.
+ */
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ kfree(ctx);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations hantro_fops = {
+ .owner = THIS_MODULE,
+ .open = hantro_open,
+ .release = hantro_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct of_device_id of_hantro_match[] = {
+#ifdef CONFIG_VIDEO_HANTRO_ROCKCHIP
+ { .compatible = "rockchip,rk3399-vpu", .data = &rk3399_vpu_variant, },
+ { .compatible = "rockchip,rk3288-vpu", .data = &rk3288_vpu_variant, },
+#endif
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_hantro_match);
+
+static int hantro_register_entity(struct media_device *mdev,
+ struct media_entity *entity,
+ const char *entity_name,
+ struct media_pad *pads, int num_pads,
+ int function, struct video_device *vdev)
+{
+ char *name;
+ int ret;
+
+ entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
+ if (function == MEDIA_ENT_F_IO_V4L) {
+ entity->info.dev.major = VIDEO_MAJOR;
+ entity->info.dev.minor = vdev->minor;
+ }
+
+ name = devm_kasprintf(mdev->dev, GFP_KERNEL, "%s-%s", vdev->name,
+ entity_name);
+ if (!name)
+ return -ENOMEM;
+
+ entity->name = name;
+ entity->function = function;
+
+ ret = media_entity_pads_init(entity, num_pads, pads);
+ if (ret)
+ return ret;
+
+ ret = media_device_register_entity(mdev, entity);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int hantro_attach_func(struct hantro_dev *vpu,
+ struct hantro_func *func)
+{
+ struct media_device *mdev = &vpu->mdev;
+ struct media_link *link;
+ int ret;
+
+ /* Create the three entities with their pads */
+ func->source_pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = hantro_register_entity(mdev, &func->vdev.entity, "source",
+ &func->source_pad, 1, MEDIA_ENT_F_IO_V4L,
+ &func->vdev);
+ if (ret)
+ return ret;
+
+ func->proc_pads[0].flags = MEDIA_PAD_FL_SINK;
+ func->proc_pads[1].flags = MEDIA_PAD_FL_SOURCE;
+ ret = hantro_register_entity(mdev, &func->proc, "proc",
+ func->proc_pads, 2, func->id,
+ &func->vdev);
+ if (ret)
+ goto err_rel_entity0;
+
+ func->sink_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = hantro_register_entity(mdev, &func->sink, "sink",
+ &func->sink_pad, 1, MEDIA_ENT_F_IO_V4L,
+ &func->vdev);
+ if (ret)
+ goto err_rel_entity1;
+
+ /* Connect the three entities */
+ ret = media_create_pad_link(&func->vdev.entity, 0, &func->proc, 1,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ goto err_rel_entity2;
+
+ ret = media_create_pad_link(&func->proc, 0, &func->sink, 0,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ goto err_rm_links0;
+
+ /* Create video interface */
+ func->intf_devnode = media_devnode_create(mdev, MEDIA_INTF_T_V4L_VIDEO,
+ 0, VIDEO_MAJOR,
+ func->vdev.minor);
+ if (!func->intf_devnode) {
+ ret = -ENOMEM;
+ goto err_rm_links1;
+ }
+
+ /* Connect the two DMA engines to the interface */
+ link = media_create_intf_link(&func->vdev.entity,
+ &func->intf_devnode->intf,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (!link) {
+ ret = -ENOMEM;
+ goto err_rm_devnode;
+ }
+
+ link = media_create_intf_link(&func->sink, &func->intf_devnode->intf,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (!link) {
+ ret = -ENOMEM;
+ goto err_rm_devnode;
+ }
+ return 0;
+
+err_rm_devnode:
+ media_devnode_remove(func->intf_devnode);
+
+err_rm_links1:
+ media_entity_remove_links(&func->sink);
+
+err_rm_links0:
+ media_entity_remove_links(&func->proc);
+ media_entity_remove_links(&func->vdev.entity);
+
+err_rel_entity2:
+ media_device_unregister_entity(&func->sink);
+
+err_rel_entity1:
+ media_device_unregister_entity(&func->proc);
+
+err_rel_entity0:
+ media_device_unregister_entity(&func->vdev.entity);
+ return ret;
+}
+
+static void hantro_detach_func(struct hantro_func *func)
+{
+ media_devnode_remove(func->intf_devnode);
+ media_entity_remove_links(&func->sink);
+ media_entity_remove_links(&func->proc);
+ media_entity_remove_links(&func->vdev.entity);
+ media_device_unregister_entity(&func->sink);
+ media_device_unregister_entity(&func->proc);
+ media_device_unregister_entity(&func->vdev.entity);
+}
+
+static int hantro_add_func(struct hantro_dev *vpu, unsigned int funcid)
+{
+ const struct of_device_id *match;
+ struct hantro_func *func;
+ struct video_device *vfd;
+ int ret;
+
+ match = of_match_node(of_hantro_match, vpu->dev->of_node);
+ func = devm_kzalloc(vpu->dev, sizeof(*func), GFP_KERNEL);
+ if (!func) {
+ v4l2_err(&vpu->v4l2_dev, "Failed to allocate video device\n");
+ return -ENOMEM;
+ }
+
+ func->id = funcid;
+
+ vfd = &func->vdev;
+ vfd->fops = &hantro_fops;
+ vfd->release = video_device_release_empty;
+ vfd->lock = &vpu->vpu_mutex;
+ vfd->v4l2_dev = &vpu->v4l2_dev;
+ vfd->vfl_dir = VFL_DIR_M2M;
+ vfd->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
+ vfd->ioctl_ops = &hantro_ioctl_ops;
+ snprintf(vfd->name, sizeof(vfd->name), "%s-%s", match->compatible,
+ funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER ? "enc" : "dec");
+
+ if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER)
+ vpu->encoder = func;
+ else
+ vpu->decoder = func;
+
+ video_set_drvdata(vfd, vpu);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ v4l2_err(&vpu->v4l2_dev, "Failed to register video device\n");
+ return ret;
+ }
+
+ ret = hantro_attach_func(vpu, func);
+ if (ret) {
+ v4l2_err(&vpu->v4l2_dev,
+ "Failed to attach functionality to the media device\n");
+ goto err_unreg_dev;
+ }
+
+ v4l2_info(&vpu->v4l2_dev, "registered %s as /dev/video%d\n", vfd->name,
+ vfd->num);
+
+ return 0;
+
+err_unreg_dev:
+ video_unregister_device(vfd);
+ return ret;
+}
+
+static int hantro_add_enc_func(struct hantro_dev *vpu)
+{
+ if (!vpu->variant->enc_fmts)
+ return 0;
+
+ return hantro_add_func(vpu, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
+}
+
+static int hantro_add_dec_func(struct hantro_dev *vpu)
+{
+ if (!vpu->variant->dec_fmts)
+ return 0;
+
+ return hantro_add_func(vpu, MEDIA_ENT_F_PROC_VIDEO_DECODER);
+}
+
+static void hantro_remove_func(struct hantro_dev *vpu,
+ unsigned int funcid)
+{
+ struct hantro_func *func;
+
+ if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER)
+ func = vpu->encoder;
+ else
+ func = vpu->decoder;
+
+ if (!func)
+ return;
+
+ hantro_detach_func(func);
+ video_unregister_device(&func->vdev);
+}
+
+static void hantro_remove_enc_func(struct hantro_dev *vpu)
+{
+ hantro_remove_func(vpu, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
+}
+
+static void hantro_remove_dec_func(struct hantro_dev *vpu)
+{
+ hantro_remove_func(vpu, MEDIA_ENT_F_PROC_VIDEO_DECODER);
+}
+
+static const struct media_device_ops hantro_m2m_media_ops = {
+ .req_validate = vb2_request_validate,
+ .req_queue = v4l2_m2m_request_queue,
+};
+
+static int hantro_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct hantro_dev *vpu;
+ struct resource *res;
+ int num_bases;
+ int i, ret;
+
+ vpu = devm_kzalloc(&pdev->dev, sizeof(*vpu), GFP_KERNEL);
+ if (!vpu)
+ return -ENOMEM;
+
+ vpu->dev = &pdev->dev;
+ vpu->pdev = pdev;
+ mutex_init(&vpu->vpu_mutex);
+ spin_lock_init(&vpu->irqlock);
+
+ match = of_match_node(of_hantro_match, pdev->dev.of_node);
+ vpu->variant = match->data;
+
+ INIT_DELAYED_WORK(&vpu->watchdog_work, hantro_watchdog);
+
+ vpu->clocks = devm_kcalloc(&pdev->dev, vpu->variant->num_clocks,
+ sizeof(*vpu->clocks), GFP_KERNEL);
+ if (!vpu->clocks)
+ return -ENOMEM;
+
+ for (i = 0; i < vpu->variant->num_clocks; i++)
+ vpu->clocks[i].id = vpu->variant->clk_names[i];
+ ret = devm_clk_bulk_get(&pdev->dev, vpu->variant->num_clocks,
+ vpu->clocks);
+ if (ret)
+ return ret;
+
+ num_bases = vpu->variant->num_regs ?: 1;
+ vpu->reg_bases = devm_kcalloc(&pdev->dev, num_bases,
+ sizeof(*vpu->reg_bases), GFP_KERNEL);
+ if (!vpu->reg_bases)
+ return -ENOMEM;
+
+ for (i = 0; i < num_bases; i++) {
+ res = vpu->variant->reg_names ?
+ platform_get_resource_byname(vpu->pdev, IORESOURCE_MEM,
+ vpu->variant->reg_names[i]) :
+ platform_get_resource(vpu->pdev, IORESOURCE_MEM, 0);
+ vpu->reg_bases[i] = devm_ioremap_resource(vpu->dev, res);
+ if (IS_ERR(vpu->reg_bases[i]))
+ return PTR_ERR(vpu->reg_bases[i]);
+ }
+ vpu->enc_base = vpu->reg_bases[0] + vpu->variant->enc_offset;
+ vpu->dec_base = vpu->reg_bases[0] + vpu->variant->dec_offset;
+
+ ret = dma_set_coherent_mask(vpu->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(vpu->dev, "Could not set DMA coherent mask.\n");
+ return ret;
+ }
+
+ for (i = 0; i < vpu->variant->num_irqs; i++) {
+ const char *irq_name = vpu->variant->irqs[i].name;
+ int irq;
+
+ if (!vpu->variant->irqs[i].handler)
+ continue;
+
+ irq = platform_get_irq_byname(vpu->pdev, irq_name);
+ if (irq <= 0) {
+ dev_err(vpu->dev, "Could not get %s IRQ.\n", irq_name);
+ return -ENXIO;
+ }
+
+ ret = devm_request_irq(vpu->dev, irq,
+ vpu->variant->irqs[i].handler, 0,
+ dev_name(vpu->dev), vpu);
+ if (ret) {
+ dev_err(vpu->dev, "Could not request %s IRQ.\n",
+ irq_name);
+ return ret;
+ }
+ }
+
+ ret = vpu->variant->init(vpu);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to init VPU hardware\n");
+ return ret;
+ }
+
+ pm_runtime_set_autosuspend_delay(vpu->dev, 100);
+ pm_runtime_use_autosuspend(vpu->dev);
+ pm_runtime_enable(vpu->dev);
+
+ ret = clk_bulk_prepare(vpu->variant->num_clocks, vpu->clocks);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to prepare clocks\n");
+ return ret;
+ }
+
+ ret = v4l2_device_register(&pdev->dev, &vpu->v4l2_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register v4l2 device\n");
+ goto err_clk_unprepare;
+ }
+ platform_set_drvdata(pdev, vpu);
+
+ vpu->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
+ if (IS_ERR(vpu->m2m_dev)) {
+ v4l2_err(&vpu->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(vpu->m2m_dev);
+ goto err_v4l2_unreg;
+ }
+
+ vpu->mdev.dev = vpu->dev;
+ strscpy(vpu->mdev.model, DRIVER_NAME, sizeof(vpu->mdev.model));
+ strscpy(vpu->mdev.bus_info, "platform: " DRIVER_NAME,
+ sizeof(vpu->mdev.bus_info));
+ media_device_init(&vpu->mdev);
+ vpu->mdev.ops = &hantro_m2m_media_ops;
+ vpu->v4l2_dev.mdev = &vpu->mdev;
+
+ ret = hantro_add_enc_func(vpu);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register encoder\n");
+ goto err_m2m_rel;
+ }
+
+ ret = hantro_add_dec_func(vpu);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register decoder\n");
+ goto err_rm_enc_func;
+ }
+
+ ret = media_device_register(&vpu->mdev);
+ if (ret) {
+ v4l2_err(&vpu->v4l2_dev, "Failed to register mem2mem media device\n");
+ goto err_rm_dec_func;
+ }
+
+ return 0;
+
+err_rm_dec_func:
+ hantro_remove_dec_func(vpu);
+err_rm_enc_func:
+ hantro_remove_enc_func(vpu);
+err_m2m_rel:
+ media_device_cleanup(&vpu->mdev);
+ v4l2_m2m_release(vpu->m2m_dev);
+err_v4l2_unreg:
+ v4l2_device_unregister(&vpu->v4l2_dev);
+err_clk_unprepare:
+ clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
+ pm_runtime_dont_use_autosuspend(vpu->dev);
+ pm_runtime_disable(vpu->dev);
+ return ret;
+}
+
+static int hantro_remove(struct platform_device *pdev)
+{
+ struct hantro_dev *vpu = platform_get_drvdata(pdev);
+
+ v4l2_info(&vpu->v4l2_dev, "Removing %s\n", pdev->name);
+
+ media_device_unregister(&vpu->mdev);
+ hantro_remove_dec_func(vpu);
+ hantro_remove_enc_func(vpu);
+ media_device_cleanup(&vpu->mdev);
+ v4l2_m2m_release(vpu->m2m_dev);
+ v4l2_device_unregister(&vpu->v4l2_dev);
+ clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
+ pm_runtime_dont_use_autosuspend(vpu->dev);
+ pm_runtime_disable(vpu->dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int hantro_runtime_resume(struct device *dev)
+{
+ struct hantro_dev *vpu = dev_get_drvdata(dev);
+
+ if (vpu->variant->runtime_resume)
+ return vpu->variant->runtime_resume(vpu);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops hantro_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(NULL, hantro_runtime_resume, NULL)
+};
+
+static struct platform_driver hantro_driver = {
+ .probe = hantro_probe,
+ .remove = hantro_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(of_hantro_match),
+ .pm = &hantro_pm_ops,
+ },
+};
+module_platform_driver(hantro_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Alpha Lin <Alpha.Lin@Rock-Chips.com>");
+MODULE_AUTHOR("Tomasz Figa <tfiga@chromium.org>");
+MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
+MODULE_DESCRIPTION("Hantro VPU codec driver");
diff --git a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
new file mode 100644
index 000000000000..e592c1b66375
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ */
+
+#include <asm/unaligned.h>
+#include <linux/bitfield.h>
+#include <media/v4l2-mem2mem.h>
+#include "hantro.h"
+#include "hantro_hw.h"
+
+#define G1_SWREG(nr) ((nr) * 4)
+
+#define G1_REG_RLC_VLC_BASE G1_SWREG(12)
+#define G1_REG_DEC_OUT_BASE G1_SWREG(13)
+#define G1_REG_REFER0_BASE G1_SWREG(14)
+#define G1_REG_REFER1_BASE G1_SWREG(15)
+#define G1_REG_REFER2_BASE G1_SWREG(16)
+#define G1_REG_REFER3_BASE G1_SWREG(17)
+#define G1_REG_QTABLE_BASE G1_SWREG(40)
+#define G1_REG_DEC_E(v) ((v) ? BIT(0) : 0)
+
+#define G1_REG_DEC_AXI_RD_ID(v) (((v) << 24) & GENMASK(31, 24))
+#define G1_REG_DEC_TIMEOUT_E(v) ((v) ? BIT(23) : 0)
+#define G1_REG_DEC_STRSWAP32_E(v) ((v) ? BIT(22) : 0)
+#define G1_REG_DEC_STRENDIAN_E(v) ((v) ? BIT(21) : 0)
+#define G1_REG_DEC_INSWAP32_E(v) ((v) ? BIT(20) : 0)
+#define G1_REG_DEC_OUTSWAP32_E(v) ((v) ? BIT(19) : 0)
+#define G1_REG_DEC_DATA_DISC_E(v) ((v) ? BIT(18) : 0)
+#define G1_REG_DEC_LATENCY(v) (((v) << 11) & GENMASK(16, 11))
+#define G1_REG_DEC_CLK_GATE_E(v) ((v) ? BIT(10) : 0)
+#define G1_REG_DEC_IN_ENDIAN(v) ((v) ? BIT(9) : 0)
+#define G1_REG_DEC_OUT_ENDIAN(v) ((v) ? BIT(8) : 0)
+#define G1_REG_DEC_ADV_PRE_DIS(v) ((v) ? BIT(6) : 0)
+#define G1_REG_DEC_SCMD_DIS(v) ((v) ? BIT(5) : 0)
+#define G1_REG_DEC_MAX_BURST(v) (((v) << 0) & GENMASK(4, 0))
+
+#define G1_REG_DEC_MODE(v) (((v) << 28) & GENMASK(31, 28))
+#define G1_REG_RLC_MODE_E(v) ((v) ? BIT(27) : 0)
+#define G1_REG_PIC_INTERLACE_E(v) ((v) ? BIT(23) : 0)
+#define G1_REG_PIC_FIELDMODE_E(v) ((v) ? BIT(22) : 0)
+#define G1_REG_PIC_B_E(v) ((v) ? BIT(21) : 0)
+#define G1_REG_PIC_INTER_E(v) ((v) ? BIT(20) : 0)
+#define G1_REG_PIC_TOPFIELD_E(v) ((v) ? BIT(19) : 0)
+#define G1_REG_FWD_INTERLACE_E(v) ((v) ? BIT(18) : 0)
+#define G1_REG_FILTERING_DIS(v) ((v) ? BIT(14) : 0)
+#define G1_REG_WRITE_MVS_E(v) ((v) ? BIT(12) : 0)
+#define G1_REG_DEC_AXI_WR_ID(v) (((v) << 0) & GENMASK(7, 0))
+
+#define G1_REG_PIC_MB_WIDTH(v) (((v) << 23) & GENMASK(31, 23))
+#define G1_REG_PIC_MB_HEIGHT_P(v) (((v) << 11) & GENMASK(18, 11))
+#define G1_REG_ALT_SCAN_E(v) ((v) ? BIT(6) : 0)
+#define G1_REG_TOPFIELDFIRST_E(v) ((v) ? BIT(5) : 0)
+
+#define G1_REG_STRM_START_BIT(v) (((v) << 26) & GENMASK(31, 26))
+#define G1_REG_QSCALE_TYPE(v) ((v) ? BIT(24) : 0)
+#define G1_REG_CON_MV_E(v) ((v) ? BIT(4) : 0)
+#define G1_REG_INTRA_DC_PREC(v) (((v) << 2) & GENMASK(3, 2))
+#define G1_REG_INTRA_VLC_TAB(v) ((v) ? BIT(1) : 0)
+#define G1_REG_FRAME_PRED_DCT(v) ((v) ? BIT(0) : 0)
+
+#define G1_REG_INIT_QP(v) (((v) << 25) & GENMASK(30, 25))
+#define G1_REG_STREAM_LEN(v) (((v) << 0) & GENMASK(23, 0))
+
+#define G1_REG_ALT_SCAN_FLAG_E(v) ((v) ? BIT(19) : 0)
+#define G1_REG_FCODE_FWD_HOR(v) (((v) << 15) & GENMASK(18, 15))
+#define G1_REG_FCODE_FWD_VER(v) (((v) << 11) & GENMASK(14, 11))
+#define G1_REG_FCODE_BWD_HOR(v) (((v) << 7) & GENMASK(10, 7))
+#define G1_REG_FCODE_BWD_VER(v) (((v) << 3) & GENMASK(6, 3))
+#define G1_REG_MV_ACCURACY_FWD(v) ((v) ? BIT(2) : 0)
+#define G1_REG_MV_ACCURACY_BWD(v) ((v) ? BIT(1) : 0)
+
+#define G1_REG_STARTMB_X(v) (((v) << 23) & GENMASK(31, 23))
+#define G1_REG_STARTMB_Y(v) (((v) << 15) & GENMASK(22, 15))
+
+#define G1_REG_APF_THRESHOLD(v) (((v) << 0) & GENMASK(13, 0))
+
+#define PICT_TOP_FIELD 1
+#define PICT_BOTTOM_FIELD 2
+#define PICT_FRAME 3
+
+static void
+hantro_g1_mpeg2_dec_set_quantization(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx)
+{
+ struct v4l2_ctrl_mpeg2_quantization *quantization;
+
+ quantization = hantro_get_ctrl(ctx,
+ V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION);
+ hantro_mpeg2_dec_copy_qtable(ctx->mpeg2_dec.qtable.cpu,
+ quantization);
+ vdpu_write_relaxed(vpu, ctx->mpeg2_dec.qtable.dma,
+ G1_REG_QTABLE_BASE);
+}
+
+static void
+hantro_g1_mpeg2_dec_set_buffers(struct hantro_dev *vpu, struct hantro_ctx *ctx,
+ struct vb2_buffer *src_buf,
+ struct vb2_buffer *dst_buf,
+ const struct v4l2_mpeg2_sequence *sequence,
+ const struct v4l2_mpeg2_picture *picture,
+ const struct v4l2_ctrl_mpeg2_slice_params *slice_params)
+{
+ dma_addr_t forward_addr = 0, backward_addr = 0;
+ dma_addr_t current_addr, addr;
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
+
+ switch (picture->picture_coding_type) {
+ case V4L2_MPEG2_PICTURE_CODING_TYPE_B:
+ backward_addr = hantro_get_ref(vq,
+ slice_params->backward_ref_ts);
+ /* fall-through */
+ case V4L2_MPEG2_PICTURE_CODING_TYPE_P:
+ forward_addr = hantro_get_ref(vq,
+ slice_params->forward_ref_ts);
+ }
+
+ /* Source bitstream buffer */
+ addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ vdpu_write_relaxed(vpu, addr, G1_REG_RLC_VLC_BASE);
+
+ /* Destination frame buffer */
+ addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ current_addr = addr;
+
+ if (picture->picture_structure == PICT_BOTTOM_FIELD)
+ addr += ALIGN(ctx->dst_fmt.width, 16);
+ vdpu_write_relaxed(vpu, addr, G1_REG_DEC_OUT_BASE);
+
+ if (!forward_addr)
+ forward_addr = current_addr;
+ if (!backward_addr)
+ backward_addr = current_addr;
+
+ /* Set forward ref frame (top/bottom field) */
+ if (picture->picture_structure == PICT_FRAME ||
+ picture->picture_coding_type == V4L2_MPEG2_PICTURE_CODING_TYPE_B ||
+ (picture->picture_structure == PICT_TOP_FIELD &&
+ picture->top_field_first) ||
+ (picture->picture_structure == PICT_BOTTOM_FIELD &&
+ !picture->top_field_first)) {
+ vdpu_write_relaxed(vpu, forward_addr, G1_REG_REFER0_BASE);
+ vdpu_write_relaxed(vpu, forward_addr, G1_REG_REFER1_BASE);
+ } else if (picture->picture_structure == PICT_TOP_FIELD) {
+ vdpu_write_relaxed(vpu, forward_addr, G1_REG_REFER0_BASE);
+ vdpu_write_relaxed(vpu, current_addr, G1_REG_REFER1_BASE);
+ } else if (picture->picture_structure == PICT_BOTTOM_FIELD) {
+ vdpu_write_relaxed(vpu, current_addr, G1_REG_REFER0_BASE);
+ vdpu_write_relaxed(vpu, forward_addr, G1_REG_REFER1_BASE);
+ }
+
+ /* Set backward ref frame (top/bottom field) */
+ vdpu_write_relaxed(vpu, backward_addr, G1_REG_REFER2_BASE);
+ vdpu_write_relaxed(vpu, backward_addr, G1_REG_REFER3_BASE);
+}
+
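+/*
+ * Program a complete MPEG-2 slice decode: device configuration, picture
+ * parameters, quantization tables and reference/output buffer addresses,
+ * then arm the watchdog and start the hardware.
+ */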
+void hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ const struct v4l2_ctrl_mpeg2_slice_params *slice_params;
+ const struct v4l2_mpeg2_sequence *sequence;
+ const struct v4l2_mpeg2_picture *picture;
+ u32 reg;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ /* Apply request controls if any */
+ v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req,
+ &ctx->ctrl_handler);
+
+ slice_params = hantro_get_ctrl(ctx,
+ V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS);
+ sequence = &slice_params->sequence;
+ picture = &slice_params->picture;
+
+ reg = G1_REG_DEC_AXI_RD_ID(0) |
+ G1_REG_DEC_TIMEOUT_E(1) |
+ G1_REG_DEC_STRSWAP32_E(1) |
+ G1_REG_DEC_STRENDIAN_E(1) |
+ G1_REG_DEC_INSWAP32_E(1) |
+ G1_REG_DEC_OUTSWAP32_E(1) |
+ G1_REG_DEC_DATA_DISC_E(0) |
+ G1_REG_DEC_LATENCY(0) |
+ G1_REG_DEC_CLK_GATE_E(1) |
+ G1_REG_DEC_IN_ENDIAN(1) |
+ G1_REG_DEC_OUT_ENDIAN(1) |
+ G1_REG_DEC_ADV_PRE_DIS(0) |
+ G1_REG_DEC_SCMD_DIS(0) |
+ G1_REG_DEC_MAX_BURST(16);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(2));
+
+ reg = G1_REG_DEC_MODE(5) |
+ G1_REG_RLC_MODE_E(0) |
+ G1_REG_PIC_INTERLACE_E(!sequence->progressive_sequence) |
+ G1_REG_PIC_FIELDMODE_E(picture->picture_structure != PICT_FRAME) |
+ G1_REG_PIC_B_E(picture->picture_coding_type == V4L2_MPEG2_PICTURE_CODING_TYPE_B) |
+ G1_REG_PIC_INTER_E(picture->picture_coding_type != V4L2_MPEG2_PICTURE_CODING_TYPE_I) |
+ G1_REG_PIC_TOPFIELD_E(picture->picture_structure == PICT_TOP_FIELD) |
+ G1_REG_FWD_INTERLACE_E(0) |
+ G1_REG_FILTERING_DIS(1) |
+ G1_REG_WRITE_MVS_E(0) |
+ G1_REG_DEC_AXI_WR_ID(0);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(3));
+
+ reg = G1_REG_PIC_MB_WIDTH(MPEG2_MB_WIDTH(ctx->dst_fmt.width)) |
+ G1_REG_PIC_MB_HEIGHT_P(MPEG2_MB_HEIGHT(ctx->dst_fmt.height)) |
+ G1_REG_ALT_SCAN_E(picture->alternate_scan) |
+ G1_REG_TOPFIELDFIRST_E(picture->top_field_first);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(4));
+
+ reg = G1_REG_STRM_START_BIT(slice_params->data_bit_offset) |
+ G1_REG_QSCALE_TYPE(picture->q_scale_type) |
+ G1_REG_CON_MV_E(picture->concealment_motion_vectors) |
+ G1_REG_INTRA_DC_PREC(picture->intra_dc_precision) |
+ G1_REG_INTRA_VLC_TAB(picture->intra_vlc_format) |
+ G1_REG_FRAME_PRED_DCT(picture->frame_pred_frame_dct);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(5));
+
+ reg = G1_REG_INIT_QP(1) |
+ G1_REG_STREAM_LEN(slice_params->bit_size >> 3);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(6));
+
+ reg = G1_REG_ALT_SCAN_FLAG_E(picture->alternate_scan) |
+ G1_REG_FCODE_FWD_HOR(picture->f_code[0][0]) |
+ G1_REG_FCODE_FWD_VER(picture->f_code[0][1]) |
+ G1_REG_FCODE_BWD_HOR(picture->f_code[1][0]) |
+ G1_REG_FCODE_BWD_VER(picture->f_code[1][1]) |
+ G1_REG_MV_ACCURACY_FWD(1) |
+ G1_REG_MV_ACCURACY_BWD(1);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(18));
+
+ reg = G1_REG_STARTMB_X(0) |
+ G1_REG_STARTMB_Y(0);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(48));
+
+ reg = G1_REG_APF_THRESHOLD(8);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(55));
+
+ hantro_g1_mpeg2_dec_set_quantization(vpu, ctx);
+
+ hantro_g1_mpeg2_dec_set_buffers(vpu, ctx, &src_buf->vb2_buf,
+ &dst_buf->vb2_buf,
+ sequence, picture, slice_params);
+
+ /* Controls are no longer in use, we can complete them */
+ v4l2_ctrl_request_complete(src_buf->vb2_buf.req_obj.req,
+ &ctx->ctrl_handler);
+
+ /* Kick the watchdog and start decoding */
+ schedule_delayed_work(&vpu->watchdog_work, msecs_to_jiffies(2000));
+
+ reg = G1_REG_DEC_E(1);
+ vdpu_write(vpu, reg, G1_SWREG(1));
+}
diff --git a/drivers/staging/media/hantro/hantro_g1_regs.h b/drivers/staging/media/hantro/hantro_g1_regs.h
new file mode 100644
index 000000000000..5c0ea7994336
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_g1_regs.h
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ */
+
+#ifndef HANTRO_G1_REGS_H_
+#define HANTRO_G1_REGS_H_
+
+/* Decoder registers. */
+#define G1_REG_INTERRUPT 0x004
+#define G1_REG_INTERRUPT_DEC_PIC_INF BIT(24)
+#define G1_REG_INTERRUPT_DEC_TIMEOUT BIT(18)
+#define G1_REG_INTERRUPT_DEC_SLICE_INT BIT(17)
+#define G1_REG_INTERRUPT_DEC_ERROR_INT BIT(16)
+#define G1_REG_INTERRUPT_DEC_ASO_INT BIT(15)
+#define G1_REG_INTERRUPT_DEC_BUFFER_INT BIT(14)
+#define G1_REG_INTERRUPT_DEC_BUS_INT BIT(13)
+#define G1_REG_INTERRUPT_DEC_RDY_INT BIT(12)
+#define G1_REG_INTERRUPT_DEC_IRQ BIT(8)
+#define G1_REG_INTERRUPT_DEC_IRQ_DIS BIT(4)
+#define G1_REG_INTERRUPT_DEC_E BIT(0)
+#define G1_REG_CONFIG 0x008
+#define G1_REG_CONFIG_DEC_AXI_RD_ID(x) (((x) & 0xff) << 24)
+#define G1_REG_CONFIG_DEC_TIMEOUT_E BIT(23)
+#define G1_REG_CONFIG_DEC_STRSWAP32_E BIT(22)
+#define G1_REG_CONFIG_DEC_STRENDIAN_E BIT(21)
+#define G1_REG_CONFIG_DEC_INSWAP32_E BIT(20)
+#define G1_REG_CONFIG_DEC_OUTSWAP32_E BIT(19)
+#define G1_REG_CONFIG_DEC_DATA_DISC_E BIT(18)
+#define G1_REG_CONFIG_TILED_MODE_MSB BIT(17)
+#define G1_REG_CONFIG_DEC_OUT_TILED_E BIT(17)
+#define G1_REG_CONFIG_DEC_LATENCY(x) (((x) & 0x3f) << 11)
+#define G1_REG_CONFIG_DEC_CLK_GATE_E BIT(10)
+#define G1_REG_CONFIG_DEC_IN_ENDIAN BIT(9)
+#define G1_REG_CONFIG_DEC_OUT_ENDIAN BIT(8)
+#define G1_REG_CONFIG_PRIORITY_MODE(x) (((x) & 0x7) << 5)
+#define G1_REG_CONFIG_TILED_MODE_LSB BIT(7)
+#define G1_REG_CONFIG_DEC_ADV_PRE_DIS BIT(6)
+#define G1_REG_CONFIG_DEC_SCMD_DIS BIT(5)
+#define G1_REG_CONFIG_DEC_MAX_BURST(x) (((x) & 0x1f) << 0)
+#define G1_REG_DEC_CTRL0 0x00c
+#define G1_REG_DEC_CTRL0_DEC_MODE(x) (((x) & 0xf) << 28)
+#define G1_REG_DEC_CTRL0_RLC_MODE_E BIT(27)
+#define G1_REG_DEC_CTRL0_SKIP_MODE BIT(26)
+#define G1_REG_DEC_CTRL0_DIVX3_E BIT(25)
+#define G1_REG_DEC_CTRL0_PJPEG_E BIT(24)
+#define G1_REG_DEC_CTRL0_PIC_INTERLACE_E BIT(23)
+#define G1_REG_DEC_CTRL0_PIC_FIELDMODE_E BIT(22)
+#define G1_REG_DEC_CTRL0_PIC_B_E BIT(21)
+#define G1_REG_DEC_CTRL0_PIC_INTER_E BIT(20)
+#define G1_REG_DEC_CTRL0_PIC_TOPFIELD_E BIT(19)
+#define G1_REG_DEC_CTRL0_FWD_INTERLACE_E BIT(18)
+#define G1_REG_DEC_CTRL0_SORENSON_E BIT(17)
+#define G1_REG_DEC_CTRL0_REF_TOPFIELD_E BIT(16)
+#define G1_REG_DEC_CTRL0_DEC_OUT_DIS BIT(15)
+#define G1_REG_DEC_CTRL0_FILTERING_DIS BIT(14)
+#define G1_REG_DEC_CTRL0_WEBP_E BIT(13)
+#define G1_REG_DEC_CTRL0_MVC_E BIT(13)
+#define G1_REG_DEC_CTRL0_PIC_FIXED_QUANT BIT(13)
+#define G1_REG_DEC_CTRL0_WRITE_MVS_E BIT(12)
+#define G1_REG_DEC_CTRL0_REFTOPFIRST_E BIT(11)
+#define G1_REG_DEC_CTRL0_SEQ_MBAFF_E BIT(10)
+#define G1_REG_DEC_CTRL0_PICORD_COUNT_E BIT(9)
+#define G1_REG_DEC_CTRL0_DEC_AHB_HLOCK_E BIT(8)
+#define G1_REG_DEC_CTRL0_DEC_AXI_WR_ID(x) (((x) & 0xff) << 0)
+#define G1_REG_DEC_CTRL1 0x010
+#define G1_REG_DEC_CTRL1_PIC_MB_WIDTH(x) (((x) & 0x1ff) << 23)
+#define G1_REG_DEC_CTRL1_MB_WIDTH_OFF(x) (((x) & 0xf) << 19)
+#define G1_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(x) (((x) & 0xff) << 11)
+#define G1_REG_DEC_CTRL1_MB_HEIGHT_OFF(x) (((x) & 0xf) << 7)
+#define G1_REG_DEC_CTRL1_ALT_SCAN_E BIT(6)
+#define G1_REG_DEC_CTRL1_TOPFIELDFIRST_E BIT(5)
+#define G1_REG_DEC_CTRL1_REF_FRAMES(x) (((x) & 0x1f) << 0)
+#define G1_REG_DEC_CTRL1_PIC_MB_W_EXT(x) (((x) & 0x7) << 3)
+#define G1_REG_DEC_CTRL1_PIC_MB_H_EXT(x) (((x) & 0x7) << 0)
+#define G1_REG_DEC_CTRL1_PIC_REFER_FLAG BIT(0)
+#define G1_REG_DEC_CTRL2 0x014
+#define G1_REG_DEC_CTRL2_STRM_START_BIT(x) (((x) & 0x3f) << 26)
+#define G1_REG_DEC_CTRL2_SYNC_MARKER_E BIT(25)
+#define G1_REG_DEC_CTRL2_TYPE1_QUANT_E BIT(24)
+#define G1_REG_DEC_CTRL2_CH_QP_OFFSET(x) (((x) & 0x1f) << 19)
+#define G1_REG_DEC_CTRL2_CH_QP_OFFSET2(x) (((x) & 0x1f) << 14)
+#define G1_REG_DEC_CTRL2_FIELDPIC_FLAG_E BIT(0)
+#define G1_REG_DEC_CTRL2_INTRADC_VLC_THR(x) (((x) & 0x7) << 16)
+#define G1_REG_DEC_CTRL2_VOP_TIME_INCR(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL2_DQ_PROFILE BIT(24)
+#define G1_REG_DEC_CTRL2_DQBI_LEVEL BIT(23)
+#define G1_REG_DEC_CTRL2_RANGE_RED_FRM_E BIT(22)
+#define G1_REG_DEC_CTRL2_FAST_UVMC_E BIT(20)
+#define G1_REG_DEC_CTRL2_TRANSDCTAB BIT(17)
+#define G1_REG_DEC_CTRL2_TRANSACFRM(x) (((x) & 0x3) << 15)
+#define G1_REG_DEC_CTRL2_TRANSACFRM2(x) (((x) & 0x3) << 13)
+#define G1_REG_DEC_CTRL2_MB_MODE_TAB(x) (((x) & 0x7) << 10)
+#define G1_REG_DEC_CTRL2_MVTAB(x) (((x) & 0x7) << 7)
+#define G1_REG_DEC_CTRL2_CBPTAB(x) (((x) & 0x7) << 4)
+#define G1_REG_DEC_CTRL2_2MV_BLK_PAT_TAB(x) (((x) & 0x3) << 2)
+#define G1_REG_DEC_CTRL2_4MV_BLK_PAT_TAB(x) (((x) & 0x3) << 0)
+#define G1_REG_DEC_CTRL2_QSCALE_TYPE BIT(24)
+#define G1_REG_DEC_CTRL2_CON_MV_E BIT(4)
+#define G1_REG_DEC_CTRL2_INTRA_DC_PREC(x) (((x) & 0x3) << 2)
+#define G1_REG_DEC_CTRL2_INTRA_VLC_TAB BIT(1)
+#define G1_REG_DEC_CTRL2_FRAME_PRED_DCT BIT(0)
+#define G1_REG_DEC_CTRL2_JPEG_QTABLES(x) (((x) & 0x3) << 11)
+#define G1_REG_DEC_CTRL2_JPEG_MODE(x) (((x) & 0x7) << 8)
+#define G1_REG_DEC_CTRL2_JPEG_FILRIGHT_E BIT(7)
+#define G1_REG_DEC_CTRL2_JPEG_STREAM_ALL BIT(6)
+#define G1_REG_DEC_CTRL2_CR_AC_VLCTABLE BIT(5)
+#define G1_REG_DEC_CTRL2_CB_AC_VLCTABLE BIT(4)
+#define G1_REG_DEC_CTRL2_CR_DC_VLCTABLE BIT(3)
+#define G1_REG_DEC_CTRL2_CB_DC_VLCTABLE BIT(2)
+#define G1_REG_DEC_CTRL2_CR_DC_VLCTABLE3 BIT(1)
+#define G1_REG_DEC_CTRL2_CB_DC_VLCTABLE3 BIT(0)
+#define G1_REG_DEC_CTRL2_STRM1_START_BIT(x) (((x) & 0x3f) << 18)
+#define G1_REG_DEC_CTRL2_HUFFMAN_E BIT(17)
+#define G1_REG_DEC_CTRL2_MULTISTREAM_E BIT(16)
+#define G1_REG_DEC_CTRL2_BOOLEAN_VALUE(x) (((x) & 0xff) << 8)
+#define G1_REG_DEC_CTRL2_BOOLEAN_RANGE(x) (((x) & 0xff) << 0)
+#define G1_REG_DEC_CTRL2_ALPHA_OFFSET(x) (((x) & 0x1f) << 5)
+#define G1_REG_DEC_CTRL2_BETA_OFFSET(x) (((x) & 0x1f) << 0)
+#define G1_REG_DEC_CTRL3 0x018
+#define G1_REG_DEC_CTRL3_START_CODE_E BIT(31)
+#define G1_REG_DEC_CTRL3_INIT_QP(x) (((x) & 0x3f) << 25)
+#define G1_REG_DEC_CTRL3_CH_8PIX_ILEAV_E BIT(24)
+#define G1_REG_DEC_CTRL3_STREAM_LEN_EXT(x) (((x) & 0xff) << 24)
+#define G1_REG_DEC_CTRL3_STREAM_LEN(x) (((x) & 0xffffff) << 0)
+#define G1_REG_DEC_CTRL4 0x01c
+#define G1_REG_DEC_CTRL4_CABAC_E BIT(31)
+#define G1_REG_DEC_CTRL4_BLACKWHITE_E BIT(30)
+#define G1_REG_DEC_CTRL4_DIR_8X8_INFER_E BIT(29)
+#define G1_REG_DEC_CTRL4_WEIGHT_PRED_E BIT(28)
+#define G1_REG_DEC_CTRL4_WEIGHT_BIPR_IDC(x) (((x) & 0x3) << 26)
+#define G1_REG_DEC_CTRL4_AVS_H264_H_EXT BIT(25)
+#define G1_REG_DEC_CTRL4_FRAMENUM_LEN(x) (((x) & 0x1f) << 16)
+#define G1_REG_DEC_CTRL4_FRAMENUM(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL4_BITPLANE0_E BIT(31)
+#define G1_REG_DEC_CTRL4_BITPLANE1_E BIT(30)
+#define G1_REG_DEC_CTRL4_BITPLANE2_E BIT(29)
+#define G1_REG_DEC_CTRL4_ALT_PQUANT(x) (((x) & 0x1f) << 24)
+#define G1_REG_DEC_CTRL4_DQ_EDGES(x) (((x) & 0xf) << 20)
+#define G1_REG_DEC_CTRL4_TTMBF BIT(19)
+#define G1_REG_DEC_CTRL4_PQINDEX(x) (((x) & 0x1f) << 14)
+#define G1_REG_DEC_CTRL4_VC1_HEIGHT_EXT BIT(13)
+#define G1_REG_DEC_CTRL4_BILIN_MC_E BIT(12)
+#define G1_REG_DEC_CTRL4_UNIQP_E BIT(11)
+#define G1_REG_DEC_CTRL4_HALFQP_E BIT(10)
+#define G1_REG_DEC_CTRL4_TTFRM(x) (((x) & 0x3) << 8)
+#define G1_REG_DEC_CTRL4_2ND_BYTE_EMUL_E BIT(7)
+#define G1_REG_DEC_CTRL4_DQUANT_E BIT(6)
+#define G1_REG_DEC_CTRL4_VC1_ADV_E BIT(5)
+#define G1_REG_DEC_CTRL4_PJPEG_FILDOWN_E BIT(26)
+#define G1_REG_DEC_CTRL4_PJPEG_WDIV8 BIT(25)
+#define G1_REG_DEC_CTRL4_PJPEG_HDIV8 BIT(24)
+#define G1_REG_DEC_CTRL4_PJPEG_AH(x) (((x) & 0xf) << 20)
+#define G1_REG_DEC_CTRL4_PJPEG_AL(x) (((x) & 0xf) << 16)
+#define G1_REG_DEC_CTRL4_PJPEG_SS(x) (((x) & 0xff) << 8)
+#define G1_REG_DEC_CTRL4_PJPEG_SE(x) (((x) & 0xff) << 0)
+#define G1_REG_DEC_CTRL4_DCT1_START_BIT(x) (((x) & 0x3f) << 26)
+#define G1_REG_DEC_CTRL4_DCT2_START_BIT(x) (((x) & 0x3f) << 20)
+#define G1_REG_DEC_CTRL4_CH_MV_RES BIT(13)
+#define G1_REG_DEC_CTRL4_INIT_DC_MATCH0(x) (((x) & 0x7) << 9)
+#define G1_REG_DEC_CTRL4_INIT_DC_MATCH1(x) (((x) & 0x7) << 6)
+#define G1_REG_DEC_CTRL4_VP7_VERSION BIT(5)
+#define G1_REG_DEC_CTRL5 0x020
+#define G1_REG_DEC_CTRL5_CONST_INTRA_E BIT(31)
+#define G1_REG_DEC_CTRL5_FILT_CTRL_PRES BIT(30)
+#define G1_REG_DEC_CTRL5_RDPIC_CNT_PRES BIT(29)
+#define G1_REG_DEC_CTRL5_8X8TRANS_FLAG_E BIT(28)
+#define G1_REG_DEC_CTRL5_REFPIC_MK_LEN(x) (((x) & 0x7ff) << 17)
+#define G1_REG_DEC_CTRL5_IDR_PIC_E BIT(16)
+#define G1_REG_DEC_CTRL5_IDR_PIC_ID(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL5_MV_SCALEFACTOR(x) (((x) & 0xff) << 24)
+#define G1_REG_DEC_CTRL5_REF_DIST_FWD(x) (((x) & 0x1f) << 19)
+#define G1_REG_DEC_CTRL5_REF_DIST_BWD(x) (((x) & 0x1f) << 14)
+#define G1_REG_DEC_CTRL5_LOOP_FILT_LIMIT(x) (((x) & 0xf) << 14)
+#define G1_REG_DEC_CTRL5_VARIANCE_TEST_E BIT(13)
+#define G1_REG_DEC_CTRL5_MV_THRESHOLD(x) (((x) & 0x7) << 10)
+#define G1_REG_DEC_CTRL5_VAR_THRESHOLD(x) (((x) & 0x3ff) << 0)
+#define G1_REG_DEC_CTRL5_DIVX_IDCT_E BIT(8)
+#define G1_REG_DEC_CTRL5_DIVX3_SLICE_SIZE(x) (((x) & 0xff) << 0)
+#define G1_REG_DEC_CTRL5_PJPEG_REST_FREQ(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL5_RV_PROFILE(x) (((x) & 0x3) << 30)
+#define G1_REG_DEC_CTRL5_RV_OSV_QUANT(x) (((x) & 0x3) << 28)
+#define G1_REG_DEC_CTRL5_RV_FWD_SCALE(x) (((x) & 0x3fff) << 14)
+#define G1_REG_DEC_CTRL5_RV_BWD_SCALE(x) (((x) & 0x3fff) << 0)
+#define G1_REG_DEC_CTRL5_INIT_DC_COMP0(x) (((x) & 0xffff) << 16)
+#define G1_REG_DEC_CTRL5_INIT_DC_COMP1(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL6 0x024
+#define G1_REG_DEC_CTRL6_PPS_ID(x) (((x) & 0xff) << 24)
+#define G1_REG_DEC_CTRL6_REFIDX1_ACTIVE(x) (((x) & 0x1f) << 19)
+#define G1_REG_DEC_CTRL6_REFIDX0_ACTIVE(x) (((x) & 0x1f) << 14)
+#define G1_REG_DEC_CTRL6_POC_LENGTH(x) (((x) & 0xff) << 0)
+#define G1_REG_DEC_CTRL6_ICOMP0_E BIT(24)
+#define G1_REG_DEC_CTRL6_ISCALE0(x) (((x) & 0xff) << 16)
+#define G1_REG_DEC_CTRL6_ISHIFT0(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL6_STREAM1_LEN(x) (((x) & 0xffffff) << 0)
+#define G1_REG_DEC_CTRL6_PIC_SLICE_AM(x) (((x) & 0x1fff) << 0)
+#define G1_REG_DEC_CTRL6_COEFFS_PART_AM(x) (((x) & 0xf) << 24)
+#define G1_REG_FWD_PIC(i) (0x028 + ((i) * 0x4))
+#define G1_REG_FWD_PIC_PINIT_RLIST_F5(x) (((x) & 0x1f) << 25)
+#define G1_REG_FWD_PIC_PINIT_RLIST_F4(x) (((x) & 0x1f) << 20)
+#define G1_REG_FWD_PIC_PINIT_RLIST_F3(x) (((x) & 0x1f) << 15)
+#define G1_REG_FWD_PIC_PINIT_RLIST_F2(x) (((x) & 0x1f) << 10)
+#define G1_REG_FWD_PIC_PINIT_RLIST_F1(x) (((x) & 0x1f) << 5)
+#define G1_REG_FWD_PIC_PINIT_RLIST_F0(x) (((x) & 0x1f) << 0)
+#define G1_REG_FWD_PIC1_ICOMP1_E BIT(24)
+#define G1_REG_FWD_PIC1_ISCALE1(x) (((x) & 0xff) << 16)
+#define G1_REG_FWD_PIC1_ISHIFT1(x) (((x) & 0xffff) << 0)
+#define G1_REG_FWD_PIC1_SEGMENT_BASE(x) ((x) << 0)
+#define G1_REG_FWD_PIC1_SEGMENT_UPD_E BIT(1)
+#define G1_REG_FWD_PIC1_SEGMENT_E BIT(0)
+#define G1_REG_DEC_CTRL7 0x02c
+#define G1_REG_DEC_CTRL7_PINIT_RLIST_F15(x) (((x) & 0x1f) << 25)
+#define G1_REG_DEC_CTRL7_PINIT_RLIST_F14(x) (((x) & 0x1f) << 20)
+#define G1_REG_DEC_CTRL7_PINIT_RLIST_F13(x) (((x) & 0x1f) << 15)
+#define G1_REG_DEC_CTRL7_PINIT_RLIST_F12(x) (((x) & 0x1f) << 10)
+#define G1_REG_DEC_CTRL7_PINIT_RLIST_F11(x) (((x) & 0x1f) << 5)
+#define G1_REG_DEC_CTRL7_PINIT_RLIST_F10(x) (((x) & 0x1f) << 0)
+#define G1_REG_DEC_CTRL7_ICOMP2_E BIT(24)
+#define G1_REG_DEC_CTRL7_ISCALE2(x) (((x) & 0xff) << 16)
+#define G1_REG_DEC_CTRL7_ISHIFT2(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL7_DCT3_START_BIT(x) (((x) & 0x3f) << 24)
+#define G1_REG_DEC_CTRL7_DCT4_START_BIT(x) (((x) & 0x3f) << 18)
+#define G1_REG_DEC_CTRL7_DCT5_START_BIT(x) (((x) & 0x3f) << 12)
+#define G1_REG_DEC_CTRL7_DCT6_START_BIT(x) (((x) & 0x3f) << 6)
+#define G1_REG_DEC_CTRL7_DCT7_START_BIT(x) (((x) & 0x3f) << 0)
+#define G1_REG_ADDR_STR 0x030
+#define G1_REG_ADDR_DST 0x034
+#define G1_REG_ADDR_REF(i) (0x038 + ((i) * 0x4))
+#define G1_REG_ADDR_REF_FIELD_E BIT(1)
+#define G1_REG_ADDR_REF_TOPC_E BIT(0)
+#define G1_REG_REF_PIC(i) (0x078 + ((i) * 0x4))
+#define G1_REG_REF_PIC_FILT_TYPE_E BIT(31)
+#define G1_REG_REF_PIC_FILT_SHARPNESS(x) (((x) & 0x7) << 28)
+#define G1_REG_REF_PIC_MB_ADJ_0(x) (((x) & 0x7f) << 21)
+#define G1_REG_REF_PIC_MB_ADJ_1(x) (((x) & 0x7f) << 14)
+#define G1_REG_REF_PIC_MB_ADJ_2(x) (((x) & 0x7f) << 7)
+#define G1_REG_REF_PIC_MB_ADJ_3(x) (((x) & 0x7f) << 0)
+#define G1_REG_REF_PIC_REFER1_NBR(x) (((x) & 0xffff) << 16)
+#define G1_REG_REF_PIC_REFER0_NBR(x) (((x) & 0xffff) << 0)
+#define G1_REG_REF_PIC_LF_LEVEL_0(x) (((x) & 0x3f) << 18)
+#define G1_REG_REF_PIC_LF_LEVEL_1(x) (((x) & 0x3f) << 12)
+#define G1_REG_REF_PIC_LF_LEVEL_2(x) (((x) & 0x3f) << 6)
+#define G1_REG_REF_PIC_LF_LEVEL_3(x) (((x) & 0x3f) << 0)
+#define G1_REG_REF_PIC_QUANT_DELTA_0(x) (((x) & 0x1f) << 27)
+#define G1_REG_REF_PIC_QUANT_DELTA_1(x) (((x) & 0x1f) << 22)
+#define G1_REG_REF_PIC_QUANT_0(x) (((x) & 0x7ff) << 11)
+#define G1_REG_REF_PIC_QUANT_1(x) (((x) & 0x7ff) << 0)
+#define G1_REG_LT_REF 0x098
+#define G1_REG_VALID_REF 0x09c
+#define G1_REG_ADDR_QTABLE 0x0a0
+#define G1_REG_ADDR_DIR_MV 0x0a4
+#define G1_REG_BD_REF_PIC(i) (0x0a8 + ((i) * 0x4))
+#define G1_REG_BD_REF_PIC_BINIT_RLIST_B2(x) (((x) & 0x1f) << 25)
+#define G1_REG_BD_REF_PIC_BINIT_RLIST_F2(x) (((x) & 0x1f) << 20)
+#define G1_REG_BD_REF_PIC_BINIT_RLIST_B1(x) (((x) & 0x1f) << 15)
+#define G1_REG_BD_REF_PIC_BINIT_RLIST_F1(x) (((x) & 0x1f) << 10)
+#define G1_REG_BD_REF_PIC_BINIT_RLIST_B0(x) (((x) & 0x1f) << 5)
+#define G1_REG_BD_REF_PIC_BINIT_RLIST_F0(x) (((x) & 0x1f) << 0)
+#define G1_REG_BD_REF_PIC_PRED_TAP_2_M1(x) (((x) & 0x3) << 10)
+#define G1_REG_BD_REF_PIC_PRED_TAP_2_4(x) (((x) & 0x3) << 8)
+#define G1_REG_BD_REF_PIC_PRED_TAP_4_M1(x) (((x) & 0x3) << 6)
+#define G1_REG_BD_REF_PIC_PRED_TAP_4_4(x) (((x) & 0x3) << 4)
+#define G1_REG_BD_REF_PIC_PRED_TAP_6_M1(x) (((x) & 0x3) << 2)
+#define G1_REG_BD_REF_PIC_PRED_TAP_6_4(x) (((x) & 0x3) << 0)
+#define G1_REG_BD_REF_PIC_QUANT_DELTA_2(x) (((x) & 0x1f) << 27)
+#define G1_REG_BD_REF_PIC_QUANT_DELTA_3(x) (((x) & 0x1f) << 22)
+#define G1_REG_BD_REF_PIC_QUANT_2(x) (((x) & 0x7ff) << 11)
+#define G1_REG_BD_REF_PIC_QUANT_3(x) (((x) & 0x7ff) << 0)
+#define G1_REG_BD_P_REF_PIC 0x0bc
+#define G1_REG_BD_P_REF_PIC_QUANT_DELTA_4(x) (((x) & 0x1f) << 27)
+#define G1_REG_BD_P_REF_PIC_PINIT_RLIST_F3(x) (((x) & 0x1f) << 25)
+#define G1_REG_BD_P_REF_PIC_PINIT_RLIST_F2(x) (((x) & 0x1f) << 20)
+#define G1_REG_BD_P_REF_PIC_PINIT_RLIST_F1(x) (((x) & 0x1f) << 15)
+#define G1_REG_BD_P_REF_PIC_PINIT_RLIST_F0(x) (((x) & 0x1f) << 10)
+#define G1_REG_BD_P_REF_PIC_BINIT_RLIST_B15(x) (((x) & 0x1f) << 5)
+#define G1_REG_BD_P_REF_PIC_BINIT_RLIST_F15(x) (((x) & 0x1f) << 0)
+#define G1_REG_ERR_CONC 0x0c0
+#define G1_REG_ERR_CONC_STARTMB_X(x) (((x) & 0x1ff) << 23)
+#define G1_REG_ERR_CONC_STARTMB_Y(x) (((x) & 0xff) << 15)
+#define G1_REG_PRED_FLT 0x0c4
+#define G1_REG_PRED_FLT_PRED_BC_TAP_0_0(x) (((x) & 0x3ff) << 22)
+#define G1_REG_PRED_FLT_PRED_BC_TAP_0_1(x) (((x) & 0x3ff) << 12)
+#define G1_REG_PRED_FLT_PRED_BC_TAP_0_2(x) (((x) & 0x3ff) << 2)
+#define G1_REG_REF_BUF_CTRL 0x0cc
+#define G1_REG_REF_BUF_CTRL_REFBU_E BIT(31)
+#define G1_REG_REF_BUF_CTRL_REFBU_THR(x) (((x) & 0xfff) << 19)
+#define G1_REG_REF_BUF_CTRL_REFBU_PICID(x) (((x) & 0x1f) << 14)
+#define G1_REG_REF_BUF_CTRL_REFBU_EVAL_E BIT(13)
+#define G1_REG_REF_BUF_CTRL_REFBU_FPARMOD_E BIT(12)
+#define G1_REG_REF_BUF_CTRL_REFBU_Y_OFFSET(x) (((x) & 0x1ff) << 0)
+#define G1_REG_REF_BUF_CTRL2 0x0dc
+#define G1_REG_REF_BUF_CTRL2_REFBU2_BUF_E BIT(31)
+#define G1_REG_REF_BUF_CTRL2_REFBU2_THR(x) (((x) & 0xfff) << 19)
+#define G1_REG_REF_BUF_CTRL2_REFBU2_PICID(x) (((x) & 0x1f) << 14)
+#define G1_REG_REF_BUF_CTRL2_APF_THRESHOLD(x) (((x) & 0x3fff) << 0)
+#define G1_REG_SOFT_RESET 0x194
+
+#endif /* HANTRO_G1_REGS_H_ */
diff --git a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
new file mode 100644
index 000000000000..0c1e3043dc7e
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ */
+
+#include <asm/unaligned.h>
+#include <media/v4l2-mem2mem.h>
+#include "hantro_jpeg.h"
+#include "hantro.h"
+#include "hantro_v4l2.h"
+#include "hantro_hw.h"
+#include "hantro_h1_regs.h"
+
+#define H1_JPEG_QUANT_TABLE_COUNT 16
+
+static void hantro_h1_set_src_img_ctrl(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx)
+{
+ struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
+ u32 reg;
+
+ reg = H1_REG_IN_IMG_CTRL_ROW_LEN(pix_fmt->width)
+ | H1_REG_IN_IMG_CTRL_OVRFLR_D4(0)
+ | H1_REG_IN_IMG_CTRL_OVRFLB_D4(0)
+ | H1_REG_IN_IMG_CTRL_FMT(ctx->vpu_src_fmt->enc_fmt);
+ vepu_write_relaxed(vpu, reg, H1_REG_IN_IMG_CTRL);
+}
+
+static void hantro_h1_jpeg_enc_set_buffers(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx,
+ struct vb2_buffer *src_buf)
+{
+ struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
+ dma_addr_t src[3];
+
+ WARN_ON(pix_fmt->num_planes > 3);
+
+ vepu_write_relaxed(vpu, ctx->jpeg_enc.bounce_buffer.dma,
+ H1_REG_ADDR_OUTPUT_STREAM);
+ vepu_write_relaxed(vpu, ctx->jpeg_enc.bounce_buffer.size,
+ H1_REG_STR_BUF_LIMIT);
+
+ if (pix_fmt->num_planes == 1) {
+ src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ /* The single-plane formats we support are all packed (interleaved) */
+ vepu_write_relaxed(vpu, src[0], H1_REG_ADDR_IN_PLANE_0);
+ } else if (pix_fmt->num_planes == 2) {
+ src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ src[1] = vb2_dma_contig_plane_dma_addr(src_buf, 1);
+ vepu_write_relaxed(vpu, src[0], H1_REG_ADDR_IN_PLANE_0);
+ vepu_write_relaxed(vpu, src[1], H1_REG_ADDR_IN_PLANE_1);
+ } else {
+ src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ src[1] = vb2_dma_contig_plane_dma_addr(src_buf, 1);
+ src[2] = vb2_dma_contig_plane_dma_addr(src_buf, 2);
+ vepu_write_relaxed(vpu, src[0], H1_REG_ADDR_IN_PLANE_0);
+ vepu_write_relaxed(vpu, src[1], H1_REG_ADDR_IN_PLANE_1);
+ vepu_write_relaxed(vpu, src[2], H1_REG_ADDR_IN_PLANE_2);
+ }
+}
+
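+/*
+ * Each 64-byte 8x8 quantization table is programmed into the hardware as
+ * sixteen big-endian 32-bit words, i.e. with a 4-byte stride into the table.
+ */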
+static void
+hantro_h1_jpeg_enc_set_qtable(struct hantro_dev *vpu,
+ unsigned char *luma_qtable,
+ unsigned char *chroma_qtable)
+{
+ u32 reg, i;
+
+ for (i = 0; i < H1_JPEG_QUANT_TABLE_COUNT; i++) {
+ reg = get_unaligned_be32(&luma_qtable[i * 4]);
+ vepu_write_relaxed(vpu, reg, H1_REG_JPEG_LUMA_QUAT(i));
+
+ reg = get_unaligned_be32(&chroma_qtable[i * 4]);
+ vepu_write_relaxed(vpu, reg, H1_REG_JPEG_CHROMA_QUAT(i));
+ }
+}
+
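+/*
+ * Program a JPEG encode run: assemble the JPEG header on the CPU, point the
+ * hardware at the bounce buffer and the source planes, load the quantization
+ * tables, then arm the watchdog and start the encoder.
+ */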
+void hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ struct hantro_jpeg_ctx jpeg_ctx;
+ u32 reg;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
+ jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
+ jpeg_ctx.width = ctx->dst_fmt.width;
+ jpeg_ctx.height = ctx->dst_fmt.height;
+ jpeg_ctx.quality = ctx->jpeg_quality;
+ hantro_jpeg_header_assemble(&jpeg_ctx);
+
+ /* Switch to JPEG encoder mode before writing registers */
+ vepu_write_relaxed(vpu, H1_REG_ENC_CTRL_ENC_MODE_JPEG,
+ H1_REG_ENC_CTRL);
+
+ hantro_h1_set_src_img_ctrl(vpu, ctx);
+ hantro_h1_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf);
+ hantro_h1_jpeg_enc_set_qtable(vpu,
+ hantro_jpeg_get_qtable(&jpeg_ctx, 0),
+ hantro_jpeg_get_qtable(&jpeg_ctx, 1));
+
+ reg = H1_REG_AXI_CTRL_OUTPUT_SWAP16
+ | H1_REG_AXI_CTRL_INPUT_SWAP16
+ | H1_REG_AXI_CTRL_BURST_LEN(16)
+ | H1_REG_AXI_CTRL_OUTPUT_SWAP32
+ | H1_REG_AXI_CTRL_INPUT_SWAP32
+ | H1_REG_AXI_CTRL_OUTPUT_SWAP8
+ | H1_REG_AXI_CTRL_INPUT_SWAP8;
+ /* Make sure that all registers are written at this point. */
+ vepu_write(vpu, reg, H1_REG_AXI_CTRL);
+
+ reg = H1_REG_ENC_CTRL_WIDTH(JPEG_MB_WIDTH(ctx->src_fmt.width))
+ | H1_REG_ENC_CTRL_HEIGHT(JPEG_MB_HEIGHT(ctx->src_fmt.height))
+ | H1_REG_ENC_CTRL_ENC_MODE_JPEG
+ | H1_REG_ENC_PIC_INTRA
+ | H1_REG_ENC_CTRL_EN_BIT;
+ /* Kick the watchdog and start encoding */
+ schedule_delayed_work(&vpu->watchdog_work, msecs_to_jiffies(2000));
+ vepu_write(vpu, reg, H1_REG_ENC_CTRL);
+}
diff --git a/drivers/staging/media/hantro/hantro_h1_regs.h b/drivers/staging/media/hantro/hantro_h1_regs.h
new file mode 100644
index 000000000000..d6e9825bb5c7
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_h1_regs.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ */
+
+#ifndef HANTRO_H1_REGS_H_
+#define HANTRO_H1_REGS_H_
+
+/* Encoder registers. */
+#define H1_REG_INTERRUPT 0x004
+#define H1_REG_INTERRUPT_FRAME_RDY BIT(2)
+#define H1_REG_INTERRUPT_DIS_BIT BIT(1)
+#define H1_REG_INTERRUPT_BIT BIT(0)
+#define H1_REG_AXI_CTRL 0x008
+#define H1_REG_AXI_CTRL_OUTPUT_SWAP16 BIT(15)
+#define H1_REG_AXI_CTRL_INPUT_SWAP16 BIT(14)
+#define H1_REG_AXI_CTRL_BURST_LEN(x) ((x) << 8)
+#define H1_REG_AXI_CTRL_GATE_BIT BIT(4)
+#define H1_REG_AXI_CTRL_OUTPUT_SWAP32 BIT(3)
+#define H1_REG_AXI_CTRL_INPUT_SWAP32 BIT(2)
+#define H1_REG_AXI_CTRL_OUTPUT_SWAP8 BIT(1)
+#define H1_REG_AXI_CTRL_INPUT_SWAP8 BIT(0)
+#define H1_REG_ADDR_OUTPUT_STREAM 0x014
+#define H1_REG_ADDR_OUTPUT_CTRL 0x018
+#define H1_REG_ADDR_REF_LUMA 0x01c
+#define H1_REG_ADDR_REF_CHROMA 0x020
+#define H1_REG_ADDR_REC_LUMA 0x024
+#define H1_REG_ADDR_REC_CHROMA 0x028
+#define H1_REG_ADDR_IN_PLANE_0 0x02c
+#define H1_REG_ADDR_IN_PLANE_1 0x030
+#define H1_REG_ADDR_IN_PLANE_2 0x034
+#define H1_REG_ENC_CTRL 0x038
+#define H1_REG_ENC_CTRL_TIMEOUT_EN BIT(31)
+#define H1_REG_ENC_CTRL_NAL_MODE_BIT BIT(29)
+#define H1_REG_ENC_CTRL_WIDTH(w) ((w) << 19)
+#define H1_REG_ENC_CTRL_HEIGHT(h) ((h) << 10)
+#define H1_REG_ENC_PIC_INTER (0x0 << 3)
+#define H1_REG_ENC_PIC_INTRA (0x1 << 3)
+#define H1_REG_ENC_PIC_MVCINTER (0x2 << 3)
+#define H1_REG_ENC_CTRL_ENC_MODE_H264 (0x3 << 1)
+#define H1_REG_ENC_CTRL_ENC_MODE_JPEG (0x2 << 1)
+#define H1_REG_ENC_CTRL_ENC_MODE_VP8 (0x1 << 1)
+#define H1_REG_ENC_CTRL_EN_BIT BIT(0)
+#define H1_REG_IN_IMG_CTRL 0x03c
+#define H1_REG_IN_IMG_CTRL_ROW_LEN(x) ((x) << 12)
+#define H1_REG_IN_IMG_CTRL_OVRFLR_D4(x) ((x) << 10)
+#define H1_REG_IN_IMG_CTRL_OVRFLB_D4(x) ((x) << 6)
+#define H1_REG_IN_IMG_CTRL_FMT(x) ((x) << 2)
+#define H1_REG_ENC_CTRL0 0x040
+#define H1_REG_ENC_CTRL0_INIT_QP(x) ((x) << 26)
+#define H1_REG_ENC_CTRL0_SLICE_ALPHA(x) ((x) << 22)
+#define H1_REG_ENC_CTRL0_SLICE_BETA(x) ((x) << 18)
+#define H1_REG_ENC_CTRL0_CHROMA_QP_OFFSET(x) ((x) << 13)
+#define H1_REG_ENC_CTRL0_FILTER_DIS(x) ((x) << 5)
+#define H1_REG_ENC_CTRL0_IDR_PICID(x) ((x) << 1)
+#define H1_REG_ENC_CTRL0_CONSTR_INTRA_PRED BIT(0)
+#define H1_REG_ENC_CTRL1 0x044
+#define H1_REG_ENC_CTRL1_PPS_ID(x) ((x) << 24)
+#define H1_REG_ENC_CTRL1_INTRA_PRED_MODE(x) ((x) << 16)
+#define H1_REG_ENC_CTRL1_FRAME_NUM(x) ((x))
+#define H1_REG_ENC_CTRL2 0x048
+#define H1_REG_ENC_CTRL2_DEBLOCKING_FILETER_MODE(x) ((x) << 30)
+#define H1_REG_ENC_CTRL2_H264_SLICE_SIZE(x) ((x) << 23)
+#define H1_REG_ENC_CTRL2_DISABLE_QUARTER_PIXMV BIT(22)
+#define H1_REG_ENC_CTRL2_TRANS8X8_MODE_EN BIT(21)
+#define H1_REG_ENC_CTRL2_CABAC_INIT_IDC(x) ((x) << 19)
+#define H1_REG_ENC_CTRL2_ENTROPY_CODING_MODE BIT(18)
+#define H1_REG_ENC_CTRL2_H264_INTER4X4_MODE BIT(17)
+#define H1_REG_ENC_CTRL2_H264_STREAM_MODE BIT(16)
+#define H1_REG_ENC_CTRL2_INTRA16X16_MODE(x) ((x))
+#define H1_REG_ENC_CTRL3 0x04c
+#define H1_REG_ENC_CTRL3_MUTIMV_EN BIT(30)
+#define H1_REG_ENC_CTRL3_MV_PENALTY_1_4P(x) ((x) << 20)
+#define H1_REG_ENC_CTRL3_MV_PENALTY_4P(x) ((x) << 10)
+#define H1_REG_ENC_CTRL3_MV_PENALTY_1P(x) ((x))
+#define H1_REG_ENC_CTRL4 0x050
+#define H1_REG_ENC_CTRL4_MV_PENALTY_16X8_8X16(x) ((x) << 20)
+#define H1_REG_ENC_CTRL4_MV_PENALTY_8X8(x) ((x) << 10)
+#define H1_REG_ENC_CTRL4_8X4_4X8(x) ((x))
+#define H1_REG_ENC_CTRL5 0x054
+#define H1_REG_ENC_CTRL5_MACROBLOCK_PENALTY(x) ((x) << 24)
+#define H1_REG_ENC_CTRL5_COMPLETE_SLICES(x) ((x) << 16)
+#define H1_REG_ENC_CTRL5_INTER_MODE(x) ((x))
+#define H1_REG_STR_HDR_REM_MSB 0x058
+#define H1_REG_STR_HDR_REM_LSB 0x05c
+#define H1_REG_STR_BUF_LIMIT 0x060
+#define H1_REG_MAD_CTRL 0x064
+#define H1_REG_MAD_CTRL_QP_ADJUST(x) ((x) << 28)
+#define H1_REG_MAD_CTRL_MAD_THREDHOLD(x) ((x) << 22)
+#define H1_REG_MAD_CTRL_QP_SUM_DIV2(x) ((x))
+#define H1_REG_ADDR_VP8_PROB_CNT 0x068
+#define H1_REG_QP_VAL 0x06c
+#define H1_REG_QP_VAL_LUM(x) ((x) << 26)
+#define H1_REG_QP_VAL_MAX(x) ((x) << 20)
+#define H1_REG_QP_VAL_MIN(x) ((x) << 14)
+#define H1_REG_QP_VAL_CHECKPOINT_DISTAN(x) ((x))
+#define H1_REG_VP8_QP_VAL(i) (0x06c + ((i) * 0x4))
+#define H1_REG_CHECKPOINT(i) (0x070 + ((i) * 0x4))
+#define H1_REG_CHECKPOINT_CHECK0(x) (((x) & 0xffff))
+#define H1_REG_CHECKPOINT_CHECK1(x) (((x) & 0xffff) << 16)
+#define H1_REG_CHECKPOINT_RESULT(x) ((((x) >> (16 - 16 \
+ * (i & 1))) & 0xffff) \
+ * 32)
+#define H1_REG_CHKPT_WORD_ERR(i) (0x084 + ((i) * 0x4))
+#define H1_REG_CHKPT_WORD_ERR_CHK0(x) (((x) & 0xffff))
+#define H1_REG_CHKPT_WORD_ERR_CHK1(x) (((x) & 0xffff) << 16)
+#define H1_REG_VP8_BOOL_ENC 0x08c
+#define H1_REG_CHKPT_DELTA_QP 0x090
+#define H1_REG_CHKPT_DELTA_QP_CHK0(x) (((x) & 0x0f) << 0)
+#define H1_REG_CHKPT_DELTA_QP_CHK1(x) (((x) & 0x0f) << 4)
+#define H1_REG_CHKPT_DELTA_QP_CHK2(x) (((x) & 0x0f) << 8)
+#define H1_REG_CHKPT_DELTA_QP_CHK3(x) (((x) & 0x0f) << 12)
+#define H1_REG_CHKPT_DELTA_QP_CHK4(x) (((x) & 0x0f) << 16)
+#define H1_REG_CHKPT_DELTA_QP_CHK5(x) (((x) & 0x0f) << 20)
+#define H1_REG_CHKPT_DELTA_QP_CHK6(x) (((x) & 0x0f) << 24)
+#define H1_REG_VP8_CTRL0 0x090
+#define H1_REG_RLC_CTRL 0x094
+#define H1_REG_RLC_CTRL_STR_OFFS_SHIFT 23
+#define H1_REG_RLC_CTRL_STR_OFFS_MASK (0x3f << 23)
+#define H1_REG_RLC_CTRL_RLC_SUM(x) ((x))
+#define H1_REG_MB_CTRL 0x098
+#define H1_REG_MB_CNT_OUT(x) (((x) & 0xffff))
+#define H1_REG_MB_CNT_SET(x) (((x) & 0xffff) << 16)
+#define H1_REG_ADDR_NEXT_PIC 0x09c
+#define H1_REG_JPEG_LUMA_QUAT(i) (0x100 + ((i) * 0x4))
+#define H1_REG_JPEG_CHROMA_QUAT(i) (0x140 + ((i) * 0x4))
+#define H1_REG_STABILIZATION_OUTPUT 0x0A0
+#define H1_REG_ADDR_CABAC_TBL 0x0cc
+#define H1_REG_ADDR_MV_OUT 0x0d0
+#define H1_REG_RGB_YUV_COEFF(i) (0x0d4 + ((i) * 0x4))
+#define H1_REG_RGB_MASK_MSB 0x0dc
+#define H1_REG_INTRA_AREA_CTRL 0x0e0
+#define H1_REG_CIR_INTRA_CTRL 0x0e4
+#define H1_REG_INTRA_SLICE_BITMAP(i) (0x0e8 + ((i) * 0x4))
+#define H1_REG_ADDR_VP8_DCT_PART(i) (0x0e8 + ((i) * 0x4))
+#define H1_REG_FIRST_ROI_AREA 0x0f0
+#define H1_REG_SECOND_ROI_AREA 0x0f4
+#define H1_REG_MVC_CTRL 0x0f8
+#define H1_REG_MVC_CTRL_MV16X16_FAVOR(x) ((x) << 28)
+#define H1_REG_VP8_INTRA_PENALTY(i) (0x100 + ((i) * 0x4))
+#define H1_REG_ADDR_VP8_SEG_MAP 0x11c
+#define H1_REG_VP8_SEG_QP(i) (0x120 + ((i) * 0x4))
+#define H1_REG_DMV_4P_1P_PENALTY(i) (0x180 + ((i) * 0x4))
+#define H1_REG_DMV_4P_1P_PENALTY_BIT(x, i) ((x) << (i) * 8)
+#define H1_REG_DMV_QPEL_PENALTY(i) (0x200 + ((i) * 0x4))
+#define H1_REG_DMV_QPEL_PENALTY_BIT(x, i) ((x) << (i) * 8)
+#define H1_REG_VP8_CTRL1 0x280
+#define H1_REG_VP8_BIT_COST_GOLDEN 0x284
+#define H1_REG_VP8_LOOP_FLT_DELTA(i) (0x288 + ((i) * 0x4))
+
+#endif /* HANTRO_H1_REGS_H_ */
diff --git a/drivers/staging/media/hantro/hantro_hw.h b/drivers/staging/media/hantro/hantro_hw.h
new file mode 100644
index 000000000000..3c361c2e9b88
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_hw.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ */
+
+#ifndef HANTRO_HW_H_
+#define HANTRO_HW_H_
+
+#include <linux/interrupt.h>
+#include <linux/v4l2-controls.h>
+#include <media/mpeg2-ctrls.h>
+#include <media/videobuf2-core.h>
+
+struct hantro_dev;
+struct hantro_ctx;
+struct hantro_buf;
+struct hantro_variant;
+
+/**
+ * struct hantro_aux_buf - auxiliary DMA buffer for hardware data
+ * @cpu: CPU pointer to the buffer.
+ * @dma: DMA address of the buffer.
+ * @size: Size of the buffer.
+ */
+struct hantro_aux_buf {
+ void *cpu;
+ dma_addr_t dma;
+ size_t size;
+};
+
+/**
+ * struct hantro_jpeg_enc_hw_ctx - JPEG encoder hardware context
+ * @bounce_buffer: Bounce buffer for the encoded bitstream
+ */
+struct hantro_jpeg_enc_hw_ctx {
+ struct hantro_aux_buf bounce_buffer;
+};
+
+/**
+ * struct hantro_mpeg2_dec_hw_ctx - MPEG-2 decoder hardware context
+ * @qtable: Quantization tables DMA buffer
+ */
+struct hantro_mpeg2_dec_hw_ctx {
+ struct hantro_aux_buf qtable;
+};
+
+/**
+ * struct hantro_codec_ops - codec mode specific operations
+ *
+ * @init: If needed, can be used for initialization.
+ * Optional and called from process context.
+ * @exit: If needed, can be used to undo the .init phase.
+ * Optional and called from process context.
+ * @run: Start single {en,de}coding job. Called from atomic context
+ * to indicate that a pair of buffers is ready and the hardware
+ * should be programmed and started.
+ * @done: Read back processing results and additional data from hardware.
+ * @reset: Reset the hardware in case of a timeout.
+ */
+struct hantro_codec_ops {
+ int (*init)(struct hantro_ctx *ctx);
+ void (*exit)(struct hantro_ctx *ctx);
+ void (*run)(struct hantro_ctx *ctx);
+ void (*done)(struct hantro_ctx *ctx, enum vb2_buffer_state);
+ void (*reset)(struct hantro_ctx *ctx);
+};
+
+/**
+ * enum hantro_enc_fmt - source format ID for hardware registers.
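+ *
+ * These values are programmed directly into the encoder's input image
+ * format register field, so they must match the hardware encoding.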
+ */
+enum hantro_enc_fmt {
+ RK3288_VPU_ENC_FMT_YUV420P = 0,
+ RK3288_VPU_ENC_FMT_YUV420SP = 1,
+ RK3288_VPU_ENC_FMT_YUYV422 = 2,
+ RK3288_VPU_ENC_FMT_UYVY422 = 3,
+};
+
+extern const struct hantro_variant rk3399_vpu_variant;
+extern const struct hantro_variant rk3328_vpu_variant;
+extern const struct hantro_variant rk3288_vpu_variant;
+
+void hantro_watchdog(struct work_struct *work);
+void hantro_run(struct hantro_ctx *ctx);
+void hantro_irq_done(struct hantro_dev *vpu, unsigned int bytesused,
+ enum vb2_buffer_state result);
+
+void hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx);
+void rk3399_vpu_jpeg_enc_run(struct hantro_ctx *ctx);
+int hantro_jpeg_enc_init(struct hantro_ctx *ctx);
+void hantro_jpeg_enc_exit(struct hantro_ctx *ctx);
+
+void hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx);
+void rk3399_vpu_mpeg2_dec_run(struct hantro_ctx *ctx);
+void hantro_mpeg2_dec_copy_qtable(u8 *qtable,
+ const struct v4l2_ctrl_mpeg2_quantization *ctrl);
+int hantro_mpeg2_dec_init(struct hantro_ctx *ctx);
+void hantro_mpeg2_dec_exit(struct hantro_ctx *ctx);
+
+#endif /* HANTRO_HW_H_ */
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu_jpeg.c b/drivers/staging/media/hantro/hantro_jpeg.c
index 0ff0badc1f7a..125eb41f2ede 100644
--- a/drivers/staging/media/rockchip/vpu/rockchip_vpu_jpeg.c
+++ b/drivers/staging/media/hantro/hantro_jpeg.c
@@ -6,9 +6,11 @@
* Copyright (C) Jean-Francois Moine (http://moinejf.free.fr)
* Copyright (C) 2014 Philipp Zabel, Pengutronix
*/
+#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include "rockchip_vpu_jpeg.h"
+#include "hantro_jpeg.h"
+#include "hantro.h"
#define LUMA_QUANT_OFF 7
#define CHROMA_QUANT_OFF 72
@@ -116,7 +118,7 @@ static const unsigned char chroma_ac_table[] = {
* and we'll use fixed offsets to change the width, height
* quantization tables, etc.
*/
-static const unsigned char rockchip_vpu_jpeg_header[JPEG_HEADER_SIZE] = {
+static const unsigned char hantro_jpeg_header[JPEG_HEADER_SIZE] = {
/* SOI */
0xff, 0xd8,
@@ -260,19 +262,19 @@ static void jpeg_set_quality(unsigned char *buffer, int quality)
}
unsigned char *
-rockchip_vpu_jpeg_get_qtable(struct rockchip_vpu_jpeg_ctx *ctx, int index)
+hantro_jpeg_get_qtable(struct hantro_jpeg_ctx *ctx, int index)
{
if (index == 0)
return ctx->buffer + LUMA_QUANT_OFF;
return ctx->buffer + CHROMA_QUANT_OFF;
}
-void rockchip_vpu_jpeg_header_assemble(struct rockchip_vpu_jpeg_ctx *ctx)
+void hantro_jpeg_header_assemble(struct hantro_jpeg_ctx *ctx)
{
char *buf = ctx->buffer;
- memcpy(buf, rockchip_vpu_jpeg_header,
- sizeof(rockchip_vpu_jpeg_header));
+ memcpy(buf, hantro_jpeg_header,
+ sizeof(hantro_jpeg_header));
buf[HEIGHT_OFF + 0] = ctx->height >> 8;
buf[HEIGHT_OFF + 1] = ctx->height;
@@ -288,3 +290,30 @@ void rockchip_vpu_jpeg_header_assemble(struct rockchip_vpu_jpeg_ctx *ctx)
jpeg_set_quality(buf, ctx->quality);
}
+
+int hantro_jpeg_enc_init(struct hantro_ctx *ctx)
+{
+ ctx->jpeg_enc.bounce_buffer.size =
+ ctx->dst_fmt.plane_fmt[0].sizeimage -
+ ctx->vpu_dst_fmt->header_size;
+
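+ /*
+ * The encoder streams its output linearly, so the bounce buffer does
+ * not need to be physically contiguous; single-page allocations are
+ * cheaper and less likely to fail.
+ */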
+ ctx->jpeg_enc.bounce_buffer.cpu =
+ dma_alloc_attrs(ctx->dev->dev,
+ ctx->jpeg_enc.bounce_buffer.size,
+ &ctx->jpeg_enc.bounce_buffer.dma,
+ GFP_KERNEL,
+ DMA_ATTR_ALLOC_SINGLE_PAGES);
+ if (!ctx->jpeg_enc.bounce_buffer.cpu)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void hantro_jpeg_enc_exit(struct hantro_ctx *ctx)
+{
+ dma_free_attrs(ctx->dev->dev,
+ ctx->jpeg_enc.bounce_buffer.size,
+ ctx->jpeg_enc.bounce_buffer.cpu,
+ ctx->jpeg_enc.bounce_buffer.dma,
+ DMA_ATTR_ALLOC_SINGLE_PAGES);
+}
diff --git a/drivers/staging/media/hantro/hantro_jpeg.h b/drivers/staging/media/hantro/hantro_jpeg.h
new file mode 100644
index 000000000000..9e8397c71388
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_jpeg.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#define JPEG_HEADER_SIZE 601
+
+struct hantro_jpeg_ctx {
+ int width;
+ int height;
+ int quality;
+ unsigned char *buffer;
+};
+
+unsigned char *hantro_jpeg_get_qtable(struct hantro_jpeg_ctx *ctx, int index);
+void hantro_jpeg_header_assemble(struct hantro_jpeg_ctx *ctx);
diff --git a/drivers/staging/media/hantro/hantro_mpeg2.c b/drivers/staging/media/hantro/hantro_mpeg2.c
new file mode 100644
index 000000000000..1d334e6fcd06
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_mpeg2.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ */
+
+#include "hantro.h"
+
+static const u8 zigzag[64] = {
+ 0, 1, 8, 16, 9, 2, 3, 10,
+ 17, 24, 32, 25, 18, 11, 4, 5,
+ 12, 19, 26, 33, 40, 48, 41, 34,
+ 27, 20, 13, 6, 7, 14, 21, 28,
+ 35, 42, 49, 56, 57, 50, 43, 36,
+ 29, 22, 15, 23, 30, 37, 44, 51,
+ 58, 59, 52, 45, 38, 31, 39, 46,
+ 53, 60, 61, 54, 47, 55, 62, 63
+};
+
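+/*
+ * The MPEG-2 quantization control carries the four 64-byte matrices
+ * (luma/chroma, intra/non-intra) in zigzag scan order; the loop below
+ * de-zigzags them into the linear layout consumed by the hardware,
+ * packed back to back in the qtable buffer.
+ */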
+void hantro_mpeg2_dec_copy_qtable(u8 *qtable,
+ const struct v4l2_ctrl_mpeg2_quantization *ctrl)
+{
+ int i, n;
+
+ if (!qtable || !ctrl)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(zigzag); i++) {
+ n = zigzag[i];
+ qtable[n + 0] = ctrl->intra_quantiser_matrix[i];
+ qtable[n + 64] = ctrl->non_intra_quantiser_matrix[i];
+ qtable[n + 128] = ctrl->chroma_intra_quantiser_matrix[i];
+ qtable[n + 192] = ctrl->chroma_non_intra_quantiser_matrix[i];
+ }
+}
+
+int hantro_mpeg2_dec_init(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ ctx->mpeg2_dec.qtable.size = ARRAY_SIZE(zigzag) * 4;
+ ctx->mpeg2_dec.qtable.cpu =
+ dma_alloc_coherent(vpu->dev,
+ ctx->mpeg2_dec.qtable.size,
+ &ctx->mpeg2_dec.qtable.dma,
+ GFP_KERNEL);
+ if (!ctx->mpeg2_dec.qtable.cpu)
+ return -ENOMEM;
+ return 0;
+}
+
+void hantro_mpeg2_dec_exit(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ dma_free_coherent(vpu->dev,
+ ctx->mpeg2_dec.qtable.size,
+ ctx->mpeg2_dec.qtable.cpu,
+ ctx->mpeg2_dec.qtable.dma);
+}
diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c
new file mode 100644
index 000000000000..68f45ee66821
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_v4l2.c
@@ -0,0 +1,686 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Collabora, Ltd.
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ * Alpha Lin <Alpha.Lin@rock-chips.com>
+ * Jeffy Chen <jeffy.chen@rock-chips.com>
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ *
+ * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
+ * Copyright (C) 2010-2011 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-sg.h>
+
+#include "hantro.h"
+#include "hantro_hw.h"
+#include "hantro_v4l2.h"
+
+static const struct hantro_fmt *
+hantro_get_formats(const struct hantro_ctx *ctx, unsigned int *num_fmts)
+{
+ const struct hantro_fmt *formats;
+
+ if (hantro_is_encoder_ctx(ctx)) {
+ formats = ctx->dev->variant->enc_fmts;
+ *num_fmts = ctx->dev->variant->num_enc_fmts;
+ } else {
+ formats = ctx->dev->variant->dec_fmts;
+ *num_fmts = ctx->dev->variant->num_dec_fmts;
+ }
+
+ return formats;
+}
+
+static const struct hantro_fmt *
+hantro_find_format(const struct hantro_fmt *formats, unsigned int num_fmts,
+ u32 fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_fmts; i++)
+ if (formats[i].fourcc == fourcc)
+ return &formats[i];
+ return NULL;
+}
+
+static const struct hantro_fmt *
+hantro_get_default_fmt(const struct hantro_fmt *formats, unsigned int num_fmts,
+ bool bitstream)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_fmts; i++) {
+ if (bitstream == (formats[i].codec_mode !=
+ HANTRO_MODE_NONE))
+ return &formats[i];
+ }
+ return NULL;
+}
+
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct hantro_dev *vpu = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+
+ strscpy(cap->driver, vpu->dev->driver->name, sizeof(cap->driver));
+ strscpy(cap->card, vdev->name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform: %s",
+ vpu->dev->driver->name);
+ return 0;
+}
+
+static int vidioc_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+ const struct hantro_fmt *formats, *fmt;
+ unsigned int num_fmts;
+
+ if (fsize->index != 0) {
+ vpu_debug(0, "invalid frame size index (expected 0, got %d)\n",
+ fsize->index);
+ return -EINVAL;
+ }
+
+ formats = hantro_get_formats(ctx, &num_fmts);
+ fmt = hantro_find_format(formats, num_fmts, fsize->pixel_format);
+ if (!fmt) {
+ vpu_debug(0, "unsupported bitstream format (%08x)\n",
+ fsize->pixel_format);
+ return -EINVAL;
+ }
+
+ /* This only makes sense for coded formats */
+ if (fmt->codec_mode == HANTRO_MODE_NONE)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise = fmt->frmsize;
+
+ return 0;
+}
+
+static int vidioc_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f, bool capture)
+
+{
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+ const struct hantro_fmt *fmt, *formats;
+ unsigned int num_fmts, i, j = 0;
+ bool skip_mode_none;
+
+ /*
+ * When dealing with an encoder:
+ * - on the capture side we want to filter out all MODE_NONE formats.
+ * - on the output side we want to filter out all formats that are
+ * not MODE_NONE.
+ * When dealing with a decoder:
+ * - on the capture side we want to filter out all formats that are
+ * not MODE_NONE.
+ * - on the output side we want to filter out all MODE_NONE formats.
+ */
+ skip_mode_none = capture == hantro_is_encoder_ctx(ctx);
+
+ formats = hantro_get_formats(ctx, &num_fmts);
+ for (i = 0; i < num_fmts; i++) {
+ bool mode_none = formats[i].codec_mode == HANTRO_MODE_NONE;
+
+ if (skip_mode_none == mode_none)
+ continue;
+ if (j == f->index) {
+ fmt = &formats[i];
+ f->pixelformat = fmt->fourcc;
+ return 0;
+ }
+ ++j;
+ }
+ return -EINVAL;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(file, priv, f, true);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(file, priv, f, false);
+}
+
+static int vidioc_g_fmt_out_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+
+ vpu_debug(4, "f->type = %d\n", f->type);
+
+ *pix_mp = ctx->src_fmt;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+
+ vpu_debug(4, "f->type = %d\n", f->type);
+
+ *pix_mp = ctx->dst_fmt;
+
+ return 0;
+}
+
+static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f,
+ bool capture)
+{
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ const struct hantro_fmt *formats, *fmt, *vpu_fmt;
+ unsigned int num_fmts;
+ bool coded;
+
+ coded = capture == hantro_is_encoder_ctx(ctx);
+
+ vpu_debug(4, "trying format %c%c%c%c\n",
+ (pix_mp->pixelformat & 0x7f),
+ (pix_mp->pixelformat >> 8) & 0x7f,
+ (pix_mp->pixelformat >> 16) & 0x7f,
+ (pix_mp->pixelformat >> 24) & 0x7f);
+
+ formats = hantro_get_formats(ctx, &num_fmts);
+ fmt = hantro_find_format(formats, num_fmts, pix_mp->pixelformat);
+ if (!fmt) {
+ fmt = hantro_get_default_fmt(formats, num_fmts, coded);
+ f->fmt.pix_mp.pixelformat = fmt->fourcc;
+ }
+
+ if (coded) {
+ pix_mp->num_planes = 1;
+ vpu_fmt = fmt;
+ } else if (hantro_is_encoder_ctx(ctx)) {
+ vpu_fmt = ctx->vpu_dst_fmt;
+ } else {
+ vpu_fmt = ctx->vpu_src_fmt;
+ /*
+ * Width/height on the CAPTURE end of a decoder are ignored and
+ * replaced by the OUTPUT ones.
+ */
+ pix_mp->width = ctx->src_fmt.width;
+ pix_mp->height = ctx->src_fmt.height;
+ }
+
+ pix_mp->field = V4L2_FIELD_NONE;
+
+ v4l2_apply_frmsize_constraints(&pix_mp->width, &pix_mp->height,
+ &vpu_fmt->frmsize);
+
+ if (!coded) {
+ /* Fill remaining fields */
+ v4l2_fill_pixfmt_mp(pix_mp, fmt->fourcc, pix_mp->width,
+ pix_mp->height);
+ } else if (!pix_mp->plane_fmt[0].sizeimage) {
+ /*
+ * For coded formats the application can specify
+ * sizeimage. If the application passes a zero sizeimage,
+ * let's default to the maximum frame size.
+ */
+ pix_mp->plane_fmt[0].sizeimage = fmt->header_size +
+ pix_mp->width * pix_mp->height * fmt->max_depth;
+ }
+
+ return 0;
+}
+
+static int vidioc_try_fmt_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_try_fmt(file, priv, f, true);
+}
+
+static int vidioc_try_fmt_out_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_try_fmt(file, priv, f, false);
+}
+
+static void
+hantro_reset_fmt(struct v4l2_pix_format_mplane *fmt,
+ const struct hantro_fmt *vpu_fmt)
+{
+ memset(fmt, 0, sizeof(*fmt));
+
+ fmt->pixelformat = vpu_fmt->fourcc;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_JPEG;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ fmt->quantization = V4L2_QUANTIZATION_DEFAULT;
+ fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+}
+
+static void
+hantro_reset_encoded_fmt(struct hantro_ctx *ctx)
+{
+ const struct hantro_fmt *vpu_fmt, *formats;
+ struct v4l2_pix_format_mplane *fmt;
+ unsigned int num_fmts;
+
+ formats = hantro_get_formats(ctx, &num_fmts);
+ vpu_fmt = hantro_get_default_fmt(formats, num_fmts, true);
+
+ if (hantro_is_encoder_ctx(ctx)) {
+ ctx->vpu_dst_fmt = vpu_fmt;
+ fmt = &ctx->dst_fmt;
+ } else {
+ ctx->vpu_src_fmt = vpu_fmt;
+ fmt = &ctx->src_fmt;
+ }
+
+ hantro_reset_fmt(fmt, vpu_fmt);
+ fmt->num_planes = 1;
+ fmt->width = vpu_fmt->frmsize.min_width;
+ fmt->height = vpu_fmt->frmsize.min_height;
+ fmt->plane_fmt[0].sizeimage = vpu_fmt->header_size +
+ fmt->width * fmt->height * vpu_fmt->max_depth;
+}
+
+static void
+hantro_reset_raw_fmt(struct hantro_ctx *ctx)
+{
+ const struct hantro_fmt *raw_vpu_fmt, *formats;
+ struct v4l2_pix_format_mplane *raw_fmt, *encoded_fmt;
+ unsigned int num_fmts;
+
+ formats = hantro_get_formats(ctx, &num_fmts);
+ raw_vpu_fmt = hantro_get_default_fmt(formats, num_fmts, false);
+
+ if (hantro_is_encoder_ctx(ctx)) {
+ ctx->vpu_src_fmt = raw_vpu_fmt;
+ raw_fmt = &ctx->src_fmt;
+ encoded_fmt = &ctx->dst_fmt;
+ } else {
+ ctx->vpu_dst_fmt = raw_vpu_fmt;
+ raw_fmt = &ctx->dst_fmt;
+ encoded_fmt = &ctx->src_fmt;
+ }
+
+ hantro_reset_fmt(raw_fmt, raw_vpu_fmt);
+ v4l2_fill_pixfmt_mp(raw_fmt, raw_vpu_fmt->fourcc,
+ encoded_fmt->width,
+ encoded_fmt->height);
+}
+
+void hantro_reset_fmts(struct hantro_ctx *ctx)
+{
+ hantro_reset_encoded_fmt(ctx);
+ hantro_reset_raw_fmt(ctx);
+}
+
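+/*
+ * Stateless codecs (MPEG-2 slice decoding) carry their parameters through
+ * the Request API, so their OUTPUT queue must require requests; the JPEG
+ * encoder only uses a regular quality control and does not.
+ */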
+static void
+hantro_update_requires_request(struct hantro_ctx *ctx, u32 fourcc)
+{
+ switch (fourcc) {
+ case V4L2_PIX_FMT_JPEG:
+ ctx->fh.m2m_ctx->out_q_ctx.q.requires_requests = false;
+ break;
+ case V4L2_PIX_FMT_MPEG2_SLICE:
+ ctx->fh.m2m_ctx->out_q_ctx.q.requires_requests = true;
+ break;
+ default:
+ break;
+ }
+}
+
+static int
+vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+ const struct hantro_fmt *formats;
+ unsigned int num_fmts;
+ struct vb2_queue *vq;
+ int ret;
+
+ /* Change not allowed if queue is busy. */
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ if (!hantro_is_encoder_ctx(ctx)) {
+ struct vb2_queue *peer_vq;
+
+ /*
+ * Since format change on the OUTPUT queue will reset
+ * the CAPTURE queue, we can't allow doing so
+ * when the CAPTURE queue has buffers allocated.
+ */
+ peer_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (vb2_is_busy(peer_vq))
+ return -EBUSY;
+ }
+
+ ret = vidioc_try_fmt_out_mplane(file, priv, f);
+ if (ret)
+ return ret;
+
+ formats = hantro_get_formats(ctx, &num_fmts);
+ ctx->vpu_src_fmt = hantro_find_format(formats, num_fmts,
+ pix_mp->pixelformat);
+ ctx->src_fmt = *pix_mp;
+
+ /*
+ * Current raw format might have become invalid with newly
+ * selected codec, so reset it to default just to be safe and
+ * keep internal driver state sane. User is mandated to set
+ * the raw format again after we return, so we don't need
+ * anything smarter.
+ * Note that hantro_reset_raw_fmt() also propagates size
+ * changes to the raw format.
+ */
+ if (!hantro_is_encoder_ctx(ctx))
+ hantro_reset_raw_fmt(ctx);
+
+ /* Colorimetry information is always propagated. */
+ ctx->dst_fmt.colorspace = pix_mp->colorspace;
+ ctx->dst_fmt.ycbcr_enc = pix_mp->ycbcr_enc;
+ ctx->dst_fmt.xfer_func = pix_mp->xfer_func;
+ ctx->dst_fmt.quantization = pix_mp->quantization;
+
+ hantro_update_requires_request(ctx, pix_mp->pixelformat);
+
+ vpu_debug(0, "OUTPUT codec mode: %d\n", ctx->vpu_src_fmt->codec_mode);
+ vpu_debug(0, "fmt - w: %d, h: %d\n",
+ pix_mp->width, pix_mp->height);
+ return 0;
+}
+
+static int vidioc_s_fmt_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+ const struct hantro_fmt *formats;
+ struct vb2_queue *vq;
+ unsigned int num_fmts;
+ int ret;
+
+ /* Change not allowed if queue is busy. */
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ if (hantro_is_encoder_ctx(ctx)) {
+ struct vb2_queue *peer_vq;
+
+ /*
+ * Since format change on the CAPTURE queue will reset
+ * the OUTPUT queue, we can't allow doing so
+ * when the OUTPUT queue has buffers allocated.
+ */
+ peer_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (vb2_is_busy(peer_vq) &&
+ (pix_mp->pixelformat != ctx->dst_fmt.pixelformat ||
+ pix_mp->height != ctx->dst_fmt.height ||
+ pix_mp->width != ctx->dst_fmt.width))
+ return -EBUSY;
+ }
+
+ ret = vidioc_try_fmt_cap_mplane(file, priv, f);
+ if (ret)
+ return ret;
+
+ formats = hantro_get_formats(ctx, &num_fmts);
+ ctx->vpu_dst_fmt = hantro_find_format(formats, num_fmts,
+ pix_mp->pixelformat);
+ ctx->dst_fmt = *pix_mp;
+
+ /*
+ * Current raw format might have become invalid with newly
+ * selected codec, so reset it to default just to be safe and
+ * keep internal driver state sane. User is mandated to set
+ * the raw format again after we return, so we don't need
+ * anything smarter.
+ * Note that hantro_reset_raw_fmt() also propagates size
+ * changes to the raw format.
+ */
+ if (hantro_is_encoder_ctx(ctx))
+ hantro_reset_raw_fmt(ctx);
+
+ /* Colorimetry information is always propagated. */
+ ctx->src_fmt.colorspace = pix_mp->colorspace;
+ ctx->src_fmt.ycbcr_enc = pix_mp->ycbcr_enc;
+ ctx->src_fmt.xfer_func = pix_mp->xfer_func;
+ ctx->src_fmt.quantization = pix_mp->quantization;
+
+ vpu_debug(0, "CAPTURE codec mode: %d\n", ctx->vpu_dst_fmt->codec_mode);
+ vpu_debug(0, "fmt - w: %d, h: %d\n",
+ pix_mp->width, pix_mp->height);
+
+ hantro_update_requires_request(ctx, pix_mp->pixelformat);
+
+ return 0;
+}
+
+const struct v4l2_ioctl_ops hantro_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_framesizes = vidioc_enum_framesizes,
+
+ .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_cap_mplane,
+ .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_out_mplane,
+ .vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt_out_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt_cap_mplane,
+ .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt_out_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt_cap_mplane,
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+};
+
+static int
+hantro_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format_mplane *pixfmt;
+ int i;
+
+ switch (vq->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ pixfmt = &ctx->dst_fmt;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ pixfmt = &ctx->src_fmt;
+ break;
+ default:
+ vpu_err("invalid queue type: %d\n", vq->type);
+ return -EINVAL;
+ }
+
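+ /*
+ * Called from VIDIOC_CREATE_BUFS with a preset plane count and sizes:
+ * just validate them against the current format.
+ */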
+ if (*num_planes) {
+ if (*num_planes != pixfmt->num_planes)
+ return -EINVAL;
+ for (i = 0; i < pixfmt->num_planes; ++i)
+ if (sizes[i] < pixfmt->plane_fmt[i].sizeimage)
+ return -EINVAL;
+ return 0;
+ }
+
+ *num_planes = pixfmt->num_planes;
+ for (i = 0; i < pixfmt->num_planes; ++i)
+ sizes[i] = pixfmt->plane_fmt[i].sizeimage;
+ return 0;
+}
+
+static int
+hantro_buf_plane_check(struct vb2_buffer *vb, const struct hantro_fmt *vpu_fmt,
+ struct v4l2_pix_format_mplane *pixfmt)
+{
+ unsigned int sz;
+ int i;
+
+ for (i = 0; i < pixfmt->num_planes; ++i) {
+ sz = pixfmt->plane_fmt[i].sizeimage;
+ vpu_debug(4, "plane %d size: %lu, sizeimage: %u\n",
+ i, vb2_plane_size(vb, i), sz);
+ if (vb2_plane_size(vb, i) < sz) {
+ vpu_err("plane %d is too small for output\n", i);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int hantro_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct hantro_ctx *ctx = vb2_get_drv_priv(vq);
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ return hantro_buf_plane_check(vb, ctx->vpu_src_fmt,
+ &ctx->src_fmt);
+
+ return hantro_buf_plane_check(vb, ctx->vpu_dst_fmt, &ctx->dst_fmt);
+}
+
+static void hantro_buf_queue(struct vb2_buffer *vb)
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
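+/*
+ * The coded (bitstream) queue is CAPTURE for an encoder and OUTPUT for a
+ * decoder, hence the comparison below.
+ */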
+static bool hantro_vq_is_coded(struct vb2_queue *q)
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(q);
+
+ return hantro_is_encoder_ctx(ctx) != V4L2_TYPE_IS_OUTPUT(q->type);
+}
+
+static int hantro_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(q);
+ int ret = 0;
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ ctx->sequence_out = 0;
+ else
+ ctx->sequence_cap = 0;
+
+ if (hantro_vq_is_coded(q)) {
+ enum hantro_codec_mode codec_mode;
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ codec_mode = ctx->vpu_src_fmt->codec_mode;
+ else
+ codec_mode = ctx->vpu_dst_fmt->codec_mode;
+
+ vpu_debug(4, "Codec mode = %d\n", codec_mode);
+ ctx->codec_ops = &ctx->dev->variant->codec_ops[codec_mode];
+ if (ctx->codec_ops->init)
+ ret = ctx->codec_ops->init(ctx);
+ }
+
+ return ret;
+}
+
+static void
+hantro_return_bufs(struct vb2_queue *q,
+ struct vb2_v4l2_buffer *(*buf_remove)(struct v4l2_m2m_ctx *))
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(q);
+
+ for (;;) {
+ struct vb2_v4l2_buffer *vbuf;
+
+ vbuf = buf_remove(ctx->fh.m2m_ctx);
+ if (!vbuf)
+ break;
+ v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req,
+ &ctx->ctrl_handler);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static void hantro_stop_streaming(struct vb2_queue *q)
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(q);
+
+ if (hantro_vq_is_coded(q)) {
+ if (ctx->codec_ops && ctx->codec_ops->exit)
+ ctx->codec_ops->exit(ctx);
+ }
+
+ /*
+ * The mem2mem framework calls v4l2_m2m_cancel_job before
+ * .stop_streaming, so there isn't any job running and
+ * it is safe to return all the buffers.
+ */
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ hantro_return_bufs(q, v4l2_m2m_src_buf_remove);
+ else
+ hantro_return_bufs(q, v4l2_m2m_dst_buf_remove);
+}
+
+static void hantro_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->ctrl_handler);
+}
+
+static int hantro_buf_out_validate(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ vbuf->field = V4L2_FIELD_NONE;
+ return 0;
+}
+
+const struct vb2_ops hantro_queue_ops = {
+ .queue_setup = hantro_queue_setup,
+ .buf_prepare = hantro_buf_prepare,
+ .buf_queue = hantro_buf_queue,
+ .buf_out_validate = hantro_buf_out_validate,
+ .buf_request_complete = hantro_buf_request_complete,
+ .start_streaming = hantro_start_streaming,
+ .stop_streaming = hantro_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
diff --git a/drivers/staging/media/hantro/hantro_v4l2.h b/drivers/staging/media/hantro/hantro_v4l2.h
new file mode 100644
index 000000000000..18bc682c8556
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_v4l2.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ * Alpha Lin <Alpha.Lin@rock-chips.com>
+ * Jeffy Chen <jeffy.chen@rock-chips.com>
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ *
+ * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef HANTRO_V4L2_H_
+#define HANTRO_V4L2_H_
+
+#include "hantro.h"
+
+extern const struct v4l2_ioctl_ops hantro_ioctl_ops;
+extern const struct vb2_ops hantro_queue_ops;
+
+void hantro_reset_fmts(struct hantro_ctx *ctx);
+
+#endif /* HANTRO_V4L2_H_ */
diff --git a/drivers/staging/media/hantro/rk3288_vpu_hw.c b/drivers/staging/media/hantro/rk3288_vpu_hw.c
new file mode 100644
index 000000000000..bcacc4f51093
--- /dev/null
+++ b/drivers/staging/media/hantro/rk3288_vpu_hw.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ * Jeffy Chen <jeffy.chen@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+
+#include "hantro.h"
+#include "hantro_jpeg.h"
+#include "hantro_g1_regs.h"
+#include "hantro_h1_regs.h"
+
+#define RK3288_ACLK_MAX_FREQ (400 * 1000 * 1000)
+
+/*
+ * Supported formats.
+ */
+
+static const struct hantro_fmt rk3288_vpu_enc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420M,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = RK3288_VPU_ENC_FMT_YUV420P,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = RK3288_VPU_ENC_FMT_YUV420SP,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = RK3288_VPU_ENC_FMT_YUYV422,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = RK3288_VPU_ENC_FMT_UYVY422,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_JPEG,
+ .codec_mode = HANTRO_MODE_JPEG_ENC,
+ .max_depth = 2,
+ .header_size = JPEG_HEADER_SIZE,
+ .frmsize = {
+ .min_width = 96,
+ .max_width = 8192,
+ .step_width = JPEG_MB_DIM,
+ .min_height = 32,
+ .max_height = 8192,
+ .step_height = JPEG_MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt rk3288_vpu_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
+ .codec_mode = HANTRO_MODE_MPEG2_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = 48,
+ .max_width = 1920,
+ .step_width = MPEG2_MB_DIM,
+ .min_height = 48,
+ .max_height = 1088,
+ .step_height = MPEG2_MB_DIM,
+ },
+ },
+};
+
+static irqreturn_t rk3288_vepu_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status, bytesused;
+
+ status = vepu_read(vpu, H1_REG_INTERRUPT);
+ bytesused = vepu_read(vpu, H1_REG_STR_BUF_LIMIT) / 8;
+ state = (status & H1_REG_INTERRUPT_FRAME_RDY) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vepu_write(vpu, 0, H1_REG_INTERRUPT);
+ vepu_write(vpu, 0, H1_REG_AXI_CTRL);
+
+ hantro_irq_done(vpu, bytesused, state);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rk3288_vdpu_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status;
+
+ status = vdpu_read(vpu, G1_REG_INTERRUPT);
+ state = (status & G1_REG_INTERRUPT_DEC_RDY_INT) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vdpu_write(vpu, 0, G1_REG_INTERRUPT);
+ vdpu_write(vpu, G1_REG_CONFIG_DEC_CLK_GATE_E, G1_REG_CONFIG);
+
+ hantro_irq_done(vpu, 0, state);
+
+ return IRQ_HANDLED;
+}
+
+static int rk3288_vpu_hw_init(struct hantro_dev *vpu)
+{
+ /* Bump ACLK to max. possible freq. to improve performance. */
+ clk_set_rate(vpu->clocks[0].clk, RK3288_ACLK_MAX_FREQ);
+ return 0;
+}
+
+static void rk3288_vpu_enc_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ vepu_write(vpu, H1_REG_INTERRUPT_DIS_BIT, H1_REG_INTERRUPT);
+ vepu_write(vpu, 0, H1_REG_ENC_CTRL);
+ vepu_write(vpu, 0, H1_REG_AXI_CTRL);
+}
+
+static void rk3288_vpu_dec_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ vdpu_write(vpu, G1_REG_INTERRUPT_DEC_IRQ_DIS, G1_REG_INTERRUPT);
+ vdpu_write(vpu, G1_REG_CONFIG_DEC_CLK_GATE_E, G1_REG_CONFIG);
+ vdpu_write(vpu, 1, G1_REG_SOFT_RESET);
+}
+
+/*
+ * Supported codec ops.
+ */
+
+static const struct hantro_codec_ops rk3288_vpu_codec_ops[] = {
+ [HANTRO_MODE_JPEG_ENC] = {
+ .run = hantro_h1_jpeg_enc_run,
+ .reset = rk3288_vpu_enc_reset,
+ .init = hantro_jpeg_enc_init,
+ .exit = hantro_jpeg_enc_exit,
+ },
+ [HANTRO_MODE_MPEG2_DEC] = {
+ .run = hantro_g1_mpeg2_dec_run,
+ .reset = rk3288_vpu_dec_reset,
+ .init = hantro_mpeg2_dec_init,
+ .exit = hantro_mpeg2_dec_exit,
+ },
+};
+
+/*
+ * VPU variant.
+ */
+
+static const struct hantro_irq rk3288_irqs[] = {
+ { "vepu", rk3288_vepu_irq },
+ { "vdpu", rk3288_vdpu_irq },
+};
+
+static const char * const rk3288_clk_names[] = {
+ "aclk", "hclk"
+};
+
+const struct hantro_variant rk3288_vpu_variant = {
+ .enc_offset = 0x0,
+ .enc_fmts = rk3288_vpu_enc_fmts,
+ .num_enc_fmts = ARRAY_SIZE(rk3288_vpu_enc_fmts),
+ .dec_offset = 0x400,
+ .dec_fmts = rk3288_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rk3288_vpu_dec_fmts),
+ .codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER,
+ .codec_ops = rk3288_vpu_codec_ops,
+ .irqs = rk3288_irqs,
+ .num_irqs = ARRAY_SIZE(rk3288_irqs),
+ .init = rk3288_vpu_hw_init,
+ .clk_names = rk3288_clk_names,
+ .num_clocks = ARRAY_SIZE(rk3288_clk_names)
+};
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw.c b/drivers/staging/media/hantro/rk3399_vpu_hw.c
new file mode 100644
index 000000000000..5718f8063542
--- /dev/null
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ * Jeffy Chen <jeffy.chen@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+
+#include "hantro.h"
+#include "hantro_jpeg.h"
+#include "rk3399_vpu_regs.h"
+
+#define RK3399_ACLK_MAX_FREQ (400 * 1000 * 1000)
+
+/*
+ * Supported formats.
+ */
+
+static const struct hantro_fmt rk3399_vpu_enc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420M,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = RK3288_VPU_ENC_FMT_YUV420P,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = RK3288_VPU_ENC_FMT_YUV420SP,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = RK3288_VPU_ENC_FMT_YUYV422,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = RK3288_VPU_ENC_FMT_UYVY422,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_JPEG,
+ .codec_mode = HANTRO_MODE_JPEG_ENC,
+ .max_depth = 2,
+ .header_size = JPEG_HEADER_SIZE,
+ .frmsize = {
+ .min_width = 96,
+ .max_width = 8192,
+ .step_width = JPEG_MB_DIM,
+ .min_height = 32,
+ .max_height = 8192,
+ .step_height = JPEG_MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt rk3399_vpu_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
+ .codec_mode = HANTRO_MODE_MPEG2_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = 48,
+ .max_width = 1920,
+ .step_width = MPEG2_MB_DIM,
+ .min_height = 48,
+ .max_height = 1088,
+ .step_height = MPEG2_MB_DIM,
+ },
+ },
+};
+
+static irqreturn_t rk3399_vepu_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status, bytesused;
+
+ status = vepu_read(vpu, VEPU_REG_INTERRUPT);
+ bytesused = vepu_read(vpu, VEPU_REG_STR_BUF_LIMIT) / 8;
+ state = (status & VEPU_REG_INTERRUPT_FRAME_READY) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vepu_write(vpu, 0, VEPU_REG_INTERRUPT);
+ vepu_write(vpu, 0, VEPU_REG_AXI_CTRL);
+
+ hantro_irq_done(vpu, bytesused, state);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rk3399_vdpu_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status;
+
+ status = vdpu_read(vpu, VDPU_REG_INTERRUPT);
+ state = (status & VDPU_REG_INTERRUPT_DEC_IRQ) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vdpu_write(vpu, 0, VDPU_REG_INTERRUPT);
+ vdpu_write(vpu, 0, VDPU_REG_AXI_CTRL);
+
+ hantro_irq_done(vpu, 0, state);
+
+ return IRQ_HANDLED;
+}
+
+static int rk3399_vpu_hw_init(struct hantro_dev *vpu)
+{
+ /* Bump ACLK to max. possible freq. to improve performance. */
+ clk_set_rate(vpu->clocks[0].clk, RK3399_ACLK_MAX_FREQ);
+ return 0;
+}
+
+static void rk3399_vpu_enc_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ vepu_write(vpu, VEPU_REG_INTERRUPT_DIS_BIT, VEPU_REG_INTERRUPT);
+ vepu_write(vpu, 0, VEPU_REG_ENCODE_START);
+ vepu_write(vpu, 0, VEPU_REG_AXI_CTRL);
+}
+
+static void rk3399_vpu_dec_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ vdpu_write(vpu, VDPU_REG_INTERRUPT_DEC_IRQ_DIS, VDPU_REG_INTERRUPT);
+ vdpu_write(vpu, 0, VDPU_REG_EN_FLAGS);
+ vdpu_write(vpu, 1, VDPU_REG_SOFT_RESET);
+}
+
+/*
+ * Supported codec ops.
+ */
+
+static const struct hantro_codec_ops rk3399_vpu_codec_ops[] = {
+ [HANTRO_MODE_JPEG_ENC] = {
+ .run = rk3399_vpu_jpeg_enc_run,
+ .reset = rk3399_vpu_enc_reset,
+ .init = hantro_jpeg_enc_init,
+ .exit = hantro_jpeg_enc_exit,
+ },
+ [HANTRO_MODE_MPEG2_DEC] = {
+ .run = rk3399_vpu_mpeg2_dec_run,
+ .reset = rk3399_vpu_dec_reset,
+ .init = hantro_mpeg2_dec_init,
+ .exit = hantro_mpeg2_dec_exit,
+ },
+};
+
+/*
+ * VPU variant.
+ */
+
+static const struct hantro_irq rk3399_irqs[] = {
+ { "vepu", rk3399_vepu_irq },
+ { "vdpu", rk3399_vdpu_irq },
+};
+
+static const char * const rk3399_clk_names[] = {
+ "aclk", "hclk"
+};
+
+const struct hantro_variant rk3399_vpu_variant = {
+ .enc_offset = 0x0,
+ .enc_fmts = rk3399_vpu_enc_fmts,
+ .num_enc_fmts = ARRAY_SIZE(rk3399_vpu_enc_fmts),
+ .dec_offset = 0x400,
+ .dec_fmts = rk3399_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rk3399_vpu_dec_fmts),
+ .codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER,
+ .codec_ops = rk3399_vpu_codec_ops,
+ .irqs = rk3399_irqs,
+ .num_irqs = ARRAY_SIZE(rk3399_irqs),
+ .init = rk3399_vpu_hw_init,
+ .clk_names = rk3399_clk_names,
+ .num_clocks = ARRAY_SIZE(rk3399_clk_names)
+};
diff --git a/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw_jpeg_enc.c b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
index 3d438797692e..ae66354d2d93 100644
--- a/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw_jpeg_enc.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Rockchip VPU codec driver
+ * Hantro VPU codec driver
*
* Copyright (C) 2018 Rockchip Electronics Co., Ltd.
*
@@ -25,16 +25,16 @@
#include <asm/unaligned.h>
#include <media/v4l2-mem2mem.h>
-#include "rockchip_vpu_jpeg.h"
-#include "rockchip_vpu.h"
-#include "rockchip_vpu_common.h"
-#include "rockchip_vpu_hw.h"
+#include "hantro_jpeg.h"
+#include "hantro.h"
+#include "hantro_v4l2.h"
+#include "hantro_hw.h"
#include "rk3399_vpu_regs.h"
#define VEPU_JPEG_QUANT_TABLE_COUNT 16
-static void rk3399_vpu_set_src_img_ctrl(struct rockchip_vpu_dev *vpu,
- struct rockchip_vpu_ctx *ctx)
+static void rk3399_vpu_set_src_img_ctrl(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx)
{
struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
u32 reg;
@@ -60,8 +60,8 @@ static void rk3399_vpu_set_src_img_ctrl(struct rockchip_vpu_dev *vpu,
vepu_write_relaxed(vpu, reg, VEPU_REG_ENC_CTRL1);
}
-static void rk3399_vpu_jpeg_enc_set_buffers(struct rockchip_vpu_dev *vpu,
- struct rockchip_vpu_ctx *ctx,
+static void rk3399_vpu_jpeg_enc_set_buffers(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx,
struct vb2_buffer *src_buf)
{
struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
@@ -69,9 +69,9 @@ static void rk3399_vpu_jpeg_enc_set_buffers(struct rockchip_vpu_dev *vpu,
WARN_ON(pix_fmt->num_planes > 3);
- vepu_write_relaxed(vpu, ctx->bounce_dma_addr,
+ vepu_write_relaxed(vpu, ctx->jpeg_enc.bounce_buffer.dma,
VEPU_REG_ADDR_OUTPUT_STREAM);
- vepu_write_relaxed(vpu, ctx->bounce_size,
+ vepu_write_relaxed(vpu, ctx->jpeg_enc.bounce_buffer.size,
VEPU_REG_STR_BUF_LIMIT);
if (pix_fmt->num_planes == 1) {
@@ -93,7 +93,7 @@ static void rk3399_vpu_jpeg_enc_set_buffers(struct rockchip_vpu_dev *vpu,
}
static void
-rk3399_vpu_jpeg_enc_set_qtable(struct rockchip_vpu_dev *vpu,
+rk3399_vpu_jpeg_enc_set_qtable(struct hantro_dev *vpu,
unsigned char *luma_qtable,
unsigned char *chroma_qtable)
{
@@ -108,22 +108,26 @@ rk3399_vpu_jpeg_enc_set_qtable(struct rockchip_vpu_dev *vpu,
}
}
-void rk3399_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
+void rk3399_vpu_jpeg_enc_run(struct hantro_ctx *ctx)
{
- struct rockchip_vpu_dev *vpu = ctx->dev;
+ struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
- struct rockchip_vpu_jpeg_ctx jpeg_ctx;
+ struct hantro_jpeg_ctx jpeg_ctx;
+ struct media_request *src_req;
u32 reg;
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ src_req = src_buf->vb2_buf.req_obj.req;
+ v4l2_ctrl_request_setup(src_req, &ctx->ctrl_handler);
+
memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
jpeg_ctx.width = ctx->dst_fmt.width;
jpeg_ctx.height = ctx->dst_fmt.height;
jpeg_ctx.quality = ctx->jpeg_quality;
- rockchip_vpu_jpeg_header_assemble(&jpeg_ctx);
+ hantro_jpeg_header_assemble(&jpeg_ctx);
/* Switch to JPEG encoder mode before writing registers */
vepu_write_relaxed(vpu, VEPU_REG_ENCODE_FORMAT_JPEG,
@@ -132,8 +136,8 @@ void rk3399_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
rk3399_vpu_set_src_img_ctrl(vpu, ctx);
rk3399_vpu_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf);
rk3399_vpu_jpeg_enc_set_qtable(vpu,
- rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 0),
- rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 1));
+ hantro_jpeg_get_qtable(&jpeg_ctx, 0),
+ hantro_jpeg_get_qtable(&jpeg_ctx, 1));
reg = VEPU_REG_OUTPUT_SWAP32
| VEPU_REG_OUTPUT_SWAP16
@@ -153,6 +157,8 @@ void rk3399_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
| VEPU_REG_ENCODE_FORMAT_JPEG
| VEPU_REG_ENCODE_ENABLE;
+ v4l2_ctrl_request_complete(src_req, &ctx->ctrl_handler);
+
/* Kick the watchdog and start encoding */
schedule_delayed_work(&vpu->watchdog_work, msecs_to_jiffies(2000));
vepu_write(vpu, reg, VEPU_REG_ENCODE_START);
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c b/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
new file mode 100644
index 000000000000..8685bddfbcab
--- /dev/null
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ */
+
+#include <asm/unaligned.h>
+#include <linux/bitfield.h>
+#include <media/v4l2-mem2mem.h>
+#include "hantro.h"
+#include "hantro_hw.h"
+
+#define VDPU_SWREG(nr) ((nr) * 4)
+
+#define VDPU_REG_DEC_OUT_BASE VDPU_SWREG(63)
+#define VDPU_REG_RLC_VLC_BASE VDPU_SWREG(64)
+#define VDPU_REG_QTABLE_BASE VDPU_SWREG(61)
+#define VDPU_REG_REFER0_BASE VDPU_SWREG(131)
+#define VDPU_REG_REFER2_BASE VDPU_SWREG(134)
+#define VDPU_REG_REFER3_BASE VDPU_SWREG(135)
+#define VDPU_REG_REFER1_BASE VDPU_SWREG(148)
+#define VDPU_REG_DEC_E(v) ((v) ? BIT(0) : 0)
+
+#define VDPU_REG_DEC_ADV_PRE_DIS(v) ((v) ? BIT(11) : 0)
+#define VDPU_REG_DEC_SCMD_DIS(v) ((v) ? BIT(10) : 0)
+#define VDPU_REG_FILTERING_DIS(v) ((v) ? BIT(8) : 0)
+#define VDPU_REG_DEC_LATENCY(v) (((v) << 1) & GENMASK(6, 1))
+
+#define VDPU_REG_INIT_QP(v) (((v) << 25) & GENMASK(30, 25))
+#define VDPU_REG_STREAM_LEN(v) (((v) << 0) & GENMASK(23, 0))
+
+#define VDPU_REG_APF_THRESHOLD(v) (((v) << 17) & GENMASK(30, 17))
+#define VDPU_REG_STARTMB_X(v) (((v) << 8) & GENMASK(16, 8))
+#define VDPU_REG_STARTMB_Y(v) (((v) << 0) & GENMASK(7, 0))
+
+#define VDPU_REG_DEC_MODE(v) (((v) << 0) & GENMASK(3, 0))
+
+#define VDPU_REG_DEC_STRENDIAN_E(v) ((v) ? BIT(5) : 0)
+#define VDPU_REG_DEC_STRSWAP32_E(v) ((v) ? BIT(4) : 0)
+#define VDPU_REG_DEC_OUTSWAP32_E(v) ((v) ? BIT(3) : 0)
+#define VDPU_REG_DEC_INSWAP32_E(v) ((v) ? BIT(2) : 0)
+#define VDPU_REG_DEC_OUT_ENDIAN(v) ((v) ? BIT(1) : 0)
+#define VDPU_REG_DEC_IN_ENDIAN(v) ((v) ? BIT(0) : 0)
+
+#define VDPU_REG_DEC_DATA_DISC_E(v) ((v) ? BIT(22) : 0)
+#define VDPU_REG_DEC_MAX_BURST(v) (((v) << 16) & GENMASK(20, 16))
+#define VDPU_REG_DEC_AXI_WR_ID(v) (((v) << 8) & GENMASK(15, 8))
+#define VDPU_REG_DEC_AXI_RD_ID(v) (((v) << 0) & GENMASK(7, 0))
+
+#define VDPU_REG_RLC_MODE_E(v) ((v) ? BIT(20) : 0)
+#define VDPU_REG_PIC_INTERLACE_E(v) ((v) ? BIT(17) : 0)
+#define VDPU_REG_PIC_FIELDMODE_E(v) ((v) ? BIT(16) : 0)
+#define VDPU_REG_PIC_B_E(v) ((v) ? BIT(15) : 0)
+#define VDPU_REG_PIC_INTER_E(v) ((v) ? BIT(14) : 0)
+#define VDPU_REG_PIC_TOPFIELD_E(v) ((v) ? BIT(13) : 0)
+#define VDPU_REG_FWD_INTERLACE_E(v) ((v) ? BIT(12) : 0)
+#define VDPU_REG_WRITE_MVS_E(v) ((v) ? BIT(10) : 0)
+#define VDPU_REG_DEC_TIMEOUT_E(v) ((v) ? BIT(5) : 0)
+#define VDPU_REG_DEC_CLK_GATE_E(v) ((v) ? BIT(4) : 0)
+
+#define VDPU_REG_PIC_MB_WIDTH(v) (((v) << 23) & GENMASK(31, 23))
+#define VDPU_REG_PIC_MB_HEIGHT_P(v) (((v) << 11) & GENMASK(18, 11))
+#define VDPU_REG_ALT_SCAN_E(v) ((v) ? BIT(6) : 0)
+#define VDPU_REG_TOPFIELDFIRST_E(v) ((v) ? BIT(5) : 0)
+
+#define VDPU_REG_STRM_START_BIT(v) (((v) << 26) & GENMASK(31, 26))
+#define VDPU_REG_QSCALE_TYPE(v) ((v) ? BIT(24) : 0)
+#define VDPU_REG_CON_MV_E(v) ((v) ? BIT(4) : 0)
+#define VDPU_REG_INTRA_DC_PREC(v) (((v) << 2) & GENMASK(3, 2))
+#define VDPU_REG_INTRA_VLC_TAB(v) ((v) ? BIT(1) : 0)
+#define VDPU_REG_FRAME_PRED_DCT(v) ((v) ? BIT(0) : 0)
+
+#define VDPU_REG_ALT_SCAN_FLAG_E(v) ((v) ? BIT(19) : 0)
+#define VDPU_REG_FCODE_FWD_HOR(v) (((v) << 15) & GENMASK(18, 15))
+#define VDPU_REG_FCODE_FWD_VER(v) (((v) << 11) & GENMASK(14, 11))
+#define VDPU_REG_FCODE_BWD_HOR(v) (((v) << 7) & GENMASK(10, 7))
+#define VDPU_REG_FCODE_BWD_VER(v) (((v) << 3) & GENMASK(6, 3))
+#define VDPU_REG_MV_ACCURACY_FWD(v) ((v) ? BIT(2) : 0)
+#define VDPU_REG_MV_ACCURACY_BWD(v) ((v) ? BIT(1) : 0)
+
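+/* MPEG-2 picture_structure values, as carried in the slice parameters. */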
+#define PICT_TOP_FIELD 1
+#define PICT_BOTTOM_FIELD 2
+#define PICT_FRAME 3
+
+static void
+rk3399_vpu_mpeg2_dec_set_quantization(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx)
+{
+ struct v4l2_ctrl_mpeg2_quantization *quantization;
+
+ quantization = hantro_get_ctrl(ctx,
+ V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION);
+ hantro_mpeg2_dec_copy_qtable(ctx->mpeg2_dec.qtable.cpu, quantization);
+ vdpu_write_relaxed(vpu, ctx->mpeg2_dec.qtable.dma,
+ VDPU_REG_QTABLE_BASE);
+}
+
+static void
+rk3399_vpu_mpeg2_dec_set_buffers(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx,
+ struct vb2_buffer *src_buf,
+ struct vb2_buffer *dst_buf,
+ const struct v4l2_mpeg2_sequence *sequence,
+ const struct v4l2_mpeg2_picture *picture,
+ const struct v4l2_ctrl_mpeg2_slice_params *slice_params)
+{
+ dma_addr_t forward_addr = 0, backward_addr = 0;
+ dma_addr_t current_addr, addr;
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
+
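+ /* B pictures need both references, P pictures only the forward one. */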
+ switch (picture->picture_coding_type) {
+ case V4L2_MPEG2_PICTURE_CODING_TYPE_B:
+ backward_addr = hantro_get_ref(vq,
+ slice_params->backward_ref_ts);
+ /* fall-through */
+ case V4L2_MPEG2_PICTURE_CODING_TYPE_P:
+ forward_addr = hantro_get_ref(vq,
+ slice_params->forward_ref_ts);
+ }
+
+ /* Source bitstream buffer */
+ addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ vdpu_write_relaxed(vpu, addr, VDPU_REG_RLC_VLC_BASE);
+
+ /* Destination frame buffer */
+ addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ current_addr = addr;
+
+ if (picture->picture_structure == PICT_BOTTOM_FIELD)
+ addr += ALIGN(ctx->dst_fmt.width, 16);
+ vdpu_write_relaxed(vpu, addr, VDPU_REG_DEC_OUT_BASE);
+
+ if (!forward_addr)
+ forward_addr = current_addr;
+ if (!backward_addr)
+ backward_addr = current_addr;
+
+ /* Set forward ref frame (top/bottom field) */
+ if (picture->picture_structure == PICT_FRAME ||
+ picture->picture_coding_type == V4L2_MPEG2_PICTURE_CODING_TYPE_B ||
+ (picture->picture_structure == PICT_TOP_FIELD &&
+ picture->top_field_first) ||
+ (picture->picture_structure == PICT_BOTTOM_FIELD &&
+ !picture->top_field_first)) {
+ vdpu_write_relaxed(vpu, forward_addr, VDPU_REG_REFER0_BASE);
+ vdpu_write_relaxed(vpu, forward_addr, VDPU_REG_REFER1_BASE);
+ } else if (picture->picture_structure == PICT_TOP_FIELD) {
+ vdpu_write_relaxed(vpu, forward_addr, VDPU_REG_REFER0_BASE);
+ vdpu_write_relaxed(vpu, current_addr, VDPU_REG_REFER1_BASE);
+ } else if (picture->picture_structure == PICT_BOTTOM_FIELD) {
+ vdpu_write_relaxed(vpu, current_addr, VDPU_REG_REFER0_BASE);
+ vdpu_write_relaxed(vpu, forward_addr, VDPU_REG_REFER1_BASE);
+ }
+
+ /* Set backward ref frame (top/bottom field) */
+ vdpu_write_relaxed(vpu, backward_addr, VDPU_REG_REFER2_BASE);
+ vdpu_write_relaxed(vpu, backward_addr, VDPU_REG_REFER3_BASE);
+}
+
+void rk3399_vpu_mpeg2_dec_run(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ const struct v4l2_ctrl_mpeg2_slice_params *slice_params;
+ const struct v4l2_mpeg2_sequence *sequence;
+ const struct v4l2_mpeg2_picture *picture;
+ u32 reg;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ /* Apply request controls if any */
+ v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req,
+ &ctx->ctrl_handler);
+
+ slice_params = hantro_get_ctrl(ctx,
+ V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS);
+ sequence = &slice_params->sequence;
+ picture = &slice_params->picture;
+
+ reg = VDPU_REG_DEC_ADV_PRE_DIS(0) |
+ VDPU_REG_DEC_SCMD_DIS(0) |
+ VDPU_REG_FILTERING_DIS(1) |
+ VDPU_REG_DEC_LATENCY(0);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(50));
+
+ reg = VDPU_REG_INIT_QP(1) |
+ VDPU_REG_STREAM_LEN(slice_params->bit_size >> 3);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(51));
+
+ reg = VDPU_REG_APF_THRESHOLD(8) |
+ VDPU_REG_STARTMB_X(0) |
+ VDPU_REG_STARTMB_Y(0);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(52));
+
+ reg = VDPU_REG_DEC_MODE(5);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(53));
+
+ reg = VDPU_REG_DEC_STRENDIAN_E(1) |
+ VDPU_REG_DEC_STRSWAP32_E(1) |
+ VDPU_REG_DEC_OUTSWAP32_E(1) |
+ VDPU_REG_DEC_INSWAP32_E(1) |
+ VDPU_REG_DEC_OUT_ENDIAN(1) |
+ VDPU_REG_DEC_IN_ENDIAN(1);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(54));
+
+ reg = VDPU_REG_DEC_DATA_DISC_E(0) |
+ VDPU_REG_DEC_MAX_BURST(16) |
+ VDPU_REG_DEC_AXI_WR_ID(0) |
+ VDPU_REG_DEC_AXI_RD_ID(0);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(56));
+
+ reg = VDPU_REG_RLC_MODE_E(0) |
+ VDPU_REG_PIC_INTERLACE_E(!sequence->progressive_sequence) |
+ VDPU_REG_PIC_FIELDMODE_E(picture->picture_structure != PICT_FRAME) |
+ VDPU_REG_PIC_B_E(picture->picture_coding_type == V4L2_MPEG2_PICTURE_CODING_TYPE_B) |
+ VDPU_REG_PIC_INTER_E(picture->picture_coding_type != V4L2_MPEG2_PICTURE_CODING_TYPE_I) |
+ VDPU_REG_PIC_TOPFIELD_E(picture->picture_structure == PICT_TOP_FIELD) |
+ VDPU_REG_FWD_INTERLACE_E(0) |
+ VDPU_REG_WRITE_MVS_E(0) |
+ VDPU_REG_DEC_TIMEOUT_E(1) |
+ VDPU_REG_DEC_CLK_GATE_E(1);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(57));
+
+ reg = VDPU_REG_PIC_MB_WIDTH(MPEG2_MB_WIDTH(ctx->dst_fmt.width)) |
+ VDPU_REG_PIC_MB_HEIGHT_P(MPEG2_MB_HEIGHT(ctx->dst_fmt.height)) |
+ VDPU_REG_ALT_SCAN_E(picture->alternate_scan) |
+ VDPU_REG_TOPFIELDFIRST_E(picture->top_field_first);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(120));
+
+ reg = VDPU_REG_STRM_START_BIT(slice_params->data_bit_offset) |
+ VDPU_REG_QSCALE_TYPE(picture->q_scale_type) |
+ VDPU_REG_CON_MV_E(picture->concealment_motion_vectors) |
+ VDPU_REG_INTRA_DC_PREC(picture->intra_dc_precision) |
+ VDPU_REG_INTRA_VLC_TAB(picture->intra_vlc_format) |
+ VDPU_REG_FRAME_PRED_DCT(picture->frame_pred_frame_dct);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(122));
+
+ reg = VDPU_REG_ALT_SCAN_FLAG_E(picture->alternate_scan) |
+ VDPU_REG_FCODE_FWD_HOR(picture->f_code[0][0]) |
+ VDPU_REG_FCODE_FWD_VER(picture->f_code[0][1]) |
+ VDPU_REG_FCODE_BWD_HOR(picture->f_code[1][0]) |
+ VDPU_REG_FCODE_BWD_VER(picture->f_code[1][1]) |
+ VDPU_REG_MV_ACCURACY_FWD(1) |
+ VDPU_REG_MV_ACCURACY_BWD(1);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(136));
+
+ rk3399_vpu_mpeg2_dec_set_quantization(vpu, ctx);
+
+ rk3399_vpu_mpeg2_dec_set_buffers(vpu, ctx, &src_buf->vb2_buf,
+ &dst_buf->vb2_buf,
+ sequence, picture, slice_params);
+
+	/* Controls are no longer in use; we can complete them */
+ v4l2_ctrl_request_complete(src_buf->vb2_buf.req_obj.req,
+ &ctx->ctrl_handler);
+
+ /* Kick the watchdog and start decoding */
+ schedule_delayed_work(&vpu->watchdog_work, msecs_to_jiffies(2000));
+
+ reg = vdpu_read(vpu, VDPU_SWREG(57)) | VDPU_REG_DEC_E(1);
+ vdpu_write(vpu, reg, VDPU_SWREG(57));
+}
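
Illustrative sketch (not part of the patch above): the run function programs the macroblock counts through MPEG2_MB_WIDTH()/MPEG2_MB_HEIGHT() and, for bottom-field pictures, offsets the output base by one aligned luma line. Assuming those macros simply round the pixel dimensions up to 16x16 macroblocks (their real definitions live elsewhere in the driver and are not shown here), the arithmetic is:

#include <stdio.h>

#define MB_DIM		16
#define ALIGN16(x)	(((x) + 15) & ~15U)
#define MB_WIDTH(w)	(((w) + MB_DIM - 1) / MB_DIM)	/* assumed DIV_ROUND_UP */
#define MB_HEIGHT(h)	(((h) + MB_DIM - 1) / MB_DIM)

int main(void)
{
	unsigned int width = 720, height = 576;	/* PAL D1 example */

	/* macroblock counts written to VDPU_SWREG(120) */
	printf("mb_width=%u mb_height=%u\n", MB_WIDTH(width), MB_HEIGHT(height));

	/* bottom-field pictures start one aligned luma line into the buffer */
	printf("bottom-field offset=%u bytes\n", ALIGN16(width));

	return 0;
}

For 720x576 this yields 45x36 macroblocks and a 720-byte bottom-field offset, matching the ALIGN(ctx->dst_fmt.width, 16) adjustment applied to VDPU_REG_DEC_OUT_BASE above.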
diff --git a/drivers/staging/media/rockchip/vpu/rk3399_vpu_regs.h b/drivers/staging/media/hantro/rk3399_vpu_regs.h
index fbe294177ec9..88d096920f30 100644
--- a/drivers/staging/media/rockchip/vpu/rk3399_vpu_regs.h
+++ b/drivers/staging/media/hantro/rk3399_vpu_regs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Rockchip VPU codec driver
+ * Hantro VPU codec driver
*
* Copyright (C) 2018 Rockchip Electronics Co., Ltd.
* Alpha Lin <alpha.lin@rock-chips.com>
diff --git a/drivers/staging/media/imx/Makefile b/drivers/staging/media/imx/Makefile
index d2d909a36239..aa6c4b4ad37e 100644
--- a/drivers/staging/media/imx/Makefile
+++ b/drivers/staging/media/imx/Makefile
@@ -1,16 +1,16 @@
# SPDX-License-Identifier: GPL-2.0
-imx-media-objs := imx-media-dev.o imx-media-internal-sd.o imx-media-of.o
-imx-media-objs += imx-media-dev-common.o
-imx-media-common-objs := imx-media-utils.o imx-media-fim.o
-imx-media-ic-objs := imx-ic-common.o imx-ic-prp.o imx-ic-prpencvf.o
+imx6-media-objs := imx-media-dev.o imx-media-internal-sd.o \
+ imx-ic-common.o imx-ic-prp.o imx-ic-prpencvf.o imx-media-vdic.o
-obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media.o
+imx-media-common-objs := imx-media-capture.o imx-media-dev-common.o \
+ imx-media-of.o imx-media-utils.o
+
+imx6-media-csi-objs := imx-media-csi.o imx-media-fim.o
+
+obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx6-media.o
obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media-common.o
-obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media-capture.o
-obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media-vdic.o
-obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media-ic.o
-obj-$(CONFIG_VIDEO_IMX_CSI) += imx-media-csi.o
+obj-$(CONFIG_VIDEO_IMX_CSI) += imx6-media-csi.o
obj-$(CONFIG_VIDEO_IMX_CSI) += imx6-mipi-csi2.o
obj-$(CONFIG_VIDEO_IMX7_CSI) += imx7-media-csi.o
diff --git a/drivers/staging/media/imx/imx-ic-common.c b/drivers/staging/media/imx/imx-ic-common.c
index 18cd4cb92431..6df1ffb53895 100644
--- a/drivers/staging/media/imx/imx-ic-common.c
+++ b/drivers/staging/media/imx/imx-ic-common.c
@@ -4,8 +4,6 @@
*
* Copyright (c) 2014-2016 Mentor Graphics Inc.
*/
-#include <linux/module.h>
-#include <linux/platform_device.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
#include "imx-media.h"
@@ -20,23 +18,23 @@ static struct imx_ic_ops *ic_ops[IC_NUM_OPS] = {
[IC_TASK_VIEWFINDER] = &imx_ic_prpencvf_ops,
};
-static int imx_ic_probe(struct platform_device *pdev)
+struct v4l2_subdev *imx_media_ic_register(struct v4l2_device *v4l2_dev,
+ struct device *ipu_dev,
+ struct ipu_soc *ipu,
+ u32 grp_id)
{
- struct imx_media_ipu_internal_sd_pdata *pdata;
struct imx_ic_priv *priv;
int ret;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(ipu_dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
- platform_set_drvdata(pdev, &priv->sd);
- priv->dev = &pdev->dev;
+ priv->ipu_dev = ipu_dev;
+ priv->ipu = ipu;
- /* get our ipu_id, grp_id and IC task id */
- pdata = priv->dev->platform_data;
- priv->ipu_id = pdata->ipu_id;
- switch (pdata->grp_id) {
+ /* get our IC task id */
+ switch (grp_id) {
case IMX_MEDIA_GRP_ID_IPU_IC_PRP:
priv->task_id = IC_TASK_PRP;
break;
@@ -47,7 +45,7 @@ static int imx_ic_probe(struct platform_device *pdev)
priv->task_id = IC_TASK_VIEWFINDER;
break;
default:
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
v4l2_subdev_init(&priv->sd, ic_ops[priv->task_id]->subdev_ops);
@@ -55,55 +53,35 @@ static int imx_ic_probe(struct platform_device *pdev)
priv->sd.internal_ops = ic_ops[priv->task_id]->internal_ops;
priv->sd.entity.ops = ic_ops[priv->task_id]->entity_ops;
priv->sd.entity.function = MEDIA_ENT_F_PROC_VIDEO_SCALER;
- priv->sd.dev = &pdev->dev;
- priv->sd.owner = THIS_MODULE;
+ priv->sd.owner = ipu_dev->driver->owner;
priv->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
- priv->sd.grp_id = pdata->grp_id;
- strscpy(priv->sd.name, pdata->sd_name, sizeof(priv->sd.name));
+ priv->sd.grp_id = grp_id;
+ imx_media_grp_id_to_sd_name(priv->sd.name, sizeof(priv->sd.name),
+ priv->sd.grp_id, ipu_get_num(ipu));
ret = ic_ops[priv->task_id]->init(priv);
if (ret)
- return ret;
+ return ERR_PTR(ret);
- ret = v4l2_async_register_subdev(&priv->sd);
- if (ret)
+ ret = v4l2_device_register_subdev(v4l2_dev, &priv->sd);
+ if (ret) {
ic_ops[priv->task_id]->remove(priv);
+ return ERR_PTR(ret);
+ }
- return ret;
+ return &priv->sd;
}
-static int imx_ic_remove(struct platform_device *pdev)
+int imx_media_ic_unregister(struct v4l2_subdev *sd)
{
- struct v4l2_subdev *sd = platform_get_drvdata(pdev);
struct imx_ic_priv *priv = container_of(sd, struct imx_ic_priv, sd);
v4l2_info(sd, "Removing\n");
ic_ops[priv->task_id]->remove(priv);
- v4l2_async_unregister_subdev(sd);
+ v4l2_device_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
return 0;
}
-
-static const struct platform_device_id imx_ic_ids[] = {
- { .name = "imx-ipuv3-ic" },
- { },
-};
-MODULE_DEVICE_TABLE(platform, imx_ic_ids);
-
-static struct platform_driver imx_ic_driver = {
- .probe = imx_ic_probe,
- .remove = imx_ic_remove,
- .id_table = imx_ic_ids,
- .driver = {
- .name = "imx-ipuv3-ic",
- },
-};
-module_platform_driver(imx_ic_driver);
-
-MODULE_DESCRIPTION("i.MX IC subdev driver");
-MODULE_AUTHOR("Steve Longerbeam <steve_longerbeam@mentor.com>");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:imx-ipuv3-ic");
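
Illustrative sketch (not part of the patch above): with the platform driver removed, the IC subdev is now registered synchronously by the media driver. A hypothetical caller built only from the signatures introduced above might look roughly like this, assuming the usual imx-media.h / linux/err.h context (example_register_prp is an invented name):

/* example_register_prp() is invented for illustration; the called functions
 * and the IMX_MEDIA_GRP_ID_IPU_IC_PRP group id come from the diff above. */
static struct v4l2_subdev *example_register_prp(struct imx_media_dev *imxmd,
						struct device *ipu_dev,
						struct ipu_soc *ipu)
{
	struct v4l2_subdev *sd;

	sd = imx_media_ic_register(&imxmd->v4l2_dev, ipu_dev, ipu,
				   IMX_MEDIA_GRP_ID_IPU_IC_PRP);
	if (IS_ERR(sd))
		return sd;	/* ERR_PTR(-ENOMEM), ERR_PTR(-EINVAL), ... */

	/* media links are created later, at probe-complete time */
	/* on teardown: imx_media_ic_unregister(sd); */

	return sd;
}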
diff --git a/drivers/staging/media/imx/imx-ic-prp.c b/drivers/staging/media/imx/imx-ic-prp.c
index 10ffe00f1a54..5b4af3cfe670 100644
--- a/drivers/staging/media/imx/imx-ic-prp.c
+++ b/drivers/staging/media/imx/imx-ic-prp.c
@@ -35,16 +35,12 @@
#define S_ALIGN 1 /* multiple of 2 */
struct prp_priv {
- struct imx_media_dev *md;
struct imx_ic_priv *ic_priv;
struct media_pad pad[PRP_NUM_PADS];
/* lock to protect all members below */
struct mutex lock;
- /* IPU units we require */
- struct ipu_soc *ipu;
-
struct v4l2_subdev *src_sd;
struct v4l2_subdev *sink_sd_prpenc;
struct v4l2_subdev *sink_sd_prpvf;
@@ -62,7 +58,7 @@ static inline struct prp_priv *sd_to_priv(struct v4l2_subdev *sd)
{
struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
- return ic_priv->prp_priv;
+ return ic_priv->task_priv;
}
static int prp_start(struct prp_priv *priv)
@@ -70,12 +66,10 @@ static int prp_start(struct prp_priv *priv)
struct imx_ic_priv *ic_priv = priv->ic_priv;
bool src_is_vdic;
- priv->ipu = priv->md->ipu[ic_priv->ipu_id];
-
/* set IC to receive from CSI or VDI depending on source */
src_is_vdic = !!(priv->src_sd->grp_id & IMX_MEDIA_GRP_ID_IPU_VDIC);
- ipu_set_ic_src_mux(priv->ipu, priv->csi_id, src_is_vdic);
+ ipu_set_ic_src_mux(ic_priv->ipu, priv->csi_id, src_is_vdic);
return 0;
}
@@ -216,12 +210,12 @@ static int prp_link_setup(struct media_entity *entity,
{
struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
- struct prp_priv *priv = ic_priv->prp_priv;
+ struct prp_priv *priv = ic_priv->task_priv;
struct v4l2_subdev *remote_sd;
int ret = 0;
- dev_dbg(ic_priv->dev, "link setup %s -> %s", remote->entity->name,
- local->entity->name);
+ dev_dbg(ic_priv->ipu_dev, "%s: link setup %s -> %s",
+ ic_priv->sd.name, remote->entity->name, local->entity->name);
remote_sd = media_entity_to_v4l2_subdev(remote->entity);
@@ -295,7 +289,7 @@ static int prp_link_validate(struct v4l2_subdev *sd,
struct v4l2_subdev_format *sink_fmt)
{
struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
- struct prp_priv *priv = ic_priv->prp_priv;
+ struct prp_priv *priv = ic_priv->task_priv;
struct v4l2_subdev *csi;
int ret;
@@ -304,8 +298,8 @@ static int prp_link_validate(struct v4l2_subdev *sd,
if (ret)
return ret;
- csi = imx_media_find_upstream_subdev(priv->md, &ic_priv->sd.entity,
- IMX_MEDIA_GRP_ID_IPU_CSI);
+ csi = imx_media_pipeline_subdev(&ic_priv->sd.entity,
+ IMX_MEDIA_GRP_ID_IPU_CSI, true);
if (IS_ERR(csi))
csi = NULL;
@@ -351,7 +345,7 @@ out:
static int prp_s_stream(struct v4l2_subdev *sd, int enable)
{
struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
- struct prp_priv *priv = ic_priv->prp_priv;
+ struct prp_priv *priv = ic_priv->task_priv;
int ret = 0;
mutex_lock(&priv->lock);
@@ -368,7 +362,8 @@ static int prp_s_stream(struct v4l2_subdev *sd, int enable)
if (priv->stream_count != !enable)
goto update_count;
- dev_dbg(ic_priv->dev, "stream %s\n", enable ? "ON" : "OFF");
+ dev_dbg(ic_priv->ipu_dev, "%s: stream %s\n", sd->name,
+ enable ? "ON" : "OFF");
if (enable)
ret = prp_start(priv);
@@ -440,9 +435,6 @@ static int prp_registered(struct v4l2_subdev *sd)
int i, ret;
u32 code;
- /* get media device */
- priv->md = dev_get_drvdata(sd->v4l2_dev->dev);
-
for (i = 0; i < PRP_NUM_PADS; i++) {
priv->pad[i].flags = (i == PRP_SINK_PAD) ?
MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
@@ -494,12 +486,12 @@ static int prp_init(struct imx_ic_priv *ic_priv)
{
struct prp_priv *priv;
- priv = devm_kzalloc(ic_priv->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(ic_priv->ipu_dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
mutex_init(&priv->lock);
- ic_priv->prp_priv = priv;
+ ic_priv->task_priv = priv;
priv->ic_priv = ic_priv;
return 0;
@@ -507,7 +499,7 @@ static int prp_init(struct imx_ic_priv *ic_priv)
static void prp_remove(struct imx_ic_priv *ic_priv)
{
- struct prp_priv *priv = ic_priv->prp_priv;
+ struct prp_priv *priv = ic_priv->task_priv;
mutex_destroy(&priv->lock);
}
diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
index 64037b0a8387..82bba68c554e 100644
--- a/drivers/staging/media/imx/imx-ic-prpencvf.c
+++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
@@ -50,7 +50,6 @@
#define S_ALIGN 1 /* multiple of 2 */
struct prp_priv {
- struct imx_media_dev *md;
struct imx_ic_priv *ic_priv;
struct media_pad pad[PRPENCVF_NUM_PADS];
/* the video device at output pad */
@@ -60,7 +59,6 @@ struct prp_priv {
struct mutex lock;
/* IPU units we require */
- struct ipu_soc *ipu;
struct ipu_ic *ic;
struct ipuv3_channel *out_ch;
struct ipuv3_channel *rot_in_ch;
@@ -156,9 +154,7 @@ static int prp_get_ipu_resources(struct prp_priv *priv)
struct ipuv3_channel *out_ch, *rot_in_ch, *rot_out_ch;
int ret, task = ic_priv->task_id;
- priv->ipu = priv->md->ipu[ic_priv->ipu_id];
-
- ic = ipu_ic_get(priv->ipu, task);
+ ic = ipu_ic_get(ic_priv->ipu, task);
if (IS_ERR(ic)) {
v4l2_err(&ic_priv->sd, "failed to get IC\n");
ret = PTR_ERR(ic);
@@ -166,7 +162,7 @@ static int prp_get_ipu_resources(struct prp_priv *priv)
}
priv->ic = ic;
- out_ch = ipu_idmac_get(priv->ipu, prp_channel[task].out_ch);
+ out_ch = ipu_idmac_get(ic_priv->ipu, prp_channel[task].out_ch);
if (IS_ERR(out_ch)) {
v4l2_err(&ic_priv->sd, "could not get IDMAC channel %u\n",
prp_channel[task].out_ch);
@@ -175,7 +171,7 @@ static int prp_get_ipu_resources(struct prp_priv *priv)
}
priv->out_ch = out_ch;
- rot_in_ch = ipu_idmac_get(priv->ipu, prp_channel[task].rot_in_ch);
+ rot_in_ch = ipu_idmac_get(ic_priv->ipu, prp_channel[task].rot_in_ch);
if (IS_ERR(rot_in_ch)) {
v4l2_err(&ic_priv->sd, "could not get IDMAC channel %u\n",
prp_channel[task].rot_in_ch);
@@ -184,7 +180,7 @@ static int prp_get_ipu_resources(struct prp_priv *priv)
}
priv->rot_in_ch = rot_in_ch;
- rot_out_ch = ipu_idmac_get(priv->ipu, prp_channel[task].rot_out_ch);
+ rot_out_ch = ipu_idmac_get(ic_priv->ipu, prp_channel[task].rot_out_ch);
if (IS_ERR(rot_out_ch)) {
v4l2_err(&ic_priv->sd, "could not get IDMAC channel %u\n",
prp_channel[task].rot_out_ch);
@@ -464,13 +460,13 @@ static int prp_setup_rotation(struct prp_priv *priv)
incc = priv->cc[PRPENCVF_SINK_PAD];
outcc = vdev->cc;
- ret = imx_media_alloc_dma_buf(priv->md, &priv->rot_buf[0],
+ ret = imx_media_alloc_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[0],
outfmt->sizeimage);
if (ret) {
v4l2_err(&ic_priv->sd, "failed to alloc rot_buf[0], %d\n", ret);
return ret;
}
- ret = imx_media_alloc_dma_buf(priv->md, &priv->rot_buf[1],
+ ret = imx_media_alloc_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[1],
outfmt->sizeimage);
if (ret) {
v4l2_err(&ic_priv->sd, "failed to alloc rot_buf[1], %d\n", ret);
@@ -543,14 +539,16 @@ static int prp_setup_rotation(struct prp_priv *priv)
unsetup_vb2:
prp_unsetup_vb2_buf(priv, VB2_BUF_STATE_QUEUED);
free_rot1:
- imx_media_free_dma_buf(priv->md, &priv->rot_buf[1]);
+ imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[1]);
free_rot0:
- imx_media_free_dma_buf(priv->md, &priv->rot_buf[0]);
+ imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[0]);
return ret;
}
static void prp_unsetup_rotation(struct prp_priv *priv)
{
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+
ipu_ic_task_disable(priv->ic);
ipu_idmac_disable_channel(priv->out_ch);
@@ -561,8 +559,8 @@ static void prp_unsetup_rotation(struct prp_priv *priv)
ipu_ic_disable(priv->ic);
- imx_media_free_dma_buf(priv->md, &priv->rot_buf[0]);
- imx_media_free_dma_buf(priv->md, &priv->rot_buf[1]);
+ imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[0]);
+ imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[1]);
}
static int prp_setup_norotation(struct prp_priv *priv)
@@ -602,7 +600,7 @@ static int prp_setup_norotation(struct prp_priv *priv)
ipu_cpmem_dump(priv->out_ch);
ipu_ic_dump(priv->ic);
- ipu_dump(priv->ipu);
+ ipu_dump(ic_priv->ipu);
ipu_ic_enable(priv->ic);
@@ -654,7 +652,7 @@ static int prp_start(struct prp_priv *priv)
outfmt = &vdev->fmt.fmt.pix;
- ret = imx_media_alloc_dma_buf(priv->md, &priv->underrun_buf,
+ ret = imx_media_alloc_dma_buf(ic_priv->ipu_dev, &priv->underrun_buf,
outfmt->sizeimage);
if (ret)
goto out_put_ipu;
@@ -674,10 +672,10 @@ static int prp_start(struct prp_priv *priv)
if (ret)
goto out_free_underrun;
- priv->nfb4eof_irq = ipu_idmac_channel_irq(priv->ipu,
+ priv->nfb4eof_irq = ipu_idmac_channel_irq(ic_priv->ipu,
priv->out_ch,
IPU_IRQ_NFB4EOF);
- ret = devm_request_irq(ic_priv->dev, priv->nfb4eof_irq,
+ ret = devm_request_irq(ic_priv->ipu_dev, priv->nfb4eof_irq,
prp_nfb4eof_interrupt, 0,
"imx-ic-prp-nfb4eof", priv);
if (ret) {
@@ -688,12 +686,12 @@ static int prp_start(struct prp_priv *priv)
if (ipu_rot_mode_is_irt(priv->rot_mode))
priv->eof_irq = ipu_idmac_channel_irq(
- priv->ipu, priv->rot_out_ch, IPU_IRQ_EOF);
+ ic_priv->ipu, priv->rot_out_ch, IPU_IRQ_EOF);
else
priv->eof_irq = ipu_idmac_channel_irq(
- priv->ipu, priv->out_ch, IPU_IRQ_EOF);
+ ic_priv->ipu, priv->out_ch, IPU_IRQ_EOF);
- ret = devm_request_irq(ic_priv->dev, priv->eof_irq,
+ ret = devm_request_irq(ic_priv->ipu_dev, priv->eof_irq,
prp_eof_interrupt, 0,
"imx-ic-prp-eof", priv);
if (ret) {
@@ -718,13 +716,13 @@ static int prp_start(struct prp_priv *priv)
return 0;
out_free_eof_irq:
- devm_free_irq(ic_priv->dev, priv->eof_irq, priv);
+ devm_free_irq(ic_priv->ipu_dev, priv->eof_irq, priv);
out_free_nfb4eof_irq:
- devm_free_irq(ic_priv->dev, priv->nfb4eof_irq, priv);
+ devm_free_irq(ic_priv->ipu_dev, priv->nfb4eof_irq, priv);
out_unsetup:
prp_unsetup(priv, VB2_BUF_STATE_QUEUED);
out_free_underrun:
- imx_media_free_dma_buf(priv->md, &priv->underrun_buf);
+ imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->underrun_buf);
out_put_ipu:
prp_put_ipu_resources(priv);
return ret;
@@ -756,12 +754,12 @@ static void prp_stop(struct prp_priv *priv)
v4l2_warn(&ic_priv->sd,
"upstream stream off failed: %d\n", ret);
- devm_free_irq(ic_priv->dev, priv->eof_irq, priv);
- devm_free_irq(ic_priv->dev, priv->nfb4eof_irq, priv);
+ devm_free_irq(ic_priv->ipu_dev, priv->eof_irq, priv);
+ devm_free_irq(ic_priv->ipu_dev, priv->nfb4eof_irq, priv);
prp_unsetup(priv, VB2_BUF_STATE_ERROR);
- imx_media_free_dma_buf(priv->md, &priv->underrun_buf);
+ imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->underrun_buf);
/* cancel the EOF timeout timer */
del_timer_sync(&priv->eof_timeout_timer);
@@ -904,11 +902,8 @@ static int prp_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_format *sdformat)
{
struct prp_priv *priv = sd_to_priv(sd);
- struct imx_media_video_dev *vdev = priv->vdev;
const struct imx_media_pixfmt *cc;
- struct v4l2_pix_format vdev_fmt;
struct v4l2_mbus_framefmt *fmt;
- struct v4l2_rect vdev_compose;
int ret = 0;
if (sdformat->pad >= PRPENCVF_NUM_PADS)
@@ -944,19 +939,9 @@ static int prp_set_fmt(struct v4l2_subdev *sd,
priv->cc[PRPENCVF_SRC_PAD] = outcc;
}
- if (sdformat->which == V4L2_SUBDEV_FORMAT_TRY)
- goto out;
-
- priv->cc[sdformat->pad] = cc;
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ priv->cc[sdformat->pad] = cc;
- /* propagate output pad format to capture device */
- imx_media_mbus_fmt_to_pix_fmt(&vdev_fmt, &vdev_compose,
- &priv->format_mbus[PRPENCVF_SRC_PAD],
- priv->cc[PRPENCVF_SRC_PAD]);
- mutex_unlock(&priv->lock);
- imx_media_capture_device_set_format(vdev, &vdev_fmt, &vdev_compose);
-
- return 0;
out:
mutex_unlock(&priv->lock);
return ret;
@@ -1011,8 +996,8 @@ static int prp_link_setup(struct media_entity *entity,
struct v4l2_subdev *remote_sd;
int ret = 0;
- dev_dbg(ic_priv->dev, "link setup %s -> %s", remote->entity->name,
- local->entity->name);
+ dev_dbg(ic_priv->ipu_dev, "%s: link setup %s -> %s",
+ ic_priv->sd.name, remote->entity->name, local->entity->name);
mutex_lock(&priv->lock);
@@ -1178,7 +1163,8 @@ static int prp_s_stream(struct v4l2_subdev *sd, int enable)
if (priv->stream_count != !enable)
goto update_count;
- dev_dbg(ic_priv->dev, "stream %s\n", enable ? "ON" : "OFF");
+ dev_dbg(ic_priv->ipu_dev, "%s: stream %s\n", sd->name,
+ enable ? "ON" : "OFF");
if (enable)
ret = prp_start(priv);
@@ -1241,9 +1227,6 @@ static int prp_registered(struct v4l2_subdev *sd)
int i, ret;
u32 code;
- /* get media device */
- priv->md = dev_get_drvdata(sd->v4l2_dev->dev);
-
for (i = 0; i < PRPENCVF_NUM_PADS; i++) {
priv->pad[i].flags = (i == PRPENCVF_SINK_PAD) ?
MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
@@ -1266,14 +1249,10 @@ static int prp_registered(struct v4l2_subdev *sd)
if (ret)
return ret;
- ret = imx_media_capture_device_register(priv->md, priv->vdev);
+ ret = imx_media_capture_device_register(priv->vdev);
if (ret)
return ret;
- ret = imx_media_add_video_device(priv->md, priv->vdev);
- if (ret)
- goto unreg;
-
ret = prp_init_controls(priv);
if (ret)
goto unreg;
@@ -1325,7 +1304,7 @@ static int prp_init(struct imx_ic_priv *ic_priv)
{
struct prp_priv *priv;
- priv = devm_kzalloc(ic_priv->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(ic_priv->ipu_dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -1335,7 +1314,8 @@ static int prp_init(struct imx_ic_priv *ic_priv)
spin_lock_init(&priv->irqlock);
timer_setup(&priv->eof_timeout_timer, prp_eof_timeout, 0);
- priv->vdev = imx_media_capture_device_init(&ic_priv->sd,
+ priv->vdev = imx_media_capture_device_init(ic_priv->ipu_dev,
+ &ic_priv->sd,
PRPENCVF_SRC_PAD);
if (IS_ERR(priv->vdev))
return PTR_ERR(priv->vdev);
diff --git a/drivers/staging/media/imx/imx-ic.h b/drivers/staging/media/imx/imx-ic.h
index 0dbcf2a7ab5f..587c191c3eab 100644
--- a/drivers/staging/media/imx/imx-ic.h
+++ b/drivers/staging/media/imx/imx-ic.h
@@ -10,11 +10,10 @@
#include <media/v4l2-subdev.h>
struct imx_ic_priv {
- struct device *dev;
+ struct device *ipu_dev;
+ struct ipu_soc *ipu;
struct v4l2_subdev sd;
- int ipu_id;
int task_id;
- void *prp_priv;
void *task_priv;
};
@@ -29,6 +28,5 @@ struct imx_ic_ops {
extern struct imx_ic_ops imx_ic_prp_ops;
extern struct imx_ic_ops imx_ic_prpencvf_ops;
-extern struct imx_ic_ops imx_ic_pp_ops;
#endif
diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
index 9430c835c434..b33a07bc9105 100644
--- a/drivers/staging/media/imx/imx-media-capture.c
+++ b/drivers/staging/media/imx/imx-media-capture.c
@@ -202,6 +202,7 @@ static int capture_g_fmt_vid_cap(struct file *file, void *fh,
static int __capture_try_fmt_vid_cap(struct capture_priv *priv,
struct v4l2_subdev_format *fmt_src,
struct v4l2_format *f,
+ const struct imx_media_pixfmt **retcc,
struct v4l2_rect *compose)
{
const struct imx_media_pixfmt *cc, *cc_src;
@@ -242,8 +243,17 @@ static int __capture_try_fmt_vid_cap(struct capture_priv *priv,
}
}
- imx_media_mbus_fmt_to_pix_fmt(&f->fmt.pix, compose,
- &fmt_src->format, cc);
+ imx_media_mbus_fmt_to_pix_fmt(&f->fmt.pix, &fmt_src->format, cc);
+
+ if (retcc)
+ *retcc = cc;
+
+ if (compose) {
+ compose->left = 0;
+ compose->top = 0;
+ compose->width = fmt_src->format.width;
+ compose->height = fmt_src->format.height;
+ }
return 0;
}
@@ -261,7 +271,7 @@ static int capture_try_fmt_vid_cap(struct file *file, void *fh,
if (ret)
return ret;
- return __capture_try_fmt_vid_cap(priv, &fmt_src, f, NULL);
+ return __capture_try_fmt_vid_cap(priv, &fmt_src, f, NULL, NULL);
}
static int capture_s_fmt_vid_cap(struct file *file, void *fh,
@@ -269,7 +279,6 @@ static int capture_s_fmt_vid_cap(struct file *file, void *fh,
{
struct capture_priv *priv = video_drvdata(file);
struct v4l2_subdev_format fmt_src;
- struct v4l2_rect compose;
int ret;
if (vb2_is_busy(&priv->q)) {
@@ -283,14 +292,12 @@ static int capture_s_fmt_vid_cap(struct file *file, void *fh,
if (ret)
return ret;
- ret = __capture_try_fmt_vid_cap(priv, &fmt_src, f, &compose);
+ ret = __capture_try_fmt_vid_cap(priv, &fmt_src, f, &priv->vdev.cc,
+ &priv->vdev.compose);
if (ret)
return ret;
priv->vdev.fmt.fmt.pix = f->fmt.pix;
- priv->vdev.cc = imx_media_find_format(f->fmt.pix.pixelformat,
- CS_SEL_ANY, true);
- priv->vdev.compose = compose;
return 0;
}
@@ -520,6 +527,33 @@ static void capture_buf_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&priv->q_lock, flags);
}
+static int capture_validate_fmt(struct capture_priv *priv)
+{
+ struct v4l2_subdev_format fmt_src;
+ const struct imx_media_pixfmt *cc;
+ struct v4l2_rect compose;
+ struct v4l2_format f;
+ int ret;
+
+ fmt_src.pad = priv->src_sd_pad;
+ fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(priv->src_sd, pad, get_fmt, NULL, &fmt_src);
+ if (ret)
+ return ret;
+
+ v4l2_fill_pix_format(&f.fmt.pix, &fmt_src.format);
+
+ ret = __capture_try_fmt_vid_cap(priv, &fmt_src, &f, &cc, &compose);
+ if (ret)
+ return ret;
+
+ return (priv->vdev.fmt.fmt.pix.width != f.fmt.pix.width ||
+ priv->vdev.fmt.fmt.pix.height != f.fmt.pix.height ||
+ priv->vdev.cc->cs != cc->cs ||
+ priv->vdev.compose.width != compose.width ||
+ priv->vdev.compose.height != compose.height) ? -EINVAL : 0;
+}
+
static int capture_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct capture_priv *priv = vb2_get_drv_priv(vq);
@@ -527,6 +561,12 @@ static int capture_start_streaming(struct vb2_queue *vq, unsigned int count)
unsigned long flags;
int ret;
+ ret = capture_validate_fmt(priv);
+ if (ret) {
+ v4l2_err(priv->src_sd, "capture format not valid\n");
+ goto return_bufs;
+ }
+
ret = imx_media_pipeline_set_stream(priv->md, &priv->src_sd->entity,
true);
if (ret) {
@@ -614,7 +654,6 @@ static int capture_release(struct file *file)
struct capture_priv *priv = video_drvdata(file);
struct video_device *vfd = priv->vdev.vfd;
struct vb2_queue *vq = &priv->q;
- int ret = 0;
mutex_lock(&priv->mutex);
@@ -627,7 +666,7 @@ static int capture_release(struct file *file)
v4l2_fh_release(file);
mutex_unlock(&priv->mutex);
- return ret;
+ return 0;
}
static const struct v4l2_file_operations capture_fops = {
@@ -649,21 +688,6 @@ static struct video_device capture_videodev = {
.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING,
};
-void imx_media_capture_device_set_format(struct imx_media_video_dev *vdev,
- const struct v4l2_pix_format *pix,
- const struct v4l2_rect *compose)
-{
- struct capture_priv *priv = to_capture_priv(vdev);
-
- mutex_lock(&priv->mutex);
- priv->vdev.fmt.fmt.pix = *pix;
- priv->vdev.cc = imx_media_find_format(pix->pixelformat, CS_SEL_ANY,
- true);
- priv->vdev.compose = *compose;
- mutex_unlock(&priv->mutex);
-}
-EXPORT_SYMBOL_GPL(imx_media_capture_device_set_format);
-
struct imx_media_buffer *
imx_media_capture_device_next_buf(struct imx_media_video_dev *vdev)
{
@@ -701,19 +725,20 @@ void imx_media_capture_device_error(struct imx_media_video_dev *vdev)
}
EXPORT_SYMBOL_GPL(imx_media_capture_device_error);
-int imx_media_capture_device_register(struct imx_media_dev *md,
- struct imx_media_video_dev *vdev)
+int imx_media_capture_device_register(struct imx_media_video_dev *vdev)
{
struct capture_priv *priv = to_capture_priv(vdev);
struct v4l2_subdev *sd = priv->src_sd;
+ struct v4l2_device *v4l2_dev = sd->v4l2_dev;
struct video_device *vfd = vdev->vfd;
struct vb2_queue *vq = &priv->q;
struct v4l2_subdev_format fmt_src;
int ret;
- priv->md = md;
+ /* get media device */
+ priv->md = container_of(v4l2_dev->mdev, struct imx_media_dev, md);
- vfd->v4l2_dev = sd->v4l2_dev;
+ vfd->v4l2_dev = v4l2_dev;
ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
if (ret) {
@@ -765,8 +790,10 @@ int imx_media_capture_device_register(struct imx_media_dev *md,
}
vdev->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- imx_media_mbus_fmt_to_pix_fmt(&vdev->fmt.fmt.pix, &vdev->compose,
+ imx_media_mbus_fmt_to_pix_fmt(&vdev->fmt.fmt.pix,
&fmt_src.format, NULL);
+ vdev->compose.width = fmt_src.format.width;
+ vdev->compose.height = fmt_src.format.height;
vdev->cc = imx_media_find_format(vdev->fmt.fmt.pix.pixelformat,
CS_SEL_ANY, false);
@@ -775,6 +802,9 @@ int imx_media_capture_device_register(struct imx_media_dev *md,
vfd->ctrl_handler = &priv->ctrl_hdlr;
+ /* add vdev to the video device list */
+ imx_media_add_video_device(priv->md, vdev);
+
return 0;
unreg:
video_unregister_device(vfd);
@@ -799,18 +829,19 @@ void imx_media_capture_device_unregister(struct imx_media_video_dev *vdev)
EXPORT_SYMBOL_GPL(imx_media_capture_device_unregister);
struct imx_media_video_dev *
-imx_media_capture_device_init(struct v4l2_subdev *src_sd, int pad)
+imx_media_capture_device_init(struct device *dev, struct v4l2_subdev *src_sd,
+ int pad)
{
struct capture_priv *priv;
struct video_device *vfd;
- priv = devm_kzalloc(src_sd->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return ERR_PTR(-ENOMEM);
priv->src_sd = src_sd;
priv->src_sd_pad = pad;
- priv->dev = src_sd->dev;
+ priv->dev = dev;
mutex_init(&priv->mutex);
spin_lock_init(&priv->q_lock);
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index 1d248aca40a9..0eeb0db6d83f 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -56,7 +56,6 @@ struct csi_skip_desc {
struct csi_priv {
struct device *dev;
struct ipu_soc *ipu;
- struct imx_media_dev *md;
struct v4l2_subdev sd;
struct media_pad pad[CSI_NUM_PADS];
/* the video device at IDMAC output pad */
@@ -178,8 +177,8 @@ static int csi_get_upstream_endpoint(struct csi_priv *priv,
* CSI-2 receiver if it is in the path, otherwise stay
* with video mux.
*/
- sd = imx_media_find_upstream_subdev(priv->md, src,
- IMX_MEDIA_GRP_ID_CSI2);
+ sd = imx_media_pipeline_subdev(src, IMX_MEDIA_GRP_ID_CSI2,
+ true);
if (!IS_ERR(sd))
src = &sd->entity;
}
@@ -193,9 +192,9 @@ static int csi_get_upstream_endpoint(struct csi_priv *priv,
src = &priv->sd.entity;
/* get source pad of entity directly upstream from src */
- pad = imx_media_find_upstream_pad(priv->md, src, 0);
- if (IS_ERR(pad))
- return PTR_ERR(pad);
+ pad = imx_media_pipeline_pad(src, 0, 0, true);
+ if (!pad)
+ return -ENODEV;
sd = media_entity_to_v4l2_subdev(pad->entity);
@@ -608,7 +607,7 @@ static int csi_idmac_start(struct csi_priv *priv)
outfmt = &vdev->fmt.fmt.pix;
- ret = imx_media_alloc_dma_buf(priv->md, &priv->underrun_buf,
+ ret = imx_media_alloc_dma_buf(priv->dev, &priv->underrun_buf,
outfmt->sizeimage);
if (ret)
goto out_put_ipu;
@@ -662,7 +661,7 @@ out_free_nfb4eof_irq:
out_unsetup:
csi_idmac_unsetup(priv, VB2_BUF_STATE_QUEUED);
out_free_dma_buf:
- imx_media_free_dma_buf(priv->md, &priv->underrun_buf);
+ imx_media_free_dma_buf(priv->dev, &priv->underrun_buf);
out_put_ipu:
csi_idmac_put_ipu_resources(priv);
return ret;
@@ -694,7 +693,7 @@ static void csi_idmac_stop(struct csi_priv *priv)
csi_idmac_unsetup(priv, VB2_BUF_STATE_ERROR);
- imx_media_free_dma_buf(priv->md, &priv->underrun_buf);
+ imx_media_free_dma_buf(priv->dev, &priv->underrun_buf);
/* cancel the EOF timeout timer */
del_timer_sync(&priv->eof_timeout_timer);
@@ -1134,8 +1133,7 @@ static int csi_link_validate(struct v4l2_subdev *sd,
*/
#if 0
mutex_unlock(&priv->lock);
- vc_num = imx_media_find_mipi_csi2_channel(priv->md,
- &priv->sd.entity);
+ vc_num = imx_media_find_mipi_csi2_channel(&priv->sd.entity);
if (vc_num < 0)
return vc_num;
mutex_lock(&priv->lock);
@@ -1502,13 +1500,10 @@ static int csi_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_format *sdformat)
{
struct csi_priv *priv = v4l2_get_subdevdata(sd);
- struct imx_media_video_dev *vdev = priv->vdev;
struct v4l2_fwnode_endpoint upstream_ep = { .bus_type = 0 };
const struct imx_media_pixfmt *cc;
- struct v4l2_pix_format vdev_fmt;
struct v4l2_mbus_framefmt *fmt;
struct v4l2_rect *crop, *compose;
- struct v4l2_rect vdev_compose;
int ret;
if (sdformat->pad >= CSI_NUM_PADS)
@@ -1558,19 +1553,9 @@ static int csi_set_fmt(struct v4l2_subdev *sd,
}
}
- if (sdformat->which == V4L2_SUBDEV_FORMAT_TRY)
- goto out;
-
- priv->cc[sdformat->pad] = cc;
-
- /* propagate IDMAC output pad format to capture device */
- imx_media_mbus_fmt_to_pix_fmt(&vdev_fmt, &vdev_compose,
- &priv->format_mbus[CSI_SRC_PAD_IDMAC],
- priv->cc[CSI_SRC_PAD_IDMAC]);
- mutex_unlock(&priv->lock);
- imx_media_capture_device_set_format(vdev, &vdev_fmt, &vdev_compose);
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ priv->cc[sdformat->pad] = cc;
- return 0;
out:
mutex_unlock(&priv->lock);
return ret;
@@ -1762,9 +1747,6 @@ static int csi_registered(struct v4l2_subdev *sd)
int i, ret;
u32 code;
- /* get media device */
- priv->md = dev_get_drvdata(sd->v4l2_dev->dev);
-
/* get handle to IPU CSI */
csi = ipu_csi_get(priv->ipu, priv->csi_id);
if (IS_ERR(csi)) {
@@ -1812,17 +1794,12 @@ static int csi_registered(struct v4l2_subdev *sd)
if (ret)
goto free_fim;
- ret = imx_media_capture_device_register(priv->md, priv->vdev);
+ ret = imx_media_capture_device_register(priv->vdev);
if (ret)
goto free_fim;
- ret = imx_media_add_video_device(priv->md, priv->vdev);
- if (ret)
- goto unreg;
-
return 0;
-unreg:
- imx_media_capture_device_unregister(priv->vdev);
+
free_fim:
if (priv->fim)
imx_media_fim_free(priv->fim);
@@ -1983,7 +1960,7 @@ static int imx_csi_probe(struct platform_device *pdev)
imx_media_grp_id_to_sd_name(priv->sd.name, sizeof(priv->sd.name),
priv->sd.grp_id, ipu_get_num(priv->ipu));
- priv->vdev = imx_media_capture_device_init(&priv->sd,
+ priv->vdev = imx_media_capture_device_init(priv->sd.dev, &priv->sd,
CSI_SRC_PAD_IDMAC);
if (IS_ERR(priv->vdev))
return PTR_ERR(priv->vdev);
diff --git a/drivers/staging/media/imx/imx-media-dev-common.c b/drivers/staging/media/imx/imx-media-dev-common.c
index 6cd93419b81d..66b505f7e8df 100644
--- a/drivers/staging/media/imx/imx-media-dev-common.c
+++ b/drivers/staging/media/imx/imx-media-dev-common.c
@@ -8,9 +8,341 @@
#include <linux/of_graph.h>
#include <linux/of_platform.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
#include "imx-media.h"
-static const struct v4l2_async_notifier_operations imx_media_subdev_ops = {
+static inline struct imx_media_dev *notifier2dev(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct imx_media_dev, notifier);
+}
+
+/* async subdev bound notifier */
+static int imx_media_subdev_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ v4l2_info(sd->v4l2_dev, "subdev %s bound\n", sd->name);
+
+ return 0;
+}
+
+/*
+ * Create the media links for all subdevs that registered.
+ * Called after all async subdevs have bound.
+ */
+static int imx_media_create_links(struct v4l2_async_notifier *notifier)
+{
+ struct imx_media_dev *imxmd = notifier2dev(notifier);
+ struct v4l2_subdev *sd;
+
+ list_for_each_entry(sd, &imxmd->v4l2_dev.subdevs, list) {
+ switch (sd->grp_id) {
+ case IMX_MEDIA_GRP_ID_IPU_VDIC:
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRP:
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRPENC:
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRPVF:
+ /*
+ * links have already been created for the
+ * sync-registered subdevs.
+ */
+ break;
+ case IMX_MEDIA_GRP_ID_IPU_CSI0:
+ case IMX_MEDIA_GRP_ID_IPU_CSI1:
+ case IMX_MEDIA_GRP_ID_CSI:
+ imx_media_create_csi_of_links(imxmd, sd);
+ break;
+ default:
+ /*
+ * if this subdev has fwnode links, create media
+ * links for them.
+ */
+ imx_media_create_of_links(imxmd, sd);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * adds given video device to given imx-media source pad vdev list.
+ * Continues upstream from the pad entity's sink pads.
+ */
+static int imx_media_add_vdev_to_pad(struct imx_media_dev *imxmd,
+ struct imx_media_video_dev *vdev,
+ struct media_pad *srcpad)
+{
+ struct media_entity *entity = srcpad->entity;
+ struct imx_media_pad_vdev *pad_vdev;
+ struct list_head *pad_vdev_list;
+ struct media_link *link;
+ struct v4l2_subdev *sd;
+ int i, ret;
+
+ /* skip this entity if not a v4l2_subdev */
+ if (!is_media_entity_v4l2_subdev(entity))
+ return 0;
+
+ sd = media_entity_to_v4l2_subdev(entity);
+
+ pad_vdev_list = to_pad_vdev_list(sd, srcpad->index);
+ if (!pad_vdev_list) {
+ v4l2_warn(&imxmd->v4l2_dev, "%s:%u has no vdev list!\n",
+ entity->name, srcpad->index);
+ /*
+ * shouldn't happen, but no reason to fail driver load,
+ * just skip this entity.
+ */
+ return 0;
+ }
+
+ /* just return if we've been here before */
+ list_for_each_entry(pad_vdev, pad_vdev_list, list) {
+ if (pad_vdev->vdev == vdev)
+ return 0;
+ }
+
+ dev_dbg(imxmd->md.dev, "adding %s to pad %s:%u\n",
+ vdev->vfd->entity.name, entity->name, srcpad->index);
+
+ pad_vdev = devm_kzalloc(imxmd->md.dev, sizeof(*pad_vdev), GFP_KERNEL);
+ if (!pad_vdev)
+ return -ENOMEM;
+
+ /* attach this vdev to this pad */
+ pad_vdev->vdev = vdev;
+ list_add_tail(&pad_vdev->list, pad_vdev_list);
+
+ /* move upstream from this entity's sink pads */
+ for (i = 0; i < entity->num_pads; i++) {
+ struct media_pad *pad = &entity->pads[i];
+
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ continue;
+
+ list_for_each_entry(link, &entity->links, list) {
+ if (link->sink != pad)
+ continue;
+ ret = imx_media_add_vdev_to_pad(imxmd, vdev,
+ link->source);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * For every subdevice, allocate an array of list_head's, one list_head
+ * for each pad, to hold the list of video devices reachable from that
+ * pad.
+ */
+static int imx_media_alloc_pad_vdev_lists(struct imx_media_dev *imxmd)
+{
+ struct list_head *vdev_lists;
+ struct media_entity *entity;
+ struct v4l2_subdev *sd;
+ int i;
+
+ list_for_each_entry(sd, &imxmd->v4l2_dev.subdevs, list) {
+ entity = &sd->entity;
+ vdev_lists = devm_kcalloc(imxmd->md.dev,
+ entity->num_pads, sizeof(*vdev_lists),
+ GFP_KERNEL);
+ if (!vdev_lists)
+ return -ENOMEM;
+
+ /* attach to the subdev's host private pointer */
+ sd->host_priv = vdev_lists;
+
+ for (i = 0; i < entity->num_pads; i++)
+ INIT_LIST_HEAD(to_pad_vdev_list(sd, i));
+ }
+
+ return 0;
+}
+
+/* form the vdev lists in all imx-media source pads */
+static int imx_media_create_pad_vdev_lists(struct imx_media_dev *imxmd)
+{
+ struct imx_media_video_dev *vdev;
+ struct media_link *link;
+ int ret;
+
+ ret = imx_media_alloc_pad_vdev_lists(imxmd);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(vdev, &imxmd->vdev_list, list) {
+ link = list_first_entry(&vdev->vfd->entity.links,
+ struct media_link, list);
+ ret = imx_media_add_vdev_to_pad(imxmd, vdev, link->source);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* async subdev complete notifier */
+int imx_media_probe_complete(struct v4l2_async_notifier *notifier)
+{
+ struct imx_media_dev *imxmd = notifier2dev(notifier);
+ int ret;
+
+ mutex_lock(&imxmd->mutex);
+
+ ret = imx_media_create_links(notifier);
+ if (ret)
+ goto unlock;
+
+ ret = imx_media_create_pad_vdev_lists(imxmd);
+ if (ret)
+ goto unlock;
+
+ ret = v4l2_device_register_subdev_nodes(&imxmd->v4l2_dev);
+unlock:
+ mutex_unlock(&imxmd->mutex);
+ if (ret)
+ return ret;
+
+ return media_device_register(&imxmd->md);
+}
+EXPORT_SYMBOL_GPL(imx_media_probe_complete);
+
+/*
+ * adds controls to a video device from an entity subdevice.
+ * Continues upstream from the entity's sink pads.
+ */
+static int imx_media_inherit_controls(struct imx_media_dev *imxmd,
+ struct video_device *vfd,
+ struct media_entity *entity)
+{
+ int i, ret = 0;
+
+ if (is_media_entity_v4l2_subdev(entity)) {
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+
+ dev_dbg(imxmd->md.dev,
+ "adding controls to %s from %s\n",
+ vfd->entity.name, sd->entity.name);
+
+ ret = v4l2_ctrl_add_handler(vfd->ctrl_handler,
+ sd->ctrl_handler,
+ NULL, true);
+ if (ret)
+ return ret;
+ }
+
+ /* move upstream */
+ for (i = 0; i < entity->num_pads; i++) {
+ struct media_pad *pad, *spad = &entity->pads[i];
+
+ if (!(spad->flags & MEDIA_PAD_FL_SINK))
+ continue;
+
+ pad = media_entity_remote_pad(spad);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ continue;
+
+ ret = imx_media_inherit_controls(imxmd, vfd, pad->entity);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int imx_media_link_notify(struct media_link *link, u32 flags,
+ unsigned int notification)
+{
+ struct imx_media_dev *imxmd = container_of(link->graph_obj.mdev,
+ struct imx_media_dev, md);
+ struct media_entity *source = link->source->entity;
+ struct imx_media_pad_vdev *pad_vdev;
+ struct list_head *pad_vdev_list;
+ struct video_device *vfd;
+ struct v4l2_subdev *sd;
+ int pad_idx, ret;
+
+ ret = v4l2_pipeline_link_notify(link, flags, notification);
+ if (ret)
+ return ret;
+
+ /* don't bother if source is not a subdev */
+ if (!is_media_entity_v4l2_subdev(source))
+ return 0;
+
+ sd = media_entity_to_v4l2_subdev(source);
+ pad_idx = link->source->index;
+
+ pad_vdev_list = to_pad_vdev_list(sd, pad_idx);
+ if (!pad_vdev_list) {
+ /* nothing to do if source sd has no pad vdev list */
+ return 0;
+ }
+
+ /*
+ * Before disabling a link, reset controls for all video
+ * devices reachable from this link.
+ *
+ * After enabling a link, refresh controls for all video
+ * devices reachable from this link.
+ */
+ if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH &&
+ !(flags & MEDIA_LNK_FL_ENABLED)) {
+ list_for_each_entry(pad_vdev, pad_vdev_list, list) {
+ vfd = pad_vdev->vdev->vfd;
+ dev_dbg(imxmd->md.dev,
+ "reset controls for %s\n",
+ vfd->entity.name);
+ v4l2_ctrl_handler_free(vfd->ctrl_handler);
+ v4l2_ctrl_handler_init(vfd->ctrl_handler, 0);
+ }
+ } else if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
+ (link->flags & MEDIA_LNK_FL_ENABLED)) {
+ list_for_each_entry(pad_vdev, pad_vdev_list, list) {
+ vfd = pad_vdev->vdev->vfd;
+ dev_dbg(imxmd->md.dev,
+ "refresh controls for %s\n",
+ vfd->entity.name);
+ ret = imx_media_inherit_controls(imxmd, vfd,
+ &vfd->entity);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void imx_media_notify(struct v4l2_subdev *sd, unsigned int notification,
+ void *arg)
+{
+ struct media_entity *entity = &sd->entity;
+ int i;
+
+ if (notification != V4L2_DEVICE_NOTIFY_EVENT)
+ return;
+
+ for (i = 0; i < entity->num_pads; i++) {
+ struct media_pad *pad = &entity->pads[i];
+ struct imx_media_pad_vdev *pad_vdev;
+ struct list_head *pad_vdev_list;
+
+ pad_vdev_list = to_pad_vdev_list(sd, pad->index);
+ if (!pad_vdev_list)
+ continue;
+ list_for_each_entry(pad_vdev, pad_vdev_list, list)
+ v4l2_event_queue(pad_vdev->vdev->vfd, arg);
+ }
+}
+
+static const struct v4l2_async_notifier_operations imx_media_notifier_ops = {
.bound = imx_media_subdev_bound,
.complete = imx_media_probe_complete,
};
@@ -19,7 +351,8 @@ static const struct media_device_ops imx_media_md_ops = {
.link_notify = imx_media_link_notify,
};
-struct imx_media_dev *imx_media_dev_init(struct device *dev)
+struct imx_media_dev *imx_media_dev_init(struct device *dev,
+ const struct media_device_ops *ops)
{
struct imx_media_dev *imxmd;
int ret;
@@ -31,7 +364,7 @@ struct imx_media_dev *imx_media_dev_init(struct device *dev)
dev_set_drvdata(dev, imxmd);
strscpy(imxmd->md.model, "imx-media", sizeof(imxmd->md.model));
- imxmd->md.ops = &imx_media_md_ops;
+ imxmd->md.ops = ops ? ops : &imx_media_md_ops;
imxmd->md.dev = dev;
mutex_init(&imxmd->mutex);
@@ -50,8 +383,6 @@ struct imx_media_dev *imx_media_dev_init(struct device *dev)
goto cleanup;
}
- dev_set_drvdata(imxmd->v4l2_dev.dev, imxmd);
-
INIT_LIST_HEAD(&imxmd->vdev_list);
v4l2_async_notifier_init(&imxmd->notifier);
@@ -65,7 +396,8 @@ cleanup:
}
EXPORT_SYMBOL_GPL(imx_media_dev_init);
-int imx_media_dev_notifier_register(struct imx_media_dev *imxmd)
+int imx_media_dev_notifier_register(struct imx_media_dev *imxmd,
+ const struct v4l2_async_notifier_operations *ops)
{
int ret;
@@ -76,7 +408,7 @@ int imx_media_dev_notifier_register(struct imx_media_dev *imxmd)
}
/* prepare the async subdev notifier and register it */
- imxmd->notifier.ops = &imx_media_subdev_ops;
+ imxmd->notifier.ops = ops ? ops : &imx_media_notifier_ops;
ret = v4l2_async_notifier_register(&imxmd->v4l2_dev,
&imxmd->notifier);
if (ret) {
diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
index 6be95584006d..6ac371f6e971 100644
--- a/drivers/staging/media/imx/imx-media-dev.c
+++ b/drivers/staging/media/imx/imx-media-dev.c
@@ -2,24 +2,13 @@
/*
* V4L2 Media Controller Driver for Freescale i.MX5/6 SOC
*
- * Copyright (c) 2016 Mentor Graphics Inc.
+ * Copyright (c) 2016-2019 Mentor Graphics Inc.
*/
-#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/module.h>
-#include <linux/of_graph.h>
-#include <linux/of_platform.h>
-#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/timer.h>
-#include <media/v4l2-ctrls.h>
+#include <media/v4l2-async.h>
#include <media/v4l2-event.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-mc.h>
-#include <video/imx-ipu-v3.h>
#include <media/imx.h>
#include "imx-media.h"
@@ -28,433 +17,31 @@ static inline struct imx_media_dev *notifier2dev(struct v4l2_async_notifier *n)
return container_of(n, struct imx_media_dev, notifier);
}
-/*
- * Adds a subdev to the root notifier's async subdev list. If fwnode is
- * non-NULL, adds the async as a V4L2_ASYNC_MATCH_FWNODE match type,
- * otherwise as a V4L2_ASYNC_MATCH_DEVNAME match type using the dev_name
- * of the given platform_device. This is called during driver load when
- * forming the async subdev list.
- */
-int imx_media_add_async_subdev(struct imx_media_dev *imxmd,
- struct fwnode_handle *fwnode,
- struct platform_device *pdev)
-{
- struct device_node *np = to_of_node(fwnode);
- struct imx_media_async_subdev *imxasd;
- struct v4l2_async_subdev *asd;
- const char *devname = NULL;
- int ret;
-
- if (fwnode) {
- asd = v4l2_async_notifier_add_fwnode_subdev(&imxmd->notifier,
- fwnode,
- sizeof(*imxasd));
- } else {
- devname = dev_name(&pdev->dev);
- asd = v4l2_async_notifier_add_devname_subdev(&imxmd->notifier,
- devname,
- sizeof(*imxasd));
- }
-
- if (IS_ERR(asd)) {
- ret = PTR_ERR(asd);
- if (ret == -EEXIST) {
- if (np)
- dev_dbg(imxmd->md.dev, "%s: already added %pOFn\n",
- __func__, np);
- else
- dev_dbg(imxmd->md.dev, "%s: already added %s\n",
- __func__, devname);
- }
- return ret;
- }
-
- imxasd = to_imx_media_asd(asd);
-
- if (devname)
- imxasd->pdev = pdev;
-
- if (np)
- dev_dbg(imxmd->md.dev, "%s: added %pOFn, match type FWNODE\n",
- __func__, np);
- else
- dev_dbg(imxmd->md.dev, "%s: added %s, match type DEVNAME\n",
- __func__, devname);
-
- return 0;
-}
-
-/*
- * get IPU from this CSI and add it to the list of IPUs
- * the media driver will control.
- */
-static int imx_media_get_ipu(struct imx_media_dev *imxmd,
- struct v4l2_subdev *csi_sd)
-{
- struct ipu_soc *ipu;
- int ipu_id;
-
- ipu = dev_get_drvdata(csi_sd->dev->parent);
- if (!ipu) {
- v4l2_err(&imxmd->v4l2_dev,
- "CSI %s has no parent IPU!\n", csi_sd->name);
- return -ENODEV;
- }
-
- ipu_id = ipu_get_num(ipu);
- if (ipu_id > 1) {
- v4l2_err(&imxmd->v4l2_dev, "invalid IPU id %d!\n", ipu_id);
- return -ENODEV;
- }
-
- if (!imxmd->ipu[ipu_id])
- imxmd->ipu[ipu_id] = ipu;
-
- return 0;
-}
-
/* async subdev bound notifier */
-int imx_media_subdev_bound(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd)
+static int imx_media_subdev_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
{
struct imx_media_dev *imxmd = notifier2dev(notifier);
- int ret = 0;
-
- mutex_lock(&imxmd->mutex);
+ int ret;
if (sd->grp_id & IMX_MEDIA_GRP_ID_IPU_CSI) {
- ret = imx_media_get_ipu(imxmd, sd);
+ /* register the IPU internal subdevs */
+ ret = imx_media_register_ipu_internal_subdevs(imxmd, sd);
if (ret)
- goto out;
+ return ret;
}
v4l2_info(&imxmd->v4l2_dev, "subdev %s bound\n", sd->name);
-out:
- mutex_unlock(&imxmd->mutex);
- return ret;
-}
-
-/*
- * Create the media links for all subdevs that registered.
- * Called after all async subdevs have bound.
- */
-static int imx_media_create_links(struct v4l2_async_notifier *notifier)
-{
- struct imx_media_dev *imxmd = notifier2dev(notifier);
- struct v4l2_subdev *sd;
- int ret;
-
- list_for_each_entry(sd, &imxmd->v4l2_dev.subdevs, list) {
- switch (sd->grp_id) {
- case IMX_MEDIA_GRP_ID_IPU_VDIC:
- case IMX_MEDIA_GRP_ID_IPU_IC_PRP:
- case IMX_MEDIA_GRP_ID_IPU_IC_PRPENC:
- case IMX_MEDIA_GRP_ID_IPU_IC_PRPVF:
- case IMX_MEDIA_GRP_ID_IPU_CSI0:
- case IMX_MEDIA_GRP_ID_IPU_CSI1:
- ret = imx_media_create_ipu_internal_links(imxmd, sd);
- if (ret)
- return ret;
- /*
- * the CSIs straddle between the external and the IPU
- * internal entities, so create the external links
- * to the CSI sink pads.
- */
- if (sd->grp_id & IMX_MEDIA_GRP_ID_IPU_CSI)
- imx_media_create_csi_of_links(imxmd, sd);
- break;
- case IMX_MEDIA_GRP_ID_CSI:
- imx_media_create_csi_of_links(imxmd, sd);
-
- break;
- default:
- /*
- * if this subdev has fwnode links, create media
- * links for them.
- */
- imx_media_create_of_links(imxmd, sd);
- break;
- }
- }
-
- return 0;
-}
-
-/*
- * adds given video device to given imx-media source pad vdev list.
- * Continues upstream from the pad entity's sink pads.
- */
-static int imx_media_add_vdev_to_pad(struct imx_media_dev *imxmd,
- struct imx_media_video_dev *vdev,
- struct media_pad *srcpad)
-{
- struct media_entity *entity = srcpad->entity;
- struct imx_media_pad_vdev *pad_vdev;
- struct list_head *pad_vdev_list;
- struct media_link *link;
- struct v4l2_subdev *sd;
- int i, ret;
-
- /* skip this entity if not a v4l2_subdev */
- if (!is_media_entity_v4l2_subdev(entity))
- return 0;
-
- sd = media_entity_to_v4l2_subdev(entity);
-
- pad_vdev_list = to_pad_vdev_list(sd, srcpad->index);
- if (!pad_vdev_list) {
- v4l2_warn(&imxmd->v4l2_dev, "%s:%u has no vdev list!\n",
- entity->name, srcpad->index);
- /*
- * shouldn't happen, but no reason to fail driver load,
- * just skip this entity.
- */
- return 0;
- }
-
- /* just return if we've been here before */
- list_for_each_entry(pad_vdev, pad_vdev_list, list) {
- if (pad_vdev->vdev == vdev)
- return 0;
- }
-
- dev_dbg(imxmd->md.dev, "adding %s to pad %s:%u\n",
- vdev->vfd->entity.name, entity->name, srcpad->index);
-
- pad_vdev = devm_kzalloc(imxmd->md.dev, sizeof(*pad_vdev), GFP_KERNEL);
- if (!pad_vdev)
- return -ENOMEM;
-
- /* attach this vdev to this pad */
- pad_vdev->vdev = vdev;
- list_add_tail(&pad_vdev->list, pad_vdev_list);
-
- /* move upstream from this entity's sink pads */
- for (i = 0; i < entity->num_pads; i++) {
- struct media_pad *pad = &entity->pads[i];
-
- if (!(pad->flags & MEDIA_PAD_FL_SINK))
- continue;
-
- list_for_each_entry(link, &entity->links, list) {
- if (link->sink != pad)
- continue;
- ret = imx_media_add_vdev_to_pad(imxmd, vdev,
- link->source);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
-/*
- * For every subdevice, allocate an array of list_head's, one list_head
- * for each pad, to hold the list of video devices reachable from that
- * pad.
- */
-static int imx_media_alloc_pad_vdev_lists(struct imx_media_dev *imxmd)
-{
- struct list_head *vdev_lists;
- struct media_entity *entity;
- struct v4l2_subdev *sd;
- int i;
-
- list_for_each_entry(sd, &imxmd->v4l2_dev.subdevs, list) {
- entity = &sd->entity;
- vdev_lists = devm_kcalloc(imxmd->md.dev,
- entity->num_pads, sizeof(*vdev_lists),
- GFP_KERNEL);
- if (!vdev_lists)
- return -ENOMEM;
-
- /* attach to the subdev's host private pointer */
- sd->host_priv = vdev_lists;
-
- for (i = 0; i < entity->num_pads; i++)
- INIT_LIST_HEAD(to_pad_vdev_list(sd, i));
- }
-
- return 0;
-}
-
-/* form the vdev lists in all imx-media source pads */
-static int imx_media_create_pad_vdev_lists(struct imx_media_dev *imxmd)
-{
- struct imx_media_video_dev *vdev;
- struct media_link *link;
- int ret;
-
- ret = imx_media_alloc_pad_vdev_lists(imxmd);
- if (ret)
- return ret;
-
- list_for_each_entry(vdev, &imxmd->vdev_list, list) {
- link = list_first_entry(&vdev->vfd->entity.links,
- struct media_link, list);
- ret = imx_media_add_vdev_to_pad(imxmd, vdev, link->source);
- if (ret)
- return ret;
- }
return 0;
}
/* async subdev complete notifier */
-int imx_media_probe_complete(struct v4l2_async_notifier *notifier)
-{
- struct imx_media_dev *imxmd = notifier2dev(notifier);
- int ret;
-
- mutex_lock(&imxmd->mutex);
-
- ret = imx_media_create_links(notifier);
- if (ret)
- goto unlock;
-
- ret = imx_media_create_pad_vdev_lists(imxmd);
- if (ret)
- goto unlock;
-
- ret = v4l2_device_register_subdev_nodes(&imxmd->v4l2_dev);
-unlock:
- mutex_unlock(&imxmd->mutex);
- if (ret)
- return ret;
-
- return media_device_register(&imxmd->md);
-}
-
-/*
- * adds controls to a video device from an entity subdevice.
- * Continues upstream from the entity's sink pads.
- */
-static int imx_media_inherit_controls(struct imx_media_dev *imxmd,
- struct video_device *vfd,
- struct media_entity *entity)
-{
- int i, ret = 0;
-
- if (is_media_entity_v4l2_subdev(entity)) {
- struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
-
- dev_dbg(imxmd->md.dev,
- "adding controls to %s from %s\n",
- vfd->entity.name, sd->entity.name);
-
- ret = v4l2_ctrl_add_handler(vfd->ctrl_handler,
- sd->ctrl_handler,
- NULL, true);
- if (ret)
- return ret;
- }
-
- /* move upstream */
- for (i = 0; i < entity->num_pads; i++) {
- struct media_pad *pad, *spad = &entity->pads[i];
-
- if (!(spad->flags & MEDIA_PAD_FL_SINK))
- continue;
-
- pad = media_entity_remote_pad(spad);
- if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
- continue;
-
- ret = imx_media_inherit_controls(imxmd, vfd, pad->entity);
- if (ret)
- break;
- }
-
- return ret;
-}
-
-int imx_media_link_notify(struct media_link *link, u32 flags,
- unsigned int notification)
-{
- struct media_entity *source = link->source->entity;
- struct imx_media_pad_vdev *pad_vdev;
- struct list_head *pad_vdev_list;
- struct imx_media_dev *imxmd;
- struct video_device *vfd;
- struct v4l2_subdev *sd;
- int pad_idx, ret;
-
- ret = v4l2_pipeline_link_notify(link, flags, notification);
- if (ret)
- return ret;
-
- /* don't bother if source is not a subdev */
- if (!is_media_entity_v4l2_subdev(source))
- return 0;
-
- sd = media_entity_to_v4l2_subdev(source);
- pad_idx = link->source->index;
-
- imxmd = dev_get_drvdata(sd->v4l2_dev->dev);
-
- pad_vdev_list = to_pad_vdev_list(sd, pad_idx);
- if (!pad_vdev_list) {
- /* shouldn't happen, but no reason to fail link setup */
- return 0;
- }
-
- /*
- * Before disabling a link, reset controls for all video
- * devices reachable from this link.
- *
- * After enabling a link, refresh controls for all video
- * devices reachable from this link.
- */
- if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH &&
- !(flags & MEDIA_LNK_FL_ENABLED)) {
- list_for_each_entry(pad_vdev, pad_vdev_list, list) {
- vfd = pad_vdev->vdev->vfd;
- dev_dbg(imxmd->md.dev,
- "reset controls for %s\n",
- vfd->entity.name);
- v4l2_ctrl_handler_free(vfd->ctrl_handler);
- v4l2_ctrl_handler_init(vfd->ctrl_handler, 0);
- }
- } else if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
- (link->flags & MEDIA_LNK_FL_ENABLED)) {
- list_for_each_entry(pad_vdev, pad_vdev_list, list) {
- vfd = pad_vdev->vdev->vfd;
- dev_dbg(imxmd->md.dev,
- "refresh controls for %s\n",
- vfd->entity.name);
- ret = imx_media_inherit_controls(imxmd, vfd,
- &vfd->entity);
- if (ret)
- break;
- }
- }
-
- return ret;
-}
-
-void imx_media_notify(struct v4l2_subdev *sd, unsigned int notification,
- void *arg)
-{
- struct media_entity *entity = &sd->entity;
- int i;
-
- if (notification != V4L2_DEVICE_NOTIFY_EVENT)
- return;
-
- for (i = 0; i < entity->num_pads; i++) {
- struct media_pad *pad = &entity->pads[i];
- struct imx_media_pad_vdev *pad_vdev;
- struct list_head *pad_vdev_list;
-
- pad_vdev_list = to_pad_vdev_list(sd, pad->index);
- if (!pad_vdev_list)
- continue;
- list_for_each_entry(pad_vdev, pad_vdev_list, list)
- v4l2_event_queue(pad_vdev->vdev->vfd, arg);
- }
-}
+static const struct v4l2_async_notifier_operations imx_media_notifier_ops = {
+ .bound = imx_media_subdev_bound,
+ .complete = imx_media_probe_complete,
+};
static int imx_media_probe(struct platform_device *pdev)
{
@@ -463,7 +50,7 @@ static int imx_media_probe(struct platform_device *pdev)
struct imx_media_dev *imxmd;
int ret;
- imxmd = imx_media_dev_init(dev);
+ imxmd = imx_media_dev_init(dev, NULL);
if (IS_ERR(imxmd))
return PTR_ERR(imxmd);
@@ -474,14 +61,12 @@ static int imx_media_probe(struct platform_device *pdev)
goto cleanup;
}
- ret = imx_media_dev_notifier_register(imxmd);
+ ret = imx_media_dev_notifier_register(imxmd, &imx_media_notifier_ops);
if (ret)
- goto del_int;
+ goto cleanup;
return 0;
-del_int:
- imx_media_remove_ipu_internal_subdevs(imxmd);
cleanup:
v4l2_async_notifier_cleanup(&imxmd->notifier);
v4l2_device_unregister(&imxmd->v4l2_dev);
@@ -498,7 +83,7 @@ static int imx_media_remove(struct platform_device *pdev)
v4l2_info(&imxmd->v4l2_dev, "Removing imx-media\n");
v4l2_async_notifier_unregister(&imxmd->notifier);
- imx_media_remove_ipu_internal_subdevs(imxmd);
+ imx_media_unregister_ipu_internal_subdevs(imxmd);
v4l2_async_notifier_cleanup(&imxmd->notifier);
media_device_unregister(&imxmd->md);
v4l2_device_unregister(&imxmd->v4l2_dev);
diff --git a/drivers/staging/media/imx/imx-media-fim.c b/drivers/staging/media/imx/imx-media-fim.c
index 2ab64bc30f5c..3a9182933508 100644
--- a/drivers/staging/media/imx/imx-media-fim.c
+++ b/drivers/staging/media/imx/imx-media-fim.c
@@ -37,8 +37,6 @@ enum {
#define FIM_CL_TOLERANCE_MAX_DEF 0 /* no max tolerance (unbounded) */
struct imx_media_fim {
- struct imx_media_dev *md;
-
/* the owning subdev of this fim instance */
struct v4l2_subdev *sd;
@@ -416,7 +414,6 @@ void imx_media_fim_eof_monitor(struct imx_media_fim *fim, ktime_t timestamp)
spin_unlock_irqrestore(&fim->lock, flags);
}
-EXPORT_SYMBOL_GPL(imx_media_fim_eof_monitor);
/* Called by the subdev in its s_stream callback */
int imx_media_fim_set_stream(struct imx_media_fim *fim,
@@ -453,7 +450,6 @@ out:
v4l2_ctrl_unlock(fim->ctrl[FIM_CL_ENABLE]);
return ret;
}
-EXPORT_SYMBOL_GPL(imx_media_fim_set_stream);
int imx_media_fim_add_controls(struct imx_media_fim *fim)
{
@@ -461,7 +457,6 @@ int imx_media_fim_add_controls(struct imx_media_fim *fim)
return v4l2_ctrl_add_handler(fim->sd->ctrl_handler,
&fim->ctrl_handler, NULL, false);
}
-EXPORT_SYMBOL_GPL(imx_media_fim_add_controls);
/* Called by the subdev in its subdev registered callback */
struct imx_media_fim *imx_media_fim_init(struct v4l2_subdev *sd)
@@ -473,8 +468,6 @@ struct imx_media_fim *imx_media_fim_init(struct v4l2_subdev *sd)
if (!fim)
return ERR_PTR(-ENOMEM);
- /* get media device */
- fim->md = dev_get_drvdata(sd->v4l2_dev->dev);
fim->sd = sd;
spin_lock_init(&fim->lock);
@@ -485,10 +478,8 @@ struct imx_media_fim *imx_media_fim_init(struct v4l2_subdev *sd)
return fim;
}
-EXPORT_SYMBOL_GPL(imx_media_fim_init);
void imx_media_fim_free(struct imx_media_fim *fim)
{
v4l2_ctrl_handler_free(&fim->ctrl_handler);
}
-EXPORT_SYMBOL_GPL(imx_media_fim_free);
diff --git a/drivers/staging/media/imx/imx-media-internal-sd.c b/drivers/staging/media/imx/imx-media-internal-sd.c
index df49ebfbe98a..cb1e4cdd5079 100644
--- a/drivers/staging/media/imx/imx-media-internal-sd.c
+++ b/drivers/staging/media/imx/imx-media-internal-sd.c
@@ -9,208 +9,138 @@
#include <linux/platform_device.h>
#include "imx-media.h"
-enum isd_enum {
- isd_csi0 = 0,
- isd_csi1,
- isd_vdic,
- isd_ic_prp,
- isd_ic_prpenc,
- isd_ic_prpvf,
- num_isd,
-};
-
-static const struct internal_subdev_id {
- enum isd_enum index;
- const char *name;
- u32 grp_id;
-} isd_id[num_isd] = {
- [isd_csi0] = {
- .index = isd_csi0,
- .grp_id = IMX_MEDIA_GRP_ID_IPU_CSI0,
- .name = "imx-ipuv3-csi",
- },
- [isd_csi1] = {
- .index = isd_csi1,
- .grp_id = IMX_MEDIA_GRP_ID_IPU_CSI1,
- .name = "imx-ipuv3-csi",
- },
- [isd_vdic] = {
- .index = isd_vdic,
- .grp_id = IMX_MEDIA_GRP_ID_IPU_VDIC,
- .name = "imx-ipuv3-vdic",
- },
- [isd_ic_prp] = {
- .index = isd_ic_prp,
- .grp_id = IMX_MEDIA_GRP_ID_IPU_IC_PRP,
- .name = "imx-ipuv3-ic",
- },
- [isd_ic_prpenc] = {
- .index = isd_ic_prpenc,
- .grp_id = IMX_MEDIA_GRP_ID_IPU_IC_PRPENC,
- .name = "imx-ipuv3-ic",
- },
- [isd_ic_prpvf] = {
- .index = isd_ic_prpvf,
- .grp_id = IMX_MEDIA_GRP_ID_IPU_IC_PRPVF,
- .name = "imx-ipuv3-ic",
- },
-};
+/* max pads per internal-sd */
+#define MAX_INTERNAL_PADS 8
+/* max links per internal-sd pad */
+#define MAX_INTERNAL_LINKS 8
struct internal_subdev;
struct internal_link {
- const struct internal_subdev *remote;
+ int remote;
int local_pad;
int remote_pad;
};
-/* max pads per internal-sd */
-#define MAX_INTERNAL_PADS 8
-/* max links per internal-sd pad */
-#define MAX_INTERNAL_LINKS 8
-
struct internal_pad {
+ int num_links;
struct internal_link link[MAX_INTERNAL_LINKS];
};
-static const struct internal_subdev {
- const struct internal_subdev_id *id;
+struct internal_subdev {
+ u32 grp_id;
struct internal_pad pad[MAX_INTERNAL_PADS];
-} int_subdev[num_isd] = {
- [isd_csi0] = {
- .id = &isd_id[isd_csi0],
+
+ struct v4l2_subdev * (*sync_register)(struct v4l2_device *v4l2_dev,
+ struct device *ipu_dev,
+ struct ipu_soc *ipu,
+ u32 grp_id);
+ int (*sync_unregister)(struct v4l2_subdev *sd);
+};
+
+static const struct internal_subdev int_subdev[NUM_IPU_SUBDEVS] = {
+ [IPU_CSI0] = {
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_CSI0,
.pad[CSI_SRC_PAD_DIRECT] = {
+ .num_links = 2,
.link = {
{
.local_pad = CSI_SRC_PAD_DIRECT,
- .remote = &int_subdev[isd_ic_prp],
+ .remote = IPU_IC_PRP,
.remote_pad = PRP_SINK_PAD,
}, {
.local_pad = CSI_SRC_PAD_DIRECT,
- .remote = &int_subdev[isd_vdic],
+ .remote = IPU_VDIC,
.remote_pad = VDIC_SINK_PAD_DIRECT,
},
},
},
},
- [isd_csi1] = {
- .id = &isd_id[isd_csi1],
+ [IPU_CSI1] = {
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_CSI1,
.pad[CSI_SRC_PAD_DIRECT] = {
+ .num_links = 2,
.link = {
{
.local_pad = CSI_SRC_PAD_DIRECT,
- .remote = &int_subdev[isd_ic_prp],
+ .remote = IPU_IC_PRP,
.remote_pad = PRP_SINK_PAD,
}, {
.local_pad = CSI_SRC_PAD_DIRECT,
- .remote = &int_subdev[isd_vdic],
+ .remote = IPU_VDIC,
.remote_pad = VDIC_SINK_PAD_DIRECT,
},
},
},
},
- [isd_vdic] = {
- .id = &isd_id[isd_vdic],
+ [IPU_VDIC] = {
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_VDIC,
+ .sync_register = imx_media_vdic_register,
+ .sync_unregister = imx_media_vdic_unregister,
.pad[VDIC_SRC_PAD_DIRECT] = {
+ .num_links = 1,
.link = {
{
.local_pad = VDIC_SRC_PAD_DIRECT,
- .remote = &int_subdev[isd_ic_prp],
+ .remote = IPU_IC_PRP,
.remote_pad = PRP_SINK_PAD,
},
},
},
},
- [isd_ic_prp] = {
- .id = &isd_id[isd_ic_prp],
+ [IPU_IC_PRP] = {
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_IC_PRP,
+ .sync_register = imx_media_ic_register,
+ .sync_unregister = imx_media_ic_unregister,
.pad[PRP_SRC_PAD_PRPENC] = {
+ .num_links = 1,
.link = {
{
.local_pad = PRP_SRC_PAD_PRPENC,
- .remote = &int_subdev[isd_ic_prpenc],
- .remote_pad = 0,
+ .remote = IPU_IC_PRPENC,
+ .remote_pad = PRPENCVF_SINK_PAD,
},
},
},
.pad[PRP_SRC_PAD_PRPVF] = {
+ .num_links = 1,
.link = {
{
.local_pad = PRP_SRC_PAD_PRPVF,
- .remote = &int_subdev[isd_ic_prpvf],
- .remote_pad = 0,
+ .remote = IPU_IC_PRPVF,
+ .remote_pad = PRPENCVF_SINK_PAD,
},
},
},
},
- [isd_ic_prpenc] = {
- .id = &isd_id[isd_ic_prpenc],
+ [IPU_IC_PRPENC] = {
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_IC_PRPENC,
+ .sync_register = imx_media_ic_register,
+ .sync_unregister = imx_media_ic_unregister,
},
- [isd_ic_prpvf] = {
- .id = &isd_id[isd_ic_prpvf],
+ [IPU_IC_PRPVF] = {
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_IC_PRPVF,
+ .sync_register = imx_media_ic_register,
+ .sync_unregister = imx_media_ic_unregister,
},
};
-/* form a device name given an internal subdev and ipu id */
-static inline void isd_to_devname(char *devname, int sz,
- const struct internal_subdev *isd,
- int ipu_id)
-{
- int pdev_id = ipu_id * num_isd + isd->id->index;
-
- snprintf(devname, sz, "%s.%d", isd->id->name, pdev_id);
-}
-
-static const struct internal_subdev *find_intsd_by_grp_id(u32 grp_id)
-{
- enum isd_enum i;
-
- for (i = 0; i < num_isd; i++) {
- const struct internal_subdev *isd = &int_subdev[i];
-
- if (isd->id->grp_id == grp_id)
- return isd;
- }
-
- return NULL;
-}
-
-static struct v4l2_subdev *find_sink(struct imx_media_dev *imxmd,
- struct v4l2_subdev *src,
- const struct internal_link *link)
-{
- char sink_devname[32];
- int ipu_id;
-
- /*
- * retrieve IPU id from subdev name, note: can't get this from
- * struct imx_media_ipu_internal_sd_pdata because if src is
- * a CSI, it has different struct ipu_client_platformdata which
- * does not contain IPU id.
- */
- if (sscanf(src->name, "ipu%d", &ipu_id) != 1)
- return NULL;
-
- isd_to_devname(sink_devname, sizeof(sink_devname),
- link->remote, ipu_id - 1);
-
- return imx_media_find_subdev_by_devname(imxmd, sink_devname);
-}
-
-static int create_ipu_internal_link(struct imx_media_dev *imxmd,
- struct v4l2_subdev *src,
- const struct internal_link *link)
+static int create_internal_link(struct imx_media_dev *imxmd,
+ struct v4l2_subdev *src,
+ struct v4l2_subdev *sink,
+ const struct internal_link *link)
{
- struct v4l2_subdev *sink;
int ret;
- sink = find_sink(imxmd, src, link);
- if (!sink)
- return -ENODEV;
+	/* skip if this link was already created */
+ if (media_entity_find_link(&src->entity.pads[link->local_pad],
+ &sink->entity.pads[link->remote_pad]))
+ return 0;
v4l2_info(&imxmd->v4l2_dev, "%s:%d -> %s:%d\n",
src->name, link->local_pad,
@@ -219,25 +149,21 @@ static int create_ipu_internal_link(struct imx_media_dev *imxmd,
ret = media_create_pad_link(&src->entity, link->local_pad,
&sink->entity, link->remote_pad, 0);
if (ret)
- v4l2_err(&imxmd->v4l2_dev,
- "create_pad_link failed: %d\n", ret);
+ v4l2_err(&imxmd->v4l2_dev, "%s failed: %d\n", __func__, ret);
return ret;
}
-int imx_media_create_ipu_internal_links(struct imx_media_dev *imxmd,
- struct v4l2_subdev *sd)
+static int create_ipu_internal_links(struct imx_media_dev *imxmd,
+ const struct internal_subdev *intsd,
+ struct v4l2_subdev *sd,
+ int ipu_id)
{
- const struct internal_subdev *intsd;
const struct internal_pad *intpad;
const struct internal_link *link;
struct media_pad *pad;
int i, j, ret;
- intsd = find_intsd_by_grp_id(sd->grp_id);
- if (!intsd)
- return -ENODEV;
-
/* create the source->sink links */
for (i = 0; i < sd->entity.num_pads; i++) {
intpad = &intsd->pad[i];
@@ -246,13 +172,13 @@ int imx_media_create_ipu_internal_links(struct imx_media_dev *imxmd,
if (!(pad->flags & MEDIA_PAD_FL_SOURCE))
continue;
- for (j = 0; ; j++) {
- link = &intpad->link[j];
+ for (j = 0; j < intpad->num_links; j++) {
+ struct v4l2_subdev *sink;
- if (!link->remote)
- break;
+ link = &intpad->link[j];
+ sink = imxmd->sync_sd[ipu_id][link->remote];
- ret = create_ipu_internal_link(imxmd, sd, link);
+ ret = create_internal_link(imxmd, sd, sink, link);
if (ret)
return ret;
}
@@ -261,85 +187,116 @@ int imx_media_create_ipu_internal_links(struct imx_media_dev *imxmd,
return 0;
}
-/* register an internal subdev as a platform device */
-static int add_internal_subdev(struct imx_media_dev *imxmd,
- const struct internal_subdev *isd,
- int ipu_id)
+int imx_media_register_ipu_internal_subdevs(struct imx_media_dev *imxmd,
+ struct v4l2_subdev *csi)
{
- struct imx_media_ipu_internal_sd_pdata pdata;
- struct platform_device_info pdevinfo = {};
- struct platform_device *pdev;
+ struct device *ipu_dev = csi->dev->parent;
+ const struct internal_subdev *intsd;
+ struct v4l2_subdev *sd;
+ struct ipu_soc *ipu;
+ int i, ipu_id, ret;
- pdata.grp_id = isd->id->grp_id;
+ ipu = dev_get_drvdata(ipu_dev);
+ if (!ipu) {
+ v4l2_err(&imxmd->v4l2_dev, "invalid IPU device!\n");
+ return -ENODEV;
+ }
- /* the id of IPU this subdev will control */
- pdata.ipu_id = ipu_id;
+ ipu_id = ipu_get_num(ipu);
+ if (ipu_id > 1) {
+ v4l2_err(&imxmd->v4l2_dev, "invalid IPU id %d!\n", ipu_id);
+ return -ENODEV;
+ }
- /* create subdev name */
- imx_media_grp_id_to_sd_name(pdata.sd_name, sizeof(pdata.sd_name),
- pdata.grp_id, ipu_id);
+ mutex_lock(&imxmd->mutex);
- pdevinfo.name = isd->id->name;
- pdevinfo.id = ipu_id * num_isd + isd->id->index;
- pdevinfo.parent = imxmd->md.dev;
- pdevinfo.data = &pdata;
- pdevinfo.size_data = sizeof(pdata);
- pdevinfo.dma_mask = DMA_BIT_MASK(32);
+ /* register the synchronous subdevs */
+ for (i = 0; i < NUM_IPU_SUBDEVS; i++) {
+ intsd = &int_subdev[i];
- pdev = platform_device_register_full(&pdevinfo);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
+ sd = imxmd->sync_sd[ipu_id][i];
- return imx_media_add_async_subdev(imxmd, NULL, pdev);
-}
+ /*
+		 * skip if this sync subdev is already registered or it is
+ * not a sync subdev (one of the CSIs)
+ */
+ if (sd || !intsd->sync_register)
+ continue;
-/* adds the internal subdevs in one ipu */
-int imx_media_add_ipu_internal_subdevs(struct imx_media_dev *imxmd,
- int ipu_id)
-{
- enum isd_enum i;
- int ret;
+ mutex_unlock(&imxmd->mutex);
+ sd = intsd->sync_register(&imxmd->v4l2_dev, ipu_dev, ipu,
+ intsd->grp_id);
+ mutex_lock(&imxmd->mutex);
+ if (IS_ERR(sd)) {
+ ret = PTR_ERR(sd);
+ goto err_unwind;
+ }
- for (i = 0; i < num_isd; i++) {
- const struct internal_subdev *isd = &int_subdev[i];
+ imxmd->sync_sd[ipu_id][i] = sd;
+ }
- /*
- * the CSIs are represented in the device-tree, so those
- * devices are already added to the async subdev list by
- * of_parse_subdev().
- */
- switch (isd->id->grp_id) {
- case IMX_MEDIA_GRP_ID_IPU_CSI0:
- case IMX_MEDIA_GRP_ID_IPU_CSI1:
- ret = 0;
- break;
- default:
- ret = add_internal_subdev(imxmd, isd, ipu_id);
- break;
+ /*
+	 * all the sync subdevs are registered; create the media links
+ * between them.
+ */
+ for (i = 0; i < NUM_IPU_SUBDEVS; i++) {
+ intsd = &int_subdev[i];
+
+ if (intsd->grp_id == csi->grp_id) {
+ sd = csi;
+ } else {
+ sd = imxmd->sync_sd[ipu_id][i];
+ if (!sd)
+ continue;
}
- if (ret)
- goto remove;
+ ret = create_ipu_internal_links(imxmd, intsd, sd, ipu_id);
+ if (ret) {
+ mutex_unlock(&imxmd->mutex);
+ imx_media_unregister_ipu_internal_subdevs(imxmd);
+ return ret;
+ }
}
+ mutex_unlock(&imxmd->mutex);
return 0;
-remove:
- imx_media_remove_ipu_internal_subdevs(imxmd);
+err_unwind:
+ while (--i >= 0) {
+ intsd = &int_subdev[i];
+ sd = imxmd->sync_sd[ipu_id][i];
+ if (!sd || !intsd->sync_unregister)
+ continue;
+ mutex_unlock(&imxmd->mutex);
+ intsd->sync_unregister(sd);
+ mutex_lock(&imxmd->mutex);
+ }
+
+ mutex_unlock(&imxmd->mutex);
return ret;
}
-void imx_media_remove_ipu_internal_subdevs(struct imx_media_dev *imxmd)
+void imx_media_unregister_ipu_internal_subdevs(struct imx_media_dev *imxmd)
{
- struct imx_media_async_subdev *imxasd;
- struct v4l2_async_subdev *asd;
+ const struct internal_subdev *intsd;
+ struct v4l2_subdev *sd;
+ int i, j;
- list_for_each_entry(asd, &imxmd->notifier.asd_list, asd_list) {
- imxasd = to_imx_media_asd(asd);
+ mutex_lock(&imxmd->mutex);
- if (!imxasd->pdev)
- continue;
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < NUM_IPU_SUBDEVS; j++) {
+ intsd = &int_subdev[j];
+ sd = imxmd->sync_sd[i][j];
+
+ if (!sd || !intsd->sync_unregister)
+ continue;
- platform_device_unregister(imxasd->pdev);
+ mutex_unlock(&imxmd->mutex);
+ intsd->sync_unregister(sd);
+ mutex_lock(&imxmd->mutex);
+ }
}
+
+ mutex_unlock(&imxmd->mutex);
}
diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c
index 990e82aa8e42..2d3efd2a6dde 100644
--- a/drivers/staging/media/imx/imx-media-of.c
+++ b/drivers/staging/media/imx/imx-media-of.c
@@ -19,6 +19,9 @@
int imx_media_of_add_csi(struct imx_media_dev *imxmd,
struct device_node *csi_np)
{
+ struct v4l2_async_subdev *asd;
+ int ret = 0;
+
if (!of_device_is_available(csi_np)) {
dev_dbg(imxmd->md.dev, "%s: %pOFn not enabled\n", __func__,
csi_np);
@@ -26,18 +29,25 @@ int imx_media_of_add_csi(struct imx_media_dev *imxmd,
}
/* add CSI fwnode to async notifier */
- return imx_media_add_async_subdev(imxmd, of_fwnode_handle(csi_np),
- NULL);
+ asd = v4l2_async_notifier_add_fwnode_subdev(&imxmd->notifier,
+ of_fwnode_handle(csi_np),
+ sizeof(*asd));
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
+ if (ret == -EEXIST)
+ dev_dbg(imxmd->md.dev, "%s: already added %pOFn\n",
+ __func__, csi_np);
+ }
+
+ return ret;
}
EXPORT_SYMBOL_GPL(imx_media_of_add_csi);
int imx_media_add_of_subdevs(struct imx_media_dev *imxmd,
struct device_node *np)
{
- bool ipu_found[2] = {false, false};
struct device_node *csi_np;
int i, ret;
- u32 ipu_id;
for (i = 0; ; i++) {
csi_np = of_parse_phandle(np, "ports", i);
@@ -55,34 +65,15 @@ int imx_media_add_of_subdevs(struct imx_media_dev *imxmd,
/* other error, can't continue */
goto err_out;
}
-
- ret = of_alias_get_id(csi_np->parent, "ipu");
- if (ret < 0)
- goto err_out;
- if (ret > 1) {
- ret = -EINVAL;
- goto err_out;
- }
-
- ipu_id = ret;
-
- if (!ipu_found[ipu_id]) {
- ret = imx_media_add_ipu_internal_subdevs(imxmd,
- ipu_id);
- if (ret)
- goto err_out;
- }
-
- ipu_found[ipu_id] = true;
}
return 0;
err_out:
- imx_media_remove_ipu_internal_subdevs(imxmd);
of_node_put(csi_np);
return ret;
}
+EXPORT_SYMBOL_GPL(imx_media_add_of_subdevs);
/*
* Create a single media link to/from sd using a fwnode link.
@@ -152,6 +143,7 @@ int imx_media_create_of_links(struct imx_media_dev *imxmd,
return 0;
}
+EXPORT_SYMBOL_GPL(imx_media_create_of_links);
/*
* Create media links to the given CSI subdevice's sink pads,
@@ -195,3 +187,4 @@ int imx_media_create_csi_of_links(struct imx_media_dev *imxmd,
return 0;
}
+EXPORT_SYMBOL_GPL(imx_media_create_csi_of_links);
diff --git a/drivers/staging/media/imx/imx-media-utils.c b/drivers/staging/media/imx/imx-media-utils.c
index b41842dba5ec..b5b8a3b7730a 100644
--- a/drivers/staging/media/imx/imx-media-utils.c
+++ b/drivers/staging/media/imx/imx-media-utils.c
@@ -573,8 +573,7 @@ void imx_media_fill_default_mbus_fields(struct v4l2_mbus_framefmt *tryfmt,
EXPORT_SYMBOL_GPL(imx_media_fill_default_mbus_fields);
int imx_media_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
- struct v4l2_rect *compose,
- const struct v4l2_mbus_framefmt *mbus,
+ struct v4l2_mbus_framefmt *mbus,
const struct imx_media_pixfmt *cc)
{
u32 width;
@@ -621,17 +620,6 @@ int imx_media_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
pix->sizeimage = cc->planar ? ((stride * pix->height * cc->bpp) >> 3) :
stride * pix->height;
- /*
- * set capture compose rectangle, which is fixed to the
- * source subdevice mbus format.
- */
- if (compose) {
- compose->left = 0;
- compose->top = 0;
- compose->width = mbus->width;
- compose->height = mbus->height;
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(imx_media_mbus_fmt_to_pix_fmt);
@@ -643,11 +631,13 @@ int imx_media_mbus_fmt_to_ipu_image(struct ipu_image *image,
memset(image, 0, sizeof(*image));
- ret = imx_media_mbus_fmt_to_pix_fmt(&image->pix, &image->rect,
- mbus, NULL);
+ ret = imx_media_mbus_fmt_to_pix_fmt(&image->pix, mbus, NULL);
if (ret)
return ret;
+ image->rect.width = mbus->width;
+ image->rect.height = mbus->height;
+
return 0;
}
EXPORT_SYMBOL_GPL(imx_media_mbus_fmt_to_ipu_image);
@@ -675,29 +665,28 @@ int imx_media_ipu_image_to_mbus_fmt(struct v4l2_mbus_framefmt *mbus,
}
EXPORT_SYMBOL_GPL(imx_media_ipu_image_to_mbus_fmt);
-void imx_media_free_dma_buf(struct imx_media_dev *imxmd,
+void imx_media_free_dma_buf(struct device *dev,
struct imx_media_dma_buf *buf)
{
if (buf->virt)
- dma_free_coherent(imxmd->md.dev, buf->len,
- buf->virt, buf->phys);
+ dma_free_coherent(dev, buf->len, buf->virt, buf->phys);
buf->virt = NULL;
buf->phys = 0;
}
EXPORT_SYMBOL_GPL(imx_media_free_dma_buf);
-int imx_media_alloc_dma_buf(struct imx_media_dev *imxmd,
+int imx_media_alloc_dma_buf(struct device *dev,
struct imx_media_dma_buf *buf,
int size)
{
- imx_media_free_dma_buf(imxmd, buf);
+ imx_media_free_dma_buf(dev, buf);
buf->len = PAGE_ALIGN(size);
- buf->virt = dma_alloc_coherent(imxmd->md.dev, buf->len, &buf->phys,
+ buf->virt = dma_alloc_coherent(dev, buf->len, &buf->phys,
GFP_DMA | GFP_KERNEL);
if (!buf->virt) {
- dev_err(imxmd->md.dev, "failed to alloc dma buffer\n");
+ dev_err(dev, "%s: failed\n", __func__);
return -ENOMEM;
}
@@ -764,35 +753,37 @@ imx_media_find_subdev_by_devname(struct imx_media_dev *imxmd,
EXPORT_SYMBOL_GPL(imx_media_find_subdev_by_devname);
/*
- * Adds a video device to the master video device list. This is called by
- * an async subdev that owns a video device when it is registered.
+ * Adds a video device to the master video device list. This is called
+ * when a video device is registered.
*/
-int imx_media_add_video_device(struct imx_media_dev *imxmd,
- struct imx_media_video_dev *vdev)
+void imx_media_add_video_device(struct imx_media_dev *imxmd,
+ struct imx_media_video_dev *vdev)
{
mutex_lock(&imxmd->mutex);
list_add_tail(&vdev->list, &imxmd->vdev_list);
mutex_unlock(&imxmd->mutex);
- return 0;
}
EXPORT_SYMBOL_GPL(imx_media_add_video_device);
/*
- * Search upstream/downstream for a subdevice in the current pipeline
- * with given grp_id, starting from start_entity. Returns the subdev's
- * source/sink pad that it was reached from. If grp_id is zero, just
- * returns the nearest source/sink pad to start_entity. Must be called
- * with mdev->graph_mutex held.
+ * Search upstream/downstream for a subdevice or video device pad in the
+ * current pipeline, starting from start_entity. Returns the device's
+ * source/sink pad that it was reached from. Must be called with
+ * mdev->graph_mutex held.
+ *
+ * If grp_id != 0, finds a subdevice's pad of given grp_id.
+ * Else if buftype != 0, finds a video device's pad of given buffer type.
+ * Else, returns the nearest source/sink pad to start_entity.
*/
-static struct media_pad *
-find_pipeline_pad(struct imx_media_dev *imxmd,
- struct media_entity *start_entity,
- u32 grp_id, bool upstream)
+struct media_pad *
+imx_media_pipeline_pad(struct media_entity *start_entity, u32 grp_id,
+ enum v4l2_buf_type buftype, bool upstream)
{
struct media_entity *me = start_entity;
struct media_pad *pad = NULL;
+ struct video_device *vfd;
struct v4l2_subdev *sd;
int i;
@@ -804,16 +795,27 @@ find_pipeline_pad(struct imx_media_dev *imxmd,
continue;
pad = media_entity_remote_pad(spad);
- if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ if (!pad)
continue;
- if (grp_id != 0) {
- sd = media_entity_to_v4l2_subdev(pad->entity);
- if (sd->grp_id & grp_id)
- return pad;
+ if (grp_id) {
+ if (is_media_entity_v4l2_subdev(pad->entity)) {
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+ if (sd->grp_id & grp_id)
+ return pad;
+ }
+
+ return imx_media_pipeline_pad(pad->entity, grp_id,
+ buftype, upstream);
+ } else if (buftype) {
+ if (is_media_entity_v4l2_video_device(pad->entity)) {
+ vfd = media_entity_to_video_device(pad->entity);
+ if (buftype == vfd->queue->type)
+ return pad;
+ }
- return find_pipeline_pad(imxmd, pad->entity,
- grp_id, upstream);
+ return imx_media_pipeline_pad(pad->entity, grp_id,
+ buftype, upstream);
} else {
return pad;
}
@@ -821,28 +823,33 @@ find_pipeline_pad(struct imx_media_dev *imxmd,
return NULL;
}
+EXPORT_SYMBOL_GPL(imx_media_pipeline_pad);
/*
- * Search upstream for a subdev in the current pipeline with
- * given grp_id. Must be called with mdev->graph_mutex held.
+ * Search upstream/downstream for a subdev or video device in the current
+ * pipeline. Must be called with mdev->graph_mutex held.
*/
-static struct v4l2_subdev *
-find_upstream_subdev(struct imx_media_dev *imxmd,
- struct media_entity *start_entity,
- u32 grp_id)
+static struct media_entity *
+find_pipeline_entity(struct media_entity *start, u32 grp_id,
+ enum v4l2_buf_type buftype, bool upstream)
{
+ struct media_pad *pad = NULL;
+ struct video_device *vfd;
struct v4l2_subdev *sd;
- struct media_pad *pad;
- if (is_media_entity_v4l2_subdev(start_entity)) {
- sd = media_entity_to_v4l2_subdev(start_entity);
+ if (grp_id && is_media_entity_v4l2_subdev(start)) {
+ sd = media_entity_to_v4l2_subdev(start);
if (sd->grp_id & grp_id)
- return sd;
+ return &sd->entity;
+ } else if (buftype && is_media_entity_v4l2_video_device(start)) {
+		vfd = media_entity_to_video_device(start);
+ if (buftype == vfd->queue->type)
+ return &vfd->entity;
}
- pad = find_pipeline_pad(imxmd, start_entity, grp_id, true);
+ pad = imx_media_pipeline_pad(start, grp_id, buftype, upstream);
- return pad ? media_entity_to_v4l2_subdev(pad->entity) : NULL;
+ return pad ? pad->entity : NULL;
}
/*
@@ -850,62 +857,57 @@ find_upstream_subdev(struct imx_media_dev *imxmd,
* start entity in the current pipeline.
* Must be called with mdev->graph_mutex held.
*/
-int imx_media_find_mipi_csi2_channel(struct imx_media_dev *imxmd,
- struct media_entity *start_entity)
+int imx_media_pipeline_csi2_channel(struct media_entity *start_entity)
{
struct media_pad *pad;
int ret = -EPIPE;
- pad = find_pipeline_pad(imxmd, start_entity, IMX_MEDIA_GRP_ID_CSI2,
- true);
- if (pad) {
+ pad = imx_media_pipeline_pad(start_entity, IMX_MEDIA_GRP_ID_CSI2,
+ 0, true);
+ if (pad)
ret = pad->index - 1;
- dev_dbg(imxmd->md.dev, "found vc%d from %s\n",
- ret, start_entity->name);
- }
return ret;
}
-EXPORT_SYMBOL_GPL(imx_media_find_mipi_csi2_channel);
+EXPORT_SYMBOL_GPL(imx_media_pipeline_csi2_channel);
/*
- * Find a source pad reached upstream from the given start entity in
- * the current pipeline. Must be called with mdev->graph_mutex held.
+ * Find a subdev reached upstream or downstream from the given start
+ * entity in the current pipeline.
+ * Must be called with mdev->graph_mutex held.
*/
-struct media_pad *
-imx_media_find_upstream_pad(struct imx_media_dev *imxmd,
- struct media_entity *start_entity,
- u32 grp_id)
+struct v4l2_subdev *
+imx_media_pipeline_subdev(struct media_entity *start_entity, u32 grp_id,
+ bool upstream)
{
- struct media_pad *pad;
+ struct media_entity *me;
- pad = find_pipeline_pad(imxmd, start_entity, grp_id, true);
- if (!pad)
+ me = find_pipeline_entity(start_entity, grp_id, 0, upstream);
+ if (!me)
return ERR_PTR(-ENODEV);
- return pad;
+ return media_entity_to_v4l2_subdev(me);
}
-EXPORT_SYMBOL_GPL(imx_media_find_upstream_pad);
+EXPORT_SYMBOL_GPL(imx_media_pipeline_subdev);
/*
* Find a subdev reached upstream from the given start entity in
* the current pipeline.
* Must be called with mdev->graph_mutex held.
*/
-struct v4l2_subdev *
-imx_media_find_upstream_subdev(struct imx_media_dev *imxmd,
- struct media_entity *start_entity,
- u32 grp_id)
+struct video_device *
+imx_media_pipeline_video_device(struct media_entity *start_entity,
+ enum v4l2_buf_type buftype, bool upstream)
{
- struct v4l2_subdev *sd;
+ struct media_entity *me;
- sd = find_upstream_subdev(imxmd, start_entity, grp_id);
- if (!sd)
+ me = find_pipeline_entity(start_entity, 0, buftype, upstream);
+ if (!me)
return ERR_PTR(-ENODEV);
- return sd;
+ return media_entity_to_video_device(me);
}
-EXPORT_SYMBOL_GPL(imx_media_find_upstream_subdev);
+EXPORT_SYMBOL_GPL(imx_media_pipeline_video_device);
/*
* Turn current pipeline streaming on/off starting from entity.
diff --git a/drivers/staging/media/imx/imx-media-vdic.c b/drivers/staging/media/imx/imx-media-vdic.c
index 4487374c9435..4d90eecb04a2 100644
--- a/drivers/staging/media/imx/imx-media-vdic.c
+++ b/drivers/staging/media/imx/imx-media-vdic.c
@@ -4,13 +4,6 @@
*
* Copyright (c) 2017 Mentor Graphics Inc.
*/
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/timer.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
@@ -65,12 +58,11 @@ struct vdic_pipeline_ops {
#define S_ALIGN 1 /* multiple of 2 */
struct vdic_priv {
- struct device *dev;
- struct ipu_soc *ipu;
- struct imx_media_dev *md;
+ struct device *ipu_dev;
+ struct ipu_soc *ipu;
+
struct v4l2_subdev sd;
struct media_pad pad[VDIC_NUM_PADS];
- int ipu_id;
/* lock to protect all members below */
struct mutex lock;
@@ -145,8 +137,6 @@ static int vdic_get_ipu_resources(struct vdic_priv *priv)
struct ipuv3_channel *ch;
struct ipu_vdi *vdi;
- priv->ipu = priv->md->ipu[priv->ipu_id];
-
vdi = ipu_vdi_get(priv->ipu);
if (IS_ERR(vdi)) {
v4l2_err(&priv->sd, "failed to get VDIC\n");
@@ -511,7 +501,8 @@ static int vdic_s_stream(struct v4l2_subdev *sd, int enable)
if (priv->stream_count != !enable)
goto update_count;
- dev_dbg(priv->dev, "stream %s\n", enable ? "ON" : "OFF");
+ dev_dbg(priv->ipu_dev, "%s: stream %s\n", sd->name,
+ enable ? "ON" : "OFF");
if (enable)
ret = vdic_start(priv);
@@ -686,8 +677,8 @@ static int vdic_link_setup(struct media_entity *entity,
struct v4l2_subdev *remote_sd;
int ret = 0;
- dev_dbg(priv->dev, "link setup %s -> %s", remote->entity->name,
- local->entity->name);
+ dev_dbg(priv->ipu_dev, "%s: link setup %s -> %s",
+ sd->name, remote->entity->name, local->entity->name);
mutex_lock(&priv->lock);
@@ -860,9 +851,6 @@ static int vdic_registered(struct v4l2_subdev *sd)
int i, ret;
u32 code;
- /* get media device */
- priv->md = dev_get_drvdata(sd->v4l2_dev->dev);
-
for (i = 0; i < VDIC_NUM_PADS; i++) {
priv->pad[i].flags = (i == VDIC_SRC_PAD_DIRECT) ?
MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
@@ -934,77 +922,53 @@ static const struct v4l2_subdev_internal_ops vdic_internal_ops = {
.unregistered = vdic_unregistered,
};
-static int imx_vdic_probe(struct platform_device *pdev)
+struct v4l2_subdev *imx_media_vdic_register(struct v4l2_device *v4l2_dev,
+ struct device *ipu_dev,
+ struct ipu_soc *ipu,
+ u32 grp_id)
{
- struct imx_media_ipu_internal_sd_pdata *pdata;
struct vdic_priv *priv;
int ret;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(ipu_dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
- platform_set_drvdata(pdev, &priv->sd);
- priv->dev = &pdev->dev;
-
- pdata = priv->dev->platform_data;
- priv->ipu_id = pdata->ipu_id;
+ priv->ipu_dev = ipu_dev;
+ priv->ipu = ipu;
v4l2_subdev_init(&priv->sd, &vdic_subdev_ops);
v4l2_set_subdevdata(&priv->sd, priv);
priv->sd.internal_ops = &vdic_internal_ops;
priv->sd.entity.ops = &vdic_entity_ops;
priv->sd.entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
- priv->sd.dev = &pdev->dev;
- priv->sd.owner = THIS_MODULE;
+ priv->sd.owner = ipu_dev->driver->owner;
priv->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
- /* get our group id */
- priv->sd.grp_id = pdata->grp_id;
- strscpy(priv->sd.name, pdata->sd_name, sizeof(priv->sd.name));
+ priv->sd.grp_id = grp_id;
+ imx_media_grp_id_to_sd_name(priv->sd.name, sizeof(priv->sd.name),
+ priv->sd.grp_id, ipu_get_num(ipu));
mutex_init(&priv->lock);
- ret = v4l2_async_register_subdev(&priv->sd);
+ ret = v4l2_device_register_subdev(v4l2_dev, &priv->sd);
if (ret)
goto free;
- return 0;
+ return &priv->sd;
free:
mutex_destroy(&priv->lock);
- return ret;
+ return ERR_PTR(ret);
}
-static int imx_vdic_remove(struct platform_device *pdev)
+int imx_media_vdic_unregister(struct v4l2_subdev *sd)
{
- struct v4l2_subdev *sd = platform_get_drvdata(pdev);
struct vdic_priv *priv = v4l2_get_subdevdata(sd);
v4l2_info(sd, "Removing\n");
- v4l2_async_unregister_subdev(sd);
+ v4l2_device_unregister_subdev(sd);
mutex_destroy(&priv->lock);
media_entity_cleanup(&sd->entity);
return 0;
}
-
-static const struct platform_device_id imx_vdic_ids[] = {
- { .name = "imx-ipuv3-vdic" },
- { },
-};
-MODULE_DEVICE_TABLE(platform, imx_vdic_ids);
-
-static struct platform_driver imx_vdic_driver = {
- .probe = imx_vdic_probe,
- .remove = imx_vdic_remove,
- .id_table = imx_vdic_ids,
- .driver = {
- .name = "imx-ipuv3-vdic",
- },
-};
-module_platform_driver(imx_vdic_driver);
-
-MODULE_DESCRIPTION("i.MX VDIC subdev driver");
-MODULE_AUTHOR("Steve Longerbeam <steve_longerbeam@mentor.com>");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:imx-ipuv3-vdic");
diff --git a/drivers/staging/media/imx/imx-media.h b/drivers/staging/media/imx/imx-media.h
index 6587aa49e005..8a60bdafe2da 100644
--- a/drivers/staging/media/imx/imx-media.h
+++ b/drivers/staging/media/imx/imx-media.h
@@ -16,6 +16,19 @@
#include <video/imx-ipu-v3.h>
/*
+ * Enumeration of the IPU internal sub-devices
+ */
+enum {
+ IPU_CSI0 = 0,
+ IPU_CSI1,
+ IPU_VDIC,
+ IPU_IC_PRP,
+ IPU_IC_PRPENC,
+ IPU_IC_PRPVF,
+ NUM_IPU_SUBDEVS,
+};
+
+/*
* Pad definitions for the subdevs with multiple source or
* sink pads
*/
@@ -111,25 +124,6 @@ struct imx_media_pad_vdev {
struct list_head list;
};
-struct imx_media_ipu_internal_sd_pdata {
- char sd_name[V4L2_SUBDEV_NAME_SIZE];
- u32 grp_id;
- int ipu_id;
-};
-
-struct imx_media_async_subdev {
- /* the base asd - must be first in this struct */
- struct v4l2_async_subdev asd;
- /* the platform device of IPU-internal subdevs */
- struct platform_device *pdev;
-};
-
-static inline struct imx_media_async_subdev *
-to_imx_media_asd(struct v4l2_async_subdev *asd)
-{
- return container_of(asd, struct imx_media_async_subdev, asd);
-}
-
struct imx_media_dev {
struct media_device md;
struct v4l2_device v4l2_dev;
@@ -142,11 +136,11 @@ struct imx_media_dev {
/* master video device list */
struct list_head vdev_list;
- /* IPUs this media driver control, valid after subdevs bound */
- struct ipu_soc *ipu[2];
-
/* for async subdev registration */
struct v4l2_async_notifier notifier;
+
+	/* the IPU internal subdevs registered synchronously */
+ struct v4l2_subdev *sync_sd[2][NUM_IPU_SUBDEVS];
};
enum codespace_sel {
@@ -176,8 +170,7 @@ void imx_media_fill_default_mbus_fields(struct v4l2_mbus_framefmt *tryfmt,
struct v4l2_mbus_framefmt *fmt,
bool ic_route);
int imx_media_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
- struct v4l2_rect *compose,
- const struct v4l2_mbus_framefmt *mbus,
+ struct v4l2_mbus_framefmt *mbus,
const struct imx_media_pixfmt *cc);
int imx_media_mbus_fmt_to_ipu_image(struct ipu_image *image,
struct v4l2_mbus_framefmt *mbus);
@@ -191,18 +184,18 @@ imx_media_find_subdev_by_fwnode(struct imx_media_dev *imxmd,
struct v4l2_subdev *
imx_media_find_subdev_by_devname(struct imx_media_dev *imxmd,
const char *devname);
-int imx_media_add_video_device(struct imx_media_dev *imxmd,
- struct imx_media_video_dev *vdev);
-int imx_media_find_mipi_csi2_channel(struct imx_media_dev *imxmd,
- struct media_entity *start_entity);
+void imx_media_add_video_device(struct imx_media_dev *imxmd,
+ struct imx_media_video_dev *vdev);
+int imx_media_pipeline_csi2_channel(struct media_entity *start_entity);
struct media_pad *
-imx_media_find_upstream_pad(struct imx_media_dev *imxmd,
- struct media_entity *start_entity,
- u32 grp_id);
+imx_media_pipeline_pad(struct media_entity *start_entity, u32 grp_id,
+ enum v4l2_buf_type buftype, bool upstream);
struct v4l2_subdev *
-imx_media_find_upstream_subdev(struct imx_media_dev *imxmd,
- struct media_entity *start_entity,
- u32 grp_id);
+imx_media_pipeline_subdev(struct media_entity *start_entity, u32 grp_id,
+ bool upstream);
+struct video_device *
+imx_media_pipeline_video_device(struct media_entity *start_entity,
+ enum v4l2_buf_type buftype, bool upstream);
struct imx_media_dma_buf {
void *virt;
@@ -210,9 +203,9 @@ struct imx_media_dma_buf {
unsigned long len;
};
-void imx_media_free_dma_buf(struct imx_media_dev *imxmd,
+void imx_media_free_dma_buf(struct device *dev,
struct imx_media_dma_buf *buf);
-int imx_media_alloc_dma_buf(struct imx_media_dev *imxmd,
+int imx_media_alloc_dma_buf(struct device *dev,
struct imx_media_dma_buf *buf,
int size);
@@ -220,22 +213,12 @@ int imx_media_pipeline_set_stream(struct imx_media_dev *imxmd,
struct media_entity *entity,
bool on);
-/* imx-media-dev.c */
-int imx_media_add_async_subdev(struct imx_media_dev *imxmd,
- struct fwnode_handle *fwnode,
- struct platform_device *pdev);
-
-int imx_media_subdev_bound(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd);
-int imx_media_link_notify(struct media_link *link, u32 flags,
- unsigned int notification);
-void imx_media_notify(struct v4l2_subdev *sd, unsigned int notification,
- void *arg);
+/* imx-media-dev-common.c */
int imx_media_probe_complete(struct v4l2_async_notifier *notifier);
-
-struct imx_media_dev *imx_media_dev_init(struct device *dev);
-int imx_media_dev_notifier_register(struct imx_media_dev *imxmd);
+struct imx_media_dev *imx_media_dev_init(struct device *dev,
+ const struct media_device_ops *ops);
+int imx_media_dev_notifier_register(struct imx_media_dev *imxmd,
+ const struct v4l2_async_notifier_operations *ops);
/* imx-media-fim.c */
struct imx_media_fim;
@@ -248,11 +231,9 @@ struct imx_media_fim *imx_media_fim_init(struct v4l2_subdev *sd);
void imx_media_fim_free(struct imx_media_fim *fim);
/* imx-media-internal-sd.c */
-int imx_media_add_ipu_internal_subdevs(struct imx_media_dev *imxmd,
- int ipu_id);
-int imx_media_create_ipu_internal_links(struct imx_media_dev *imxmd,
- struct v4l2_subdev *sd);
-void imx_media_remove_ipu_internal_subdevs(struct imx_media_dev *imxmd);
+int imx_media_register_ipu_internal_subdevs(struct imx_media_dev *imxmd,
+ struct v4l2_subdev *csi);
+void imx_media_unregister_ipu_internal_subdevs(struct imx_media_dev *imxmd);
/* imx-media-of.c */
int imx_media_add_of_subdevs(struct imx_media_dev *dev,
@@ -264,18 +245,29 @@ int imx_media_create_csi_of_links(struct imx_media_dev *imxmd,
int imx_media_of_add_csi(struct imx_media_dev *imxmd,
struct device_node *csi_np);
+/* imx-media-vdic.c */
+struct v4l2_subdev *imx_media_vdic_register(struct v4l2_device *v4l2_dev,
+ struct device *ipu_dev,
+ struct ipu_soc *ipu,
+ u32 grp_id);
+int imx_media_vdic_unregister(struct v4l2_subdev *sd);
+
+/* imx-ic-common.c */
+struct v4l2_subdev *imx_media_ic_register(struct v4l2_device *v4l2_dev,
+ struct device *ipu_dev,
+ struct ipu_soc *ipu,
+ u32 grp_id);
+int imx_media_ic_unregister(struct v4l2_subdev *sd);
+
/* imx-media-capture.c */
struct imx_media_video_dev *
-imx_media_capture_device_init(struct v4l2_subdev *src_sd, int pad);
+imx_media_capture_device_init(struct device *dev, struct v4l2_subdev *src_sd,
+ int pad);
void imx_media_capture_device_remove(struct imx_media_video_dev *vdev);
-int imx_media_capture_device_register(struct imx_media_dev *md,
- struct imx_media_video_dev *vdev);
+int imx_media_capture_device_register(struct imx_media_video_dev *vdev);
void imx_media_capture_device_unregister(struct imx_media_video_dev *vdev);
struct imx_media_buffer *
imx_media_capture_device_next_buf(struct imx_media_video_dev *vdev);
-void imx_media_capture_device_set_format(struct imx_media_video_dev *vdev,
- const struct v4l2_pix_format *pix,
- const struct v4l2_rect *compose);
void imx_media_capture_device_error(struct imx_media_video_dev *vdev);
/* subdev group ids */
diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
index a708a0340eb1..f775870df7e0 100644
--- a/drivers/staging/media/imx/imx7-media-csi.c
+++ b/drivers/staging/media/imx/imx7-media-csi.c
@@ -152,8 +152,6 @@
#define CSI_CSICR18 0x48
#define CSI_CSICR19 0x4c
-static const char * const imx7_csi_clk_id[] = {"axi", "dcic", "mclk"};
-
struct imx7_csi {
struct device *dev;
struct v4l2_subdev sd;
@@ -180,9 +178,7 @@ struct imx7_csi {
void __iomem *regbase;
int irq;
-
- int num_clks;
- struct clk_bulk_data *clks;
+ struct clk *mclk;
/* active vb2 buffers to send to video dev sink */
struct imx_media_buffer *active_vb2_buf[2];
@@ -199,23 +195,15 @@ struct imx7_csi {
struct completion last_eof_completion;
};
-#define imx7_csi_reg_read(_csi, _offset) \
- __raw_readl((_csi)->regbase + (_offset))
-#define imx7_csi_reg_write(_csi, _val, _offset) \
- __raw_writel(_val, (_csi)->regbase + (_offset))
-
-static void imx7_csi_clk_enable(struct imx7_csi *csi)
+static u32 imx7_csi_reg_read(struct imx7_csi *csi, unsigned int offset)
{
- int ret;
-
- ret = clk_bulk_prepare_enable(csi->num_clks, csi->clks);
- if (ret < 0)
- dev_err(csi->dev, "failed to enable clocks\n");
+ return readl(csi->regbase + offset);
}
-static void imx7_csi_clk_disable(struct imx7_csi *csi)
+static void imx7_csi_reg_write(struct imx7_csi *csi, unsigned int value,
+ unsigned int offset)
{
- clk_bulk_disable_unprepare(csi->num_clks, csi->clks);
+ writel(value, csi->regbase + offset);
}
static void imx7_csi_hw_reset(struct imx7_csi *csi)
@@ -229,9 +217,9 @@ static void imx7_csi_hw_reset(struct imx7_csi *csi)
imx7_csi_reg_write(csi, CSICR3_RESET_VAL, CSI_CSICR3);
}
-static unsigned long imx7_csi_irq_clear(struct imx7_csi *csi)
+static u32 imx7_csi_irq_clear(struct imx7_csi *csi)
{
- unsigned long isr;
+ u32 isr;
isr = imx7_csi_reg_read(csi, CSI_CSISR);
imx7_csi_reg_write(csi, isr, CSI_CSISR);
@@ -257,7 +245,7 @@ static void imx7_csi_init_interface(struct imx7_csi *csi)
static void imx7_csi_hw_enable_irq(struct imx7_csi *csi)
{
- unsigned long cr1 = imx7_csi_reg_read(csi, CSI_CSICR1);
+ u32 cr1 = imx7_csi_reg_read(csi, CSI_CSICR1);
cr1 |= BIT_SOF_INTEN;
cr1 |= BIT_RFF_OR_INT;
@@ -273,7 +261,7 @@ static void imx7_csi_hw_enable_irq(struct imx7_csi *csi)
static void imx7_csi_hw_disable_irq(struct imx7_csi *csi)
{
- unsigned long cr1 = imx7_csi_reg_read(csi, CSI_CSICR1);
+ u32 cr1 = imx7_csi_reg_read(csi, CSI_CSICR1);
cr1 &= ~BIT_SOF_INTEN;
cr1 &= ~BIT_RFF_OR_INT;
@@ -286,7 +274,7 @@ static void imx7_csi_hw_disable_irq(struct imx7_csi *csi)
static void imx7_csi_hw_enable(struct imx7_csi *csi)
{
- unsigned long cr = imx7_csi_reg_read(csi, CSI_CSICR18);
+ u32 cr = imx7_csi_reg_read(csi, CSI_CSICR18);
cr |= BIT_CSI_HW_ENABLE;
@@ -295,7 +283,7 @@ static void imx7_csi_hw_enable(struct imx7_csi *csi)
static void imx7_csi_hw_disable(struct imx7_csi *csi)
{
- unsigned long cr = imx7_csi_reg_read(csi, CSI_CSICR18);
+ u32 cr = imx7_csi_reg_read(csi, CSI_CSICR18);
cr &= ~BIT_CSI_HW_ENABLE;
@@ -304,7 +292,7 @@ static void imx7_csi_hw_disable(struct imx7_csi *csi)
static void imx7_csi_dma_reflash(struct imx7_csi *csi)
{
- unsigned long cr3 = imx7_csi_reg_read(csi, CSI_CSICR18);
+ u32 cr3 = imx7_csi_reg_read(csi, CSI_CSICR18);
cr3 = imx7_csi_reg_read(csi, CSI_CSICR3);
cr3 |= BIT_DMA_REFLASH_RFF;
@@ -313,7 +301,7 @@ static void imx7_csi_dma_reflash(struct imx7_csi *csi)
static void imx7_csi_rx_fifo_clear(struct imx7_csi *csi)
{
- unsigned long cr1;
+ u32 cr1;
cr1 = imx7_csi_reg_read(csi, CSI_CSICR1);
imx7_csi_reg_write(csi, cr1 & ~BIT_FCC, CSI_CSICR1);
@@ -331,7 +319,7 @@ static void imx7_csi_buf_stride_set(struct imx7_csi *csi, u32 stride)
static void imx7_csi_deinterlace_enable(struct imx7_csi *csi, bool enable)
{
- unsigned long cr18 = imx7_csi_reg_read(csi, CSI_CSICR18);
+ u32 cr18 = imx7_csi_reg_read(csi, CSI_CSICR18);
if (enable)
cr18 |= BIT_DEINTERLACE_EN;
@@ -343,8 +331,8 @@ static void imx7_csi_deinterlace_enable(struct imx7_csi *csi, bool enable)
static void imx7_csi_dmareq_rff_enable(struct imx7_csi *csi)
{
- unsigned long cr3 = imx7_csi_reg_read(csi, CSI_CSICR3);
- unsigned long cr2 = imx7_csi_reg_read(csi, CSI_CSICR2);
+ u32 cr3 = imx7_csi_reg_read(csi, CSI_CSICR3);
+ u32 cr2 = imx7_csi_reg_read(csi, CSI_CSICR2);
/* Burst Type of DMA Transfer from RxFIFO. INCR16 */
cr2 |= 0xC0000000;
@@ -360,7 +348,7 @@ static void imx7_csi_dmareq_rff_enable(struct imx7_csi *csi)
static void imx7_csi_dmareq_rff_disable(struct imx7_csi *csi)
{
- unsigned long cr3 = imx7_csi_reg_read(csi, CSI_CSICR3);
+ u32 cr3 = imx7_csi_reg_read(csi, CSI_CSICR3);
cr3 &= ~BIT_DMA_REQ_EN_RFF;
cr3 &= ~BIT_HRESP_ERR_EN;
@@ -408,17 +396,23 @@ static void imx7_csi_error_recovery(struct imx7_csi *csi)
imx7_csi_hw_enable(csi);
}
-static void imx7_csi_init(struct imx7_csi *csi)
+static int imx7_csi_init(struct imx7_csi *csi)
{
+ int ret;
+
if (csi->is_init)
- return;
+ return 0;
- imx7_csi_clk_enable(csi);
+ ret = clk_prepare_enable(csi->mclk);
+ if (ret < 0)
+ return ret;
imx7_csi_hw_reset(csi);
imx7_csi_init_interface(csi);
imx7_csi_dmareq_rff_enable(csi);
csi->is_init = true;
+
+ return 0;
}
static void imx7_csi_deinit(struct imx7_csi *csi)
@@ -429,7 +423,7 @@ static void imx7_csi_deinit(struct imx7_csi *csi)
imx7_csi_hw_reset(csi);
imx7_csi_init_interface(csi);
imx7_csi_dmareq_rff_disable(csi);
- imx7_csi_clk_disable(csi);
+ clk_disable_unprepare(csi->mclk);
csi->is_init = false;
}
@@ -448,11 +442,19 @@ static int imx7_csi_get_upstream_endpoint(struct imx7_csi *csi,
src = &csi->src_sd->entity;
+ /*
+	 * if the source is neither a mux nor a csi2, get the one directly upstream
+ * from this csi
+ */
+ if (src->function != MEDIA_ENT_F_VID_IF_BRIDGE &&
+ src->function != MEDIA_ENT_F_VID_MUX)
+ src = &csi->sd.entity;
+
skip_video_mux:
/* get source pad of entity directly upstream from src */
- pad = imx_media_find_upstream_pad(csi->imxmd, src, 0);
- if (IS_ERR(pad))
- return PTR_ERR(pad);
+ pad = imx_media_pipeline_pad(src, 0, 0, true);
+ if (!pad)
+ return -ENODEV;
sd = media_entity_to_v4l2_subdev(pad->entity);
@@ -531,7 +533,7 @@ static int imx7_csi_link_setup(struct media_entity *entity,
init:
if (csi->sink || csi->src_sd)
- imx7_csi_init(csi);
+ ret = imx7_csi_init(csi);
else
imx7_csi_deinit(csi);
@@ -653,7 +655,7 @@ static void imx7_csi_vb2_buf_done(struct imx7_csi *csi)
static irqreturn_t imx7_csi_irq_handler(int irq, void *data)
{
struct imx7_csi *csi = data;
- unsigned long status;
+ u32 status;
spin_lock(&csi->irqlock);
@@ -714,7 +716,7 @@ static int imx7_csi_dma_start(struct imx7_csi *csi)
struct v4l2_pix_format *out_pix = &vdev->fmt.fmt.pix;
int ret;
- ret = imx_media_alloc_dma_buf(csi->imxmd, &csi->underrun_buf,
+ ret = imx_media_alloc_dma_buf(csi->dev, &csi->underrun_buf,
out_pix->sizeimage);
if (ret < 0) {
v4l2_warn(&csi->sd, "consider increasing the CMA area\n");
@@ -754,7 +756,7 @@ static void imx7_csi_dma_stop(struct imx7_csi *csi)
imx7_csi_dma_unsetup_vb2_buf(csi, VB2_BUF_STATE_ERROR);
- imx_media_free_dma_buf(csi->imxmd, &csi->underrun_buf);
+ imx_media_free_dma_buf(csi->dev, &csi->underrun_buf);
}
static int imx7_csi_configure(struct imx7_csi *csi)
@@ -811,7 +813,7 @@ static int imx7_csi_configure(struct imx7_csi *csi)
return 0;
}
-static int imx7_csi_enable(struct imx7_csi *csi)
+static void imx7_csi_enable(struct imx7_csi *csi)
{
imx7_csi_sw_reset(csi);
@@ -819,10 +821,7 @@ static int imx7_csi_enable(struct imx7_csi *csi)
imx7_csi_dmareq_rff_enable(csi);
imx7_csi_hw_enable_irq(csi);
imx7_csi_hw_enable(csi);
- return 0;
}
-
- return 0;
}
static void imx7_csi_disable(struct imx7_csi *csi)
@@ -1021,7 +1020,6 @@ static int imx7_csi_try_fmt(struct imx7_csi *csi,
break;
default:
return -EINVAL;
- break;
}
return 0;
}
@@ -1031,11 +1029,8 @@ static int imx7_csi_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_format *sdformat)
{
struct imx7_csi *csi = v4l2_get_subdevdata(sd);
- struct imx_media_video_dev *vdev = csi->vdev;
const struct imx_media_pixfmt *outcc;
struct v4l2_mbus_framefmt *outfmt;
- struct v4l2_pix_format vdev_fmt;
- struct v4l2_rect vdev_compose;
const struct imx_media_pixfmt *cc;
struct v4l2_mbus_framefmt *fmt;
struct v4l2_subdev_format format;
@@ -1080,19 +1075,8 @@ static int imx7_csi_set_fmt(struct v4l2_subdev *sd,
csi->cc[IMX7_CSI_PAD_SRC] = outcc;
}
- if (sdformat->which == V4L2_SUBDEV_FORMAT_TRY)
- goto out_unlock;
-
- csi->cc[sdformat->pad] = cc;
-
- /* propagate output pad format to capture device */
- imx_media_mbus_fmt_to_pix_fmt(&vdev_fmt, &vdev_compose,
- &csi->format_mbus[IMX7_CSI_PAD_SRC],
- csi->cc[IMX7_CSI_PAD_SRC]);
- mutex_unlock(&csi->lock);
- imx_media_capture_device_set_format(vdev, &vdev_fmt, &vdev_compose);
-
- return 0;
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ csi->cc[sdformat->pad] = cc;
out_unlock:
mutex_unlock(&csi->lock);
@@ -1126,17 +1110,7 @@ static int imx7_csi_registered(struct v4l2_subdev *sd)
if (ret < 0)
return ret;
- ret = imx_media_capture_device_register(csi->imxmd, csi->vdev);
- if (ret < 0)
- return ret;
-
- ret = imx_media_add_video_device(csi->imxmd, csi->vdev);
- if (ret < 0) {
- imx_media_capture_device_unregister(csi->vdev);
- return ret;
- }
-
- return 0;
+ return imx_media_capture_device_register(csi->vdev);
}
static void imx7_csi_unregistered(struct v4l2_subdev *sd)
@@ -1200,31 +1174,12 @@ static int imx7_csi_parse_endpoint(struct device *dev,
return fwnode_device_is_available(asd->match.fwnode) ? 0 : -EINVAL;
}
-static int imx7_csi_clocks_get(struct imx7_csi *csi)
-{
- struct device *dev = csi->dev;
- int i;
-
- csi->num_clks = ARRAY_SIZE(imx7_csi_clk_id);
- csi->clks = devm_kcalloc(dev, csi->num_clks, sizeof(*csi->clks),
- GFP_KERNEL);
-
- if (!csi->clks)
- return -ENOMEM;
-
- for (i = 0; i < csi->num_clks; i++)
- csi->clks[i].id = imx7_csi_clk_id[i];
-
- return devm_clk_bulk_get(dev, csi->num_clks, csi->clks);
-}
-
static int imx7_csi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct imx_media_dev *imxmd;
struct imx7_csi *csi;
- struct resource *res;
int ret;
csi = devm_kzalloc(&pdev->dev, sizeof(*csi), GFP_KERNEL);
@@ -1233,24 +1188,22 @@ static int imx7_csi_probe(struct platform_device *pdev)
csi->dev = dev;
- ret = imx7_csi_clocks_get(csi);
- if (ret < 0) {
- dev_err(dev, "Failed to get clocks");
- return -ENODEV;
+ csi->mclk = devm_clk_get(&pdev->dev, "mclk");
+ if (IS_ERR(csi->mclk)) {
+ ret = PTR_ERR(csi->mclk);
+		dev_err(dev, "Failed to get mclk: %d\n", ret);
+ return ret;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
csi->irq = platform_get_irq(pdev, 0);
- if (!res || csi->irq < 0) {
+ if (csi->irq < 0) {
dev_err(dev, "Missing platform resources data\n");
- return -ENODEV;
+ return csi->irq;
}
- csi->regbase = devm_ioremap_resource(dev, res);
- if (IS_ERR(csi->regbase)) {
- dev_err(dev, "Failed platform resources map\n");
- return -ENODEV;
- }
+ csi->regbase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(csi->regbase))
+ return PTR_ERR(csi->regbase);
spin_lock_init(&csi->irqlock);
mutex_init(&csi->lock);
@@ -1260,12 +1213,11 @@ static int imx7_csi_probe(struct platform_device *pdev)
(void *)csi);
if (ret < 0) {
dev_err(dev, "Request CSI IRQ failed.\n");
- ret = -ENODEV;
goto destroy_mutex;
}
/* add media device */
- imxmd = imx_media_dev_init(dev);
+ imxmd = imx_media_dev_init(dev, NULL);
if (IS_ERR(imxmd)) {
ret = PTR_ERR(imxmd);
goto destroy_mutex;
@@ -1276,7 +1228,7 @@ static int imx7_csi_probe(struct platform_device *pdev)
if (ret < 0 && ret != -ENODEV && ret != -EEXIST)
goto cleanup;
- ret = imx_media_dev_notifier_register(imxmd);
+ ret = imx_media_dev_notifier_register(imxmd, NULL);
if (ret < 0)
goto cleanup;
@@ -1292,7 +1244,8 @@ static int imx7_csi_probe(struct platform_device *pdev)
csi->sd.grp_id = IMX_MEDIA_GRP_ID_CSI;
snprintf(csi->sd.name, sizeof(csi->sd.name), "csi");
- csi->vdev = imx_media_capture_device_init(&csi->sd, IMX7_CSI_PAD_SRC);
+ csi->vdev = imx_media_capture_device_init(csi->sd.dev, &csi->sd,
+ IMX7_CSI_PAD_SRC);
if (IS_ERR(csi->vdev))
return PTR_ERR(csi->vdev);
diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c
index 19455f425416..d1cdf011c8f1 100644
--- a/drivers/staging/media/imx/imx7-mipi-csis.c
+++ b/drivers/staging/media/imx/imx7-mipi-csis.c
@@ -456,13 +456,9 @@ static void mipi_csis_set_params(struct csi_state *state)
MIPI_CSIS_CMN_CTRL_UPDATE_SHADOW_CTRL);
}
-static void mipi_csis_clk_enable(struct csi_state *state)
+static int mipi_csis_clk_enable(struct csi_state *state)
{
- int ret;
-
- ret = clk_bulk_prepare_enable(state->num_clks, state->clks);
- if (ret < 0)
- dev_err(state->dev, "failed to enable clocks\n");
+ return clk_bulk_prepare_enable(state->num_clks, state->clks);
}
static void mipi_csis_clk_disable(struct csi_state *state)
@@ -784,6 +780,17 @@ static irqreturn_t mipi_csis_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static int mipi_csis_registered(struct v4l2_subdev *mipi_sd)
+{
+ struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
+
+ state->pads[CSIS_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ state->pads[CSIS_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ return media_entity_pads_init(&state->mipi_sd.entity, CSIS_PADS_NUM,
+ state->pads);
+}
+
static const struct v4l2_subdev_core_ops mipi_csis_core_ops = {
.log_status = mipi_csis_log_status,
};
@@ -809,6 +816,10 @@ static const struct v4l2_subdev_ops mipi_csis_subdev_ops = {
.pad = &mipi_csis_pad_ops,
};
+static const struct v4l2_subdev_internal_ops mipi_csis_internal_ops = {
+ .registered = mipi_csis_registered,
+};
+
static int mipi_csis_parse_dt(struct platform_device *pdev,
struct csi_state *state)
{
@@ -869,6 +880,7 @@ static int mipi_csis_subdev_init(struct v4l2_subdev *mipi_sd,
mipi_sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
mipi_sd->entity.ops = &mipi_csis_entity_ops;
+ mipi_sd->internal_ops = &mipi_csis_internal_ops;
mipi_sd->dev = &pdev->dev;
@@ -890,7 +902,6 @@ static int mipi_csis_subdev_init(struct v4l2_subdev *mipi_sd,
return ret;
}
-
static int mipi_csis_dump_regs_show(struct seq_file *m, void *private)
{
struct csi_state *state = m->private;
@@ -938,7 +949,7 @@ static int mipi_csis_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *mem_res;
struct csi_state *state;
- int ret = -ENOMEM;
+ int ret;
state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
if (!state)
@@ -973,7 +984,11 @@ static int mipi_csis_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- mipi_csis_clk_enable(state);
+ ret = mipi_csis_clk_enable(state);
+ if (ret < 0) {
+ dev_err(state->dev, "failed to enable clocks: %d\n", ret);
+ return ret;
+ }
ret = devm_request_irq(dev, state->irq, mipi_csis_irq_handler,
0, dev_name(dev), state);
@@ -990,13 +1005,6 @@ static int mipi_csis_probe(struct platform_device *pdev)
if (ret < 0)
goto disable_clock;
- state->pads[CSIS_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
- state->pads[CSIS_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
- ret = media_entity_pads_init(&state->mipi_sd.entity, CSIS_PADS_NUM,
- state->pads);
- if (ret < 0)
- goto unregister_subdev;
-
memcpy(state->events, mipi_csis_events, sizeof(state->events));
mipi_csis_debugfs_init(state);
@@ -1016,7 +1024,6 @@ static int mipi_csis_probe(struct platform_device *pdev)
unregister_all:
mipi_csis_debugfs_exit(state);
media_entity_cleanup(&state->mipi_sd.entity);
-unregister_subdev:
v4l2_async_unregister_subdev(&state->mipi_sd);
disable_clock:
mipi_csis_clk_disable(state);
diff --git a/drivers/staging/media/ipu3/include/intel-ipu3.h b/drivers/staging/media/ipu3/include/intel-ipu3.h
index 1e7184e4311d..c7cd27efac8a 100644
--- a/drivers/staging/media/ipu3/include/intel-ipu3.h
+++ b/drivers/staging/media/ipu3/include/intel-ipu3.h
@@ -2472,7 +2472,7 @@ struct ipu3_uapi_acc_param {
struct ipu3_uapi_yuvp1_yds_config yds2 __attribute__((aligned(32)));
struct ipu3_uapi_yuvp2_tcc_static_config tcc __attribute__((aligned(32)));
struct ipu3_uapi_anr_config anr;
- struct ipu3_uapi_awb_fr_config_s awb_fr;
+ struct ipu3_uapi_awb_fr_config_s awb_fr __attribute__((aligned(32)));
struct ipu3_uapi_ae_config ae;
struct ipu3_uapi_af_config_s af;
struct ipu3_uapi_awb_config awb;
diff --git a/drivers/staging/media/ipu3/ipu3-css-fw.c b/drivers/staging/media/ipu3/ipu3-css-fw.c
index 4122d4e42db6..45aff76198e2 100644
--- a/drivers/staging/media/ipu3/ipu3-css-fw.c
+++ b/drivers/staging/media/ipu3/ipu3-css-fw.c
@@ -200,13 +200,11 @@ int imgu_css_fw_init(struct imgu_css *css)
goto bad_fw;
for (j = 0; j < bi->info.isp.num_output_formats; j++)
- if (bi->info.isp.output_formats[j] < 0 ||
- bi->info.isp.output_formats[j] >=
+ if (bi->info.isp.output_formats[j] >=
IMGU_ABI_FRAME_FORMAT_NUM)
goto bad_fw;
for (j = 0; j < bi->info.isp.num_vf_formats; j++)
- if (bi->info.isp.vf_formats[j] < 0 ||
- bi->info.isp.vf_formats[j] >=
+ if (bi->info.isp.vf_formats[j] >=
IMGU_ABI_FRAME_FORMAT_NUM)
goto bad_fw;
diff --git a/drivers/staging/media/ipu3/ipu3-css.c b/drivers/staging/media/ipu3/ipu3-css.c
index 23cf5b2cfe8b..fd1ed84c400c 100644
--- a/drivers/staging/media/ipu3/ipu3-css.c
+++ b/drivers/staging/media/ipu3/ipu3-css.c
@@ -24,9 +24,8 @@
#define IPU3_CSS_MAX_H 3136
#define IPU3_CSS_MAX_W 4224
-/* filter size from graph settings is fixed as 4 */
-#define FILTER_SIZE 4
-#define MIN_ENVELOPE 8
+/* minimal envelope size (GDC in - out) should be 4 */
+#define MIN_ENVELOPE 4
/*
* pre-allocated buffer size for CSS ABI, auxiliary frames
@@ -1827,9 +1826,9 @@ int imgu_css_fmt_try(struct imgu_css *css,
vf->width = imgu_css_adjust(vf->width, VF_ALIGN_W);
vf->height = imgu_css_adjust(vf->height, 1);
- s = (bds->width - gdc->width) / 2 - FILTER_SIZE;
+ s = (bds->width - gdc->width) / 2;
env->width = s < MIN_ENVELOPE ? MIN_ENVELOPE : s;
- s = (bds->height - gdc->height) / 2 - FILTER_SIZE;
+ s = (bds->height - gdc->height) / 2;
env->height = s < MIN_ENVELOPE ? MIN_ENVELOPE : s;
ret = imgu_css_find_binary(css, pipe, q, r);
@@ -2251,9 +2250,8 @@ int imgu_css_set_parameters(struct imgu_css *css, unsigned int pipe,
css_pipe->aux_frames[a].height,
css_pipe->rect[g].width,
css_pipe->rect[g].height,
- css_pipe->rect[e].width + FILTER_SIZE,
- css_pipe->rect[e].height +
- FILTER_SIZE);
+ css_pipe->rect[e].width,
+ css_pipe->rect[e].height);
}
}
diff --git a/drivers/staging/media/ipu3/ipu3-dmamap.c b/drivers/staging/media/ipu3/ipu3-dmamap.c
index d978a00e1e0b..7431322379f6 100644
--- a/drivers/staging/media/ipu3/ipu3-dmamap.c
+++ b/drivers/staging/media/ipu3/ipu3-dmamap.c
@@ -31,12 +31,11 @@ static void imgu_dmamap_free_buffer(struct page **pages,
* Based on the implementation of __iommu_dma_alloc_pages()
* defined in drivers/iommu/dma-iommu.c
*/
-static struct page **imgu_dmamap_alloc_buffer(size_t size,
- unsigned long order_mask,
- gfp_t gfp)
+static struct page **imgu_dmamap_alloc_buffer(size_t size, gfp_t gfp)
{
struct page **pages;
unsigned int i = 0, count = size >> PAGE_SHIFT;
+ unsigned int order_mask = 1;
const gfp_t high_order_gfp = __GFP_NOWARN | __GFP_NORETRY;
/* Allocate mem for array of page ptrs */
@@ -45,10 +44,6 @@ static struct page **imgu_dmamap_alloc_buffer(size_t size,
if (!pages)
return NULL;
- order_mask &= (2U << MAX_ORDER) - 1;
- if (!order_mask)
- return NULL;
-
gfp |= __GFP_HIGHMEM | __GFP_ZERO;
while (count) {
@@ -99,7 +94,6 @@ void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map,
size_t len)
{
unsigned long shift = iova_shift(&imgu->iova_domain);
- unsigned int alloc_sizes = imgu->mmu->pgsize_bitmap;
struct device *dev = &imgu->pci_dev->dev;
size_t size = PAGE_ALIGN(len);
struct page **pages;
@@ -114,8 +108,7 @@ void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map,
if (!iova)
return NULL;
- pages = imgu_dmamap_alloc_buffer(size, alloc_sizes >> PAGE_SHIFT,
- GFP_KERNEL);
+ pages = imgu_dmamap_alloc_buffer(size, GFP_KERNEL);
if (!pages)
goto out_free_iova;
@@ -257,7 +250,7 @@ int imgu_dmamap_init(struct imgu_device *imgu)
if (ret)
return ret;
- order = __ffs(imgu->mmu->pgsize_bitmap);
+ order = __ffs(IPU3_PAGE_SIZE);
base_pfn = max_t(unsigned long, 1, imgu->mmu->aperture_start >> order);
init_iova_domain(&imgu->iova_domain, 1UL << order, base_pfn);
diff --git a/drivers/staging/media/ipu3/ipu3-mmu.c b/drivers/staging/media/ipu3/ipu3-mmu.c
index cfc2bdfb14b3..3d969b0522ab 100644
--- a/drivers/staging/media/ipu3/ipu3-mmu.c
+++ b/drivers/staging/media/ipu3/ipu3-mmu.c
@@ -20,9 +20,6 @@
#include "ipu3-mmu.h"
-#define IPU3_PAGE_SHIFT 12
-#define IPU3_PAGE_SIZE (1UL << IPU3_PAGE_SHIFT)
-
#define IPU3_PT_BITS 10
#define IPU3_PT_PTES (1UL << IPU3_PT_BITS)
#define IPU3_PT_SIZE (IPU3_PT_PTES << 2)
@@ -238,62 +235,31 @@ static int __imgu_mmu_map(struct imgu_mmu *mmu, unsigned long iova,
return 0;
}
-/*
- * The following four functions are implemented based on iommu.c
- * drivers/iommu/iommu.c/iommu_pgsize().
+/**
+ * imgu_mmu_map - map a buffer to a physical address
+ *
+ * @info: MMU mappable range
+ * @iova: the virtual address
+ * @paddr: the physical address
+ * @size: length of the mappable area
+ *
+ * The function has been adapted from iommu_map() in
+ * drivers/iommu/iommu.c .
*/
-static size_t imgu_mmu_pgsize(unsigned long pgsize_bitmap,
- unsigned long addr_merge, size_t size)
-{
- unsigned int pgsize_idx;
- size_t pgsize;
-
- /* Max page size that still fits into 'size' */
- pgsize_idx = __fls(size);
-
- /* need to consider alignment requirements ? */
- if (likely(addr_merge)) {
- /* Max page size allowed by address */
- unsigned int align_pgsize_idx = __ffs(addr_merge);
-
- pgsize_idx = min(pgsize_idx, align_pgsize_idx);
- }
-
- /* build a mask of acceptable page sizes */
- pgsize = (1UL << (pgsize_idx + 1)) - 1;
-
- /* throw away page sizes not supported by the hardware */
- pgsize &= pgsize_bitmap;
-
- /* make sure we're still sane */
- WARN_ON(!pgsize);
-
- /* pick the biggest page */
- pgsize_idx = __fls(pgsize);
- pgsize = 1UL << pgsize_idx;
-
- return pgsize;
-}
-
-/* drivers/iommu/iommu.c/iommu_map() */
int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,
phys_addr_t paddr, size_t size)
{
struct imgu_mmu *mmu = to_imgu_mmu(info);
- unsigned int min_pagesz;
int ret = 0;
- /* find out the minimum page size supported */
- min_pagesz = 1 << __ffs(mmu->geometry.pgsize_bitmap);
-
/*
* both the virtual address and the physical one, as well as
* the size of the mapping, must be aligned (at least) to the
* size of the smallest page supported by the hardware
*/
- if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
- dev_err(mmu->dev, "unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
- iova, &paddr, size, min_pagesz);
+ if (!IS_ALIGNED(iova | paddr | size, IPU3_PAGE_SIZE)) {
+ dev_err(mmu->dev, "unaligned: iova 0x%lx pa %pa size 0x%zx\n",
+ iova, &paddr, size);
return -EINVAL;
}
@@ -301,19 +267,15 @@ int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,
iova, &paddr, size);
while (size) {
- size_t pgsize = imgu_mmu_pgsize(mmu->geometry.pgsize_bitmap,
- iova | paddr, size);
-
- dev_dbg(mmu->dev, "mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
- iova, &paddr, pgsize);
+ dev_dbg(mmu->dev, "mapping: iova 0x%lx pa %pa\n", iova, &paddr);
ret = __imgu_mmu_map(mmu, iova, paddr);
if (ret)
break;
- iova += pgsize;
- paddr += pgsize;
- size -= pgsize;
+ iova += IPU3_PAGE_SIZE;
+ paddr += IPU3_PAGE_SIZE;
+ size -= IPU3_PAGE_SIZE;
}
call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);
@@ -321,28 +283,36 @@ int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,
return ret;
}
-/* drivers/iommu/iommu.c/default_iommu_map_sg() */
+/**
+ * imgu_mmu_map_sg - Map a scatterlist
+ *
+ * @info: MMU mappable range
+ * @iova: the virtual address
+ * @sg: the scatterlist to map
+ * @nents: number of entries in the scatterlist
+ *
+ * The function has been adapted from default_iommu_map_sg() in
+ * drivers/iommu/iommu.c .
+ */
size_t imgu_mmu_map_sg(struct imgu_mmu_info *info, unsigned long iova,
struct scatterlist *sg, unsigned int nents)
{
struct imgu_mmu *mmu = to_imgu_mmu(info);
struct scatterlist *s;
size_t s_length, mapped = 0;
- unsigned int i, min_pagesz;
+ unsigned int i;
int ret;
- min_pagesz = 1 << __ffs(mmu->geometry.pgsize_bitmap);
-
for_each_sg(sg, s, nents, i) {
phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
s_length = s->length;
- if (!IS_ALIGNED(s->offset, min_pagesz))
+ if (!IS_ALIGNED(s->offset, IPU3_PAGE_SIZE))
goto out_err;
- /* must be min_pagesz aligned to be mapped singlely */
- if (i == nents - 1 && !IS_ALIGNED(s->length, min_pagesz))
+ /* must be IPU3_PAGE_SIZE aligned to be mapped singly */
+ if (i == nents - 1 && !IS_ALIGNED(s->length, IPU3_PAGE_SIZE))
s_length = PAGE_ALIGN(s->length);
ret = imgu_mmu_map(info, iova + mapped, phys, s_length);
@@ -394,25 +364,30 @@ static size_t __imgu_mmu_unmap(struct imgu_mmu *mmu,
return unmap;
}
-/* drivers/iommu/iommu.c/iommu_unmap() */
+/**
+ * imgu_mmu_unmap - Unmap a buffer
+ *
+ * @info: MMU mappable range
+ * @iova: the virtual address
+ * @size: the length of the buffer
+ *
+ * The function has been adapted from iommu_unmap() in
+ * drivers/iommu/iommu.c .
+ */
size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova,
size_t size)
{
struct imgu_mmu *mmu = to_imgu_mmu(info);
size_t unmapped_page, unmapped = 0;
- unsigned int min_pagesz;
-
- /* find out the minimum page size supported */
- min_pagesz = 1 << __ffs(mmu->geometry.pgsize_bitmap);
/*
* The virtual address, as well as the size of the mapping, must be
* aligned (at least) to the size of the smallest page supported
* by the hardware
*/
- if (!IS_ALIGNED(iova | size, min_pagesz)) {
- dev_err(mmu->dev, "unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
- iova, size, min_pagesz);
+ if (!IS_ALIGNED(iova | size, IPU3_PAGE_SIZE)) {
+ dev_err(mmu->dev, "unaligned: iova 0x%lx size 0x%zx\n",
+ iova, size);
return -EINVAL;
}
@@ -423,10 +398,7 @@ size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova,
* or we hit an area that isn't mapped.
*/
while (unmapped < size) {
- size_t pgsize = imgu_mmu_pgsize(mmu->geometry.pgsize_bitmap,
- iova, size - unmapped);
-
- unmapped_page = __imgu_mmu_unmap(mmu, iova, pgsize);
+ unmapped_page = __imgu_mmu_unmap(mmu, iova, IPU3_PAGE_SIZE);
if (!unmapped_page)
break;
@@ -444,6 +416,7 @@ size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova,
/**
* imgu_mmu_init() - initialize IPU3 MMU block
+ *
* @parent: struct device parent
* @base: IOMEM base of hardware registers.
*
@@ -505,7 +478,6 @@ struct imgu_mmu_info *imgu_mmu_init(struct device *parent, void __iomem *base)
mmu->geometry.aperture_start = 0;
mmu->geometry.aperture_end = DMA_BIT_MASK(IPU3_MMU_ADDRESS_BITS);
- mmu->geometry.pgsize_bitmap = IPU3_PAGE_SIZE;
return &mmu->geometry;
@@ -523,7 +495,8 @@ fail_group:
/**
* imgu_mmu_exit() - clean up IPU3 MMU block
- * @info: IPU3 MMU private data
+ *
+ * @info: MMU mappable range
*/
void imgu_mmu_exit(struct imgu_mmu_info *info)
{
diff --git a/drivers/staging/media/ipu3/ipu3-mmu.h b/drivers/staging/media/ipu3/ipu3-mmu.h
index fa58827eb19c..a5f0bca7e7e0 100644
--- a/drivers/staging/media/ipu3/ipu3-mmu.h
+++ b/drivers/staging/media/ipu3/ipu3-mmu.h
@@ -5,17 +5,18 @@
#ifndef __IPU3_MMU_H
#define __IPU3_MMU_H
+#define IPU3_PAGE_SHIFT 12
+#define IPU3_PAGE_SIZE (1UL << IPU3_PAGE_SHIFT)
+
/**
* struct imgu_mmu_info - Describes mmu geometry
*
* @aperture_start: First address that can be mapped
* @aperture_end: Last address that can be mapped
- * @pgsize_bitmap: Bitmap of page sizes in use
*/
struct imgu_mmu_info {
dma_addr_t aperture_start;
dma_addr_t aperture_end;
- unsigned long pgsize_bitmap;
};
struct device;
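With imgu_mmu_pgsize() removed, imgu_mmu_map() and imgu_mmu_unmap() now walk
mappings in fixed IPU3_PAGE_SIZE steps, so callers must pass 4 KiB aligned
addresses and sizes or get -EINVAL. A minimal caller sketch, hypothetical and
not part of the patch; only imgu_mmu_map(), imgu_mmu_unmap() and
IPU3_PAGE_SIZE are taken from the code above:

/* Hypothetical caller: map a single hardware page at a fixed IOVA. */
static int example_map_one_page(struct imgu_mmu_info *info, struct page *pg)
{
	unsigned long iova = 0x10000;		/* IPU3_PAGE_SIZE aligned */
	phys_addr_t paddr = page_to_phys(pg);	/* page aligned by construction */
	size_t len = IPU3_PAGE_SIZE;		/* multiple of the 4 KiB page */
	int ret;

	ret = imgu_mmu_map(info, iova, paddr, len); /* -EINVAL if unaligned */
	if (ret)
		return ret;

	imgu_mmu_unmap(info, iova, len);	/* unmap steps by IPU3_PAGE_SIZE too */
	return 0;
}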
diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
index a7bc22040ed8..3c7ad1eed434 100644
--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
+++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
@@ -955,12 +955,12 @@ static const struct v4l2_file_operations imgu_v4l2_fops = {
static const struct v4l2_ioctl_ops imgu_v4l2_ioctl_ops = {
.vidioc_querycap = imgu_vidioc_querycap,
- .vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap_mplane = imgu_vidioc_g_fmt,
.vidioc_s_fmt_vid_cap_mplane = imgu_vidioc_s_fmt,
.vidioc_try_fmt_vid_cap_mplane = imgu_vidioc_try_fmt,
- .vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out,
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
.vidioc_g_fmt_vid_out_mplane = imgu_vidioc_g_fmt,
.vidioc_s_fmt_vid_out_mplane = imgu_vidioc_s_fmt,
.vidioc_try_fmt_vid_out_mplane = imgu_vidioc_try_fmt,
diff --git a/drivers/staging/media/meson/vdec/Kconfig b/drivers/staging/media/meson/vdec/Kconfig
new file mode 100644
index 000000000000..9e1450193392
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config VIDEO_MESON_VDEC
+ tristate "Amlogic video decoder driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
+ depends on ARCH_MESON || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ select MESON_CANVAS
+ help
+ Support for the video decoder found in gxbb/gxl/gxm chips.
diff --git a/drivers/staging/media/meson/vdec/Makefile b/drivers/staging/media/meson/vdec/Makefile
new file mode 100644
index 000000000000..6bea129084b7
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for Amlogic meson video decoder driver
+
+meson-vdec-objs = esparser.o vdec.o vdec_helpers.o vdec_platform.o
+meson-vdec-objs += vdec_1.o
+meson-vdec-objs += codec_mpeg12.o
+
+obj-$(CONFIG_VIDEO_MESON_VDEC) += meson-vdec.o
diff --git a/drivers/staging/media/meson/vdec/TODO b/drivers/staging/media/meson/vdec/TODO
new file mode 100644
index 000000000000..70ae990cf13b
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/TODO
@@ -0,0 +1,8 @@
+This driver is in staging until the V4L2 documentation about stateful video
+decoders is finalized, as well as the corresponding compliance tests.
+
+At the moment it is not guaranteed to work properly with a userspace
+stack that follows the latest version of the specification, especially
+with compression standards like MPEG1/2, where the driver does not support
+dynamic resolution switching, including the initial switch used to determine
+the coded resolution.
diff --git a/drivers/staging/media/meson/vdec/codec_mpeg12.c b/drivers/staging/media/meson/vdec/codec_mpeg12.c
new file mode 100644
index 000000000000..48869cc3d973
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/codec_mpeg12.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "codec_mpeg12.h"
+#include "dos_regs.h"
+#include "vdec_helpers.h"
+
+#define SIZE_WORKSPACE SZ_128K
+/* Offset subtracted by the firmware from the workspace paddr */
+#define WORKSPACE_OFFSET (5 * SZ_1K)
+
+/* map firmware registers to known MPEG1/2 functions */
+#define MREG_SEQ_INFO AV_SCRATCH_4
+ #define MPEG2_SEQ_DAR_MASK GENMASK(3, 0)
+ #define MPEG2_DAR_4_3 2
+ #define MPEG2_DAR_16_9 3
+ #define MPEG2_DAR_221_100 4
+#define MREG_PIC_INFO AV_SCRATCH_5
+#define MREG_PIC_WIDTH AV_SCRATCH_6
+#define MREG_PIC_HEIGHT AV_SCRATCH_7
+#define MREG_BUFFERIN AV_SCRATCH_8
+#define MREG_BUFFEROUT AV_SCRATCH_9
+#define MREG_CMD AV_SCRATCH_A
+#define MREG_CO_MV_START AV_SCRATCH_B
+#define MREG_ERROR_COUNT AV_SCRATCH_C
+#define MREG_FRAME_OFFSET AV_SCRATCH_D
+#define MREG_WAIT_BUFFER AV_SCRATCH_E
+#define MREG_FATAL_ERROR AV_SCRATCH_F
+
+#define PICINFO_PROG 0x00008000
+#define PICINFO_TOP_FIRST 0x00002000
+
+struct codec_mpeg12 {
+ /* Buffer for the MPEG1/2 Workspace */
+ void *workspace_vaddr;
+ dma_addr_t workspace_paddr;
+};
+
+static const u8 eos_sequence[SZ_1K] = { 0x00, 0x00, 0x01, 0xB7 };
+
+static const u8 *codec_mpeg12_eos_sequence(u32 *len)
+{
+ *len = ARRAY_SIZE(eos_sequence);
+ return eos_sequence;
+}
+
+static int codec_mpeg12_can_recycle(struct amvdec_core *core)
+{
+ return !amvdec_read_dos(core, MREG_BUFFERIN);
+}
+
+static void codec_mpeg12_recycle(struct amvdec_core *core, u32 buf_idx)
+{
+ amvdec_write_dos(core, MREG_BUFFERIN, buf_idx + 1);
+}
+
+static int codec_mpeg12_start(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ struct codec_mpeg12 *mpeg12;
+ int ret;
+
+ mpeg12 = kzalloc(sizeof(*mpeg12), GFP_KERNEL);
+ if (!mpeg12)
+ return -ENOMEM;
+
+ /* Allocate some memory for the MPEG1/2 decoder's state */
+ mpeg12->workspace_vaddr = dma_alloc_coherent(core->dev, SIZE_WORKSPACE,
+ &mpeg12->workspace_paddr,
+ GFP_KERNEL);
+ if (!mpeg12->workspace_vaddr) {
+ dev_err(core->dev, "Failed to request MPEG 1/2 Workspace\n");
+ ret = -ENOMEM;
+ goto free_mpeg12;
+ }
+
+ ret = amvdec_set_canvases(sess, (u32[]){ AV_SCRATCH_0, 0 },
+ (u32[]){ 8, 0 });
+ if (ret)
+ goto free_workspace;
+
+ amvdec_write_dos(core, POWER_CTL_VLD, BIT(4));
+ amvdec_write_dos(core, MREG_CO_MV_START,
+ mpeg12->workspace_paddr + WORKSPACE_OFFSET);
+
+ amvdec_write_dos(core, MPEG1_2_REG, 0);
+ amvdec_write_dos(core, PSCALE_CTRL, 0);
+ amvdec_write_dos(core, PIC_HEAD_INFO, 0x380);
+ amvdec_write_dos(core, M4_CONTROL_REG, 0);
+ amvdec_write_dos(core, MREG_BUFFERIN, 0);
+ amvdec_write_dos(core, MREG_BUFFEROUT, 0);
+ amvdec_write_dos(core, MREG_CMD, (sess->width << 16) | sess->height);
+ amvdec_write_dos(core, MREG_ERROR_COUNT, 0);
+ amvdec_write_dos(core, MREG_FATAL_ERROR, 0);
+ amvdec_write_dos(core, MREG_WAIT_BUFFER, 0);
+
+ sess->keyframe_found = 1;
+ sess->priv = mpeg12;
+
+ return 0;
+
+free_workspace:
+ dma_free_coherent(core->dev, SIZE_WORKSPACE, mpeg12->workspace_vaddr,
+ mpeg12->workspace_paddr);
+free_mpeg12:
+ kfree(mpeg12);
+
+ return ret;
+}
+
+static int codec_mpeg12_stop(struct amvdec_session *sess)
+{
+ struct codec_mpeg12 *mpeg12 = sess->priv;
+ struct amvdec_core *core = sess->core;
+
+ if (mpeg12->workspace_vaddr)
+ dma_free_coherent(core->dev, SIZE_WORKSPACE,
+ mpeg12->workspace_vaddr,
+ mpeg12->workspace_paddr);
+
+ return 0;
+}
+
+static void codec_mpeg12_update_dar(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ u32 seq = amvdec_read_dos(core, MREG_SEQ_INFO);
+ u32 ar = seq & MPEG2_SEQ_DAR_MASK;
+
+ switch (ar) {
+ case MPEG2_DAR_4_3:
+ amvdec_set_par_from_dar(sess, 4, 3);
+ break;
+ case MPEG2_DAR_16_9:
+ amvdec_set_par_from_dar(sess, 16, 9);
+ break;
+ case MPEG2_DAR_221_100:
+ amvdec_set_par_from_dar(sess, 221, 100);
+ break;
+ default:
+ sess->pixelaspect.numerator = 1;
+ sess->pixelaspect.denominator = 1;
+ break;
+ }
+}
+
+static irqreturn_t codec_mpeg12_threaded_isr(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ u32 reg;
+ u32 pic_info;
+ u32 is_progressive;
+ u32 buffer_index;
+ u32 field = V4L2_FIELD_NONE;
+ u32 offset;
+
+ amvdec_write_dos(core, ASSIST_MBOX1_CLR_REG, 1);
+ reg = amvdec_read_dos(core, MREG_FATAL_ERROR);
+ if (reg == 1) {
+ dev_err(core->dev, "MPEG1/2 fatal error\n");
+ amvdec_abort(sess);
+ return IRQ_HANDLED;
+ }
+
+ reg = amvdec_read_dos(core, MREG_BUFFEROUT);
+ if (!reg)
+ return IRQ_HANDLED;
+
+ /* Unclear what this means */
+ if ((reg & GENMASK(23, 17)) == GENMASK(23, 17))
+ goto end;
+
+ pic_info = amvdec_read_dos(core, MREG_PIC_INFO);
+ is_progressive = pic_info & PICINFO_PROG;
+
+ if (!is_progressive)
+ field = (pic_info & PICINFO_TOP_FIRST) ?
+ V4L2_FIELD_INTERLACED_TB :
+ V4L2_FIELD_INTERLACED_BT;
+
+ codec_mpeg12_update_dar(sess);
+ buffer_index = ((reg & 0xf) - 1) & 7;
+ offset = amvdec_read_dos(core, MREG_FRAME_OFFSET);
+ amvdec_dst_buf_done_idx(sess, buffer_index, offset, field);
+
+end:
+ amvdec_write_dos(core, MREG_BUFFEROUT, 0);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t codec_mpeg12_isr(struct amvdec_session *sess)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+struct amvdec_codec_ops codec_mpeg12_ops = {
+ .start = codec_mpeg12_start,
+ .stop = codec_mpeg12_stop,
+ .isr = codec_mpeg12_isr,
+ .threaded_isr = codec_mpeg12_threaded_isr,
+ .can_recycle = codec_mpeg12_can_recycle,
+ .recycle = codec_mpeg12_recycle,
+ .eos_sequence = codec_mpeg12_eos_sequence,
+};
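codec_mpeg12_update_dar() above only decodes which display aspect ratio the
sequence header signals; converting that into the pixel aspect ratio stored in
sess->pixelaspect additionally needs the coded width and height.
amvdec_set_par_from_dar() lives in vdec_helpers.c (not shown here), so the
arithmetic below is an assumption about what it computes, written as a
standalone sketch:

/*
 * Assumed conversion: PAR = DAR * coded_height / coded_width.
 * Example: a 720x576 stream flagged MPEG2_DAR_4_3 gives
 * numerator = 4 * 576 = 2304, denominator = 3 * 720 = 2160, i.e. 16:15.
 */
static void example_dar_to_par(u32 dar_num, u32 dar_den, u32 width, u32 height,
			       struct v4l2_fract *par)
{
	par->numerator = dar_num * height;
	par->denominator = dar_den * width;
}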
diff --git a/drivers/staging/media/meson/vdec/codec_mpeg12.h b/drivers/staging/media/meson/vdec/codec_mpeg12.h
new file mode 100644
index 000000000000..43cab5f39ca0
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/codec_mpeg12.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#ifndef __MESON_VDEC_CODEC_MPEG12_H_
+#define __MESON_VDEC_CODEC_MPEG12_H_
+
+#include "vdec.h"
+
+extern struct amvdec_codec_ops codec_mpeg12_ops;
+
+#endif
diff --git a/drivers/staging/media/meson/vdec/dos_regs.h b/drivers/staging/media/meson/vdec/dos_regs.h
new file mode 100644
index 000000000000..abd810542dbb
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/dos_regs.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#ifndef __MESON_VDEC_DOS_REGS_H_
+#define __MESON_VDEC_DOS_REGS_H_
+
+/* DOS registers */
+#define VDEC_ASSIST_AMR1_INT8 0x00b4
+
+#define ASSIST_MBOX1_CLR_REG 0x01d4
+#define ASSIST_MBOX1_MASK 0x01d8
+
+#define MPSR 0x0c04
+#define MCPU_INTR_MSK 0x0c10
+#define CPSR 0x0c84
+
+#define IMEM_DMA_CTRL 0x0d00
+#define IMEM_DMA_ADR 0x0d04
+#define IMEM_DMA_COUNT 0x0d08
+#define LMEM_DMA_CTRL 0x0d40
+
+#define MC_STATUS0 0x2424
+#define MC_CTRL1 0x242c
+
+#define PSCALE_RST 0x2440
+#define PSCALE_CTRL 0x2444
+#define PSCALE_BMEM_ADDR 0x247c
+#define PSCALE_BMEM_DAT 0x2480
+
+#define DBLK_CTRL 0x2544
+#define DBLK_STATUS 0x254c
+
+#define GCLK_EN 0x260c
+#define MDEC_PIC_DC_CTRL 0x2638
+#define MDEC_PIC_DC_STATUS 0x263c
+#define ANC0_CANVAS_ADDR 0x2640
+#define MDEC_PIC_DC_THRESH 0x26e0
+
+/* Firmware interface registers */
+#define AV_SCRATCH_0 0x2700
+#define AV_SCRATCH_1 0x2704
+#define AV_SCRATCH_2 0x2708
+#define AV_SCRATCH_3 0x270c
+#define AV_SCRATCH_4 0x2710
+#define AV_SCRATCH_5 0x2714
+#define AV_SCRATCH_6 0x2718
+#define AV_SCRATCH_7 0x271c
+#define AV_SCRATCH_8 0x2720
+#define AV_SCRATCH_9 0x2724
+#define AV_SCRATCH_A 0x2728
+#define AV_SCRATCH_B 0x272c
+#define AV_SCRATCH_C 0x2730
+#define AV_SCRATCH_D 0x2734
+#define AV_SCRATCH_E 0x2738
+#define AV_SCRATCH_F 0x273c
+#define AV_SCRATCH_G 0x2740
+#define AV_SCRATCH_H 0x2744
+#define AV_SCRATCH_I 0x2748
+#define AV_SCRATCH_J 0x274c
+#define AV_SCRATCH_K 0x2750
+#define AV_SCRATCH_L 0x2754
+
+#define MPEG1_2_REG 0x3004
+#define PIC_HEAD_INFO 0x300c
+#define POWER_CTL_VLD 0x3020
+#define M4_CONTROL_REG 0x30a4
+
+/* Stream Buffer (stbuf) regs */
+#define VLD_MEM_VIFIFO_START_PTR 0x3100
+#define VLD_MEM_VIFIFO_CURR_PTR 0x3104
+#define VLD_MEM_VIFIFO_END_PTR 0x3108
+#define VLD_MEM_VIFIFO_CONTROL 0x3110
+ #define MEM_FIFO_CNT_BIT 16
+ #define MEM_FILL_ON_LEVEL BIT(10)
+ #define MEM_CTRL_EMPTY_EN BIT(2)
+ #define MEM_CTRL_FILL_EN BIT(1)
+#define VLD_MEM_VIFIFO_WP 0x3114
+#define VLD_MEM_VIFIFO_RP 0x3118
+#define VLD_MEM_VIFIFO_LEVEL 0x311c
+#define VLD_MEM_VIFIFO_BUF_CNTL 0x3120
+ #define MEM_BUFCTRL_MANUAL BIT(1)
+#define VLD_MEM_VIFIFO_WRAP_COUNT 0x3144
+
+#define DCAC_DMA_CTRL 0x3848
+
+#define DOS_SW_RESET0 0xfc00
+#define DOS_GCLK_EN0 0xfc04
+#define DOS_GEN_CTRL0 0xfc08
+#define DOS_MEM_PD_VDEC 0xfcc0
+#define DOS_MEM_PD_HEVC 0xfccc
+#define DOS_SW_RESET3 0xfcd0
+#define DOS_GCLK_EN3 0xfcd4
+#define DOS_VDEC_MCRCC_STALL_CTRL 0xfd00
+
+#endif
diff --git a/drivers/staging/media/meson/vdec/esparser.c b/drivers/staging/media/meson/vdec/esparser.c
new file mode 100644
index 000000000000..3a21a8cec799
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/esparser.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ *
+ * The Elementary Stream Parser is a HW bitstream parser.
+ * It reads bitstream buffers and feeds them to the VIFIFO
+ */
+
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+#include <linux/interrupt.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "dos_regs.h"
+#include "esparser.h"
+#include "vdec_helpers.h"
+
+/* PARSER REGS (CBUS) */
+#define PARSER_CONTROL 0x00
+ #define ES_PACK_SIZE_BIT 8
+ #define ES_WRITE BIT(5)
+ #define ES_SEARCH BIT(1)
+ #define ES_PARSER_START BIT(0)
+#define PARSER_FETCH_ADDR 0x4
+#define PARSER_FETCH_CMD 0x8
+#define PARSER_CONFIG 0x14
+ #define PS_CFG_MAX_FETCH_CYCLE_BIT 0
+ #define PS_CFG_STARTCODE_WID_24_BIT 10
+ #define PS_CFG_MAX_ES_WR_CYCLE_BIT 12
+ #define PS_CFG_PFIFO_EMPTY_CNT_BIT 16
+#define PFIFO_WR_PTR 0x18
+#define PFIFO_RD_PTR 0x1c
+#define PARSER_SEARCH_PATTERN 0x24
+ #define ES_START_CODE_PATTERN 0x00000100
+#define PARSER_SEARCH_MASK 0x28
+ #define ES_START_CODE_MASK 0xffffff00
+ #define FETCH_ENDIAN_BIT 27
+#define PARSER_INT_ENABLE 0x2c
+ #define PARSER_INT_HOST_EN_BIT 8
+#define PARSER_INT_STATUS 0x30
+ #define PARSER_INTSTAT_SC_FOUND 1
+#define PARSER_ES_CONTROL 0x5c
+#define PARSER_VIDEO_START_PTR 0x80
+#define PARSER_VIDEO_END_PTR 0x84
+#define PARSER_VIDEO_WP 0x88
+#define PARSER_VIDEO_HOLE 0x90
+
+#define SEARCH_PATTERN_LEN 512
+
+static DECLARE_WAIT_QUEUE_HEAD(wq);
+static int search_done;
+
+static irqreturn_t esparser_isr(int irq, void *dev)
+{
+ int int_status;
+ struct amvdec_core *core = dev;
+
+ int_status = amvdec_read_parser(core, PARSER_INT_STATUS);
+ amvdec_write_parser(core, PARSER_INT_STATUS, int_status);
+
+ if (int_status & PARSER_INTSTAT_SC_FOUND) {
+ amvdec_write_parser(core, PFIFO_RD_PTR, 0);
+ amvdec_write_parser(core, PFIFO_WR_PTR, 0);
+ search_done = 1;
+ wake_up_interruptible(&wq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Pad the packet to at least 4 KiB, otherwise the VDEC unit won't trigger
+ * ISRs.
+ * Also append a start code 000001ff at the end to trigger
+ * the ESPARSER interrupt.
+ */
+static u32 esparser_pad_start_code(struct vb2_buffer *vb)
+{
+ u32 payload_size = vb2_get_plane_payload(vb, 0);
+ u32 pad_size = 0;
+ u8 *vaddr = vb2_plane_vaddr(vb, 0) + payload_size;
+
+ if (payload_size < ESPARSER_MIN_PACKET_SIZE) {
+ pad_size = ESPARSER_MIN_PACKET_SIZE - payload_size;
+ memset(vaddr, 0, pad_size);
+ }
+
+ memset(vaddr + pad_size, 0, SEARCH_PATTERN_LEN);
+ vaddr[pad_size] = 0x00;
+ vaddr[pad_size + 1] = 0x00;
+ vaddr[pad_size + 2] = 0x01;
+ vaddr[pad_size + 3] = 0xff;
+
+ return pad_size;
+}
+
+static int
+esparser_write_data(struct amvdec_core *core, dma_addr_t addr, u32 size)
+{
+ amvdec_write_parser(core, PFIFO_RD_PTR, 0);
+ amvdec_write_parser(core, PFIFO_WR_PTR, 0);
+ amvdec_write_parser(core, PARSER_CONTROL,
+ ES_WRITE |
+ ES_PARSER_START |
+ ES_SEARCH |
+ (size << ES_PACK_SIZE_BIT));
+
+ amvdec_write_parser(core, PARSER_FETCH_ADDR, addr);
+ amvdec_write_parser(core, PARSER_FETCH_CMD,
+ (7 << FETCH_ENDIAN_BIT) |
+ (size + SEARCH_PATTERN_LEN));
+
+ search_done = 0;
+ return wait_event_interruptible_timeout(wq, search_done, (HZ / 5));
+}
+
+static u32 esparser_vififo_get_free_space(struct amvdec_session *sess)
+{
+ u32 vififo_usage;
+ struct amvdec_ops *vdec_ops = sess->fmt_out->vdec_ops;
+ struct amvdec_core *core = sess->core;
+
+ vififo_usage = vdec_ops->vififo_level(sess);
+ vififo_usage += amvdec_read_parser(core, PARSER_VIDEO_HOLE);
+ vififo_usage += (6 * SZ_1K); // 6 KiB internal fifo
+
+ if (vififo_usage > sess->vififo_size) {
+ dev_warn(sess->core->dev,
+ "VIFIFO usage (%u) > VIFIFO size (%u)\n",
+ vififo_usage, sess->vififo_size);
+ return 0;
+ }
+
+ return sess->vififo_size - vififo_usage;
+}
+
+int esparser_queue_eos(struct amvdec_core *core, const u8 *data, u32 len)
+{
+ struct device *dev = core->dev;
+ void *eos_vaddr;
+ dma_addr_t eos_paddr;
+ int ret;
+
+ eos_vaddr = dma_alloc_coherent(dev, len + SEARCH_PATTERN_LEN,
+ &eos_paddr, GFP_KERNEL);
+ if (!eos_vaddr)
+ return -ENOMEM;
+
+ memcpy(eos_vaddr, data, len);
+ ret = esparser_write_data(core, eos_paddr, len);
+ dma_free_coherent(dev, len + SEARCH_PATTERN_LEN,
+ eos_vaddr, eos_paddr);
+
+ return ret;
+}
+
+static u32 esparser_get_offset(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ u32 offset = amvdec_read_parser(core, PARSER_VIDEO_WP) -
+ sess->vififo_paddr;
+
+ if (offset < sess->last_offset)
+ sess->wrap_count++;
+
+ sess->last_offset = offset;
+ offset += (sess->wrap_count * sess->vififo_size);
+
+ return offset;
+}
+
+static int
+esparser_queue(struct amvdec_session *sess, struct vb2_v4l2_buffer *vbuf)
+{
+ int ret;
+ struct vb2_buffer *vb = &vbuf->vb2_buf;
+ struct amvdec_core *core = sess->core;
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+ u32 num_dst_bufs = 0;
+ u32 payload_size = vb2_get_plane_payload(vb, 0);
+ dma_addr_t phy = vb2_dma_contig_plane_dma_addr(vb, 0);
+ u32 offset;
+ u32 pad_size;
+
+ if (codec_ops->num_pending_bufs)
+ num_dst_bufs = codec_ops->num_pending_bufs(sess);
+
+ num_dst_bufs += v4l2_m2m_num_dst_bufs_ready(sess->m2m_ctx);
+
+ if (esparser_vififo_get_free_space(sess) < payload_size ||
+ atomic_read(&sess->esparser_queued_bufs) >= num_dst_bufs)
+ return -EAGAIN;
+
+ v4l2_m2m_src_buf_remove_by_buf(sess->m2m_ctx, vbuf);
+
+ offset = esparser_get_offset(sess);
+
+ amvdec_add_ts_reorder(sess, vb->timestamp, offset);
+ dev_dbg(core->dev, "esparser: ts = %llu pld_size = %u offset = %08X\n",
+ vb->timestamp, payload_size, offset);
+
+ pad_size = esparser_pad_start_code(vb);
+ ret = esparser_write_data(core, phy, payload_size + pad_size);
+
+ if (ret <= 0) {
+ dev_warn(core->dev, "esparser: input parsing error\n");
+ amvdec_remove_ts(sess, vb->timestamp);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ amvdec_write_parser(core, PARSER_FETCH_CMD, 0);
+
+ return 0;
+ }
+
+ /* We need to wait until we parse the first keyframe.
+ * All buffers prior to the first keyframe must be dropped.
+ */
+ if (!sess->keyframe_found)
+ usleep_range(1000, 2000);
+
+ if (sess->keyframe_found)
+ atomic_inc(&sess->esparser_queued_bufs);
+ else
+ amvdec_remove_ts(sess, vb->timestamp);
+
+ vbuf->flags = 0;
+ vbuf->field = V4L2_FIELD_NONE;
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
+
+ return 0;
+}
+
+void esparser_queue_all_src(struct work_struct *work)
+{
+ struct v4l2_m2m_buffer *buf, *n;
+ struct amvdec_session *sess =
+ container_of(work, struct amvdec_session, esparser_queue_work);
+
+ mutex_lock(&sess->lock);
+ v4l2_m2m_for_each_src_buf_safe(sess->m2m_ctx, buf, n) {
+ if (sess->should_stop)
+ break;
+
+ if (esparser_queue(sess, &buf->vb) < 0)
+ break;
+ }
+ mutex_unlock(&sess->lock);
+}
+
+int esparser_power_up(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ struct amvdec_ops *vdec_ops = sess->fmt_out->vdec_ops;
+
+ reset_control_reset(core->esparser_reset);
+ amvdec_write_parser(core, PARSER_CONFIG,
+ (10 << PS_CFG_PFIFO_EMPTY_CNT_BIT) |
+ (1 << PS_CFG_MAX_ES_WR_CYCLE_BIT) |
+ (16 << PS_CFG_MAX_FETCH_CYCLE_BIT));
+
+ amvdec_write_parser(core, PFIFO_RD_PTR, 0);
+ amvdec_write_parser(core, PFIFO_WR_PTR, 0);
+
+ amvdec_write_parser(core, PARSER_SEARCH_PATTERN,
+ ES_START_CODE_PATTERN);
+ amvdec_write_parser(core, PARSER_SEARCH_MASK, ES_START_CODE_MASK);
+
+ amvdec_write_parser(core, PARSER_CONFIG,
+ (10 << PS_CFG_PFIFO_EMPTY_CNT_BIT) |
+ (1 << PS_CFG_MAX_ES_WR_CYCLE_BIT) |
+ (16 << PS_CFG_MAX_FETCH_CYCLE_BIT) |
+ (2 << PS_CFG_STARTCODE_WID_24_BIT));
+
+ amvdec_write_parser(core, PARSER_CONTROL,
+ (ES_SEARCH | ES_PARSER_START));
+
+ amvdec_write_parser(core, PARSER_VIDEO_START_PTR, sess->vififo_paddr);
+ amvdec_write_parser(core, PARSER_VIDEO_END_PTR,
+ sess->vififo_paddr + sess->vififo_size - 8);
+ amvdec_write_parser(core, PARSER_ES_CONTROL,
+ amvdec_read_parser(core, PARSER_ES_CONTROL) & ~1);
+
+ if (vdec_ops->conf_esparser)
+ vdec_ops->conf_esparser(sess);
+
+ amvdec_write_parser(core, PARSER_INT_STATUS, 0xffff);
+ amvdec_write_parser(core, PARSER_INT_ENABLE,
+ BIT(PARSER_INT_HOST_EN_BIT));
+
+ return 0;
+}
+
+int esparser_init(struct platform_device *pdev, struct amvdec_core *core)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+ int irq;
+
+ irq = platform_get_irq_byname(pdev, "esparser");
+ if (irq < 0) {
+ dev_err(dev, "Failed getting ESPARSER IRQ from dtb\n");
+ return irq;
+ }
+
+ ret = devm_request_irq(dev, irq, esparser_isr, IRQF_SHARED,
+ "esparserirq", core);
+ if (ret) {
+ dev_err(dev, "Failed requesting ESPARSER IRQ\n");
+ return ret;
+ }
+
+ core->esparser_reset =
+ devm_reset_control_get_exclusive(dev, "esparser");
+ if (IS_ERR(core->esparser_reset)) {
+ dev_err(dev, "Failed to get esparser_reset\n");
+ return PTR_ERR(core->esparser_reset);
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/media/meson/vdec/esparser.h b/drivers/staging/media/meson/vdec/esparser.h
new file mode 100644
index 000000000000..ff51fe7fda66
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/esparser.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#ifndef __MESON_VDEC_ESPARSER_H_
+#define __MESON_VDEC_ESPARSER_H_
+
+#include <linux/platform_device.h>
+
+#include "vdec.h"
+
+int esparser_init(struct platform_device *pdev, struct amvdec_core *core);
+int esparser_power_up(struct amvdec_session *sess);
+
+/**
+ * esparser_queue_eos() - write End Of Stream sequence to the ESPARSER
+ *
+ * @core: vdec core struct
+ * @data: EOS sequence to write
+ * @len: length in bytes of the EOS sequence
+ */
+int esparser_queue_eos(struct amvdec_core *core, const u8 *data, u32 len);
+
+/**
+ * esparser_queue_all_src() - work handler that writes as many src buffers
+ * as possible to the ESPARSER
+ *
+ * @work: the esparser_queue_work member of the owning amvdec_session
+ */
+void esparser_queue_all_src(struct work_struct *work);
+
+#define ESPARSER_MIN_PACKET_SIZE SZ_4K
+
+#endif
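esparser_queue_eos() is what the decoder core calls when userspace issues
V4L2_DEC_CMD_STOP (see vdec_decoder_cmd() in vdec.c below): the codec's
eos_sequence() hook supplies the pattern and its length, and the parser pushes
it through the VIFIFO so the firmware can finish decoding what is left in the
FIFO. A sketch of that pairing; every symbol comes from this patch, only the
wrapper function is hypothetical:

/* Hypothetical drain helper built from the hooks declared in this patch. */
static int example_drain(struct amvdec_session *sess)
{
	struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
	const u8 *data;
	u32 len;

	if (!codec_ops->eos_sequence)
		return -EINVAL;

	data = codec_ops->eos_sequence(&len);	/* e.g. 00 00 01 B7 for MPEG1/2 */
	return esparser_queue_eos(sess->core, data, len);
}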
diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c
new file mode 100644
index 000000000000..0a1a04fd5d13
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec.c
@@ -0,0 +1,1099 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-dev.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "vdec.h"
+#include "esparser.h"
+#include "vdec_helpers.h"
+
+struct dummy_buf {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
+/* 16 MiB for parsed bitstream swap exchange */
+#define SIZE_VIFIFO SZ_16M
+
+static u32 get_output_size(u32 width, u32 height)
+{
+ return ALIGN(width * height, SZ_64K);
+}
+
+u32 amvdec_get_output_size(struct amvdec_session *sess)
+{
+ return get_output_size(sess->width, sess->height);
+}
+EXPORT_SYMBOL_GPL(amvdec_get_output_size);
+
+static int vdec_codec_needs_recycle(struct amvdec_session *sess)
+{
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+
+ return codec_ops->can_recycle && codec_ops->recycle;
+}
+
+static int vdec_recycle_thread(void *data)
+{
+ struct amvdec_session *sess = data;
+ struct amvdec_core *core = sess->core;
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+ struct amvdec_buffer *tmp, *n;
+
+ while (!kthread_should_stop()) {
+ mutex_lock(&sess->bufs_recycle_lock);
+ list_for_each_entry_safe(tmp, n, &sess->bufs_recycle, list) {
+ if (!codec_ops->can_recycle(core))
+ break;
+
+ codec_ops->recycle(core, tmp->vb->index);
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+ mutex_unlock(&sess->bufs_recycle_lock);
+
+ usleep_range(5000, 10000);
+ }
+
+ return 0;
+}
+
+static int vdec_poweron(struct amvdec_session *sess)
+{
+ int ret;
+ struct amvdec_ops *vdec_ops = sess->fmt_out->vdec_ops;
+
+ ret = clk_prepare_enable(sess->core->dos_parser_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(sess->core->dos_clk);
+ if (ret)
+ goto disable_dos_parser;
+
+ ret = vdec_ops->start(sess);
+ if (ret)
+ goto disable_dos;
+
+ esparser_power_up(sess);
+
+ return 0;
+
+disable_dos:
+ clk_disable_unprepare(sess->core->dos_clk);
+disable_dos_parser:
+ clk_disable_unprepare(sess->core->dos_parser_clk);
+
+ return ret;
+}
+
+static void vdec_wait_inactive(struct amvdec_session *sess)
+{
+ /* We consider 50ms with no IRQ to be inactive. */
+ while (time_is_after_jiffies64(sess->last_irq_jiffies +
+ msecs_to_jiffies(50)))
+ msleep(25);
+}
+
+static void vdec_poweroff(struct amvdec_session *sess)
+{
+ struct amvdec_ops *vdec_ops = sess->fmt_out->vdec_ops;
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+
+ sess->should_stop = 1;
+ vdec_wait_inactive(sess);
+ if (codec_ops->drain)
+ codec_ops->drain(sess);
+
+ vdec_ops->stop(sess);
+ clk_disable_unprepare(sess->core->dos_clk);
+ clk_disable_unprepare(sess->core->dos_parser_clk);
+}
+
+static void
+vdec_queue_recycle(struct amvdec_session *sess, struct vb2_buffer *vb)
+{
+ struct amvdec_buffer *new_buf;
+
+ new_buf = kmalloc(sizeof(*new_buf), GFP_KERNEL);
+ if (!new_buf)
+ 	return;
+ new_buf->vb = vb;
+
+ mutex_lock(&sess->bufs_recycle_lock);
+ list_add_tail(&new_buf->list, &sess->bufs_recycle);
+ mutex_unlock(&sess->bufs_recycle_lock);
+}
+
+static void vdec_m2m_device_run(void *priv)
+{
+ struct amvdec_session *sess = priv;
+
+ schedule_work(&sess->esparser_queue_work);
+}
+
+static void vdec_m2m_job_abort(void *priv)
+{
+ struct amvdec_session *sess = priv;
+
+ v4l2_m2m_job_finish(sess->m2m_dev, sess->m2m_ctx);
+}
+
+static const struct v4l2_m2m_ops vdec_m2m_ops = {
+ .device_run = vdec_m2m_device_run,
+ .job_abort = vdec_m2m_job_abort,
+};
+
+static void process_num_buffers(struct vb2_queue *q,
+ struct amvdec_session *sess,
+ unsigned int *num_buffers,
+ bool is_reqbufs)
+{
+ const struct amvdec_format *fmt_out = sess->fmt_out;
+ unsigned int buffers_total = q->num_buffers + *num_buffers;
+
+ if (is_reqbufs && buffers_total < fmt_out->min_buffers)
+ *num_buffers = fmt_out->min_buffers - q->num_buffers;
+ if (buffers_total > fmt_out->max_buffers)
+ *num_buffers = fmt_out->max_buffers - q->num_buffers;
+
+ /* We need to program the complete CAPTURE buffer list
+ * in registers during start_streaming, and the firmwares
+ * are free to choose any of them to write frames to. As such,
+ * we need all of them to be queued into the driver
+ */
+ sess->num_dst_bufs = q->num_buffers + *num_buffers;
+ q->min_buffers_needed = max(fmt_out->min_buffers, sess->num_dst_bufs);
+}
+
+static int vdec_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct amvdec_session *sess = vb2_get_drv_priv(q);
+ u32 output_size = amvdec_get_output_size(sess);
+
+ if (*num_planes) {
+ switch (q->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (*num_planes != 1 || sizes[0] < output_size)
+ return -EINVAL;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ switch (sess->pixfmt_cap) {
+ case V4L2_PIX_FMT_NV12M:
+ if (*num_planes != 2 ||
+ sizes[0] < output_size ||
+ sizes[1] < output_size / 2)
+ return -EINVAL;
+ break;
+ case V4L2_PIX_FMT_YUV420M:
+ if (*num_planes != 3 ||
+ sizes[0] < output_size ||
+ sizes[1] < output_size / 4 ||
+ sizes[2] < output_size / 4)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ process_num_buffers(q, sess, num_buffers, false);
+ break;
+ }
+
+ return 0;
+ }
+
+ switch (q->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ sizes[0] = amvdec_get_output_size(sess);
+ *num_planes = 1;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ switch (sess->pixfmt_cap) {
+ case V4L2_PIX_FMT_NV12M:
+ sizes[0] = output_size;
+ sizes[1] = output_size / 2;
+ *num_planes = 2;
+ break;
+ case V4L2_PIX_FMT_YUV420M:
+ sizes[0] = output_size;
+ sizes[1] = output_size / 4;
+ sizes[2] = output_size / 4;
+ *num_planes = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ process_num_buffers(q, sess, num_buffers, true);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void vdec_vb2_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct amvdec_session *sess = vb2_get_drv_priv(vb->vb2_queue);
+ struct v4l2_m2m_ctx *m2m_ctx = sess->m2m_ctx;
+
+ v4l2_m2m_buf_queue(m2m_ctx, vbuf);
+
+ if (!sess->streamon_out || !sess->streamon_cap)
+ return;
+
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ vdec_codec_needs_recycle(sess))
+ vdec_queue_recycle(sess, vb);
+
+ schedule_work(&sess->esparser_queue_work);
+}
+
+static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct amvdec_session *sess = vb2_get_drv_priv(q);
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+ struct amvdec_core *core = sess->core;
+ struct vb2_v4l2_buffer *buf;
+ int ret;
+
+ if (core->cur_sess && core->cur_sess != sess) {
+ ret = -EBUSY;
+ goto bufs_done;
+ }
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ sess->streamon_out = 1;
+ else
+ sess->streamon_cap = 1;
+
+ if (!sess->streamon_out || !sess->streamon_cap)
+ return 0;
+
+ if (sess->status == STATUS_NEEDS_RESUME &&
+ q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ codec_ops->resume(sess);
+ sess->status = STATUS_RUNNING;
+ return 0;
+ }
+
+ sess->vififo_size = SIZE_VIFIFO;
+ sess->vififo_vaddr =
+ dma_alloc_coherent(sess->core->dev, sess->vififo_size,
+ &sess->vififo_paddr, GFP_KERNEL);
+ if (!sess->vififo_vaddr) {
+ dev_err(sess->core->dev, "Failed to request VIFIFO buffer\n");
+ ret = -ENOMEM;
+ goto bufs_done;
+ }
+
+ sess->should_stop = 0;
+ sess->keyframe_found = 0;
+ sess->last_offset = 0;
+ sess->wrap_count = 0;
+ sess->pixelaspect.numerator = 1;
+ sess->pixelaspect.denominator = 1;
+ atomic_set(&sess->esparser_queued_bufs, 0);
+ v4l2_ctrl_s_ctrl(sess->ctrl_min_buf_capture, 1);
+
+ ret = vdec_poweron(sess);
+ if (ret)
+ goto vififo_free;
+
+ sess->sequence_cap = 0;
+ if (vdec_codec_needs_recycle(sess))
+ sess->recycle_thread = kthread_run(vdec_recycle_thread, sess,
+ "vdec_recycle");
+
+ sess->status = STATUS_RUNNING;
+ core->cur_sess = sess;
+
+ return 0;
+
+vififo_free:
+ dma_free_coherent(sess->core->dev, sess->vififo_size,
+ sess->vififo_vaddr, sess->vififo_paddr);
+bufs_done:
+ while ((buf = v4l2_m2m_src_buf_remove(sess->m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
+ while ((buf = v4l2_m2m_dst_buf_remove(sess->m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ sess->streamon_out = 0;
+ else
+ sess->streamon_cap = 0;
+
+ return ret;
+}
+
+static void vdec_free_canvas(struct amvdec_session *sess)
+{
+ int i;
+
+ for (i = 0; i < sess->canvas_num; ++i)
+ meson_canvas_free(sess->core->canvas, sess->canvas_alloc[i]);
+
+ sess->canvas_num = 0;
+}
+
+static void vdec_reset_timestamps(struct amvdec_session *sess)
+{
+ struct amvdec_timestamp *tmp, *n;
+
+ list_for_each_entry_safe(tmp, n, &sess->timestamps, list) {
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+}
+
+static void vdec_reset_bufs_recycle(struct amvdec_session *sess)
+{
+ struct amvdec_buffer *tmp, *n;
+
+ list_for_each_entry_safe(tmp, n, &sess->bufs_recycle, list) {
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+}
+
+static void vdec_stop_streaming(struct vb2_queue *q)
+{
+ struct amvdec_session *sess = vb2_get_drv_priv(q);
+ struct amvdec_core *core = sess->core;
+ struct vb2_v4l2_buffer *buf;
+
+ if (sess->status == STATUS_RUNNING ||
+ (sess->status == STATUS_NEEDS_RESUME &&
+ (!sess->streamon_out || !sess->streamon_cap))) {
+ if (vdec_codec_needs_recycle(sess))
+ kthread_stop(sess->recycle_thread);
+
+ vdec_poweroff(sess);
+ vdec_free_canvas(sess);
+ dma_free_coherent(sess->core->dev, sess->vififo_size,
+ sess->vififo_vaddr, sess->vififo_paddr);
+ vdec_reset_timestamps(sess);
+ vdec_reset_bufs_recycle(sess);
+ kfree(sess->priv);
+ sess->priv = NULL;
+ core->cur_sess = NULL;
+ sess->status = STATUS_STOPPED;
+ }
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ while ((buf = v4l2_m2m_src_buf_remove(sess->m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+
+ sess->streamon_out = 0;
+ } else {
+ while ((buf = v4l2_m2m_dst_buf_remove(sess->m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+
+ sess->streamon_cap = 0;
+ }
+}
+
+static int vdec_vb2_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ vbuf->field = V4L2_FIELD_NONE;
+ return 0;
+}
+
+static const struct vb2_ops vdec_vb2_ops = {
+ .queue_setup = vdec_queue_setup,
+ .start_streaming = vdec_start_streaming,
+ .stop_streaming = vdec_stop_streaming,
+ .buf_queue = vdec_vb2_buf_queue,
+ .buf_prepare = vdec_vb2_buf_prepare,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int
+vdec_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, "meson-vdec", sizeof(cap->driver));
+ strscpy(cap->card, "Amlogic Video Decoder", sizeof(cap->card));
+ strscpy(cap->bus_info, "platform:meson-vdec", sizeof(cap->bus_info));
+
+ return 0;
+}
+
+static const struct amvdec_format *
+find_format(const struct amvdec_format *fmts, u32 size, u32 pixfmt)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++) {
+ if (fmts[i].pixfmt == pixfmt)
+ return &fmts[i];
+ }
+
+ return NULL;
+}
+
+static unsigned int
+vdec_supports_pixfmt_cap(const struct amvdec_format *fmt_out, u32 pixfmt_cap)
+{
+ int i;
+
+ for (i = 0; fmt_out->pixfmts_cap[i]; i++)
+ if (fmt_out->pixfmts_cap[i] == pixfmt_cap)
+ return 1;
+
+ return 0;
+}
+
+static const struct amvdec_format *
+vdec_try_fmt_common(struct amvdec_session *sess, u32 size,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+ struct v4l2_plane_pix_format *pfmt = pixmp->plane_fmt;
+ const struct amvdec_format *fmts = sess->core->platform->formats;
+ const struct amvdec_format *fmt_out;
+
+ memset(pfmt[0].reserved, 0, sizeof(pfmt[0].reserved));
+ memset(pixmp->reserved, 0, sizeof(pixmp->reserved));
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ fmt_out = find_format(fmts, size, pixmp->pixelformat);
+ if (!fmt_out) {
+ pixmp->pixelformat = V4L2_PIX_FMT_MPEG2;
+ fmt_out = find_format(fmts, size, pixmp->pixelformat);
+ }
+
+ pfmt[0].sizeimage =
+ get_output_size(pixmp->width, pixmp->height);
+ pfmt[0].bytesperline = 0;
+ pixmp->num_planes = 1;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ fmt_out = sess->fmt_out;
+ if (!vdec_supports_pixfmt_cap(fmt_out, pixmp->pixelformat))
+ pixmp->pixelformat = fmt_out->pixfmts_cap[0];
+
+ memset(pfmt[1].reserved, 0, sizeof(pfmt[1].reserved));
+ if (pixmp->pixelformat == V4L2_PIX_FMT_NV12M) {
+ pfmt[0].sizeimage =
+ get_output_size(pixmp->width, pixmp->height);
+ pfmt[0].bytesperline = ALIGN(pixmp->width, 64);
+
+ pfmt[1].sizeimage =
+ get_output_size(pixmp->width, pixmp->height) / 2;
+ pfmt[1].bytesperline = ALIGN(pixmp->width, 64);
+ pixmp->num_planes = 2;
+ } else if (pixmp->pixelformat == V4L2_PIX_FMT_YUV420M) {
+ pfmt[0].sizeimage =
+ get_output_size(pixmp->width, pixmp->height);
+ pfmt[0].bytesperline = ALIGN(pixmp->width, 64);
+
+ pfmt[1].sizeimage =
+ get_output_size(pixmp->width, pixmp->height) / 4;
+ pfmt[1].bytesperline = ALIGN(pixmp->width, 64) / 2;
+
+ pfmt[2].sizeimage =
+ get_output_size(pixmp->width, pixmp->height) / 4;
+ pfmt[2].bytesperline = ALIGN(pixmp->width, 64) / 2;
+ pixmp->num_planes = 3;
+ }
+ } else {
+ return NULL;
+ }
+
+ pixmp->width = clamp(pixmp->width, (u32)256, fmt_out->max_width);
+ pixmp->height = clamp(pixmp->height, (u32)144, fmt_out->max_height);
+
+ if (pixmp->field == V4L2_FIELD_ANY)
+ pixmp->field = V4L2_FIELD_NONE;
+
+ return fmt_out;
+}
+
+static int vdec_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct amvdec_session *sess =
+ container_of(file->private_data, struct amvdec_session, fh);
+
+ vdec_try_fmt_common(sess, sess->core->platform->num_formats, f);
+
+ return 0;
+}
+
+static int vdec_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct amvdec_session *sess =
+ container_of(file->private_data, struct amvdec_session, fh);
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ pixmp->pixelformat = sess->pixfmt_cap;
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ pixmp->pixelformat = sess->fmt_out->pixfmt;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ pixmp->width = sess->width;
+ pixmp->height = sess->height;
+ pixmp->colorspace = sess->colorspace;
+ pixmp->ycbcr_enc = sess->ycbcr_enc;
+ pixmp->quantization = sess->quantization;
+ pixmp->xfer_func = sess->xfer_func;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ pixmp->width = sess->width;
+ pixmp->height = sess->height;
+ }
+
+ vdec_try_fmt_common(sess, sess->core->platform->num_formats, f);
+
+ return 0;
+}
+
+static int vdec_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct amvdec_session *sess =
+ container_of(file->private_data, struct amvdec_session, fh);
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+ u32 num_formats = sess->core->platform->num_formats;
+ const struct amvdec_format *fmt_out;
+ struct v4l2_pix_format_mplane orig_pixmp;
+ struct v4l2_format format;
+ u32 pixfmt_out = 0, pixfmt_cap = 0;
+
+ orig_pixmp = *pixmp;
+
+ fmt_out = vdec_try_fmt_common(sess, num_formats, f);
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ pixfmt_out = pixmp->pixelformat;
+ pixfmt_cap = sess->pixfmt_cap;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ pixfmt_cap = pixmp->pixelformat;
+ pixfmt_out = sess->fmt_out->pixfmt;
+ }
+
+ memset(&format, 0, sizeof(format));
+
+ format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ format.fmt.pix_mp.pixelformat = pixfmt_out;
+ format.fmt.pix_mp.width = orig_pixmp.width;
+ format.fmt.pix_mp.height = orig_pixmp.height;
+ vdec_try_fmt_common(sess, num_formats, &format);
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ sess->width = format.fmt.pix_mp.width;
+ sess->height = format.fmt.pix_mp.height;
+ sess->colorspace = pixmp->colorspace;
+ sess->ycbcr_enc = pixmp->ycbcr_enc;
+ sess->quantization = pixmp->quantization;
+ sess->xfer_func = pixmp->xfer_func;
+ }
+
+ memset(&format, 0, sizeof(format));
+
+ format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ format.fmt.pix_mp.pixelformat = pixfmt_cap;
+ format.fmt.pix_mp.width = orig_pixmp.width;
+ format.fmt.pix_mp.height = orig_pixmp.height;
+ vdec_try_fmt_common(sess, num_formats, &format);
+
+ sess->width = format.fmt.pix_mp.width;
+ sess->height = format.fmt.pix_mp.height;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ sess->fmt_out = fmt_out;
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ sess->pixfmt_cap = format.fmt.pix_mp.pixelformat;
+
+ return 0;
+}
+
+static int vdec_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct amvdec_session *sess =
+ container_of(file->private_data, struct amvdec_session, fh);
+ const struct vdec_platform *platform = sess->core->platform;
+ const struct amvdec_format *fmt_out;
+
+ memset(f->reserved, 0, sizeof(f->reserved));
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (f->index >= platform->num_formats)
+ return -EINVAL;
+
+ fmt_out = &platform->formats[f->index];
+ f->pixelformat = fmt_out->pixfmt;
+ f->flags = fmt_out->flags;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ fmt_out = sess->fmt_out;
+ if (f->index >= 4 || !fmt_out->pixfmts_cap[f->index])
+ return -EINVAL;
+
+ f->pixelformat = fmt_out->pixfmts_cap[f->index];
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vdec_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct amvdec_session *sess =
+ container_of(file->private_data, struct amvdec_session, fh);
+ const struct amvdec_format *formats = sess->core->platform->formats;
+ const struct amvdec_format *fmt;
+ u32 num_formats = sess->core->platform->num_formats;
+
+ fmt = find_format(formats, num_formats, fsize->pixel_format);
+ if (!fmt || fsize->index)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+
+ fsize->stepwise.min_width = 256;
+ fsize->stepwise.max_width = fmt->max_width;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.min_height = 144;
+ fsize->stepwise.max_height = fmt->max_height;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
+static int
+vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
+{
+ struct amvdec_session *sess =
+ container_of(file->private_data, struct amvdec_session, fh);
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+ struct device *dev = sess->core->dev;
+ int ret;
+
+ ret = v4l2_m2m_ioctl_try_decoder_cmd(file, fh, cmd);
+ if (ret)
+ return ret;
+
+ if (!(sess->streamon_out & sess->streamon_cap))
+ return 0;
+
+ /* Currently not handled since we do not support dynamic resolution
+ * for MPEG2. We consider both queues streaming to mean that the
+ * decoding session is started
+ */
+ if (cmd->cmd == V4L2_DEC_CMD_START)
+ return 0;
+
+ /* Should not happen */
+ if (cmd->cmd != V4L2_DEC_CMD_STOP)
+ return -EINVAL;
+
+ dev_dbg(dev, "Received V4L2_DEC_CMD_STOP\n");
+ sess->should_stop = 1;
+
+ vdec_wait_inactive(sess);
+
+ if (codec_ops->drain) {
+ codec_ops->drain(sess);
+ } else if (codec_ops->eos_sequence) {
+ u32 len;
+ const u8 *data = codec_ops->eos_sequence(&len);
+
+ esparser_queue_eos(sess->core, data, len);
+ }
+
+ return ret;
+}
+
+static int vdec_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int vdec_g_pixelaspect(struct file *file, void *fh, int type,
+ struct v4l2_fract *f)
+{
+ struct amvdec_session *sess =
+ container_of(file->private_data, struct amvdec_session, fh);
+
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ *f = sess->pixelaspect;
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops vdec_ioctl_ops = {
+ .vidioc_querycap = vdec_querycap,
+ .vidioc_enum_fmt_vid_cap = vdec_enum_fmt,
+ .vidioc_enum_fmt_vid_out = vdec_enum_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = vdec_s_fmt,
+ .vidioc_s_fmt_vid_out_mplane = vdec_s_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = vdec_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = vdec_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = vdec_try_fmt,
+ .vidioc_try_fmt_vid_out_mplane = vdec_try_fmt,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_enum_framesizes = vdec_enum_framesizes,
+ .vidioc_subscribe_event = vdec_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_try_decoder_cmd,
+ .vidioc_decoder_cmd = vdec_decoder_cmd,
+ .vidioc_g_pixelaspect = vdec_g_pixelaspect,
+};
+
+static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct amvdec_session *sess = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->ops = &vdec_vb2_ops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->drv_priv = sess;
+ src_vq->buf_struct_size = sizeof(struct dummy_buf);
+ src_vq->min_buffers_needed = 1;
+ src_vq->dev = sess->core->dev;
+ src_vq->lock = &sess->lock;
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->ops = &vdec_vb2_ops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->drv_priv = sess;
+ dst_vq->buf_struct_size = sizeof(struct dummy_buf);
+ dst_vq->min_buffers_needed = 1;
+ dst_vq->dev = sess->core->dev;
+ dst_vq->lock = &sess->lock;
+ ret = vb2_queue_init(dst_vq);
+ if (ret) {
+ vb2_queue_release(src_vq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vdec_init_ctrls(struct amvdec_session *sess)
+{
+ struct v4l2_ctrl_handler *ctrl_handler = &sess->ctrl_handler;
+ int ret;
+
+ ret = v4l2_ctrl_handler_init(ctrl_handler, 1);
+ if (ret)
+ return ret;
+
+ sess->ctrl_min_buf_capture =
+ v4l2_ctrl_new_std(ctrl_handler, NULL,
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 32, 1,
+ 1);
+
+ ret = ctrl_handler->error;
+ if (ret) {
+ v4l2_ctrl_handler_free(ctrl_handler);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vdec_open(struct file *file)
+{
+ struct amvdec_core *core = video_drvdata(file);
+ struct device *dev = core->dev;
+ const struct amvdec_format *formats = core->platform->formats;
+ struct amvdec_session *sess;
+ int ret;
+
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+ if (!sess)
+ return -ENOMEM;
+
+ sess->core = core;
+
+ sess->m2m_dev = v4l2_m2m_init(&vdec_m2m_ops);
+ if (IS_ERR(sess->m2m_dev)) {
+ dev_err(dev, "Fail to v4l2_m2m_init\n");
+ ret = PTR_ERR(sess->m2m_dev);
+ goto err_free_sess;
+ }
+
+ sess->m2m_ctx = v4l2_m2m_ctx_init(sess->m2m_dev, sess, m2m_queue_init);
+ if (IS_ERR(sess->m2m_ctx)) {
+ dev_err(dev, "Fail to v4l2_m2m_ctx_init\n");
+ ret = PTR_ERR(sess->m2m_ctx);
+ goto err_m2m_release;
+ }
+
+ ret = vdec_init_ctrls(sess);
+ if (ret)
+ goto err_m2m_release;
+
+ sess->pixfmt_cap = formats[0].pixfmts_cap[0];
+ sess->fmt_out = &formats[0];
+ sess->width = 1280;
+ sess->height = 720;
+ sess->pixelaspect.numerator = 1;
+ sess->pixelaspect.denominator = 1;
+
+ INIT_LIST_HEAD(&sess->timestamps);
+ INIT_LIST_HEAD(&sess->bufs_recycle);
+ INIT_WORK(&sess->esparser_queue_work, esparser_queue_all_src);
+ mutex_init(&sess->lock);
+ mutex_init(&sess->bufs_recycle_lock);
+ spin_lock_init(&sess->ts_spinlock);
+
+ v4l2_fh_init(&sess->fh, core->vdev_dec);
+ sess->fh.ctrl_handler = &sess->ctrl_handler;
+ v4l2_fh_add(&sess->fh);
+ sess->fh.m2m_ctx = sess->m2m_ctx;
+ file->private_data = &sess->fh;
+
+ return 0;
+
+err_m2m_release:
+ v4l2_m2m_release(sess->m2m_dev);
+err_free_sess:
+ kfree(sess);
+ return ret;
+}
+
+static int vdec_close(struct file *file)
+{
+ struct amvdec_session *sess =
+ container_of(file->private_data, struct amvdec_session, fh);
+
+ v4l2_m2m_ctx_release(sess->m2m_ctx);
+ v4l2_m2m_release(sess->m2m_dev);
+ v4l2_fh_del(&sess->fh);
+ v4l2_fh_exit(&sess->fh);
+
+ mutex_destroy(&sess->lock);
+ mutex_destroy(&sess->bufs_recycle_lock);
+
+ kfree(sess);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations vdec_fops = {
+ .owner = THIS_MODULE,
+ .open = vdec_open,
+ .release = vdec_close,
+ .unlocked_ioctl = video_ioctl2,
+ .poll = v4l2_m2m_fop_poll,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static irqreturn_t vdec_isr(int irq, void *data)
+{
+ struct amvdec_core *core = data;
+ struct amvdec_session *sess = core->cur_sess;
+
+ sess->last_irq_jiffies = get_jiffies_64();
+
+ return sess->fmt_out->codec_ops->isr(sess);
+}
+
+static irqreturn_t vdec_threaded_isr(int irq, void *data)
+{
+ struct amvdec_core *core = data;
+ struct amvdec_session *sess = core->cur_sess;
+
+ return sess->fmt_out->codec_ops->threaded_isr(sess);
+}
+
+static const struct of_device_id vdec_dt_match[] = {
+ { .compatible = "amlogic,gxbb-vdec",
+ .data = &vdec_platform_gxbb },
+ { .compatible = "amlogic,gxm-vdec",
+ .data = &vdec_platform_gxm },
+ { .compatible = "amlogic,gxl-vdec",
+ .data = &vdec_platform_gxl },
+ {}
+};
+MODULE_DEVICE_TABLE(of, vdec_dt_match);
+
+static int vdec_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct video_device *vdev;
+ struct amvdec_core *core;
+ struct resource *r;
+ const struct of_device_id *of_id;
+ int irq;
+ int ret;
+
+ core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
+ if (!core)
+ return -ENOMEM;
+
+ core->dev = dev;
+ platform_set_drvdata(pdev, core);
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dos");
+ core->dos_base = devm_ioremap_resource(dev, r);
+ if (IS_ERR(core->dos_base)) {
+ dev_err(dev, "Couldn't remap DOS memory\n");
+ return PTR_ERR(core->dos_base);
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "esparser");
+ core->esparser_base = devm_ioremap_resource(dev, r);
+ if (IS_ERR(core->esparser_base)) {
+ dev_err(dev, "Couldn't remap ESPARSER memory\n");
+ return PTR_ERR(core->esparser_base);
+ }
+
+ core->regmap_ao =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "amlogic,ao-sysctrl");
+ if (IS_ERR(core->regmap_ao)) {
+ dev_err(dev, "Couldn't regmap AO sysctrl\n");
+ return PTR_ERR(core->regmap_ao);
+ }
+
+ core->canvas = meson_canvas_get(dev);
+ if (IS_ERR(core->canvas))
+ return PTR_ERR(core->canvas);
+
+ core->dos_parser_clk = devm_clk_get(dev, "dos_parser");
+ if (IS_ERR(core->dos_parser_clk))
+ return -EPROBE_DEFER;
+
+ core->dos_clk = devm_clk_get(dev, "dos");
+ if (IS_ERR(core->dos_clk))
+ return -EPROBE_DEFER;
+
+ core->vdec_1_clk = devm_clk_get(dev, "vdec_1");
+ if (IS_ERR(core->vdec_1_clk))
+ return -EPROBE_DEFER;
+
+ core->vdec_hevc_clk = devm_clk_get(dev, "vdec_hevc");
+ if (IS_ERR(core->vdec_hevc_clk))
+ return -EPROBE_DEFER;
+
+ irq = platform_get_irq_byname(pdev, "vdec");
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(core->dev, irq, vdec_isr,
+ vdec_threaded_isr, IRQF_ONESHOT,
+ "vdec", core);
+ if (ret)
+ return ret;
+
+ ret = esparser_init(pdev, core);
+ if (ret)
+ return ret;
+
+ ret = v4l2_device_register(dev, &core->v4l2_dev);
+ if (ret) {
+ dev_err(dev, "Couldn't register v4l2 device\n");
+ return -ENOMEM;
+ }
+
+ vdev = video_device_alloc();
+ if (!vdev) {
+ ret = -ENOMEM;
+ goto err_vdev_release;
+ }
+
+ of_id = of_match_node(vdec_dt_match, dev->of_node);
+ core->platform = of_id->data;
+ core->vdev_dec = vdev;
+ core->dev_dec = dev;
+ mutex_init(&core->lock);
+
+ strscpy(vdev->name, "meson-video-decoder", sizeof(vdev->name));
+ vdev->release = video_device_release;
+ vdev->fops = &vdec_fops;
+ vdev->ioctl_ops = &vdec_ioctl_ops;
+ vdev->vfl_dir = VFL_DIR_M2M;
+ vdev->v4l2_dev = &core->v4l2_dev;
+ vdev->lock = &core->lock;
+ vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+
+ video_set_drvdata(vdev, core);
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ dev_err(dev, "Failed registering video device\n");
+ goto err_vdev_release;
+ }
+
+ return 0;
+
+err_vdev_release:
+ video_device_release(vdev);
+ return ret;
+}
+
+static int vdec_remove(struct platform_device *pdev)
+{
+ struct amvdec_core *core = platform_get_drvdata(pdev);
+
+ video_unregister_device(core->vdev_dec);
+
+ return 0;
+}
+
+static struct platform_driver meson_vdec_driver = {
+ .probe = vdec_probe,
+ .remove = vdec_remove,
+ .driver = {
+ .name = "meson-vdec",
+ .of_match_table = vdec_dt_match,
+ },
+};
+module_platform_driver(meson_vdec_driver);
+
+MODULE_DESCRIPTION("Meson video decoder driver for GXBB/GXL/GXM");
+MODULE_AUTHOR("Maxime Jourdan <mjourdan@baylibre.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/meson/vdec/vdec.h b/drivers/staging/media/meson/vdec/vdec.h
new file mode 100644
index 000000000000..d811e7976519
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec.h
@@ -0,0 +1,267 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#ifndef __MESON_VDEC_CORE_H_
+#define __MESON_VDEC_CORE_H_
+
+#include <linux/irqreturn.h>
+#include <linux/regmap.h>
+#include <linux/list.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <linux/soc/amlogic/meson-canvas.h>
+
+#include "vdec_platform.h"
+
+/* 32 buffers in 3-plane YUV420 */
+#define MAX_CANVAS (32 * 3)
+
+struct amvdec_buffer {
+ struct list_head list;
+ struct vb2_buffer *vb;
+};
+
+/**
+ * struct amvdec_timestamp - stores a src timestamp along with a VIFIFO offset
+ *
+ * @list: used to make lists out of this struct
+ * @ts: timestamp
+ * @offset: offset in the VIFIFO where the associated packet was written
+ */
+struct amvdec_timestamp {
+ struct list_head list;
+ u64 ts;
+ u32 offset;
+};
+
+struct amvdec_session;
+
+/**
+ * struct amvdec_core - device parameters, singleton
+ *
+ * @dos_base: DOS memory base address
+ * @esparser_base: PARSER memory base address
+ * @regmap_ao: regmap for the AO bus
+ * @dev: core device
+ * @dev_dec: decoder device
+ * @platform: platform-specific data
+ * @canvas: canvas provider reference
+ * @dos_parser_clk: DOS_PARSER clock
+ * @dos_clk: DOS clock
+ * @vdec_1_clk: VDEC_1 clock
+ * @vdec_hevc_clk: VDEC_HEVC clock
+ * @esparser_reset: RESET for the PARSER
+ * @vdev_dec: video device for the decoder
+ * @v4l2_dev: v4l2 device
+ * @cur_sess: current decoding session
+ * @lock: video device lock
+ */
+struct amvdec_core {
+ void __iomem *dos_base;
+ void __iomem *esparser_base;
+ struct regmap *regmap_ao;
+
+ struct device *dev;
+ struct device *dev_dec;
+ const struct vdec_platform *platform;
+
+ struct meson_canvas *canvas;
+
+ struct clk *dos_parser_clk;
+ struct clk *dos_clk;
+ struct clk *vdec_1_clk;
+ struct clk *vdec_hevc_clk;
+
+ struct reset_control *esparser_reset;
+
+ struct video_device *vdev_dec;
+ struct v4l2_device v4l2_dev;
+
+ struct amvdec_session *cur_sess;
+ struct mutex lock; /* video device lock */
+};
+
+/**
+ * struct amvdec_ops - vdec operations
+ *
+ * @start: mandatory call when the vdec needs to initialize
+ * @stop: mandatory call when the vdec needs to stop
+ * @conf_esparser: mandatory call to let the vdec configure the ESPARSER
+ * @vififo_level: mandatory call to get the current amount of data
+ * in the VIFIFO
+ */
+struct amvdec_ops {
+ int (*start)(struct amvdec_session *sess);
+ int (*stop)(struct amvdec_session *sess);
+ void (*conf_esparser)(struct amvdec_session *sess);
+ u32 (*vififo_level)(struct amvdec_session *sess);
+};
+
+/**
+ * struct amvdec_codec_ops - codec operations
+ *
+ * @start: mandatory call when the codec needs to initialize
+ * @stop: mandatory call when the codec needs to stop
+ * @load_extended_firmware: optional call to load additional firmware bits
+ * @num_pending_bufs: optional call to get the number of dst buffers on hold
+ * @can_recycle: optional call to know if the codec is ready to recycle
+ * a dst buffer
+ * @recycle: optional call to tell the codec to recycle a dst buffer. Must go
+ * in pair with @can_recycle
+ * @drain: optional call if the codec has a custom way of draining
+ * @resume: call to resume decoding after a source change when the current
+ *	    CAPTURE queue configuration can be kept
+ * @eos_sequence: optional call to get an end sequence to send to esparser
+ * for flush. Mutually exclusive with @drain.
+ * @isr: mandatory call when the ISR triggers
+ * @threaded_isr: mandatory call for the threaded ISR
+ */
+struct amvdec_codec_ops {
+ int (*start)(struct amvdec_session *sess);
+ int (*stop)(struct amvdec_session *sess);
+ int (*load_extended_firmware)(struct amvdec_session *sess,
+ const u8 *data, u32 len);
+ u32 (*num_pending_bufs)(struct amvdec_session *sess);
+ int (*can_recycle)(struct amvdec_core *core);
+ void (*recycle)(struct amvdec_core *core, u32 buf_idx);
+ void (*drain)(struct amvdec_session *sess);
+ void (*resume)(struct amvdec_session *sess);
+ const u8 * (*eos_sequence)(u32 *len);
+ irqreturn_t (*isr)(struct amvdec_session *sess);
+ irqreturn_t (*threaded_isr)(struct amvdec_session *sess);
+};
+
+/**
+ * struct amvdec_format - describes one of the supported OUTPUT (src) formats
+ *
+ * @pixfmt: V4L2 pixel format
+ * @min_buffers: minimum amount of CAPTURE (dst) buffers
+ * @max_buffers: maximum amount of CAPTURE (dst) buffers
+ * @max_width: maximum picture width supported
+ * @max_height: maximum picture height supported
+ * @flags: enum flags associated with this pixfmt
+ * @vdec_ops: the VDEC operations that support this format
+ * @codec_ops: the codec operations that support this format
+ * @firmware_path: Path to the firmware that supports this format
+ * @pixfmts_cap: list of CAPTURE pixel formats available with pixfmt
+ */
+struct amvdec_format {
+ u32 pixfmt;
+ u32 min_buffers;
+ u32 max_buffers;
+ u32 max_width;
+ u32 max_height;
+ u32 flags;
+
+ struct amvdec_ops *vdec_ops;
+ struct amvdec_codec_ops *codec_ops;
+
+ char *firmware_path;
+ u32 pixfmts_cap[4];
+};
+
+enum amvdec_status {
+ STATUS_STOPPED,
+ STATUS_RUNNING,
+ STATUS_NEEDS_RESUME,
+};
+
+/**
+ * struct amvdec_session - decoding session parameters
+ *
+ * @core: reference to the vdec core struct
+ * @fh: v4l2 file handle
+ * @m2m_dev: v4l2 m2m device
+ * @m2m_ctx: v4l2 m2m context
+ * @ctrl_handler: V4L2 control handler
+ * @ctrl_min_buf_capture: V4L2 control V4L2_CID_MIN_BUFFERS_FOR_CAPTURE
+ * @lock: cap & out vb2 queues lock
+ * @fmt_out: vdec pixel format for the OUTPUT queue
+ * @pixfmt_cap: V4L2 pixel format for the CAPTURE queue
+ * @width: current picture width
+ * @height: current picture height
+ * @colorspace: current colorspace
+ * @ycbcr_enc: current ycbcr_enc
+ * @quantization: current quantization
+ * @xfer_func: current transfer function
+ * @pixelaspect: Pixel Aspect Ratio reported by the decoder
+ * @esparser_queued_bufs: number of buffers currently queued into ESPARSER
+ * @esparser_queue_work: work struct for the ESPARSER to process src buffers
+ * @streamon_cap: stream on flag for capture queue
+ * @streamon_out: stream on flag for output queue
+ * @sequence_cap: capture sequence counter
+ * @should_stop: flag set if userspace signaled EOS via command
+ * or empty buffer
+ * @keyframe_found: flag set once a keyframe has been parsed
+ * @num_dst_bufs: number of CAPTURE (dst) buffers currently allocated
+ * @canvas_alloc: array of all the canvas IDs allocated
+ * @canvas_num: number of canvas IDs allocated
+ * @vififo_vaddr: virtual address for the VIFIFO
+ * @vififo_paddr: physical address for the VIFIFO
+ * @vififo_size: size of the VIFIFO dma alloc
+ * @bufs_recycle: list of buffers that need to be recycled
+ * @bufs_recycle_lock: lock for the bufs_recycle list
+ * @recycle_thread: task struct for the recycling thread
+ * @timestamps: chronological list of src timestamps
+ * @ts_spinlock: spinlock for the timestamps list
+ * @last_irq_jiffies: tracks last time the vdec triggered an IRQ
+ * @last_offset: last VIFIFO write offset seen
+ * @wrap_count: number of VIFIFO write offset wrap-arounds seen
+ * @fw_idx_to_vb2_idx: mapping from firmware buffer index to vb2 buffer index
+ * @status: current decoding status
+ * @priv: codec private data
+ */
+struct amvdec_session {
+ struct amvdec_core *core;
+
+ struct v4l2_fh fh;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *ctrl_min_buf_capture;
+ struct mutex lock; /* cap & out queues lock */
+
+ const struct amvdec_format *fmt_out;
+ u32 pixfmt_cap;
+
+ u32 width;
+ u32 height;
+ u32 colorspace;
+ u8 ycbcr_enc;
+ u8 quantization;
+ u8 xfer_func;
+
+ struct v4l2_fract pixelaspect;
+
+ atomic_t esparser_queued_bufs;
+ struct work_struct esparser_queue_work;
+
+ unsigned int streamon_cap, streamon_out;
+ unsigned int sequence_cap;
+ unsigned int should_stop;
+ unsigned int keyframe_found;
+ unsigned int num_dst_bufs;
+
+ u8 canvas_alloc[MAX_CANVAS];
+ u32 canvas_num;
+
+ void *vififo_vaddr;
+ dma_addr_t vififo_paddr;
+ u32 vififo_size;
+
+ struct list_head bufs_recycle;
+ struct mutex bufs_recycle_lock; /* bufs_recycle list lock */
+ struct task_struct *recycle_thread;
+
+ struct list_head timestamps;
+ spinlock_t ts_spinlock; /* timestamp list lock */
+
+ u64 last_irq_jiffies;
+ u32 last_offset;
+ u32 wrap_count;
+ u32 fw_idx_to_vb2_idx[32];
+
+ enum amvdec_status status;
+ void *priv;
+};
+
+u32 amvdec_get_output_size(struct amvdec_session *sess);
+
+#endif
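
The amvdec_codec_ops contract above is easiest to read next to a skeleton implementation. The sketch below is illustrative only and not part of this patch: the codec_stub_* names are hypothetical, and only the mandatory start/stop/isr/threaded_isr callbacks are wired up, in the way the MPEG1/2 backend referenced later by vdec_platform.c is expected to do. Optional callbacks such as can_recycle/recycle or drain are omitted.

/*
 * Minimal sketch of a codec backend (hypothetical, for illustration only).
 */
static int codec_stub_start(struct amvdec_session *sess)
{
	/* Program the decoder core and set up per-session state here. */
	sess->priv = NULL;
	return 0;
}

static int codec_stub_stop(struct amvdec_session *sess)
{
	/* Release whatever codec_stub_start() set up. */
	return 0;
}

static irqreturn_t codec_stub_isr(struct amvdec_session *sess)
{
	/* Hard IRQ half: acknowledge the hardware, defer the heavy work. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t codec_stub_threaded_isr(struct amvdec_session *sess)
{
	/* Threaded half: typically returns decoded CAPTURE buffers here. */
	return IRQ_HANDLED;
}

static struct amvdec_codec_ops codec_stub_ops = {
	.start = codec_stub_start,
	.stop = codec_stub_stop,
	.isr = codec_stub_isr,
	.threaded_isr = codec_stub_threaded_isr,
};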
diff --git a/drivers/staging/media/meson/vdec/vdec_1.c b/drivers/staging/media/meson/vdec/vdec_1.c
new file mode 100644
index 000000000000..3a15c6fc0567
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec_1.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ *
+ * VDEC_1 is a video decoding block that allows decoding of
+ * MPEG 1/2/4, H.263, H.264, MJPEG, VC1
+ */
+
+#include <linux/firmware.h>
+#include <linux/clk.h>
+
+#include "vdec_1.h"
+#include "vdec_helpers.h"
+#include "dos_regs.h"
+
+/* AO Registers */
+#define AO_RTI_GEN_PWR_SLEEP0 0xe8
+#define AO_RTI_GEN_PWR_ISO0 0xec
+ #define GEN_PWR_VDEC_1 (BIT(3) | BIT(2))
+
+#define MC_SIZE (4096 * 4)
+
+static int
+vdec_1_load_firmware(struct amvdec_session *sess, const char *fwname)
+{
+ const struct firmware *fw;
+ struct amvdec_core *core = sess->core;
+ struct device *dev = core->dev_dec;
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+ void *mc_addr;
+ dma_addr_t mc_addr_map;
+ int ret;
+ u32 i = 1000;
+
+ ret = request_firmware(&fw, fwname, dev);
+ if (ret < 0)
+ return -EINVAL;
+
+ if (fw->size < MC_SIZE) {
+ dev_err(dev, "Firmware size %zu is too small. Expected %u.\n",
+ fw->size, MC_SIZE);
+ ret = -EINVAL;
+ goto release_firmware;
+ }
+
+ mc_addr = dma_alloc_coherent(core->dev, MC_SIZE,
+ &mc_addr_map, GFP_KERNEL);
+ if (!mc_addr) {
+ ret = -ENOMEM;
+ goto release_firmware;
+ }
+
+ memcpy(mc_addr, fw->data, MC_SIZE);
+
+ amvdec_write_dos(core, MPSR, 0);
+ amvdec_write_dos(core, CPSR, 0);
+
+ amvdec_clear_dos_bits(core, MDEC_PIC_DC_CTRL, BIT(31));
+
+ amvdec_write_dos(core, IMEM_DMA_ADR, mc_addr_map);
+ amvdec_write_dos(core, IMEM_DMA_COUNT, MC_SIZE / 4);
+ amvdec_write_dos(core, IMEM_DMA_CTRL, (0x8000 | (7 << 16)));
+
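+ /* Poll until the IMEM DMA engine finishes copying the microcode */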
+ while (--i && amvdec_read_dos(core, IMEM_DMA_CTRL) & 0x8000)
+ ;
+
+ if (i == 0) {
+ dev_err(dev, "Firmware load fail (DMA hang?)\n");
+ ret = -EINVAL;
+ goto free_mc;
+ }
+
+ if (codec_ops->load_extended_firmware)
+ ret = codec_ops->load_extended_firmware(sess,
+ fw->data + MC_SIZE,
+ fw->size - MC_SIZE);
+
+free_mc:
+ dma_free_coherent(core->dev, MC_SIZE, mc_addr, mc_addr_map);
+release_firmware:
+ release_firmware(fw);
+ return ret;
+}
+
+static int vdec_1_stbuf_power_up(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+
+ amvdec_write_dos(core, VLD_MEM_VIFIFO_CONTROL, 0);
+ amvdec_write_dos(core, VLD_MEM_VIFIFO_WRAP_COUNT, 0);
+ amvdec_write_dos(core, POWER_CTL_VLD, BIT(4));
+
+ amvdec_write_dos(core, VLD_MEM_VIFIFO_START_PTR, sess->vififo_paddr);
+ amvdec_write_dos(core, VLD_MEM_VIFIFO_CURR_PTR, sess->vififo_paddr);
+ amvdec_write_dos(core, VLD_MEM_VIFIFO_END_PTR,
+ sess->vififo_paddr + sess->vififo_size - 8);
+
+ amvdec_write_dos_bits(core, VLD_MEM_VIFIFO_CONTROL, 1);
+ amvdec_clear_dos_bits(core, VLD_MEM_VIFIFO_CONTROL, 1);
+
+ amvdec_write_dos(core, VLD_MEM_VIFIFO_BUF_CNTL, MEM_BUFCTRL_MANUAL);
+ amvdec_write_dos(core, VLD_MEM_VIFIFO_WP, sess->vififo_paddr);
+
+ amvdec_write_dos_bits(core, VLD_MEM_VIFIFO_BUF_CNTL, 1);
+ amvdec_clear_dos_bits(core, VLD_MEM_VIFIFO_BUF_CNTL, 1);
+
+ amvdec_write_dos_bits(core, VLD_MEM_VIFIFO_CONTROL,
+ (0x11 << MEM_FIFO_CNT_BIT) | MEM_FILL_ON_LEVEL |
+ MEM_CTRL_FILL_EN | MEM_CTRL_EMPTY_EN);
+
+ return 0;
+}
+
+static void vdec_1_conf_esparser(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+
+ /* VDEC_1 specific ESPARSER stuff */
+ amvdec_write_dos(core, DOS_GEN_CTRL0, 0);
+ amvdec_write_dos(core, VLD_MEM_VIFIFO_BUF_CNTL, 1);
+ amvdec_clear_dos_bits(core, VLD_MEM_VIFIFO_BUF_CNTL, 1);
+}
+
+static u32 vdec_1_vififo_level(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+
+ return amvdec_read_dos(core, VLD_MEM_VIFIFO_LEVEL);
+}
+
+static int vdec_1_stop(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+
+ amvdec_write_dos(core, MPSR, 0);
+ amvdec_write_dos(core, CPSR, 0);
+ amvdec_write_dos(core, ASSIST_MBOX1_MASK, 0);
+
+ amvdec_write_dos(core, DOS_SW_RESET0, BIT(12) | BIT(11));
+ amvdec_write_dos(core, DOS_SW_RESET0, 0);
+ amvdec_read_dos(core, DOS_SW_RESET0);
+
+ /* enable vdec1 isolation */
+ regmap_write(core->regmap_ao, AO_RTI_GEN_PWR_ISO0, 0xc0);
+ /* power off vdec1 memories */
+ amvdec_write_dos(core, DOS_MEM_PD_VDEC, 0xffffffff);
+ /* power off vdec1 */
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VDEC_1, GEN_PWR_VDEC_1);
+
+ clk_disable_unprepare(core->vdec_1_clk);
+
+ if (sess->priv)
+ codec_ops->stop(sess);
+
+ return 0;
+}
+
+static int vdec_1_start(struct amvdec_session *sess)
+{
+ int ret;
+ struct amvdec_core *core = sess->core;
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+
+ /* Configure the vdec clk to the maximum available */
+ clk_set_rate(core->vdec_1_clk, 666666666);
+ ret = clk_prepare_enable(core->vdec_1_clk);
+ if (ret)
+ return ret;
+
+ /* Enable power for VDEC_1 */
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VDEC_1, 0);
+ usleep_range(10, 20);
+
+ /* Reset VDEC1 */
+ amvdec_write_dos(core, DOS_SW_RESET0, 0xfffffffc);
+ amvdec_write_dos(core, DOS_SW_RESET0, 0x00000000);
+
+ amvdec_write_dos(core, DOS_GCLK_EN0, 0x3ff);
+
+ /* enable VDEC Memories */
+ amvdec_write_dos(core, DOS_MEM_PD_VDEC, 0);
+ /* Remove VDEC1 Isolation */
+ regmap_write(core->regmap_ao, AO_RTI_GEN_PWR_ISO0, 0);
+ /* Reset DOS top registers */
+ amvdec_write_dos(core, DOS_VDEC_MCRCC_STALL_CTRL, 0);
+
+ amvdec_write_dos(core, GCLK_EN, 0x3ff);
+ amvdec_clear_dos_bits(core, MDEC_PIC_DC_CTRL, BIT(31));
+
+ vdec_1_stbuf_power_up(sess);
+
+ ret = vdec_1_load_firmware(sess, sess->fmt_out->firmware_path);
+ if (ret)
+ goto stop;
+
+ ret = codec_ops->start(sess);
+ if (ret)
+ goto stop;
+
+ /* Enable IRQ */
+ amvdec_write_dos(core, ASSIST_MBOX1_CLR_REG, 1);
+ amvdec_write_dos(core, ASSIST_MBOX1_MASK, 1);
+
+ /* Enable 2-plane output */
+ if (sess->pixfmt_cap == V4L2_PIX_FMT_NV12M)
+ amvdec_write_dos_bits(core, MDEC_PIC_DC_CTRL, BIT(17));
+ else
+ amvdec_clear_dos_bits(core, MDEC_PIC_DC_CTRL, BIT(17));
+
+ /* Enable firmware processor */
+ amvdec_write_dos(core, MPSR, 1);
+ /* Let the firmware settle */
+ usleep_range(10, 20);
+
+ return 0;
+
+stop:
+ vdec_1_stop(sess);
+ return ret;
+}
+
+struct amvdec_ops vdec_1_ops = {
+ .start = vdec_1_start,
+ .stop = vdec_1_stop,
+ .conf_esparser = vdec_1_conf_esparser,
+ .vififo_level = vdec_1_vififo_level,
+};
diff --git a/drivers/staging/media/meson/vdec/vdec_1.h b/drivers/staging/media/meson/vdec/vdec_1.h
new file mode 100644
index 000000000000..042d930c40d7
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec_1.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#ifndef __MESON_VDEC_VDEC_1_H_
+#define __MESON_VDEC_VDEC_1_H_
+
+#include "vdec.h"
+
+extern struct amvdec_ops vdec_1_ops;
+
+#endif
diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.c b/drivers/staging/media/meson/vdec/vdec_helpers.c
new file mode 100644
index 000000000000..f16948bdbf2f
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec_helpers.c
@@ -0,0 +1,449 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#include <linux/gcd.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "vdec_helpers.h"
+
+#define NUM_CANVAS_NV12 2
+#define NUM_CANVAS_YUV420 3
+
+u32 amvdec_read_dos(struct amvdec_core *core, u32 reg)
+{
+ return readl_relaxed(core->dos_base + reg);
+}
+EXPORT_SYMBOL_GPL(amvdec_read_dos);
+
+void amvdec_write_dos(struct amvdec_core *core, u32 reg, u32 val)
+{
+ writel_relaxed(val, core->dos_base + reg);
+}
+EXPORT_SYMBOL_GPL(amvdec_write_dos);
+
+void amvdec_write_dos_bits(struct amvdec_core *core, u32 reg, u32 val)
+{
+ amvdec_write_dos(core, reg, amvdec_read_dos(core, reg) | val);
+}
+EXPORT_SYMBOL_GPL(amvdec_write_dos_bits);
+
+void amvdec_clear_dos_bits(struct amvdec_core *core, u32 reg, u32 val)
+{
+ amvdec_write_dos(core, reg, amvdec_read_dos(core, reg) & ~val);
+}
+EXPORT_SYMBOL_GPL(amvdec_clear_dos_bits);
+
+u32 amvdec_read_parser(struct amvdec_core *core, u32 reg)
+{
+ return readl_relaxed(core->esparser_base + reg);
+}
+EXPORT_SYMBOL_GPL(amvdec_read_parser);
+
+void amvdec_write_parser(struct amvdec_core *core, u32 reg, u32 val)
+{
+ writel_relaxed(val, core->esparser_base + reg);
+}
+EXPORT_SYMBOL_GPL(amvdec_write_parser);
+
+static int canvas_alloc(struct amvdec_session *sess, u8 *canvas_id)
+{
+ int ret;
+
+ if (sess->canvas_num >= MAX_CANVAS) {
+ dev_err(sess->core->dev, "Reached max number of canvas\n");
+ return -ENOMEM;
+ }
+
+ ret = meson_canvas_alloc(sess->core->canvas, canvas_id);
+ if (ret)
+ return ret;
+
+ sess->canvas_alloc[sess->canvas_num++] = *canvas_id;
+ return 0;
+}
+
+static int set_canvas_yuv420m(struct amvdec_session *sess,
+ struct vb2_buffer *vb, u32 width,
+ u32 height, u32 reg)
+{
+ struct amvdec_core *core = sess->core;
+ u8 canvas_id[NUM_CANVAS_YUV420]; /* Y U V */
+ dma_addr_t buf_paddr[NUM_CANVAS_YUV420]; /* Y U V */
+ int ret, i;
+
+ for (i = 0; i < NUM_CANVAS_YUV420; ++i) {
+ ret = canvas_alloc(sess, &canvas_id[i]);
+ if (ret)
+ return ret;
+
+ buf_paddr[i] =
+ vb2_dma_contig_plane_dma_addr(vb, i);
+ }
+
+ /* Y plane */
+ meson_canvas_config(core->canvas, canvas_id[0], buf_paddr[0],
+ width, height, MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+
+ /* U plane */
+ meson_canvas_config(core->canvas, canvas_id[1], buf_paddr[1],
+ width / 2, height / 2, MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+
+ /* V plane */
+ meson_canvas_config(core->canvas, canvas_id[2], buf_paddr[2],
+ width / 2, height / 2, MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+
+ amvdec_write_dos(core, reg,
+ ((canvas_id[2]) << 16) |
+ ((canvas_id[1]) << 8) |
+ (canvas_id[0]));
+
+ return 0;
+}
+
+static int set_canvas_nv12m(struct amvdec_session *sess,
+ struct vb2_buffer *vb, u32 width,
+ u32 height, u32 reg)
+{
+ struct amvdec_core *core = sess->core;
+ u8 canvas_id[NUM_CANVAS_NV12]; /* Y U/V */
+ dma_addr_t buf_paddr[NUM_CANVAS_NV12]; /* Y U/V */
+ int ret, i;
+
+ for (i = 0; i < NUM_CANVAS_NV12; ++i) {
+ ret = canvas_alloc(sess, &canvas_id[i]);
+ if (ret)
+ return ret;
+
+ buf_paddr[i] =
+ vb2_dma_contig_plane_dma_addr(vb, i);
+ }
+
+ /* Y plane */
+ meson_canvas_config(core->canvas, canvas_id[0], buf_paddr[0],
+ width, height, MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+
+ /* U/V plane */
+ meson_canvas_config(core->canvas, canvas_id[1], buf_paddr[1],
+ width, height / 2, MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+
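+ /* NV12 has only two planes: the chroma canvas is repeated in the V field */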
+ amvdec_write_dos(core, reg,
+ ((canvas_id[1]) << 16) |
+ ((canvas_id[1]) << 8) |
+ (canvas_id[0]));
+
+ return 0;
+}
+
+int amvdec_set_canvases(struct amvdec_session *sess,
+ u32 reg_base[], u32 reg_num[])
+{
+ struct v4l2_m2m_buffer *buf;
+ u32 pixfmt = sess->pixfmt_cap;
+ u32 width = ALIGN(sess->width, 64);
+ u32 height = ALIGN(sess->height, 64);
+ u32 reg_cur = reg_base[0];
+ u32 reg_num_cur = 0;
+ u32 reg_base_cur = 0;
+ int i = 0;
+ int ret;
+
+ v4l2_m2m_for_each_dst_buf(sess->m2m_ctx, buf) {
+ if (!reg_base[reg_base_cur])
+ return -EINVAL;
+
+ reg_cur = reg_base[reg_base_cur] + reg_num_cur * 4;
+
+ switch (pixfmt) {
+ case V4L2_PIX_FMT_NV12M:
+ ret = set_canvas_nv12m(sess, &buf->vb.vb2_buf, width,
+ height, reg_cur);
+ if (ret)
+ return ret;
+ break;
+ case V4L2_PIX_FMT_YUV420M:
+ ret = set_canvas_yuv420m(sess, &buf->vb.vb2_buf, width,
+ height, reg_cur);
+ if (ret)
+ return ret;
+ break;
+ default:
+ dev_err(sess->core->dev, "Unsupported pixfmt %08X\n",
+ pixfmt);
+ return -EINVAL;
+ }
+
+ reg_num_cur++;
+ if (reg_num_cur >= reg_num[reg_base_cur]) {
+ reg_base_cur++;
+ reg_num_cur = 0;
+ }
+
+ sess->fw_idx_to_vb2_idx[i++] = buf->vb.vb2_buf.index;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(amvdec_set_canvases);
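
amvdec_set_canvases() spreads one canvas triplet per CAPTURE buffer across one or more banks of contiguous registers, which is why it takes parallel reg_base[]/reg_num[] arrays; a zero entry in reg_base[] acts as a sentinel so the walk fails with -EINVAL if there are more buffers than registers. Below is a hedged sketch of how a codec backend might call it; the CODEC_CANVAS_BANK0/1 names are hypothetical placeholders, not registers defined by this patch.

/* Illustrative only: two hypothetical banks of 4 and 20 canvas registers. */
static int codec_stub_set_canvases(struct amvdec_session *sess)
{
	u32 reg_base[] = { CODEC_CANVAS_BANK0, CODEC_CANVAS_BANK1, 0 };
	u32 reg_num[] = { 4, 20 };

	/*
	 * Writes one canvas index triplet per queued CAPTURE buffer and
	 * fills sess->fw_idx_to_vb2_idx as a side effect.
	 */
	return amvdec_set_canvases(sess, reg_base, reg_num);
}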
+
+void amvdec_add_ts_reorder(struct amvdec_session *sess, u64 ts, u32 offset)
+{
+ struct amvdec_timestamp *new_ts, *tmp;
+ unsigned long flags;
+
+ new_ts = kmalloc(sizeof(*new_ts), GFP_KERNEL);
+ if (!new_ts)
+ return;
+
+ new_ts->ts = ts;
+ new_ts->offset = offset;
+
+ spin_lock_irqsave(&sess->ts_spinlock, flags);
+
+ if (list_empty(&sess->timestamps))
+ goto add_tail;
+
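+ /* Insert in chronological order: stop at the first entry with ts >= ours */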
+ list_for_each_entry(tmp, &sess->timestamps, list) {
+ if (ts <= tmp->ts) {
+ list_add_tail(&new_ts->list, &tmp->list);
+ goto unlock;
+ }
+ }
+
+add_tail:
+ list_add_tail(&new_ts->list, &sess->timestamps);
+unlock:
+ spin_unlock_irqrestore(&sess->ts_spinlock, flags);
+}
+EXPORT_SYMBOL_GPL(amvdec_add_ts_reorder);
+
+void amvdec_remove_ts(struct amvdec_session *sess, u64 ts)
+{
+ struct amvdec_timestamp *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sess->ts_spinlock, flags);
+ list_for_each_entry(tmp, &sess->timestamps, list) {
+ if (tmp->ts == ts) {
+ list_del(&tmp->list);
+ kfree(tmp);
+ goto unlock;
+ }
+ }
+ dev_warn(sess->core->dev_dec,
+ "Couldn't remove buffer with timestamp %llu from list\n", ts);
+
+unlock:
+ spin_unlock_irqrestore(&sess->ts_spinlock, flags);
+}
+EXPORT_SYMBOL_GPL(amvdec_remove_ts);
+
+static void dst_buf_done(struct amvdec_session *sess,
+ struct vb2_v4l2_buffer *vbuf,
+ u32 field,
+ u64 timestamp)
+{
+ struct device *dev = sess->core->dev_dec;
+ u32 output_size = amvdec_get_output_size(sess);
+
+ switch (sess->pixfmt_cap) {
+ case V4L2_PIX_FMT_NV12M:
+ vbuf->vb2_buf.planes[0].bytesused = output_size;
+ vbuf->vb2_buf.planes[1].bytesused = output_size / 2;
+ break;
+ case V4L2_PIX_FMT_YUV420M:
+ vbuf->vb2_buf.planes[0].bytesused = output_size;
+ vbuf->vb2_buf.planes[1].bytesused = output_size / 4;
+ vbuf->vb2_buf.planes[2].bytesused = output_size / 4;
+ break;
+ }
+
+ vbuf->vb2_buf.timestamp = timestamp;
+ vbuf->sequence = sess->sequence_cap++;
+
+ if (sess->should_stop &&
+ atomic_read(&sess->esparser_queued_bufs) <= 2) {
+ const struct v4l2_event ev = { .type = V4L2_EVENT_EOS };
+
+ dev_dbg(dev, "Signaling EOS\n");
+ v4l2_event_queue_fh(&sess->fh, &ev);
+ vbuf->flags |= V4L2_BUF_FLAG_LAST;
+ } else if (sess->should_stop) {
+ dev_dbg(dev, "should_stop, %u bufs remain\n",
+ atomic_read(&sess->esparser_queued_bufs));
+ }
+
+ dev_dbg(dev, "Buffer %u done\n", vbuf->vb2_buf.index);
+ vbuf->field = field;
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
+
+ /* Buffer done probably means the vififo got freed */
+ schedule_work(&sess->esparser_queue_work);
+}
+
+void amvdec_dst_buf_done(struct amvdec_session *sess,
+ struct vb2_v4l2_buffer *vbuf, u32 field)
+{
+ struct device *dev = sess->core->dev_dec;
+ struct amvdec_timestamp *tmp;
+ struct list_head *timestamps = &sess->timestamps;
+ u64 timestamp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sess->ts_spinlock, flags);
+ if (list_empty(timestamps)) {
+ dev_err(dev, "Buffer %u done but list is empty\n",
+ vbuf->vb2_buf.index);
+
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ spin_unlock_irqrestore(&sess->ts_spinlock, flags);
+ return;
+ }
+
+ tmp = list_first_entry(timestamps, struct amvdec_timestamp, list);
+ timestamp = tmp->ts;
+ list_del(&tmp->list);
+ kfree(tmp);
+ spin_unlock_irqrestore(&sess->ts_spinlock, flags);
+
+ dst_buf_done(sess, vbuf, field, timestamp);
+ atomic_dec(&sess->esparser_queued_bufs);
+}
+EXPORT_SYMBOL_GPL(amvdec_dst_buf_done);
+
+void amvdec_dst_buf_done_offset(struct amvdec_session *sess,
+ struct vb2_v4l2_buffer *vbuf,
+ u32 offset, u32 field, bool allow_drop)
+{
+ struct device *dev = sess->core->dev_dec;
+ struct amvdec_timestamp *match = NULL;
+ struct amvdec_timestamp *tmp, *n;
+ u64 timestamp = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sess->ts_spinlock, flags);
+
+ /* Look for our vififo offset to get the corresponding timestamp. */
+ list_for_each_entry_safe(tmp, n, &sess->timestamps, list) {
+ s64 delta = (s64)offset - tmp->offset;
+
+ /* Offsets reported by codecs usually differ slightly,
+ * so we need some wiggle room.
+ * 4KiB being the minimum packet size, there is no risk here.
+ */
+ if (delta > (-1 * (s32)SZ_4K) && delta < SZ_4K) {
+ match = tmp;
+ break;
+ }
+
+ if (!allow_drop)
+ continue;
+
+ /* Delete any timestamp entry that appears before our target
+ * (not all src packets/timestamps lead to a frame)
+ */
+ if (delta > 0 || delta < -1 * (s32)sess->vififo_size) {
+ atomic_dec(&sess->esparser_queued_bufs);
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+ }
+
+ if (!match) {
+ dev_dbg(dev, "Buffer %u done but can't match offset (%08X)\n",
+ vbuf->vb2_buf.index, offset);
+ } else {
+ timestamp = match->ts;
+ list_del(&match->list);
+ kfree(match);
+ }
+ spin_unlock_irqrestore(&sess->ts_spinlock, flags);
+
+ dst_buf_done(sess, vbuf, field, timestamp);
+ if (match)
+ atomic_dec(&sess->esparser_queued_bufs);
+}
+EXPORT_SYMBOL_GPL(amvdec_dst_buf_done_offset);
+
+void amvdec_dst_buf_done_idx(struct amvdec_session *sess,
+ u32 buf_idx, u32 offset, u32 field)
+{
+ struct vb2_v4l2_buffer *vbuf;
+ struct device *dev = sess->core->dev_dec;
+
+ vbuf = v4l2_m2m_dst_buf_remove_by_idx(sess->m2m_ctx,
+ sess->fw_idx_to_vb2_idx[buf_idx]);
+
+ if (!vbuf) {
+ dev_err(dev,
+ "Buffer %u done but it doesn't exist in m2m_ctx\n",
+ buf_idx);
+ return;
+ }
+
+ if (offset != -1)
+ amvdec_dst_buf_done_offset(sess, vbuf, offset, field, true);
+ else
+ amvdec_dst_buf_done(sess, vbuf, field);
+}
+EXPORT_SYMBOL_GPL(amvdec_dst_buf_done_idx);
+
+void amvdec_set_par_from_dar(struct amvdec_session *sess,
+ u32 dar_num, u32 dar_den)
+{
+ u32 div;
+
+ sess->pixelaspect.numerator = sess->height * dar_num;
+ sess->pixelaspect.denominator = sess->width * dar_den;
+ div = gcd(sess->pixelaspect.numerator, sess->pixelaspect.denominator);
+ sess->pixelaspect.numerator /= div;
+ sess->pixelaspect.denominator /= div;
+}
+EXPORT_SYMBOL_GPL(amvdec_set_par_from_dar);
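
To make the pixel-aspect-ratio computation above concrete, consider a 720x576 picture signalled with a 16:9 display aspect ratio:

	numerator   = height * dar_num = 576 * 16 = 9216
	denominator = width  * dar_den = 720 *  9 = 6480
	gcd(9216, 6480) = 144  ->  PAR = 64:45

which is the usual anamorphic PAL pixel aspect ratio.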
+
+void amvdec_src_change(struct amvdec_session *sess, u32 width,
+ u32 height, u32 dpb_size)
+{
+ static const struct v4l2_event ev = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION };
+
+ v4l2_ctrl_s_ctrl(sess->ctrl_min_buf_capture, dpb_size);
+
+ /* Check if the capture queue is already configured well for our
+ * usecase. If so, keep decoding with it and do not send the event
+ */
+ if (sess->width == width &&
+ sess->height == height &&
+ dpb_size <= sess->num_dst_bufs) {
+ sess->fmt_out->codec_ops->resume(sess);
+ return;
+ }
+
+ sess->width = width;
+ sess->height = height;
+ sess->status = STATUS_NEEDS_RESUME;
+
+ dev_dbg(sess->core->dev, "Res. changed (%ux%u), DPB size %u\n",
+ width, height, dpb_size);
+ v4l2_event_queue_fh(&sess->fh, &ev);
+}
+EXPORT_SYMBOL_GPL(amvdec_src_change);
+
+void amvdec_abort(struct amvdec_session *sess)
+{
+ dev_info(sess->core->dev, "Aborting decoding session!\n");
+ vb2_queue_error(&sess->m2m_ctx->cap_q_ctx.q);
+ vb2_queue_error(&sess->m2m_ctx->out_q_ctx.q);
+}
+EXPORT_SYMBOL_GPL(amvdec_abort);
diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.h b/drivers/staging/media/meson/vdec/vdec_helpers.h
new file mode 100644
index 000000000000..a455a9ee1cc2
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec_helpers.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#ifndef __MESON_VDEC_HELPERS_H_
+#define __MESON_VDEC_HELPERS_H_
+
+#include "vdec.h"
+
+/**
+ * amvdec_set_canvases() - Map VB2 buffers to canvases
+ *
+ * @sess: current session
+ * @reg_base: register bases where the canvas indexes will be written
+ * @reg_num: number of contiguous registers after each reg_base (including it)
+ */
+int amvdec_set_canvases(struct amvdec_session *sess,
+ u32 reg_base[], u32 reg_num[]);
+
+/* Helpers to read/write to the various IPs (DOS, PARSER) */
+u32 amvdec_read_dos(struct amvdec_core *core, u32 reg);
+void amvdec_write_dos(struct amvdec_core *core, u32 reg, u32 val);
+void amvdec_write_dos_bits(struct amvdec_core *core, u32 reg, u32 val);
+void amvdec_clear_dos_bits(struct amvdec_core *core, u32 reg, u32 val);
+u32 amvdec_read_parser(struct amvdec_core *core, u32 reg);
+void amvdec_write_parser(struct amvdec_core *core, u32 reg, u32 val);
+
+/**
+ * amvdec_dst_buf_done_idx() - Signal that a buffer is done decoding
+ *
+ * @sess: current session
+ * @buf_idx: hardware buffer index
+ * @offset: VIFIFO bitstream offset corresponding to the buffer
+ * @field: V4L2 interlaced field
+ */
+void amvdec_dst_buf_done_idx(struct amvdec_session *sess, u32 buf_idx,
+ u32 offset, u32 field);
+void amvdec_dst_buf_done(struct amvdec_session *sess,
+ struct vb2_v4l2_buffer *vbuf, u32 field);
+void amvdec_dst_buf_done_offset(struct amvdec_session *sess,
+ struct vb2_v4l2_buffer *vbuf,
+ u32 offset, u32 field, bool allow_drop);
+
+/**
+ * amvdec_add_ts_reorder() - Add a timestamp to the list in chronological order
+ *
+ * @sess: current session
+ * @ts: timestamp to add
+ * @offset: offset in the VIFIFO where the associated packet was written
+ */
+void amvdec_add_ts_reorder(struct amvdec_session *sess, u64 ts, u32 offset);
+void amvdec_remove_ts(struct amvdec_session *sess, u64 ts);
+
+/**
+ * amvdec_set_par_from_dar() - Set Pixel Aspect Ratio from Display Aspect Ratio
+ *
+ * @sess: current session
+ * @dar_num: numerator of the DAR
+ * @dar_den: denominator of the DAR
+ */
+void amvdec_set_par_from_dar(struct amvdec_session *sess,
+ u32 dar_num, u32 dar_den);
+
+/**
+ * amvdec_src_change() - Notify new resolution/DPB size to the core
+ *
+ * @sess: current session
+ * @width: picture width detected by the hardware
+ * @height: picture height detected by the hardware
+ * @dpb_size: Decoded Picture Buffer size (= amount of buffers for decoding)
+ */
+void amvdec_src_change(struct amvdec_session *sess, u32 width,
+ u32 height, u32 dpb_size);
+
+/**
+ * amvdec_abort() - Abort the current decoding session
+ *
+ * @sess: current session
+ */
+void amvdec_abort(struct amvdec_session *sess);
+#endif
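
Taken together, the helpers above give a codec backend a small surface: register accessors, canvas setup, timestamp bookkeeping and buffer completion. The sketch below shows how a threaded ISR might use them; it is illustrative only, and the CODEC_* register names and the dpb_size of 8 are hypothetical, not defined by this patch.

/* Illustrative threaded ISR built on the helpers declared above. */
static irqreturn_t example_threaded_isr(struct amvdec_session *sess)
{
	struct amvdec_core *core = sess->core;
	u32 status = amvdec_read_dos(core, CODEC_STATUS);

	if (status & BIT(0)) {
		/* New coded resolution detected by the hardware. */
		u32 width = amvdec_read_dos(core, CODEC_WIDTH);
		u32 height = amvdec_read_dos(core, CODEC_HEIGHT);

		amvdec_src_change(sess, width, height, 8);
		return IRQ_HANDLED;
	}

	/*
	 * Match the decoded frame to its source timestamp through the
	 * VIFIFO offset, then return the CAPTURE buffer to vb2.
	 */
	amvdec_dst_buf_done_idx(sess,
				amvdec_read_dos(core, CODEC_BUF_IDX),
				amvdec_read_dos(core, CODEC_OFFSET),
				V4L2_FIELD_NONE);

	return IRQ_HANDLED;
}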
diff --git a/drivers/staging/media/meson/vdec/vdec_platform.c b/drivers/staging/media/meson/vdec/vdec_platform.c
new file mode 100644
index 000000000000..824dbc7f46f5
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec_platform.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#include "vdec_platform.h"
+#include "vdec.h"
+
+#include "vdec_1.h"
+#include "codec_mpeg12.h"
+
+static const struct amvdec_format vdec_formats_gxbb[] = {
+ {
+ .pixfmt = V4L2_PIX_FMT_MPEG1,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG2,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ },
+};
+
+static const struct amvdec_format vdec_formats_gxl[] = {
+ {
+ .pixfmt = V4L2_PIX_FMT_MPEG1,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG2,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ },
+};
+
+static const struct amvdec_format vdec_formats_gxm[] = {
+ {
+ .pixfmt = V4L2_PIX_FMT_MPEG1,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG2,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ },
+};
+
+const struct vdec_platform vdec_platform_gxbb = {
+ .formats = vdec_formats_gxbb,
+ .num_formats = ARRAY_SIZE(vdec_formats_gxbb),
+ .revision = VDEC_REVISION_GXBB,
+};
+
+const struct vdec_platform vdec_platform_gxl = {
+ .formats = vdec_formats_gxl,
+ .num_formats = ARRAY_SIZE(vdec_formats_gxl),
+ .revision = VDEC_REVISION_GXL,
+};
+
+const struct vdec_platform vdec_platform_gxm = {
+ .formats = vdec_formats_gxm,
+ .num_formats = ARRAY_SIZE(vdec_formats_gxm),
+ .revision = VDEC_REVISION_GXM,
+};
diff --git a/drivers/staging/media/meson/vdec/vdec_platform.h b/drivers/staging/media/meson/vdec/vdec_platform.h
new file mode 100644
index 000000000000..f6025326db1d
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec_platform.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#ifndef __MESON_VDEC_PLATFORM_H_
+#define __MESON_VDEC_PLATFORM_H_
+
+#include "vdec.h"
+
+struct amvdec_format;
+
+enum vdec_revision {
+ VDEC_REVISION_GXBB,
+ VDEC_REVISION_GXL,
+ VDEC_REVISION_GXM,
+};
+
+struct vdec_platform {
+ const struct amvdec_format *formats;
+ const u32 num_formats;
+ enum vdec_revision revision;
+};
+
+extern const struct vdec_platform vdec_platform_gxbb;
+extern const struct vdec_platform vdec_platform_gxm;
+extern const struct vdec_platform vdec_platform_gxl;
+
+#endif
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index c2c5a9cd8642..c307707480f7 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -533,12 +533,6 @@ iss_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
strscpy(cap->driver, ISS_VIDEO_DRIVER_NAME, sizeof(cap->driver));
strscpy(cap->card, video->video.name, sizeof(cap->card));
strscpy(cap->bus_info, "media", sizeof(cap->bus_info));
-
- if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
- else
- cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
-
cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
| V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT;
@@ -1272,6 +1266,11 @@ int omap4iss_video_register(struct iss_video *video, struct v4l2_device *vdev)
int ret;
video->video.v4l2_dev = vdev;
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ video->video.device_caps = V4L2_CAP_VIDEO_CAPTURE;
+ else
+ video->video.device_caps = V4L2_CAP_VIDEO_OUTPUT;
+ video->video.device_caps |= V4L2_CAP_STREAMING;
ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
if (ret < 0)
diff --git a/drivers/staging/media/rockchip/vpu/Kconfig b/drivers/staging/media/rockchip/vpu/Kconfig
deleted file mode 100644
index fc54bbf6753d..000000000000
--- a/drivers/staging/media/rockchip/vpu/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config VIDEO_ROCKCHIP_VPU
- tristate "Rockchip VPU driver"
- depends on ARCH_ROCKCHIP || COMPILE_TEST
- depends on VIDEO_DEV && VIDEO_V4L2 && MEDIA_CONTROLLER
- select VIDEOBUF2_DMA_CONTIG
- select VIDEOBUF2_VMALLOC
- select V4L2_MEM2MEM_DEV
- help
- Support for the Video Processing Unit present on Rockchip SoC,
- which accelerates video and image encoding and decoding.
- To compile this driver as a module, choose M here: the module
- will be called rockchip-vpu.
diff --git a/drivers/staging/media/rockchip/vpu/Makefile b/drivers/staging/media/rockchip/vpu/Makefile
deleted file mode 100644
index ae5d143a0bfa..000000000000
--- a/drivers/staging/media/rockchip/vpu/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_VIDEO_ROCKCHIP_VPU) += rockchip-vpu.o
-
-rockchip-vpu-y += \
- rockchip_vpu_drv.o \
- rockchip_vpu_enc.o \
- rk3288_vpu_hw.o \
- rk3288_vpu_hw_jpeg_enc.o \
- rk3399_vpu_hw.o \
- rk3399_vpu_hw_jpeg_enc.o \
- rockchip_vpu_jpeg.o
diff --git a/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw.c b/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw.c
deleted file mode 100644
index a5e9d183fffd..000000000000
--- a/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw.c
+++ /dev/null
@@ -1,118 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Rockchip VPU codec driver
- *
- * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
- * Jeffy Chen <jeffy.chen@rock-chips.com>
- */
-
-#include <linux/clk.h>
-
-#include "rockchip_vpu.h"
-#include "rockchip_vpu_jpeg.h"
-#include "rk3288_vpu_regs.h"
-
-#define RK3288_ACLK_MAX_FREQ (400 * 1000 * 1000)
-
-/*
- * Supported formats.
- */
-
-static const struct rockchip_vpu_fmt rk3288_vpu_enc_fmts[] = {
- {
- .fourcc = V4L2_PIX_FMT_YUV420M,
- .codec_mode = RK_VPU_MODE_NONE,
- .enc_fmt = RK3288_VPU_ENC_FMT_YUV420P,
- },
- {
- .fourcc = V4L2_PIX_FMT_NV12M,
- .codec_mode = RK_VPU_MODE_NONE,
- .enc_fmt = RK3288_VPU_ENC_FMT_YUV420SP,
- },
- {
- .fourcc = V4L2_PIX_FMT_YUYV,
- .codec_mode = RK_VPU_MODE_NONE,
- .enc_fmt = RK3288_VPU_ENC_FMT_YUYV422,
- },
- {
- .fourcc = V4L2_PIX_FMT_UYVY,
- .codec_mode = RK_VPU_MODE_NONE,
- .enc_fmt = RK3288_VPU_ENC_FMT_UYVY422,
- },
- {
- .fourcc = V4L2_PIX_FMT_JPEG,
- .codec_mode = RK_VPU_MODE_JPEG_ENC,
- .max_depth = 2,
- .header_size = JPEG_HEADER_SIZE,
- .frmsize = {
- .min_width = 96,
- .max_width = 8192,
- .step_width = JPEG_MB_DIM,
- .min_height = 32,
- .max_height = 8192,
- .step_height = JPEG_MB_DIM,
- },
- },
-};
-
-static irqreturn_t rk3288_vepu_irq(int irq, void *dev_id)
-{
- struct rockchip_vpu_dev *vpu = dev_id;
- enum vb2_buffer_state state;
- u32 status, bytesused;
-
- status = vepu_read(vpu, VEPU_REG_INTERRUPT);
- bytesused = vepu_read(vpu, VEPU_REG_STR_BUF_LIMIT) / 8;
- state = (status & VEPU_REG_INTERRUPT_FRAME_RDY) ?
- VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
-
- vepu_write(vpu, 0, VEPU_REG_INTERRUPT);
- vepu_write(vpu, 0, VEPU_REG_AXI_CTRL);
-
- rockchip_vpu_irq_done(vpu, bytesused, state);
-
- return IRQ_HANDLED;
-}
-
-static int rk3288_vpu_hw_init(struct rockchip_vpu_dev *vpu)
-{
- /* Bump ACLK to max. possible freq. to improve performance. */
- clk_set_rate(vpu->clocks[0].clk, RK3288_ACLK_MAX_FREQ);
- return 0;
-}
-
-static void rk3288_vpu_enc_reset(struct rockchip_vpu_ctx *ctx)
-{
- struct rockchip_vpu_dev *vpu = ctx->dev;
-
- vepu_write(vpu, VEPU_REG_INTERRUPT_DIS_BIT, VEPU_REG_INTERRUPT);
- vepu_write(vpu, 0, VEPU_REG_ENC_CTRL);
- vepu_write(vpu, 0, VEPU_REG_AXI_CTRL);
-}
-
-/*
- * Supported codec ops.
- */
-
-static const struct rockchip_vpu_codec_ops rk3288_vpu_codec_ops[] = {
- [RK_VPU_MODE_JPEG_ENC] = {
- .run = rk3288_vpu_jpeg_enc_run,
- .reset = rk3288_vpu_enc_reset,
- },
-};
-
-/*
- * VPU variant.
- */
-
-const struct rockchip_vpu_variant rk3288_vpu_variant = {
- .enc_offset = 0x0,
- .enc_fmts = rk3288_vpu_enc_fmts,
- .num_enc_fmts = ARRAY_SIZE(rk3288_vpu_enc_fmts),
- .codec_ops = rk3288_vpu_codec_ops,
- .codec = RK_VPU_CODEC_JPEG,
- .vepu_irq = rk3288_vepu_irq,
- .init = rk3288_vpu_hw_init,
- .clk_names = {"aclk", "hclk"},
- .num_clocks = 2
-};
diff --git a/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw_jpeg_enc.c b/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw_jpeg_enc.c
deleted file mode 100644
index 06daea66fb49..000000000000
--- a/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw_jpeg_enc.c
+++ /dev/null
@@ -1,125 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Rockchip VPU codec driver
- *
- * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
- */
-
-#include <asm/unaligned.h>
-#include <media/v4l2-mem2mem.h>
-#include "rockchip_vpu_jpeg.h"
-#include "rockchip_vpu.h"
-#include "rockchip_vpu_common.h"
-#include "rockchip_vpu_hw.h"
-#include "rk3288_vpu_regs.h"
-
-#define VEPU_JPEG_QUANT_TABLE_COUNT 16
-
-static void rk3288_vpu_set_src_img_ctrl(struct rockchip_vpu_dev *vpu,
- struct rockchip_vpu_ctx *ctx)
-{
- struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
- u32 reg;
-
- reg = VEPU_REG_IN_IMG_CTRL_ROW_LEN(pix_fmt->width)
- | VEPU_REG_IN_IMG_CTRL_OVRFLR_D4(0)
- | VEPU_REG_IN_IMG_CTRL_OVRFLB_D4(0)
- | VEPU_REG_IN_IMG_CTRL_FMT(ctx->vpu_src_fmt->enc_fmt);
- vepu_write_relaxed(vpu, reg, VEPU_REG_IN_IMG_CTRL);
-}
-
-static void rk3288_vpu_jpeg_enc_set_buffers(struct rockchip_vpu_dev *vpu,
- struct rockchip_vpu_ctx *ctx,
- struct vb2_buffer *src_buf)
-{
- struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
- dma_addr_t src[3];
-
- WARN_ON(pix_fmt->num_planes > 3);
-
- vepu_write_relaxed(vpu, ctx->bounce_dma_addr,
- VEPU_REG_ADDR_OUTPUT_STREAM);
- vepu_write_relaxed(vpu, ctx->bounce_size,
- VEPU_REG_STR_BUF_LIMIT);
-
- if (pix_fmt->num_planes == 1) {
- src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
- /* single plane formats we supported are all interlaced */
- vepu_write_relaxed(vpu, src[0], VEPU_REG_ADDR_IN_PLANE_0);
- } else if (pix_fmt->num_planes == 2) {
- src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
- src[1] = vb2_dma_contig_plane_dma_addr(src_buf, 1);
- vepu_write_relaxed(vpu, src[0], VEPU_REG_ADDR_IN_PLANE_0);
- vepu_write_relaxed(vpu, src[1], VEPU_REG_ADDR_IN_PLANE_1);
- } else {
- src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
- src[1] = vb2_dma_contig_plane_dma_addr(src_buf, 1);
- src[2] = vb2_dma_contig_plane_dma_addr(src_buf, 2);
- vepu_write_relaxed(vpu, src[0], VEPU_REG_ADDR_IN_PLANE_0);
- vepu_write_relaxed(vpu, src[1], VEPU_REG_ADDR_IN_PLANE_1);
- vepu_write_relaxed(vpu, src[2], VEPU_REG_ADDR_IN_PLANE_2);
- }
-}
-
-static void
-rk3288_vpu_jpeg_enc_set_qtable(struct rockchip_vpu_dev *vpu,
- unsigned char *luma_qtable,
- unsigned char *chroma_qtable)
-{
- u32 reg, i;
-
- for (i = 0; i < VEPU_JPEG_QUANT_TABLE_COUNT; i++) {
- reg = get_unaligned_be32(&luma_qtable[i]);
- vepu_write_relaxed(vpu, reg, VEPU_REG_JPEG_LUMA_QUAT(i));
-
- reg = get_unaligned_be32(&chroma_qtable[i]);
- vepu_write_relaxed(vpu, reg, VEPU_REG_JPEG_CHROMA_QUAT(i));
- }
-}
-
-void rk3288_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
-{
- struct rockchip_vpu_dev *vpu = ctx->dev;
- struct vb2_v4l2_buffer *src_buf, *dst_buf;
- struct rockchip_vpu_jpeg_ctx jpeg_ctx;
- u32 reg;
-
- src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
- dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
-
- memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
- jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
- jpeg_ctx.width = ctx->dst_fmt.width;
- jpeg_ctx.height = ctx->dst_fmt.height;
- jpeg_ctx.quality = ctx->jpeg_quality;
- rockchip_vpu_jpeg_header_assemble(&jpeg_ctx);
-
- /* Switch to JPEG encoder mode before writing registers */
- vepu_write_relaxed(vpu, VEPU_REG_ENC_CTRL_ENC_MODE_JPEG,
- VEPU_REG_ENC_CTRL);
-
- rk3288_vpu_set_src_img_ctrl(vpu, ctx);
- rk3288_vpu_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf);
- rk3288_vpu_jpeg_enc_set_qtable(vpu,
- rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 0),
- rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 1));
-
- reg = VEPU_REG_AXI_CTRL_OUTPUT_SWAP16
- | VEPU_REG_AXI_CTRL_INPUT_SWAP16
- | VEPU_REG_AXI_CTRL_BURST_LEN(16)
- | VEPU_REG_AXI_CTRL_OUTPUT_SWAP32
- | VEPU_REG_AXI_CTRL_INPUT_SWAP32
- | VEPU_REG_AXI_CTRL_OUTPUT_SWAP8
- | VEPU_REG_AXI_CTRL_INPUT_SWAP8;
- /* Make sure that all registers are written at this point. */
- vepu_write(vpu, reg, VEPU_REG_AXI_CTRL);
-
- reg = VEPU_REG_ENC_CTRL_WIDTH(JPEG_MB_WIDTH(ctx->src_fmt.width))
- | VEPU_REG_ENC_CTRL_HEIGHT(JPEG_MB_HEIGHT(ctx->src_fmt.height))
- | VEPU_REG_ENC_CTRL_ENC_MODE_JPEG
- | VEPU_REG_ENC_PIC_INTRA
- | VEPU_REG_ENC_CTRL_EN_BIT;
- /* Kick the watchdog and start encoding */
- schedule_delayed_work(&vpu->watchdog_work, msecs_to_jiffies(2000));
- vepu_write(vpu, reg, VEPU_REG_ENC_CTRL);
-}
diff --git a/drivers/staging/media/rockchip/vpu/rk3288_vpu_regs.h b/drivers/staging/media/rockchip/vpu/rk3288_vpu_regs.h
deleted file mode 100644
index 9d0b9bdf3297..000000000000
--- a/drivers/staging/media/rockchip/vpu/rk3288_vpu_regs.h
+++ /dev/null
@@ -1,442 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Rockchip VPU codec driver
- *
- * Copyright 2018 Google LLC.
- * Tomasz Figa <tfiga@chromium.org>
- */
-
-#ifndef RK3288_VPU_REGS_H_
-#define RK3288_VPU_REGS_H_
-
-/* Encoder registers. */
-#define VEPU_REG_INTERRUPT 0x004
-#define VEPU_REG_INTERRUPT_FRAME_RDY BIT(2)
-#define VEPU_REG_INTERRUPT_DIS_BIT BIT(1)
-#define VEPU_REG_INTERRUPT_BIT BIT(0)
-#define VEPU_REG_AXI_CTRL 0x008
-#define VEPU_REG_AXI_CTRL_OUTPUT_SWAP16 BIT(15)
-#define VEPU_REG_AXI_CTRL_INPUT_SWAP16 BIT(14)
-#define VEPU_REG_AXI_CTRL_BURST_LEN(x) ((x) << 8)
-#define VEPU_REG_AXI_CTRL_GATE_BIT BIT(4)
-#define VEPU_REG_AXI_CTRL_OUTPUT_SWAP32 BIT(3)
-#define VEPU_REG_AXI_CTRL_INPUT_SWAP32 BIT(2)
-#define VEPU_REG_AXI_CTRL_OUTPUT_SWAP8 BIT(1)
-#define VEPU_REG_AXI_CTRL_INPUT_SWAP8 BIT(0)
-#define VEPU_REG_ADDR_OUTPUT_STREAM 0x014
-#define VEPU_REG_ADDR_OUTPUT_CTRL 0x018
-#define VEPU_REG_ADDR_REF_LUMA 0x01c
-#define VEPU_REG_ADDR_REF_CHROMA 0x020
-#define VEPU_REG_ADDR_REC_LUMA 0x024
-#define VEPU_REG_ADDR_REC_CHROMA 0x028
-#define VEPU_REG_ADDR_IN_PLANE_0 0x02c
-#define VEPU_REG_ADDR_IN_PLANE_1 0x030
-#define VEPU_REG_ADDR_IN_PLANE_2 0x034
-#define VEPU_REG_ENC_CTRL 0x038
-#define VEPU_REG_ENC_CTRL_TIMEOUT_EN BIT(31)
-#define VEPU_REG_ENC_CTRL_NAL_MODE_BIT BIT(29)
-#define VEPU_REG_ENC_CTRL_WIDTH(w) ((w) << 19)
-#define VEPU_REG_ENC_CTRL_HEIGHT(h) ((h) << 10)
-#define VEPU_REG_ENC_PIC_INTER (0x0 << 3)
-#define VEPU_REG_ENC_PIC_INTRA (0x1 << 3)
-#define VEPU_REG_ENC_PIC_MVCINTER (0x2 << 3)
-#define VEPU_REG_ENC_CTRL_ENC_MODE_H264 (0x3 << 1)
-#define VEPU_REG_ENC_CTRL_ENC_MODE_JPEG (0x2 << 1)
-#define VEPU_REG_ENC_CTRL_ENC_MODE_VP8 (0x1 << 1)
-#define VEPU_REG_ENC_CTRL_EN_BIT BIT(0)
-#define VEPU_REG_IN_IMG_CTRL 0x03c
-#define VEPU_REG_IN_IMG_CTRL_ROW_LEN(x) ((x) << 12)
-#define VEPU_REG_IN_IMG_CTRL_OVRFLR_D4(x) ((x) << 10)
-#define VEPU_REG_IN_IMG_CTRL_OVRFLB_D4(x) ((x) << 6)
-#define VEPU_REG_IN_IMG_CTRL_FMT(x) ((x) << 2)
-#define VEPU_REG_ENC_CTRL0 0x040
-#define VEPU_REG_ENC_CTRL0_INIT_QP(x) ((x) << 26)
-#define VEPU_REG_ENC_CTRL0_SLICE_ALPHA(x) ((x) << 22)
-#define VEPU_REG_ENC_CTRL0_SLICE_BETA(x) ((x) << 18)
-#define VEPU_REG_ENC_CTRL0_CHROMA_QP_OFFSET(x) ((x) << 13)
-#define VEPU_REG_ENC_CTRL0_FILTER_DIS(x) ((x) << 5)
-#define VEPU_REG_ENC_CTRL0_IDR_PICID(x) ((x) << 1)
-#define VEPU_REG_ENC_CTRL0_CONSTR_INTRA_PRED BIT(0)
-#define VEPU_REG_ENC_CTRL1 0x044
-#define VEPU_REG_ENC_CTRL1_PPS_ID(x) ((x) << 24)
-#define VEPU_REG_ENC_CTRL1_INTRA_PRED_MODE(x) ((x) << 16)
-#define VEPU_REG_ENC_CTRL1_FRAME_NUM(x) ((x))
-#define VEPU_REG_ENC_CTRL2 0x048
-#define VEPU_REG_ENC_CTRL2_DEBLOCKING_FILETER_MODE(x) ((x) << 30)
-#define VEPU_REG_ENC_CTRL2_H264_SLICE_SIZE(x) ((x) << 23)
-#define VEPU_REG_ENC_CTRL2_DISABLE_QUARTER_PIXMV BIT(22)
-#define VEPU_REG_ENC_CTRL2_TRANS8X8_MODE_EN BIT(21)
-#define VEPU_REG_ENC_CTRL2_CABAC_INIT_IDC(x) ((x) << 19)
-#define VEPU_REG_ENC_CTRL2_ENTROPY_CODING_MODE BIT(18)
-#define VEPU_REG_ENC_CTRL2_H264_INTER4X4_MODE BIT(17)
-#define VEPU_REG_ENC_CTRL2_H264_STREAM_MODE BIT(16)
-#define VEPU_REG_ENC_CTRL2_INTRA16X16_MODE(x) ((x))
-#define VEPU_REG_ENC_CTRL3 0x04c
-#define VEPU_REG_ENC_CTRL3_MUTIMV_EN BIT(30)
-#define VEPU_REG_ENC_CTRL3_MV_PENALTY_1_4P(x) ((x) << 20)
-#define VEPU_REG_ENC_CTRL3_MV_PENALTY_4P(x) ((x) << 10)
-#define VEPU_REG_ENC_CTRL3_MV_PENALTY_1P(x) ((x))
-#define VEPU_REG_ENC_CTRL4 0x050
-#define VEPU_REG_ENC_CTRL4_MV_PENALTY_16X8_8X16(x) ((x) << 20)
-#define VEPU_REG_ENC_CTRL4_MV_PENALTY_8X8(x) ((x) << 10)
-#define VEPU_REG_ENC_CTRL4_8X4_4X8(x) ((x))
-#define VEPU_REG_ENC_CTRL5 0x054
-#define VEPU_REG_ENC_CTRL5_MACROBLOCK_PENALTY(x) ((x) << 24)
-#define VEPU_REG_ENC_CTRL5_COMPLETE_SLICES(x) ((x) << 16)
-#define VEPU_REG_ENC_CTRL5_INTER_MODE(x) ((x))
-#define VEPU_REG_STR_HDR_REM_MSB 0x058
-#define VEPU_REG_STR_HDR_REM_LSB 0x05c
-#define VEPU_REG_STR_BUF_LIMIT 0x060
-#define VEPU_REG_MAD_CTRL 0x064
-#define VEPU_REG_MAD_CTRL_QP_ADJUST(x) ((x) << 28)
-#define VEPU_REG_MAD_CTRL_MAD_THREDHOLD(x) ((x) << 22)
-#define VEPU_REG_MAD_CTRL_QP_SUM_DIV2(x) ((x))
-#define VEPU_REG_ADDR_VP8_PROB_CNT 0x068
-#define VEPU_REG_QP_VAL 0x06c
-#define VEPU_REG_QP_VAL_LUM(x) ((x) << 26)
-#define VEPU_REG_QP_VAL_MAX(x) ((x) << 20)
-#define VEPU_REG_QP_VAL_MIN(x) ((x) << 14)
-#define VEPU_REG_QP_VAL_CHECKPOINT_DISTAN(x) ((x))
-#define VEPU_REG_VP8_QP_VAL(i) (0x06c + ((i) * 0x4))
-#define VEPU_REG_CHECKPOINT(i) (0x070 + ((i) * 0x4))
-#define VEPU_REG_CHECKPOINT_CHECK0(x) (((x) & 0xffff))
-#define VEPU_REG_CHECKPOINT_CHECK1(x) (((x) & 0xffff) << 16)
-#define VEPU_REG_CHECKPOINT_RESULT(x) ((((x) >> (16 - 16 \
- * (i & 1))) & 0xffff) \
- * 32)
-#define VEPU_REG_CHKPT_WORD_ERR(i) (0x084 + ((i) * 0x4))
-#define VEPU_REG_CHKPT_WORD_ERR_CHK0(x) (((x) & 0xffff))
-#define VEPU_REG_CHKPT_WORD_ERR_CHK1(x) (((x) & 0xffff) << 16)
-#define VEPU_REG_VP8_BOOL_ENC 0x08c
-#define VEPU_REG_CHKPT_DELTA_QP 0x090
-#define VEPU_REG_CHKPT_DELTA_QP_CHK0(x) (((x) & 0x0f) << 0)
-#define VEPU_REG_CHKPT_DELTA_QP_CHK1(x) (((x) & 0x0f) << 4)
-#define VEPU_REG_CHKPT_DELTA_QP_CHK2(x) (((x) & 0x0f) << 8)
-#define VEPU_REG_CHKPT_DELTA_QP_CHK3(x) (((x) & 0x0f) << 12)
-#define VEPU_REG_CHKPT_DELTA_QP_CHK4(x) (((x) & 0x0f) << 16)
-#define VEPU_REG_CHKPT_DELTA_QP_CHK5(x) (((x) & 0x0f) << 20)
-#define VEPU_REG_CHKPT_DELTA_QP_CHK6(x) (((x) & 0x0f) << 24)
-#define VEPU_REG_VP8_CTRL0 0x090
-#define VEPU_REG_RLC_CTRL 0x094
-#define VEPU_REG_RLC_CTRL_STR_OFFS_SHIFT 23
-#define VEPU_REG_RLC_CTRL_STR_OFFS_MASK (0x3f << 23)
-#define VEPU_REG_RLC_CTRL_RLC_SUM(x) ((x))
-#define VEPU_REG_MB_CTRL 0x098
-#define VEPU_REG_MB_CNT_OUT(x) (((x) & 0xffff))
-#define VEPU_REG_MB_CNT_SET(x) (((x) & 0xffff) << 16)
-#define VEPU_REG_ADDR_NEXT_PIC 0x09c
-#define VEPU_REG_JPEG_LUMA_QUAT(i) (0x100 + ((i) * 0x4))
-#define VEPU_REG_JPEG_CHROMA_QUAT(i) (0x140 + ((i) * 0x4))
-#define VEPU_REG_STABILIZATION_OUTPUT 0x0A0
-#define VEPU_REG_ADDR_CABAC_TBL 0x0cc
-#define VEPU_REG_ADDR_MV_OUT 0x0d0
-#define VEPU_REG_RGB_YUV_COEFF(i) (0x0d4 + ((i) * 0x4))
-#define VEPU_REG_RGB_MASK_MSB 0x0dc
-#define VEPU_REG_INTRA_AREA_CTRL 0x0e0
-#define VEPU_REG_CIR_INTRA_CTRL 0x0e4
-#define VEPU_REG_INTRA_SLICE_BITMAP(i) (0x0e8 + ((i) * 0x4))
-#define VEPU_REG_ADDR_VP8_DCT_PART(i) (0x0e8 + ((i) * 0x4))
-#define VEPU_REG_FIRST_ROI_AREA 0x0f0
-#define VEPU_REG_SECOND_ROI_AREA 0x0f4
-#define VEPU_REG_MVC_CTRL 0x0f8
-#define VEPU_REG_MVC_CTRL_MV16X16_FAVOR(x) ((x) << 28)
-#define VEPU_REG_VP8_INTRA_PENALTY(i) (0x100 + ((i) * 0x4))
-#define VEPU_REG_ADDR_VP8_SEG_MAP 0x11c
-#define VEPU_REG_VP8_SEG_QP(i) (0x120 + ((i) * 0x4))
-#define VEPU_REG_DMV_4P_1P_PENALTY(i) (0x180 + ((i) * 0x4))
-#define VEPU_REG_DMV_4P_1P_PENALTY_BIT(x, i) ((x) << (i) * 8)
-#define VEPU_REG_DMV_QPEL_PENALTY(i) (0x200 + ((i) * 0x4))
-#define VEPU_REG_DMV_QPEL_PENALTY_BIT(x, i) ((x) << (i) * 8)
-#define VEPU_REG_VP8_CTRL1 0x280
-#define VEPU_REG_VP8_BIT_COST_GOLDEN 0x284
-#define VEPU_REG_VP8_LOOP_FLT_DELTA(i) (0x288 + ((i) * 0x4))
-
-/* Decoder registers. */
-#define VDPU_REG_INTERRUPT 0x004
-#define VDPU_REG_INTERRUPT_DEC_PIC_INF BIT(24)
-#define VDPU_REG_INTERRUPT_DEC_TIMEOUT BIT(18)
-#define VDPU_REG_INTERRUPT_DEC_SLICE_INT BIT(17)
-#define VDPU_REG_INTERRUPT_DEC_ERROR_INT BIT(16)
-#define VDPU_REG_INTERRUPT_DEC_ASO_INT BIT(15)
-#define VDPU_REG_INTERRUPT_DEC_BUFFER_INT BIT(14)
-#define VDPU_REG_INTERRUPT_DEC_BUS_INT BIT(13)
-#define VDPU_REG_INTERRUPT_DEC_RDY_INT BIT(12)
-#define VDPU_REG_INTERRUPT_DEC_IRQ BIT(8)
-#define VDPU_REG_INTERRUPT_DEC_IRQ_DIS BIT(4)
-#define VDPU_REG_INTERRUPT_DEC_E BIT(0)
-#define VDPU_REG_CONFIG 0x008
-#define VDPU_REG_CONFIG_DEC_AXI_RD_ID(x) (((x) & 0xff) << 24)
-#define VDPU_REG_CONFIG_DEC_TIMEOUT_E BIT(23)
-#define VDPU_REG_CONFIG_DEC_STRSWAP32_E BIT(22)
-#define VDPU_REG_CONFIG_DEC_STRENDIAN_E BIT(21)
-#define VDPU_REG_CONFIG_DEC_INSWAP32_E BIT(20)
-#define VDPU_REG_CONFIG_DEC_OUTSWAP32_E BIT(19)
-#define VDPU_REG_CONFIG_DEC_DATA_DISC_E BIT(18)
-#define VDPU_REG_CONFIG_TILED_MODE_MSB BIT(17)
-#define VDPU_REG_CONFIG_DEC_OUT_TILED_E BIT(17)
-#define VDPU_REG_CONFIG_DEC_LATENCY(x) (((x) & 0x3f) << 11)
-#define VDPU_REG_CONFIG_DEC_CLK_GATE_E BIT(10)
-#define VDPU_REG_CONFIG_DEC_IN_ENDIAN BIT(9)
-#define VDPU_REG_CONFIG_DEC_OUT_ENDIAN BIT(8)
-#define VDPU_REG_CONFIG_PRIORITY_MODE(x) (((x) & 0x7) << 5)
-#define VDPU_REG_CONFIG_TILED_MODE_LSB BIT(7)
-#define VDPU_REG_CONFIG_DEC_ADV_PRE_DIS BIT(6)
-#define VDPU_REG_CONFIG_DEC_SCMD_DIS BIT(5)
-#define VDPU_REG_CONFIG_DEC_MAX_BURST(x) (((x) & 0x1f) << 0)
-#define VDPU_REG_DEC_CTRL0 0x00c
-#define VDPU_REG_DEC_CTRL0_DEC_MODE(x) (((x) & 0xf) << 28)
-#define VDPU_REG_DEC_CTRL0_RLC_MODE_E BIT(27)
-#define VDPU_REG_DEC_CTRL0_SKIP_MODE BIT(26)
-#define VDPU_REG_DEC_CTRL0_DIVX3_E BIT(25)
-#define VDPU_REG_DEC_CTRL0_PJPEG_E BIT(24)
-#define VDPU_REG_DEC_CTRL0_PIC_INTERLACE_E BIT(23)
-#define VDPU_REG_DEC_CTRL0_PIC_FIELDMODE_E BIT(22)
-#define VDPU_REG_DEC_CTRL0_PIC_B_E BIT(21)
-#define VDPU_REG_DEC_CTRL0_PIC_INTER_E BIT(20)
-#define VDPU_REG_DEC_CTRL0_PIC_TOPFIELD_E BIT(19)
-#define VDPU_REG_DEC_CTRL0_FWD_INTERLACE_E BIT(18)
-#define VDPU_REG_DEC_CTRL0_SORENSON_E BIT(17)
-#define VDPU_REG_DEC_CTRL0_REF_TOPFIELD_E BIT(16)
-#define VDPU_REG_DEC_CTRL0_DEC_OUT_DIS BIT(15)
-#define VDPU_REG_DEC_CTRL0_FILTERING_DIS BIT(14)
-#define VDPU_REG_DEC_CTRL0_WEBP_E BIT(13)
-#define VDPU_REG_DEC_CTRL0_MVC_E BIT(13)
-#define VDPU_REG_DEC_CTRL0_PIC_FIXED_QUANT BIT(13)
-#define VDPU_REG_DEC_CTRL0_WRITE_MVS_E BIT(12)
-#define VDPU_REG_DEC_CTRL0_REFTOPFIRST_E BIT(11)
-#define VDPU_REG_DEC_CTRL0_SEQ_MBAFF_E BIT(10)
-#define VDPU_REG_DEC_CTRL0_PICORD_COUNT_E BIT(9)
-#define VDPU_REG_DEC_CTRL0_DEC_AHB_HLOCK_E BIT(8)
-#define VDPU_REG_DEC_CTRL0_DEC_AXI_WR_ID(x) (((x) & 0xff) << 0)
-#define VDPU_REG_DEC_CTRL1 0x010
-#define VDPU_REG_DEC_CTRL1_PIC_MB_WIDTH(x) (((x) & 0x1ff) << 23)
-#define VDPU_REG_DEC_CTRL1_MB_WIDTH_OFF(x) (((x) & 0xf) << 19)
-#define VDPU_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(x) (((x) & 0xff) << 11)
-#define VDPU_REG_DEC_CTRL1_MB_HEIGHT_OFF(x) (((x) & 0xf) << 7)
-#define VDPU_REG_DEC_CTRL1_ALT_SCAN_E BIT(6)
-#define VDPU_REG_DEC_CTRL1_TOPFIELDFIRST_E BIT(5)
-#define VDPU_REG_DEC_CTRL1_REF_FRAMES(x) (((x) & 0x1f) << 0)
-#define VDPU_REG_DEC_CTRL1_PIC_MB_W_EXT(x) (((x) & 0x7) << 3)
-#define VDPU_REG_DEC_CTRL1_PIC_MB_H_EXT(x) (((x) & 0x7) << 0)
-#define VDPU_REG_DEC_CTRL1_PIC_REFER_FLAG BIT(0)
-#define VDPU_REG_DEC_CTRL2 0x014
-#define VDPU_REG_DEC_CTRL2_STRM_START_BIT(x) (((x) & 0x3f) << 26)
-#define VDPU_REG_DEC_CTRL2_SYNC_MARKER_E BIT(25)
-#define VDPU_REG_DEC_CTRL2_TYPE1_QUANT_E BIT(24)
-#define VDPU_REG_DEC_CTRL2_CH_QP_OFFSET(x) (((x) & 0x1f) << 19)
-#define VDPU_REG_DEC_CTRL2_CH_QP_OFFSET2(x) (((x) & 0x1f) << 14)
-#define VDPU_REG_DEC_CTRL2_FIELDPIC_FLAG_E BIT(0)
-#define VDPU_REG_DEC_CTRL2_INTRADC_VLC_THR(x) (((x) & 0x7) << 16)
-#define VDPU_REG_DEC_CTRL2_VOP_TIME_INCR(x) (((x) & 0xffff) << 0)
-#define VDPU_REG_DEC_CTRL2_DQ_PROFILE BIT(24)
-#define VDPU_REG_DEC_CTRL2_DQBI_LEVEL BIT(23)
-#define VDPU_REG_DEC_CTRL2_RANGE_RED_FRM_E BIT(22)
-#define VDPU_REG_DEC_CTRL2_FAST_UVMC_E BIT(20)
-#define VDPU_REG_DEC_CTRL2_TRANSDCTAB BIT(17)
-#define VDPU_REG_DEC_CTRL2_TRANSACFRM(x) (((x) & 0x3) << 15)
-#define VDPU_REG_DEC_CTRL2_TRANSACFRM2(x) (((x) & 0x3) << 13)
-#define VDPU_REG_DEC_CTRL2_MB_MODE_TAB(x) (((x) & 0x7) << 10)
-#define VDPU_REG_DEC_CTRL2_MVTAB(x) (((x) & 0x7) << 7)
-#define VDPU_REG_DEC_CTRL2_CBPTAB(x) (((x) & 0x7) << 4)
-#define VDPU_REG_DEC_CTRL2_2MV_BLK_PAT_TAB(x) (((x) & 0x3) << 2)
-#define VDPU_REG_DEC_CTRL2_4MV_BLK_PAT_TAB(x) (((x) & 0x3) << 0)
-#define VDPU_REG_DEC_CTRL2_QSCALE_TYPE BIT(24)
-#define VDPU_REG_DEC_CTRL2_CON_MV_E BIT(4)
-#define VDPU_REG_DEC_CTRL2_INTRA_DC_PREC(x) (((x) & 0x3) << 2)
-#define VDPU_REG_DEC_CTRL2_INTRA_VLC_TAB BIT(1)
-#define VDPU_REG_DEC_CTRL2_FRAME_PRED_DCT BIT(0)
-#define VDPU_REG_DEC_CTRL2_JPEG_QTABLES(x) (((x) & 0x3) << 11)
-#define VDPU_REG_DEC_CTRL2_JPEG_MODE(x) (((x) & 0x7) << 8)
-#define VDPU_REG_DEC_CTRL2_JPEG_FILRIGHT_E BIT(7)
-#define VDPU_REG_DEC_CTRL2_JPEG_STREAM_ALL BIT(6)
-#define VDPU_REG_DEC_CTRL2_CR_AC_VLCTABLE BIT(5)
-#define VDPU_REG_DEC_CTRL2_CB_AC_VLCTABLE BIT(4)
-#define VDPU_REG_DEC_CTRL2_CR_DC_VLCTABLE BIT(3)
-#define VDPU_REG_DEC_CTRL2_CB_DC_VLCTABLE BIT(2)
-#define VDPU_REG_DEC_CTRL2_CR_DC_VLCTABLE3 BIT(1)
-#define VDPU_REG_DEC_CTRL2_CB_DC_VLCTABLE3 BIT(0)
-#define VDPU_REG_DEC_CTRL2_STRM1_START_BIT(x) (((x) & 0x3f) << 18)
-#define VDPU_REG_DEC_CTRL2_HUFFMAN_E BIT(17)
-#define VDPU_REG_DEC_CTRL2_MULTISTREAM_E BIT(16)
-#define VDPU_REG_DEC_CTRL2_BOOLEAN_VALUE(x) (((x) & 0xff) << 8)
-#define VDPU_REG_DEC_CTRL2_BOOLEAN_RANGE(x) (((x) & 0xff) << 0)
-#define VDPU_REG_DEC_CTRL2_ALPHA_OFFSET(x) (((x) & 0x1f) << 5)
-#define VDPU_REG_DEC_CTRL2_BETA_OFFSET(x) (((x) & 0x1f) << 0)
-#define VDPU_REG_DEC_CTRL3 0x018
-#define VDPU_REG_DEC_CTRL3_START_CODE_E BIT(31)
-#define VDPU_REG_DEC_CTRL3_INIT_QP(x) (((x) & 0x3f) << 25)
-#define VDPU_REG_DEC_CTRL3_CH_8PIX_ILEAV_E BIT(24)
-#define VDPU_REG_DEC_CTRL3_STREAM_LEN_EXT(x) (((x) & 0xff) << 24)
-#define VDPU_REG_DEC_CTRL3_STREAM_LEN(x) (((x) & 0xffffff) << 0)
-#define VDPU_REG_DEC_CTRL4 0x01c
-#define VDPU_REG_DEC_CTRL4_CABAC_E BIT(31)
-#define VDPU_REG_DEC_CTRL4_BLACKWHITE_E BIT(30)
-#define VDPU_REG_DEC_CTRL4_DIR_8X8_INFER_E BIT(29)
-#define VDPU_REG_DEC_CTRL4_WEIGHT_PRED_E BIT(28)
-#define VDPU_REG_DEC_CTRL4_WEIGHT_BIPR_IDC(x) (((x) & 0x3) << 26)
-#define VDPU_REG_DEC_CTRL4_AVS_H264_H_EXT BIT(25)
-#define VDPU_REG_DEC_CTRL4_FRAMENUM_LEN(x) (((x) & 0x1f) << 16)
-#define VDPU_REG_DEC_CTRL4_FRAMENUM(x) (((x) & 0xffff) << 0)
-#define VDPU_REG_DEC_CTRL4_BITPLANE0_E BIT(31)
-#define VDPU_REG_DEC_CTRL4_BITPLANE1_E BIT(30)
-#define VDPU_REG_DEC_CTRL4_BITPLANE2_E BIT(29)
-#define VDPU_REG_DEC_CTRL4_ALT_PQUANT(x) (((x) & 0x1f) << 24)
-#define VDPU_REG_DEC_CTRL4_DQ_EDGES(x) (((x) & 0xf) << 20)
-#define VDPU_REG_DEC_CTRL4_TTMBF BIT(19)
-#define VDPU_REG_DEC_CTRL4_PQINDEX(x) (((x) & 0x1f) << 14)
-#define VDPU_REG_DEC_CTRL4_VC1_HEIGHT_EXT BIT(13)
-#define VDPU_REG_DEC_CTRL4_BILIN_MC_E BIT(12)
-#define VDPU_REG_DEC_CTRL4_UNIQP_E BIT(11)
-#define VDPU_REG_DEC_CTRL4_HALFQP_E BIT(10)
-#define VDPU_REG_DEC_CTRL4_TTFRM(x) (((x) & 0x3) << 8)
-#define VDPU_REG_DEC_CTRL4_2ND_BYTE_EMUL_E BIT(7)
-#define VDPU_REG_DEC_CTRL4_DQUANT_E BIT(6)
-#define VDPU_REG_DEC_CTRL4_VC1_ADV_E BIT(5)
-#define VDPU_REG_DEC_CTRL4_PJPEG_FILDOWN_E BIT(26)
-#define VDPU_REG_DEC_CTRL4_PJPEG_WDIV8 BIT(25)
-#define VDPU_REG_DEC_CTRL4_PJPEG_HDIV8 BIT(24)
-#define VDPU_REG_DEC_CTRL4_PJPEG_AH(x) (((x) & 0xf) << 20)
-#define VDPU_REG_DEC_CTRL4_PJPEG_AL(x) (((x) & 0xf) << 16)
-#define VDPU_REG_DEC_CTRL4_PJPEG_SS(x) (((x) & 0xff) << 8)
-#define VDPU_REG_DEC_CTRL4_PJPEG_SE(x) (((x) & 0xff) << 0)
-#define VDPU_REG_DEC_CTRL4_DCT1_START_BIT(x) (((x) & 0x3f) << 26)
-#define VDPU_REG_DEC_CTRL4_DCT2_START_BIT(x) (((x) & 0x3f) << 20)
-#define VDPU_REG_DEC_CTRL4_CH_MV_RES BIT(13)
-#define VDPU_REG_DEC_CTRL4_INIT_DC_MATCH0(x) (((x) & 0x7) << 9)
-#define VDPU_REG_DEC_CTRL4_INIT_DC_MATCH1(x) (((x) & 0x7) << 6)
-#define VDPU_REG_DEC_CTRL4_VP7_VERSION BIT(5)
-#define VDPU_REG_DEC_CTRL5 0x020
-#define VDPU_REG_DEC_CTRL5_CONST_INTRA_E BIT(31)
-#define VDPU_REG_DEC_CTRL5_FILT_CTRL_PRES BIT(30)
-#define VDPU_REG_DEC_CTRL5_RDPIC_CNT_PRES BIT(29)
-#define VDPU_REG_DEC_CTRL5_8X8TRANS_FLAG_E BIT(28)
-#define VDPU_REG_DEC_CTRL5_REFPIC_MK_LEN(x) (((x) & 0x7ff) << 17)
-#define VDPU_REG_DEC_CTRL5_IDR_PIC_E BIT(16)
-#define VDPU_REG_DEC_CTRL5_IDR_PIC_ID(x) (((x) & 0xffff) << 0)
-#define VDPU_REG_DEC_CTRL5_MV_SCALEFACTOR(x) (((x) & 0xff) << 24)
-#define VDPU_REG_DEC_CTRL5_REF_DIST_FWD(x) (((x) & 0x1f) << 19)
-#define VDPU_REG_DEC_CTRL5_REF_DIST_BWD(x) (((x) & 0x1f) << 14)
-#define VDPU_REG_DEC_CTRL5_LOOP_FILT_LIMIT(x) (((x) & 0xf) << 14)
-#define VDPU_REG_DEC_CTRL5_VARIANCE_TEST_E BIT(13)
-#define VDPU_REG_DEC_CTRL5_MV_THRESHOLD(x) (((x) & 0x7) << 10)
-#define VDPU_REG_DEC_CTRL5_VAR_THRESHOLD(x) (((x) & 0x3ff) << 0)
-#define VDPU_REG_DEC_CTRL5_DIVX_IDCT_E BIT(8)
-#define VDPU_REG_DEC_CTRL5_DIVX3_SLICE_SIZE(x) (((x) & 0xff) << 0)
-#define VDPU_REG_DEC_CTRL5_PJPEG_REST_FREQ(x) (((x) & 0xffff) << 0)
-#define VDPU_REG_DEC_CTRL5_RV_PROFILE(x) (((x) & 0x3) << 30)
-#define VDPU_REG_DEC_CTRL5_RV_OSV_QUANT(x) (((x) & 0x3) << 28)
-#define VDPU_REG_DEC_CTRL5_RV_FWD_SCALE(x) (((x) & 0x3fff) << 14)
-#define VDPU_REG_DEC_CTRL5_RV_BWD_SCALE(x) (((x) & 0x3fff) << 0)
-#define VDPU_REG_DEC_CTRL5_INIT_DC_COMP0(x) (((x) & 0xffff) << 16)
-#define VDPU_REG_DEC_CTRL5_INIT_DC_COMP1(x) (((x) & 0xffff) << 0)
-#define VDPU_REG_DEC_CTRL6 0x024
-#define VDPU_REG_DEC_CTRL6_PPS_ID(x) (((x) & 0xff) << 24)
-#define VDPU_REG_DEC_CTRL6_REFIDX1_ACTIVE(x) (((x) & 0x1f) << 19)
-#define VDPU_REG_DEC_CTRL6_REFIDX0_ACTIVE(x) (((x) & 0x1f) << 14)
-#define VDPU_REG_DEC_CTRL6_POC_LENGTH(x) (((x) & 0xff) << 0)
-#define VDPU_REG_DEC_CTRL6_ICOMP0_E BIT(24)
-#define VDPU_REG_DEC_CTRL6_ISCALE0(x) (((x) & 0xff) << 16)
-#define VDPU_REG_DEC_CTRL6_ISHIFT0(x) (((x) & 0xffff) << 0)
-#define VDPU_REG_DEC_CTRL6_STREAM1_LEN(x) (((x) & 0xffffff) << 0)
-#define VDPU_REG_DEC_CTRL6_PIC_SLICE_AM(x) (((x) & 0x1fff) << 0)
-#define VDPU_REG_DEC_CTRL6_COEFFS_PART_AM(x) (((x) & 0xf) << 24)
-#define VDPU_REG_FWD_PIC(i) (0x028 + ((i) * 0x4))
-#define VDPU_REG_FWD_PIC_PINIT_RLIST_F5(x) (((x) & 0x1f) << 25)
-#define VDPU_REG_FWD_PIC_PINIT_RLIST_F4(x) (((x) & 0x1f) << 20)
-#define VDPU_REG_FWD_PIC_PINIT_RLIST_F3(x) (((x) & 0x1f) << 15)
-#define VDPU_REG_FWD_PIC_PINIT_RLIST_F2(x) (((x) & 0x1f) << 10)
-#define VDPU_REG_FWD_PIC_PINIT_RLIST_F1(x) (((x) & 0x1f) << 5)
-#define VDPU_REG_FWD_PIC_PINIT_RLIST_F0(x) (((x) & 0x1f) << 0)
-#define VDPU_REG_FWD_PIC1_ICOMP1_E BIT(24)
-#define VDPU_REG_FWD_PIC1_ISCALE1(x) (((x) & 0xff) << 16)
-#define VDPU_REG_FWD_PIC1_ISHIFT1(x) (((x) & 0xffff) << 0)
-#define VDPU_REG_FWD_PIC1_SEGMENT_BASE(x) ((x) << 0)
-#define VDPU_REG_FWD_PIC1_SEGMENT_UPD_E BIT(1)
-#define VDPU_REG_FWD_PIC1_SEGMENT_E BIT(0)
-#define VDPU_REG_DEC_CTRL7 0x02c
-#define VDPU_REG_DEC_CTRL7_PINIT_RLIST_F15(x) (((x) & 0x1f) << 25)
-#define VDPU_REG_DEC_CTRL7_PINIT_RLIST_F14(x) (((x) & 0x1f) << 20)
-#define VDPU_REG_DEC_CTRL7_PINIT_RLIST_F13(x) (((x) & 0x1f) << 15)
-#define VDPU_REG_DEC_CTRL7_PINIT_RLIST_F12(x) (((x) & 0x1f) << 10)
-#define VDPU_REG_DEC_CTRL7_PINIT_RLIST_F11(x) (((x) & 0x1f) << 5)
-#define VDPU_REG_DEC_CTRL7_PINIT_RLIST_F10(x) (((x) & 0x1f) << 0)
-#define VDPU_REG_DEC_CTRL7_ICOMP2_E BIT(24)
-#define VDPU_REG_DEC_CTRL7_ISCALE2(x) (((x) & 0xff) << 16)
-#define VDPU_REG_DEC_CTRL7_ISHIFT2(x) (((x) & 0xffff) << 0)
-#define VDPU_REG_DEC_CTRL7_DCT3_START_BIT(x) (((x) & 0x3f) << 24)
-#define VDPU_REG_DEC_CTRL7_DCT4_START_BIT(x) (((x) & 0x3f) << 18)
-#define VDPU_REG_DEC_CTRL7_DCT5_START_BIT(x) (((x) & 0x3f) << 12)
-#define VDPU_REG_DEC_CTRL7_DCT6_START_BIT(x) (((x) & 0x3f) << 6)
-#define VDPU_REG_DEC_CTRL7_DCT7_START_BIT(x) (((x) & 0x3f) << 0)
-#define VDPU_REG_ADDR_STR 0x030
-#define VDPU_REG_ADDR_DST 0x034
-#define VDPU_REG_ADDR_REF(i) (0x038 + ((i) * 0x4))
-#define VDPU_REG_ADDR_REF_FIELD_E BIT(1)
-#define VDPU_REG_ADDR_REF_TOPC_E BIT(0)
-#define VDPU_REG_REF_PIC(i) (0x078 + ((i) * 0x4))
-#define VDPU_REG_REF_PIC_FILT_TYPE_E BIT(31)
-#define VDPU_REG_REF_PIC_FILT_SHARPNESS(x) (((x) & 0x7) << 28)
-#define VDPU_REG_REF_PIC_MB_ADJ_0(x) (((x) & 0x7f) << 21)
-#define VDPU_REG_REF_PIC_MB_ADJ_1(x) (((x) & 0x7f) << 14)
-#define VDPU_REG_REF_PIC_MB_ADJ_2(x) (((x) & 0x7f) << 7)
-#define VDPU_REG_REF_PIC_MB_ADJ_3(x) (((x) & 0x7f) << 0)
-#define VDPU_REG_REF_PIC_REFER1_NBR(x) (((x) & 0xffff) << 16)
-#define VDPU_REG_REF_PIC_REFER0_NBR(x) (((x) & 0xffff) << 0)
-#define VDPU_REG_REF_PIC_LF_LEVEL_0(x) (((x) & 0x3f) << 18)
-#define VDPU_REG_REF_PIC_LF_LEVEL_1(x) (((x) & 0x3f) << 12)
-#define VDPU_REG_REF_PIC_LF_LEVEL_2(x) (((x) & 0x3f) << 6)
-#define VDPU_REG_REF_PIC_LF_LEVEL_3(x) (((x) & 0x3f) << 0)
-#define VDPU_REG_REF_PIC_QUANT_DELTA_0(x) (((x) & 0x1f) << 27)
-#define VDPU_REG_REF_PIC_QUANT_DELTA_1(x) (((x) & 0x1f) << 22)
-#define VDPU_REG_REF_PIC_QUANT_0(x) (((x) & 0x7ff) << 11)
-#define VDPU_REG_REF_PIC_QUANT_1(x) (((x) & 0x7ff) << 0)
-#define VDPU_REG_LT_REF 0x098
-#define VDPU_REG_VALID_REF 0x09c
-#define VDPU_REG_ADDR_QTABLE 0x0a0
-#define VDPU_REG_ADDR_DIR_MV 0x0a4
-#define VDPU_REG_BD_REF_PIC(i) (0x0a8 + ((i) * 0x4))
-#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B2(x) (((x) & 0x1f) << 25)
-#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F2(x) (((x) & 0x1f) << 20)
-#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B1(x) (((x) & 0x1f) << 15)
-#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F1(x) (((x) & 0x1f) << 10)
-#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B0(x) (((x) & 0x1f) << 5)
-#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F0(x) (((x) & 0x1f) << 0)
-#define VDPU_REG_BD_REF_PIC_PRED_TAP_2_M1(x) (((x) & 0x3) << 10)
-#define VDPU_REG_BD_REF_PIC_PRED_TAP_2_4(x) (((x) & 0x3) << 8)
-#define VDPU_REG_BD_REF_PIC_PRED_TAP_4_M1(x) (((x) & 0x3) << 6)
-#define VDPU_REG_BD_REF_PIC_PRED_TAP_4_4(x) (((x) & 0x3) << 4)
-#define VDPU_REG_BD_REF_PIC_PRED_TAP_6_M1(x) (((x) & 0x3) << 2)
-#define VDPU_REG_BD_REF_PIC_PRED_TAP_6_4(x) (((x) & 0x3) << 0)
-#define VDPU_REG_BD_REF_PIC_QUANT_DELTA_2(x) (((x) & 0x1f) << 27)
-#define VDPU_REG_BD_REF_PIC_QUANT_DELTA_3(x) (((x) & 0x1f) << 22)
-#define VDPU_REG_BD_REF_PIC_QUANT_2(x) (((x) & 0x7ff) << 11)
-#define VDPU_REG_BD_REF_PIC_QUANT_3(x) (((x) & 0x7ff) << 0)
-#define VDPU_REG_BD_P_REF_PIC 0x0bc
-#define VDPU_REG_BD_P_REF_PIC_QUANT_DELTA_4(x) (((x) & 0x1f) << 27)
-#define VDPU_REG_BD_P_REF_PIC_PINIT_RLIST_F3(x) (((x) & 0x1f) << 25)
-#define VDPU_REG_BD_P_REF_PIC_PINIT_RLIST_F2(x) (((x) & 0x1f) << 20)
-#define VDPU_REG_BD_P_REF_PIC_PINIT_RLIST_F1(x) (((x) & 0x1f) << 15)
-#define VDPU_REG_BD_P_REF_PIC_PINIT_RLIST_F0(x) (((x) & 0x1f) << 10)
-#define VDPU_REG_BD_P_REF_PIC_BINIT_RLIST_B15(x) (((x) & 0x1f) << 5)
-#define VDPU_REG_BD_P_REF_PIC_BINIT_RLIST_F15(x) (((x) & 0x1f) << 0)
-#define VDPU_REG_ERR_CONC 0x0c0
-#define VDPU_REG_ERR_CONC_STARTMB_X(x) (((x) & 0x1ff) << 23)
-#define VDPU_REG_ERR_CONC_STARTMB_Y(x) (((x) & 0xff) << 15)
-#define VDPU_REG_PRED_FLT 0x0c4
-#define VDPU_REG_PRED_FLT_PRED_BC_TAP_0_0(x) (((x) & 0x3ff) << 22)
-#define VDPU_REG_PRED_FLT_PRED_BC_TAP_0_1(x) (((x) & 0x3ff) << 12)
-#define VDPU_REG_PRED_FLT_PRED_BC_TAP_0_2(x) (((x) & 0x3ff) << 2)
-#define VDPU_REG_REF_BUF_CTRL 0x0cc
-#define VDPU_REG_REF_BUF_CTRL_REFBU_E BIT(31)
-#define VDPU_REG_REF_BUF_CTRL_REFBU_THR(x) (((x) & 0xfff) << 19)
-#define VDPU_REG_REF_BUF_CTRL_REFBU_PICID(x) (((x) & 0x1f) << 14)
-#define VDPU_REG_REF_BUF_CTRL_REFBU_EVAL_E BIT(13)
-#define VDPU_REG_REF_BUF_CTRL_REFBU_FPARMOD_E BIT(12)
-#define VDPU_REG_REF_BUF_CTRL_REFBU_Y_OFFSET(x) (((x) & 0x1ff) << 0)
-#define VDPU_REG_REF_BUF_CTRL2 0x0dc
-#define VDPU_REG_REF_BUF_CTRL2_REFBU2_BUF_E BIT(31)
-#define VDPU_REG_REF_BUF_CTRL2_REFBU2_THR(x) (((x) & 0xfff) << 19)
-#define VDPU_REG_REF_BUF_CTRL2_REFBU2_PICID(x) (((x) & 0x1f) << 14)
-#define VDPU_REG_REF_BUF_CTRL2_APF_THRESHOLD(x) (((x) & 0x3fff) << 0)
-
-#endif /* RK3288_VPU_REGS_H_ */
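
The register macros removed above follow one pattern throughout: a multi-bit field is masked to its width and shifted to its position, so several fields can simply be OR'd together into a single 32-bit register word. A minimal, standalone userspace sketch of that composition, reusing the VDPU_REG_DEC_CTRL1 field helpers verbatim (the 1280x720 example values and the final write are illustrative only; in the driver the word would go through a register accessor similar to the vepu_write() helper shown further down, at register offset 0x010):

#include <stdio.h>

#define BIT(n)					(1U << (n))
/* Field helpers copied from the VDPU_REG_DEC_CTRL1 defines above. */
#define VDPU_REG_DEC_CTRL1_PIC_MB_WIDTH(x)	(((x) & 0x1ff) << 23)
#define VDPU_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(x)	(((x) & 0xff) << 11)
#define VDPU_REG_DEC_CTRL1_TOPFIELDFIRST_E	BIT(5)

int main(void)
{
	/* 1280x720 is 80x45 macroblocks of 16x16 pixels. */
	unsigned int reg = VDPU_REG_DEC_CTRL1_PIC_MB_WIDTH(80) |
			   VDPU_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(45) |
			   VDPU_REG_DEC_CTRL1_TOPFIELDFIRST_E;

	/* Prints DEC_CTRL1 = 0x28016820. */
	printf("DEC_CTRL1 = 0x%08x\n", reg);
	return 0;
}
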
diff --git a/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw.c b/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw.c
deleted file mode 100644
index 6fdef61e2127..000000000000
--- a/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw.c
+++ /dev/null
@@ -1,118 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Rockchip VPU codec driver
- *
- * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
- * Jeffy Chen <jeffy.chen@rock-chips.com>
- */
-
-#include <linux/clk.h>
-
-#include "rockchip_vpu.h"
-#include "rockchip_vpu_jpeg.h"
-#include "rk3399_vpu_regs.h"
-
-#define RK3399_ACLK_MAX_FREQ (400 * 1000 * 1000)
-
-/*
- * Supported formats.
- */
-
-static const struct rockchip_vpu_fmt rk3399_vpu_enc_fmts[] = {
- {
- .fourcc = V4L2_PIX_FMT_YUV420M,
- .codec_mode = RK_VPU_MODE_NONE,
- .enc_fmt = RK3288_VPU_ENC_FMT_YUV420P,
- },
- {
- .fourcc = V4L2_PIX_FMT_NV12M,
- .codec_mode = RK_VPU_MODE_NONE,
- .enc_fmt = RK3288_VPU_ENC_FMT_YUV420SP,
- },
- {
- .fourcc = V4L2_PIX_FMT_YUYV,
- .codec_mode = RK_VPU_MODE_NONE,
- .enc_fmt = RK3288_VPU_ENC_FMT_YUYV422,
- },
- {
- .fourcc = V4L2_PIX_FMT_UYVY,
- .codec_mode = RK_VPU_MODE_NONE,
- .enc_fmt = RK3288_VPU_ENC_FMT_UYVY422,
- },
- {
- .fourcc = V4L2_PIX_FMT_JPEG,
- .codec_mode = RK_VPU_MODE_JPEG_ENC,
- .max_depth = 2,
- .header_size = JPEG_HEADER_SIZE,
- .frmsize = {
- .min_width = 96,
- .max_width = 8192,
- .step_width = JPEG_MB_DIM,
- .min_height = 32,
- .max_height = 8192,
- .step_height = JPEG_MB_DIM,
- },
- },
-};
-
-static irqreturn_t rk3399_vepu_irq(int irq, void *dev_id)
-{
- struct rockchip_vpu_dev *vpu = dev_id;
- enum vb2_buffer_state state;
- u32 status, bytesused;
-
- status = vepu_read(vpu, VEPU_REG_INTERRUPT);
- bytesused = vepu_read(vpu, VEPU_REG_STR_BUF_LIMIT) / 8;
- state = (status & VEPU_REG_INTERRUPT_FRAME_READY) ?
- VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
-
- vepu_write(vpu, 0, VEPU_REG_INTERRUPT);
- vepu_write(vpu, 0, VEPU_REG_AXI_CTRL);
-
- rockchip_vpu_irq_done(vpu, bytesused, state);
-
- return IRQ_HANDLED;
-}
-
-static int rk3399_vpu_hw_init(struct rockchip_vpu_dev *vpu)
-{
- /* Bump ACLK to max. possible freq. to improve performance. */
- clk_set_rate(vpu->clocks[0].clk, RK3399_ACLK_MAX_FREQ);
- return 0;
-}
-
-static void rk3399_vpu_enc_reset(struct rockchip_vpu_ctx *ctx)
-{
- struct rockchip_vpu_dev *vpu = ctx->dev;
-
- vepu_write(vpu, VEPU_REG_INTERRUPT_DIS_BIT, VEPU_REG_INTERRUPT);
- vepu_write(vpu, 0, VEPU_REG_ENCODE_START);
- vepu_write(vpu, 0, VEPU_REG_AXI_CTRL);
-}
-
-/*
- * Supported codec ops.
- */
-
-static const struct rockchip_vpu_codec_ops rk3399_vpu_codec_ops[] = {
- [RK_VPU_MODE_JPEG_ENC] = {
- .run = rk3399_vpu_jpeg_enc_run,
- .reset = rk3399_vpu_enc_reset,
- },
-};
-
-/*
- * VPU variant.
- */
-
-const struct rockchip_vpu_variant rk3399_vpu_variant = {
- .enc_offset = 0x0,
- .enc_fmts = rk3399_vpu_enc_fmts,
- .num_enc_fmts = ARRAY_SIZE(rk3399_vpu_enc_fmts),
- .codec = RK_VPU_CODEC_JPEG,
- .codec_ops = rk3399_vpu_codec_ops,
- .vepu_irq = rk3399_vepu_irq,
- .init = rk3399_vpu_hw_init,
- .clk_names = {"aclk", "hclk"},
- .num_clocks = 2
-};
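
The JPEG entry in rk3399_vpu_enc_fmts above constrains frame sizes to steps of JPEG_MB_DIM because the encoder works on 16x16 macroblocks. A standalone sketch of the rounding applied during format negotiation (round_up() and DIV_ROUND_UP() are re-implemented locally here, and the 1918x1078 request is only an example; the real driver also clamps to the min/max limits first, as in the vidioc_try_fmt_*() handlers further down):

#include <stdio.h>

#define JPEG_MB_DIM		16
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define round_up(x, y)		(DIV_ROUND_UP(x, y) * (y))

int main(void)
{
	unsigned int w = 1918, h = 1078;	/* example user-requested size */

	/* Round both dimensions up to whole macroblocks. */
	unsigned int aligned_w = round_up(w, JPEG_MB_DIM);	/* 1920 */
	unsigned int aligned_h = round_up(h, JPEG_MB_DIM);	/* 1088 */

	printf("%ux%u -> %ux%u (%ux%u macroblocks)\n", w, h,
	       aligned_w, aligned_h,
	       DIV_ROUND_UP(aligned_w, JPEG_MB_DIM),
	       DIV_ROUND_UP(aligned_h, JPEG_MB_DIM));
	return 0;
}
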
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu.h b/drivers/staging/media/rockchip/vpu/rockchip_vpu.h
deleted file mode 100644
index 1ec2be483e27..000000000000
--- a/drivers/staging/media/rockchip/vpu/rockchip_vpu.h
+++ /dev/null
@@ -1,232 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Rockchip VPU codec driver
- *
- * Copyright 2018 Google LLC.
- * Tomasz Figa <tfiga@chromium.org>
- *
- * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
- * Copyright (C) 2011 Samsung Electronics Co., Ltd.
- */
-
-#ifndef ROCKCHIP_VPU_H_
-#define ROCKCHIP_VPU_H_
-
-#include <linux/platform_device.h>
-#include <linux/videodev2.h>
-#include <linux/wait.h>
-#include <linux/clk.h>
-
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ioctl.h>
-#include <media/videobuf2-core.h>
-#include <media/videobuf2-dma-contig.h>
-
-#include "rockchip_vpu_hw.h"
-
-#define ROCKCHIP_VPU_MAX_CLOCKS 4
-
-#define JPEG_MB_DIM 16
-#define JPEG_MB_WIDTH(w) DIV_ROUND_UP(w, JPEG_MB_DIM)
-#define JPEG_MB_HEIGHT(h) DIV_ROUND_UP(h, JPEG_MB_DIM)
-
-struct rockchip_vpu_ctx;
-struct rockchip_vpu_codec_ops;
-
-#define RK_VPU_CODEC_JPEG BIT(0)
-
-/**
- * struct rockchip_vpu_variant - information about VPU hardware variant
- *
- * @enc_offset: Offset from VPU base to encoder registers.
- * @enc_fmts: Encoder formats.
- * @num_enc_fmts: Number of encoder formats.
- * @codec: Supported codecs.
- * @codec_ops: Codec ops.
- * @init: Initialize hardware.
- * @vepu_irq: Encoder interrupt handler.
- * @clk_names: Array of clock names.
- * @num_clocks: Number of clocks in the array.
- */
-struct rockchip_vpu_variant {
- unsigned int enc_offset;
- const struct rockchip_vpu_fmt *enc_fmts;
- unsigned int num_enc_fmts;
- unsigned int codec;
- const struct rockchip_vpu_codec_ops *codec_ops;
- int (*init)(struct rockchip_vpu_dev *vpu);
- irqreturn_t (*vepu_irq)(int irq, void *priv);
- const char *clk_names[ROCKCHIP_VPU_MAX_CLOCKS];
- int num_clocks;
-};
-
-/**
- * enum rockchip_vpu_codec_mode - codec operating mode.
- * @RK_VPU_MODE_NONE: No operating mode. Used for RAW video formats.
- * @RK_VPU_MODE_JPEG_ENC: JPEG encoder.
- */
-enum rockchip_vpu_codec_mode {
- RK_VPU_MODE_NONE = -1,
- RK_VPU_MODE_JPEG_ENC,
-};
-
-/**
- * struct rockchip_vpu_dev - driver data
- * @v4l2_dev: V4L2 device to register video devices for.
- * @m2m_dev: mem2mem device associated to this device.
- * @mdev: media device associated to this device.
- * @vfd_enc: Video device for encoder.
- * @pdev: Pointer to VPU platform device.
- * @dev: Pointer to device for convenient logging using
- * dev_ macros.
- * @clocks: Array of clock handles.
- * @base: Mapped address of VPU registers.
- * @enc_base: Mapped address of VPU encoder register for convenience.
- * @vpu_mutex: Mutex to synchronize V4L2 calls.
- * @irqlock: Spinlock to synchronize access to data structures
- * shared with interrupt handlers.
- * @variant: Hardware variant-specific parameters.
- * @watchdog_work: Delayed work for hardware timeout handling.
- */
-struct rockchip_vpu_dev {
- struct v4l2_device v4l2_dev;
- struct v4l2_m2m_dev *m2m_dev;
- struct media_device mdev;
- struct video_device *vfd_enc;
- struct platform_device *pdev;
- struct device *dev;
- struct clk_bulk_data clocks[ROCKCHIP_VPU_MAX_CLOCKS];
- void __iomem *base;
- void __iomem *enc_base;
-
- struct mutex vpu_mutex; /* video_device lock */
- spinlock_t irqlock;
- const struct rockchip_vpu_variant *variant;
- struct delayed_work watchdog_work;
-};
-
-/**
- * struct rockchip_vpu_ctx - Context (instance) private data.
- *
- * @dev: VPU driver data to which the context belongs.
- * @fh: V4L2 file handler.
- *
- * @sequence_cap: Sequence counter for capture queue
- * @sequence_out: Sequence counter for output queue
- *
- * @vpu_src_fmt: Descriptor of active source format.
- * @src_fmt: V4L2 pixel format of active source format.
- * @vpu_dst_fmt: Descriptor of active destination format.
- * @dst_fmt: V4L2 pixel format of active destination format.
- *
- * @ctrl_handler: Control handler used to register controls.
- * @jpeg_quality: User-specified JPEG compression quality.
- *
- * @codec_ops: Set of operations related to codec mode.
- *
- * @bounce_dma_addr: Bounce buffer bus address.
- * @bounce_buf: Bounce buffer pointer.
- * @bounce_size: Bounce buffer size.
- */
-struct rockchip_vpu_ctx {
- struct rockchip_vpu_dev *dev;
- struct v4l2_fh fh;
-
- u32 sequence_cap;
- u32 sequence_out;
-
- const struct rockchip_vpu_fmt *vpu_src_fmt;
- struct v4l2_pix_format_mplane src_fmt;
- const struct rockchip_vpu_fmt *vpu_dst_fmt;
- struct v4l2_pix_format_mplane dst_fmt;
-
- struct v4l2_ctrl_handler ctrl_handler;
- int jpeg_quality;
-
- const struct rockchip_vpu_codec_ops *codec_ops;
-
- dma_addr_t bounce_dma_addr;
- void *bounce_buf;
- size_t bounce_size;
-};
-
-/**
- * struct rockchip_vpu_fmt - information about supported video formats.
- * @name: Human readable name of the format.
- * @fourcc: FourCC code of the format. See V4L2_PIX_FMT_*.
- * @codec_mode: Codec mode related to this format. See
- * enum rockchip_vpu_codec_mode.
- * @header_size: Optional header size. Currently used by JPEG encoder.
- * @max_depth: Maximum depth, for bitstream formats
- * @enc_fmt: Format identifier for encoder registers.
- * @frmsize: Supported range of frame sizes (only for bitstream formats).
- */
-struct rockchip_vpu_fmt {
- char *name;
- u32 fourcc;
- enum rockchip_vpu_codec_mode codec_mode;
- int header_size;
- int max_depth;
- enum rockchip_vpu_enc_fmt enc_fmt;
- struct v4l2_frmsize_stepwise frmsize;
-};
-
-/* Logging helpers */
-
-/**
- * debug - Module parameter to control level of debugging messages.
- *
- * Level of debugging messages can be controlled by bits of
- * module parameter called "debug". Meaning of particular
- * bits is as follows:
- *
- * bit 0 - global information: mode, size, init, release
- * bit 1 - each run start/result information
- * bit 2 - contents of small controls from userspace
- * bit 3 - contents of big controls from userspace
- * bit 4 - detail fmt, ctrl, buffer q/dq information
- * bit 5 - detail function enter/leave trace information
- * bit 6 - register write/read information
- */
-extern int rockchip_vpu_debug;
-
-#define vpu_debug(level, fmt, args...) \
- do { \
- if (rockchip_vpu_debug & BIT(level)) \
- pr_info("%s:%d: " fmt, \
- __func__, __LINE__, ##args); \
- } while (0)
-
-#define vpu_err(fmt, args...) \
- pr_err("%s:%d: " fmt, __func__, __LINE__, ##args)
-
-/* Structure access helpers. */
-static inline struct rockchip_vpu_ctx *fh_to_ctx(struct v4l2_fh *fh)
-{
- return container_of(fh, struct rockchip_vpu_ctx, fh);
-}
-
-/* Register accessors. */
-static inline void vepu_write_relaxed(struct rockchip_vpu_dev *vpu,
- u32 val, u32 reg)
-{
- vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
- writel_relaxed(val, vpu->enc_base + reg);
-}
-
-static inline void vepu_write(struct rockchip_vpu_dev *vpu, u32 val, u32 reg)
-{
- vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
- writel(val, vpu->enc_base + reg);
-}
-
-static inline u32 vepu_read(struct rockchip_vpu_dev *vpu, u32 reg)
-{
- u32 val = readl(vpu->enc_base + reg);
-
- vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
- return val;
-}
-
-#endif /* ROCKCHIP_VPU_H_ */
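
The "debug" parameter documented above is a bitmask rather than a verbosity level: each vpu_debug() call names the one bit that enables it. A standalone sketch of the filtering the macro performs (the 0x41 mask is just an example that enables bits 0 and 6; at runtime the mask comes from the module parameter registered in rockchip_vpu_drv.c below):

#include <stdio.h>

#define BIT(n)	(1U << (n))

/* Stand-in for the module parameter. */
static int rockchip_vpu_debug = 0x41;

#define vpu_debug(level, fmt, ...)					\
	do {								\
		if (rockchip_vpu_debug & BIT(level))			\
			printf("%s:%d: " fmt, __func__, __LINE__,	\
			       ##__VA_ARGS__);				\
	} while (0)

int main(void)
{
	vpu_debug(0, "bit 0 (global information) is set, so this prints\n");
	vpu_debug(6, "bit 6 (register trace) is set, so this prints too\n");
	/* Bit 4 is not set in the mask, so the next call prints nothing. */
	vpu_debug(4, "buffer queue details\n");
	return 0;
}
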
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu_common.h b/drivers/staging/media/rockchip/vpu/rockchip_vpu_common.h
deleted file mode 100644
index ca77668d9579..000000000000
--- a/drivers/staging/media/rockchip/vpu/rockchip_vpu_common.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Rockchip VPU codec driver
- *
- * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
- * Alpha Lin <Alpha.Lin@rock-chips.com>
- * Jeffy Chen <jeffy.chen@rock-chips.com>
- *
- * Copyright 2018 Google LLC.
- * Tomasz Figa <tfiga@chromium.org>
- *
- * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
- * Copyright (C) 2011 Samsung Electronics Co., Ltd.
- */
-
-#ifndef ROCKCHIP_VPU_COMMON_H_
-#define ROCKCHIP_VPU_COMMON_H_
-
-#include "rockchip_vpu.h"
-
-extern const struct v4l2_ioctl_ops rockchip_vpu_enc_ioctl_ops;
-extern const struct vb2_ops rockchip_vpu_enc_queue_ops;
-
-void rockchip_vpu_enc_reset_src_fmt(struct rockchip_vpu_dev *vpu,
- struct rockchip_vpu_ctx *ctx);
-void rockchip_vpu_enc_reset_dst_fmt(struct rockchip_vpu_dev *vpu,
- struct rockchip_vpu_ctx *ctx);
-
-#endif /* ROCKCHIP_VPU_COMMON_H_ */
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c b/drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c
deleted file mode 100644
index 8bbc905b26c8..000000000000
--- a/drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c
+++ /dev/null
@@ -1,542 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Rockchip VPU codec driver
- *
- * Copyright (C) 2018 Collabora, Ltd.
- * Copyright 2018 Google LLC.
- * Tomasz Figa <tfiga@chromium.org>
- *
- * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
- * Copyright (C) 2011 Samsung Electronics Co., Ltd.
- */
-
-#include <linux/clk.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/pm.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-#include <linux/videodev2.h>
-#include <linux/workqueue.h>
-#include <media/v4l2-event.h>
-#include <media/v4l2-mem2mem.h>
-#include <media/videobuf2-core.h>
-#include <media/videobuf2-vmalloc.h>
-
-#include "rockchip_vpu_common.h"
-#include "rockchip_vpu.h"
-#include "rockchip_vpu_hw.h"
-
-#define DRIVER_NAME "rockchip-vpu"
-
-int rockchip_vpu_debug;
-module_param_named(debug, rockchip_vpu_debug, int, 0644);
-MODULE_PARM_DESC(debug,
- "Debug level - higher value produces more verbose messages");
-
-static void rockchip_vpu_job_finish(struct rockchip_vpu_dev *vpu,
- struct rockchip_vpu_ctx *ctx,
- unsigned int bytesused,
- enum vb2_buffer_state result)
-{
- struct vb2_v4l2_buffer *src, *dst;
- size_t avail_size;
-
- pm_runtime_mark_last_busy(vpu->dev);
- pm_runtime_put_autosuspend(vpu->dev);
- clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);
-
- src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
-
- if (WARN_ON(!src))
- return;
- if (WARN_ON(!dst))
- return;
-
- src->sequence = ctx->sequence_out++;
- dst->sequence = ctx->sequence_cap++;
-
- dst->field = src->field;
- if (src->flags & V4L2_BUF_FLAG_TIMECODE)
- dst->timecode = src->timecode;
- dst->vb2_buf.timestamp = src->vb2_buf.timestamp;
- dst->flags &= ~(V4L2_BUF_FLAG_TSTAMP_SRC_MASK |
- V4L2_BUF_FLAG_TIMECODE);
- dst->flags |= src->flags & (V4L2_BUF_FLAG_TSTAMP_SRC_MASK |
- V4L2_BUF_FLAG_TIMECODE);
-
- avail_size = vb2_plane_size(&dst->vb2_buf, 0) -
- ctx->vpu_dst_fmt->header_size;
- if (bytesused <= avail_size) {
- if (ctx->bounce_buf) {
- memcpy(vb2_plane_vaddr(&dst->vb2_buf, 0) +
- ctx->vpu_dst_fmt->header_size,
- ctx->bounce_buf, bytesused);
- }
- dst->vb2_buf.planes[0].bytesused =
- ctx->vpu_dst_fmt->header_size + bytesused;
- } else {
- result = VB2_BUF_STATE_ERROR;
- }
-
- v4l2_m2m_buf_done(src, result);
- v4l2_m2m_buf_done(dst, result);
-
- v4l2_m2m_job_finish(vpu->m2m_dev, ctx->fh.m2m_ctx);
-}
-
-void rockchip_vpu_irq_done(struct rockchip_vpu_dev *vpu,
- unsigned int bytesused,
- enum vb2_buffer_state result)
-{
- struct rockchip_vpu_ctx *ctx =
- v4l2_m2m_get_curr_priv(vpu->m2m_dev);
-
- /*
-	 * If cancel_delayed_work() returns false, the timeout already
-	 * expired and the watchdog is running; it will take care of
-	 * finishing the job.
- */
- if (cancel_delayed_work(&vpu->watchdog_work))
- rockchip_vpu_job_finish(vpu, ctx, bytesused, result);
-}
-
-void rockchip_vpu_watchdog(struct work_struct *work)
-{
- struct rockchip_vpu_dev *vpu;
- struct rockchip_vpu_ctx *ctx;
-
- vpu = container_of(to_delayed_work(work),
- struct rockchip_vpu_dev, watchdog_work);
- ctx = v4l2_m2m_get_curr_priv(vpu->m2m_dev);
- if (ctx) {
- vpu_err("frame processing timed out!\n");
- ctx->codec_ops->reset(ctx);
- rockchip_vpu_job_finish(vpu, ctx, 0, VB2_BUF_STATE_ERROR);
- }
-}
-
-static void device_run(void *priv)
-{
- struct rockchip_vpu_ctx *ctx = priv;
- int ret;
-
- ret = clk_bulk_enable(ctx->dev->variant->num_clocks, ctx->dev->clocks);
- if (ret)
- goto err_cancel_job;
- ret = pm_runtime_get_sync(ctx->dev->dev);
- if (ret < 0)
- goto err_cancel_job;
-
- ctx->codec_ops->run(ctx);
- return;
-
-err_cancel_job:
- rockchip_vpu_job_finish(ctx->dev, ctx, 0, VB2_BUF_STATE_ERROR);
-}
-
-static struct v4l2_m2m_ops vpu_m2m_ops = {
- .device_run = device_run,
-};
-
-static int
-enc_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
-{
- struct rockchip_vpu_ctx *ctx = priv;
- int ret;
-
- src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
- src_vq->drv_priv = ctx;
- src_vq->ops = &rockchip_vpu_enc_queue_ops;
- src_vq->mem_ops = &vb2_dma_contig_memops;
-
- /*
-	 * The driver does mostly sequential access, so sacrifice TLB
-	 * efficiency for faster allocation. Also, there is no CPU access
-	 * on the source queue, so no kernel mapping is needed.
- */
- src_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
- DMA_ATTR_NO_KERNEL_MAPPING;
- src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
- src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
- src_vq->lock = &ctx->dev->vpu_mutex;
- src_vq->dev = ctx->dev->v4l2_dev.dev;
-
- ret = vb2_queue_init(src_vq);
- if (ret)
- return ret;
-
- /*
-	 * The CAPTURE queue doesn't need DMA memory, as the CPU
-	 * creates the final JPEG frames from the hardware-produced
-	 * JPEG payload.
-	 *
-	 * For the DMA destination buffer, we use a bounce buffer.
- */
- dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
- dst_vq->drv_priv = ctx;
- dst_vq->ops = &rockchip_vpu_enc_queue_ops;
- dst_vq->mem_ops = &vb2_vmalloc_memops;
- dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
- dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
- dst_vq->lock = &ctx->dev->vpu_mutex;
- dst_vq->dev = ctx->dev->v4l2_dev.dev;
-
- return vb2_queue_init(dst_vq);
-}
-
-static int rockchip_vpu_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct rockchip_vpu_ctx *ctx;
-
- ctx = container_of(ctrl->handler,
- struct rockchip_vpu_ctx, ctrl_handler);
-
- vpu_debug(1, "s_ctrl: id = %d, val = %d\n", ctrl->id, ctrl->val);
-
- switch (ctrl->id) {
- case V4L2_CID_JPEG_COMPRESSION_QUALITY:
- ctx->jpeg_quality = ctrl->val;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static const struct v4l2_ctrl_ops rockchip_vpu_ctrl_ops = {
- .s_ctrl = rockchip_vpu_s_ctrl,
-};
-
-static int rockchip_vpu_ctrls_setup(struct rockchip_vpu_dev *vpu,
- struct rockchip_vpu_ctx *ctx)
-{
- v4l2_ctrl_handler_init(&ctx->ctrl_handler, 1);
- if (vpu->variant->codec & RK_VPU_CODEC_JPEG) {
- v4l2_ctrl_new_std(&ctx->ctrl_handler, &rockchip_vpu_ctrl_ops,
- V4L2_CID_JPEG_COMPRESSION_QUALITY,
- 5, 100, 1, 50);
- if (ctx->ctrl_handler.error) {
- vpu_err("Adding JPEG control failed %d\n",
- ctx->ctrl_handler.error);
- v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- return ctx->ctrl_handler.error;
- }
- }
-
- return v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
-}
-
-/*
- * V4L2 file operations.
- */
-
-static int rockchip_vpu_open(struct file *filp)
-{
- struct rockchip_vpu_dev *vpu = video_drvdata(filp);
- struct video_device *vdev = video_devdata(filp);
- struct rockchip_vpu_ctx *ctx;
- int ret;
-
- /*
-	 * We do not need any extra locking here, because we operate only
-	 * on local data, except for reading a few fields from dev, which
-	 * do not change through the device's lifetime (guaranteed by the
-	 * reference on the module taken in open()) and V4L2 internal
-	 * objects (such as vdev and ctx->fh), which have proper locking
-	 * done in the respective helper functions used here.
- */
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
- ctx->dev = vpu;
- if (vdev == vpu->vfd_enc)
- ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx,
- &enc_queue_init);
- else
- ctx->fh.m2m_ctx = ERR_PTR(-ENODEV);
- if (IS_ERR(ctx->fh.m2m_ctx)) {
- ret = PTR_ERR(ctx->fh.m2m_ctx);
- kfree(ctx);
- return ret;
- }
-
- v4l2_fh_init(&ctx->fh, vdev);
- filp->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
-
- if (vdev == vpu->vfd_enc) {
- rockchip_vpu_enc_reset_dst_fmt(vpu, ctx);
- rockchip_vpu_enc_reset_src_fmt(vpu, ctx);
- }
-
- ret = rockchip_vpu_ctrls_setup(vpu, ctx);
- if (ret) {
- vpu_err("Failed to set up controls\n");
- goto err_fh_free;
- }
- ctx->fh.ctrl_handler = &ctx->ctrl_handler;
-
- return 0;
-
-err_fh_free:
- v4l2_fh_del(&ctx->fh);
- v4l2_fh_exit(&ctx->fh);
- kfree(ctx);
- return ret;
-}
-
-static int rockchip_vpu_release(struct file *filp)
-{
- struct rockchip_vpu_ctx *ctx =
- container_of(filp->private_data, struct rockchip_vpu_ctx, fh);
-
- /*
- * No need for extra locking because this was the last reference
- * to this file.
- */
- v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
- v4l2_fh_del(&ctx->fh);
- v4l2_fh_exit(&ctx->fh);
- v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- kfree(ctx);
-
- return 0;
-}
-
-static const struct v4l2_file_operations rockchip_vpu_fops = {
- .owner = THIS_MODULE,
- .open = rockchip_vpu_open,
- .release = rockchip_vpu_release,
- .poll = v4l2_m2m_fop_poll,
- .unlocked_ioctl = video_ioctl2,
- .mmap = v4l2_m2m_fop_mmap,
-};
-
-static const struct of_device_id of_rockchip_vpu_match[] = {
- { .compatible = "rockchip,rk3399-vpu", .data = &rk3399_vpu_variant, },
- { .compatible = "rockchip,rk3288-vpu", .data = &rk3288_vpu_variant, },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, of_rockchip_vpu_match);
-
-static int rockchip_vpu_video_device_register(struct rockchip_vpu_dev *vpu)
-{
- const struct of_device_id *match;
- struct video_device *vfd;
- int function, ret;
-
- match = of_match_node(of_rockchip_vpu_match, vpu->dev->of_node);
- vfd = video_device_alloc();
- if (!vfd) {
- v4l2_err(&vpu->v4l2_dev, "Failed to allocate video device\n");
- return -ENOMEM;
- }
-
- vfd->fops = &rockchip_vpu_fops;
- vfd->release = video_device_release;
- vfd->lock = &vpu->vpu_mutex;
- vfd->v4l2_dev = &vpu->v4l2_dev;
- vfd->vfl_dir = VFL_DIR_M2M;
- vfd->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
- vfd->ioctl_ops = &rockchip_vpu_enc_ioctl_ops;
- snprintf(vfd->name, sizeof(vfd->name), "%s-enc", match->compatible);
- vpu->vfd_enc = vfd;
- video_set_drvdata(vfd, vpu);
-
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
- if (ret) {
- v4l2_err(&vpu->v4l2_dev, "Failed to register video device\n");
- goto err_free_dev;
- }
- v4l2_info(&vpu->v4l2_dev, "registered as /dev/video%d\n", vfd->num);
-
- function = MEDIA_ENT_F_PROC_VIDEO_ENCODER;
- ret = v4l2_m2m_register_media_controller(vpu->m2m_dev, vfd, function);
- if (ret) {
- v4l2_err(&vpu->v4l2_dev, "Failed to init mem2mem media controller\n");
- goto err_unreg_video;
- }
- return 0;
-
-err_unreg_video:
- video_unregister_device(vfd);
-err_free_dev:
- video_device_release(vfd);
- return ret;
-}
-
-static int rockchip_vpu_probe(struct platform_device *pdev)
-{
- const struct of_device_id *match;
- struct rockchip_vpu_dev *vpu;
- struct resource *res;
- int i, ret;
-
- vpu = devm_kzalloc(&pdev->dev, sizeof(*vpu), GFP_KERNEL);
- if (!vpu)
- return -ENOMEM;
-
- vpu->dev = &pdev->dev;
- vpu->pdev = pdev;
- mutex_init(&vpu->vpu_mutex);
- spin_lock_init(&vpu->irqlock);
-
- match = of_match_node(of_rockchip_vpu_match, pdev->dev.of_node);
- vpu->variant = match->data;
-
- INIT_DELAYED_WORK(&vpu->watchdog_work, rockchip_vpu_watchdog);
-
- for (i = 0; i < vpu->variant->num_clocks; i++)
- vpu->clocks[i].id = vpu->variant->clk_names[i];
- ret = devm_clk_bulk_get(&pdev->dev, vpu->variant->num_clocks,
- vpu->clocks);
- if (ret)
- return ret;
-
- res = platform_get_resource(vpu->pdev, IORESOURCE_MEM, 0);
- vpu->base = devm_ioremap_resource(vpu->dev, res);
- if (IS_ERR(vpu->base))
- return PTR_ERR(vpu->base);
- vpu->enc_base = vpu->base + vpu->variant->enc_offset;
-
- ret = dma_set_coherent_mask(vpu->dev, DMA_BIT_MASK(32));
- if (ret) {
- dev_err(vpu->dev, "Could not set DMA coherent mask.\n");
- return ret;
- }
-
- if (vpu->variant->vepu_irq) {
- int irq;
-
- irq = platform_get_irq_byname(vpu->pdev, "vepu");
- if (irq <= 0) {
- dev_err(vpu->dev, "Could not get vepu IRQ.\n");
- return -ENXIO;
- }
-
- ret = devm_request_irq(vpu->dev, irq, vpu->variant->vepu_irq,
- 0, dev_name(vpu->dev), vpu);
- if (ret) {
- dev_err(vpu->dev, "Could not request vepu IRQ.\n");
- return ret;
- }
- }
-
- ret = vpu->variant->init(vpu);
- if (ret) {
- dev_err(&pdev->dev, "Failed to init VPU hardware\n");
- return ret;
- }
-
- pm_runtime_set_autosuspend_delay(vpu->dev, 100);
- pm_runtime_use_autosuspend(vpu->dev);
- pm_runtime_enable(vpu->dev);
-
- ret = clk_bulk_prepare(vpu->variant->num_clocks, vpu->clocks);
- if (ret) {
- dev_err(&pdev->dev, "Failed to prepare clocks\n");
- return ret;
- }
-
- ret = v4l2_device_register(&pdev->dev, &vpu->v4l2_dev);
- if (ret) {
- dev_err(&pdev->dev, "Failed to register v4l2 device\n");
- goto err_clk_unprepare;
- }
- platform_set_drvdata(pdev, vpu);
-
- vpu->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
- if (IS_ERR(vpu->m2m_dev)) {
- v4l2_err(&vpu->v4l2_dev, "Failed to init mem2mem device\n");
- ret = PTR_ERR(vpu->m2m_dev);
- goto err_v4l2_unreg;
- }
-
- vpu->mdev.dev = vpu->dev;
- strscpy(vpu->mdev.model, DRIVER_NAME, sizeof(vpu->mdev.model));
-	strscpy(vpu->mdev.bus_info, "platform: " DRIVER_NAME,
-		sizeof(vpu->mdev.bus_info));
- media_device_init(&vpu->mdev);
- vpu->v4l2_dev.mdev = &vpu->mdev;
-
- ret = rockchip_vpu_video_device_register(vpu);
- if (ret) {
- dev_err(&pdev->dev, "Failed to register encoder\n");
- goto err_m2m_rel;
- }
-
- ret = media_device_register(&vpu->mdev);
- if (ret) {
- v4l2_err(&vpu->v4l2_dev, "Failed to register mem2mem media device\n");
- goto err_video_dev_unreg;
- }
- return 0;
-err_video_dev_unreg:
- if (vpu->vfd_enc) {
- v4l2_m2m_unregister_media_controller(vpu->m2m_dev);
- video_unregister_device(vpu->vfd_enc);
- video_device_release(vpu->vfd_enc);
- }
-err_m2m_rel:
- media_device_cleanup(&vpu->mdev);
- v4l2_m2m_release(vpu->m2m_dev);
-err_v4l2_unreg:
- v4l2_device_unregister(&vpu->v4l2_dev);
-err_clk_unprepare:
- clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
- pm_runtime_dont_use_autosuspend(vpu->dev);
- pm_runtime_disable(vpu->dev);
- return ret;
-}
-
-static int rockchip_vpu_remove(struct platform_device *pdev)
-{
- struct rockchip_vpu_dev *vpu = platform_get_drvdata(pdev);
-
- v4l2_info(&vpu->v4l2_dev, "Removing %s\n", pdev->name);
-
- media_device_unregister(&vpu->mdev);
- if (vpu->vfd_enc) {
- v4l2_m2m_unregister_media_controller(vpu->m2m_dev);
- video_unregister_device(vpu->vfd_enc);
- video_device_release(vpu->vfd_enc);
- }
- media_device_cleanup(&vpu->mdev);
- v4l2_m2m_release(vpu->m2m_dev);
- v4l2_device_unregister(&vpu->v4l2_dev);
- clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
- pm_runtime_dont_use_autosuspend(vpu->dev);
- pm_runtime_disable(vpu->dev);
- return 0;
-}
-
-static const struct dev_pm_ops rockchip_vpu_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
-};
-
-static struct platform_driver rockchip_vpu_driver = {
- .probe = rockchip_vpu_probe,
- .remove = rockchip_vpu_remove,
- .driver = {
- .name = DRIVER_NAME,
- .of_match_table = of_match_ptr(of_rockchip_vpu_match),
- .pm = &rockchip_vpu_pm_ops,
- },
-};
-module_platform_driver(rockchip_vpu_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Alpha Lin <Alpha.Lin@Rock-Chips.com>");
-MODULE_AUTHOR("Tomasz Figa <tfiga@chromium.org>");
-MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
-MODULE_DESCRIPTION("Rockchip VPU codec driver");
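
rockchip_vpu_probe() above relies on the usual goto-unwind idiom: each error label releases only what was successfully acquired before the failure point, in reverse order, so early exits never leak resources. A minimal standalone illustration of the same structure, with the driver's real resources replaced by hypothetical acquire()/release() stubs:

#include <stdio.h>

/* Hypothetical stand-ins for clk_bulk_prepare(), v4l2_device_register(), ... */
static int acquire(const char *what)
{
	printf("acquire %s\n", what);
	return 0;
}

static void release(const char *what)
{
	printf("release %s\n", what);
}

static int probe(void)
{
	int ret;

	ret = acquire("clocks");
	if (ret)
		return ret;

	ret = acquire("v4l2 device");
	if (ret)
		goto err_clk_unprepare;

	ret = acquire("video device");
	if (ret)
		goto err_v4l2_unreg;

	return 0;

err_v4l2_unreg:
	release("v4l2 device");
err_clk_unprepare:
	release("clocks");
	return ret;
}

int main(void)
{
	return probe();
}
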
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c b/drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c
deleted file mode 100644
index dcbfc3cbc9f3..000000000000
--- a/drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c
+++ /dev/null
@@ -1,671 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Rockchip VPU codec driver
- *
- * Copyright (C) 2018 Collabora, Ltd.
- * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
- * Alpha Lin <Alpha.Lin@rock-chips.com>
- * Jeffy Chen <jeffy.chen@rock-chips.com>
- *
- * Copyright 2018 Google LLC.
- * Tomasz Figa <tfiga@chromium.org>
- *
- * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
- * Copyright (C) 2010-2011 Samsung Electronics Co., Ltd.
- */
-
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/pm_runtime.h>
-#include <linux/videodev2.h>
-#include <linux/workqueue.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-event.h>
-#include <media/v4l2-mem2mem.h>
-#include <media/videobuf2-core.h>
-#include <media/videobuf2-dma-sg.h>
-
-#include "rockchip_vpu.h"
-#include "rockchip_vpu_hw.h"
-#include "rockchip_vpu_common.h"
-
-/**
- * struct rockchip_vpu_v4l2_format_info - information about a V4L2 format
- * @format: 4CC format identifier (V4L2_PIX_FMT_*)
- * @header_size: Size of header, optional and used by compressed formats
- * @num_planes: Number of planes (1 to 3)
- * @cpp: Number of bytes per pixel (per plane)
- * @hsub: Horizontal chroma subsampling factor
- * @vsub: Vertical chroma subsampling factor
- * @is_compressed: Is it a compressed format?
- * @multiplanar: Is it a multiplanar variant format? (e.g. NV12M)
- */
-struct rockchip_vpu_v4l2_format_info {
- u32 format;
- u32 header_size;
- u8 num_planes;
- u8 cpp[3];
- u8 hsub;
- u8 vsub;
- u8 is_compressed;
- u8 multiplanar;
-};
-
-static const struct rockchip_vpu_v4l2_format_info *
-rockchip_vpu_v4l2_format_info(u32 format)
-{
- static const struct rockchip_vpu_v4l2_format_info formats[] = {
- { .format = V4L2_PIX_FMT_YUV420M, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2, .multiplanar = 1 },
- { .format = V4L2_PIX_FMT_NV12M, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2, .multiplanar = 1 },
- { .format = V4L2_PIX_FMT_YUYV, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
- { .format = V4L2_PIX_FMT_UYVY, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
- };
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(formats); ++i) {
- if (formats[i].format == format)
- return &formats[i];
- }
-
- vpu_err("Unsupported V4L 4CC format (%08x)\n", format);
- return NULL;
-}
-
-static void
-fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt,
- int pixelformat, int width, int height)
-{
- const struct rockchip_vpu_v4l2_format_info *info;
- struct v4l2_plane_pix_format *plane;
- int i;
-
- info = rockchip_vpu_v4l2_format_info(pixelformat);
- if (!info)
- return;
-
- pixfmt->width = width;
- pixfmt->height = height;
- pixfmt->pixelformat = pixelformat;
-
- if (!info->multiplanar) {
- pixfmt->num_planes = 1;
- plane = &pixfmt->plane_fmt[0];
- plane->bytesperline = info->is_compressed ?
- 0 : width * info->cpp[0];
- plane->sizeimage = info->header_size;
- for (i = 0; i < info->num_planes; i++) {
- unsigned int hsub = (i == 0) ? 1 : info->hsub;
- unsigned int vsub = (i == 0) ? 1 : info->vsub;
-
- plane->sizeimage += info->cpp[i] *
- DIV_ROUND_UP(width, hsub) *
- DIV_ROUND_UP(height, vsub);
- }
- } else {
- pixfmt->num_planes = info->num_planes;
- for (i = 0; i < info->num_planes; i++) {
- unsigned int hsub = (i == 0) ? 1 : info->hsub;
- unsigned int vsub = (i == 0) ? 1 : info->vsub;
-
- plane = &pixfmt->plane_fmt[i];
- plane->bytesperline =
- info->cpp[i] * DIV_ROUND_UP(width, hsub);
- plane->sizeimage =
- plane->bytesperline * DIV_ROUND_UP(height, vsub);
- }
- }
-}
-
-static const struct rockchip_vpu_fmt *
-rockchip_vpu_find_format(struct rockchip_vpu_ctx *ctx, u32 fourcc)
-{
- struct rockchip_vpu_dev *dev = ctx->dev;
- const struct rockchip_vpu_fmt *formats;
- unsigned int num_fmts, i;
-
- formats = dev->variant->enc_fmts;
- num_fmts = dev->variant->num_enc_fmts;
- for (i = 0; i < num_fmts; i++)
- if (formats[i].fourcc == fourcc)
- return &formats[i];
- return NULL;
-}
-
-static const struct rockchip_vpu_fmt *
-rockchip_vpu_get_default_fmt(struct rockchip_vpu_ctx *ctx, bool bitstream)
-{
- struct rockchip_vpu_dev *dev = ctx->dev;
- const struct rockchip_vpu_fmt *formats;
- unsigned int num_fmts, i;
-
- formats = dev->variant->enc_fmts;
- num_fmts = dev->variant->num_enc_fmts;
- for (i = 0; i < num_fmts; i++) {
- if (bitstream == (formats[i].codec_mode != RK_VPU_MODE_NONE))
- return &formats[i];
- }
- return NULL;
-}
-
-static int vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
-{
- struct rockchip_vpu_dev *vpu = video_drvdata(file);
- struct video_device *vdev = video_devdata(file);
-
- strscpy(cap->driver, vpu->dev->driver->name, sizeof(cap->driver));
- strscpy(cap->card, vdev->name, sizeof(cap->card));
- snprintf(cap->bus_info, sizeof(cap->bus_info), "platform: %s",
- vpu->dev->driver->name);
- return 0;
-}
-
-static int vidioc_enum_framesizes(struct file *file, void *priv,
- struct v4l2_frmsizeenum *fsize)
-{
- struct rockchip_vpu_ctx *ctx = fh_to_ctx(priv);
- const struct rockchip_vpu_fmt *fmt;
-
- if (fsize->index != 0) {
- vpu_debug(0, "invalid frame size index (expected 0, got %d)\n",
- fsize->index);
- return -EINVAL;
- }
-
- fmt = rockchip_vpu_find_format(ctx, fsize->pixel_format);
- if (!fmt) {
- vpu_debug(0, "unsupported bitstream format (%08x)\n",
- fsize->pixel_format);
- return -EINVAL;
- }
-
- /* This only makes sense for coded formats */
- if (fmt->codec_mode == RK_VPU_MODE_NONE)
- return -EINVAL;
-
- fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
- fsize->stepwise = fmt->frmsize;
-
- return 0;
-}
-
-static int vidioc_enum_fmt_vid_cap_mplane(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
-{
- struct rockchip_vpu_dev *dev = video_drvdata(file);
- const struct rockchip_vpu_fmt *fmt;
- const struct rockchip_vpu_fmt *formats;
- int num_fmts, i, j = 0;
-
- formats = dev->variant->enc_fmts;
- num_fmts = dev->variant->num_enc_fmts;
- for (i = 0; i < num_fmts; i++) {
- /* Skip uncompressed formats */
- if (formats[i].codec_mode == RK_VPU_MODE_NONE)
- continue;
- if (j == f->index) {
- fmt = &formats[i];
- f->pixelformat = fmt->fourcc;
- return 0;
- }
- ++j;
- }
- return -EINVAL;
-}
-
-static int vidioc_enum_fmt_vid_out_mplane(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
-{
- struct rockchip_vpu_dev *dev = video_drvdata(file);
- const struct rockchip_vpu_fmt *formats;
- const struct rockchip_vpu_fmt *fmt;
- int num_fmts, i, j = 0;
-
- formats = dev->variant->enc_fmts;
- num_fmts = dev->variant->num_enc_fmts;
- for (i = 0; i < num_fmts; i++) {
- if (formats[i].codec_mode != RK_VPU_MODE_NONE)
- continue;
- if (j == f->index) {
- fmt = &formats[i];
- f->pixelformat = fmt->fourcc;
- return 0;
- }
- ++j;
- }
- return -EINVAL;
-}
-
-static int vidioc_g_fmt_out_mplane(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- struct rockchip_vpu_ctx *ctx = fh_to_ctx(priv);
-
- vpu_debug(4, "f->type = %d\n", f->type);
-
- *pix_mp = ctx->src_fmt;
-
- return 0;
-}
-
-static int vidioc_g_fmt_cap_mplane(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- struct rockchip_vpu_ctx *ctx = fh_to_ctx(priv);
-
- vpu_debug(4, "f->type = %d\n", f->type);
-
- *pix_mp = ctx->dst_fmt;
-
- return 0;
-}
-
-static int
-vidioc_try_fmt_cap_mplane(struct file *file, void *priv, struct v4l2_format *f)
-{
- struct rockchip_vpu_ctx *ctx = fh_to_ctx(priv);
- struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- const struct rockchip_vpu_fmt *fmt;
-
- vpu_debug(4, "%c%c%c%c\n",
- (pix_mp->pixelformat & 0x7f),
- (pix_mp->pixelformat >> 8) & 0x7f,
- (pix_mp->pixelformat >> 16) & 0x7f,
- (pix_mp->pixelformat >> 24) & 0x7f);
-
- fmt = rockchip_vpu_find_format(ctx, pix_mp->pixelformat);
- if (!fmt) {
- fmt = rockchip_vpu_get_default_fmt(ctx, true);
- f->fmt.pix.pixelformat = fmt->fourcc;
- }
-
- pix_mp->num_planes = 1;
- pix_mp->field = V4L2_FIELD_NONE;
- pix_mp->width = clamp(pix_mp->width,
- fmt->frmsize.min_width,
- fmt->frmsize.max_width);
- pix_mp->height = clamp(pix_mp->height,
- fmt->frmsize.min_height,
- fmt->frmsize.max_height);
- /* Round up to macroblocks. */
- pix_mp->width = round_up(pix_mp->width, JPEG_MB_DIM);
- pix_mp->height = round_up(pix_mp->height, JPEG_MB_DIM);
-
- /*
- * For compressed formats the application can specify
- * sizeimage. If the application passes a zero sizeimage,
- * let's default to the maximum frame size.
- */
- if (!pix_mp->plane_fmt[0].sizeimage)
- pix_mp->plane_fmt[0].sizeimage = fmt->header_size +
- pix_mp->width * pix_mp->height * fmt->max_depth;
- memset(pix_mp->plane_fmt[0].reserved, 0,
- sizeof(pix_mp->plane_fmt[0].reserved));
- return 0;
-}
-
-static int
-vidioc_try_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
-{
- struct rockchip_vpu_ctx *ctx = fh_to_ctx(priv);
- struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- const struct rockchip_vpu_fmt *fmt;
- unsigned int width, height;
- int i;
-
- vpu_debug(4, "%c%c%c%c\n",
- (pix_mp->pixelformat & 0x7f),
- (pix_mp->pixelformat >> 8) & 0x7f,
- (pix_mp->pixelformat >> 16) & 0x7f,
- (pix_mp->pixelformat >> 24) & 0x7f);
-
- fmt = rockchip_vpu_find_format(ctx, pix_mp->pixelformat);
- if (!fmt) {
- fmt = rockchip_vpu_get_default_fmt(ctx, false);
- f->fmt.pix.pixelformat = fmt->fourcc;
- }
-
- pix_mp->field = V4L2_FIELD_NONE;
- width = clamp(pix_mp->width,
- ctx->vpu_dst_fmt->frmsize.min_width,
- ctx->vpu_dst_fmt->frmsize.max_width);
- height = clamp(pix_mp->height,
- ctx->vpu_dst_fmt->frmsize.min_height,
- ctx->vpu_dst_fmt->frmsize.max_height);
- /* Round up to macroblocks. */
- width = round_up(width, JPEG_MB_DIM);
- height = round_up(height, JPEG_MB_DIM);
-
- /* Fill remaining fields */
- fill_pixfmt_mp(pix_mp, fmt->fourcc, width, height);
-
- for (i = 0; i < pix_mp->num_planes; i++) {
- memset(pix_mp->plane_fmt[i].reserved, 0,
- sizeof(pix_mp->plane_fmt[i].reserved));
- }
- return 0;
-}
-
-void rockchip_vpu_enc_reset_dst_fmt(struct rockchip_vpu_dev *vpu,
- struct rockchip_vpu_ctx *ctx)
-{
- struct v4l2_pix_format_mplane *fmt = &ctx->dst_fmt;
-
- ctx->vpu_dst_fmt = rockchip_vpu_get_default_fmt(ctx, true);
-
- memset(fmt, 0, sizeof(*fmt));
-
- fmt->num_planes = 1;
- fmt->width = clamp(fmt->width, ctx->vpu_dst_fmt->frmsize.min_width,
- ctx->vpu_dst_fmt->frmsize.max_width);
- fmt->height = clamp(fmt->height, ctx->vpu_dst_fmt->frmsize.min_height,
- ctx->vpu_dst_fmt->frmsize.max_height);
- fmt->pixelformat = ctx->vpu_dst_fmt->fourcc;
- fmt->field = V4L2_FIELD_NONE;
-	fmt->colorspace = V4L2_COLORSPACE_JPEG;
- fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
- fmt->quantization = V4L2_QUANTIZATION_DEFAULT;
- fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT;
-
- fmt->plane_fmt[0].sizeimage = ctx->vpu_dst_fmt->header_size +
- fmt->width * fmt->height * ctx->vpu_dst_fmt->max_depth;
-}
-
-void rockchip_vpu_enc_reset_src_fmt(struct rockchip_vpu_dev *vpu,
- struct rockchip_vpu_ctx *ctx)
-{
- struct v4l2_pix_format_mplane *fmt = &ctx->src_fmt;
- unsigned int width, height;
-
- ctx->vpu_src_fmt = rockchip_vpu_get_default_fmt(ctx, false);
-
- memset(fmt, 0, sizeof(*fmt));
-
- width = clamp(fmt->width, ctx->vpu_dst_fmt->frmsize.min_width,
- ctx->vpu_dst_fmt->frmsize.max_width);
- height = clamp(fmt->height, ctx->vpu_dst_fmt->frmsize.min_height,
- ctx->vpu_dst_fmt->frmsize.max_height);
- fmt->field = V4L2_FIELD_NONE;
-	fmt->colorspace = V4L2_COLORSPACE_JPEG;
- fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
- fmt->quantization = V4L2_QUANTIZATION_DEFAULT;
- fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT;
-
- fill_pixfmt_mp(fmt, ctx->vpu_src_fmt->fourcc, width, height);
-}
-
-static int
-vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
-{
- struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- struct rockchip_vpu_ctx *ctx = fh_to_ctx(priv);
- struct vb2_queue *vq;
- int ret;
-
- /* Change not allowed if queue is streaming. */
- vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
- if (vb2_is_streaming(vq))
- return -EBUSY;
-
- ret = vidioc_try_fmt_out_mplane(file, priv, f);
- if (ret)
- return ret;
-
- ctx->vpu_src_fmt = rockchip_vpu_find_format(ctx, pix_mp->pixelformat);
- ctx->src_fmt = *pix_mp;
-
- /* Propagate to the CAPTURE format */
- ctx->dst_fmt.colorspace = pix_mp->colorspace;
- ctx->dst_fmt.ycbcr_enc = pix_mp->ycbcr_enc;
- ctx->dst_fmt.xfer_func = pix_mp->xfer_func;
- ctx->dst_fmt.quantization = pix_mp->quantization;
- ctx->dst_fmt.width = pix_mp->width;
- ctx->dst_fmt.height = pix_mp->height;
-
- vpu_debug(0, "OUTPUT codec mode: %d\n", ctx->vpu_src_fmt->codec_mode);
- vpu_debug(0, "fmt - w: %d, h: %d, mb - w: %d, h: %d\n",
- pix_mp->width, pix_mp->height,
- JPEG_MB_WIDTH(pix_mp->width),
- JPEG_MB_HEIGHT(pix_mp->height));
- return 0;
-}
-
-static int
-vidioc_s_fmt_cap_mplane(struct file *file, void *priv, struct v4l2_format *f)
-{
- struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- struct rockchip_vpu_ctx *ctx = fh_to_ctx(priv);
- struct rockchip_vpu_dev *vpu = ctx->dev;
- struct vb2_queue *vq, *peer_vq;
- int ret;
-
- /* Change not allowed if queue is streaming. */
- vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
- if (vb2_is_streaming(vq))
- return -EBUSY;
-
- /*
- * Since format change on the CAPTURE queue will reset
- * the OUTPUT queue, we can't allow doing so
- * when the OUTPUT queue has buffers allocated.
- */
- peer_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
- V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- if (vb2_is_busy(peer_vq) &&
- (pix_mp->pixelformat != ctx->dst_fmt.pixelformat ||
- pix_mp->height != ctx->dst_fmt.height ||
- pix_mp->width != ctx->dst_fmt.width))
- return -EBUSY;
-
- ret = vidioc_try_fmt_cap_mplane(file, priv, f);
- if (ret)
- return ret;
-
- ctx->vpu_dst_fmt = rockchip_vpu_find_format(ctx, pix_mp->pixelformat);
- ctx->dst_fmt = *pix_mp;
-
- vpu_debug(0, "CAPTURE codec mode: %d\n", ctx->vpu_dst_fmt->codec_mode);
- vpu_debug(0, "fmt - w: %d, h: %d, mb - w: %d, h: %d\n",
- pix_mp->width, pix_mp->height,
- JPEG_MB_WIDTH(pix_mp->width),
- JPEG_MB_HEIGHT(pix_mp->height));
-
- /*
-	 * The current raw format might have become invalid with the newly
-	 * selected codec, so reset it to the default just to be safe and
-	 * keep the internal driver state sane. The user is required to set
-	 * the raw format again after we return, so we don't need anything
-	 * smarter.
- */
- rockchip_vpu_enc_reset_src_fmt(vpu, ctx);
- return 0;
-}
-
-const struct v4l2_ioctl_ops rockchip_vpu_enc_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_framesizes = vidioc_enum_framesizes,
-
- .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_cap_mplane,
- .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_out_mplane,
- .vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt_out_mplane,
- .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt_cap_mplane,
- .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt_out_mplane,
- .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt_cap_mplane,
- .vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out_mplane,
- .vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap_mplane,
-
- .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
- .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
- .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
- .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
- .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
- .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
- .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
-
- .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
- .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
-
- .vidioc_streamon = v4l2_m2m_ioctl_streamon,
- .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
-};
-
-static int
-rockchip_vpu_queue_setup(struct vb2_queue *vq,
- unsigned int *num_buffers,
- unsigned int *num_planes,
- unsigned int sizes[],
- struct device *alloc_devs[])
-{
- struct rockchip_vpu_ctx *ctx = vb2_get_drv_priv(vq);
- struct v4l2_pix_format_mplane *pixfmt;
- int i;
-
- switch (vq->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- pixfmt = &ctx->dst_fmt;
- break;
- case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- pixfmt = &ctx->src_fmt;
- break;
- default:
- vpu_err("invalid queue type: %d\n", vq->type);
- return -EINVAL;
- }
-
- if (*num_planes) {
- if (*num_planes != pixfmt->num_planes)
- return -EINVAL;
- for (i = 0; i < pixfmt->num_planes; ++i)
- if (sizes[i] < pixfmt->plane_fmt[i].sizeimage)
- return -EINVAL;
- return 0;
- }
-
- *num_planes = pixfmt->num_planes;
- for (i = 0; i < pixfmt->num_planes; ++i)
- sizes[i] = pixfmt->plane_fmt[i].sizeimage;
- return 0;
-}
-
-static int rockchip_vpu_buf_prepare(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct vb2_queue *vq = vb->vb2_queue;
- struct rockchip_vpu_ctx *ctx = vb2_get_drv_priv(vq);
- struct v4l2_pix_format_mplane *pixfmt;
- unsigned int sz;
- int ret = 0;
- int i;
-
- switch (vq->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- pixfmt = &ctx->dst_fmt;
- break;
- case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- pixfmt = &ctx->src_fmt;
-
- if (vbuf->field == V4L2_FIELD_ANY)
- vbuf->field = V4L2_FIELD_NONE;
- if (vbuf->field != V4L2_FIELD_NONE) {
- vpu_debug(4, "field %d not supported\n",
- vbuf->field);
- return -EINVAL;
- }
- break;
- default:
- vpu_err("invalid queue type: %d\n", vq->type);
- return -EINVAL;
- }
-
- for (i = 0; i < pixfmt->num_planes; ++i) {
- sz = pixfmt->plane_fmt[i].sizeimage;
- vpu_debug(4, "plane %d size: %ld, sizeimage: %u\n",
- i, vb2_plane_size(vb, i), sz);
- if (vb2_plane_size(vb, i) < sz) {
- vpu_err("plane %d is too small\n", i);
- ret = -EINVAL;
- break;
- }
- }
-
- return ret;
-}
-
-static void rockchip_vpu_buf_queue(struct vb2_buffer *vb)
-{
- struct rockchip_vpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
-
- v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
-}
-
-static int rockchip_vpu_start_streaming(struct vb2_queue *q, unsigned int count)
-{
- struct rockchip_vpu_ctx *ctx = vb2_get_drv_priv(q);
- enum rockchip_vpu_codec_mode codec_mode;
-
- if (V4L2_TYPE_IS_OUTPUT(q->type))
- ctx->sequence_out = 0;
- else
- ctx->sequence_cap = 0;
-
- /* Set codec_ops for the chosen destination format */
- codec_mode = ctx->vpu_dst_fmt->codec_mode;
-
- vpu_debug(4, "Codec mode = %d\n", codec_mode);
- ctx->codec_ops = &ctx->dev->variant->codec_ops[codec_mode];
-
- /* A bounce buffer is needed for the JPEG payload */
- if (!V4L2_TYPE_IS_OUTPUT(q->type)) {
- ctx->bounce_size = ctx->dst_fmt.plane_fmt[0].sizeimage -
- ctx->vpu_dst_fmt->header_size;
- ctx->bounce_buf = dma_alloc_attrs(ctx->dev->dev,
- ctx->bounce_size,
- &ctx->bounce_dma_addr,
- GFP_KERNEL,
- DMA_ATTR_ALLOC_SINGLE_PAGES);
- }
- return 0;
-}
-
-static void rockchip_vpu_stop_streaming(struct vb2_queue *q)
-{
- struct rockchip_vpu_ctx *ctx = vb2_get_drv_priv(q);
-
- if (!V4L2_TYPE_IS_OUTPUT(q->type))
- dma_free_attrs(ctx->dev->dev,
- ctx->bounce_size,
- ctx->bounce_buf,
- ctx->bounce_dma_addr,
- DMA_ATTR_ALLOC_SINGLE_PAGES);
-
- /*
- * The mem2mem framework calls v4l2_m2m_cancel_job before
- * .stop_streaming, so there isn't any job running and
- * it is safe to return all the buffers.
- */
- for (;;) {
- struct vb2_v4l2_buffer *vbuf;
-
- if (V4L2_TYPE_IS_OUTPUT(q->type))
- vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- else
- vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
- if (!vbuf)
- break;
- v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
- }
-}
-
-const struct vb2_ops rockchip_vpu_enc_queue_ops = {
- .queue_setup = rockchip_vpu_queue_setup,
- .buf_prepare = rockchip_vpu_buf_prepare,
- .buf_queue = rockchip_vpu_buf_queue,
- .start_streaming = rockchip_vpu_start_streaming,
- .stop_streaming = rockchip_vpu_stop_streaming,
- .wait_prepare = vb2_ops_wait_prepare,
- .wait_finish = vb2_ops_wait_finish,
-};
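
One detail of the start/stop streaming pair removed above: the JPEG payload is bounced through a buffer sized as the capture plane's sizeimage minus the format's header_size (presumably the 601-byte JPEG header from rockchip_vpu_jpeg.h further down), allocated with DMA_ATTR_ALLOC_SINGLE_PAGES and freed with the same attribute in stop_streaming. A standalone sketch of the sizing arithmetic (the sizeimage value is an assumed example):

/*
 * Standalone sketch of the bounce buffer sizing done in
 * rockchip_vpu_start_streaming() above. sizeimage is an assumed
 * example; header_size is assumed to match JPEG_HEADER_SIZE (601).
 */
#include <stdio.h>

int main(void)
{
	unsigned int sizeimage = 2 * 1024 * 1024;	/* assumed capture plane size */
	unsigned int header_size = 601;
	unsigned int bounce_size = sizeimage - header_size;

	printf("bounce buffer for the JPEG payload: %u bytes\n", bounce_size);
	return 0;
}
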
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu_hw.h b/drivers/staging/media/rockchip/vpu/rockchip_vpu_hw.h
deleted file mode 100644
index 2b955da1be1a..000000000000
--- a/drivers/staging/media/rockchip/vpu/rockchip_vpu_hw.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Rockchip VPU codec driver
- *
- * Copyright 2018 Google LLC.
- * Tomasz Figa <tfiga@chromium.org>
- */
-
-#ifndef ROCKCHIP_VPU_HW_H_
-#define ROCKCHIP_VPU_HW_H_
-
-#include <linux/interrupt.h>
-#include <linux/v4l2-controls.h>
-#include <media/videobuf2-core.h>
-
-struct rockchip_vpu_dev;
-struct rockchip_vpu_ctx;
-struct rockchip_vpu_buf;
-struct rockchip_vpu_variant;
-
-/**
- * struct rockchip_vpu_codec_ops - codec mode specific operations
- *
- * @run:	Start a single {en,de}coding job. Called from atomic context
- * to indicate that a pair of buffers is ready and the hardware
- * should be programmed and started.
- * @done: Read back processing results and additional data from hardware.
- * @reset: Reset the hardware in case of a timeout.
- */
-struct rockchip_vpu_codec_ops {
- void (*run)(struct rockchip_vpu_ctx *ctx);
- void (*done)(struct rockchip_vpu_ctx *ctx, enum vb2_buffer_state);
- void (*reset)(struct rockchip_vpu_ctx *ctx);
-};
-
-/**
- * enum rockchip_vpu_enc_fmt - source format ID for hardware registers.
- */
-enum rockchip_vpu_enc_fmt {
- RK3288_VPU_ENC_FMT_YUV420P = 0,
- RK3288_VPU_ENC_FMT_YUV420SP = 1,
- RK3288_VPU_ENC_FMT_YUYV422 = 2,
- RK3288_VPU_ENC_FMT_UYVY422 = 3,
-};
-
-extern const struct rockchip_vpu_variant rk3399_vpu_variant;
-extern const struct rockchip_vpu_variant rk3288_vpu_variant;
-
-void rockchip_vpu_watchdog(struct work_struct *work);
-void rockchip_vpu_run(struct rockchip_vpu_ctx *ctx);
-void rockchip_vpu_irq_done(struct rockchip_vpu_dev *vpu,
- unsigned int bytesused,
- enum vb2_buffer_state result);
-
-void rk3288_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx);
-void rk3399_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx);
-
-#endif /* ROCKCHIP_VPU_HW_H_ */
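
The codec_ops table declared above is what rockchip_vpu_start_streaming() indexes by the destination format's codec_mode. A hedged sketch of how a per-SoC variant could wire it up (the array below is a hypothetical illustration; only the JPEG encode run hook declared above is referenced):

/*
 * Hedged sketch: wiring the codec_ops table declared above into a
 * hypothetical variant. The array and its index are illustrative only;
 * the real table is indexed by enum rockchip_vpu_codec_mode.
 */
static const struct rockchip_vpu_codec_ops example_codec_ops[] = {
	{
		.run	= rk3288_vpu_jpeg_enc_run,	/* declared above */
		/* .done and .reset are optional hooks */
	},
};
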
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu_jpeg.h b/drivers/staging/media/rockchip/vpu/rockchip_vpu_jpeg.h
deleted file mode 100644
index 72645d8e2ade..000000000000
--- a/drivers/staging/media/rockchip/vpu/rockchip_vpu_jpeg.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-
-#define JPEG_HEADER_SIZE 601
-
-struct rockchip_vpu_jpeg_ctx {
- int width;
- int height;
- int quality;
- unsigned char *buffer;
-};
-
-unsigned char *
-rockchip_vpu_jpeg_get_qtable(struct rockchip_vpu_jpeg_ctx *ctx, int index);
-void rockchip_vpu_jpeg_header_assemble(struct rockchip_vpu_jpeg_ctx *ctx);
diff --git a/drivers/staging/media/soc_camera/imx074.c b/drivers/staging/media/soc_camera/imx074.c
index d907aa62f898..14240b74cdd0 100644
--- a/drivers/staging/media/soc_camera/imx074.c
+++ b/drivers/staging/media/soc_camera/imx074.c
@@ -409,7 +409,7 @@ static int imx074_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct imx074 *priv;
- struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct i2c_adapter *adapter = client->adapter;
struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
int ret;
diff --git a/drivers/staging/media/soc_camera/mt9t031.c b/drivers/staging/media/soc_camera/mt9t031.c
index 615ae9df2c57..c14f23221544 100644
--- a/drivers/staging/media/soc_camera/mt9t031.c
+++ b/drivers/staging/media/soc_camera/mt9t031.c
@@ -751,7 +751,7 @@ static int mt9t031_probe(struct i2c_client *client,
{
struct mt9t031 *mt9t031;
struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct i2c_adapter *adapter = client->adapter;
int ret;
if (!ssdd) {
diff --git a/drivers/staging/media/soc_camera/soc_mt9v022.c b/drivers/staging/media/soc_camera/soc_mt9v022.c
index e7e0d3d29499..1739a618846d 100644
--- a/drivers/staging/media/soc_camera/soc_mt9v022.c
+++ b/drivers/staging/media/soc_camera/soc_mt9v022.c
@@ -883,7 +883,7 @@ static int mt9v022_probe(struct i2c_client *client,
{
struct mt9v022 *mt9v022;
struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct i2c_adapter *adapter = client->adapter;
struct mt9v022_platform_data *pdata;
int ret;
diff --git a/drivers/staging/media/soc_camera/soc_ov5642.c b/drivers/staging/media/soc_camera/soc_ov5642.c
index 94696d7baf83..39ae24dca65f 100644
--- a/drivers/staging/media/soc_camera/soc_ov5642.c
+++ b/drivers/staging/media/soc_camera/soc_ov5642.c
@@ -687,7 +687,8 @@ static int reg_write16(struct i2c_client *client, u16 reg, u16 val16)
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
-static int ov5642_get_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
+static int ov5642_get_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret;
@@ -705,7 +706,8 @@ static int ov5642_get_register(struct v4l2_subdev *sd, struct v4l2_dbg_register
return ret;
}
-static int ov5642_set_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg)
+static int ov5642_set_register(struct v4l2_subdev *sd,
+ const struct v4l2_dbg_register *reg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
diff --git a/drivers/staging/media/sunxi/cedrus/Makefile b/drivers/staging/media/sunxi/cedrus/Makefile
index 808842f0119e..c85ac6db0302 100644
--- a/drivers/staging/media/sunxi/cedrus/Makefile
+++ b/drivers/staging/media/sunxi/cedrus/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VIDEO_SUNXI_CEDRUS) += sunxi-cedrus.o
-sunxi-cedrus-y = cedrus.o cedrus_video.o cedrus_hw.o cedrus_dec.o cedrus_mpeg2.o
+sunxi-cedrus-y = cedrus.o cedrus_video.o cedrus_hw.o cedrus_dec.o \
+ cedrus_mpeg2.o cedrus_h264.o
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index d0429c0e6b6b..370937edfc14 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -40,6 +40,36 @@ static const struct cedrus_control cedrus_controls[] = {
.codec = CEDRUS_CODEC_MPEG2,
.required = false,
},
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS,
+ .elem_size = sizeof(struct v4l2_ctrl_h264_decode_params),
+ .codec = CEDRUS_CODEC_H264,
+ .required = true,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS,
+ .elem_size = sizeof(struct v4l2_ctrl_h264_slice_params),
+ .codec = CEDRUS_CODEC_H264,
+ .required = true,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_SPS,
+ .elem_size = sizeof(struct v4l2_ctrl_h264_sps),
+ .codec = CEDRUS_CODEC_H264,
+ .required = true,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_PPS,
+ .elem_size = sizeof(struct v4l2_ctrl_h264_pps),
+ .codec = CEDRUS_CODEC_H264,
+ .required = true,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX,
+ .elem_size = sizeof(struct v4l2_ctrl_h264_scaling_matrix),
+ .codec = CEDRUS_CODEC_H264,
+ .required = true,
+ },
};
#define CEDRUS_CONTROLS_COUNT ARRAY_SIZE(cedrus_controls)
@@ -278,6 +308,7 @@ static int cedrus_probe(struct platform_device *pdev)
}
dev->dec_ops[CEDRUS_CODEC_MPEG2] = &cedrus_dec_ops_mpeg2;
+ dev->dec_ops[CEDRUS_CODEC_H264] = &cedrus_dec_ops_h264;
mutex_init(&dev->dev_mutex);
@@ -369,36 +400,41 @@ static int cedrus_remove(struct platform_device *pdev)
}
static const struct cedrus_variant sun4i_a10_cedrus_variant = {
- /* No particular capability. */
+ .mod_rate = 320000000,
};
static const struct cedrus_variant sun5i_a13_cedrus_variant = {
- /* No particular capability. */
+ .mod_rate = 320000000,
};
static const struct cedrus_variant sun7i_a20_cedrus_variant = {
- /* No particular capability. */
+ .mod_rate = 320000000,
};
static const struct cedrus_variant sun8i_a33_cedrus_variant = {
.capabilities = CEDRUS_CAPABILITY_UNTILED,
+ .mod_rate = 320000000,
};
static const struct cedrus_variant sun8i_h3_cedrus_variant = {
.capabilities = CEDRUS_CAPABILITY_UNTILED,
+ .mod_rate = 402000000,
};
static const struct cedrus_variant sun50i_a64_cedrus_variant = {
.capabilities = CEDRUS_CAPABILITY_UNTILED,
+ .mod_rate = 402000000,
};
static const struct cedrus_variant sun50i_h5_cedrus_variant = {
.capabilities = CEDRUS_CAPABILITY_UNTILED,
+ .mod_rate = 402000000,
};
static const struct cedrus_variant sun50i_h6_cedrus_variant = {
.capabilities = CEDRUS_CAPABILITY_UNTILED,
.quirks = CEDRUS_QUIRK_NO_DMA_OFFSET,
+ .mod_rate = 600000000,
};
static const struct of_device_id cedrus_dt_match[] = {
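
Each cedrus variant now carries the module clock rate it needs (mod_rate), replacing the single CEDRUS_CLOCK_RATE_DEFAULT removed from cedrus_hw.h below; cedrus_hw_probe() feeds it straight into clk_set_rate(). A hedged sketch of what a further variant entry would look like (the name and rate below are hypothetical, purely for illustration):

static const struct cedrus_variant example_soc_cedrus_variant = {
	/* hypothetical entry: untiled output, 402 MHz module clock */
	.capabilities	= CEDRUS_CAPABILITY_UNTILED,
	.mod_rate	= 402000000,
};
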
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
index c57c04b41d2e..3f476d0fd981 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
@@ -32,7 +32,7 @@
enum cedrus_codec {
CEDRUS_CODEC_MPEG2,
-
+ CEDRUS_CODEC_H264,
CEDRUS_CODEC_LAST,
};
@@ -42,6 +42,12 @@ enum cedrus_irq_status {
CEDRUS_IRQ_OK,
};
+enum cedrus_h264_pic_type {
+ CEDRUS_H264_PIC_TYPE_FRAME = 0,
+ CEDRUS_H264_PIC_TYPE_FIELD,
+ CEDRUS_H264_PIC_TYPE_MBAFF,
+};
+
struct cedrus_control {
u32 id;
u32 elem_size;
@@ -49,6 +55,14 @@ struct cedrus_control {
unsigned char required:1;
};
+struct cedrus_h264_run {
+ const struct v4l2_ctrl_h264_decode_params *decode_params;
+ const struct v4l2_ctrl_h264_pps *pps;
+ const struct v4l2_ctrl_h264_scaling_matrix *scaling_matrix;
+ const struct v4l2_ctrl_h264_slice_params *slice_params;
+ const struct v4l2_ctrl_h264_sps *sps;
+};
+
struct cedrus_mpeg2_run {
const struct v4l2_ctrl_mpeg2_slice_params *slice_params;
const struct v4l2_ctrl_mpeg2_quantization *quantization;
@@ -59,12 +73,20 @@ struct cedrus_run {
struct vb2_v4l2_buffer *dst;
union {
+ struct cedrus_h264_run h264;
struct cedrus_mpeg2_run mpeg2;
};
};
struct cedrus_buffer {
struct v4l2_m2m_buffer m2m_buf;
+
+ union {
+ struct {
+ unsigned int position;
+ enum cedrus_h264_pic_type pic_type;
+ } h264;
+ } codec;
};
struct cedrus_ctx {
@@ -79,6 +101,19 @@ struct cedrus_ctx {
struct v4l2_ctrl **ctrls;
struct vb2_buffer *dst_bufs[VIDEO_MAX_FRAME];
+
+ union {
+ struct {
+ void *mv_col_buf;
+ dma_addr_t mv_col_buf_dma;
+ ssize_t mv_col_buf_field_size;
+ ssize_t mv_col_buf_size;
+ void *pic_info_buf;
+ dma_addr_t pic_info_buf_dma;
+ void *neighbor_info_buf;
+ dma_addr_t neighbor_info_buf_dma;
+ } h264;
+ } codec;
};
struct cedrus_dec_ops {
@@ -94,6 +129,7 @@ struct cedrus_dec_ops {
struct cedrus_variant {
unsigned int capabilities;
unsigned int quirks;
+ unsigned int mod_rate;
};
struct cedrus_dev {
@@ -121,6 +157,7 @@ struct cedrus_dev {
};
extern struct cedrus_dec_ops cedrus_dec_ops_mpeg2;
+extern struct cedrus_dec_ops cedrus_dec_ops_h264;
static inline void cedrus_write(struct cedrus_dev *dev, u32 reg, u32 val)
{
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
index 4d6d602cdde6..bdad87eb9d79 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
@@ -46,6 +46,19 @@ void cedrus_device_run(void *priv)
V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION);
break;
+ case V4L2_PIX_FMT_H264_SLICE_RAW:
+ run.h264.decode_params = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS);
+ run.h264.pps = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_H264_PPS);
+ run.h264.scaling_matrix = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX);
+ run.h264.slice_params = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS);
+ run.h264.sps = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_H264_SPS);
+ break;
+
default:
break;
}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
new file mode 100644
index 000000000000..a30bb283f69f
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
@@ -0,0 +1,576 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (c) 2013 Jens Kuske <jenskuske@gmail.com>
+ * Copyright (c) 2018 Bootlin
+ */
+
+#include <linux/types.h>
+
+#include <media/videobuf2-dma-contig.h>
+
+#include "cedrus.h"
+#include "cedrus_hw.h"
+#include "cedrus_regs.h"
+
+enum cedrus_h264_sram_off {
+ CEDRUS_SRAM_H264_PRED_WEIGHT_TABLE = 0x000,
+ CEDRUS_SRAM_H264_FRAMEBUFFER_LIST = 0x100,
+ CEDRUS_SRAM_H264_REF_LIST_0 = 0x190,
+ CEDRUS_SRAM_H264_REF_LIST_1 = 0x199,
+ CEDRUS_SRAM_H264_SCALING_LIST_8x8_0 = 0x200,
+ CEDRUS_SRAM_H264_SCALING_LIST_8x8_1 = 0x210,
+ CEDRUS_SRAM_H264_SCALING_LIST_4x4 = 0x220,
+};
+
+struct cedrus_h264_sram_ref_pic {
+ __le32 top_field_order_cnt;
+ __le32 bottom_field_order_cnt;
+ __le32 frame_info;
+ __le32 luma_ptr;
+ __le32 chroma_ptr;
+ __le32 mv_col_top_ptr;
+ __le32 mv_col_bot_ptr;
+ __le32 reserved;
+} __packed;
+
+#define CEDRUS_H264_FRAME_NUM 18
+
+#define CEDRUS_NEIGHBOR_INFO_BUF_SIZE (16 * SZ_1K)
+#define CEDRUS_PIC_INFO_BUF_SIZE (128 * SZ_1K)
+
+static void cedrus_h264_write_sram(struct cedrus_dev *dev,
+ enum cedrus_h264_sram_off off,
+ const void *data, size_t len)
+{
+ const u32 *buffer = data;
+ size_t count = DIV_ROUND_UP(len, 4);
+
+ cedrus_write(dev, VE_AVC_SRAM_PORT_OFFSET, off << 2);
+
+ while (count--)
+ cedrus_write(dev, VE_AVC_SRAM_PORT_DATA, *buffer++);
+}
+
+static dma_addr_t cedrus_h264_mv_col_buf_addr(struct cedrus_ctx *ctx,
+ unsigned int position,
+ unsigned int field)
+{
+ dma_addr_t addr = ctx->codec.h264.mv_col_buf_dma;
+
+ /* Adjust for the position */
+ addr += position * ctx->codec.h264.mv_col_buf_field_size * 2;
+
+ /* Adjust for the field */
+ addr += field * ctx->codec.h264.mv_col_buf_field_size;
+
+ return addr;
+}
+
+static void cedrus_fill_ref_pic(struct cedrus_ctx *ctx,
+ struct cedrus_buffer *buf,
+ unsigned int top_field_order_cnt,
+ unsigned int bottom_field_order_cnt,
+ struct cedrus_h264_sram_ref_pic *pic)
+{
+ struct vb2_buffer *vbuf = &buf->m2m_buf.vb.vb2_buf;
+ unsigned int position = buf->codec.h264.position;
+
+ pic->top_field_order_cnt = cpu_to_le32(top_field_order_cnt);
+ pic->bottom_field_order_cnt = cpu_to_le32(bottom_field_order_cnt);
+ pic->frame_info = cpu_to_le32(buf->codec.h264.pic_type << 8);
+
+ pic->luma_ptr = cpu_to_le32(cedrus_buf_addr(vbuf, &ctx->dst_fmt, 0));
+ pic->chroma_ptr = cpu_to_le32(cedrus_buf_addr(vbuf, &ctx->dst_fmt, 1));
+ pic->mv_col_top_ptr =
+ cpu_to_le32(cedrus_h264_mv_col_buf_addr(ctx, position, 0));
+ pic->mv_col_bot_ptr =
+ cpu_to_le32(cedrus_h264_mv_col_buf_addr(ctx, position, 1));
+}
+
+static void cedrus_write_frame_list(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+{
+ struct cedrus_h264_sram_ref_pic pic_list[CEDRUS_H264_FRAME_NUM];
+ const struct v4l2_ctrl_h264_decode_params *decode = run->h264.decode_params;
+ const struct v4l2_ctrl_h264_slice_params *slice = run->h264.slice_params;
+ const struct v4l2_ctrl_h264_sps *sps = run->h264.sps;
+ struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
+ struct cedrus_buffer *output_buf;
+ struct cedrus_dev *dev = ctx->dev;
+ unsigned long used_dpbs = 0;
+ unsigned int position;
+ unsigned int output = 0;
+ unsigned int i;
+
+ memset(pic_list, 0, sizeof(pic_list));
+
+ for (i = 0; i < ARRAY_SIZE(decode->dpb); i++) {
+ const struct v4l2_h264_dpb_entry *dpb = &decode->dpb[i];
+ struct cedrus_buffer *cedrus_buf;
+ int buf_idx;
+
+ if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_VALID))
+ continue;
+
+ buf_idx = vb2_find_timestamp(cap_q, dpb->reference_ts, 0);
+ if (buf_idx < 0)
+ continue;
+
+ cedrus_buf = vb2_to_cedrus_buffer(ctx->dst_bufs[buf_idx]);
+ position = cedrus_buf->codec.h264.position;
+ used_dpbs |= BIT(position);
+
+ if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
+ continue;
+
+ cedrus_fill_ref_pic(ctx, cedrus_buf,
+ dpb->top_field_order_cnt,
+ dpb->bottom_field_order_cnt,
+ &pic_list[position]);
+
+ output = max(position, output);
+ }
+
+ position = find_next_zero_bit(&used_dpbs, CEDRUS_H264_FRAME_NUM,
+ output);
+ if (position >= CEDRUS_H264_FRAME_NUM)
+ position = find_first_zero_bit(&used_dpbs, CEDRUS_H264_FRAME_NUM);
+
+ output_buf = vb2_to_cedrus_buffer(&run->dst->vb2_buf);
+ output_buf->codec.h264.position = position;
+
+ if (slice->flags & V4L2_H264_SLICE_FLAG_FIELD_PIC)
+ output_buf->codec.h264.pic_type = CEDRUS_H264_PIC_TYPE_FIELD;
+ else if (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD)
+ output_buf->codec.h264.pic_type = CEDRUS_H264_PIC_TYPE_MBAFF;
+ else
+ output_buf->codec.h264.pic_type = CEDRUS_H264_PIC_TYPE_FRAME;
+
+ cedrus_fill_ref_pic(ctx, output_buf,
+ decode->top_field_order_cnt,
+ decode->bottom_field_order_cnt,
+ &pic_list[position]);
+
+ cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_FRAMEBUFFER_LIST,
+ pic_list, sizeof(pic_list));
+
+ cedrus_write(dev, VE_H264_OUTPUT_FRAME_IDX, position);
+}
+
+#define CEDRUS_MAX_REF_IDX 32
+
+static void _cedrus_write_ref_list(struct cedrus_ctx *ctx,
+ struct cedrus_run *run,
+ const u8 *ref_list, u8 num_ref,
+ enum cedrus_h264_sram_off sram)
+{
+ const struct v4l2_ctrl_h264_decode_params *decode = run->h264.decode_params;
+ struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
+ struct cedrus_dev *dev = ctx->dev;
+ u8 sram_array[CEDRUS_MAX_REF_IDX];
+ unsigned int i;
+ size_t size;
+
+ memset(sram_array, 0, sizeof(sram_array));
+
+ for (i = 0; i < num_ref; i++) {
+ const struct v4l2_h264_dpb_entry *dpb;
+ const struct cedrus_buffer *cedrus_buf;
+ const struct vb2_v4l2_buffer *ref_buf;
+ unsigned int position;
+ int buf_idx;
+ u8 dpb_idx;
+
+ dpb_idx = ref_list[i];
+ dpb = &decode->dpb[dpb_idx];
+
+ if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
+ continue;
+
+ buf_idx = vb2_find_timestamp(cap_q, dpb->reference_ts, 0);
+ if (buf_idx < 0)
+ continue;
+
+ ref_buf = to_vb2_v4l2_buffer(ctx->dst_bufs[buf_idx]);
+ cedrus_buf = vb2_v4l2_to_cedrus_buffer(ref_buf);
+ position = cedrus_buf->codec.h264.position;
+
+ sram_array[i] |= position << 1;
+ if (ref_buf->field == V4L2_FIELD_BOTTOM)
+ sram_array[i] |= BIT(0);
+ }
+
+ size = min_t(size_t, ALIGN(num_ref, 4), sizeof(sram_array));
+ cedrus_h264_write_sram(dev, sram, &sram_array, size);
+}
+
+static void cedrus_write_ref_list0(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+{
+ const struct v4l2_ctrl_h264_slice_params *slice = run->h264.slice_params;
+
+ _cedrus_write_ref_list(ctx, run,
+ slice->ref_pic_list0,
+ slice->num_ref_idx_l0_active_minus1 + 1,
+ CEDRUS_SRAM_H264_REF_LIST_0);
+}
+
+static void cedrus_write_ref_list1(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+{
+ const struct v4l2_ctrl_h264_slice_params *slice = run->h264.slice_params;
+
+ _cedrus_write_ref_list(ctx, run,
+ slice->ref_pic_list1,
+ slice->num_ref_idx_l1_active_minus1 + 1,
+ CEDRUS_SRAM_H264_REF_LIST_1);
+}
+
+static void cedrus_write_scaling_lists(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+{
+ const struct v4l2_ctrl_h264_scaling_matrix *scaling =
+ run->h264.scaling_matrix;
+ struct cedrus_dev *dev = ctx->dev;
+
+ cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_SCALING_LIST_8x8_0,
+ scaling->scaling_list_8x8[0],
+ sizeof(scaling->scaling_list_8x8[0]));
+
+ cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_SCALING_LIST_8x8_1,
+ scaling->scaling_list_8x8[3],
+ sizeof(scaling->scaling_list_8x8[3]));
+
+ cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_SCALING_LIST_4x4,
+ scaling->scaling_list_4x4,
+ sizeof(scaling->scaling_list_4x4));
+}
+
+static void cedrus_write_pred_weight_table(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+{
+ const struct v4l2_ctrl_h264_slice_params *slice =
+ run->h264.slice_params;
+ const struct v4l2_h264_pred_weight_table *pred_weight =
+ &slice->pred_weight_table;
+ struct cedrus_dev *dev = ctx->dev;
+ int i, j, k;
+
+ cedrus_write(dev, VE_H264_SHS_WP,
+ ((pred_weight->chroma_log2_weight_denom & 0x7) << 4) |
+ ((pred_weight->luma_log2_weight_denom & 0x7) << 0));
+
+ cedrus_write(dev, VE_AVC_SRAM_PORT_OFFSET,
+ CEDRUS_SRAM_H264_PRED_WEIGHT_TABLE << 2);
+
+ for (i = 0; i < ARRAY_SIZE(pred_weight->weight_factors); i++) {
+ const struct v4l2_h264_weight_factors *factors =
+ &pred_weight->weight_factors[i];
+
+ for (j = 0; j < ARRAY_SIZE(factors->luma_weight); j++) {
+ u32 val;
+
+ val = (((u32)factors->luma_offset[j] & 0x1ff) << 16) |
+ (factors->luma_weight[j] & 0x1ff);
+ cedrus_write(dev, VE_AVC_SRAM_PORT_DATA, val);
+ }
+
+ for (j = 0; j < ARRAY_SIZE(factors->chroma_weight); j++) {
+ for (k = 0; k < ARRAY_SIZE(factors->chroma_weight[0]); k++) {
+ u32 val;
+
+ val = (((u32)factors->chroma_offset[j][k] & 0x1ff) << 16) |
+ (factors->chroma_weight[j][k] & 0x1ff);
+ cedrus_write(dev, VE_AVC_SRAM_PORT_DATA, val);
+ }
+ }
+ }
+}
+
+static void cedrus_set_params(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+{
+ const struct v4l2_ctrl_h264_decode_params *decode = run->h264.decode_params;
+ const struct v4l2_ctrl_h264_slice_params *slice = run->h264.slice_params;
+ const struct v4l2_ctrl_h264_pps *pps = run->h264.pps;
+ const struct v4l2_ctrl_h264_sps *sps = run->h264.sps;
+ struct vb2_buffer *src_buf = &run->src->vb2_buf;
+ struct cedrus_dev *dev = ctx->dev;
+ dma_addr_t src_buf_addr;
+ u32 offset = slice->header_bit_size;
+ u32 len = (slice->size * 8) - offset;
+ u32 reg;
+
+ cedrus_write(dev, VE_H264_VLD_LEN, len);
+ cedrus_write(dev, VE_H264_VLD_OFFSET, offset);
+
+ src_buf_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ cedrus_write(dev, VE_H264_VLD_END,
+ src_buf_addr + vb2_get_plane_payload(src_buf, 0));
+ cedrus_write(dev, VE_H264_VLD_ADDR,
+ VE_H264_VLD_ADDR_VAL(src_buf_addr) |
+ VE_H264_VLD_ADDR_FIRST | VE_H264_VLD_ADDR_VALID |
+ VE_H264_VLD_ADDR_LAST);
+
+ /*
+ * FIXME: Since the bitstream parsing is done in software, and
+ * in userspace, this shouldn't be needed anymore. But it
+ * turns out that removing it breaks the decoding process,
+ * without any clear indication why.
+ */
+ cedrus_write(dev, VE_H264_TRIGGER_TYPE,
+ VE_H264_TRIGGER_TYPE_INIT_SWDEC);
+
+ if (((pps->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED) &&
+ (slice->slice_type == V4L2_H264_SLICE_TYPE_P ||
+ slice->slice_type == V4L2_H264_SLICE_TYPE_SP)) ||
+ (pps->weighted_bipred_idc == 1 &&
+ slice->slice_type == V4L2_H264_SLICE_TYPE_B))
+ cedrus_write_pred_weight_table(ctx, run);
+
+ if ((slice->slice_type == V4L2_H264_SLICE_TYPE_P) ||
+ (slice->slice_type == V4L2_H264_SLICE_TYPE_SP) ||
+ (slice->slice_type == V4L2_H264_SLICE_TYPE_B))
+ cedrus_write_ref_list0(ctx, run);
+
+ if (slice->slice_type == V4L2_H264_SLICE_TYPE_B)
+ cedrus_write_ref_list1(ctx, run);
+
+ // picture parameters
+ reg = 0;
+ /*
+	 * FIXME: the kernel headers allow the default value to be
+	 * passed, but libva doesn't give us that.
+ */
+ reg |= (slice->num_ref_idx_l0_active_minus1 & 0x1f) << 10;
+ reg |= (slice->num_ref_idx_l1_active_minus1 & 0x1f) << 5;
+ reg |= (pps->weighted_bipred_idc & 0x3) << 2;
+ if (pps->flags & V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE)
+ reg |= VE_H264_PPS_ENTROPY_CODING_MODE;
+ if (pps->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED)
+ reg |= VE_H264_PPS_WEIGHTED_PRED;
+ if (pps->flags & V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED)
+ reg |= VE_H264_PPS_CONSTRAINED_INTRA_PRED;
+ if (pps->flags & V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE)
+ reg |= VE_H264_PPS_TRANSFORM_8X8_MODE;
+ cedrus_write(dev, VE_H264_PPS, reg);
+
+ // sequence parameters
+ reg = 0;
+ reg |= (sps->chroma_format_idc & 0x7) << 19;
+ reg |= (sps->pic_width_in_mbs_minus1 & 0xff) << 8;
+ reg |= sps->pic_height_in_map_units_minus1 & 0xff;
+ if (sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY)
+ reg |= VE_H264_SPS_MBS_ONLY;
+ if (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD)
+ reg |= VE_H264_SPS_MB_ADAPTIVE_FRAME_FIELD;
+ if (sps->flags & V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE)
+ reg |= VE_H264_SPS_DIRECT_8X8_INFERENCE;
+ cedrus_write(dev, VE_H264_SPS, reg);
+
+ // slice parameters
+ reg = 0;
+ reg |= decode->nal_ref_idc ? BIT(12) : 0;
+ reg |= (slice->slice_type & 0xf) << 8;
+ reg |= slice->cabac_init_idc & 0x3;
+ reg |= VE_H264_SHS_FIRST_SLICE_IN_PIC;
+ if (slice->flags & V4L2_H264_SLICE_FLAG_FIELD_PIC)
+ reg |= VE_H264_SHS_FIELD_PIC;
+ if (slice->flags & V4L2_H264_SLICE_FLAG_BOTTOM_FIELD)
+ reg |= VE_H264_SHS_BOTTOM_FIELD;
+ if (slice->flags & V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED)
+ reg |= VE_H264_SHS_DIRECT_SPATIAL_MV_PRED;
+ cedrus_write(dev, VE_H264_SHS, reg);
+
+ reg = 0;
+ reg |= VE_H264_SHS2_NUM_REF_IDX_ACTIVE_OVRD;
+ reg |= (slice->num_ref_idx_l0_active_minus1 & 0x1f) << 24;
+ reg |= (slice->num_ref_idx_l1_active_minus1 & 0x1f) << 16;
+ reg |= (slice->disable_deblocking_filter_idc & 0x3) << 8;
+ reg |= (slice->slice_alpha_c0_offset_div2 & 0xf) << 4;
+ reg |= slice->slice_beta_offset_div2 & 0xf;
+ cedrus_write(dev, VE_H264_SHS2, reg);
+
+ reg = 0;
+ reg |= (pps->second_chroma_qp_index_offset & 0x3f) << 16;
+ reg |= (pps->chroma_qp_index_offset & 0x3f) << 8;
+ reg |= (pps->pic_init_qp_minus26 + 26 + slice->slice_qp_delta) & 0x3f;
+ cedrus_write(dev, VE_H264_SHS_QP, reg);
+
+ // clear status flags
+ cedrus_write(dev, VE_H264_STATUS, cedrus_read(dev, VE_H264_STATUS));
+
+ // enable int
+ cedrus_write(dev, VE_H264_CTRL,
+ VE_H264_CTRL_SLICE_DECODE_INT |
+ VE_H264_CTRL_DECODE_ERR_INT |
+ VE_H264_CTRL_VLD_DATA_REQ_INT);
+}
+
+static enum cedrus_irq_status
+cedrus_h264_irq_status(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ u32 reg = cedrus_read(dev, VE_H264_STATUS);
+
+ if (reg & (VE_H264_STATUS_DECODE_ERR_INT |
+ VE_H264_STATUS_VLD_DATA_REQ_INT))
+ return CEDRUS_IRQ_ERROR;
+
+ if (reg & VE_H264_CTRL_SLICE_DECODE_INT)
+ return CEDRUS_IRQ_OK;
+
+ return CEDRUS_IRQ_NONE;
+}
+
+static void cedrus_h264_irq_clear(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ cedrus_write(dev, VE_H264_STATUS,
+ VE_H264_STATUS_INT_MASK);
+}
+
+static void cedrus_h264_irq_disable(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ u32 reg = cedrus_read(dev, VE_H264_CTRL);
+
+ cedrus_write(dev, VE_H264_CTRL,
+ reg & ~VE_H264_CTRL_INT_MASK);
+}
+
+static void cedrus_h264_setup(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ cedrus_engine_enable(dev, CEDRUS_CODEC_H264);
+
+ cedrus_write(dev, VE_H264_SDROT_CTRL, 0);
+ cedrus_write(dev, VE_H264_EXTRA_BUFFER1,
+ ctx->codec.h264.pic_info_buf_dma);
+ cedrus_write(dev, VE_H264_EXTRA_BUFFER2,
+ ctx->codec.h264.neighbor_info_buf_dma);
+
+ cedrus_write_scaling_lists(ctx, run);
+ cedrus_write_frame_list(ctx, run);
+
+ cedrus_set_params(ctx, run);
+}
+
+static int cedrus_h264_start(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ unsigned int field_size;
+ unsigned int mv_col_size;
+ int ret;
+
+ /*
+ * FIXME: It seems that the H6 cedarX code is using a formula
+ * here based on the size of the frame, while all the older
+ * code is using a fixed size, so that might need to be
+ * changed at some point.
+ */
+ ctx->codec.h264.pic_info_buf =
+ dma_alloc_coherent(dev->dev, CEDRUS_PIC_INFO_BUF_SIZE,
+ &ctx->codec.h264.pic_info_buf_dma,
+ GFP_KERNEL);
+ if (!ctx->codec.h264.pic_info_buf)
+ return -ENOMEM;
+
+ /*
+ * That buffer is supposed to be 16kiB in size, and be aligned
+ * on 16kiB as well. However, dma_alloc_coherent provides the
+ * guarantee that we'll have a CPU and DMA address aligned on
+	 * the smallest page order that is greater than or equal to the
+	 * requested size, so we don't have to overallocate.
+ */
+ ctx->codec.h264.neighbor_info_buf =
+ dma_alloc_coherent(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
+ &ctx->codec.h264.neighbor_info_buf_dma,
+ GFP_KERNEL);
+ if (!ctx->codec.h264.neighbor_info_buf) {
+ ret = -ENOMEM;
+ goto err_pic_buf;
+ }
+
+ field_size = DIV_ROUND_UP(ctx->src_fmt.width, 16) *
+ DIV_ROUND_UP(ctx->src_fmt.height, 16) * 16;
+
+ /*
+ * FIXME: This is actually conditional to
+ * V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE not being set, we
+ * might have to rework this if memory efficiency ever is
+ * something we need to work on.
+ */
+ field_size = field_size * 2;
+
+ /*
+ * FIXME: This is actually conditional to
+ * V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY not being set, we might
+ * have to rework this if memory efficiency ever is something
+ * we need to work on.
+ */
+ field_size = field_size * 2;
+ ctx->codec.h264.mv_col_buf_field_size = field_size;
+
+ mv_col_size = field_size * 2 * CEDRUS_H264_FRAME_NUM;
+ ctx->codec.h264.mv_col_buf_size = mv_col_size;
+ ctx->codec.h264.mv_col_buf = dma_alloc_coherent(dev->dev,
+ ctx->codec.h264.mv_col_buf_size,
+ &ctx->codec.h264.mv_col_buf_dma,
+ GFP_KERNEL);
+ if (!ctx->codec.h264.mv_col_buf) {
+ ret = -ENOMEM;
+ goto err_neighbor_buf;
+ }
+
+ return 0;
+
+err_neighbor_buf:
+ dma_free_coherent(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
+ ctx->codec.h264.neighbor_info_buf,
+ ctx->codec.h264.neighbor_info_buf_dma);
+
+err_pic_buf:
+ dma_free_coherent(dev->dev, CEDRUS_PIC_INFO_BUF_SIZE,
+ ctx->codec.h264.pic_info_buf,
+ ctx->codec.h264.pic_info_buf_dma);
+ return ret;
+}
+
+static void cedrus_h264_stop(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ dma_free_coherent(dev->dev, ctx->codec.h264.mv_col_buf_size,
+ ctx->codec.h264.mv_col_buf,
+ ctx->codec.h264.mv_col_buf_dma);
+ dma_free_coherent(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
+ ctx->codec.h264.neighbor_info_buf,
+ ctx->codec.h264.neighbor_info_buf_dma);
+ dma_free_coherent(dev->dev, CEDRUS_PIC_INFO_BUF_SIZE,
+ ctx->codec.h264.pic_info_buf,
+ ctx->codec.h264.pic_info_buf_dma);
+}
+
+static void cedrus_h264_trigger(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ cedrus_write(dev, VE_H264_TRIGGER_TYPE,
+ VE_H264_TRIGGER_TYPE_AVC_SLICE_DECODE);
+}
+
+struct cedrus_dec_ops cedrus_dec_ops_h264 = {
+ .irq_clear = cedrus_h264_irq_clear,
+ .irq_disable = cedrus_h264_irq_disable,
+ .irq_status = cedrus_h264_irq_status,
+ .setup = cedrus_h264_setup,
+ .start = cedrus_h264_start,
+ .stop = cedrus_h264_stop,
+ .trigger = cedrus_h264_trigger,
+};
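
The motion-vector/co-located buffer allocated in cedrus_h264_start() above holds CEDRUS_H264_FRAME_NUM (18) frame slots of two fields each, and cedrus_h264_mv_col_buf_addr() indexes it as base + position * field_size * 2 + field * field_size. A standalone sketch of the sizing and addressing arithmetic, assuming a 1920x1088 coded size (the resolution and the slot/field indices are example values):

/*
 * Standalone sketch of the mv/col buffer sizing and addressing used by
 * cedrus_h264_start() and cedrus_h264_mv_col_buf_addr() above.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define CEDRUS_H264_FRAME_NUM	18

int main(void)
{
	unsigned long width = 1920, height = 1088;	/* assumed coded size */
	unsigned long field_size, total;
	unsigned long position = 3, field = 1;		/* example slot and field */

	field_size = DIV_ROUND_UP(width, 16) * DIV_ROUND_UP(height, 16) * 16;
	field_size *= 2;	/* no DIRECT_8X8_INFERENCE optimisation yet */
	field_size *= 2;	/* no FRAME_MBS_ONLY optimisation yet */

	total = field_size * 2 * CEDRUS_H264_FRAME_NUM;

	printf("field size: %lu bytes, total buffer: %lu bytes\n",
	       field_size, total);
	printf("slot %lu, field %lu -> offset %lu\n", position, field,
	       position * field_size * 2 + field * field_size);
	return 0;
}
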
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
index fbfff7c1c771..c34aec7c6e40 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
@@ -46,6 +46,10 @@ int cedrus_engine_enable(struct cedrus_dev *dev, enum cedrus_codec codec)
reg |= VE_MODE_DEC_MPEG;
break;
+ case CEDRUS_CODEC_H264:
+ reg |= VE_MODE_DEC_H264;
+ break;
+
default:
return -EINVAL;
}
@@ -236,7 +240,7 @@ int cedrus_hw_probe(struct cedrus_dev *dev)
goto err_sram;
}
- ret = clk_set_rate(dev->mod_clk, CEDRUS_CLOCK_RATE_DEFAULT);
+ ret = clk_set_rate(dev->mod_clk, variant->mod_rate);
if (ret) {
dev_err(dev->dev, "Failed to set clock rate\n");
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.h b/drivers/staging/media/sunxi/cedrus/cedrus_hw.h
index b43c77d54b95..27d0882397aa 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.h
@@ -16,8 +16,6 @@
#ifndef _CEDRUS_HW_H_
#define _CEDRUS_HW_H_
-#define CEDRUS_CLOCK_RATE_DEFAULT 320000000
-
int cedrus_engine_enable(struct cedrus_dev *dev, enum cedrus_codec codec);
void cedrus_engine_disable(struct cedrus_dev *dev);
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
index de2d6b6f64bf..3e9931416e45 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
@@ -232,4 +232,95 @@
#define VE_DEC_MPEG_ROT_LUMA (VE_ENGINE_DEC_MPEG + 0xcc)
#define VE_DEC_MPEG_ROT_CHROMA (VE_ENGINE_DEC_MPEG + 0xd0)
+#define VE_H264_SPS 0x200
+#define VE_H264_SPS_MBS_ONLY BIT(18)
+#define VE_H264_SPS_MB_ADAPTIVE_FRAME_FIELD BIT(17)
+#define VE_H264_SPS_DIRECT_8X8_INFERENCE BIT(16)
+
+#define VE_H264_PPS 0x204
+#define VE_H264_PPS_ENTROPY_CODING_MODE BIT(15)
+#define VE_H264_PPS_WEIGHTED_PRED BIT(4)
+#define VE_H264_PPS_CONSTRAINED_INTRA_PRED BIT(1)
+#define VE_H264_PPS_TRANSFORM_8X8_MODE BIT(0)
+
+#define VE_H264_SHS 0x208
+#define VE_H264_SHS_FIRST_SLICE_IN_PIC BIT(5)
+#define VE_H264_SHS_FIELD_PIC BIT(4)
+#define VE_H264_SHS_BOTTOM_FIELD BIT(3)
+#define VE_H264_SHS_DIRECT_SPATIAL_MV_PRED BIT(2)
+
+#define VE_H264_SHS2 0x20c
+#define VE_H264_SHS2_NUM_REF_IDX_ACTIVE_OVRD BIT(12)
+
+#define VE_H264_SHS_WP 0x210
+
+#define VE_H264_SHS_QP 0x21c
+#define VE_H264_SHS_QP_SCALING_MATRIX_DEFAULT BIT(24)
+
+#define VE_H264_CTRL 0x220
+#define VE_H264_CTRL_VLD_DATA_REQ_INT BIT(2)
+#define VE_H264_CTRL_DECODE_ERR_INT BIT(1)
+#define VE_H264_CTRL_SLICE_DECODE_INT BIT(0)
+
+#define VE_H264_CTRL_INT_MASK (VE_H264_CTRL_VLD_DATA_REQ_INT | \
+ VE_H264_CTRL_DECODE_ERR_INT | \
+ VE_H264_CTRL_SLICE_DECODE_INT)
+
+#define VE_H264_TRIGGER_TYPE 0x224
+#define VE_H264_TRIGGER_TYPE_AVC_SLICE_DECODE (8 << 0)
+#define VE_H264_TRIGGER_TYPE_INIT_SWDEC (7 << 0)
+
+#define VE_H264_STATUS 0x228
+#define VE_H264_STATUS_VLD_DATA_REQ_INT VE_H264_CTRL_VLD_DATA_REQ_INT
+#define VE_H264_STATUS_DECODE_ERR_INT VE_H264_CTRL_DECODE_ERR_INT
+#define VE_H264_STATUS_SLICE_DECODE_INT VE_H264_CTRL_SLICE_DECODE_INT
+
+#define VE_H264_STATUS_INT_MASK VE_H264_CTRL_INT_MASK
+
+#define VE_H264_CUR_MB_NUM 0x22c
+
+#define VE_H264_VLD_ADDR 0x230
+#define VE_H264_VLD_ADDR_FIRST BIT(30)
+#define VE_H264_VLD_ADDR_LAST BIT(29)
+#define VE_H264_VLD_ADDR_VALID BIT(28)
+#define VE_H264_VLD_ADDR_VAL(x) (((x) & 0x0ffffff0) | ((x) >> 28))
+
+#define VE_H264_VLD_OFFSET 0x234
+#define VE_H264_VLD_LEN 0x238
+#define VE_H264_VLD_END 0x23c
+#define VE_H264_SDROT_CTRL 0x240
+#define VE_H264_OUTPUT_FRAME_IDX 0x24c
+#define VE_H264_EXTRA_BUFFER1 0x250
+#define VE_H264_EXTRA_BUFFER2 0x254
+#define VE_H264_BASIC_BITS 0x2dc
+#define VE_AVC_SRAM_PORT_OFFSET 0x2e0
+#define VE_AVC_SRAM_PORT_DATA 0x2e4
+
+#define VE_ISP_INPUT_SIZE 0xa00
+#define VE_ISP_INPUT_STRIDE 0xa04
+#define VE_ISP_CTRL 0xa08
+#define VE_ISP_INPUT_LUMA 0xa78
+#define VE_ISP_INPUT_CHROMA 0xa7c
+
+#define VE_AVC_PARAM 0xb04
+#define VE_AVC_QP 0xb08
+#define VE_AVC_MOTION_EST 0xb10
+#define VE_AVC_CTRL 0xb14
+#define VE_AVC_TRIGGER 0xb18
+#define VE_AVC_STATUS 0xb1c
+#define VE_AVC_BASIC_BITS 0xb20
+#define VE_AVC_UNK_BUF 0xb60
+#define VE_AVC_VLE_ADDR 0xb80
+#define VE_AVC_VLE_END 0xb84
+#define VE_AVC_VLE_OFFSET 0xb88
+#define VE_AVC_VLE_MAX 0xb8c
+#define VE_AVC_VLE_LENGTH 0xb90
+#define VE_AVC_REF_LUMA 0xba0
+#define VE_AVC_REF_CHROMA 0xba4
+#define VE_AVC_REC_LUMA 0xbb0
+#define VE_AVC_REC_CHROMA 0xbb4
+#define VE_AVC_REF_SLUMA 0xbb8
+#define VE_AVC_REC_SLUMA 0xbbc
+#define VE_AVC_MB_INFO 0xbc0
+
#endif
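
One register helper above worth spelling out: VE_H264_VLD_ADDR_VAL() repacks a 32-bit DMA address so that bits [27:4] stay in place while bits [31:28] land in bits [3:0] of the register; bits [3:0] of the address are dropped, presumably because the bitstream buffer is expected to be at least 16-byte aligned. A standalone sketch with an assumed example address:

/*
 * Standalone sketch of the VE_H264_VLD_ADDR_VAL() packing defined above.
 * The input address is an arbitrary example value.
 */
#include <stdio.h>
#include <stdint.h>

#define VE_H264_VLD_ADDR_VAL(x)	(((x) & 0x0ffffff0) | ((x) >> 28))

int main(void)
{
	uint32_t addr = 0x4a001080;	/* assumed example DMA address */

	/* bits [27:4] kept in place, bits [31:28] moved down to bits [3:0] */
	printf("0x%08x -> 0x%08x\n", addr, (uint32_t)VE_H264_VLD_ADDR_VAL(addr));
	return 0;
}
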
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
index 9673874ece10..e2b530b1a956 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
@@ -38,6 +38,10 @@ static struct cedrus_format cedrus_formats[] = {
.directions = CEDRUS_DECODE_SRC,
},
{
+ .pixelformat = V4L2_PIX_FMT_H264_SLICE_RAW,
+ .directions = CEDRUS_DECODE_SRC,
+ },
+ {
.pixelformat = V4L2_PIX_FMT_SUNXI_TILED_NV12,
.directions = CEDRUS_DECODE_DST,
},
@@ -100,6 +104,7 @@ static void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt)
switch (pix_fmt->pixelformat) {
case V4L2_PIX_FMT_MPEG2_SLICE:
+ case V4L2_PIX_FMT_H264_SLICE_RAW:
/* Zero bytes per line for encoded source. */
bytesperline = 0;
@@ -464,6 +469,10 @@ static int cedrus_start_streaming(struct vb2_queue *vq, unsigned int count)
ctx->current_codec = CEDRUS_CODEC_MPEG2;
break;
+ case V4L2_PIX_FMT_H264_SLICE_RAW:
+ ctx->current_codec = CEDRUS_CODEC_H264;
+ break;
+
default:
return -EINVAL;
}
diff --git a/drivers/staging/media/tegra-vde/Kconfig b/drivers/staging/media/tegra-vde/Kconfig
index ff8e846cd15d..2e7f644ae591 100644
--- a/drivers/staging/media/tegra-vde/Kconfig
+++ b/drivers/staging/media/tegra-vde/Kconfig
@@ -3,6 +3,7 @@ config TEGRA_VDE
tristate "NVIDIA Tegra Video Decoder Engine driver"
depends on ARCH_TEGRA || COMPILE_TEST
select DMA_SHARED_BUFFER
+ select IOMMU_IOVA if IOMMU_SUPPORT
select SRAM
help
Say Y here to enable support for the NVIDIA Tegra video decoder
diff --git a/drivers/staging/media/tegra-vde/Makefile b/drivers/staging/media/tegra-vde/Makefile
index 7f9020e634f3..2827f7601de8 100644
--- a/drivers/staging/media/tegra-vde/Makefile
+++ b/drivers/staging/media/tegra-vde/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
+tegra-vde-y := vde.o iommu.o dmabuf-cache.o
obj-$(CONFIG_TEGRA_VDE) += tegra-vde.o
diff --git a/drivers/staging/media/tegra-vde/dmabuf-cache.c b/drivers/staging/media/tegra-vde/dmabuf-cache.c
new file mode 100644
index 000000000000..a93b317885bf
--- /dev/null
+++ b/drivers/staging/media/tegra-vde/dmabuf-cache.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * NVIDIA Tegra Video decoder driver
+ *
+ * Copyright (C) 2016-2019 GRATE-DRIVER project
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/iova.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "vde.h"
+
+struct tegra_vde_cache_entry {
+ enum dma_data_direction dma_dir;
+ struct dma_buf_attachment *a;
+ struct delayed_work dwork;
+ struct tegra_vde *vde;
+ struct list_head list;
+ struct sg_table *sgt;
+ struct iova *iova;
+ unsigned int refcnt;
+};
+
+static void tegra_vde_release_entry(struct tegra_vde_cache_entry *entry)
+{
+ struct dma_buf *dmabuf = entry->a->dmabuf;
+
+ WARN_ON_ONCE(entry->refcnt);
+
+ if (entry->vde->domain)
+ tegra_vde_iommu_unmap(entry->vde, entry->iova);
+
+ dma_buf_unmap_attachment(entry->a, entry->sgt, entry->dma_dir);
+ dma_buf_detach(dmabuf, entry->a);
+ dma_buf_put(dmabuf);
+
+ list_del(&entry->list);
+ kfree(entry);
+}
+
+static void tegra_vde_delayed_unmap(struct work_struct *work)
+{
+ struct tegra_vde_cache_entry *entry;
+ struct tegra_vde *vde;
+
+ entry = container_of(work, struct tegra_vde_cache_entry,
+ dwork.work);
+ vde = entry->vde;
+
+ mutex_lock(&vde->map_lock);
+ tegra_vde_release_entry(entry);
+ mutex_unlock(&vde->map_lock);
+}
+
+int tegra_vde_dmabuf_cache_map(struct tegra_vde *vde,
+ struct dma_buf *dmabuf,
+ enum dma_data_direction dma_dir,
+ struct dma_buf_attachment **ap,
+ dma_addr_t *addrp)
+{
+ struct device *dev = vde->miscdev.parent;
+ struct dma_buf_attachment *attachment;
+ struct tegra_vde_cache_entry *entry;
+ struct sg_table *sgt;
+ struct iova *iova;
+ int err;
+
+ mutex_lock(&vde->map_lock);
+
+ list_for_each_entry(entry, &vde->map_list, list) {
+ if (entry->a->dmabuf != dmabuf)
+ continue;
+
+ if (!cancel_delayed_work(&entry->dwork))
+ continue;
+
+ if (entry->dma_dir != dma_dir)
+ entry->dma_dir = DMA_BIDIRECTIONAL;
+
+ dma_buf_put(dmabuf);
+
+ if (vde->domain)
+ *addrp = iova_dma_addr(&vde->iova, entry->iova);
+ else
+ *addrp = sg_dma_address(entry->sgt->sgl);
+
+ goto ref;
+ }
+
+ attachment = dma_buf_attach(dmabuf, dev);
+ if (IS_ERR(attachment)) {
+ dev_err(dev, "Failed to attach dmabuf\n");
+ err = PTR_ERR(attachment);
+ goto err_unlock;
+ }
+
+ sgt = dma_buf_map_attachment(attachment, dma_dir);
+ if (IS_ERR(sgt)) {
+ dev_err(dev, "Failed to get dmabufs sg_table\n");
+ err = PTR_ERR(sgt);
+ goto err_detach;
+ }
+
+ if (!vde->domain && sgt->nents > 1) {
+ dev_err(dev, "Sparse DMA region is unsupported, please enable IOMMU\n");
+ err = -EINVAL;
+ goto err_unmap;
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ err = -ENOMEM;
+ goto err_unmap;
+ }
+
+ if (vde->domain) {
+ err = tegra_vde_iommu_map(vde, sgt, &iova, dmabuf->size);
+ if (err)
+ goto err_free;
+
+ *addrp = iova_dma_addr(&vde->iova, iova);
+ } else {
+ *addrp = sg_dma_address(sgt->sgl);
+ iova = NULL;
+ }
+
+ INIT_DELAYED_WORK(&entry->dwork, tegra_vde_delayed_unmap);
+ list_add(&entry->list, &vde->map_list);
+
+ entry->dma_dir = dma_dir;
+ entry->iova = iova;
+ entry->vde = vde;
+ entry->sgt = sgt;
+ entry->a = attachment;
+ref:
+ entry->refcnt++;
+
+ *ap = entry->a;
+
+ mutex_unlock(&vde->map_lock);
+
+ return 0;
+
+err_free:
+ kfree(entry);
+err_unmap:
+ dma_buf_unmap_attachment(attachment, sgt, dma_dir);
+err_detach:
+ dma_buf_detach(dmabuf, attachment);
+err_unlock:
+ mutex_unlock(&vde->map_lock);
+
+ return err;
+}
+
+void tegra_vde_dmabuf_cache_unmap(struct tegra_vde *vde,
+ struct dma_buf_attachment *a,
+ bool release)
+{
+ struct tegra_vde_cache_entry *entry;
+
+ mutex_lock(&vde->map_lock);
+
+ list_for_each_entry(entry, &vde->map_list, list) {
+ if (entry->a != a)
+ continue;
+
+ WARN_ON_ONCE(!entry->refcnt);
+
+ if (--entry->refcnt == 0) {
+ if (release)
+ tegra_vde_release_entry(entry);
+ else
+ schedule_delayed_work(&entry->dwork, 5 * HZ);
+ }
+ break;
+ }
+
+ mutex_unlock(&vde->map_lock);
+}
+
+void tegra_vde_dmabuf_cache_unmap_sync(struct tegra_vde *vde)
+{
+ struct tegra_vde_cache_entry *entry, *tmp;
+
+ mutex_lock(&vde->map_lock);
+
+ list_for_each_entry_safe(entry, tmp, &vde->map_list, list) {
+ if (entry->refcnt)
+ continue;
+
+ if (!cancel_delayed_work(&entry->dwork))
+ continue;
+
+ tegra_vde_release_entry(entry);
+ }
+
+ mutex_unlock(&vde->map_lock);
+}
+
+void tegra_vde_dmabuf_cache_unmap_all(struct tegra_vde *vde)
+{
+ struct tegra_vde_cache_entry *entry, *tmp;
+
+ mutex_lock(&vde->map_lock);
+
+ while (!list_empty(&vde->map_list)) {
+ list_for_each_entry_safe(entry, tmp, &vde->map_list, list) {
+ if (!cancel_delayed_work(&entry->dwork))
+ continue;
+
+ tegra_vde_release_entry(entry);
+ }
+
+ mutex_unlock(&vde->map_lock);
+ schedule();
+ mutex_lock(&vde->map_lock);
+ }
+
+ mutex_unlock(&vde->map_lock);
+}
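
The cache added above is keyed on the struct dma_buf pointer: a lookup hit cancels the pending delayed unmap, widens dma_dir to DMA_BIDIRECTIONAL if the direction differs, drops the extra reference the caller just took with dma_buf_get(), and bumps the refcount; a miss attaches and maps the buffer, allocating an IOVA when an IOMMU domain is present. Unmapping with release == false leaves the entry cached for five seconds before tegra_vde_delayed_unmap() tears it down. A hedged sketch of the expected caller pattern (it mirrors tegra_vde_attach_dmabuf() in vde.c further down rather than copying it):

/*
 * Hedged sketch of how a caller is expected to use the dmabuf cache
 * added above; mirrors the pattern in vde.c, not a verbatim excerpt.
 */
static int example_map_run_unmap(struct tegra_vde *vde, int fd)
{
	struct dma_buf_attachment *a;
	struct dma_buf *dmabuf;
	dma_addr_t addr;
	int err;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* on success the cache entry keeps the dma_buf reference */
	err = tegra_vde_dmabuf_cache_map(vde, dmabuf, DMA_TO_DEVICE, &a, &addr);
	if (err) {
		dma_buf_put(dmabuf);
		return err;
	}

	/* ... program the hardware with 'addr' and run the decode job ... */

	/* release == false: keep the mapping cached for a later reuse */
	tegra_vde_dmabuf_cache_unmap(vde, a, false);

	return 0;
}
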
diff --git a/drivers/staging/media/tegra-vde/iommu.c b/drivers/staging/media/tegra-vde/iommu.c
new file mode 100644
index 000000000000..6af863d92123
--- /dev/null
+++ b/drivers/staging/media/tegra-vde/iommu.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * NVIDIA Tegra Video decoder driver
+ *
+ * Copyright (C) 2016-2019 GRATE-DRIVER project
+ */
+
+#include <linux/iommu.h>
+#include <linux/iova.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+#endif
+
+#include "vde.h"
+
+int tegra_vde_iommu_map(struct tegra_vde *vde,
+ struct sg_table *sgt,
+ struct iova **iovap,
+ size_t size)
+{
+ struct iova *iova;
+ unsigned long shift;
+ unsigned long end;
+ dma_addr_t addr;
+
+ end = vde->domain->geometry.aperture_end;
+ size = iova_align(&vde->iova, size);
+ shift = iova_shift(&vde->iova);
+
+ iova = alloc_iova(&vde->iova, size >> shift, end >> shift, true);
+ if (!iova)
+ return -ENOMEM;
+
+ addr = iova_dma_addr(&vde->iova, iova);
+
+ size = iommu_map_sg(vde->domain, addr, sgt->sgl, sgt->nents,
+ IOMMU_READ | IOMMU_WRITE);
+ if (!size) {
+ __free_iova(&vde->iova, iova);
+ return -ENXIO;
+ }
+
+ *iovap = iova;
+
+ return 0;
+}
+
+void tegra_vde_iommu_unmap(struct tegra_vde *vde, struct iova *iova)
+{
+ unsigned long shift = iova_shift(&vde->iova);
+ unsigned long size = iova_size(iova) << shift;
+ dma_addr_t addr = iova_dma_addr(&vde->iova, iova);
+
+ iommu_unmap(vde->domain, addr, size);
+ __free_iova(&vde->iova, iova);
+}
+
+int tegra_vde_iommu_init(struct tegra_vde *vde)
+{
+ struct device *dev = vde->miscdev.parent;
+ struct iova *iova;
+ unsigned long order;
+ unsigned long shift;
+ int err;
+
+ vde->group = iommu_group_get(dev);
+ if (!vde->group)
+ return 0;
+
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+ if (dev->archdata.mapping) {
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+
+ arm_iommu_detach_device(dev);
+ arm_iommu_release_mapping(mapping);
+ }
+#endif
+ vde->domain = iommu_domain_alloc(&platform_bus_type);
+ if (!vde->domain) {
+ err = -ENOMEM;
+ goto put_group;
+ }
+
+ err = iova_cache_get();
+ if (err)
+ goto free_domain;
+
+ order = __ffs(vde->domain->pgsize_bitmap);
+ init_iova_domain(&vde->iova, 1UL << order, 0);
+
+ err = iommu_attach_group(vde->domain, vde->group);
+ if (err)
+ goto put_iova;
+
+ /*
+ * We're using some static addresses that are not accessible by VDE
+ * to trap invalid memory accesses.
+ */
+ shift = iova_shift(&vde->iova);
+ iova = reserve_iova(&vde->iova, 0x60000000 >> shift,
+ 0x70000000 >> shift);
+ if (!iova) {
+ err = -ENOMEM;
+ goto detach_group;
+ }
+
+ vde->iova_resv_static_addresses = iova;
+
+ /*
+ * BSEV's end-address wraps around due to integer overflow during
+	 * hardware context preparation if an IOVA is allocated at the end
+	 * of the address space, which the VDE can't handle. Hence simply reserve
+ * the last page to avoid the problem.
+ */
+ iova = reserve_iova(&vde->iova, 0xffffffff >> shift,
+ (0xffffffff >> shift) + 1);
+ if (!iova) {
+ err = -ENOMEM;
+ goto unreserve_iova;
+ }
+
+ vde->iova_resv_last_page = iova;
+
+ return 0;
+
+unreserve_iova:
+ __free_iova(&vde->iova, vde->iova_resv_static_addresses);
+detach_group:
+ iommu_detach_group(vde->domain, vde->group);
+put_iova:
+ put_iova_domain(&vde->iova);
+ iova_cache_put();
+free_domain:
+ iommu_domain_free(vde->domain);
+put_group:
+ iommu_group_put(vde->group);
+
+ return err;
+}
+
+void tegra_vde_iommu_deinit(struct tegra_vde *vde)
+{
+ if (vde->domain) {
+ __free_iova(&vde->iova, vde->iova_resv_last_page);
+ __free_iova(&vde->iova, vde->iova_resv_static_addresses);
+ iommu_detach_group(vde->domain, vde->group);
+ put_iova_domain(&vde->iova);
+ iova_cache_put();
+ iommu_domain_free(vde->domain);
+ iommu_group_put(vde->group);
+
+ vde->domain = NULL;
+ }
+}
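
tegra_vde_iommu_init() above reserves two IOVA ranges up front: 0x60000000-0x70000000, so that the static addresses the driver programs but VDE must never reach trap as faults, and the very last page, to dodge the BSEV end-address integer overflow described in the comment. reserve_iova() works in page-frame numbers, hence the iova_shift() conversions. A standalone sketch of that arithmetic, assuming a 4 KiB IOVA granule:

/*
 * Standalone sketch of the pfn conversions used by the reserve_iova()
 * calls in tegra_vde_iommu_init() above. A 4 KiB granule is assumed;
 * the real shift comes from iova_shift(&vde->iova).
 */
#include <stdio.h>

int main(void)
{
	unsigned long shift = 12;	/* assumed 4 KiB IOVA granule */

	printf("static window: pfn 0x%lx .. 0x%lx\n",
	       0x60000000UL >> shift, 0x70000000UL >> shift);
	printf("last page:     pfn 0x%lx .. 0x%lx\n",
	       0xffffffffUL >> shift, (0xffffffffUL >> shift) + 1);
	return 0;
}
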
diff --git a/drivers/staging/media/tegra-vde/trace.h b/drivers/staging/media/tegra-vde/trace.h
index 85e2f7e2d4d0..e5714107db58 100644
--- a/drivers/staging/media/tegra-vde/trace.h
+++ b/drivers/staging/media/tegra-vde/trace.h
@@ -8,6 +8,8 @@
#include <linux/tracepoint.h>
+#include "vde.h"
+
DECLARE_EVENT_CLASS(register_access,
TP_PROTO(struct tegra_vde *vde, void __iomem *base,
u32 offset, u32 value),
diff --git a/drivers/staging/media/tegra-vde/uapi.h b/drivers/staging/media/tegra-vde/uapi.h
index a0dad1ed94ef..ffb4983e5bb6 100644
--- a/drivers/staging/media/tegra-vde/uapi.h
+++ b/drivers/staging/media/tegra-vde/uapi.h
@@ -6,8 +6,8 @@
#include <linux/types.h>
#include <asm/ioctl.h>
-#define FLAG_B_FRAME BIT(0)
-#define FLAG_REFERENCE BIT(1)
+#define FLAG_B_FRAME 0x1
+#define FLAG_REFERENCE 0x2
struct tegra_vde_h264_frame {
__s32 y_fd;
@@ -21,40 +21,42 @@ struct tegra_vde_h264_frame {
__u32 frame_num;
__u32 flags;
- __u32 reserved;
-} __attribute__((packed));
+	// Must be zeroed
+ __u32 reserved[6];
+};
struct tegra_vde_h264_decoder_ctx {
__s32 bitstream_data_fd;
__u32 bitstream_data_offset;
__u64 dpb_frames_ptr;
- __u8 dpb_frames_nb;
- __u8 dpb_ref_frames_with_earlier_poc_nb;
+ __u32 dpb_frames_nb;
+ __u32 dpb_ref_frames_with_earlier_poc_nb;
// SPS
- __u8 baseline_profile;
- __u8 level_idc;
- __u8 log2_max_pic_order_cnt_lsb;
- __u8 log2_max_frame_num;
- __u8 pic_order_cnt_type;
- __u8 direct_8x8_inference_flag;
- __u8 pic_width_in_mbs;
- __u8 pic_height_in_mbs;
+ __u32 baseline_profile;
+ __u32 level_idc;
+ __u32 log2_max_pic_order_cnt_lsb;
+ __u32 log2_max_frame_num;
+ __u32 pic_order_cnt_type;
+ __u32 direct_8x8_inference_flag;
+ __u32 pic_width_in_mbs;
+ __u32 pic_height_in_mbs;
// PPS
- __u8 pic_init_qp;
- __u8 deblocking_filter_control_present_flag;
- __u8 constrained_intra_pred_flag;
- __u8 chroma_qp_index_offset;
- __u8 pic_order_present_flag;
+ __u32 pic_init_qp;
+ __u32 deblocking_filter_control_present_flag;
+ __u32 constrained_intra_pred_flag;
+ __u32 chroma_qp_index_offset;
+ __u32 pic_order_present_flag;
// Slice header
- __u8 num_ref_idx_l0_active_minus1;
- __u8 num_ref_idx_l1_active_minus1;
+ __u32 num_ref_idx_l0_active_minus1;
+ __u32 num_ref_idx_l1_active_minus1;
- __u32 reserved;
-} __attribute__((packed));
+	// Must be zeroed
+ __u32 reserved[11];
+};
#define VDE_IOCTL_BASE ('v' + 0x20)
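
With the UAPI change above, both structures lose __attribute__((packed)), the narrow __u8 fields grow to __u32, and the reserved space grows to reserved[6] / reserved[11] words that userspace must zero. A hedged sketch of how userspace could fill the updated frame structure (plane fds/offsets other than y_fd are elided here; field values are placeholders):

/*
 * Hedged userspace sketch against the updated tegra_vde_h264_frame
 * layout above; field values are placeholders and the remaining plane
 * descriptors are omitted.
 */
#include <string.h>

static void example_fill_frame(struct tegra_vde_h264_frame *f, int y_fd)
{
	memset(f, 0, sizeof(*f));	/* also zeroes the reserved[6] words */
	f->y_fd = y_fd;
	f->frame_num = 0;
	f->flags = FLAG_REFERENCE;	/* 0x2, as redefined above */
}
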
diff --git a/drivers/staging/media/tegra-vde/tegra-vde.c b/drivers/staging/media/tegra-vde/vde.c
index a5020dbf6eef..3466daddf663 100644
--- a/drivers/staging/media/tegra-vde/tegra-vde.c
+++ b/drivers/staging/media/tegra-vde/vde.c
@@ -11,6 +11,7 @@
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
+#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of_device.h>
@@ -22,6 +23,10 @@
#include <soc/tegra/pmc.h>
#include "uapi.h"
+#include "vde.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
#define ICMDQUE_WR 0x00
#define CMDQUE_CONTROL 0x08
@@ -37,10 +42,6 @@ struct video_frame {
struct dma_buf_attachment *cb_dmabuf_attachment;
struct dma_buf_attachment *cr_dmabuf_attachment;
struct dma_buf_attachment *aux_dmabuf_attachment;
- struct sg_table *y_sgt;
- struct sg_table *cb_sgt;
- struct sg_table *cr_sgt;
- struct sg_table *aux_sgt;
dma_addr_t y_addr;
dma_addr_t cb_addr;
dma_addr_t cr_addr;
@@ -49,63 +50,6 @@ struct video_frame {
u32 flags;
};
-struct tegra_vde {
- void __iomem *sxe;
- void __iomem *bsev;
- void __iomem *mbe;
- void __iomem *ppe;
- void __iomem *mce;
- void __iomem *tfe;
- void __iomem *ppb;
- void __iomem *vdma;
- void __iomem *frameid;
- struct mutex lock;
- struct miscdevice miscdev;
- struct reset_control *rst;
- struct reset_control *rst_mc;
- struct gen_pool *iram_pool;
- struct completion decode_completion;
- struct clk *clk;
- dma_addr_t iram_lists_addr;
- u32 *iram;
-};
-
-static __maybe_unused char const *
-tegra_vde_reg_base_name(struct tegra_vde *vde, void __iomem *base)
-{
- if (vde->sxe == base)
- return "SXE";
-
- if (vde->bsev == base)
- return "BSEV";
-
- if (vde->mbe == base)
- return "MBE";
-
- if (vde->ppe == base)
- return "PPE";
-
- if (vde->mce == base)
- return "MCE";
-
- if (vde->tfe == base)
- return "TFE";
-
- if (vde->ppb == base)
- return "PPB";
-
- if (vde->vdma == base)
- return "VDMA";
-
- if (vde->frameid == base)
- return "FRAMEID";
-
- return "???";
-}
-
-#define CREATE_TRACE_POINTS
-#include "trace.h"
-
static void tegra_vde_writel(struct tegra_vde *vde,
u32 value, void __iomem *base, u32 offset)
{
@@ -543,31 +487,18 @@ static void tegra_vde_decode_frame(struct tegra_vde *vde,
vde->sxe, 0x00);
}
-static void tegra_vde_detach_and_put_dmabuf(struct dma_buf_attachment *a,
- struct sg_table *sgt,
- enum dma_data_direction dma_dir)
-{
- struct dma_buf *dmabuf = a->dmabuf;
-
- dma_buf_unmap_attachment(a, sgt, dma_dir);
- dma_buf_detach(dmabuf, a);
- dma_buf_put(dmabuf);
-}
-
-static int tegra_vde_attach_dmabuf(struct device *dev,
+static int tegra_vde_attach_dmabuf(struct tegra_vde *vde,
int fd,
unsigned long offset,
size_t min_size,
size_t align_size,
struct dma_buf_attachment **a,
- dma_addr_t *addr,
- struct sg_table **s,
+ dma_addr_t *addrp,
size_t *size,
enum dma_data_direction dma_dir)
{
- struct dma_buf_attachment *attachment;
+ struct device *dev = vde->miscdev.parent;
struct dma_buf *dmabuf;
- struct sg_table *sgt;
int err;
dmabuf = dma_buf_get(fd);
@@ -588,46 +519,24 @@ static int tegra_vde_attach_dmabuf(struct device *dev,
return -EINVAL;
}
- attachment = dma_buf_attach(dmabuf, dev);
- if (IS_ERR(attachment)) {
- dev_err(dev, "Failed to attach dmabuf\n");
- err = PTR_ERR(attachment);
+ err = tegra_vde_dmabuf_cache_map(vde, dmabuf, dma_dir, a, addrp);
+ if (err)
goto err_put;
- }
- sgt = dma_buf_map_attachment(attachment, dma_dir);
- if (IS_ERR(sgt)) {
- dev_err(dev, "Failed to get dmabufs sg_table\n");
- err = PTR_ERR(sgt);
- goto err_detach;
- }
-
- if (sgt->nents != 1) {
- dev_err(dev, "Sparse DMA region is unsupported\n");
- err = -EINVAL;
- goto err_unmap;
- }
-
- *addr = sg_dma_address(sgt->sgl) + offset;
- *a = attachment;
- *s = sgt;
+ *addrp = *addrp + offset;
if (size)
*size = dmabuf->size - offset;
return 0;
-err_unmap:
- dma_buf_unmap_attachment(attachment, sgt, dma_dir);
-err_detach:
- dma_buf_detach(dmabuf, attachment);
err_put:
dma_buf_put(dmabuf);
return err;
}
-static int tegra_vde_attach_dmabufs_to_frame(struct device *dev,
+static int tegra_vde_attach_dmabufs_to_frame(struct tegra_vde *vde,
struct video_frame *frame,
struct tegra_vde_h264_frame *src,
enum dma_data_direction dma_dir,
@@ -636,29 +545,26 @@ static int tegra_vde_attach_dmabufs_to_frame(struct device *dev,
{
int err;
- err = tegra_vde_attach_dmabuf(dev, src->y_fd,
+ err = tegra_vde_attach_dmabuf(vde, src->y_fd,
src->y_offset, lsize, SZ_256,
&frame->y_dmabuf_attachment,
&frame->y_addr,
- &frame->y_sgt,
NULL, dma_dir);
if (err)
return err;
- err = tegra_vde_attach_dmabuf(dev, src->cb_fd,
+ err = tegra_vde_attach_dmabuf(vde, src->cb_fd,
src->cb_offset, csize, SZ_256,
&frame->cb_dmabuf_attachment,
&frame->cb_addr,
- &frame->cb_sgt,
NULL, dma_dir);
if (err)
goto err_release_y;
- err = tegra_vde_attach_dmabuf(dev, src->cr_fd,
+ err = tegra_vde_attach_dmabuf(vde, src->cr_fd,
src->cr_offset, csize, SZ_256,
&frame->cr_dmabuf_attachment,
&frame->cr_addr,
- &frame->cr_sgt,
NULL, dma_dir);
if (err)
goto err_release_cb;
@@ -668,11 +574,10 @@ static int tegra_vde_attach_dmabufs_to_frame(struct device *dev,
return 0;
}
- err = tegra_vde_attach_dmabuf(dev, src->aux_fd,
+ err = tegra_vde_attach_dmabuf(vde, src->aux_fd,
src->aux_offset, csize, SZ_256,
&frame->aux_dmabuf_attachment,
&frame->aux_addr,
- &frame->aux_sgt,
NULL, dma_dir);
if (err)
goto err_release_cr;
@@ -680,34 +585,28 @@ static int tegra_vde_attach_dmabufs_to_frame(struct device *dev,
return 0;
err_release_cr:
- tegra_vde_detach_and_put_dmabuf(frame->cr_dmabuf_attachment,
- frame->cr_sgt, dma_dir);
+ tegra_vde_dmabuf_cache_unmap(vde, frame->cr_dmabuf_attachment, true);
err_release_cb:
- tegra_vde_detach_and_put_dmabuf(frame->cb_dmabuf_attachment,
- frame->cb_sgt, dma_dir);
+ tegra_vde_dmabuf_cache_unmap(vde, frame->cb_dmabuf_attachment, true);
err_release_y:
- tegra_vde_detach_and_put_dmabuf(frame->y_dmabuf_attachment,
- frame->y_sgt, dma_dir);
+ tegra_vde_dmabuf_cache_unmap(vde, frame->y_dmabuf_attachment, true);
return err;
}
-static void tegra_vde_release_frame_dmabufs(struct video_frame *frame,
+static void tegra_vde_release_frame_dmabufs(struct tegra_vde *vde,
+ struct video_frame *frame,
enum dma_data_direction dma_dir,
- bool baseline_profile)
+ bool baseline_profile,
+ bool release)
{
if (!baseline_profile)
- tegra_vde_detach_and_put_dmabuf(frame->aux_dmabuf_attachment,
- frame->aux_sgt, dma_dir);
+ tegra_vde_dmabuf_cache_unmap(vde, frame->aux_dmabuf_attachment,
+ release);
- tegra_vde_detach_and_put_dmabuf(frame->cr_dmabuf_attachment,
- frame->cr_sgt, dma_dir);
-
- tegra_vde_detach_and_put_dmabuf(frame->cb_dmabuf_attachment,
- frame->cb_sgt, dma_dir);
-
- tegra_vde_detach_and_put_dmabuf(frame->y_dmabuf_attachment,
- frame->y_sgt, dma_dir);
+ tegra_vde_dmabuf_cache_unmap(vde, frame->cr_dmabuf_attachment, release);
+ tegra_vde_dmabuf_cache_unmap(vde, frame->cb_dmabuf_attachment, release);
+ tegra_vde_dmabuf_cache_unmap(vde, frame->y_dmabuf_attachment, release);
}
static int tegra_vde_validate_frame(struct device *dev,
@@ -795,11 +694,10 @@ static int tegra_vde_ioctl_decode_h264(struct tegra_vde *vde,
{
struct device *dev = vde->miscdev.parent;
struct tegra_vde_h264_decoder_ctx ctx;
- struct tegra_vde_h264_frame frames[17];
+ struct tegra_vde_h264_frame *frames;
struct tegra_vde_h264_frame __user *frames_user;
struct video_frame *dpb_frames;
struct dma_buf_attachment *bitstream_data_dmabuf_attachment;
- struct sg_table *bitstream_sgt;
enum dma_data_direction dma_dir;
dma_addr_t bitstream_data_addr;
dma_addr_t bsev_ptr;
@@ -819,22 +717,27 @@ static int tegra_vde_ioctl_decode_h264(struct tegra_vde *vde,
if (ret)
return ret;
- ret = tegra_vde_attach_dmabuf(dev, ctx.bitstream_data_fd,
+ ret = tegra_vde_attach_dmabuf(vde, ctx.bitstream_data_fd,
ctx.bitstream_data_offset,
SZ_16K, SZ_16K,
&bitstream_data_dmabuf_attachment,
&bitstream_data_addr,
- &bitstream_sgt,
&bitstream_data_size,
DMA_TO_DEVICE);
if (ret)
return ret;
+ frames = kmalloc_array(ctx.dpb_frames_nb, sizeof(*frames), GFP_KERNEL);
+ if (!frames) {
+ ret = -ENOMEM;
+ goto release_bitstream_dmabuf;
+ }
+
dpb_frames = kcalloc(ctx.dpb_frames_nb, sizeof(*dpb_frames),
GFP_KERNEL);
if (!dpb_frames) {
ret = -ENOMEM;
- goto release_bitstream_dmabuf;
+ goto free_frames;
}
macroblocks_nb = ctx.pic_width_in_mbs * ctx.pic_height_in_mbs;
@@ -860,7 +763,7 @@ static int tegra_vde_ioctl_decode_h264(struct tegra_vde *vde,
dma_dir = (i == 0) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- ret = tegra_vde_attach_dmabufs_to_frame(dev, &dpb_frames[i],
+ ret = tegra_vde_attach_dmabufs_to_frame(vde, &dpb_frames[i],
&frames[i], dma_dir,
ctx.baseline_profile,
lsize, csize);
@@ -948,16 +851,19 @@ release_dpb_frames:
while (i--) {
dma_dir = (i == 0) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- tegra_vde_release_frame_dmabufs(&dpb_frames[i], dma_dir,
- ctx.baseline_profile);
+ tegra_vde_release_frame_dmabufs(vde, &dpb_frames[i], dma_dir,
+ ctx.baseline_profile, ret != 0);
}
free_dpb_frames:
kfree(dpb_frames);
+free_frames:
+ kfree(frames);
+
release_bitstream_dmabuf:
- tegra_vde_detach_and_put_dmabuf(bitstream_data_dmabuf_attachment,
- bitstream_sgt, DMA_TO_DEVICE);
+ tegra_vde_dmabuf_cache_unmap(vde, bitstream_data_dmabuf_attachment,
+ ret != 0);
return ret;
}
@@ -979,9 +885,21 @@ static long tegra_vde_unlocked_ioctl(struct file *filp,
return -ENOTTY;
}
+static int tegra_vde_release_file(struct inode *inode, struct file *filp)
+{
+ struct miscdevice *miscdev = filp->private_data;
+ struct tegra_vde *vde = container_of(miscdev, struct tegra_vde,
+ miscdev);
+
+ tegra_vde_dmabuf_cache_unmap_sync(vde);
+
+ return 0;
+}
+
static const struct file_operations tegra_vde_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = tegra_vde_unlocked_ioctl,
+ .release = tegra_vde_release_file,
};
static irqreturn_t tegra_vde_isr(int irq, void *data)
@@ -1159,6 +1077,8 @@ static int tegra_vde_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ INIT_LIST_HEAD(&vde->map_list);
+ mutex_init(&vde->map_lock);
mutex_init(&vde->lock);
init_completion(&vde->decode_completion);
@@ -1167,10 +1087,16 @@ static int tegra_vde_probe(struct platform_device *pdev)
vde->miscdev.fops = &tegra_vde_fops;
vde->miscdev.parent = dev;
+ err = tegra_vde_iommu_init(vde);
+ if (err) {
+ dev_err(dev, "Failed to initialize IOMMU: %d\n", err);
+ goto err_gen_free;
+ }
+
err = misc_register(&vde->miscdev);
if (err) {
dev_err(dev, "Failed to register misc device: %d\n", err);
- goto err_gen_free;
+ goto err_deinit_iommu;
}
pm_runtime_enable(dev);
@@ -1188,6 +1114,9 @@ static int tegra_vde_probe(struct platform_device *pdev)
err_misc_unreg:
misc_deregister(&vde->miscdev);
+err_deinit_iommu:
+ tegra_vde_iommu_deinit(vde);
+
err_gen_free:
gen_pool_free(vde->iram_pool, (unsigned long)vde->iram,
gen_pool_size(vde->iram_pool));
@@ -1212,6 +1141,9 @@ static int tegra_vde_remove(struct platform_device *pdev)
misc_deregister(&vde->miscdev);
+ tegra_vde_dmabuf_cache_unmap_all(vde);
+ tegra_vde_iommu_deinit(vde);
+
gen_pool_free(vde->iram_pool, (unsigned long)vde->iram,
gen_pool_size(vde->iram_pool));
diff --git a/drivers/staging/media/tegra-vde/vde.h b/drivers/staging/media/tegra-vde/vde.h
new file mode 100644
index 000000000000..d369f1466bc7
--- /dev/null
+++ b/drivers/staging/media/tegra-vde/vde.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * NVIDIA Tegra Video decoder driver
+ *
+ * Copyright (C) 2016-2019 GRATE-DRIVER project
+ */
+
+#ifndef TEGRA_VDE_H
+#define TEGRA_VDE_H
+
+#include <linux/completion.h>
+#include <linux/dma-direction.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/iova.h>
+
+struct clk;
+struct dma_buf;
+struct gen_pool;
+struct iommu_group;
+struct iommu_domain;
+struct reset_control;
+struct dma_buf_attachment;
+
+struct tegra_vde {
+ void __iomem *sxe;
+ void __iomem *bsev;
+ void __iomem *mbe;
+ void __iomem *ppe;
+ void __iomem *mce;
+ void __iomem *tfe;
+ void __iomem *ppb;
+ void __iomem *vdma;
+ void __iomem *frameid;
+ struct mutex lock;
+ struct mutex map_lock;
+ struct list_head map_list;
+ struct miscdevice miscdev;
+ struct reset_control *rst;
+ struct reset_control *rst_mc;
+ struct gen_pool *iram_pool;
+ struct completion decode_completion;
+ struct clk *clk;
+ struct iommu_domain *domain;
+ struct iommu_group *group;
+ struct iova_domain iova;
+ struct iova *iova_resv_static_addresses;
+ struct iova *iova_resv_last_page;
+ dma_addr_t iram_lists_addr;
+ u32 *iram;
+};
+
+int tegra_vde_iommu_init(struct tegra_vde *vde);
+void tegra_vde_iommu_deinit(struct tegra_vde *vde);
+int tegra_vde_iommu_map(struct tegra_vde *vde,
+ struct sg_table *sgt,
+ struct iova **iovap,
+ size_t size);
+void tegra_vde_iommu_unmap(struct tegra_vde *vde, struct iova *iova);
+
+int tegra_vde_dmabuf_cache_map(struct tegra_vde *vde,
+ struct dma_buf *dmabuf,
+ enum dma_data_direction dma_dir,
+ struct dma_buf_attachment **ap,
+ dma_addr_t *addrp);
+void tegra_vde_dmabuf_cache_unmap(struct tegra_vde *vde,
+ struct dma_buf_attachment *a,
+ bool release);
+void tegra_vde_dmabuf_cache_unmap_sync(struct tegra_vde *vde);
+void tegra_vde_dmabuf_cache_unmap_all(struct tegra_vde *vde);
+
+static __maybe_unused char const *
+tegra_vde_reg_base_name(struct tegra_vde *vde, void __iomem *base)
+{
+ if (vde->sxe == base)
+ return "SXE";
+
+ if (vde->bsev == base)
+ return "BSEV";
+
+ if (vde->mbe == base)
+ return "MBE";
+
+ if (vde->ppe == base)
+ return "PPE";
+
+ if (vde->mce == base)
+ return "MCE";
+
+ if (vde->tfe == base)
+ return "TFE";
+
+ if (vde->ppb == base)
+ return "PPB";
+
+ if (vde->vdma == base)
+ return "VDMA";
+
+ if (vde->frameid == base)
+ return "FRAMEID";
+
+ return "???";
+}
+
+#endif /* TEGRA_VDE_H */
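A minimal caller-side sketch of the dmabuf cache API declared above, based on the reworked tegra_vde_attach_dmabuf() error path shown earlier in this patch: the caller takes a reference with dma_buf_get(), hands the buffer to the cache, and only drops its own reference if the mapping fails. example_map_plane() is a hypothetical helper written for illustration, not code from this series; error handling is trimmed.

#include <linux/dma-buf.h>
#include "vde.h"

static int example_map_plane(struct tegra_vde *vde, int fd, size_t offset,
			     enum dma_data_direction dma_dir,
			     struct dma_buf_attachment **ap, dma_addr_t *addrp)
{
	struct dma_buf *dmabuf;
	dma_addr_t base;
	int err;

	dmabuf = dma_buf_get(fd);	/* takes a reference on the buffer behind fd */
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	err = tegra_vde_dmabuf_cache_map(vde, dmabuf, dma_dir, ap, &base);
	if (err) {
		/* The cache did not take ownership, so drop our reference. */
		dma_buf_put(dmabuf);
		return err;
	}

	/* On success the cache keeps the reference; report the plane address. */
	*addrp = base + offset;
	return 0;
}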
diff --git a/drivers/staging/olpc_dcon/TODO b/drivers/staging/olpc_dcon/TODO
index 665a0b061719..fe09efbc7f77 100644
--- a/drivers/staging/olpc_dcon/TODO
+++ b/drivers/staging/olpc_dcon/TODO
@@ -1,4 +1,11 @@
TODO:
+ - complete rewrite:
+ 1. The underlying fbdev drivers need to be converted into drm kernel
+ modesetting drivers.
+ 2. The dcon low-power display mode can then be integrated using the
+ drm damage tracking and self-refresh helpers.
+      The existing bolted-on self-refresh support, which digs around in fbdev
+      internals without being properly integrated, is not the correct solution
+      (a rough sketch of the intended direction follows this hunk).
- see if vx855 gpio API can be made similar enough to cs5535 so we can
share more code
- convert all uses of the old GPIO API from <linux/gpio.h> to the
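Purely as an illustration of item 2 above (not code from this series): with the fbdev driver converted to DRM KMS, the DCON low-power mode would be entered from the CRTC disable hook whenever the atomic self-refresh helpers mark the idle CRTC as self-refreshing, instead of reaching into fbdev internals. This assumes the drm_crtc_state::self_refresh_active flag provided by those helpers; dcon_enter_self_refresh() and dcon_full_power_down() are hypothetical device hooks.

static void dcon_crtc_atomic_disable(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_state)
{
	if (old_state->self_refresh_active) {
		/*
		 * Idle transition requested by the self-refresh helpers:
		 * the pipe is "off" only from DRM's point of view, so let
		 * the DCON keep scanning out of its own memory at low power.
		 */
		dcon_enter_self_refresh(crtc);	/* hypothetical */
		return;
	}

	/* A real disable: actually power the panel path down. */
	dcon_full_power_down(crtc);		/* hypothetical */
}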
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index 6b714f740ac3..a254238be181 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -250,11 +250,7 @@ static bool dcon_blank_fb(struct dcon_priv *dcon, bool blank)
int err;
console_lock();
- if (!lock_fb_info(dcon->fbinfo)) {
- console_unlock();
- dev_err(&dcon->client->dev, "unable to lock framebuffer\n");
- return false;
- }
+ lock_fb_info(dcon->fbinfo);
dcon->ignore_fb_events = true;
err = fb_blank(dcon->fbinfo,
diff --git a/drivers/staging/sm750fb/Kconfig b/drivers/staging/sm750fb/Kconfig
index fb5a086bf9b1..8c0d8a873d5b 100644
--- a/drivers/staging/sm750fb/Kconfig
+++ b/drivers/staging/sm750fb/Kconfig
@@ -12,4 +12,4 @@ config FB_SM750
This driver is also available as a module. The module will be
called sm750fb. If you want to compile it as a module, say M
- here and read <file:Documentation/kbuild/modules.txt>.
+ here and read <file:Documentation/kbuild/modules.rst>.
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index 68f08dc18da9..49d0470f9a7e 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -864,10 +864,6 @@ static int vidioc_querycap(struct file *file, void *priv,
snprintf((char *)cap->bus_info, sizeof(cap->bus_info),
"platform:%s", dev->v4l2_dev.name);
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OVERLAY |
- V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
-
return 0;
}
@@ -1446,6 +1442,8 @@ static const struct video_device vdev_template = {
.fops = &camera0_fops,
.ioctl_ops = &camera0_ioctl_ops,
.release = video_device_release_empty,
+ .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OVERLAY |
+ V4L2_CAP_STREAMING | V4L2_CAP_READWRITE,
};
/* Returns the number of cameras, and also the max resolution supported
diff --git a/drivers/staging/vc04_services/bcm2835-camera/controls.c b/drivers/staging/vc04_services/bcm2835-camera/controls.c
index 9841c30450ce..dade79738a29 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/controls.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/controls.c
@@ -572,7 +572,7 @@ exit:
dev->colourfx.enable ? "true" : "false",
dev->colourfx.u, dev->colourfx.v,
ret, (ret == 0 ? 0 : -EINVAL));
- return (ret == 0 ? 0 : EINVAL);
+ return (ret == 0 ? 0 : -EINVAL);
}
static int ctrl_set_colfx(struct bm2835_mmal_dev *dev,
@@ -596,7 +596,7 @@ static int ctrl_set_colfx(struct bm2835_mmal_dev *dev,
"%s: After: mmal_ctrl:%p ctrl id:0x%x ctrl val:%d ret %d(%d)\n",
__func__, mmal_ctrl, ctrl->id, ctrl->val, ret,
(ret == 0 ? 0 : -EINVAL));
- return (ret == 0 ? 0 : EINVAL);
+ return (ret == 0 ? 0 : -EINVAL);
}
static int ctrl_set_bitrate(struct bm2835_mmal_dev *dev,
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index a9a22917ecdb..c557c9953724 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -368,9 +368,18 @@ create_pagelist(char __user *buf, size_t count, unsigned short type)
int dma_buffers;
dma_addr_t dma_addr;
+ if (count >= INT_MAX - PAGE_SIZE)
+ return NULL;
+
offset = ((unsigned int)(unsigned long)buf & (PAGE_SIZE - 1));
num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);
+ if (num_pages > (SIZE_MAX - sizeof(struct pagelist) -
+ sizeof(struct vchiq_pagelist_info)) /
+ (sizeof(u32) + sizeof(pages[0]) +
+ sizeof(struct scatterlist)))
+ return NULL;
+
pagelist_size = sizeof(struct pagelist) +
(num_pages * sizeof(u32)) +
(num_pages * sizeof(pages[0]) +
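The two added checks bound count and num_pages up front so that the pagelist_size arithmetic above cannot wrap around. Below is a sketch of an equivalent guard using the generic helpers from <linux/overflow.h>, shown only to illustrate the pattern; pagelist_alloc_size() is a hypothetical helper, and struct pagelist / struct vchiq_pagelist_info come from the driver's own headers.

#include <linux/overflow.h>
#include <linux/scatterlist.h>

/* Illustrative only: same size computation, refusing any value that overflows. */
static bool pagelist_alloc_size(size_t num_pages, size_t *sizep)
{
	/* Per-page cost: one u32 bus address, one struct page *, one scatterlist. */
	size_t per_page = sizeof(u32) + sizeof(struct page *) +
			  sizeof(struct scatterlist);
	size_t entries, total;

	if (check_mul_overflow(num_pages, per_page, &entries))
		return false;

	if (check_add_overflow(entries,
			       sizeof(struct pagelist) +
			       sizeof(struct vchiq_pagelist_info),
			       &total))
		return false;

	*sizep = total;
	return true;
}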
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index 0a713409ea98..95eaf8fdf4f2 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -1076,13 +1076,17 @@ void wilc_wlan_cleanup(struct net_device *dev)
acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
ret = wilc->hif_func->hif_read_reg(wilc, WILC_GP_REG_0, &reg);
- if (!ret)
+ if (!ret) {
release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
+ return;
+ }
ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_0,
(reg | ABORT_INT));
- if (!ret)
+ if (!ret) {
release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
+ return;
+ }
release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
wilc->hif_func->hif_deinit(NULL);
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 6fde75d4f064..ab734534093b 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -3119,7 +3119,9 @@ static void hfa384x_usbin_callback(struct urb *urb)
break;
}
+	/* Save the status and buffer pointer before reposting the RX URB overwrites them. */
urb_status = urb->status;
+ usbin = (union hfa384x_usbin *)urb->transfer_buffer;
if (action != ABORT) {
/* Repost the RX URB */
@@ -3136,7 +3138,6 @@ static void hfa384x_usbin_callback(struct urb *urb)
/* Note: the check of the sw_support field, the type field doesn't
* have bit 12 set like the docs suggest.
*/
- usbin = (union hfa384x_usbin *)urb->transfer_buffer;
type = le16_to_cpu(usbin->type);
if (HFA384x_USB_ISRXFRM(type)) {
if (action == HANDLE) {