path: root/drivers
author		Linus Torvalds <torvalds@linux-foundation.org>	2017-05-02 15:53:46 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-05-02 15:53:46 -0700
commit		5a0387a8a8efb90ae7fea1e2e5c62de3efa74691 (patch)
tree		9e5bbbafe7fea01c843d86c7c3d40f29f962c474 /drivers
parent		Merge branch 'work.compat' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs (diff)
parent		crypto: stm32 - Fix OF module alias information (diff)
download	linux-dev-5a0387a8a8efb90ae7fea1e2e5c62de3efa74691.tar.xz
		linux-dev-5a0387a8a8efb90ae7fea1e2e5c62de3efa74691.zip
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 4.12:

  API:
   - Add batch registration for acomp/scomp
   - Change acomp testing to non-unique compressed result
   - Extend algorithm name limit to 128 bytes
   - Require setkey before accept(2) in algif_aead

  Algorithms:
   - Add support for deflate rfc1950 (zlib)

  Drivers:
   - Add accelerated crct10dif for powerpc
   - Add crc32 in stm32
   - Add sha384/sha512 in ccp
   - Add 3des/gcm(aes) for v5 devices in ccp
   - Add Queue Interface (QI) backend support in caam
   - Add new Exynos RNG driver
   - Add ThunderX ZIP driver
   - Add driver for hardware random generator on MT7623 SoC"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (101 commits)
  crypto: stm32 - Fix OF module alias information
  crypto: algif_aead - Require setkey before accept(2)
  crypto: scomp - add support for deflate rfc1950 (zlib)
  crypto: scomp - allow registration of multiple scomps
  crypto: ccp - Change ISR handler method for a v5 CCP
  crypto: ccp - Change ISR handler method for a v3 CCP
  crypto: crypto4xx - rename ce_ring_contol to ce_ring_control
  crypto: testmgr - Allow ecb(cipher_null) in FIPS mode
  Revert "crypto: arm64/sha - Add constant operand modifier to ASM_EXPORT"
  crypto: ccp - Disable interrupts early on unload
  crypto: ccp - Use only the relevant interrupt bits
  hwrng: mtk - Add driver for hardware random generator on MT7623 SoC
  dt-bindings: hwrng: Add Mediatek hardware random generator bindings
  crypto: crct10dif-vpmsum - Fix missing preempt_disable()
  crypto: testmgr - replace compression known answer test
  crypto: acomp - allow registration of multiple acomps
  hwrng: n2 - Use devm_kcalloc() in n2rng_probe()
  crypto: chcr - Fix error handling related to 'chcr_alloc_shash'
  padata: get_next is never NULL
  crypto: exynos - Add new Exynos RNG driver
  ...
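The algif_aead change listed above (require setkey before accept(2)) is visible to userspace through the AF_ALG socket interface: the key now has to be installed on the transform socket before an operation socket can be accepted. The following is only a minimal sketch of that call order, assuming a gcm(aes) AEAD and an all-zero 128-bit demonstration key; it is an illustration written for this summary, not code taken from the diff below.

	/* Hypothetical userspace sketch: setkey must precede accept(2) on algif_aead. */
	#include <linux/if_alg.h>
	#include <sys/socket.h>
	#include <unistd.h>

	#ifndef SOL_ALG
	#define SOL_ALG 279
	#endif

	int main(void)
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "aead",
			.salg_name   = "gcm(aes)",	/* any AEAD served by algif_aead */
		};
		unsigned char key[16] = { 0 };		/* all-zero demo key, 128-bit AES */
		int tfmfd, opfd;

		tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
		if (tfmfd < 0)
			return 1;
		if (bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
			return 1;

		/* Install the key on the transform socket first ... */
		if (setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key)) < 0)
			return 1;

		/* ... otherwise, after this update, accept(2) on algif_aead is refused. */
		opfd = accept(tfmfd, NULL, 0);
		if (opfd < 0)
			return 1;

		/* A real user would continue with sendmsg()/recvmsg() on opfd. */
		close(opfd);
		close(tfmfd);
		return 0;
	}

The sketch only shows the ordering constraint; error reporting and the actual encrypt/decrypt traffic on the operation socket are omitted.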
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/hw_random/Kconfig | 28
-rw-r--r--  drivers/char/hw_random/Makefile | 2
-rw-r--r--  drivers/char/hw_random/exynos-rng.c | 231
-rw-r--r--  drivers/char/hw_random/meson-rng.c | 22
-rw-r--r--  drivers/char/hw_random/mtk-rng.c | 168
-rw-r--r--  drivers/char/hw_random/n2-drv.c | 4
-rw-r--r--  drivers/char/hw_random/omap-rng.c | 22
-rw-r--r--  drivers/char/hw_random/timeriomem-rng.c | 157
-rw-r--r--  drivers/clk/meson/gxbb.h | 2
-rw-r--r--  drivers/crypto/Kconfig | 24
-rw-r--r--  drivers/crypto/Makefile | 3
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 2
-rw-r--r--  drivers/crypto/amcc/crypto4xx_reg_def.h | 2
-rw-r--r--  drivers/crypto/bcm/util.c | 2
-rw-r--r--  drivers/crypto/caam/Kconfig | 20
-rw-r--r--  drivers/crypto/caam/Makefile | 5
-rw-r--r--  drivers/crypto/caam/caamalg.c | 9
-rw-r--r--  drivers/crypto/caam/caamalg_desc.c | 77
-rw-r--r--  drivers/crypto/caam/caamalg_desc.h | 15
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c | 2387
-rw-r--r--  drivers/crypto/caam/ctrl.c | 59
-rw-r--r--  drivers/crypto/caam/desc_constr.h | 5
-rw-r--r--  drivers/crypto/caam/intern.h | 24
-rw-r--r--  drivers/crypto/caam/qi.c | 805
-rw-r--r--  drivers/crypto/caam/qi.h | 201
-rw-r--r--  drivers/crypto/caam/sg_sw_qm.h | 108
-rw-r--r--  drivers/crypto/cavium/Makefile | 4
-rw-r--r--  drivers/crypto/cavium/zip/Makefile | 11
-rw-r--r--  drivers/crypto/cavium/zip/common.h | 202
-rw-r--r--  drivers/crypto/cavium/zip/zip_crypto.c | 313
-rw-r--r--  drivers/crypto/cavium/zip/zip_crypto.h | 79
-rw-r--r--  drivers/crypto/cavium/zip/zip_deflate.c | 200
-rw-r--r--  drivers/crypto/cavium/zip/zip_deflate.h | 62
-rw-r--r--  drivers/crypto/cavium/zip/zip_device.c | 202
-rw-r--r--  drivers/crypto/cavium/zip/zip_device.h | 108
-rw-r--r--  drivers/crypto/cavium/zip/zip_inflate.c | 223
-rw-r--r--  drivers/crypto/cavium/zip/zip_inflate.h | 62
-rw-r--r--  drivers/crypto/cavium/zip/zip_main.c | 729
-rw-r--r--  drivers/crypto/cavium/zip/zip_main.h | 121
-rw-r--r--  drivers/crypto/cavium/zip/zip_mem.c | 114
-rw-r--r--  drivers/crypto/cavium/zip/zip_mem.h | 78
-rw-r--r--  drivers/crypto/cavium/zip/zip_regs.h | 1347
-rw-r--r--  drivers/crypto/ccp/Makefile | 2
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-galois.c | 252
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-des3.c | 254
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-main.c | 22
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-sha.c | 22
-rw-r--r--  drivers/crypto/ccp/ccp-crypto.h | 44
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v3.c | 121
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v5.c | 168
-rw-r--r--  drivers/crypto/ccp/ccp-dev.h | 30
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c | 522
-rw-r--r--  drivers/crypto/ccp/ccp-pci.c | 2
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c | 304
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.h | 4
-rw-r--r--  drivers/crypto/chelsio/chcr_core.h | 2
-rw-r--r--  drivers/crypto/chelsio/chcr_crypto.h | 10
-rw-r--r--  drivers/crypto/exynos-rng.c | 389
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 2
-rw-r--r--  drivers/crypto/mediatek/mtk-aes.c | 421
-rw-r--r--  drivers/crypto/mediatek/mtk-platform.c | 15
-rw-r--r--  drivers/crypto/mediatek/mtk-platform.h | 56
-rw-r--r--  drivers/crypto/mediatek/mtk-sha.c | 309
-rw-r--r--  drivers/crypto/qat/qat_common/qat_asym_algs.c | 2
-rw-r--r--  drivers/crypto/s5p-sss.c | 35
-rw-r--r--  drivers/crypto/stm32/Kconfig | 7
-rw-r--r--  drivers/crypto/stm32/Makefile | 2
-rw-r--r--  drivers/crypto/stm32/stm32_crc32.c | 324
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 9
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 3
-rw-r--r--  drivers/soc/fsl/qbman/qman.c | 4
-rw-r--r--  drivers/soc/fsl/qbman/qman_ccsr.c | 6
-rw-r--r--  drivers/soc/fsl/qbman/qman_priv.h | 97
75 files changed, 10530 insertions, 1152 deletions
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index b9918fb9587d..1b223c32a8ae 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -294,20 +294,6 @@ config HW_RANDOM_POWERNV
If unsure, say Y.
-config HW_RANDOM_EXYNOS
- tristate "EXYNOS HW random number generator support"
- depends on ARCH_EXYNOS || COMPILE_TEST
- depends on HAS_IOMEM
- default HW_RANDOM
- ---help---
- This driver provides kernel-side support for the Random Number
- Generator hardware found on EXYNOS SOCs.
-
- To compile this driver as a module, choose M here: the
- module will be called exynos-rng.
-
- If unsure, say Y.
-
config HW_RANDOM_TPM
tristate "TPM HW Random Number Generator support"
depends on TCG_TPM
@@ -423,6 +409,20 @@ config HW_RANDOM_CAVIUM
If unsure, say Y.
+config HW_RANDOM_MTK
+ tristate "Mediatek Random Number Generator support"
+ depends on HW_RANDOM
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ default y
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Mediatek SoCs.
+
+ To compile this driver as a module, choose M here. the
+ module will be called mtk-rng.
+
+ If unsure, say Y.
+
config HW_RANDOM_S390
tristate "S390 True Random Number Generator support"
depends on S390
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index dd1765246255..b085975ec1d2 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -24,7 +24,6 @@ obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
-obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o
obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
@@ -36,4 +35,5 @@ obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
obj-$(CONFIG_HW_RANDOM_PIC32) += pic32-rng.o
obj-$(CONFIG_HW_RANDOM_MESON) += meson-rng.o
obj-$(CONFIG_HW_RANDOM_CAVIUM) += cavium-rng.o cavium-rng-vf.o
+obj-$(CONFIG_HW_RANDOM_MTK) += mtk-rng.o
obj-$(CONFIG_HW_RANDOM_S390) += s390-trng.o
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
deleted file mode 100644
index 23d358553b21..000000000000
--- a/drivers/char/hw_random/exynos-rng.c
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * exynos-rng.c - Random Number Generator driver for the exynos
- *
- * Copyright (C) 2012 Samsung Electronics
- * Jonghwa Lee <jonghwa3.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation;
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <linux/hw_random.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/pm_runtime.h>
-#include <linux/err.h>
-
-#define EXYNOS_PRNG_STATUS_OFFSET 0x10
-#define EXYNOS_PRNG_SEED_OFFSET 0x140
-#define EXYNOS_PRNG_OUT1_OFFSET 0x160
-#define SEED_SETTING_DONE BIT(1)
-#define PRNG_START 0x18
-#define PRNG_DONE BIT(5)
-#define EXYNOS_AUTOSUSPEND_DELAY 100
-
-struct exynos_rng {
- struct device *dev;
- struct hwrng rng;
- void __iomem *mem;
- struct clk *clk;
-};
-
-static u32 exynos_rng_readl(struct exynos_rng *rng, u32 offset)
-{
- return readl_relaxed(rng->mem + offset);
-}
-
-static void exynos_rng_writel(struct exynos_rng *rng, u32 val, u32 offset)
-{
- writel_relaxed(val, rng->mem + offset);
-}
-
-static int exynos_rng_configure(struct exynos_rng *exynos_rng)
-{
- int i;
- int ret = 0;
-
- for (i = 0 ; i < 5 ; i++)
- exynos_rng_writel(exynos_rng, jiffies,
- EXYNOS_PRNG_SEED_OFFSET + 4*i);
-
- if (!(exynos_rng_readl(exynos_rng, EXYNOS_PRNG_STATUS_OFFSET)
- & SEED_SETTING_DONE))
- ret = -EIO;
-
- return ret;
-}
-
-static int exynos_init(struct hwrng *rng)
-{
- struct exynos_rng *exynos_rng = container_of(rng,
- struct exynos_rng, rng);
- int ret = 0;
-
- pm_runtime_get_sync(exynos_rng->dev);
- ret = exynos_rng_configure(exynos_rng);
- pm_runtime_mark_last_busy(exynos_rng->dev);
- pm_runtime_put_autosuspend(exynos_rng->dev);
-
- return ret;
-}
-
-static int exynos_read(struct hwrng *rng, void *buf,
- size_t max, bool wait)
-{
- struct exynos_rng *exynos_rng = container_of(rng,
- struct exynos_rng, rng);
- u32 *data = buf;
- int retry = 100;
- int ret = 4;
-
- pm_runtime_get_sync(exynos_rng->dev);
-
- exynos_rng_writel(exynos_rng, PRNG_START, 0);
-
- while (!(exynos_rng_readl(exynos_rng,
- EXYNOS_PRNG_STATUS_OFFSET) & PRNG_DONE) && --retry)
- cpu_relax();
- if (!retry) {
- ret = -ETIMEDOUT;
- goto out;
- }
-
- exynos_rng_writel(exynos_rng, PRNG_DONE, EXYNOS_PRNG_STATUS_OFFSET);
-
- *data = exynos_rng_readl(exynos_rng, EXYNOS_PRNG_OUT1_OFFSET);
-
-out:
- pm_runtime_mark_last_busy(exynos_rng->dev);
- pm_runtime_put_sync_autosuspend(exynos_rng->dev);
-
- return ret;
-}
-
-static int exynos_rng_probe(struct platform_device *pdev)
-{
- struct exynos_rng *exynos_rng;
- struct resource *res;
- int ret;
-
- exynos_rng = devm_kzalloc(&pdev->dev, sizeof(struct exynos_rng),
- GFP_KERNEL);
- if (!exynos_rng)
- return -ENOMEM;
-
- exynos_rng->dev = &pdev->dev;
- exynos_rng->rng.name = "exynos";
- exynos_rng->rng.init = exynos_init;
- exynos_rng->rng.read = exynos_read;
- exynos_rng->clk = devm_clk_get(&pdev->dev, "secss");
- if (IS_ERR(exynos_rng->clk)) {
- dev_err(&pdev->dev, "Couldn't get clock.\n");
- return -ENOENT;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- exynos_rng->mem = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(exynos_rng->mem))
- return PTR_ERR(exynos_rng->mem);
-
- platform_set_drvdata(pdev, exynos_rng);
-
- pm_runtime_set_autosuspend_delay(&pdev->dev, EXYNOS_AUTOSUSPEND_DELAY);
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
-
- ret = devm_hwrng_register(&pdev->dev, &exynos_rng->rng);
- if (ret) {
- pm_runtime_dont_use_autosuspend(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
- }
-
- return ret;
-}
-
-static int exynos_rng_remove(struct platform_device *pdev)
-{
- pm_runtime_dont_use_autosuspend(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
-
- return 0;
-}
-
-static int __maybe_unused exynos_rng_runtime_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
-
- clk_disable_unprepare(exynos_rng->clk);
-
- return 0;
-}
-
-static int __maybe_unused exynos_rng_runtime_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
-
- return clk_prepare_enable(exynos_rng->clk);
-}
-
-static int __maybe_unused exynos_rng_suspend(struct device *dev)
-{
- return pm_runtime_force_suspend(dev);
-}
-
-static int __maybe_unused exynos_rng_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
- int ret;
-
- ret = pm_runtime_force_resume(dev);
- if (ret)
- return ret;
-
- return exynos_rng_configure(exynos_rng);
-}
-
-static const struct dev_pm_ops exynos_rng_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(exynos_rng_suspend, exynos_rng_resume)
- SET_RUNTIME_PM_OPS(exynos_rng_runtime_suspend,
- exynos_rng_runtime_resume, NULL)
-};
-
-static const struct of_device_id exynos_rng_dt_match[] = {
- {
- .compatible = "samsung,exynos4-rng",
- },
- { },
-};
-MODULE_DEVICE_TABLE(of, exynos_rng_dt_match);
-
-static struct platform_driver exynos_rng_driver = {
- .driver = {
- .name = "exynos-rng",
- .pm = &exynos_rng_pm_ops,
- .of_match_table = exynos_rng_dt_match,
- },
- .probe = exynos_rng_probe,
- .remove = exynos_rng_remove,
-};
-
-module_platform_driver(exynos_rng_driver);
-
-MODULE_DESCRIPTION("EXYNOS 4 H/W Random Number Generator driver");
-MODULE_AUTHOR("Jonghwa Lee <jonghwa3.lee@samsung.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c
index 119d698439ae..2e23be802a62 100644
--- a/drivers/char/hw_random/meson-rng.c
+++ b/drivers/char/hw_random/meson-rng.c
@@ -62,6 +62,7 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/of.h>
+#include <linux/clk.h>
#define RNG_DATA 0x00
@@ -69,6 +70,7 @@ struct meson_rng_data {
void __iomem *base;
struct platform_device *pdev;
struct hwrng rng;
+ struct clk *core_clk;
};
static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
@@ -81,11 +83,17 @@ static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
return sizeof(u32);
}
+static void meson_rng_clk_disable(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int meson_rng_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct meson_rng_data *data;
struct resource *res;
+ int ret;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -98,6 +106,20 @@ static int meson_rng_probe(struct platform_device *pdev)
if (IS_ERR(data->base))
return PTR_ERR(data->base);
+ data->core_clk = devm_clk_get(dev, "core");
+ if (IS_ERR(data->core_clk))
+ data->core_clk = NULL;
+
+ if (data->core_clk) {
+ ret = clk_prepare_enable(data->core_clk);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(dev, meson_rng_clk_disable,
+ data->core_clk);
+ if (ret)
+ return ret;
+ }
+
data->rng.name = pdev->name;
data->rng.read = meson_rng_read;
diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c
new file mode 100644
index 000000000000..df8eb54fd5a3
--- /dev/null
+++ b/drivers/char/hw_random/mtk-rng.c
@@ -0,0 +1,168 @@
+/*
+ * Driver for Mediatek Hardware Random Number Generator
+ *
+ * Copyright (C) 2017 Sean Wang <sean.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define MTK_RNG_DEV KBUILD_MODNAME
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define USEC_POLL 2
+#define TIMEOUT_POLL 20
+
+#define RNG_CTRL 0x00
+#define RNG_EN BIT(0)
+#define RNG_READY BIT(31)
+
+#define RNG_DATA 0x08
+
+#define to_mtk_rng(p) container_of(p, struct mtk_rng, rng)
+
+struct mtk_rng {
+ void __iomem *base;
+ struct clk *clk;
+ struct hwrng rng;
+};
+
+static int mtk_rng_init(struct hwrng *rng)
+{
+ struct mtk_rng *priv = to_mtk_rng(rng);
+ u32 val;
+ int err;
+
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ return err;
+
+ val = readl(priv->base + RNG_CTRL);
+ val |= RNG_EN;
+ writel(val, priv->base + RNG_CTRL);
+
+ return 0;
+}
+
+static void mtk_rng_cleanup(struct hwrng *rng)
+{
+ struct mtk_rng *priv = to_mtk_rng(rng);
+ u32 val;
+
+ val = readl(priv->base + RNG_CTRL);
+ val &= ~RNG_EN;
+ writel(val, priv->base + RNG_CTRL);
+
+ clk_disable_unprepare(priv->clk);
+}
+
+static bool mtk_rng_wait_ready(struct hwrng *rng, bool wait)
+{
+ struct mtk_rng *priv = to_mtk_rng(rng);
+ int ready;
+
+ ready = readl(priv->base + RNG_CTRL) & RNG_READY;
+ if (!ready && wait)
+ readl_poll_timeout_atomic(priv->base + RNG_CTRL, ready,
+ ready & RNG_READY, USEC_POLL,
+ TIMEOUT_POLL);
+ return !!ready;
+}
+
+static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct mtk_rng *priv = to_mtk_rng(rng);
+ int retval = 0;
+
+ while (max >= sizeof(u32)) {
+ if (!mtk_rng_wait_ready(rng, wait))
+ break;
+
+ *(u32 *)buf = readl(priv->base + RNG_DATA);
+ retval += sizeof(u32);
+ buf += sizeof(u32);
+ max -= sizeof(u32);
+ }
+
+ return retval || !wait ? retval : -EIO;
+}
+
+static int mtk_rng_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret;
+ struct mtk_rng *priv;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no iomem resource\n");
+ return -ENXIO;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->rng.name = pdev->name;
+ priv->rng.init = mtk_rng_init;
+ priv->rng.cleanup = mtk_rng_cleanup;
+ priv->rng.read = mtk_rng_read;
+
+ priv->clk = devm_clk_get(&pdev->dev, "rng");
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ dev_err(&pdev->dev, "no clock for device: %d\n", ret);
+ return ret;
+ }
+
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ ret = devm_hwrng_register(&pdev->dev, &priv->rng);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register rng device: %d\n",
+ ret);
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "registered RNG driver\n");
+
+ return 0;
+}
+
+static const struct of_device_id mtk_rng_match[] = {
+ { .compatible = "mediatek,mt7623-rng" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_rng_match);
+
+static struct platform_driver mtk_rng_driver = {
+ .probe = mtk_rng_probe,
+ .driver = {
+ .name = MTK_RNG_DEV,
+ .of_match_table = mtk_rng_match,
+ },
+};
+
+module_platform_driver(mtk_rng_driver);
+
+MODULE_DESCRIPTION("Mediatek Random Number Generator Driver");
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index 31cbdbbaebfc..92dd4e925315 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -748,9 +748,7 @@ static int n2rng_probe(struct platform_device *op)
dev_info(&op->dev, "Registered RNG HVAPI major %lu minor %lu\n",
np->hvapi_major, np->hvapi_minor);
-
- np->units = devm_kzalloc(&op->dev,
- sizeof(struct n2rng_unit) * np->num_units,
+ np->units = devm_kcalloc(&op->dev, np->num_units, sizeof(*np->units),
GFP_KERNEL);
err = -ENOMEM;
if (!np->units)
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index b1ad12552b56..74d11ae6abe9 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -398,16 +398,6 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
return err;
}
- priv->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- if (!IS_ERR(priv->clk)) {
- err = clk_prepare_enable(priv->clk);
- if (err)
- dev_err(&pdev->dev, "unable to enable the clk, "
- "err = %d\n", err);
- }
-
/*
* On OMAP4, enabling the shutdown_oflo interrupt is
* done in the interrupt mask register. There is no
@@ -478,6 +468,18 @@ static int omap_rng_probe(struct platform_device *pdev)
goto err_ioremap;
}
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (!IS_ERR(priv->clk)) {
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Unable to enable the clk: %d\n", ret);
+ goto err_register;
+ }
+ }
+
ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) :
get_omap_rng_device_details(priv);
if (ret)
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index cf37db263ecd..a0faa5f05deb 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -20,84 +20,100 @@
* TODO: add support for reading sizes other than 32bits and masking
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/hrtimer.h>
#include <linux/hw_random.h>
#include <linux/io.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/time.h>
#include <linux/timeriomem-rng.h>
-#include <linux/jiffies.h>
-#include <linux/sched.h>
-#include <linux/timer.h>
-#include <linux/completion.h>
-struct timeriomem_rng_private_data {
+struct timeriomem_rng_private {
void __iomem *io_base;
- unsigned int expires;
- unsigned int period;
+ ktime_t period;
unsigned int present:1;
- struct timer_list timer;
+ struct hrtimer timer;
struct completion completion;
- struct hwrng timeriomem_rng_ops;
+ struct hwrng rng_ops;
};
-#define to_rng_priv(rng) \
- ((struct timeriomem_rng_private_data *)rng->priv)
-
-/*
- * have data return 1, however return 0 if we have nothing
- */
-static int timeriomem_rng_data_present(struct hwrng *rng, int wait)
+static int timeriomem_rng_read(struct hwrng *hwrng, void *data,
+ size_t max, bool wait)
{
- struct timeriomem_rng_private_data *priv = to_rng_priv(rng);
-
- if (!wait || priv->present)
- return priv->present;
+ struct timeriomem_rng_private *priv =
+ container_of(hwrng, struct timeriomem_rng_private, rng_ops);
+ int retval = 0;
+ int period_us = ktime_to_us(priv->period);
+
+ /*
+ * The RNG provides 32-bits per read. Ensure there is enough space for
+ * at minimum one read.
+ */
+ if (max < sizeof(u32))
+ return 0;
+
+ /*
+ * There may not have been enough time for new data to be generated
+ * since the last request. If the caller doesn't want to wait, let them
+ * bail out. Otherwise, wait for the completion. If the new data has
+ * already been generated, the completion should already be available.
+ */
+ if (!wait && !priv->present)
+ return 0;
wait_for_completion(&priv->completion);
- return 1;
-}
-
-static int timeriomem_rng_data_read(struct hwrng *rng, u32 *data)
-{
- struct timeriomem_rng_private_data *priv = to_rng_priv(rng);
- unsigned long cur;
- s32 delay;
-
- *data = readl(priv->io_base);
-
- cur = jiffies;
-
- delay = cur - priv->expires;
- delay = priv->period - (delay % priv->period);
-
- priv->expires = cur + delay;
+ do {
+ /*
+ * After the first read, all additional reads will need to wait
+ * for the RNG to generate new data. Since the period can have
+ * a wide range of values (1us to 1s have been observed), allow
+ * for 1% tolerance in the sleep time rather than a fixed value.
+ */
+ if (retval > 0)
+ usleep_range(period_us,
+ period_us + min(1, period_us / 100));
+
+ *(u32 *)data = readl(priv->io_base);
+ retval += sizeof(u32);
+ data += sizeof(u32);
+ max -= sizeof(u32);
+ } while (wait && max > sizeof(u32));
+
+ /*
+ * Block any new callers until the RNG has had time to generate new
+ * data.
+ */
priv->present = 0;
-
reinit_completion(&priv->completion);
- mod_timer(&priv->timer, priv->expires);
+ hrtimer_forward_now(&priv->timer, priv->period);
+ hrtimer_restart(&priv->timer);
- return 4;
+ return retval;
}
-static void timeriomem_rng_trigger(unsigned long data)
+static enum hrtimer_restart timeriomem_rng_trigger(struct hrtimer *timer)
{
- struct timeriomem_rng_private_data *priv
- = (struct timeriomem_rng_private_data *)data;
+ struct timeriomem_rng_private *priv
+ = container_of(timer, struct timeriomem_rng_private, timer);
priv->present = 1;
complete(&priv->completion);
+
+ return HRTIMER_NORESTART;
}
static int timeriomem_rng_probe(struct platform_device *pdev)
{
struct timeriomem_rng_data *pdata = pdev->dev.platform_data;
- struct timeriomem_rng_private_data *priv;
+ struct timeriomem_rng_private *priv;
struct resource *res;
int err = 0;
int period;
@@ -119,7 +135,7 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
/* Allocate memory for the device structure (and zero it) */
priv = devm_kzalloc(&pdev->dev,
- sizeof(struct timeriomem_rng_private_data), GFP_KERNEL);
+ sizeof(struct timeriomem_rng_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -139,54 +155,41 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
period = pdata->period;
}
- priv->period = usecs_to_jiffies(period);
- if (priv->period < 1) {
- dev_err(&pdev->dev, "period is less than one jiffy\n");
- return -EINVAL;
- }
-
- priv->expires = jiffies;
- priv->present = 1;
-
+ priv->period = ns_to_ktime(period * NSEC_PER_USEC);
init_completion(&priv->completion);
- complete(&priv->completion);
+ hrtimer_init(&priv->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ priv->timer.function = timeriomem_rng_trigger;
- setup_timer(&priv->timer, timeriomem_rng_trigger, (unsigned long)priv);
-
- priv->timeriomem_rng_ops.name = dev_name(&pdev->dev);
- priv->timeriomem_rng_ops.data_present = timeriomem_rng_data_present;
- priv->timeriomem_rng_ops.data_read = timeriomem_rng_data_read;
- priv->timeriomem_rng_ops.priv = (unsigned long)priv;
+ priv->rng_ops.name = dev_name(&pdev->dev);
+ priv->rng_ops.read = timeriomem_rng_read;
priv->io_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->io_base)) {
- err = PTR_ERR(priv->io_base);
- goto out_timer;
+ return PTR_ERR(priv->io_base);
}
- err = hwrng_register(&priv->timeriomem_rng_ops);
+ /* Assume random data is already available. */
+ priv->present = 1;
+ complete(&priv->completion);
+
+ err = hwrng_register(&priv->rng_ops);
if (err) {
dev_err(&pdev->dev, "problem registering\n");
- goto out_timer;
+ return err;
}
dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n",
priv->io_base, period);
return 0;
-
-out_timer:
- del_timer_sync(&priv->timer);
- return err;
}
static int timeriomem_rng_remove(struct platform_device *pdev)
{
- struct timeriomem_rng_private_data *priv = platform_get_drvdata(pdev);
-
- hwrng_unregister(&priv->timeriomem_rng_ops);
+ struct timeriomem_rng_private *priv = platform_get_drvdata(pdev);
- del_timer_sync(&priv->timer);
+ hwrng_unregister(&priv->rng_ops);
+ hrtimer_cancel(&priv->timer);
return 0;
}
diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h
index 8ee2022ce5d5..cbd62e46bb5b 100644
--- a/drivers/clk/meson/gxbb.h
+++ b/drivers/clk/meson/gxbb.h
@@ -193,7 +193,7 @@
/* CLKID_I2C */
/* #define CLKID_SAR_ADC */
#define CLKID_SMART_CARD 24
-#define CLKID_RNG0 25
+/* CLKID_RNG0 */
#define CLKID_UART0 26
#define CLKID_SDHC 27
#define CLKID_STREAM 28
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 473d31288ad8..fb1e60f5002e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -388,6 +388,21 @@ config CRYPTO_DEV_MXC_SCC
This option enables support for the Security Controller (SCC)
found in Freescale i.MX25 chips.
+config CRYPTO_DEV_EXYNOS_RNG
+ tristate "EXYNOS HW pseudo random number generator support"
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ depends on HAS_IOMEM
+ select CRYPTO_RNG
+ ---help---
+ This driver provides kernel-side support through the
+ cryptographic API for the pseudo random number generator hardware
+ found on Exynos SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called exynos-rng.
+
+ If unsure, say Y.
+
config CRYPTO_DEV_S5P
tristate "Support for Samsung S5PV210/Exynos crypto accelerator"
depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
@@ -515,6 +530,13 @@ config CRYPTO_DEV_MXS_DCP
source "drivers/crypto/qat/Kconfig"
source "drivers/crypto/cavium/cpt/Kconfig"
+config CRYPTO_DEV_CAVIUM_ZIP
+ tristate "Cavium ZIP driver"
+ depends on PCI && 64BIT && (ARM64 || COMPILE_TEST)
+ ---help---
+ Select this option if you want to enable compression/decompression
+ acceleration on Cavium's ARM based SoCs
+
config CRYPTO_DEV_QCE
tristate "Qualcomm crypto engine accelerator"
depends on (ARCH_QCOM || COMPILE_TEST) && HAS_DMA && HAS_IOMEM
@@ -619,4 +641,6 @@ config CRYPTO_DEV_BCM_SPU
Secure Processing Unit (SPU). The SPU driver registers ablkcipher,
ahash, and aead algorithms with the kernel cryptographic API.
+source "drivers/crypto/stm32/Kconfig"
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 739609471169..463f33592d93 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -2,9 +2,11 @@ obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o
+obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/
obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
+obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
@@ -30,6 +32,7 @@ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
+obj-$(CONFIG_CRYPTO_DEV_STM32) += stm32/
obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index d10b4ae5e0da..fdc83a2281ca 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -50,7 +50,7 @@
static void crypto4xx_hw_init(struct crypto4xx_device *dev)
{
union ce_ring_size ring_size;
- union ce_ring_contol ring_ctrl;
+ union ce_ring_control ring_ctrl;
union ce_part_ring_size part_ring_size;
union ce_io_threshold io_threshold;
u32 rand_num;
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h
index 46fe57c8f6eb..279b8725559f 100644
--- a/drivers/crypto/amcc/crypto4xx_reg_def.h
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -180,7 +180,7 @@ union ce_ring_size {
} __attribute__((packed));
#define CRYPTO4XX_RING_CONTROL_OFFSET 0x54
-union ce_ring_contol {
+union ce_ring_control {
struct {
u32 continuous:1;
u32 rsv:5;
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
index 0502f460dacd..430c5570ea87 100644
--- a/drivers/crypto/bcm/util.c
+++ b/drivers/crypto/bcm/util.c
@@ -312,7 +312,7 @@ int do_shash(unsigned char *name, unsigned char *result,
}
rc = crypto_shash_final(&sdesc->shash, result);
if (rc)
- pr_err("%s: Could not genereate %s hash", __func__, name);
+ pr_err("%s: Could not generate %s hash", __func__, name);
do_shash_err:
crypto_free_shash(hash);
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index bc0d3569f8d9..e36aeacd7635 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -87,6 +87,23 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
To compile this as a module, choose M here: the module
will be called caamalg.
+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
+ tristate "Queue Interface as Crypto API backend"
+ depends on CRYPTO_DEV_FSL_CAAM_JR && FSL_DPAA && NET
+ default y
+ select CRYPTO_AUTHENC
+ select CRYPTO_BLKCIPHER
+ help
+ Selecting this will use CAAM Queue Interface (QI) for sending
+ & receiving crypto jobs to/from CAAM. This gives better performance
+ than job ring interface when the number of cores are more than the
+ number of job rings assigned to the kernel. The number of portals
+ assigned to the kernel should also be more than the number of
+ job rings.
+
+ To compile this as a module, choose M here: the module
+ will be called caamalg_qi.
+
config CRYPTO_DEV_FSL_CAAM_AHASH_API
tristate "Register hash algorithm implementations with Crypto API"
depends on CRYPTO_DEV_FSL_CAAM_JR
@@ -136,4 +153,5 @@ config CRYPTO_DEV_FSL_CAAM_DEBUG
information in the CAAM driver.
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
- def_tristate CRYPTO_DEV_FSL_CAAM_CRYPTO_API
+ def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
+ CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index 6554742f357e..9e2e98856b9b 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -8,6 +8,7 @@ endif
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
@@ -16,3 +17,7 @@ obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
caam-objs := ctrl.o
caam_jr-objs := jr.o key_gen.o error.o
caam_pkc-y := caampkc.o pkc_desc.o
+ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
+ ccflags-y += -DCONFIG_CAAM_QI
+ caam-objs += qi.o
+endif
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 9bc80eb06934..398807d1b77e 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -266,8 +266,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
/* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ctx->authsize,
- is_rfc3686, nonce, ctx1_iv_off);
+ cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
+ false);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
desc_bytes(desc), DMA_TO_DEVICE);
@@ -299,7 +300,7 @@ skip_enc:
desc = ctx->sh_desc_dec;
cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, alg->caam.geniv, is_rfc3686,
- nonce, ctx1_iv_off);
+ nonce, ctx1_iv_off, false);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
desc_bytes(desc), DMA_TO_DEVICE);
@@ -333,7 +334,7 @@ skip_enc:
desc = ctx->sh_desc_enc;
cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, is_rfc3686, nonce,
- ctx1_iv_off);
+ ctx1_iv_off, false);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
desc_bytes(desc), DMA_TO_DEVICE);
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index f3f48c10b9d6..6f9c7ec0e339 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -265,17 +265,19 @@ static void init_sh_desc_key_aead(u32 * const desc,
* split key is to be used, the size of the split key itself is
* specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
* SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register
+ * @is_qi: true when called from caam/qi
*
* Note: Requires an MDHA split key.
*/
void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
- struct alginfo *adata, unsigned int icvsize,
- const bool is_rfc3686, u32 *nonce,
- const u32 ctx1_iv_off)
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool is_rfc3686,
+ u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
{
/* Note: Context registers are saved. */
init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
@@ -284,6 +286,25 @@ void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
OP_ALG_ENCRYPT);
+ if (is_qi) {
+ u32 *wait_load_cmd;
+
+ /* REG3 = assoclen */
+ append_seq_load(desc, 4, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DECO_MATH3 |
+ (4 << LDST_OFFSET_SHIFT));
+
+ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_CALM | JUMP_COND_NCP |
+ JUMP_COND_NOP | JUMP_COND_NIP |
+ JUMP_COND_NIFP);
+ set_jump_tgt_here(desc, wait_load_cmd);
+
+ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (ctx1_iv_off << LDST_OFFSET_SHIFT));
+ }
+
/* Read and write assoclen bytes */
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
@@ -338,6 +359,7 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register
+ * @is_qi: true when called from caam/qi
*
* Note: Requires an MDHA split key.
*/
@@ -345,7 +367,7 @@ void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool geniv,
const bool is_rfc3686, u32 *nonce,
- const u32 ctx1_iv_off)
+ const u32 ctx1_iv_off, const bool is_qi)
{
/* Note: Context registers are saved. */
init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
@@ -354,6 +376,26 @@ void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+ if (is_qi) {
+ u32 *wait_load_cmd;
+
+ /* REG3 = assoclen */
+ append_seq_load(desc, 4, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DECO_MATH3 |
+ (4 << LDST_OFFSET_SHIFT));
+
+ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_CALM | JUMP_COND_NCP |
+ JUMP_COND_NOP | JUMP_COND_NIP |
+ JUMP_COND_NIFP);
+ set_jump_tgt_here(desc, wait_load_cmd);
+
+ if (!geniv)
+ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (ctx1_iv_off << LDST_OFFSET_SHIFT));
+ }
+
/* Read and write assoclen bytes */
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
if (geniv)
@@ -423,21 +465,44 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register
+ * @is_qi: true when called from caam/qi
*
* Note: Requires an MDHA split key.
*/
void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
- u32 *nonce, const u32 ctx1_iv_off)
+ u32 *nonce, const u32 ctx1_iv_off,
+ const bool is_qi)
{
u32 geniv, moveiv;
/* Note: Context registers are saved. */
init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
- if (is_rfc3686)
+ if (is_qi) {
+ u32 *wait_load_cmd;
+
+ /* REG3 = assoclen */
+ append_seq_load(desc, 4, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DECO_MATH3 |
+ (4 << LDST_OFFSET_SHIFT));
+
+ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_CALM | JUMP_COND_NCP |
+ JUMP_COND_NOP | JUMP_COND_NIP |
+ JUMP_COND_NIFP);
+ set_jump_tgt_here(desc, wait_load_cmd);
+ }
+
+ if (is_rfc3686) {
+ if (is_qi)
+ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
goto copy_iv;
+ }
/* Generate IV */
geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index 95551737333a..8731e4a7ff05 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -12,6 +12,9 @@
#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+#define DESC_QI_AEAD_ENC_LEN (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ)
+#define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
+#define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
/* Note: Nonce is counted in cdata.keylen */
#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
@@ -45,20 +48,22 @@ void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
unsigned int icvsize);
void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
- struct alginfo *adata, unsigned int icvsize,
- const bool is_rfc3686, u32 *nonce,
- const u32 ctx1_iv_off);
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool is_rfc3686,
+ u32 *nonce, const u32 ctx1_iv_off,
+ const bool is_qi);
void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool geniv,
const bool is_rfc3686, u32 *nonce,
- const u32 ctx1_iv_off);
+ const u32 ctx1_iv_off, const bool is_qi);
void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
- u32 *nonce, const u32 ctx1_iv_off);
+ u32 *nonce, const u32 ctx1_iv_off,
+ const bool is_qi);
void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
unsigned int icvsize);
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
new file mode 100644
index 000000000000..ea0e5b8b9171
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -0,0 +1,2387 @@
+/*
+ * Freescale FSL CAAM support for crypto API over QI backend.
+ * Based on caamalg.c
+ *
+ * Copyright 2013-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016-2017 NXP
+ */
+
+#include "compat.h"
+
+#include "regs.h"
+#include "intern.h"
+#include "desc_constr.h"
+#include "error.h"
+#include "sg_sw_sec4.h"
+#include "sg_sw_qm.h"
+#include "key_gen.h"
+#include "qi.h"
+#include "jr.h"
+#include "caamalg_desc.h"
+
+/*
+ * crypto alg
+ */
+#define CAAM_CRA_PRIORITY 2000
+/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
+#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
+ SHA512_DIGEST_SIZE * 2)
+
+#define DESC_MAX_USED_BYTES (DESC_QI_AEAD_GIVENC_LEN + \
+ CAAM_MAX_KEY_SIZE)
+#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
+
+struct caam_alg_entry {
+ int class1_alg_type;
+ int class2_alg_type;
+ bool rfc3686;
+ bool geniv;
+};
+
+struct caam_aead_alg {
+ struct aead_alg aead;
+ struct caam_alg_entry caam;
+ bool registered;
+};
+
+/*
+ * per-session context
+ */
+struct caam_ctx {
+ struct device *jrdev;
+ u32 sh_desc_enc[DESC_MAX_USED_LEN];
+ u32 sh_desc_dec[DESC_MAX_USED_LEN];
+ u32 sh_desc_givenc[DESC_MAX_USED_LEN];
+ u8 key[CAAM_MAX_KEY_SIZE];
+ dma_addr_t key_dma;
+ struct alginfo adata;
+ struct alginfo cdata;
+ unsigned int authsize;
+ struct device *qidev;
+ spinlock_t lock; /* Protects multiple init of driver context */
+ struct caam_drv_ctx *drv_ctx[NUM_OP];
+};
+
+static int aead_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ typeof(*alg), aead);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ u32 ctx1_iv_off = 0;
+ u32 *nonce = NULL;
+ unsigned int data_len[2];
+ u32 inl_mask;
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = alg->caam.rfc3686;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * AES-CTR needs to load IV in CONTEXT1 reg
+ * at an offset of 128bits (16bytes)
+ * CONTEXT1[255:128] = IV
+ */
+ if (ctr_mode)
+ ctx1_iv_off = 16;
+
+ /*
+ * RFC3686 specific:
+ * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ */
+ if (is_rfc3686) {
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
+ ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
+ }
+
+ data_len[0] = ctx->adata.keylen_pad;
+ data_len[1] = ctx->cdata.keylen;
+
+ if (alg->caam.geniv)
+ goto skip_enc;
+
+ /* aead_encrypt shared descriptor */
+ if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
+
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
+
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+
+ cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, is_rfc3686, nonce,
+ ctx1_iv_off, true);
+
+skip_enc:
+ /* aead_decrypt shared descriptor */
+ if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
+
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
+
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+
+ cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, alg->caam.geniv,
+ is_rfc3686, nonce, ctx1_iv_off, true);
+
+ if (!alg->caam.geniv)
+ goto skip_givenc;
+
+ /* aead_givencrypt shared descriptor */
+ if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
+
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
+
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+
+ cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, is_rfc3686, nonce,
+ ctx1_iv_off, true);
+
+skip_givenc:
+ return 0;
+}
+
+static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ aead_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ struct crypto_authenc_keys keys;
+ int ret = 0;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ goto badkey;
+
+#ifdef DEBUG
+ dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
+ keys.authkeylen + keys.enckeylen, keys.enckeylen,
+ keys.authkeylen);
+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
+ keys.authkeylen, CAAM_MAX_KEY_SIZE -
+ keys.enckeylen);
+ if (ret)
+ goto badkey;
+
+ /* postpend encryption key to auth split key */
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
+ keys.enckeylen, DMA_TO_DEVICE);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
+#endif
+
+ ctx->cdata.keylen = keys.enckeylen;
+
+ ret = aead_set_sh_desc(aead);
+ if (ret)
+ goto badkey;
+
+ /* Now update the driver contexts with the new shared descriptor */
+ if (ctx->drv_ctx[ENCRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+ ctx->sh_desc_enc);
+ if (ret) {
+ dev_err(jrdev, "driver enc context update failed\n");
+ goto badkey;
+ }
+ }
+
+ if (ctx->drv_ctx[DECRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+ ctx->sh_desc_dec);
+ if (ret) {
+ dev_err(jrdev, "driver dec context update failed\n");
+ goto badkey;
+ }
+ }
+
+ return ret;
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
+static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
+ const char *alg_name = crypto_tfm_alg_name(tfm);
+ struct device *jrdev = ctx->jrdev;
+ unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ u32 ctx1_iv_off = 0;
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
+ int ret = 0;
+
+ memcpy(ctx->key, key, keylen);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+ /*
+ * AES-CTR needs to load IV in CONTEXT1 reg
+ * at an offset of 128bits (16bytes)
+ * CONTEXT1[255:128] = IV
+ */
+ if (ctr_mode)
+ ctx1_iv_off = 16;
+
+ /*
+ * RFC3686 specific:
+ * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ * | *key = {KEY, NONCE}
+ */
+ if (is_rfc3686) {
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ keylen -= CTR_RFC3686_NONCE_SIZE;
+ }
+
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_inline = true;
+
+ /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
+ cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+ is_rfc3686, ctx1_iv_off);
+ cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+ is_rfc3686, ctx1_iv_off);
+ cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
+ ivsize, is_rfc3686, ctx1_iv_off);
+
+ /* Now update the driver contexts with the new shared descriptor */
+ if (ctx->drv_ctx[ENCRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+ ctx->sh_desc_enc);
+ if (ret) {
+ dev_err(jrdev, "driver enc context update failed\n");
+ goto badkey;
+ }
+ }
+
+ if (ctx->drv_ctx[DECRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+ ctx->sh_desc_dec);
+ if (ret) {
+ dev_err(jrdev, "driver dec context update failed\n");
+ goto badkey;
+ }
+ }
+
+ if (ctx->drv_ctx[GIVENCRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
+ ctx->sh_desc_givenc);
+ if (ret) {
+ dev_err(jrdev, "driver givenc context update failed\n");
+ goto badkey;
+ }
+ }
+
+ return ret;
+badkey:
+ crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
+static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *jrdev = ctx->jrdev;
+ int ret = 0;
+
+ if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
+ crypto_ablkcipher_set_flags(ablkcipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ dev_err(jrdev, "key size mismatch\n");
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_inline = true;
+
+ /* xts ablkcipher encrypt, decrypt shared descriptors */
+ cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
+ cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
+
+ /* Now update the driver contexts with the new shared descriptor */
+ if (ctx->drv_ctx[ENCRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+ ctx->sh_desc_enc);
+ if (ret) {
+ dev_err(jrdev, "driver enc context update failed\n");
+ goto badkey;
+ }
+ }
+
+ if (ctx->drv_ctx[DECRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+ ctx->sh_desc_dec);
+ if (ret) {
+ dev_err(jrdev, "driver dec context update failed\n");
+ goto badkey;
+ }
+ }
+
+ return ret;
+badkey:
+ crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return 0;
+}
+
+/*
+ * aead_edesc - s/w-extended aead descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped h/w link table
+ * @qm_sg_dma: bus physical mapped address of h/w link table
+ * @assoclen_dma: bus physical mapped address of req->assoclen
+ * @drv_req: driver-specific request structure
+ * @sgt: the h/w link table
+ */
+struct aead_edesc {
+ int src_nents;
+ int dst_nents;
+ dma_addr_t iv_dma;
+ int qm_sg_bytes;
+ dma_addr_t qm_sg_dma;
+ dma_addr_t assoclen_dma;
+ struct caam_drv_req drv_req;
+ struct qm_sg_entry sgt[0];
+};
+
+/*
+ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped h/w link table
+ * @qm_sg_dma: bus physical mapped address of h/w link table
+ * @drv_req: driver-specific request structure
+ * @sgt: the h/w link table
+ */
+struct ablkcipher_edesc {
+ int src_nents;
+ int dst_nents;
+ dma_addr_t iv_dma;
+ int qm_sg_bytes;
+ dma_addr_t qm_sg_dma;
+ struct caam_drv_req drv_req;
+ struct qm_sg_entry sgt[0];
+};
+
+static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
+ enum optype type)
+{
+ /*
+ * This function is called on the fast path with values of 'type'
+ * known at compile time. Invalid arguments are not expected and
+ * thus no checks are made.
+ */
+ struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
+ u32 *desc;
+
+ if (unlikely(!drv_ctx)) {
+ spin_lock(&ctx->lock);
+
+ /* Read again to check if some other core init drv_ctx */
+ drv_ctx = ctx->drv_ctx[type];
+ if (!drv_ctx) {
+ int cpu;
+
+ if (type == ENCRYPT)
+ desc = ctx->sh_desc_enc;
+ else if (type == DECRYPT)
+ desc = ctx->sh_desc_dec;
+ else /* (type == GIVENCRYPT) */
+ desc = ctx->sh_desc_givenc;
+
+ cpu = smp_processor_id();
+ drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
+ if (likely(!IS_ERR_OR_NULL(drv_ctx)))
+ drv_ctx->op_type = type;
+
+ ctx->drv_ctx[type] = drv_ctx;
+ }
+
+ spin_unlock(&ctx->lock);
+ }
+
+ return drv_ctx;
+}
+
+static void caam_unmap(struct device *dev, struct scatterlist *src,
+ struct scatterlist *dst, int src_nents,
+ int dst_nents, dma_addr_t iv_dma, int ivsize,
+ enum optype op_type, dma_addr_t qm_sg_dma,
+ int qm_sg_bytes)
+{
+ if (dst != src) {
+ if (src_nents)
+ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+ } else {
+ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+ }
+
+ if (iv_dma)
+ dma_unmap_single(dev, iv_dma, ivsize,
+ op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
+ DMA_TO_DEVICE);
+ if (qm_sg_bytes)
+ dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
+}
+
+static void aead_unmap(struct device *dev,
+ struct aead_edesc *edesc,
+ struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ int ivsize = crypto_aead_ivsize(aead);
+
+ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
+ edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
+ edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
+}
+
+static void ablkcipher_unmap(struct device *dev,
+ struct ablkcipher_edesc *edesc,
+ struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
+ edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
+ edesc->qm_sg_dma, edesc->qm_sg_bytes);
+}
+
+static void aead_done(struct caam_drv_req *drv_req, u32 status)
+{
+ struct device *qidev;
+ struct aead_edesc *edesc;
+ struct aead_request *aead_req = drv_req->app_ctx;
+ struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+ struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
+ int ecode = 0;
+
+ qidev = caam_ctx->qidev;
+
+ if (unlikely(status)) {
+ caam_jr_strstatus(qidev, status);
+ ecode = -EIO;
+ }
+
+ edesc = container_of(drv_req, typeof(*edesc), drv_req);
+ aead_unmap(qidev, edesc, aead_req);
+
+ aead_request_complete(aead_req, ecode);
+ qi_cache_free(edesc);
+}
+
+/*
+ * allocate and map the aead extended descriptor
+ */
+static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+ bool encrypt)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ typeof(*alg), aead);
+ struct device *qidev = ctx->qidev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ struct aead_edesc *edesc;
+ dma_addr_t qm_sg_dma, iv_dma = 0;
+ int ivsize = 0;
+ unsigned int authsize = ctx->authsize;
+ int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
+ int in_len, out_len;
+ struct qm_sg_entry *sg_table, *fd_sgt;
+ struct caam_drv_ctx *drv_ctx;
+ enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
+
+ drv_ctx = get_drv_ctx(ctx, op_type);
+ if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
+ return (struct aead_edesc *)drv_ctx;
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = qi_cache_alloc(GFP_DMA | flags);
+ if (unlikely(!edesc)) {
+ dev_err(qidev, "could not allocate extended descriptor\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (likely(req->src == req->dst)) {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen +
+ (encrypt ? authsize : 0));
+ if (unlikely(src_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen +
+ (encrypt ? authsize : 0));
+ qi_cache_free(edesc);
+ return ERR_PTR(src_nents);
+ }
+
+ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(qidev, "unable to map source\n");
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen);
+ if (unlikely(src_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen);
+ qi_cache_free(edesc);
+ return ERR_PTR(src_nents);
+ }
+
+ dst_nents = sg_nents_for_len(req->dst, req->assoclen +
+ req->cryptlen +
+ (encrypt ? authsize :
+ (-authsize)));
+ if (unlikely(dst_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
+ req->assoclen + req->cryptlen +
+ (encrypt ? authsize : (-authsize)));
+ qi_cache_free(edesc);
+ return ERR_PTR(dst_nents);
+ }
+
+ if (src_nents) {
+ mapped_src_nents = dma_map_sg(qidev, req->src,
+ src_nents, DMA_TO_DEVICE);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(qidev, "unable to map source\n");
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_src_nents = 0;
+ }
+
+ mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(qidev, "unable to map destination\n");
+ dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
+ ivsize = crypto_aead_ivsize(aead);
+ iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(qidev, iv_dma)) {
+ dev_err(qidev, "unable to map IV\n");
+ caam_unmap(qidev, req->src, req->dst, src_nents,
+ dst_nents, 0, 0, op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ /*
+ * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
+ * Input is not contiguous.
+ */
+ qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
+ (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+ sg_table = &edesc->sgt[0];
+ qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+ edesc->drv_req.app_ctx = req;
+ edesc->drv_req.cbk = aead_done;
+ edesc->drv_req.drv_ctx = drv_ctx;
+
+ edesc->assoclen_dma = dma_map_single(qidev, &req->assoclen, 4,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
+ dev_err(qidev, "unable to map assoclen\n");
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
+ qm_sg_index++;
+ if (ivsize) {
+ dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
+ qm_sg_index++;
+ }
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+ qm_sg_index += mapped_src_nents;
+
+ if (mapped_dst_nents > 1)
+ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+ qm_sg_index, 0);
+
+ qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(qidev, qm_sg_dma)) {
+ dev_err(qidev, "unable to map S/G table\n");
+ dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->qm_sg_dma = qm_sg_dma;
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ out_len = req->assoclen + req->cryptlen +
+ (encrypt ? ctx->authsize : (-ctx->authsize));
+ in_len = 4 + ivsize + req->assoclen + req->cryptlen;
+
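+ /*
+ * Fill in the compound frame descriptor: fd_sgt[1] describes the input
+ * (assoclen + IV + source data, via the S/G table mapped above), while
+ * fd_sgt[0] describes where CAAM must write the output.
+ */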
+ fd_sgt = &edesc->drv_req.fd_sgt[0];
+ dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
+
+ if (req->dst == req->src) {
+ if (mapped_src_nents == 1)
+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
+ out_len, 0);
+ else
+ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
+ (1 + !!ivsize) * sizeof(*sg_table),
+ out_len, 0);
+ } else if (mapped_dst_nents == 1) {
+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
+ 0);
+ } else {
+ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
+ qm_sg_index, out_len, 0);
+ }
+
+ return edesc;
+}
+
+static inline int aead_crypt(struct aead_request *req, bool encrypt)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ret;
+
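+ /*
+ * Back off while the CAAM/QI congestion group is above its threshold;
+ * the caller may retry the request later.
+ */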
+ if (unlikely(caam_congested))
+ return -EAGAIN;
+
+ /* allocate extended descriptor */
+ edesc = aead_edesc_alloc(req, encrypt);
+ if (IS_ERR_OR_NULL(edesc))
+ return PTR_ERR(edesc);
+
+ /* Create and submit job descriptor */
+ ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(ctx->qidev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
+static int aead_encrypt(struct aead_request *req)
+{
+ return aead_crypt(req, true);
+}
+
+static int aead_decrypt(struct aead_request *req)
+{
+ return aead_crypt(req, false);
+}
+
+static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
+{
+ struct ablkcipher_edesc *edesc;
+ struct ablkcipher_request *req = drv_req->app_ctx;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *qidev = caam_ctx->qidev;
+#ifdef DEBUG
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+ dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
+#endif
+
+ edesc = container_of(drv_req, typeof(*edesc), drv_req);
+
+ if (status)
+ caam_jr_strstatus(qidev, status);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
+ dbg_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+#endif
+
+ ablkcipher_unmap(qidev, edesc, req);
+ qi_cache_free(edesc);
+
+ ablkcipher_request_complete(req, status);
+}
+
+static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+ *req, bool encrypt)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *qidev = ctx->qidev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ struct ablkcipher_edesc *edesc;
+ dma_addr_t iv_dma;
+ bool in_contig;
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ int dst_sg_idx, qm_sg_ents;
+ struct qm_sg_entry *sg_table, *fd_sgt;
+ struct caam_drv_ctx *drv_ctx;
+ enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
+
+ drv_ctx = get_drv_ctx(ctx, op_type);
+ if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
+ return (struct ablkcipher_edesc *)drv_ctx;
+
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
+ if (unlikely(src_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+ req->nbytes);
+ return ERR_PTR(src_nents);
+ }
+
+ if (unlikely(req->src != req->dst)) {
+ dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ if (unlikely(dst_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
+ req->nbytes);
+ return ERR_PTR(dst_nents);
+ }
+
+ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(qidev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(qidev, "unable to map destination\n");
+ dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(qidev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(qidev, iv_dma)) {
+ dev_err(qidev, "unable to map IV\n");
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
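+ /*
+ * If the IV and a single source segment are contiguous in DMA space,
+ * the input can be described directly in the frame descriptor;
+ * otherwise an IV + source S/G table is needed.
+ */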
+ if (mapped_src_nents == 1 &&
+ iv_dma + ivsize == sg_dma_address(req->src)) {
+ in_contig = true;
+ qm_sg_ents = 0;
+ } else {
+ in_contig = false;
+ qm_sg_ents = 1 + mapped_src_nents;
+ }
+ dst_sg_idx = qm_sg_ents;
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_alloc(GFP_DMA | flags);
+ if (unlikely(!edesc)) {
+ dev_err(qidev, "could not allocate extended descriptor\n");
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, op_type, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+ qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+ sg_table = &edesc->sgt[0];
+ edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+ edesc->drv_req.app_ctx = req;
+ edesc->drv_req.cbk = ablkcipher_done;
+ edesc->drv_req.drv_ctx = drv_ctx;
+
+ if (!in_contig) {
+ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+ }
+
+ if (mapped_dst_nents > 1)
+ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+ dst_sg_idx, 0);
+
+ edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
+ dev_err(qidev, "unable to map S/G table\n");
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ fd_sgt = &edesc->drv_req.fd_sgt[0];
+
+ if (!in_contig)
+ dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
+ ivsize + req->nbytes, 0);
+ else
+ dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
+ 0);
+
+ if (req->src == req->dst) {
+ if (!in_contig)
+ dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
+ sizeof(*sg_table), req->nbytes, 0);
+ else
+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
+ req->nbytes, 0);
+ } else if (mapped_dst_nents > 1) {
+ dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
+ sizeof(*sg_table), req->nbytes, 0);
+ } else {
+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
+ req->nbytes, 0);
+ }
+
+ return edesc;
+}
+
+static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+ struct skcipher_givcrypt_request *creq)
+{
+ struct ablkcipher_request *req = &creq->creq;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *qidev = ctx->qidev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
+ struct ablkcipher_edesc *edesc;
+ dma_addr_t iv_dma;
+ bool out_contig;
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct qm_sg_entry *sg_table, *fd_sgt;
+ int dst_sg_idx, qm_sg_ents;
+ struct caam_drv_ctx *drv_ctx;
+
+ drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
+ if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
+ return (struct ablkcipher_edesc *)drv_ctx;
+
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
+ if (unlikely(src_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+ req->nbytes);
+ return ERR_PTR(src_nents);
+ }
+
+ if (unlikely(req->src != req->dst)) {
+ dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ if (unlikely(dst_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
+ req->nbytes);
+ return ERR_PTR(dst_nents);
+ }
+
+ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(qidev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(qidev, "unable to map destination\n");
+ dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(qidev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dst_nents = src_nents;
+ mapped_dst_nents = src_nents;
+ }
+
+ iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
+ if (dma_mapping_error(qidev, iv_dma)) {
+ dev_err(qidev, "unable to map IV\n");
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
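+ /*
+ * If the generated IV buffer and a single destination segment are
+ * contiguous, the output can be described directly; otherwise IV and
+ * destination entries are added to the S/G table.
+ */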
+ qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
+ dst_sg_idx = qm_sg_ents;
+ if (mapped_dst_nents == 1 &&
+ iv_dma + ivsize == sg_dma_address(req->dst)) {
+ out_contig = true;
+ } else {
+ out_contig = false;
+ qm_sg_ents += 1 + mapped_dst_nents;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_alloc(GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(qidev, "could not allocate extended descriptor\n");
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, GIVENCRYPT, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+ sg_table = &edesc->sgt[0];
+ edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+ edesc->drv_req.app_ctx = req;
+ edesc->drv_req.cbk = ablkcipher_done;
+ edesc->drv_req.drv_ctx = drv_ctx;
+
+ if (mapped_src_nents > 1)
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
+
+ if (!out_contig) {
+ dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
+ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+ dst_sg_idx + 1, 0);
+ }
+
+ edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
+ dev_err(qidev, "unable to map S/G table\n");
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, GIVENCRYPT, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ fd_sgt = &edesc->drv_req.fd_sgt[0];
+
+ if (mapped_src_nents > 1)
+ dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
+ 0);
+ else
+ dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
+ req->nbytes, 0);
+
+ if (!out_contig)
+ dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
+ sizeof(*sg_table), ivsize + req->nbytes,
+ 0);
+ else
+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
+ ivsize + req->nbytes, 0);
+
+ return edesc;
+}
+
+static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
+{
+ struct ablkcipher_edesc *edesc;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ int ret;
+
+ if (unlikely(caam_congested))
+ return -EAGAIN;
+
+ /* allocate extended descriptor */
+ edesc = ablkcipher_edesc_alloc(req, encrypt);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ablkcipher_unmap(ctx->qidev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
+static int ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+ return ablkcipher_crypt(req, true);
+}
+
+static int ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+ return ablkcipher_crypt(req, false);
+}
+
+static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
+{
+ struct ablkcipher_request *req = &creq->creq;
+ struct ablkcipher_edesc *edesc;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ int ret;
+
+ if (unlikely(caam_congested))
+ return -EAGAIN;
+
+ /* allocate extended descriptor */
+ edesc = ablkcipher_giv_edesc_alloc(creq);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ablkcipher_unmap(ctx->qidev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
+#define template_ablkcipher template_u.ablkcipher
+struct caam_alg_template {
+ char name[CRYPTO_MAX_ALG_NAME];
+ char driver_name[CRYPTO_MAX_ALG_NAME];
+ unsigned int blocksize;
+ u32 type;
+ union {
+ struct ablkcipher_alg ablkcipher;
+ } template_u;
+ u32 class1_alg_type;
+ u32 class2_alg_type;
+};
+
+static struct caam_alg_template driver_algs[] = {
+ /* ablkcipher descriptor */
+ {
+ .name = "cbc(aes)",
+ .driver_name = "cbc-aes-caam-qi",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "cbc(des3_ede)",
+ .driver_name = "cbc-3des-caam-qi",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "cbc(des)",
+ .driver_name = "cbc-des-caam-qi",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "ctr(aes)",
+ .driver_name = "ctr-aes-caam-qi",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "chainiv",
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ },
+ {
+ .name = "rfc3686(ctr(aes))",
+ .driver_name = "rfc3686-ctr-aes-caam-qi",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
+ .min_keysize = AES_MIN_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ },
+ {
+ .name = "xts(aes)",
+ .driver_name = "xts-aes-caam-qi",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = xts_ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+ },
+};
+
+static struct caam_aead_alg driver_aeads[] = {
+ /* single-pass ipsec_esp descriptor */
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-cbc-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-cbc-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-cbc-aes-"
+ "caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-cbc-aes-"
+ "caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-cbc-aes-"
+ "caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-des3_ede-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-des3_ede-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-des3_ede-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-"
+ "cbc-des3_ede-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-des3_ede-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-"
+ "cbc-des3_ede-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-des3_ede-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-"
+ "cbc-des3_ede-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-des3_ede-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-"
+ "cbc-des3_ede-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-des3_ede-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-"
+ "cbc-des3_ede-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-des-caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-des-caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-des-caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-cbc-des-caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-des-caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-cbc-des-"
+ "caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-des-caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-cbc-des-"
+ "caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-des-caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-cbc-des-"
+ "caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-des-caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-cbc-des-"
+ "caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+};
+
+struct caam_crypto_alg {
+ struct list_head entry;
+ struct crypto_alg crypto_alg;
+ struct caam_alg_entry caam;
+};
+
+static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
+{
+ struct caam_drv_private *priv;
+
+ /*
+ * distribute tfms across job rings to ensure in-order
+ * crypto request processing per tfm
+ */
+ ctx->jrdev = caam_jr_alloc();
+ if (IS_ERR(ctx->jrdev)) {
+ pr_err("Job Ring Device allocation for transform failed\n");
+ return PTR_ERR(ctx->jrdev);
+ }
+
+ ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
+ dev_err(ctx->jrdev, "unable to map key\n");
+ caam_jr_free(ctx->jrdev);
+ return -ENOMEM;
+ }
+
+ /* copy descriptor header template value */
+ ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
+
+ priv = dev_get_drvdata(ctx->jrdev->parent);
+ ctx->qidev = priv->qidev;
+
+ spin_lock_init(&ctx->lock);
+ ctx->drv_ctx[ENCRYPT] = NULL;
+ ctx->drv_ctx[DECRYPT] = NULL;
+ ctx->drv_ctx[GIVENCRYPT] = NULL;
+
+ return 0;
+}
+
+static int caam_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
+ crypto_alg);
+ struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ return caam_init_common(ctx, &caam_alg->caam);
+}
+
+static int caam_aead_init(struct crypto_aead *tfm)
+{
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
+ aead);
+ struct caam_ctx *ctx = crypto_aead_ctx(tfm);
+
+ return caam_init_common(ctx, &caam_alg->caam);
+}
+
+static void caam_exit_common(struct caam_ctx *ctx)
+{
+ caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
+ caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
+ caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
+
+ dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
+ DMA_TO_DEVICE);
+
+ caam_jr_free(ctx->jrdev);
+}
+
+static void caam_cra_exit(struct crypto_tfm *tfm)
+{
+ caam_exit_common(crypto_tfm_ctx(tfm));
+}
+
+static void caam_aead_exit(struct crypto_aead *tfm)
+{
+ caam_exit_common(crypto_aead_ctx(tfm));
+}
+
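+/* List of (giv)ablkcipher algorithms dynamically registered by this module */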
+static struct list_head alg_list;
+static void __exit caam_qi_algapi_exit(void)
+{
+ struct caam_crypto_alg *t_alg, *n;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+ struct caam_aead_alg *t_alg = driver_aeads + i;
+
+ if (t_alg->registered)
+ crypto_unregister_aead(&t_alg->aead);
+ }
+
+ if (!alg_list.next)
+ return;
+
+ list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
+ crypto_unregister_alg(&t_alg->crypto_alg);
+ list_del(&t_alg->entry);
+ kfree(t_alg);
+ }
+}
+
+static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
+ *template)
+{
+ struct caam_crypto_alg *t_alg;
+ struct crypto_alg *alg;
+
+ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+ if (!t_alg)
+ return ERR_PTR(-ENOMEM);
+
+ alg = &t_alg->crypto_alg;
+
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->driver_name);
+ alg->cra_module = THIS_MODULE;
+ alg->cra_init = caam_cra_init;
+ alg->cra_exit = caam_cra_exit;
+ alg->cra_priority = CAAM_CRA_PRIORITY;
+ alg->cra_blocksize = template->blocksize;
+ alg->cra_alignmask = 0;
+ alg->cra_ctxsize = sizeof(struct caam_ctx);
+ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
+ template->type;
+ switch (template->type) {
+ case CRYPTO_ALG_TYPE_GIVCIPHER:
+ alg->cra_type = &crypto_givcipher_type;
+ alg->cra_ablkcipher = template->template_ablkcipher;
+ break;
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ alg->cra_type = &crypto_ablkcipher_type;
+ alg->cra_ablkcipher = template->template_ablkcipher;
+ break;
+ }
+
+ t_alg->caam.class1_alg_type = template->class1_alg_type;
+ t_alg->caam.class2_alg_type = template->class2_alg_type;
+
+ return t_alg;
+}
+
+static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
+{
+ struct aead_alg *alg = &t_alg->aead;
+
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+ alg->init = caam_aead_init;
+ alg->exit = caam_aead_exit;
+}
+
+static int __init caam_qi_algapi_init(void)
+{
+ struct device_node *dev_node;
+ struct platform_device *pdev;
+ struct device *ctrldev;
+ struct caam_drv_private *priv;
+ int i = 0, err = 0;
+ u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
+ unsigned int md_limit = SHA512_DIGEST_SIZE;
+ bool registered = false;
+
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!dev_node) {
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+ if (!dev_node)
+ return -ENODEV;
+ }
+
+ pdev = of_find_device_by_node(dev_node);
+ of_node_put(dev_node);
+ if (!pdev)
+ return -ENODEV;
+
+ ctrldev = &pdev->dev;
+ priv = dev_get_drvdata(ctrldev);
+
+ /*
+ * If priv is NULL, it's probably because the caam driver wasn't
+ * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+ */
+ if (!priv || !priv->qi_present)
+ return -ENODEV;
+
+ INIT_LIST_HEAD(&alg_list);
+
+ /*
+ * Register crypto algorithms the device supports.
+ * First, detect presence and attributes of DES, AES, and MD blocks.
+ */
+ cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+ cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+ des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
+ aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
+ md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+
+ /* If MD is present, limit digest size based on LP256 */
+ if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
+ md_limit = SHA256_DIGEST_SIZE;
+
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ struct caam_crypto_alg *t_alg;
+ struct caam_alg_template *alg = driver_algs + i;
+ u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
+
+ /* Skip DES algorithms if not supported by device */
+ if (!des_inst &&
+ ((alg_sel == OP_ALG_ALGSEL_3DES) ||
+ (alg_sel == OP_ALG_ALGSEL_DES)))
+ continue;
+
+ /* Skip AES algorithms if not supported by device */
+ if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
+ continue;
+
+ t_alg = caam_alg_alloc(alg);
+ if (IS_ERR(t_alg)) {
+ err = PTR_ERR(t_alg);
+ dev_warn(priv->qidev, "%s alg allocation failed\n",
+ alg->driver_name);
+ continue;
+ }
+
+ err = crypto_register_alg(&t_alg->crypto_alg);
+ if (err) {
+ dev_warn(priv->qidev, "%s alg registration failed\n",
+ t_alg->crypto_alg.cra_driver_name);
+ kfree(t_alg);
+ continue;
+ }
+
+ list_add_tail(&t_alg->entry, &alg_list);
+ registered = true;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+ struct caam_aead_alg *t_alg = driver_aeads + i;
+ u32 c1_alg_sel = t_alg->caam.class1_alg_type &
+ OP_ALG_ALGSEL_MASK;
+ u32 c2_alg_sel = t_alg->caam.class2_alg_type &
+ OP_ALG_ALGSEL_MASK;
+ u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
+
+ /* Skip DES algorithms if not supported by device */
+ if (!des_inst &&
+ ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
+ (c1_alg_sel == OP_ALG_ALGSEL_DES)))
+ continue;
+
+ /* Skip AES algorithms if not supported by device */
+ if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
+ continue;
+
+ /*
+ * Check support for AES algorithms not available
+ * on LP devices.
+ */
+ if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
+ (alg_aai == OP_ALG_AAI_GCM))
+ continue;
+
+ /*
+ * Skip algorithms requiring message digests
+ * if MD or MD size is not supported by device.
+ */
+ if (c2_alg_sel &&
+ (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
+ continue;
+
+ caam_aead_alg_init(t_alg);
+
+ err = crypto_register_aead(&t_alg->aead);
+ if (err) {
+ pr_warn("%s alg registration failed\n",
+ t_alg->aead.base.cra_driver_name);
+ continue;
+ }
+
+ t_alg->registered = true;
+ registered = true;
+ }
+
+ if (registered)
+ dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
+
+ return err;
+}
+
+module_init(caam_qi_algapi_init);
+module_exit(caam_qi_algapi_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
+MODULE_AUTHOR("Freescale Semiconductor");
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 5d7f73d60515..dd353e342c12 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -18,6 +18,10 @@
bool caam_little_end;
EXPORT_SYMBOL(caam_little_end);
+#ifdef CONFIG_CAAM_QI
+#include "qi.h"
+#endif
+
/*
* i.MX targets tend to have clock control subsystems that can
* enable/disable clocking to our device.
@@ -310,6 +314,11 @@ static int caam_remove(struct platform_device *pdev)
/* Remove platform devices under the crypto node */
of_platform_depopulate(ctrldev);
+#ifdef CONFIG_CAAM_QI
+ if (ctrlpriv->qidev)
+ caam_qi_shutdown(ctrlpriv->qidev);
+#endif
+
/* De-initialize RNG state handles initialized by this driver. */
if (ctrlpriv->rng4_sh_init)
deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
@@ -400,23 +409,6 @@ int caam_get_era(void)
}
EXPORT_SYMBOL(caam_get_era);
-#ifdef CONFIG_DEBUG_FS
-static int caam_debugfs_u64_get(void *data, u64 *val)
-{
- *val = caam64_to_cpu(*(u64 *)data);
- return 0;
-}
-
-static int caam_debugfs_u32_get(void *data, u64 *val)
-{
- *val = caam32_to_cpu(*(u32 *)data);
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
-DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
-#endif
-
static const struct of_device_id caam_match[] = {
{
.compatible = "fsl,sec-v4.0",
@@ -613,6 +605,18 @@ static int caam_probe(struct platform_device *pdev)
goto iounmap_ctrl;
}
+#ifdef CONFIG_DEBUG_FS
+ /*
+ * FIXME: needs better naming distinction, as some amalgamation of
+ * "caam" and nprop->full_name. The OF name isn't distinctive,
+ * but does separate instances
+ */
+ perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
+
+ ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
+ ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
+#endif
+
ring = 0;
for_each_available_child_of_node(nprop, np)
if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
@@ -637,6 +641,13 @@ static int caam_probe(struct platform_device *pdev)
);
/* This is all that's required to physically enable QI */
wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
+
+ /* If QMAN driver is present, init CAAM-QI backend */
+#ifdef CONFIG_CAAM_QI
+ ret = caam_qi_init(pdev);
+ if (ret)
+ dev_err(dev, "caam qi i/f init failed: %d\n", ret);
+#endif
}
/* If no QI and no rings specified, quit and go home */
@@ -724,17 +735,6 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->total_jobrs, ctrlpriv->qi_present);
#ifdef CONFIG_DEBUG_FS
- /*
- * FIXME: needs better naming distinction, as some amalgamation of
- * "caam" and nprop->full_name. The OF name isn't distinctive,
- * but does separate instances
- */
- perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
-
- ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
- ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
-
- /* Controller-level - performance monitor counters */
ctrlpriv->ctl_rq_dequeued =
debugfs_create_file("rq_dequeued",
@@ -817,6 +817,9 @@ static int caam_probe(struct platform_device *pdev)
return 0;
caam_remove:
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(ctrlpriv->dfs_root);
+#endif
caam_remove(pdev);
return ret;
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index b9c8d98ef826..d8e83ca104e0 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -4,6 +4,9 @@
* Copyright 2008-2012 Freescale Semiconductor, Inc.
*/
+#ifndef DESC_CONSTR_H
+#define DESC_CONSTR_H
+
#include "desc.h"
#include "regs.h"
@@ -491,3 +494,5 @@ static inline int desc_inline_query(unsigned int sd_base_len,
return (rem_bytes >= 0) ? 0 : -1;
}
+
+#endif /* DESC_CONSTR_H */
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index dbed8baeebe5..85b6c5835b8f 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -66,6 +66,9 @@ struct caam_drv_private_jr {
struct caam_drv_private {
struct device *dev;
+#ifdef CONFIG_CAAM_QI
+ struct device *qidev;
+#endif
struct platform_device *pdev;
/* Physical-presence section */
@@ -109,9 +112,30 @@ struct caam_drv_private {
struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
+#ifdef CONFIG_CAAM_QI
+ struct dentry *qi_congested;
+#endif
#endif
};
void caam_jr_algapi_init(struct device *dev);
void caam_jr_algapi_remove(struct device *dev);
+
+#ifdef CONFIG_DEBUG_FS
+static int caam_debugfs_u64_get(void *data, u64 *val)
+{
+ *val = caam64_to_cpu(*(u64 *)data);
+ return 0;
+}
+
+static int caam_debugfs_u32_get(void *data, u64 *val)
+{
+ *val = caam32_to_cpu(*(u32 *)data);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
+#endif
+
#endif /* INTERN_H */
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
new file mode 100644
index 000000000000..1990ed460c46
--- /dev/null
+++ b/drivers/crypto/caam/qi.c
@@ -0,0 +1,805 @@
+/*
+ * CAAM/SEC 4.x QI transport/backend driver
+ * Queue Interface backend functionality
+ *
+ * Copyright 2013-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016-2017 NXP
+ */
+
+#include <linux/cpumask.h>
+#include <linux/kthread.h>
+#include <soc/fsl/qman.h>
+
+#include "regs.h"
+#include "qi.h"
+#include "desc.h"
+#include "intern.h"
+#include "desc_constr.h"
+
+#define PREHDR_RSLS_SHIFT 31
+
+/*
+ * Use a reasonable backlog of frames (per CPU) as congestion threshold,
+ * so that resources used by the in-flight buffers do not become a memory hog.
+ */
+#define MAX_RSP_FQ_BACKLOG_PER_CPU 256
+
+/* Length of a single buffer in the QI driver memory cache */
+#define CAAM_QI_MEMCACHE_SIZE 512
+
+#define CAAM_QI_ENQUEUE_RETRIES 10000
+
+#define CAAM_NAPI_WEIGHT 63
+
+/*
+ * caam_napi - struct holding CAAM NAPI-related params
+ * @irqtask: IRQ task for QI backend
+ * @p: QMan portal
+ */
+struct caam_napi {
+ struct napi_struct irqtask;
+ struct qman_portal *p;
+};
+
+/*
+ * caam_qi_pcpu_priv - percpu private data structure to maintain the list of
+ * pending responses expected on each cpu.
+ * @caam_napi: CAAM NAPI params
+ * @net_dev: netdev used by NAPI
+ * @rsp_fq: response FQ from CAAM
+ */
+struct caam_qi_pcpu_priv {
+ struct caam_napi caam_napi;
+ struct net_device net_dev;
+ struct qman_fq *rsp_fq;
+} ____cacheline_aligned;
+
+static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
+
+/*
+ * caam_qi_priv - CAAM QI backend private params
+ * @cgr: QMan congestion group
+ * @qi_pdev: platform device for QI backend
+ */
+struct caam_qi_priv {
+ struct qman_cgr cgr;
+ struct platform_device *qi_pdev;
+};
+
+static struct caam_qi_priv qipriv ____cacheline_aligned;
+
+/*
+ * This is written by only one core - the one that initialized the CGR - and
+ * read by multiple cores (all the others).
+ */
+bool caam_congested __read_mostly;
+EXPORT_SYMBOL(caam_congested);
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * This is a counter for the number of times the congestion group (where all
+ * the request and response queues are) reached congestion. Incremented
+ * each time the congestion callback is called with congested == true.
+ */
+static u64 times_congested;
+#endif
+
+/*
+ * CPU on which the module was initialised. This is required because the QMan
+ * driver requires CGRs to be removed from the same CPU on which they were
+ * originally allocated.
+ */
+static int mod_init_cpu;
+
+/*
+ * This is a cache of buffers, from which the users of the CAAM QI driver
+ * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
+ * doing malloc on the hotpath.
+ * NOTE: A more elegant solution would be to have some headroom in the frames
+ * being processed. This could be added by the dpaa-ethernet driver.
+ * This would pose a problem for userspace application processing which
+ * cannot know of this limitation. So for now, this will work.
+ * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
+ */
+static struct kmem_cache *qi_cache;
+
+int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
+{
+ struct qm_fd fd;
+ dma_addr_t addr;
+ int ret;
+ int num_retries = 0;
+
+ qm_fd_clear_fd(&fd);
+ qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));
+
+ addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(qidev, addr)) {
+ dev_err(qidev, "DMA mapping error for QI enqueue request\n");
+ return -EIO;
+ }
+ qm_fd_addr_set64(&fd, addr);
+
+ do {
+ ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
+ if (likely(!ret))
+ return 0;
+
+ if (ret != -EBUSY)
+ break;
+ num_retries++;
+ } while (num_retries < CAAM_QI_ENQUEUE_RETRIES);
+
+ dev_err(qidev, "qman_enqueue failed: %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(caam_qi_enqueue);
+
+static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
+ const union qm_mr_entry *msg)
+{
+ const struct qm_fd *fd;
+ struct caam_drv_req *drv_req;
+ struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
+
+ fd = &msg->ern.fd;
+
+ if (qm_fd_get_format(fd) != qm_fd_compound) {
+ dev_err(qidev, "Non-compound FD from CAAM\n");
+ return;
+ }
+
+ drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
+ if (!drv_req) {
+ dev_err(qidev,
+ "Can't find original request for CAAM response\n");
+ return;
+ }
+
+ dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
+ sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
+
+ drv_req->cbk(drv_req, -EIO);
+}
+
+static struct qman_fq *create_caam_req_fq(struct device *qidev,
+ struct qman_fq *rsp_fq,
+ dma_addr_t hwdesc,
+ int fq_sched_flag)
+{
+ int ret;
+ struct qman_fq *req_fq;
+ struct qm_mcc_initfq opts;
+
+ req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
+ if (!req_fq)
+ return ERR_PTR(-ENOMEM);
+
+ req_fq->cb.ern = caam_fq_ern_cb;
+ req_fq->cb.fqs = NULL;
+
+ ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
+ QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
+ if (ret) {
+ dev_err(qidev, "Failed to create session req FQ\n");
+ goto create_req_fq_fail;
+ }
+
+ memset(&opts, 0, sizeof(opts));
+ opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
+ QM_INITFQ_WE_CONTEXTB |
+ QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
+ opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
+ qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
+ opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
+ qm_fqd_context_a_set64(&opts.fqd, hwdesc);
+ opts.fqd.cgid = qipriv.cgr.cgrid;
+
+ ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
+ if (ret) {
+ dev_err(qidev, "Failed to init session req FQ\n");
+ goto init_req_fq_fail;
+ }
+
+ dev_info(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
+ smp_processor_id());
+ return req_fq;
+
+init_req_fq_fail:
+ qman_destroy_fq(req_fq);
+create_req_fq_fail:
+ kfree(req_fq);
+ return ERR_PTR(ret);
+}
+
+static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
+{
+ int ret;
+
+ ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
+ QMAN_VOLATILE_FLAG_FINISH,
+ QM_VDQCR_PRECEDENCE_VDQCR |
+ QM_VDQCR_NUMFRAMES_TILLEMPTY);
+ if (ret) {
+ dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
+ return ret;
+ }
+
+ do {
+ struct qman_portal *p;
+
+ p = qman_get_affine_portal(smp_processor_id());
+ qman_p_poll_dqrr(p, 16);
+ } while (fq->flags & QMAN_FQ_STATE_NE);
+
+ return 0;
+}
+
+static int kill_fq(struct device *qidev, struct qman_fq *fq)
+{
+ u32 flags;
+ int ret;
+
+ ret = qman_retire_fq(fq, &flags);
+ if (ret < 0) {
+ dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
+ return ret;
+ }
+
+ if (!ret)
+ goto empty_fq;
+
+ /* Async FQ retirement condition */
+ if (ret == 1) {
+		/* Retry until the FQ reaches the retired state */
+ do {
+ msleep(20);
+ } while (fq->state != qman_fq_state_retired);
+
+ WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
+ WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
+ }
+
+empty_fq:
+ if (fq->flags & QMAN_FQ_STATE_NE) {
+ ret = empty_retired_fq(qidev, fq);
+ if (ret) {
+ dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
+ fq->fqid);
+ return ret;
+ }
+ }
+
+ ret = qman_oos_fq(fq);
+ if (ret)
+ dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
+
+ qman_destroy_fq(fq);
+
+ return ret;
+}
+
+static int empty_caam_fq(struct qman_fq *fq)
+{
+ int ret;
+ struct qm_mcr_queryfq_np np;
+
+	/* Wait until the older CAAM FQ gets empty */
+ do {
+ ret = qman_query_fq_np(fq, &np);
+ if (ret)
+ return ret;
+
+ if (!qm_mcr_np_get(&np, frm_cnt))
+ break;
+
+ msleep(20);
+ } while (1);
+
+ /*
+ * Give extra time for pending jobs from this FQ in holding tanks
+ * to get processed
+ */
+ msleep(20);
+ return 0;
+}
+
+int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
+{
+ int ret;
+ u32 num_words;
+ struct qman_fq *new_fq, *old_fq;
+ struct device *qidev = drv_ctx->qidev;
+
+ num_words = desc_len(sh_desc);
+ if (num_words > MAX_SDLEN) {
+ dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
+ return -EINVAL;
+ }
+
+ /* Note down older req FQ */
+ old_fq = drv_ctx->req_fq;
+
+ /* Create a new req FQ in parked state */
+ new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
+ drv_ctx->context_a, 0);
+ if (unlikely(IS_ERR_OR_NULL(new_fq))) {
+ dev_err(qidev, "FQ allocation for shdesc update failed\n");
+ return PTR_ERR(new_fq);
+ }
+
+ /* Hook up new FQ to context so that new requests keep queuing */
+ drv_ctx->req_fq = new_fq;
+
+ /* Empty and remove the older FQ */
+ ret = empty_caam_fq(old_fq);
+ if (ret) {
+ dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);
+
+ /* We can revert to older FQ */
+ drv_ctx->req_fq = old_fq;
+
+ if (kill_fq(qidev, new_fq))
+ dev_warn(qidev, "New CAAM FQ: %u kill failed\n",
+ new_fq->fqid);
+
+ return ret;
+ }
+
+ /*
+ * Re-initialise pre-header. Set RSLS and SDLEN.
+ * Update the shared descriptor for driver context.
+ */
+ drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
+ num_words);
+ memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
+ dma_sync_single_for_device(qidev, drv_ctx->context_a,
+ sizeof(drv_ctx->sh_desc) +
+ sizeof(drv_ctx->prehdr),
+ DMA_BIDIRECTIONAL);
+
+ /* Put the new FQ in scheduled state */
+ ret = qman_schedule_fq(new_fq);
+ if (ret) {
+ dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);
+
+ /*
+ * We can kill new FQ and revert to old FQ.
+		 * Since the desc is already modified, this is still a success case.
+ */
+
+ drv_ctx->req_fq = old_fq;
+
+ if (kill_fq(qidev, new_fq))
+ dev_warn(qidev, "New CAAM FQ: %u kill failed\n",
+ new_fq->fqid);
+ } else if (kill_fq(qidev, old_fq)) {
+ dev_warn(qidev, "Old CAAM FQ: %u kill failed\n", old_fq->fqid);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(caam_drv_ctx_update);
+
+struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
+ int *cpu,
+ u32 *sh_desc)
+{
+ size_t size;
+ u32 num_words;
+ dma_addr_t hwdesc;
+ struct caam_drv_ctx *drv_ctx;
+ const cpumask_t *cpus = qman_affine_cpus();
+ static DEFINE_PER_CPU(int, last_cpu);
+
+ num_words = desc_len(sh_desc);
+ if (num_words > MAX_SDLEN) {
+ dev_err(qidev, "Invalid descriptor len: %d words\n",
+ num_words);
+ return ERR_PTR(-EINVAL);
+ }
+
+ drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
+ if (!drv_ctx)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * Initialise pre-header - set RSLS and SDLEN - and shared descriptor
+ * and dma-map them.
+ */
+ drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
+ num_words);
+ memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
+ size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
+ hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(qidev, hwdesc)) {
+ dev_err(qidev, "DMA map error for preheader + shdesc\n");
+ kfree(drv_ctx);
+ return ERR_PTR(-ENOMEM);
+ }
+ drv_ctx->context_a = hwdesc;
+
+	/* If the given CPU does not own the portal, choose another one that does */
+ if (!cpumask_test_cpu(*cpu, cpus)) {
+ int *pcpu = &get_cpu_var(last_cpu);
+
+ *pcpu = cpumask_next(*pcpu, cpus);
+ if (*pcpu >= nr_cpu_ids)
+ *pcpu = cpumask_first(cpus);
+ *cpu = *pcpu;
+
+ put_cpu_var(last_cpu);
+ }
+ drv_ctx->cpu = *cpu;
+
+ /* Find response FQ hooked with this CPU */
+ drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);
+
+ /* Attach request FQ */
+ drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
+ QMAN_INITFQ_FLAG_SCHED);
+ if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) {
+ dev_err(qidev, "create_caam_req_fq failed\n");
+ dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
+ kfree(drv_ctx);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ drv_ctx->qidev = qidev;
+ return drv_ctx;
+}
+EXPORT_SYMBOL(caam_drv_ctx_init);
+
+void *qi_cache_alloc(gfp_t flags)
+{
+ return kmem_cache_alloc(qi_cache, flags);
+}
+EXPORT_SYMBOL(qi_cache_alloc);
+
+void qi_cache_free(void *obj)
+{
+ kmem_cache_free(qi_cache, obj);
+}
+EXPORT_SYMBOL(qi_cache_free);
+
+static int caam_qi_poll(struct napi_struct *napi, int budget)
+{
+ struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);
+
+ int cleaned = qman_p_poll_dqrr(np->p, budget);
+
+ if (cleaned < budget) {
+ napi_complete(napi);
+ qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
+ }
+
+ return cleaned;
+}
+
+void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
+{
+ if (IS_ERR_OR_NULL(drv_ctx))
+ return;
+
+ /* Remove request FQ */
+ if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
+ dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");
+
+ dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
+ sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
+ DMA_BIDIRECTIONAL);
+ kfree(drv_ctx);
+}
+EXPORT_SYMBOL(caam_drv_ctx_rel);
+
+int caam_qi_shutdown(struct device *qidev)
+{
+ int i, ret;
+ struct caam_qi_priv *priv = dev_get_drvdata(qidev);
+ const cpumask_t *cpus = qman_affine_cpus();
+ struct cpumask old_cpumask = current->cpus_allowed;
+
+ for_each_cpu(i, cpus) {
+ struct napi_struct *irqtask;
+
+ irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
+ napi_disable(irqtask);
+ netif_napi_del(irqtask);
+
+ if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
+ dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
+ kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
+ }
+
+ /*
+	 * The QMan driver requires CGRs to be deleted from the same CPU on which
+	 * they were instantiated. Hence we make module removal execute on the
+	 * same CPU on which the module was originally inserted.
+ */
+ set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
+
+ ret = qman_delete_cgr(&priv->cgr);
+ if (ret)
+ dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
+ else
+ qman_release_cgrid(priv->cgr.cgrid);
+
+ kmem_cache_destroy(qi_cache);
+
+ /* Now that we're done with the CGRs, restore the cpus allowed mask */
+ set_cpus_allowed_ptr(current, &old_cpumask);
+
+ platform_device_unregister(priv->qi_pdev);
+ return ret;
+}
+
+static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
+{
+ caam_congested = congested;
+
+ if (congested) {
+#ifdef CONFIG_DEBUG_FS
+ times_congested++;
+#endif
+ pr_debug_ratelimited("CAAM entered congestion\n");
+
+ } else {
+ pr_debug_ratelimited("CAAM exited congestion\n");
+ }
+}
+
+static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
+{
+ /*
+ * In case of threaded ISR, for RT kernels in_irq() does not return
+ * appropriate value, so use in_serving_softirq to distinguish between
+ * softirq and irq contexts.
+ */
+ if (unlikely(in_irq() || !in_serving_softirq())) {
+ /* Disable QMan IRQ source and invoke NAPI */
+ qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
+ np->p = p;
+ napi_schedule(&np->irqtask);
+ return 1;
+ }
+ return 0;
+}
+
+static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
+ struct qman_fq *rsp_fq,
+ const struct qm_dqrr_entry *dqrr)
+{
+ struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
+ struct caam_drv_req *drv_req;
+ const struct qm_fd *fd;
+ struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
+ u32 status;
+
+ if (caam_qi_napi_schedule(p, caam_napi))
+ return qman_cb_dqrr_stop;
+
+ fd = &dqrr->fd;
+ status = be32_to_cpu(fd->status);
+ if (unlikely(status))
+ dev_err(qidev, "Error: %#x in CAAM response FD\n", status);
+
+ if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
+ dev_err(qidev, "Non-compound FD from CAAM\n");
+ return qman_cb_dqrr_consume;
+ }
+
+ drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
+ if (unlikely(!drv_req)) {
+ dev_err(qidev,
+ "Can't find original request for caam response\n");
+ return qman_cb_dqrr_consume;
+ }
+
+ dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
+ sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
+
+ drv_req->cbk(drv_req, status);
+ return qman_cb_dqrr_consume;
+}
+
+static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
+{
+ struct qm_mcc_initfq opts;
+ struct qman_fq *fq;
+ int ret;
+
+ fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
+ if (!fq)
+ return -ENOMEM;
+
+ fq->cb.dqrr = caam_rsp_fq_dqrr_cb;
+
+ ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
+ QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
+ if (ret) {
+ dev_err(qidev, "Rsp FQ create failed\n");
+ kfree(fq);
+ return -ENODEV;
+ }
+
+ memset(&opts, 0, sizeof(opts));
+ opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
+ QM_INITFQ_WE_CONTEXTB |
+ QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
+ opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING |
+ QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
+ qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
+ opts.fqd.cgid = qipriv.cgr.cgrid;
+ opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
+ QM_STASHING_EXCL_DATA;
+ qm_fqd_set_stashing(&opts.fqd, 0, 1, 1);
+
+ ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
+ if (ret) {
+ dev_err(qidev, "Rsp FQ init failed\n");
+ kfree(fq);
+ return -ENODEV;
+ }
+
+ per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
+
+	dev_info(qidev, "Allocated response FQ %u for CPU %u\n", fq->fqid, cpu);
+ return 0;
+}
+
+static int init_cgr(struct device *qidev)
+{
+ int ret;
+ struct qm_mcc_initcgr opts;
+ const u64 cpus = *(u64 *)qman_affine_cpus();
+ const int num_cpus = hweight64(cpus);
+ const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;
+
+ ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
+ if (ret) {
+ dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
+ return ret;
+ }
+
+ qipriv.cgr.cb = cgr_cb;
+ memset(&opts, 0, sizeof(opts));
+ opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
+ QM_CGR_WE_MODE);
+ opts.cgr.cscn_en = QM_CGR_EN;
+ opts.cgr.mode = QMAN_CGR_MODE_FRAME;
+ qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
+
+ ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
+ if (ret) {
+ dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
+ qipriv.cgr.cgrid);
+ return ret;
+ }
+
+ dev_info(qidev, "Congestion threshold set to %llu\n", val);
+ return 0;
+}
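[For example, on a hypothetical system with four portal-owning CPUs, the code above would arm the congestion threshold at 4 * MAX_RSP_FQ_BACKLOG_PER_CPU = 4 * 256 = 1024 in-flight frames.]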
+
+static int alloc_rsp_fqs(struct device *qidev)
+{
+ int ret, i;
+ const cpumask_t *cpus = qman_affine_cpus();
+
+	/* Now create response FQs */
+ for_each_cpu(i, cpus) {
+ ret = alloc_rsp_fq_cpu(qidev, i);
+ if (ret) {
+			dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u\n", i);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void free_rsp_fqs(void)
+{
+ int i;
+ const cpumask_t *cpus = qman_affine_cpus();
+
+ for_each_cpu(i, cpus)
+ kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
+}
+
+int caam_qi_init(struct platform_device *caam_pdev)
+{
+ int err, i;
+ struct platform_device *qi_pdev;
+ struct device *ctrldev = &caam_pdev->dev, *qidev;
+ struct caam_drv_private *ctrlpriv;
+ const cpumask_t *cpus = qman_affine_cpus();
+ struct cpumask old_cpumask = current->cpus_allowed;
+ static struct platform_device_info qi_pdev_info = {
+ .name = "caam_qi",
+ .id = PLATFORM_DEVID_NONE
+ };
+
+ /*
+	 * QMan requires CGRs to be removed from the same CPU+portal on which
+	 * they were originally allocated. Hence we need to note down the
+	 * initialisation CPU and use the same CPU for module exit.
+	 * We select the first CPU from the list of portal-owning CPUs
+	 * and pin module init to it.
+ */
+ mod_init_cpu = cpumask_first(cpus);
+ set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
+
+ qi_pdev_info.parent = ctrldev;
+ qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
+ qi_pdev = platform_device_register_full(&qi_pdev_info);
+ if (IS_ERR(qi_pdev))
+ return PTR_ERR(qi_pdev);
+
+ ctrlpriv = dev_get_drvdata(ctrldev);
+ qidev = &qi_pdev->dev;
+
+ qipriv.qi_pdev = qi_pdev;
+ dev_set_drvdata(qidev, &qipriv);
+
+ /* Initialize the congestion detection */
+ err = init_cgr(qidev);
+ if (err) {
+ dev_err(qidev, "CGR initialization failed: %d\n", err);
+ platform_device_unregister(qi_pdev);
+ return err;
+ }
+
+ /* Initialise response FQs */
+ err = alloc_rsp_fqs(qidev);
+ if (err) {
+ dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
+ free_rsp_fqs();
+ platform_device_unregister(qi_pdev);
+ return err;
+ }
+
+ /*
+	 * Enable the NAPI contexts on each core that has an affine
+	 * portal.
+ */
+ for_each_cpu(i, cpus) {
+ struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
+ struct caam_napi *caam_napi = &priv->caam_napi;
+ struct napi_struct *irqtask = &caam_napi->irqtask;
+ struct net_device *net_dev = &priv->net_dev;
+
+ net_dev->dev = *qidev;
+ INIT_LIST_HEAD(&net_dev->napi_list);
+
+ netif_napi_add(net_dev, irqtask, caam_qi_poll,
+ CAAM_NAPI_WEIGHT);
+
+ napi_enable(irqtask);
+ }
+
+ /* Hook up QI device to parent controlling caam device */
+ ctrlpriv->qidev = qidev;
+
+ qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
+ SLAB_CACHE_DMA, NULL);
+ if (!qi_cache) {
+ dev_err(qidev, "Can't allocate CAAM cache\n");
+ free_rsp_fqs();
+ platform_device_unregister(qi_pdev);
+ return -ENOMEM;
+ }
+
+ /* Done with the CGRs; restore the cpus allowed mask */
+ set_cpus_allowed_ptr(current, &old_cpumask);
+#ifdef CONFIG_DEBUG_FS
+ ctrlpriv->qi_congested = debugfs_create_file("qi_congested", 0444,
+ ctrlpriv->ctl,
+ &times_congested,
+ &caam_fops_u64_ro);
+#endif
+ dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
+ return 0;
+}
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
new file mode 100644
index 000000000000..33b0433f5f22
--- /dev/null
+++ b/drivers/crypto/caam/qi.h
@@ -0,0 +1,201 @@
+/*
+ * Public definitions for the CAAM/QI (Queue Interface) backend.
+ *
+ * Copyright 2013-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016-2017 NXP
+ */
+
+#ifndef __QI_H__
+#define __QI_H__
+
+#include <soc/fsl/qman.h>
+#include "compat.h"
+#include "desc.h"
+#include "desc_constr.h"
+
+/*
+ * CAAM hardware constructs a job descriptor which points to a shared descriptor
+ * (as pointed to by context_a of the to-CAAM FQ).
+ * When the job descriptor is executed by DECO, the whole job descriptor
+ * together with the shared descriptor gets loaded into the DECO buffer, which
+ * is 64 words (each 32-bit) long.
+ *
+ * The job descriptor constructed by CAAM hardware has the following layout:
+ *
+ * HEADER (1 word)
+ * Shdesc ptr (1 or 2 words)
+ * SEQ_OUT_PTR (1 word)
+ * Out ptr (1 or 2 words)
+ * Out length (1 word)
+ * SEQ_IN_PTR (1 word)
+ * In ptr (1 or 2 words)
+ * In length (1 word)
+ *
+ * The shdesc ptr is used to fetch shared descriptor contents into DECO buffer.
+ *
+ * Apart from the shdesc contents, the total number of words that get loaded
+ * into the DECO buffer is 8 or 11. The remaining words in the DECO buffer can
+ * be used for storing the shared descriptor.
+ */
+#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
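[As a rough worked example (editorial illustration, assuming 64-bit DMA addresses, i.e. the 11-word job descriptor case above, and 4-byte CAAM command words): CAAM_DESC_BYTES_MAX = 64 * 4 = 256 bytes and DESC_JOB_IO_LEN = 11 * 4 = 44 bytes, so MAX_SDLEN = (256 - 44) / 4 = 53 words remain for the shared descriptor.]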
+
+extern bool caam_congested __read_mostly;
+
+/*
+ * This is the request structure the driver application should fill while
+ * submitting a job to the driver.
+ */
+struct caam_drv_req;
+
+/*
+ * caam_qi_cbk - application's callback function invoked by the driver when
+ * processing of the request has completed.
+ * @drv_req: original request that was submitted
+ * @status: completion status of request (0 - success, non-zero - error code)
+ */
+typedef void (*caam_qi_cbk)(struct caam_drv_req *drv_req, u32 status);
+
+enum optype {
+ ENCRYPT,
+ DECRYPT,
+ GIVENCRYPT,
+ NUM_OP
+};
+
+/**
+ * caam_drv_ctx - CAAM/QI backend driver context
+ *
+ * The jobs are processed by the driver against a driver context.
+ * A driver context is attached to every cryptographic context.
+ * The driver context contains data for private use by the driver.
+ * For applications, this is an opaque structure.
+ *
+ * @prehdr: preheader placed before the shared descriptor
+ * @sh_desc: shared descriptor
+ * @context_a: shared descriptor dma address
+ * @req_fq: to-CAAM request frame queue
+ * @rsp_fq: from-CAAM response frame queue
+ * @cpu: cpu on which to receive CAAM response
+ * @op_type: operation type
+ * @qidev: device pointer for CAAM/QI backend
+ */
+struct caam_drv_ctx {
+ u32 prehdr[2];
+ u32 sh_desc[MAX_SDLEN];
+ dma_addr_t context_a;
+ struct qman_fq *req_fq;
+ struct qman_fq *rsp_fq;
+ int cpu;
+ enum optype op_type;
+ struct device *qidev;
+} ____cacheline_aligned;
+
+/**
+ * caam_drv_req - The request structure the driver application should fill
+ * while submitting a job to the driver.
+ * @fd_sgt: QMan S/G pointing to output (fd_sgt[0]) and input (fd_sgt[1])
+ *          buffers.
+ * @drv_ctx: driver context against which the job is submitted; this field is
+ *           for private use by the driver and should not be touched by the
+ *           application
+ * @cbk: callback function to invoke when the job is completed
+ * @app_ctx: arbitrary context attached with the request by the application
+ */
+struct caam_drv_req {
+ struct qm_sg_entry fd_sgt[2];
+ struct caam_drv_ctx *drv_ctx;
+ caam_qi_cbk cbk;
+ void *app_ctx;
+} ____cacheline_aligned;
+
+/**
+ * caam_drv_ctx_init - Initialise a CAAM/QI driver context
+ *
+ * A CAAM/QI driver context must be attached with each cryptographic context.
+ * This function allocates memory for CAAM/QI context and returns a handle to
+ * the application. This handle must be submitted along with each enqueue
+ * request to the driver by the application.
+ *
+ * @cpu: CPU on which the application prefers the driver to receive CAAM
+ * responses. The request completion callback would be issued from this
+ * CPU.
+ * @sh_desc: shared descriptor pointer to be attached with CAAM/QI driver
+ * context.
+ *
+ * Returns a driver context on success or negative error code on failure.
+ */
+struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, int *cpu,
+ u32 *sh_desc);
+
+/**
+ * caam_qi_enqueue - Submit a request to QI backend driver.
+ *
+ * The request structure must be properly filled as described above.
+ *
+ * @qidev: device pointer for QI backend
+ * @req: CAAM QI request structure
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req);
+
+/**
+ * caam_drv_ctx_busy - Check if there are too many jobs pending with CAAM
+ * or too many CAAM responses are pending to be processed.
+ * @drv_ctx: driver context for which job is to be submitted
+ *
+ * Returns the CAAM congestion status (true/false).
+ */
+bool caam_drv_ctx_busy(struct caam_drv_ctx *drv_ctx);
+
+/**
+ * caam_drv_ctx_update - Update QI driver context
+ *
+ * Invoked when the shared descriptor needs to be changed in the driver context.
+ *
+ * @drv_ctx: driver context to be updated
+ * @sh_desc: new shared descriptor pointer to be updated in QI driver context
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
+
+/**
+ * caam_drv_ctx_rel - Release a QI driver context
+ * @drv_ctx: context to be released
+ */
+void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);
+
+int caam_qi_init(struct platform_device *pdev);
+int caam_qi_shutdown(struct device *dev);
+
+/**
+ * qi_cache_alloc - Allocate buffers from CAAM-QI cache
+ *
+ * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) needs data which has
+ * to be allocated on the hotpath. Instead of using malloc, one can use the
+ * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
+ * have a size of CAAM_QI_MEMCACHE_SIZE (512 bytes), which is sufficient for
+ * hosting 16 SG entries.
+ *
+ * @flags: flags that would be used for the equivalent malloc(..) call
+ *
+ * Returns a pointer to a retrieved buffer on success or NULL on failure.
+ */
+void *qi_cache_alloc(gfp_t flags);
+
+/**
+ * qi_cache_free - Frees buffers allocated from CAAM-QI cache
+ *
+ * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) no longer needs
+ * the buffer previously allocated by a qi_cache_alloc call.
+ * No checking is done; the call is a pass-through to kmem_cache_free().
+ *
+ * @obj: object previously allocated using qi_cache_alloc()
+ */
+void qi_cache_free(void *obj);
+
+#endif /* __QI_H__ */
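[A minimal usage sketch of the API declared above, illustrative only and not part of the patch: my_done_cb() and my_submit() are hypothetical names, the driver context is assumed to have been created earlier with caam_drv_ctx_init(qidev, &cpu, sh_desc) and to be released later with caam_drv_ctx_rel(), and the fd_sgt entries are assumed to describe already DMA-mapped buffers (see the sg_sw_qm.h helpers added below).

/* Hypothetical caller of the CAAM/QI backend (assumes #include "qi.h"). */
static void my_done_cb(struct caam_drv_req *drv_req, u32 status)
{
	/* act on @status for the request tracked in drv_req->app_ctx ... */
	qi_cache_free(drv_req);			/* return the hot-path buffer */
}

static int my_submit(struct device *qidev, struct caam_drv_ctx *drv_ctx)
{
	struct caam_drv_req *req;
	int ret;

	req = qi_cache_alloc(GFP_ATOMIC);	/* short hot-path buffer */
	if (!req)
		return -ENOMEM;

	/*
	 * req->fd_sgt[0] / req->fd_sgt[1] must describe DMA-mapped output
	 * and input buffers respectively (see sg_sw_qm.h below).
	 */
	req->drv_ctx = drv_ctx;
	req->cbk = my_done_cb;
	req->app_ctx = NULL;

	ret = caam_qi_enqueue(qidev, req);
	if (ret)
		qi_cache_free(req);
	return ret;
}

On completion, the driver invokes my_done_cb() on the CPU that was chosen at caam_drv_ctx_init() time.]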
diff --git a/drivers/crypto/caam/sg_sw_qm.h b/drivers/crypto/caam/sg_sw_qm.h
new file mode 100644
index 000000000000..d000b4df745f
--- /dev/null
+++ b/drivers/crypto/caam/sg_sw_qm.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2013-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016-2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SG_SW_QM_H
+#define __SG_SW_QM_H
+
+#include <soc/fsl/qman.h>
+#include "regs.h"
+
+static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
+ u16 offset)
+{
+ qm_sg_entry_set64(qm_sg_ptr, dma);
+ qm_sg_ptr->__reserved2 = 0;
+ qm_sg_ptr->bpid = 0;
+ qm_sg_ptr->offset = cpu_to_be16(offset & QM_SG_OFF_MASK);
+}
+
+static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
+ dma_addr_t dma, u32 len, u16 offset)
+{
+ __dma_to_qm_sg(qm_sg_ptr, dma, offset);
+ qm_sg_entry_set_len(qm_sg_ptr, len);
+}
+
+static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
+ dma_addr_t dma, u32 len, u16 offset)
+{
+ __dma_to_qm_sg(qm_sg_ptr, dma, offset);
+ qm_sg_entry_set_f(qm_sg_ptr, len);
+}
+
+static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
+ dma_addr_t dma, u32 len, u16 offset)
+{
+ __dma_to_qm_sg(qm_sg_ptr, dma, offset);
+ qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | (len & QM_SG_LEN_MASK));
+}
+
+static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
+ dma_addr_t dma, u32 len,
+ u16 offset)
+{
+ __dma_to_qm_sg(qm_sg_ptr, dma, offset);
+ qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | QM_SG_FIN |
+ (len & QM_SG_LEN_MASK));
+}
+
+/*
+ * Convert a scatterlist to the h/w link table format, but do not set the
+ * final bit; instead, return a pointer to the last entry.
+ */
+static inline struct qm_sg_entry *
+sg_to_qm_sg(struct scatterlist *sg, int sg_count,
+ struct qm_sg_entry *qm_sg_ptr, u16 offset)
+{
+ while (sg_count && sg) {
+ dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
+ sg_dma_len(sg), offset);
+ qm_sg_ptr++;
+ sg = sg_next(sg);
+ sg_count--;
+ }
+ return qm_sg_ptr - 1;
+}
+
+/*
+ * Convert a scatterlist to the h/w link table format and set the final bit.
+ * The scatterlist must have been previously DMA mapped.
+ */
+static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
+ struct qm_sg_entry *qm_sg_ptr, u16 offset)
+{
+ qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
+ qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr));
+}
+
+#endif /* __SG_SW_QM_H */
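[As an illustration of how these helpers combine with the QI request structure from qi.h above, a hypothetical fill_compound_fd() could describe one output and one input buffer as the fd_sgt[] pair of a compound frame. This is a sketch only: the function name is invented, both addresses are assumed to be DMA mapped already, and marking the input entry as final follows the fd_sgt[1]-is-last convention rather than anything stated in this patch.

/* Sketch: fd_sgt[0] = output, fd_sgt[1] = input (assumes qi.h + sg_sw_qm.h). */
static void fill_compound_fd(struct caam_drv_req *req,
			     dma_addr_t dst, u32 dst_len,
			     dma_addr_t src, u32 src_len)
{
	dma_to_qm_sg_one(&req->fd_sgt[0], dst, dst_len, 0);		/* output */
	dma_to_qm_sg_one_last(&req->fd_sgt[1], src, src_len, 0);	/* input */
}
]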
diff --git a/drivers/crypto/cavium/Makefile b/drivers/crypto/cavium/Makefile
new file mode 100644
index 000000000000..641268b784be
--- /dev/null
+++ b/drivers/crypto/cavium/Makefile
@@ -0,0 +1,4 @@
+#
+# Makefile for Cavium crypto device drivers
+#
+obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += zip/
diff --git a/drivers/crypto/cavium/zip/Makefile b/drivers/crypto/cavium/zip/Makefile
new file mode 100644
index 000000000000..b2f3baaff757
--- /dev/null
+++ b/drivers/crypto/cavium/zip/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for Cavium's ZIP Driver.
+#
+
+obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += thunderx_zip.o
+thunderx_zip-y := zip_main.o \
+ zip_device.o \
+ zip_crypto.o \
+ zip_mem.o \
+ zip_deflate.o \
+ zip_inflate.o
diff --git a/drivers/crypto/cavium/zip/common.h b/drivers/crypto/cavium/zip/common.h
new file mode 100644
index 000000000000..dc451e0a43c5
--- /dev/null
+++ b/drivers/crypto/cavium/zip/common.h
@@ -0,0 +1,202 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#ifndef __COMMON_H__
+#define __COMMON_H__
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/version.h>
+
+/* Device specific zlib function definitions */
+#include "zip_device.h"
+
+/* ZIP device definitions */
+#include "zip_main.h"
+
+/* ZIP memory allocation/deallocation related definitions */
+#include "zip_mem.h"
+
+/* Device specific structure definitions */
+#include "zip_regs.h"
+
+#define ZIP_ERROR -1
+
+#define ZIP_FLUSH_FINISH 4
+
+#define RAW_FORMAT 0 /* for rawpipe */
+#define ZLIB_FORMAT 1 /* for zpipe */
+#define GZIP_FORMAT 2 /* for gzpipe */
+#define LZS_FORMAT 3 /* for lzspipe */
+
+/* Max number of ZIP devices supported */
+#define MAX_ZIP_DEVICES 2
+
+/* Configures the number of zip queues to be used */
+#define ZIP_NUM_QUEUES 2
+
+#define DYNAMIC_STOP_EXCESS 1024
+
+/* Maximum buffer sizes in direct mode */
+#define MAX_INPUT_BUFFER_SIZE (64 * 1024)
+#define MAX_OUTPUT_BUFFER_SIZE (64 * 1024)
+
+/**
+ * struct zip_operation - common data structure for comp and decomp operations
+ * @input: Next input byte is read from here
+ * @output: Next output byte written here
+ * @ctx_addr: Inflate context buffer address
+ * @history: Pointer to the history buffer
+ * @input_len: Number of bytes available at next_in
+ * @input_total_len: Total number of input bytes read
+ * @output_len: Remaining free space at next_out
+ * @output_total_len: Total number of bytes output so far
+ * @csum: Checksum value of the uncompressed data
+ * @flush: Flush flag
+ * @format: Format (depends on stream's wrap)
+ * @speed: Speed depends on stream's level
+ * @ccode: Compression code (stream's strategy)
+ * @lzs_flag: Flag for LZS support
+ * @begin_file: Beginning of file indication for inflate
+ * @history_len: Size of the history data
+ * @end_file: Ending of the file indication for inflate
+ * @compcode: Completion status of the ZIP invocation
+ * @bytes_read: Input bytes read in current instruction
+ * @bits_processed: Total bits processed for entire file
+ * @sizeofptr: To distinguish between ILP32 and LP64
+ * @sizeofzops: Optional just for padding
+ *
+ * This structure is used to maintain the required meta data for the
+ * comp and decomp operations.
+ */
+struct zip_operation {
+ u8 *input;
+ u8 *output;
+ u64 ctx_addr;
+ u64 history;
+
+ u32 input_len;
+ u32 input_total_len;
+
+ u32 output_len;
+ u32 output_total_len;
+
+ u32 csum;
+ u32 flush;
+
+ u32 format;
+ u32 speed;
+ u32 ccode;
+ u32 lzs_flag;
+
+ u32 begin_file;
+ u32 history_len;
+
+ u32 end_file;
+ u32 compcode;
+ u32 bytes_read;
+ u32 bits_processed;
+
+ u32 sizeofptr;
+ u32 sizeofzops;
+};
+
+/* error messages */
+#define zip_err(fmt, args...) pr_err("ZIP ERR:%s():%d: " \
+ fmt "\n", __func__, __LINE__, ## args)
+
+#ifdef MSG_ENABLE
+/* Enable all messages */
+#define zip_msg(fmt, args...) pr_info("ZIP_MSG:" fmt "\n", ## args)
+#else
+#define zip_msg(fmt, args...)
+#endif
+
+#if defined(ZIP_DEBUG_ENABLE) && defined(MSG_ENABLE)
+
+#ifdef DEBUG_LEVEL
+
+#define FILE_NAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : \
+ strrchr(__FILE__, '\\') ? strrchr(__FILE__, '\\') + 1 : __FILE__)
+
+#if DEBUG_LEVEL >= 4
+
+#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s: %s() : %d: " \
+ fmt "\n", FILE_NAME, __func__, __LINE__, ## args)
+
+#elif DEBUG_LEVEL >= 3
+
+#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s: %s() : %d: " \
+ fmt "\n", FILE_NAME, __func__, __LINE__, ## args)
+
+#elif DEBUG_LEVEL >= 2
+
+#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s() : %d: " \
+ fmt "\n", __func__, __LINE__, ## args)
+
+#else
+
+#define zip_dbg(fmt, args...) pr_info("ZIP DBG:" fmt "\n", ## args)
+
+#endif /* DEBUG LEVEL >=4 */
+
+#else
+
+#define zip_dbg(fmt, args...) pr_info("ZIP DBG:" fmt "\n", ## args)
+
+#endif /* DEBUG_LEVEL */
+#else
+
+#define zip_dbg(fmt, args...)
+
+#endif /* ZIP_DEBUG_ENABLE && MSG_ENABLE*/
+
+#endif
diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c
new file mode 100644
index 000000000000..8df4d26cf9d4
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_crypto.c
@@ -0,0 +1,313 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#include "zip_crypto.h"
+
+static void zip_static_init_zip_ops(struct zip_operation *zip_ops,
+ int lzs_flag)
+{
+ zip_ops->flush = ZIP_FLUSH_FINISH;
+
+ /* equivalent to level 6 of opensource zlib */
+ zip_ops->speed = 1;
+
+ if (!lzs_flag) {
+ zip_ops->ccode = 0; /* Auto Huffman */
+ zip_ops->lzs_flag = 0;
+ zip_ops->format = ZLIB_FORMAT;
+ } else {
+ zip_ops->ccode = 3; /* LZS Encoding */
+ zip_ops->lzs_flag = 1;
+ zip_ops->format = LZS_FORMAT;
+ }
+ zip_ops->begin_file = 1;
+ zip_ops->history_len = 0;
+ zip_ops->end_file = 1;
+ zip_ops->compcode = 0;
+ zip_ops->csum = 1; /* Adler checksum desired */
+}
+
+int zip_ctx_init(struct zip_kernel_ctx *zip_ctx, int lzs_flag)
+{
+ struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
+ struct zip_operation *decomp_ctx = &zip_ctx->zip_decomp;
+
+ zip_static_init_zip_ops(comp_ctx, lzs_flag);
+ zip_static_init_zip_ops(decomp_ctx, lzs_flag);
+
+ comp_ctx->input = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE);
+ if (!comp_ctx->input)
+ return -ENOMEM;
+
+ comp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE);
+ if (!comp_ctx->output)
+ goto err_comp_input;
+
+ decomp_ctx->input = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE);
+ if (!decomp_ctx->input)
+ goto err_comp_output;
+
+ decomp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE);
+ if (!decomp_ctx->output)
+ goto err_decomp_input;
+
+ return 0;
+
+err_decomp_input:
+ zip_data_buf_free(decomp_ctx->input, MAX_INPUT_BUFFER_SIZE);
+
+err_comp_output:
+ zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
+
+err_comp_input:
+ zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE);
+
+ return -ENOMEM;
+}
+
+void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
+{
+ struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
+ struct zip_operation *dec_ctx = &zip_ctx->zip_decomp;
+
+ zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE);
+ zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
+
+ zip_data_buf_free(dec_ctx->input, MAX_INPUT_BUFFER_SIZE);
+ zip_data_buf_free(dec_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
+}
+
+int zip_compress(const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen,
+ struct zip_kernel_ctx *zip_ctx)
+{
+ struct zip_operation *zip_ops = NULL;
+ struct zip_state zip_state;
+ struct zip_device *zip = NULL;
+ int ret;
+
+ if (!zip_ctx || !src || !dst || !dlen)
+ return -ENOMEM;
+
+ zip = zip_get_device(zip_get_node_id());
+ if (!zip)
+ return -ENODEV;
+
+ memset(&zip_state, 0, sizeof(struct zip_state));
+ zip_ops = &zip_ctx->zip_comp;
+
+ zip_ops->input_len = slen;
+ zip_ops->output_len = *dlen;
+ memcpy(zip_ops->input, src, slen);
+
+ ret = zip_deflate(zip_ops, &zip_state, zip);
+
+ if (!ret) {
+ *dlen = zip_ops->output_len;
+ memcpy(dst, zip_ops->output, *dlen);
+ }
+
+ return ret;
+}
+
+int zip_decompress(const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen,
+ struct zip_kernel_ctx *zip_ctx)
+{
+ struct zip_operation *zip_ops = NULL;
+ struct zip_state zip_state;
+ struct zip_device *zip = NULL;
+ int ret;
+
+ if (!zip_ctx || !src || !dst || !dlen)
+ return -ENOMEM;
+
+ zip = zip_get_device(zip_get_node_id());
+ if (!zip)
+ return -ENODEV;
+
+ memset(&zip_state, 0, sizeof(struct zip_state));
+ zip_ops = &zip_ctx->zip_decomp;
+ memcpy(zip_ops->input, src, slen);
+
+	/* Workaround for a bug in zlib which sometimes needs an extra byte */
+ if (zip_ops->ccode != 3) /* Not LZS Encoding */
+ zip_ops->input[slen++] = 0;
+
+ zip_ops->input_len = slen;
+ zip_ops->output_len = *dlen;
+
+ ret = zip_inflate(zip_ops, &zip_state, zip);
+
+ if (!ret) {
+ *dlen = zip_ops->output_len;
+ memcpy(dst, zip_ops->output, *dlen);
+ }
+
+ return ret;
+}
+
+/* Legacy Compress framework start */
+int zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm)
+{
+ int ret;
+ struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
+
+ ret = zip_ctx_init(zip_ctx, 0);
+
+ return ret;
+}
+
+int zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm)
+{
+ int ret;
+ struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
+
+ ret = zip_ctx_init(zip_ctx, 1);
+
+ return ret;
+}
+
+void zip_free_comp_ctx(struct crypto_tfm *tfm)
+{
+ struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
+
+ zip_ctx_exit(zip_ctx);
+}
+
+int zip_comp_compress(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
+{
+ int ret;
+ struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
+
+ ret = zip_compress(src, slen, dst, dlen, zip_ctx);
+
+ return ret;
+}
+
+int zip_comp_decompress(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
+{
+ int ret;
+ struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
+
+ ret = zip_decompress(src, slen, dst, dlen, zip_ctx);
+
+ return ret;
+} /* Legacy compress framework end */
+
+/* SCOMP framework start */
+void *zip_alloc_scomp_ctx_deflate(struct crypto_scomp *tfm)
+{
+ int ret;
+ struct zip_kernel_ctx *zip_ctx;
+
+ zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL);
+ if (!zip_ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ret = zip_ctx_init(zip_ctx, 0);
+
+ if (ret) {
+ kzfree(zip_ctx);
+ return ERR_PTR(ret);
+ }
+
+ return zip_ctx;
+}
+
+void *zip_alloc_scomp_ctx_lzs(struct crypto_scomp *tfm)
+{
+ int ret;
+ struct zip_kernel_ctx *zip_ctx;
+
+ zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL);
+ if (!zip_ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ret = zip_ctx_init(zip_ctx, 1);
+
+ if (ret) {
+ kzfree(zip_ctx);
+ return ERR_PTR(ret);
+ }
+
+ return zip_ctx;
+}
+
+void zip_free_scomp_ctx(struct crypto_scomp *tfm, void *ctx)
+{
+ struct zip_kernel_ctx *zip_ctx = ctx;
+
+ zip_ctx_exit(zip_ctx);
+ kzfree(zip_ctx);
+}
+
+int zip_scomp_compress(struct crypto_scomp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx)
+{
+ int ret;
+ struct zip_kernel_ctx *zip_ctx = ctx;
+
+ ret = zip_compress(src, slen, dst, dlen, zip_ctx);
+
+ return ret;
+}
+
+int zip_scomp_decompress(struct crypto_scomp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx)
+{
+ int ret;
+ struct zip_kernel_ctx *zip_ctx = ctx;
+
+ ret = zip_decompress(src, slen, dst, dlen, zip_ctx);
+
+ return ret;
+} /* SCOMP framework end */
diff --git a/drivers/crypto/cavium/zip/zip_crypto.h b/drivers/crypto/cavium/zip/zip_crypto.h
new file mode 100644
index 000000000000..b59ddfcacd34
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_crypto.h
@@ -0,0 +1,79 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#ifndef __ZIP_CRYPTO_H__
+#define __ZIP_CRYPTO_H__
+
+#include <linux/crypto.h>
+#include <crypto/internal/scompress.h>
+#include "common.h"
+#include "zip_deflate.h"
+#include "zip_inflate.h"
+
+struct zip_kernel_ctx {
+ struct zip_operation zip_comp;
+ struct zip_operation zip_decomp;
+};
+
+int zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm);
+int zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm);
+void zip_free_comp_ctx(struct crypto_tfm *tfm);
+int zip_comp_compress(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
+int zip_comp_decompress(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
+
+void *zip_alloc_scomp_ctx_deflate(struct crypto_scomp *tfm);
+void *zip_alloc_scomp_ctx_lzs(struct crypto_scomp *tfm);
+void zip_free_scomp_ctx(struct crypto_scomp *tfm, void *zip_ctx);
+int zip_scomp_compress(struct crypto_scomp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx);
+int zip_scomp_decompress(struct crypto_scomp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx);
+#endif
diff --git a/drivers/crypto/cavium/zip/zip_deflate.c b/drivers/crypto/cavium/zip/zip_deflate.c
new file mode 100644
index 000000000000..9a944b8c1e29
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_deflate.c
@@ -0,0 +1,200 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include "common.h"
+#include "zip_deflate.h"
+
+/* Prepares the deflate zip command */
+static int prepare_zip_command(struct zip_operation *zip_ops,
+ struct zip_state *s, union zip_inst_s *zip_cmd)
+{
+ union zip_zres_s *result_ptr = &s->result;
+
+ memset(zip_cmd, 0, sizeof(s->zip_cmd));
+ memset(result_ptr, 0, sizeof(s->result));
+
+ /* IWORD #0 */
+ /* History gather */
+ zip_cmd->s.hg = 0;
+ /* compression enable = 1 for deflate */
+ zip_cmd->s.ce = 1;
+ /* sf (sync flush) */
+ zip_cmd->s.sf = 1;
+ /* ef (end of file) */
+ if (zip_ops->flush == ZIP_FLUSH_FINISH) {
+ zip_cmd->s.ef = 1;
+ zip_cmd->s.sf = 0;
+ }
+
+ zip_cmd->s.cc = zip_ops->ccode;
+ /* ss (compression speed/storage) */
+ zip_cmd->s.ss = zip_ops->speed;
+
+ /* IWORD #1 */
+ /* adler checksum */
+ zip_cmd->s.adlercrc32 = zip_ops->csum;
+ zip_cmd->s.historylength = zip_ops->history_len;
+ zip_cmd->s.dg = 0;
+
+ /* IWORD # 6 and 7 - compression input/history pointer */
+ zip_cmd->s.inp_ptr_addr.s.addr = __pa(zip_ops->input);
+ zip_cmd->s.inp_ptr_ctl.s.length = (zip_ops->input_len +
+ zip_ops->history_len);
+ zip_cmd->s.ds = 0;
+
+ /* IWORD # 8 and 9 - Output pointer */
+ zip_cmd->s.out_ptr_addr.s.addr = __pa(zip_ops->output);
+ zip_cmd->s.out_ptr_ctl.s.length = zip_ops->output_len;
+ /* maximum number of output-stream bytes that can be written */
+ zip_cmd->s.totaloutputlength = zip_ops->output_len;
+
+ /* IWORD # 10 and 11 - Result pointer */
+ zip_cmd->s.res_ptr_addr.s.addr = __pa(result_ptr);
+ /* Clearing completion code */
+ result_ptr->s.compcode = 0;
+
+ return 0;
+}
+
+/**
+ * zip_deflate - API to offload deflate operation to hardware
+ * @zip_ops: Pointer to zip operation structure
+ * @s: Pointer to the structure representing zip state
+ * @zip_dev: Pointer to zip device structure
+ *
+ * This function prepares the zip deflate command and submits it to the zip
+ * engine for processing.
+ *
+ * Return: 0 if successful or error code
+ */
+int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s,
+ struct zip_device *zip_dev)
+{
+ union zip_inst_s *zip_cmd = &s->zip_cmd;
+ union zip_zres_s *result_ptr = &s->result;
+ u32 queue;
+
+ /* Prepares zip command based on the input parameters */
+ prepare_zip_command(zip_ops, s, zip_cmd);
+
+ atomic64_add(zip_ops->input_len, &zip_dev->stats.comp_in_bytes);
+ /* Loads zip command into command queues and rings door bell */
+ queue = zip_load_instr(zip_cmd, zip_dev);
+
+ /* Stats update for compression requests submitted */
+ atomic64_inc(&zip_dev->stats.comp_req_submit);
+
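+	/* Busy-wait until the ZIP engine posts a completion code in the result */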
+ while (!result_ptr->s.compcode)
+ continue;
+
+ /* Stats update for compression requests completed */
+ atomic64_inc(&zip_dev->stats.comp_req_complete);
+
+ zip_ops->compcode = result_ptr->s.compcode;
+ switch (zip_ops->compcode) {
+ case ZIP_CMD_NOTDONE:
+ zip_dbg("Zip instruction not yet completed");
+ return ZIP_ERROR;
+
+ case ZIP_CMD_SUCCESS:
+ zip_dbg("Zip instruction completed successfully");
+ zip_update_cmd_bufs(zip_dev, queue);
+ break;
+
+ case ZIP_CMD_DTRUNC:
+ zip_dbg("Output Truncate error");
+ /* Returning ZIP_ERROR to avoid copy to user */
+ return ZIP_ERROR;
+
+ default:
+ zip_err("Zip instruction failed. Code:%d", zip_ops->compcode);
+ return ZIP_ERROR;
+ }
+
+ /* Update the CRC depending on the format */
+ switch (zip_ops->format) {
+ case RAW_FORMAT:
+ zip_dbg("RAW Format: %d ", zip_ops->format);
+ /* Get checksum from engine, need to feed it again */
+ zip_ops->csum = result_ptr->s.adler32;
+ break;
+
+ case ZLIB_FORMAT:
+ zip_dbg("ZLIB Format: %d ", zip_ops->format);
+ zip_ops->csum = result_ptr->s.adler32;
+ break;
+
+ case GZIP_FORMAT:
+ zip_dbg("GZIP Format: %d ", zip_ops->format);
+ zip_ops->csum = result_ptr->s.crc32;
+ break;
+
+ case LZS_FORMAT:
+ zip_dbg("LZS Format: %d ", zip_ops->format);
+ break;
+
+ default:
+ zip_err("Unknown Format:%d\n", zip_ops->format);
+ }
+
+ atomic64_add(result_ptr->s.totalbyteswritten,
+ &zip_dev->stats.comp_out_bytes);
+
+ /* Update output_len */
+ if (zip_ops->output_len < result_ptr->s.totalbyteswritten) {
+ /* Dynamic stop && strm->output_len < zipconstants[onfsize] */
+ zip_err("output_len (%d) < total bytes written(%d)\n",
+ zip_ops->output_len, result_ptr->s.totalbyteswritten);
+ zip_ops->output_len = 0;
+
+ } else {
+ zip_ops->output_len = result_ptr->s.totalbyteswritten;
+ }
+
+ return 0;
+}
diff --git a/drivers/crypto/cavium/zip/zip_deflate.h b/drivers/crypto/cavium/zip/zip_deflate.h
new file mode 100644
index 000000000000..1d32e76edc4d
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_deflate.h
@@ -0,0 +1,62 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#ifndef __ZIP_DEFLATE_H__
+#define __ZIP_DEFLATE_H__
+
+/**
+ * zip_deflate - API to offload deflate operation to hardware
+ * @zip_ops: Pointer to zip operation structure
+ * @s: Pointer to the structure representing zip state
+ * @zip_dev: Pointer to the structure representing zip device
+ *
+ * This function prepares the zip deflate command and submits it to the zip
+ * engine by ringing the doorbell.
+ *
+ * Return: 0 if successful or error code
+ */
+int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s,
+ struct zip_device *zip_dev);
+#endif
diff --git a/drivers/crypto/cavium/zip/zip_device.c b/drivers/crypto/cavium/zip/zip_device.c
new file mode 100644
index 000000000000..ccf21fb91513
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_device.c
@@ -0,0 +1,202 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#include "common.h"
+#include "zip_deflate.h"
+
+/**
+ * zip_cmd_queue_consumed - Calculates the space consumed in the command queue.
+ *
+ * @zip_dev: Pointer to zip device structure
+ * @queue: Queue number
+ *
+ * Return: Bytes consumed in the command queue buffer.
+ */
+static inline u32 zip_cmd_queue_consumed(struct zip_device *zip_dev, int queue)
+{
+ return ((zip_dev->iq[queue].sw_head - zip_dev->iq[queue].sw_tail) *
+ sizeof(u64 *));
+}
+
+/**
+ * zip_load_instr - Submits the instruction into the ZIP command queue
+ * @instr: Pointer to the instruction to be submitted
+ * @zip_dev: Pointer to ZIP device structure to which the instruction is to
+ * be submitted
+ *
+ * This function copies the ZIP instruction to the command queue and rings the
+ * doorbell to notify the engine of the instruction submission. The command
+ * queue is maintained in a circular fashion. When there is space for exactly
+ * one instruction left in the queue, the queue's next-chunk pointer is made
+ * to point back to the head of the queue, keeping the queue circular.
+ *
+ * Return: Queue number to which the instruction was submitted
+ */
+u32 zip_load_instr(union zip_inst_s *instr,
+ struct zip_device *zip_dev)
+{
+ union zip_quex_doorbell dbell;
+ u32 queue = 0;
+ u32 consumed = 0;
+ u64 *ncb_ptr = NULL;
+ union zip_nptr_s ncp;
+
+ /*
+ * Distribute the instructions between the enabled queues based on
+ * the CPU id.
+ */
+ if (smp_processor_id() % 2 == 0)
+ queue = 0;
+ else
+ queue = 1;
+
+ zip_dbg("CPU Core: %d Queue number:%d", smp_processor_id(), queue);
+
+ /* Take cmd buffer lock */
+ spin_lock(&zip_dev->iq[queue].lock);
+
+ /*
+ * Command Queue implementation
+	 * 1. If there is space for new instructions, push the cmd at sw_head.
+	 * 2. If there is space for exactly one instruction, push the new cmd
+	 *    at sw_head, make sw_head point to sw_tail to keep the queue
+	 *    circular, and write sw_head's physical address to the "Next-Chunk
+	 *    Buffer Ptr" so that it becomes cmd_hw_tail.
+	 * 3. Ring the doorbell.
+ */
+ zip_dbg("sw_head : %lx", zip_dev->iq[queue].sw_head);
+ zip_dbg("sw_tail : %lx", zip_dev->iq[queue].sw_tail);
+
+ consumed = zip_cmd_queue_consumed(zip_dev, queue);
+ /* Check if there is space to push just one cmd */
+ if ((consumed + 128) == (ZIP_CMD_QBUF_SIZE - 8)) {
+ zip_dbg("Cmd queue space available for single command");
+		/* Space for one cmd; push it and keep the queue circular */
+ memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr,
+ sizeof(union zip_inst_s));
+ zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */
+
+ /* Now, point the "Next-Chunk Buffer Ptr" to sw_head */
+ ncb_ptr = zip_dev->iq[queue].sw_head;
+
+ zip_dbg("ncb addr :0x%lx sw_head addr :0x%lx",
+ ncb_ptr, zip_dev->iq[queue].sw_head - 16);
+
+ /* Using Circular command queue */
+ zip_dev->iq[queue].sw_head = zip_dev->iq[queue].sw_tail;
+ /* Mark this buffer for free */
+ zip_dev->iq[queue].free_flag = 1;
+
+ /* Write new chunk buffer address at "Next-Chunk Buffer Ptr" */
+ ncp.u_reg64 = 0ull;
+ ncp.s.addr = __pa(zip_dev->iq[queue].sw_head);
+ *ncb_ptr = ncp.u_reg64;
+ zip_dbg("*ncb_ptr :0x%lx sw_head[phys] :0x%lx",
+ *ncb_ptr, __pa(zip_dev->iq[queue].sw_head));
+
+ zip_dev->iq[queue].pend_cnt++;
+
+ } else {
+ zip_dbg("Enough space is available for commands");
+ /* Push this cmd to cmd queue buffer */
+ memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr,
+ sizeof(union zip_inst_s));
+ zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */
+
+ zip_dev->iq[queue].pend_cnt++;
+ }
+ zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
+ zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail,
+ zip_dev->iq[queue].hw_tail);
+
+ zip_dbg(" Pushed the new cmd : pend_cnt : %d",
+ zip_dev->iq[queue].pend_cnt);
+
+ /* Ring the doorbell */
+ dbell.u_reg64 = 0ull;
+ dbell.s.dbell_cnt = 1;
+ zip_reg_write(dbell.u_reg64,
+ (zip_dev->reg_base + ZIP_QUEX_DOORBELL(queue)));
+
+ /* Unlock cmd buffer lock */
+ spin_unlock(&zip_dev->iq[queue].lock);
+
+ return queue;
+}
+
+/**
+ * zip_update_cmd_bufs - Updates the queue statistics after posting the
+ * instruction
+ * @zip_dev: Pointer to zip device structure
+ * @queue: Queue number
+ */
+void zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue)
+{
+ /* Take cmd buffer lock */
+ spin_lock(&zip_dev->iq[queue].lock);
+
+ /* Check if the previous buffer can be freed */
+ if (zip_dev->iq[queue].free_flag == 1) {
+ zip_dbg("Free flag. Free cmd buffer, adjust sw head and tail");
+ /* Reset the free flag */
+ zip_dev->iq[queue].free_flag = 0;
+
+ /* Point the hw_tail to start of the new chunk buffer */
+ zip_dev->iq[queue].hw_tail = zip_dev->iq[queue].sw_head;
+ } else {
+ zip_dbg("Free flag not set. increment hw tail");
+ zip_dev->iq[queue].hw_tail += 16; /* 16 64_bit words = 128B */
+ }
+
+ zip_dev->iq[queue].done_cnt++;
+ zip_dev->iq[queue].pend_cnt--;
+
+ zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
+ zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail,
+ zip_dev->iq[queue].hw_tail);
+ zip_dbg(" Got CC : pend_cnt : %d\n", zip_dev->iq[queue].pend_cnt);
+
+ spin_unlock(&zip_dev->iq[queue].lock);
+}
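
To put a number on the wrap condition in zip_load_instr() above: one union zip_inst_s is 16 64-bit words (128 bytes, as the sw_head += 16 comments note), and the last 8 bytes of the command buffer are reserved for the next-chunk pointer. The host-side sketch below, which assumes the ZIP_CMD_QBUF_SIZE value (8064 + 8) defined in zip_main.h later in this patch, simply replays that arithmetic:

#include <stdio.h>

#define ZIP_CMD_QBUF_SIZE	(8064 + 8)	/* from zip_main.h */
#define ZIP_CMD_SIZE		128		/* 16 x 64-bit words per command */

int main(void)
{
	unsigned int consumed;

	/* Mirrors: (consumed + 128) == (ZIP_CMD_QBUF_SIZE - 8) */
	for (consumed = 0; consumed <= ZIP_CMD_QBUF_SIZE; consumed += ZIP_CMD_SIZE) {
		if ((consumed + ZIP_CMD_SIZE) == (ZIP_CMD_QBUF_SIZE - 8)) {
			printf("wrap fires with %u commands already queued\n",
			       consumed / ZIP_CMD_SIZE);
			break;
		}
	}
	return 0;
}

So the buffer holds 63 command slots; the wrap branch runs while the last slot is being filled, and the trailing 8 bytes receive the zip_nptr_s chunk pointer that redirects the hardware back to the start of the buffer.
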
diff --git a/drivers/crypto/cavium/zip/zip_device.h b/drivers/crypto/cavium/zip/zip_device.h
new file mode 100644
index 000000000000..9e18b3b93d38
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_device.h
@@ -0,0 +1,108 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#ifndef __ZIP_DEVICE_H__
+#define __ZIP_DEVICE_H__
+
+#include <linux/types.h>
+#include "zip_main.h"
+
+struct sg_info {
+ /*
+ * Pointer to the input data when scatter_gather == 0 and
+ * pointer to the input gather list buffer when scatter_gather == 1
+ */
+ union zip_zptr_s *gather;
+
+ /*
+ * Pointer to the output data when scatter_gather == 0 and
+ * pointer to the output scatter list buffer when scatter_gather == 1
+ */
+ union zip_zptr_s *scatter;
+
+ /*
+ * Holds size of the output buffer pointed by scatter list
+ * when scatter_gather == 1
+ */
+ u64 scatter_buf_size;
+
+ /* for gather data */
+ u64 gather_enable;
+
+ /* for scatter data */
+ u64 scatter_enable;
+
+ /* Number of gather list pointers for gather data */
+ u32 gbuf_cnt;
+
+ /* Number of scatter list pointers for scatter data */
+ u32 sbuf_cnt;
+
+ /* Buffers allocation state */
+ u8 alloc_state;
+};
+
+/**
+ * struct zip_state - Structure representing the required information related
+ * to a command
+ * @zip_cmd: Pointer to zip instruction structure
+ * @result: Pointer to zip result structure
+ * @ctx: Context pointer for inflate
+ * @history: Decompression history pointer
+ * @sginfo: Scatter-gather info structure
+ */
+struct zip_state {
+ union zip_inst_s zip_cmd;
+ union zip_zres_s result;
+ union zip_zptr_s *ctx;
+ union zip_zptr_s *history;
+ struct sg_info sginfo;
+};
+
+#define ZIP_CONTEXT_SIZE 2048
+#define ZIP_INFLATE_HISTORY_SIZE 32768
+#define ZIP_DEFLATE_HISTORY_SIZE 32768
+
+#endif
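
A small compile-time check one could imagine next to this structure, based on the "16 64_bit words = 128B" stepping used in zip_device.c; the union definitions live in zip_regs.h and are only partially visible in this excerpt, so treat this as an illustration of the assumption rather than part of the patch:

#include <linux/bug.h>
#include "zip_regs.h"

/* Illustrative: zip_load_instr() advances sw_head by 16 u64s per command,
 * which is only correct if one instruction word is exactly 128 bytes. */
static inline void zip_inst_layout_check(void)
{
	BUILD_BUG_ON(sizeof(union zip_inst_s) != 16 * sizeof(u64));
}
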
diff --git a/drivers/crypto/cavium/zip/zip_inflate.c b/drivers/crypto/cavium/zip/zip_inflate.c
new file mode 100644
index 000000000000..50cbdd83dbf2
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_inflate.c
@@ -0,0 +1,223 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include "common.h"
+#include "zip_inflate.h"
+
+static int prepare_inflate_zcmd(struct zip_operation *zip_ops,
+ struct zip_state *s, union zip_inst_s *zip_cmd)
+{
+ union zip_zres_s *result_ptr = &s->result;
+
+ memset(zip_cmd, 0, sizeof(s->zip_cmd));
+ memset(result_ptr, 0, sizeof(s->result));
+
+ /* IWORD#0 */
+
+ /* Decompression History Gather list - no gather list */
+ zip_cmd->s.hg = 0;
+ /* For decompression, CE must be 0x0. */
+ zip_cmd->s.ce = 0;
+ /* For decompression, SS must be 0x0. */
+ zip_cmd->s.ss = 0;
+ /* For decompression, SF should always be set. */
+ zip_cmd->s.sf = 1;
+
+ /* Begin File */
+ if (zip_ops->begin_file == 0)
+ zip_cmd->s.bf = 0;
+ else
+ zip_cmd->s.bf = 1;
+
+ zip_cmd->s.ef = 1;
+ /* 0: for Deflate decompression, 3: for LZS decompression */
+ zip_cmd->s.cc = zip_ops->ccode;
+
+ /* IWORD #1*/
+
+ /* adler checksum */
+ zip_cmd->s.adlercrc32 = zip_ops->csum;
+
+ /*
+ * HISTORYLENGTH must be 0x0 for any ZIP decompress operation.
+ * History data is added to a decompression operation via IWORD3.
+ */
+ zip_cmd->s.historylength = 0;
+ zip_cmd->s.ds = 0;
+
+ /* IWORD # 8 and 9 - Output pointer */
+ zip_cmd->s.out_ptr_addr.s.addr = __pa(zip_ops->output);
+ zip_cmd->s.out_ptr_ctl.s.length = zip_ops->output_len;
+
+ /* Maximum number of output-stream bytes that can be written */
+ zip_cmd->s.totaloutputlength = zip_ops->output_len;
+
+ zip_dbg("Data Direct Input case ");
+
+ /* IWORD # 6 and 7 - input pointer */
+ zip_cmd->s.dg = 0;
+ zip_cmd->s.inp_ptr_addr.s.addr = __pa((u8 *)zip_ops->input);
+ zip_cmd->s.inp_ptr_ctl.s.length = zip_ops->input_len;
+
+ /* IWORD # 10 and 11 - Result pointer */
+ zip_cmd->s.res_ptr_addr.s.addr = __pa(result_ptr);
+
+ /* Clearing completion code */
+ result_ptr->s.compcode = 0;
+
+	/* Returning 0 for the time being. */
+ return 0;
+}
+
+/**
+ * zip_inflate - API to offload inflate operation to hardware
+ * @zip_ops: Pointer to zip operation structure
+ * @s: Pointer to the structure representing zip state
+ * @zip_dev: Pointer to zip device structure
+ *
+ * This function prepares the zip inflate command and submits it to the zip
+ * engine for processing.
+ *
+ * Return: 0 if successful or error code
+ */
+int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s,
+ struct zip_device *zip_dev)
+{
+ union zip_inst_s *zip_cmd = &s->zip_cmd;
+ union zip_zres_s *result_ptr = &s->result;
+ u32 queue;
+
+ /* Prepare inflate zip command */
+ prepare_inflate_zcmd(zip_ops, s, zip_cmd);
+
+ atomic64_add(zip_ops->input_len, &zip_dev->stats.decomp_in_bytes);
+
+ /* Load inflate command to zip queue and ring the doorbell */
+ queue = zip_load_instr(zip_cmd, zip_dev);
+
+ /* Decompression requests submitted stats update */
+ atomic64_inc(&zip_dev->stats.decomp_req_submit);
+
+ while (!result_ptr->s.compcode)
+ continue;
+
+ /* Decompression requests completed stats update */
+ atomic64_inc(&zip_dev->stats.decomp_req_complete);
+
+ zip_ops->compcode = result_ptr->s.compcode;
+ switch (zip_ops->compcode) {
+ case ZIP_CMD_NOTDONE:
+ zip_dbg("Zip Instruction not yet completed\n");
+ return ZIP_ERROR;
+
+ case ZIP_CMD_SUCCESS:
+ zip_dbg("Zip Instruction completed successfully\n");
+ break;
+
+ case ZIP_CMD_DYNAMIC_STOP:
+ zip_dbg(" Dynamic stop Initiated\n");
+ break;
+
+ default:
+ zip_dbg("Instruction failed. Code = %d\n", zip_ops->compcode);
+ atomic64_inc(&zip_dev->stats.decomp_bad_reqs);
+ zip_update_cmd_bufs(zip_dev, queue);
+ return ZIP_ERROR;
+ }
+
+ zip_update_cmd_bufs(zip_dev, queue);
+
+ if ((zip_ops->ccode == 3) && (zip_ops->flush == 4) &&
+ (zip_ops->compcode != ZIP_CMD_DYNAMIC_STOP))
+ result_ptr->s.ef = 1;
+
+ zip_ops->csum = result_ptr->s.adler32;
+
+ atomic64_add(result_ptr->s.totalbyteswritten,
+ &zip_dev->stats.decomp_out_bytes);
+
+ if (zip_ops->output_len < result_ptr->s.totalbyteswritten) {
+ zip_err("output_len (%d) < total bytes written (%d)\n",
+ zip_ops->output_len, result_ptr->s.totalbyteswritten);
+ zip_ops->output_len = 0;
+ } else {
+ zip_ops->output_len = result_ptr->s.totalbyteswritten;
+ }
+
+ zip_ops->bytes_read = result_ptr->s.totalbytesread;
+ zip_ops->bits_processed = result_ptr->s.totalbitsprocessed;
+ zip_ops->end_file = result_ptr->s.ef;
+ if (zip_ops->end_file) {
+ switch (zip_ops->format) {
+ case RAW_FORMAT:
+ zip_dbg("RAW Format: %d ", zip_ops->format);
+ /* Get checksum from engine */
+ zip_ops->csum = result_ptr->s.adler32;
+ break;
+
+ case ZLIB_FORMAT:
+ zip_dbg("ZLIB Format: %d ", zip_ops->format);
+ zip_ops->csum = result_ptr->s.adler32;
+ break;
+
+ case GZIP_FORMAT:
+ zip_dbg("GZIP Format: %d ", zip_ops->format);
+ zip_ops->csum = result_ptr->s.crc32;
+ break;
+
+ case LZS_FORMAT:
+ zip_dbg("LZS Format: %d ", zip_ops->format);
+ break;
+
+ default:
+ zip_err("Format error:%d\n", zip_ops->format);
+ }
+ }
+
+ return 0;
+}
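
After zip_inflate() returns, the caller has three things to inspect: the return value (ZIP_ERROR for NOTDONE or an unrecognized completion code), zip_ops->compcode (both ZIP_CMD_SUCCESS and ZIP_CMD_DYNAMIC_STOP fall through as success above), and the output fields the function fills in (output_len clamped to the bytes written, csum per the format switch, bytes_read, end_file). The hypothetical helper below only restates that contract; the error codes it picks are arbitrary, and the real consumer logic lives in zip_crypto.c, which is not in this excerpt:

/* Hypothetical: interpret the fields zip_inflate() filled in. */
static int my_check_inflate_result(struct zip_operation *ops, int ret)
{
	if (ret)				/* ZIP_ERROR path */
		return -EIO;

	if (!ops->output_len)			/* engine wrote more than the buffer held */
		return -ENOSPC;

	if (ops->compcode == ZIP_CMD_DYNAMIC_STOP)
		return -EAGAIN;			/* stopped early, more output pending */

	return 0;	/* ops->csum, ops->bytes_read and ops->end_file are now valid */
}
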
diff --git a/drivers/crypto/cavium/zip/zip_inflate.h b/drivers/crypto/cavium/zip/zip_inflate.h
new file mode 100644
index 000000000000..6b20f179978e
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_inflate.h
@@ -0,0 +1,62 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#ifndef __ZIP_INFLATE_H__
+#define __ZIP_INFLATE_H__
+
+/**
+ * zip_inflate - API to offload inflate operation to hardware
+ * @zip_ops: Pointer to zip operation structure
+ * @s: Pointer to the structure representing zip state
+ * @zip_dev: Pointer to the structure representing zip device
+ *
+ * This function prepares the zip inflate command and submits it to the zip
+ * engine for processing.
+ *
+ * Return: 0 if successful or error code
+ */
+int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s,
+ struct zip_device *zip_dev);
+#endif
diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
new file mode 100644
index 000000000000..1cd8aa488185
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_main.c
@@ -0,0 +1,729 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#include "common.h"
+#include "zip_crypto.h"
+
+#define DRV_NAME "ThunderX-ZIP"
+
+static struct zip_device *zip_dev[MAX_ZIP_DEVICES];
+
+static const struct pci_device_id zip_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDERX_ZIP) },
+ { 0, }
+};
+
+void zip_reg_write(u64 val, u64 __iomem *addr)
+{
+ writeq(val, addr);
+}
+
+u64 zip_reg_read(u64 __iomem *addr)
+{
+ return readq(addr);
+}
+
+/*
+ * Allocates new ZIP device structure
+ * Returns zip_device pointer or NULL if cannot allocate memory for zip_device
+ */
+static struct zip_device *zip_alloc_device(struct pci_dev *pdev)
+{
+ struct zip_device *zip = NULL;
+ int idx;
+
+ for (idx = 0; idx < MAX_ZIP_DEVICES; idx++) {
+ if (!zip_dev[idx])
+ break;
+ }
+
+ /* To ensure that the index is within the limit */
+ if (idx < MAX_ZIP_DEVICES)
+ zip = devm_kzalloc(&pdev->dev, sizeof(*zip), GFP_KERNEL);
+
+ if (!zip)
+ return NULL;
+
+ zip_dev[idx] = zip;
+ zip->index = idx;
+ return zip;
+}
+
+/**
+ * zip_get_device - Get ZIP device based on node id of cpu
+ *
+ * @node: Node id of the current cpu
+ * Return: Pointer to Zip device structure
+ */
+struct zip_device *zip_get_device(int node)
+{
+ if ((node < MAX_ZIP_DEVICES) && (node >= 0))
+ return zip_dev[node];
+
+ zip_err("ZIP device not found for node id %d\n", node);
+ return NULL;
+}
+
+/**
+ * zip_get_node_id - Get the node id of the current cpu
+ *
+ * Return: Node id of the current cpu
+ */
+int zip_get_node_id(void)
+{
+ return cpu_to_node(smp_processor_id());
+}
+
+/* Initializes the ZIP h/w sub-system */
+static int zip_init_hw(struct zip_device *zip)
+{
+ union zip_cmd_ctl cmd_ctl;
+ union zip_constants constants;
+ union zip_que_ena que_ena;
+ union zip_quex_map que_map;
+ union zip_que_pri que_pri;
+
+ union zip_quex_sbuf_addr que_sbuf_addr;
+ union zip_quex_sbuf_ctl que_sbuf_ctl;
+
+ int q = 0;
+
+ /* Enable the ZIP Engine(Core) Clock */
+ cmd_ctl.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CMD_CTL);
+ cmd_ctl.s.forceclk = 1;
+ zip_reg_write(cmd_ctl.u_reg64 & 0xFF, (zip->reg_base + ZIP_CMD_CTL));
+
+ zip_msg("ZIP_CMD_CTL : 0x%016llx",
+ zip_reg_read(zip->reg_base + ZIP_CMD_CTL));
+
+ constants.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CONSTANTS);
+ zip->depth = constants.s.depth;
+ zip->onfsize = constants.s.onfsize;
+ zip->ctxsize = constants.s.ctxsize;
+
+ zip_msg("depth: 0x%016llx , onfsize : 0x%016llx , ctxsize : 0x%016llx",
+ zip->depth, zip->onfsize, zip->ctxsize);
+
+ /*
+ * Program ZIP_QUE(0..7)_SBUF_ADDR and ZIP_QUE(0..7)_SBUF_CTL to
+ * have the correct buffer pointer and size configured for each
+ * instruction queue.
+ */
+ for (q = 0; q < ZIP_NUM_QUEUES; q++) {
+ que_sbuf_ctl.u_reg64 = 0ull;
+ que_sbuf_ctl.s.size = (ZIP_CMD_QBUF_SIZE / sizeof(u64));
+ que_sbuf_ctl.s.inst_be = 0;
+ que_sbuf_ctl.s.stream_id = 0;
+ zip_reg_write(que_sbuf_ctl.u_reg64,
+ (zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));
+
+ zip_msg("QUEX_SBUF_CTL[%d]: 0x%016llx", q,
+ zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));
+ }
+
+ for (q = 0; q < ZIP_NUM_QUEUES; q++) {
+ memset(&zip->iq[q], 0x0, sizeof(struct zip_iq));
+
+ spin_lock_init(&zip->iq[q].lock);
+
+ if (zip_cmd_qbuf_alloc(zip, q)) {
+ while (q != 0) {
+ q--;
+ zip_cmd_qbuf_free(zip, q);
+ }
+ return -ENOMEM;
+ }
+
+ /* Initialize tail ptr to head */
+ zip->iq[q].sw_tail = zip->iq[q].sw_head;
+ zip->iq[q].hw_tail = zip->iq[q].sw_head;
+
+ /* Write the physical addr to register */
+ que_sbuf_addr.u_reg64 = 0ull;
+ que_sbuf_addr.s.ptr = (__pa(zip->iq[q].sw_head) >>
+ ZIP_128B_ALIGN);
+
+ zip_msg("QUE[%d]_PTR(PHYS): 0x%016llx", q,
+ (u64)que_sbuf_addr.s.ptr);
+
+ zip_reg_write(que_sbuf_addr.u_reg64,
+ (zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));
+
+ zip_msg("QUEX_SBUF_ADDR[%d]: 0x%016llx", q,
+ zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));
+
+ zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
+ zip->iq[q].sw_head, zip->iq[q].sw_tail,
+ zip->iq[q].hw_tail);
+ zip_dbg("sw_head phy addr : 0x%lx", que_sbuf_addr.s.ptr);
+ }
+
+ /*
+ * Queue-to-ZIP core mapping
+ * If a queue is not mapped to a particular core, it is equivalent to
+ * the ZIP core being disabled.
+ */
+ que_ena.u_reg64 = 0x0ull;
+ /* Enabling queues based on ZIP_NUM_QUEUES */
+ for (q = 0; q < ZIP_NUM_QUEUES; q++)
+ que_ena.s.ena |= (0x1 << q);
+ zip_reg_write(que_ena.u_reg64, (zip->reg_base + ZIP_QUE_ENA));
+
+ zip_msg("QUE_ENA : 0x%016llx",
+ zip_reg_read(zip->reg_base + ZIP_QUE_ENA));
+
+ for (q = 0; q < ZIP_NUM_QUEUES; q++) {
+ que_map.u_reg64 = 0ull;
+ /* Mapping each queue to two ZIP cores */
+ que_map.s.zce = 0x3;
+ zip_reg_write(que_map.u_reg64,
+ (zip->reg_base + ZIP_QUEX_MAP(q)));
+
+ zip_msg("QUE_MAP(%d) : 0x%016llx", q,
+ zip_reg_read(zip->reg_base + ZIP_QUEX_MAP(q)));
+ }
+
+ que_pri.u_reg64 = 0ull;
+ for (q = 0; q < ZIP_NUM_QUEUES; q++)
+ que_pri.s.pri |= (0x1 << q); /* Higher Priority RR */
+ zip_reg_write(que_pri.u_reg64, (zip->reg_base + ZIP_QUE_PRI));
+
+ zip_msg("QUE_PRI %016llx", zip_reg_read(zip->reg_base + ZIP_QUE_PRI));
+
+ return 0;
+}
+
+static int zip_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct zip_device *zip = NULL;
+ int err;
+
+ zip = zip_alloc_device(pdev);
+ if (!zip)
+ return -ENOMEM;
+
+ dev_info(dev, "Found ZIP device %d %x:%x on Node %d\n", zip->index,
+ pdev->vendor, pdev->device, dev_to_node(dev));
+
+ pci_set_drvdata(pdev, zip);
+ zip->pdev = pdev;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device");
+ goto err_free_device;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x", err);
+ goto err_disable_device;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get usable DMA configuration\n");
+ goto err_release_regions;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get 48-bit DMA for allocations\n");
+ goto err_release_regions;
+ }
+
+ /* MAP configuration registers */
+ zip->reg_base = pci_ioremap_bar(pdev, PCI_CFG_ZIP_PF_BAR0);
+ if (!zip->reg_base) {
+ dev_err(dev, "ZIP: Cannot map BAR0 CSR memory space, aborting");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ /* Initialize ZIP Hardware */
+ err = zip_init_hw(zip);
+ if (err)
+ goto err_release_regions;
+
+ return 0;
+
+err_release_regions:
+ if (zip->reg_base)
+ iounmap(zip->reg_base);
+ pci_release_regions(pdev);
+
+err_disable_device:
+ pci_disable_device(pdev);
+
+err_free_device:
+ pci_set_drvdata(pdev, NULL);
+
+ /* Remove zip_dev from zip_device list, free the zip_device memory */
+ zip_dev[zip->index] = NULL;
+ devm_kfree(dev, zip);
+
+ return err;
+}
+
+static void zip_remove(struct pci_dev *pdev)
+{
+ struct zip_device *zip = pci_get_drvdata(pdev);
+ union zip_cmd_ctl cmd_ctl;
+ int q = 0;
+
+ if (!zip)
+ return;
+
+ if (zip->reg_base) {
+ cmd_ctl.u_reg64 = 0x0ull;
+ cmd_ctl.s.reset = 1; /* Forces ZIP cores to do reset */
+ zip_reg_write(cmd_ctl.u_reg64, (zip->reg_base + ZIP_CMD_CTL));
+ iounmap(zip->reg_base);
+ }
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ /*
+ * Free Command Queue buffers. This free should be called for all
+ * the enabled Queues.
+ */
+ for (q = 0; q < ZIP_NUM_QUEUES; q++)
+ zip_cmd_qbuf_free(zip, q);
+
+ pci_set_drvdata(pdev, NULL);
+ /* remove zip device from zip device list */
+ zip_dev[zip->index] = NULL;
+}
+
+/* PCI Sub-System Interface */
+static struct pci_driver zip_driver = {
+ .name = DRV_NAME,
+ .id_table = zip_id_table,
+ .probe = zip_probe,
+ .remove = zip_remove,
+};
+
+/* Kernel Crypto Subsystem Interface */
+
+static struct crypto_alg zip_comp_deflate = {
+ .cra_name = "deflate",
+ .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
+ .cra_ctxsize = sizeof(struct zip_kernel_ctx),
+ .cra_priority = 300,
+ .cra_module = THIS_MODULE,
+ .cra_init = zip_alloc_comp_ctx_deflate,
+ .cra_exit = zip_free_comp_ctx,
+ .cra_u = { .compress = {
+ .coa_compress = zip_comp_compress,
+ .coa_decompress = zip_comp_decompress
+ } }
+};
+
+static struct crypto_alg zip_comp_lzs = {
+ .cra_name = "lzs",
+ .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
+ .cra_ctxsize = sizeof(struct zip_kernel_ctx),
+ .cra_priority = 300,
+ .cra_module = THIS_MODULE,
+ .cra_init = zip_alloc_comp_ctx_lzs,
+ .cra_exit = zip_free_comp_ctx,
+ .cra_u = { .compress = {
+ .coa_compress = zip_comp_compress,
+ .coa_decompress = zip_comp_decompress
+ } }
+};
+
+static struct scomp_alg zip_scomp_deflate = {
+ .alloc_ctx = zip_alloc_scomp_ctx_deflate,
+ .free_ctx = zip_free_scomp_ctx,
+ .compress = zip_scomp_compress,
+ .decompress = zip_scomp_decompress,
+ .base = {
+ .cra_name = "deflate",
+ .cra_driver_name = "deflate-scomp",
+ .cra_module = THIS_MODULE,
+ .cra_priority = 300,
+ }
+};
+
+static struct scomp_alg zip_scomp_lzs = {
+ .alloc_ctx = zip_alloc_scomp_ctx_lzs,
+ .free_ctx = zip_free_scomp_ctx,
+ .compress = zip_scomp_compress,
+ .decompress = zip_scomp_decompress,
+ .base = {
+ .cra_name = "lzs",
+ .cra_driver_name = "lzs-scomp",
+ .cra_module = THIS_MODULE,
+ .cra_priority = 300,
+ }
+};
+
+static int zip_register_compression_device(void)
+{
+ int ret;
+
+ ret = crypto_register_alg(&zip_comp_deflate);
+ if (ret < 0) {
+ zip_err("Deflate algorithm registration failed\n");
+ return ret;
+ }
+
+ ret = crypto_register_alg(&zip_comp_lzs);
+ if (ret < 0) {
+ zip_err("LZS algorithm registration failed\n");
+ goto err_unregister_alg_deflate;
+ }
+
+ ret = crypto_register_scomp(&zip_scomp_deflate);
+ if (ret < 0) {
+ zip_err("Deflate scomp algorithm registration failed\n");
+ goto err_unregister_alg_lzs;
+ }
+
+ ret = crypto_register_scomp(&zip_scomp_lzs);
+ if (ret < 0) {
+ zip_err("LZS scomp algorithm registration failed\n");
+ goto err_unregister_scomp_deflate;
+ }
+
+ return ret;
+
+err_unregister_scomp_deflate:
+ crypto_unregister_scomp(&zip_scomp_deflate);
+err_unregister_alg_lzs:
+ crypto_unregister_alg(&zip_comp_lzs);
+err_unregister_alg_deflate:
+ crypto_unregister_alg(&zip_comp_deflate);
+
+ return ret;
+}
+
+static void zip_unregister_compression_device(void)
+{
+ crypto_unregister_alg(&zip_comp_deflate);
+ crypto_unregister_alg(&zip_comp_lzs);
+ crypto_unregister_scomp(&zip_scomp_deflate);
+ crypto_unregister_scomp(&zip_scomp_lzs);
+}
+
+/*
+ * debugfs functions
+ */
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+/* Displays ZIP device statistics */
+static int zip_show_stats(struct seq_file *s, void *unused)
+{
+ u64 val = 0ull;
+ u64 avg_chunk = 0ull, avg_cr = 0ull;
+ u32 q = 0;
+
+ int index = 0;
+ struct zip_device *zip;
+ struct zip_stats *st;
+
+ for (index = 0; index < MAX_ZIP_DEVICES; index++) {
+ if (zip_dev[index]) {
+ zip = zip_dev[index];
+ st = &zip->stats;
+
+ /* Get all the pending requests */
+ for (q = 0; q < ZIP_NUM_QUEUES; q++) {
+ val = zip_reg_read((zip->reg_base +
+ ZIP_DBG_COREX_STA(q)));
+ val = (val >> 32);
+ val = val & 0xffffff;
+ atomic64_add(val, &st->pending_req);
+ }
+
+			/* Guard against divide-by-zero before any request completes */
+			if (atomic64_read(&st->comp_req_complete))
+				avg_chunk = (atomic64_read(&st->comp_in_bytes) /
+					     atomic64_read(&st->comp_req_complete));
+
+			if (atomic64_read(&st->comp_out_bytes))
+				avg_cr = (atomic64_read(&st->comp_in_bytes) /
+					  atomic64_read(&st->comp_out_bytes));
+ seq_printf(s, " ZIP Device %d Stats\n"
+ "-----------------------------------\n"
+ "Comp Req Submitted : \t%lld\n"
+ "Comp Req Completed : \t%lld\n"
+ "Compress In Bytes : \t%lld\n"
+ "Compressed Out Bytes : \t%lld\n"
+ "Average Chunk size : \t%llu\n"
+ "Average Compression ratio : \t%llu\n"
+ "Decomp Req Submitted : \t%lld\n"
+ "Decomp Req Completed : \t%lld\n"
+ "Decompress In Bytes : \t%lld\n"
+ "Decompressed Out Bytes : \t%lld\n"
+ "Decompress Bad requests : \t%lld\n"
+ "Pending Req : \t%lld\n"
+ "---------------------------------\n",
+ index,
+ (u64)atomic64_read(&st->comp_req_submit),
+ (u64)atomic64_read(&st->comp_req_complete),
+ (u64)atomic64_read(&st->comp_in_bytes),
+ (u64)atomic64_read(&st->comp_out_bytes),
+ avg_chunk,
+ avg_cr,
+ (u64)atomic64_read(&st->decomp_req_submit),
+ (u64)atomic64_read(&st->decomp_req_complete),
+ (u64)atomic64_read(&st->decomp_in_bytes),
+ (u64)atomic64_read(&st->decomp_out_bytes),
+ (u64)atomic64_read(&st->decomp_bad_reqs),
+ (u64)atomic64_read(&st->pending_req));
+
+ /* Reset pending requests count */
+ atomic64_set(&st->pending_req, 0);
+ }
+ }
+ return 0;
+}
+
+/* Clears stats data */
+static int zip_clear_stats(struct seq_file *s, void *unused)
+{
+ int index = 0;
+
+ for (index = 0; index < MAX_ZIP_DEVICES; index++) {
+ if (zip_dev[index]) {
+ memset(&zip_dev[index]->stats, 0,
+ sizeof(struct zip_stats));
+ seq_printf(s, "Cleared stats for zip %d\n", index);
+ }
+ }
+
+ return 0;
+}
+
+static struct zip_registers zipregs[64] = {
+ {"ZIP_CMD_CTL ", 0x0000ull},
+ {"ZIP_THROTTLE ", 0x0010ull},
+ {"ZIP_CONSTANTS ", 0x00A0ull},
+ {"ZIP_QUE0_MAP ", 0x1400ull},
+ {"ZIP_QUE1_MAP ", 0x1408ull},
+ {"ZIP_QUE_ENA ", 0x0500ull},
+ {"ZIP_QUE_PRI ", 0x0508ull},
+ {"ZIP_QUE0_DONE ", 0x2000ull},
+ {"ZIP_QUE1_DONE ", 0x2008ull},
+ {"ZIP_QUE0_DOORBELL ", 0x4000ull},
+ {"ZIP_QUE1_DOORBELL ", 0x4008ull},
+ {"ZIP_QUE0_SBUF_ADDR ", 0x1000ull},
+ {"ZIP_QUE1_SBUF_ADDR ", 0x1008ull},
+ {"ZIP_QUE0_SBUF_CTL ", 0x1200ull},
+ {"ZIP_QUE1_SBUF_CTL ", 0x1208ull},
+ { NULL, 0}
+};
+
+/* Prints registers' contents */
+static int zip_print_regs(struct seq_file *s, void *unused)
+{
+ u64 val = 0;
+ int i = 0, index = 0;
+
+ for (index = 0; index < MAX_ZIP_DEVICES; index++) {
+ if (zip_dev[index]) {
+ seq_printf(s, "--------------------------------\n"
+ " ZIP Device %d Registers\n"
+ "--------------------------------\n",
+ index);
+
+ i = 0;
+
+ while (zipregs[i].reg_name) {
+ val = zip_reg_read((zip_dev[index]->reg_base +
+ zipregs[i].reg_offset));
+ seq_printf(s, "%s: 0x%016llx\n",
+ zipregs[i].reg_name, val);
+ i++;
+ }
+ }
+ }
+ return 0;
+}
+
+static int zip_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, zip_show_stats, NULL);
+}
+
+static const struct file_operations zip_stats_fops = {
+	.owner = THIS_MODULE,
+	.open  = zip_stats_open,
+	.read  = seq_read,
+	.release = single_release,
+};
+
+static int zip_clear_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, zip_clear_stats, NULL);
+}
+
+static const struct file_operations zip_clear_fops = {
+	.owner = THIS_MODULE,
+	.open  = zip_clear_open,
+	.read  = seq_read,
+	.release = single_release,
+};
+
+static int zip_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, zip_print_regs, NULL);
+}
+
+static const struct file_operations zip_regs_fops = {
+	.owner = THIS_MODULE,
+	.open  = zip_regs_open,
+	.read  = seq_read,
+	.release = single_release,
+};
+
+/* Root directory for thunderx_zip debugfs entry */
+static struct dentry *zip_debugfs_root;
+
+static int __init zip_debugfs_init(void)
+{
+ struct dentry *zip_stats, *zip_clear, *zip_regs;
+
+ if (!debugfs_initialized())
+ return -ENODEV;
+
+ zip_debugfs_root = debugfs_create_dir("thunderx_zip", NULL);
+ if (!zip_debugfs_root)
+ return -ENOMEM;
+
+ /* Creating files for entries inside thunderx_zip directory */
+ zip_stats = debugfs_create_file("zip_stats", 0444,
+ zip_debugfs_root,
+ NULL, &zip_stats_fops);
+ if (!zip_stats)
+ goto failed_to_create;
+
+ zip_clear = debugfs_create_file("zip_clear", 0444,
+ zip_debugfs_root,
+ NULL, &zip_clear_fops);
+ if (!zip_clear)
+ goto failed_to_create;
+
+ zip_regs = debugfs_create_file("zip_regs", 0444,
+ zip_debugfs_root,
+ NULL, &zip_regs_fops);
+ if (!zip_regs)
+ goto failed_to_create;
+
+ return 0;
+
+failed_to_create:
+ debugfs_remove_recursive(zip_debugfs_root);
+ return -ENOENT;
+}
+
+static void __exit zip_debugfs_exit(void)
+{
+ debugfs_remove_recursive(zip_debugfs_root);
+}
+
+#else
+static int __init zip_debugfs_init(void)
+{
+ return 0;
+}
+
+static void __exit zip_debugfs_exit(void) { }
+
+#endif
+/* debugfs - end */
+
+static int __init zip_init_module(void)
+{
+ int ret;
+
+ zip_msg("%s\n", DRV_NAME);
+
+ ret = pci_register_driver(&zip_driver);
+ if (ret < 0) {
+ zip_err("ZIP: pci_register_driver() failed\n");
+ return ret;
+ }
+
+ /* Register with the Kernel Crypto Interface */
+ ret = zip_register_compression_device();
+ if (ret < 0) {
+ zip_err("ZIP: Kernel Crypto Registration failed\n");
+ goto err_pci_unregister;
+ }
+
+ /* comp-decomp statistics are handled with debugfs interface */
+ ret = zip_debugfs_init();
+ if (ret < 0) {
+ zip_err("ZIP: debugfs initialization failed\n");
+ goto err_crypto_unregister;
+ }
+
+ return ret;
+
+err_crypto_unregister:
+ zip_unregister_compression_device();
+
+err_pci_unregister:
+ pci_unregister_driver(&zip_driver);
+ return ret;
+}
+
+static void __exit zip_cleanup_module(void)
+{
+ zip_debugfs_exit();
+
+ /* Unregister from the kernel crypto interface */
+ zip_unregister_compression_device();
+
+ /* Unregister this driver for pci zip devices */
+ pci_unregister_driver(&zip_driver);
+}
+
+module_init(zip_init_module);
+module_exit(zip_cleanup_module);
+
+MODULE_AUTHOR("Cavium Inc");
+MODULE_DESCRIPTION("Cavium Inc ThunderX ZIP Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, zip_id_table);
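
zip_register_compression_device() above publishes the engine twice, through the legacy crypto_alg compression interface and the newer scomp interface, both under the names "deflate" and "lzs" at cra_priority 300. In-kernel users therefore never call zip_deflate()/zip_inflate() directly; they go through the generic crypto API. A rough consumer sketch, using only the standard <linux/crypto.h> compression helpers (whether it actually lands on this driver depends on priority resolution against other registered "deflate" implementations):

#include <linux/crypto.h>
#include <linux/err.h>

/* Illustrative consumer; not part of this patch. */
static int my_try_deflate(const u8 *src, unsigned int slen,
			  u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	int ret;

	/* Picks the highest-priority "deflate" provider currently registered. */
	tfm = crypto_alloc_comp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_comp_compress(tfm, src, slen, dst, dlen);

	crypto_free_comp(tfm);
	return ret;
}
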
diff --git a/drivers/crypto/cavium/zip/zip_main.h b/drivers/crypto/cavium/zip/zip_main.h
new file mode 100644
index 000000000000..64e051f60784
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_main.h
@@ -0,0 +1,121 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#ifndef __ZIP_MAIN_H__
+#define __ZIP_MAIN_H__
+
+#include "zip_device.h"
+#include "zip_regs.h"
+
+/* PCI device IDs */
+#define PCI_DEVICE_ID_THUNDERX_ZIP 0xA01A
+
+/* ZIP device BARs */
+#define PCI_CFG_ZIP_PF_BAR0 0 /* Base addr for normal regs */
+
+/* Maximum available zip queues */
+#define ZIP_MAX_NUM_QUEUES 8
+
+#define ZIP_128B_ALIGN 7
+
+/* Command queue buffer size */
+#define ZIP_CMD_QBUF_SIZE (8064 + 8)
+
+struct zip_registers {
+ char *reg_name;
+ u64 reg_offset;
+};
+
+/* ZIP Compression - Decompression stats */
+struct zip_stats {
+ atomic64_t comp_req_submit;
+ atomic64_t comp_req_complete;
+ atomic64_t decomp_req_submit;
+ atomic64_t decomp_req_complete;
+ atomic64_t pending_req;
+ atomic64_t comp_in_bytes;
+ atomic64_t comp_out_bytes;
+ atomic64_t decomp_in_bytes;
+ atomic64_t decomp_out_bytes;
+ atomic64_t decomp_bad_reqs;
+};
+
+/* ZIP Instruction Queue */
+struct zip_iq {
+ u64 *sw_head;
+ u64 *sw_tail;
+ u64 *hw_tail;
+ u64 done_cnt;
+ u64 pend_cnt;
+ u64 free_flag;
+
+ /* ZIP IQ lock */
+ spinlock_t lock;
+};
+
+/* ZIP Device */
+struct zip_device {
+ u32 index;
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+
+ /* Different ZIP Constants */
+ u64 depth;
+ u64 onfsize;
+ u64 ctxsize;
+
+ struct zip_iq iq[ZIP_MAX_NUM_QUEUES];
+ struct zip_stats stats;
+};
+
+/* Prototypes */
+struct zip_device *zip_get_device(int node_id);
+int zip_get_node_id(void);
+void zip_reg_write(u64 val, u64 __iomem *addr);
+u64 zip_reg_read(u64 __iomem *addr);
+void zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue);
+u32 zip_load_instr(union zip_inst_s *instr, struct zip_device *zip_dev);
+
+#endif /* __ZIP_MAIN_H__ */
diff --git a/drivers/crypto/cavium/zip/zip_mem.c b/drivers/crypto/cavium/zip/zip_mem.c
new file mode 100644
index 000000000000..b3e0843a9169
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_mem.c
@@ -0,0 +1,114 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+
+#include "common.h"
+
+/**
+ * zip_cmd_qbuf_alloc - Allocates a cmd buffer for ZIP Instruction Queue
+ * @zip: Pointer to zip device structure
+ * @q: Queue number to allocate buffer to
+ * Return: 0 if successful, -ENOMEM otherwise
+ */
+int zip_cmd_qbuf_alloc(struct zip_device *zip, int q)
+{
+ zip->iq[q].sw_head = (u64 *)__get_free_pages((GFP_KERNEL | GFP_DMA),
+ get_order(ZIP_CMD_QBUF_SIZE));
+
+ if (!zip->iq[q].sw_head)
+ return -ENOMEM;
+
+ memset(zip->iq[q].sw_head, 0, ZIP_CMD_QBUF_SIZE);
+
+ zip_dbg("cmd_qbuf_alloc[%d] Success : %p\n", q, zip->iq[q].sw_head);
+ return 0;
+}
+
+/**
+ * zip_cmd_qbuf_free - Frees the cmd Queue buffer
+ * @zip: Pointer to zip device structure
+ * @q: Queue number to free buffer of
+ */
+void zip_cmd_qbuf_free(struct zip_device *zip, int q)
+{
+ zip_dbg("Freeing cmd_qbuf 0x%lx\n", zip->iq[q].sw_tail);
+
+ free_pages((u64)zip->iq[q].sw_tail, get_order(ZIP_CMD_QBUF_SIZE));
+}
+
+/**
+ * zip_data_buf_alloc - Allocates memory for a data buffer
+ * @size: Size of the buffer to allocate
+ * Return: Pointer to the allocated buffer
+ */
+u8 *zip_data_buf_alloc(u64 size)
+{
+ u8 *ptr;
+
+ ptr = (u8 *)__get_free_pages((GFP_KERNEL | GFP_DMA),
+ get_order(size));
+
+ if (!ptr)
+ return NULL;
+
+ memset(ptr, 0, size);
+
+ zip_dbg("Data buffer allocation success\n");
+ return ptr;
+}
+
+/**
+ * zip_data_buf_free - Frees the memory of a data buffer
+ * @ptr: Pointer to the buffer
+ * @size: Buffer size
+ */
+void zip_data_buf_free(u8 *ptr, u64 size)
+{
+ zip_dbg("Freeing data buffer 0x%lx\n", ptr);
+
+ free_pages((u64)ptr, get_order(size));
+}
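
zip_cmd_qbuf_alloc() above rounds its request up to whole pages via __get_free_pages() and get_order(). The user-space approximation below (assuming 4 KiB pages; my_get_order() merely mimics what the kernel helper computes) shows where the 8072-byte ZIP_CMD_QBUF_SIZE request ends up:

#include <stdio.h>

/* Rough stand-in for the kernel's get_order(): pages rounded up to a power of two. */
static int my_get_order(unsigned long size, unsigned long page_size)
{
	unsigned long pages = (size + page_size - 1) / page_size;
	int order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	printf("order for %d bytes: %d\n", 8064 + 8, my_get_order(8064 + 8, 4096));
	return 0;
}

With 4 KiB pages that is order 1 (two pages, 8 KiB), and zip_cmd_qbuf_free() passes the same ZIP_CMD_QBUF_SIZE back through get_order() so free_pages() releases the matching order.
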
diff --git a/drivers/crypto/cavium/zip/zip_mem.h b/drivers/crypto/cavium/zip/zip_mem.h
new file mode 100644
index 000000000000..f8f2f08c4a5c
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_mem.h
@@ -0,0 +1,78 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#ifndef __ZIP_MEM_H__
+#define __ZIP_MEM_H__
+
+/**
+ * zip_cmd_qbuf_free - Frees the cmd Queue buffer
+ * @zip: Pointer to zip device structure
+ * @q: Queue number to free the buffer of
+ */
+void zip_cmd_qbuf_free(struct zip_device *zip, int q);
+
+/**
+ * zip_cmd_qbuf_alloc - Allocates a Chunk/cmd buffer for ZIP Inst(cmd) Queue
+ * @zip: Pointer to zip device structure
+ * @q: Queue number to allocate buffer to
+ * Return: 0 if successful, -ENOMEM otherwise
+ */
+int zip_cmd_qbuf_alloc(struct zip_device *zip, int q);
+
+/**
+ * zip_data_buf_alloc - Allocates memory for a data buffer
+ * @size: Size of the buffer to allocate
+ * Return: Pointer to the allocated buffer
+ */
+u8 *zip_data_buf_alloc(u64 size);
+
+/**
+ * zip_data_buf_free - Frees the memory of a data buffer
+ * @ptr: Pointer to the buffer
+ * @size: Buffer size
+ */
+void zip_data_buf_free(u8 *ptr, u64 size);
+
+#endif
diff --git a/drivers/crypto/cavium/zip/zip_regs.h b/drivers/crypto/cavium/zip/zip_regs.h
new file mode 100644
index 000000000000..d0be682305c1
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_regs.h
@@ -0,0 +1,1347 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#ifndef __ZIP_REGS_H__
+#define __ZIP_REGS_H__
+
+/*
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium ZIP.
+ */
+
+#include <linux/kern_levels.h>
+
+/* ZIP invocation result completion status codes */
+#define ZIP_CMD_NOTDONE 0x0
+
+/* Successful completion. */
+#define ZIP_CMD_SUCCESS 0x1
+
+/* Output truncated */
+#define ZIP_CMD_DTRUNC 0x2
+
+/* Dynamic Stop */
+#define ZIP_CMD_DYNAMIC_STOP 0x3
+
+/* Uncompress ran out of input data when IWORD0[EF] was set */
+#define ZIP_CMD_ITRUNC 0x4
+
+/* Uncompress found the reserved block type 3 */
+#define ZIP_CMD_RBLOCK 0x5
+
+/*
+ * Uncompress found LEN != ZIP_CMD_NLEN in an uncompressed block in the input.
+ */
+#define ZIP_CMD_NLEN 0x6
+
+/* Uncompress found a bad code in the main Huffman codes. */
+#define ZIP_CMD_BADCODE 0x7
+
+/* Uncompress found a bad code in the 19 Huffman codes encoding lengths. */
+#define ZIP_CMD_BADCODE2 0x8
+
+/* Compress found a zero-length input. */
+#define ZIP_CMD_ZERO_LEN 0x9
+
+/* The compress or decompress encountered an internal parity error. */
+#define ZIP_CMD_PARITY 0xA
+
+/*
+ * Uncompress found a string identifier that precedes the uncompressed data and
+ * decompression history.
+ */
+#define ZIP_CMD_FATAL 0xB
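/*
 * Illustrative sketch (not part of this patch): a driver consuming these
 * completion codes would typically fold them into errno values before
 * reporting back to the crypto layer. The helper name and the exact mapping
 * below are assumptions.
 */
#include <linux/errno.h>

static inline int zip_compcode_to_errno(u8 compcode)
{
	switch (compcode) {
	case ZIP_CMD_SUCCESS:
	case ZIP_CMD_DYNAMIC_STOP:	/* stopped cleanly mid-stream */
		return 0;
	case ZIP_CMD_NOTDONE:
		return -EAGAIN;		/* engine has not finished yet */
	case ZIP_CMD_DTRUNC:
		return -ENOSPC;		/* output buffer was too small */
	case ZIP_CMD_ITRUNC:
	case ZIP_CMD_RBLOCK:
	case ZIP_CMD_NLEN:
	case ZIP_CMD_BADCODE:
	case ZIP_CMD_BADCODE2:
		return -EINVAL;		/* malformed deflate stream */
	default:
		return -EIO;		/* parity error, fatal or unknown */
	}
}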
+
+/**
+ * enum zip_int_vec_e - ZIP MSI-X vector enumeration, listing the device's
+ * MSI-X interrupt vectors.
+ */
+enum zip_int_vec_e {
+ ZIP_INT_VEC_E_ECCE = 0x10,
+ ZIP_INT_VEC_E_FIFE = 0x11,
+ ZIP_INT_VEC_E_QUE0_DONE = 0x0,
+ ZIP_INT_VEC_E_QUE0_ERR = 0x8,
+ ZIP_INT_VEC_E_QUE1_DONE = 0x1,
+ ZIP_INT_VEC_E_QUE1_ERR = 0x9,
+ ZIP_INT_VEC_E_QUE2_DONE = 0x2,
+ ZIP_INT_VEC_E_QUE2_ERR = 0xa,
+ ZIP_INT_VEC_E_QUE3_DONE = 0x3,
+ ZIP_INT_VEC_E_QUE3_ERR = 0xb,
+ ZIP_INT_VEC_E_QUE4_DONE = 0x4,
+ ZIP_INT_VEC_E_QUE4_ERR = 0xc,
+ ZIP_INT_VEC_E_QUE5_DONE = 0x5,
+ ZIP_INT_VEC_E_QUE5_ERR = 0xd,
+ ZIP_INT_VEC_E_QUE6_DONE = 0x6,
+ ZIP_INT_VEC_E_QUE6_ERR = 0xe,
+ ZIP_INT_VEC_E_QUE7_DONE = 0x7,
+ ZIP_INT_VEC_E_QUE7_ERR = 0xf,
+ ZIP_INT_VEC_E_ENUM_LAST = 0x12,
+};
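/*
 * The vector numbering above is regular: queue N's DONE vector is N and its
 * ERR vector is N + 8, with ECCE and FIFE at the top. Two helpers built on
 * that observation (illustrative only, not part of this patch):
 */
static inline int zip_que_done_vec(int q)
{
	return ZIP_INT_VEC_E_QUE0_DONE + q;	/* vectors 0x0 - 0x7 */
}

static inline int zip_que_err_vec(int q)
{
	return ZIP_INT_VEC_E_QUE0_ERR + q;	/* vectors 0x8 - 0xf */
}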
+
+/**
+ * union zip_zptr_addr_s - ZIP Generic Pointer Structure for ADDR.
+ *
+ * It is the generic format of pointers in ZIP_INST_S.
+ */
+union zip_zptr_addr_s {
+ u64 u_reg64;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_49_63 : 15;
+ u64 addr : 49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 addr : 49;
+ u64 reserved_49_63 : 15;
+#endif
+ } s;
+
+};
+
+/**
+ * union zip_zptr_ctl_s - ZIP Generic Pointer Structure for CTL.
+ *
+ * It is the generic format of pointers in ZIP_INST_S.
+ */
+union zip_zptr_ctl_s {
+ u64 u_reg64;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_112_127 : 16;
+ u64 length : 16;
+ u64 reserved_67_95 : 29;
+ u64 fw : 1;
+ u64 nc : 1;
+ u64 data_be : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 data_be : 1;
+ u64 nc : 1;
+ u64 fw : 1;
+ u64 reserved_67_95 : 29;
+ u64 length : 16;
+ u64 reserved_112_127 : 16;
+#endif
+ } s;
+};
+
+/**
+ * union zip_inst_s - ZIP Instruction Structure.
+ * Each ZIP instruction has 16 words (they are called IWORD0 to IWORD15 within
+ * the structure).
+ */
+union zip_inst_s {
+ u64 u_reg64[16];
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 doneint : 1;
+ u64 reserved_56_62 : 7;
+ u64 totaloutputlength : 24;
+ u64 reserved_27_31 : 5;
+ u64 exn : 3;
+ u64 reserved_23_23 : 1;
+ u64 exbits : 7;
+ u64 reserved_12_15 : 4;
+ u64 sf : 1;
+ u64 ss : 2;
+ u64 cc : 2;
+ u64 ef : 1;
+ u64 bf : 1;
+ u64 ce : 1;
+ u64 reserved_3_3 : 1;
+ u64 ds : 1;
+ u64 dg : 1;
+ u64 hg : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 hg : 1;
+ u64 dg : 1;
+ u64 ds : 1;
+ u64 reserved_3_3 : 1;
+ u64 ce : 1;
+ u64 bf : 1;
+ u64 ef : 1;
+ u64 cc : 2;
+ u64 ss : 2;
+ u64 sf : 1;
+ u64 reserved_12_15 : 4;
+ u64 exbits : 7;
+ u64 reserved_23_23 : 1;
+ u64 exn : 3;
+ u64 reserved_27_31 : 5;
+ u64 totaloutputlength : 24;
+ u64 reserved_56_62 : 7;
+ u64 doneint : 1;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 historylength : 16;
+ u64 reserved_96_111 : 16;
+ u64 adlercrc32 : 32;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 adlercrc32 : 32;
+ u64 reserved_96_111 : 16;
+ u64 historylength : 16;
+#endif
+ union zip_zptr_addr_s ctx_ptr_addr;
+ union zip_zptr_ctl_s ctx_ptr_ctl;
+ union zip_zptr_addr_s his_ptr_addr;
+ union zip_zptr_ctl_s his_ptr_ctl;
+ union zip_zptr_addr_s inp_ptr_addr;
+ union zip_zptr_ctl_s inp_ptr_ctl;
+ union zip_zptr_addr_s out_ptr_addr;
+ union zip_zptr_ctl_s out_ptr_ctl;
+ union zip_zptr_addr_s res_ptr_addr;
+ union zip_zptr_ctl_s res_ptr_ctl;
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_817_831 : 15;
+ u64 wq_ptr : 49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 wq_ptr : 49;
+ u64 reserved_817_831 : 15;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_882_895 : 14;
+ u64 tt : 2;
+ u64 reserved_874_879 : 6;
+ u64 grp : 10;
+ u64 tag : 32;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 tag : 32;
+ u64 grp : 10;
+ u64 reserved_874_879 : 6;
+ u64 tt : 2;
+ u64 reserved_882_895 : 14;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_896_959 : 64;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 reserved_896_959 : 64;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_960_1023 : 64;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 reserved_960_1023 : 64;
+#endif
+ } s;
+};
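/*
 * Rough illustration (not from the driver sources): filling the minimum set
 * of IWORD0/IWORD1 fields plus the input, output and result pointers for a
 * single-shot operation with both BF and EF set. The 16-bit buffer lengths,
 * the 24-byte result size and the choice to request a DONE interrupt are
 * assumptions.
 */
#include <linux/string.h>

static void zip_fill_inst_example(union zip_inst_s *inst,
				  u64 in_dma, u16 in_len,
				  u64 out_dma, u16 out_len,
				  u64 res_dma)
{
	memset(inst, 0, sizeof(*inst));

	inst->s.bf = 1;			/* first chunk of the stream... */
	inst->s.ef = 1;			/* ...and also the last one */
	inst->s.doneint = 1;		/* raise DONEINT on completion */
	inst->s.totaloutputlength = out_len;

	inst->s.inp_ptr_addr.s.addr = in_dma;
	inst->s.inp_ptr_ctl.s.length = in_len;

	inst->s.out_ptr_addr.s.addr = out_dma;
	inst->s.out_ptr_ctl.s.length = out_len;

	inst->s.res_ptr_addr.s.addr = res_dma;
	inst->s.res_ptr_ctl.s.length = 24;	/* result structure size, assumed */
}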
+
+/**
+ * union zip_nptr_s - ZIP Instruction Next-Chunk-Buffer Pointer (NPTR)
+ * Structure
+ *
+ * ZIP_NPTR structure is used to chain all the zip instruction buffers
+ * together. ZIP instruction buffers are managed (allocated and released) by
+ * the software.
+ */
+union zip_nptr_s {
+ u64 u_reg64;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_49_63 : 15;
+ u64 addr : 49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 addr : 49;
+ u64 reserved_49_63 : 15;
+#endif
+ } s;
+};
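/*
 * Illustrative sketch (not part of this patch): chaining works by writing an
 * NPTR word into the last 64-bit slot of the current chunk so the hardware
 * follows it to the next buffer. The slot convention is an assumption.
 */
static void zip_chain_chunk(u64 *cur_chunk, unsigned int chunk_words,
			    u64 next_chunk_dma)
{
	union zip_nptr_s nptr = { .u_reg64 = 0 };

	nptr.s.addr = next_chunk_dma;
	cur_chunk[chunk_words - 1] = nptr.u_reg64;
}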
+
+/**
+ * union zip_zptr_s - ZIP Generic Pointer Structure.
+ *
+ * It is the generic format of pointers in ZIP_INST_S.
+ */
+union zip_zptr_s {
+ u64 u_reg64[2];
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_49_63 : 15;
+ u64 addr : 49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 addr : 49;
+ u64 reserved_49_63 : 15;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_112_127 : 16;
+ u64 length : 16;
+ u64 reserved_67_95 : 29;
+ u64 fw : 1;
+ u64 nc : 1;
+ u64 data_be : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 data_be : 1;
+ u64 nc : 1;
+ u64 fw : 1;
+ u64 reserved_67_95 : 29;
+ u64 length : 16;
+ u64 reserved_112_127 : 16;
+#endif
+ } s;
+};
+
+/**
+ * union zip_zres_s - ZIP Result Structure
+ *
+ * The ZIP coprocessor writes the result structure after it completes the
+ * invocation. The result structure is exactly 24 bytes, and each invocation of
+ * the ZIP coprocessor produces exactly one result structure.
+ */
+union zip_zres_s {
+ u64 u_reg64[3];
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 crc32 : 32;
+ u64 adler32 : 32;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 adler32 : 32;
+ u64 crc32 : 32;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 totalbyteswritten : 32;
+ u64 totalbytesread : 32;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 totalbytesread : 32;
+ u64 totalbyteswritten : 32;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 totalbitsprocessed : 32;
+ u64 doneint : 1;
+ u64 reserved_155_158 : 4;
+ u64 exn : 3;
+ u64 reserved_151_151 : 1;
+ u64 exbits : 7;
+ u64 reserved_137_143 : 7;
+ u64 ef : 1;
+
+ volatile u64 compcode : 8;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+
+ volatile u64 compcode : 8;
+ u64 ef : 1;
+ u64 reserved_137_143 : 7;
+ u64 exbits : 7;
+ u64 reserved_151_151 : 1;
+ u64 exn : 3;
+ u64 reserved_155_158 : 4;
+ u64 doneint : 1;
+ u64 totalbitsprocessed : 32;
+#endif
+ } s;
+};
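/*
 * Because compcode is declared volatile, it appears intended to be polled by
 * software before the other result fields are read. A hedged example of such
 * a wait loop; the ordering guarantee and the timeout policy are assumptions.
 */
#include <linux/errno.h>
#include <linux/jiffies.h>

static int zip_wait_for_result(union zip_zres_s *zres, unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	while (zres->s.compcode == ZIP_CMD_NOTDONE) {
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		cpu_relax();
	}
	return 0;	/* remaining result fields are now stable */
}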
+
+/**
+ * union zip_cmd_ctl - Structure representing the register that controls
+ * clock and reset.
+ */
+union zip_cmd_ctl {
+ u64 u_reg64;
+ struct zip_cmd_ctl_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_2_63 : 62;
+ u64 forceclk : 1;
+ u64 reset : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 reset : 1;
+ u64 forceclk : 1;
+ u64 reserved_2_63 : 62;
+#endif
+ } s;
+};
+
+#define ZIP_CMD_CTL 0x0ull
+
+/**
+ * union zip_constants - Data structure representing the register that contains
+ * all of the current implementation-related parameters of the zip core in this
+ * chip.
+ */
+union zip_constants {
+ u64 u_reg64;
+ struct zip_constants_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 nexec : 8;
+ u64 reserved_49_55 : 7;
+ u64 syncflush_capable : 1;
+ u64 depth : 16;
+ u64 onfsize : 12;
+ u64 ctxsize : 12;
+ u64 reserved_1_7 : 7;
+ u64 disabled : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 disabled : 1;
+ u64 reserved_1_7 : 7;
+ u64 ctxsize : 12;
+ u64 onfsize : 12;
+ u64 depth : 16;
+ u64 syncflush_capable : 1;
+ u64 reserved_49_55 : 7;
+ u64 nexec : 8;
+#endif
+ } s;
+};
+
+#define ZIP_CONSTANTS 0x00A0ull
+
+/**
+ * union zip_corex_bist_status - Represents registers which have the BIST
+ * status of memories in zip cores.
+ *
+ * Each bit is the BIST result of an individual memory
+ * (per bit, 0 = pass and 1 = fail).
+ */
+union zip_corex_bist_status {
+ u64 u_reg64;
+ struct zip_corex_bist_status_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_53_63 : 11;
+ u64 bstatus : 53;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 bstatus : 53;
+ u64 reserved_53_63 : 11;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_COREX_BIST_STATUS(u64 param1)
+{
+ if (param1 <= 1)
+ return 0x0520ull + (param1 & 1) * 0x8ull;
+ pr_err("ZIP_COREX_BIST_STATUS: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_ctl_bist_status - Represents register that has the BIST status of
+ * memories in ZIP_CTL (instruction buffer, G/S pointer FIFO, input data
+ * buffer, output data buffers).
+ *
+ * Each bit is the BIST result of an individual memory
+ * (per bit, 0 = pass and 1 = fail).
+ */
+union zip_ctl_bist_status {
+ u64 u_reg64;
+ struct zip_ctl_bist_status_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_9_63 : 55;
+ u64 bstatus : 9;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 bstatus : 9;
+ u64 reserved_9_63 : 55;
+#endif
+ } s;
+};
+
+#define ZIP_CTL_BIST_STATUS 0x0510ull
+
+/**
+ * union zip_ctl_cfg - Represents the register that controls the behavior of
+ * the ZIP DMA engines.
+ *
+ * It is recommended to keep default values for normal operation. Changing the
+ * values of the fields may be useful for diagnostics.
+ */
+union zip_ctl_cfg {
+ u64 u_reg64;
+ struct zip_ctl_cfg_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_52_63 : 12;
+ u64 ildf : 4;
+ u64 reserved_36_47 : 12;
+ u64 drtf : 4;
+ u64 reserved_27_31 : 5;
+ u64 stcf : 3;
+ u64 reserved_19_23 : 5;
+ u64 ldf : 3;
+ u64 reserved_2_15 : 14;
+ u64 busy : 1;
+ u64 reserved_0_0 : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 reserved_0_0 : 1;
+ u64 busy : 1;
+ u64 reserved_2_15 : 14;
+ u64 ldf : 3;
+ u64 reserved_19_23 : 5;
+ u64 stcf : 3;
+ u64 reserved_27_31 : 5;
+ u64 drtf : 4;
+ u64 reserved_36_47 : 12;
+ u64 ildf : 4;
+ u64 reserved_52_63 : 12;
+#endif
+ } s;
+};
+
+#define ZIP_CTL_CFG 0x0560ull
+
+/**
+ * union zip_dbg_corex_inst - Represents the registers that reflect the status
+ * of the current instruction that the ZIP core is executing or has executed.
+ *
+ * These registers are only for debug use.
+ */
+union zip_dbg_corex_inst {
+ u64 u_reg64;
+ struct zip_dbg_corex_inst_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 busy : 1;
+ u64 reserved_35_62 : 28;
+ u64 qid : 3;
+ u64 iid : 32;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 iid : 32;
+ u64 qid : 3;
+ u64 reserved_35_62 : 28;
+ u64 busy : 1;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_DBG_COREX_INST(u64 param1)
+{
+ if (param1 <= 1)
+ return 0x0640ull + (param1 & 1) * 0x8ull;
+ pr_err("ZIP_DBG_COREX_INST: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_dbg_corex_sta - Represents registers that reflect the status of
+ * the zip cores.
+ *
+ * They are for debug use only.
+ */
+union zip_dbg_corex_sta {
+ u64 u_reg64;
+ struct zip_dbg_corex_sta_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 busy : 1;
+ u64 reserved_37_62 : 26;
+ u64 ist : 5;
+ u64 nie : 32;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 nie : 32;
+ u64 ist : 5;
+ u64 reserved_37_62 : 26;
+ u64 busy : 1;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_DBG_COREX_STA(u64 param1)
+{
+ if (param1 <= 1)
+ return 0x0680ull + (param1 & 1) * 0x8ull;
+ pr_err("ZIP_DBG_COREX_STA: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_dbg_quex_sta - Represents registers that reflect the status of
+ * the zip instruction queues.
+ *
+ * They are for debug use only.
+ */
+union zip_dbg_quex_sta {
+ u64 u_reg64;
+ struct zip_dbg_quex_sta_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 busy : 1;
+ u64 reserved_56_62 : 7;
+ u64 rqwc : 24;
+ u64 nii : 32;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 nii : 32;
+ u64 rqwc : 24;
+ u64 reserved_56_62 : 7;
+ u64 busy : 1;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_DBG_QUEX_STA(u64 param1)
+{
+ if (param1 <= 7)
+ return 0x1800ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_DBG_QUEX_STA: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_ecc_ctl - Represents the register that enables ECC for each
+ * individual internal memory that requires ECC.
+ *
+ * For debug purpose, it can also flip one or two bits in the ECC data.
+ */
+union zip_ecc_ctl {
+ u64 u_reg64;
+ struct zip_ecc_ctl_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_19_63 : 45;
+ u64 vmem_cdis : 1;
+ u64 vmem_fs : 2;
+ u64 reserved_15_15 : 1;
+ u64 idf1_cdis : 1;
+ u64 idf1_fs : 2;
+ u64 reserved_11_11 : 1;
+ u64 idf0_cdis : 1;
+ u64 idf0_fs : 2;
+ u64 reserved_7_7 : 1;
+ u64 gspf_cdis : 1;
+ u64 gspf_fs : 2;
+ u64 reserved_3_3 : 1;
+ u64 iqf_cdis : 1;
+ u64 iqf_fs : 2;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 iqf_fs : 2;
+ u64 iqf_cdis : 1;
+ u64 reserved_3_3 : 1;
+ u64 gspf_fs : 2;
+ u64 gspf_cdis : 1;
+ u64 reserved_7_7 : 1;
+ u64 idf0_fs : 2;
+ u64 idf0_cdis : 1;
+ u64 reserved_11_11 : 1;
+ u64 idf1_fs : 2;
+ u64 idf1_cdis : 1;
+ u64 reserved_15_15 : 1;
+ u64 vmem_fs : 2;
+ u64 vmem_cdis : 1;
+ u64 reserved_19_63 : 45;
+#endif
+ } s;
+};
+
+#define ZIP_ECC_CTL 0x0568ull
+
+/* NCB - zip_ecce_ena_w1c */
+union zip_ecce_ena_w1c {
+ u64 u_reg64;
+ struct zip_ecce_ena_w1c_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_37_63 : 27;
+ u64 dbe : 5;
+ u64 reserved_5_31 : 27;
+ u64 sbe : 5;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 sbe : 5;
+ u64 reserved_5_31 : 27;
+ u64 dbe : 5;
+ u64 reserved_37_63 : 27;
+#endif
+ } s;
+};
+
+#define ZIP_ECCE_ENA_W1C 0x0598ull
+
+/* NCB - zip_ecce_ena_w1s */
+union zip_ecce_ena_w1s {
+ u64 u_reg64;
+ struct zip_ecce_ena_w1s_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_37_63 : 27;
+ u64 dbe : 5;
+ u64 reserved_5_31 : 27;
+ u64 sbe : 5;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 sbe : 5;
+ u64 reserved_5_31 : 27;
+ u64 dbe : 5;
+ u64 reserved_37_63 : 27;
+#endif
+ } s;
+};
+
+#define ZIP_ECCE_ENA_W1S 0x0590ull
+
+/**
+ * union zip_ecce_int - Represents the register that contains the status of the
+ * ECC interrupt sources.
+ */
+union zip_ecce_int {
+ u64 u_reg64;
+ struct zip_ecce_int_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_37_63 : 27;
+ u64 dbe : 5;
+ u64 reserved_5_31 : 27;
+ u64 sbe : 5;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 sbe : 5;
+ u64 reserved_5_31 : 27;
+ u64 dbe : 5;
+ u64 reserved_37_63 : 27;
+#endif
+ } s;
+};
+
+#define ZIP_ECCE_INT 0x0580ull
+
+/* NCB - zip_ecce_int_w1s */
+union zip_ecce_int_w1s {
+ u64 u_reg64;
+ struct zip_ecce_int_w1s_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_37_63 : 27;
+ u64 dbe : 5;
+ u64 reserved_5_31 : 27;
+ u64 sbe : 5;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 sbe : 5;
+ u64 reserved_5_31 : 27;
+ u64 dbe : 5;
+ u64 reserved_37_63 : 27;
+#endif
+ } s;
+};
+
+#define ZIP_ECCE_INT_W1S 0x0588ull
+
+/* NCB - zip_fife_ena_w1c */
+union zip_fife_ena_w1c {
+ u64 u_reg64;
+ struct zip_fife_ena_w1c_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_42_63 : 22;
+ u64 asserts : 42;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 asserts : 42;
+ u64 reserved_42_63 : 22;
+#endif
+ } s;
+};
+
+#define ZIP_FIFE_ENA_W1C 0x0090ull
+
+/* NCB - zip_fife_ena_w1s */
+union zip_fife_ena_w1s {
+ u64 u_reg64;
+ struct zip_fife_ena_w1s_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_42_63 : 22;
+ u64 asserts : 42;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 asserts : 42;
+ u64 reserved_42_63 : 22;
+#endif
+ } s;
+};
+
+#define ZIP_FIFE_ENA_W1S 0x0088ull
+
+/* NCB - zip_fife_int */
+union zip_fife_int {
+ u64 u_reg64;
+ struct zip_fife_int_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_42_63 : 22;
+ u64 asserts : 42;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 asserts : 42;
+ u64 reserved_42_63 : 22;
+#endif
+ } s;
+};
+
+#define ZIP_FIFE_INT 0x0078ull
+
+/* NCB - zip_fife_int_w1s */
+union zip_fife_int_w1s {
+ u64 u_reg64;
+ struct zip_fife_int_w1s_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_42_63 : 22;
+ u64 asserts : 42;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 asserts : 42;
+ u64 reserved_42_63 : 22;
+#endif
+ } s;
+};
+
+#define ZIP_FIFE_INT_W1S 0x0080ull
+
+/**
+ * union zip_msix_pbax - Represents the register that is the MSI-X PBA table
+ *
+ * The bit number is indexed by the ZIP_INT_VEC_E enumeration.
+ */
+union zip_msix_pbax {
+ u64 u_reg64;
+ struct zip_msix_pbax_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 pend : 64;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 pend : 64;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_MSIX_PBAX(u64 param1)
+{
+ if (param1 == 0)
+ return 0x0000838000FF0000ull;
+ pr_err("ZIP_MSIX_PBAX: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_msix_vecx_addr - Represents the register that is the MSI-X vector
+ * table, indexed by the ZIP_INT_VEC_E enumeration.
+ */
+union zip_msix_vecx_addr {
+ u64 u_reg64;
+ struct zip_msix_vecx_addr_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_49_63 : 15;
+ u64 addr : 47;
+ u64 reserved_1_1 : 1;
+ u64 secvec : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 secvec : 1;
+ u64 reserved_1_1 : 1;
+ u64 addr : 47;
+ u64 reserved_49_63 : 15;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_MSIX_VECX_ADDR(u64 param1)
+{
+ if (param1 <= 17)
+ return 0x0000838000F00000ull + (param1 & 31) * 0x10ull;
+ pr_err("ZIP_MSIX_VECX_ADDR: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_msix_vecx_ctl - Represents the register that is the MSI-X vector
+ * table, indexed by the ZIP_INT_VEC_E enumeration.
+ */
+union zip_msix_vecx_ctl {
+ u64 u_reg64;
+ struct zip_msix_vecx_ctl_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_33_63 : 31;
+ u64 mask : 1;
+ u64 reserved_20_31 : 12;
+ u64 data : 20;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 data : 20;
+ u64 reserved_20_31 : 12;
+ u64 mask : 1;
+ u64 reserved_33_63 : 31;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_MSIX_VECX_CTL(u64 param1)
+{
+ if (param1 <= 17)
+ return 0x0000838000F00008ull + (param1 & 31) * 0x10ull;
+ pr_err("ZIP_MSIX_VECX_CTL: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_quex_done - Represents the registers that contain the per-queue
+ * instruction done count.
+ */
+union zip_quex_done {
+ u64 u_reg64;
+ struct zip_quex_done_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_20_63 : 44;
+ u64 done : 20;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 done : 20;
+ u64 reserved_20_63 : 44;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_DONE(u64 param1)
+{
+ if (param1 <= 7)
+ return 0x2000ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_DONE: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_quex_done_ack - Represents the registers that, when written to,
+ * decrement the per-queue instruction done count.
+ */
+union zip_quex_done_ack {
+ u64 u_reg64;
+ struct zip_quex_done_ack_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_20_63 : 44;
+ u64 done_ack : 20;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 done_ack : 20;
+ u64 reserved_20_63 : 44;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_DONE_ACK(u64 param1)
+{
+ if (param1 <= 7)
+ return 0x2200ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_DONE_ACK: %llu\n", param1);
+ return 0;
+}
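/*
 * The DONE/DONE_ACK pair forms a simple credit scheme: software reads how
 * many instructions have completed and writes the same count back to
 * acknowledge them. Illustrative helper only; the MMIO access style and the
 * 20-bit mask are assumptions, and 'base' stands for the ioremapped ZIP BAR.
 */
#include <linux/io.h>

static u32 zip_drain_done(void __iomem *base, int q)
{
	u64 done = readq(base + ZIP_QUEX_DONE(q)) & 0xfffff; /* DONE[19:0] */

	if (done)
		writeq(done, base + ZIP_QUEX_DONE_ACK(q));

	return (u32)done;
}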
+
+/**
+ * union zip_quex_done_ena_w1c - Represents the register that, when written
+ * with 1, disables the DONEINT interrupt for the queue.
+ */
+union zip_quex_done_ena_w1c {
+ u64 u_reg64;
+ struct zip_quex_done_ena_w1c_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_1_63 : 63;
+ u64 done_ena : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 done_ena : 1;
+ u64 reserved_1_63 : 63;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_DONE_ENA_W1C(u64 param1)
+{
+ if (param1 <= 7)
+ return 0x2600ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_DONE_ENA_W1C: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_quex_done_ena_w1s - Represents the register that, when written
+ * with 1, enables the DONEINT interrupt for the queue.
+ */
+union zip_quex_done_ena_w1s {
+ u64 u_reg64;
+ struct zip_quex_done_ena_w1s_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_1_63 : 63;
+ u64 done_ena : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 done_ena : 1;
+ u64 reserved_1_63 : 63;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_DONE_ENA_W1S(u64 param1)
+{
+ if (param1 <= 7)
+ return 0x2400ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_DONE_ENA_W1S: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_quex_done_wait - Represents the register that specifies the
+ * per-queue interrupt coalescing settings.
+ */
+union zip_quex_done_wait {
+ u64 u_reg64;
+ struct zip_quex_done_wait_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_48_63 : 16;
+ u64 time_wait : 16;
+ u64 reserved_20_31 : 12;
+ u64 num_wait : 20;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 num_wait : 20;
+ u64 reserved_20_31 : 12;
+ u64 time_wait : 16;
+ u64 reserved_48_63 : 16;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_DONE_WAIT(u64 param1)
+{
+ if (param1 <= 7)
+ return 0x2800ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_DONE_WAIT: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_quex_doorbell - Represents doorbell registers for the ZIP
+ * instruction queues.
+ */
+union zip_quex_doorbell {
+ u64 u_reg64;
+ struct zip_quex_doorbell_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_20_63 : 44;
+ u64 dbell_cnt : 20;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 dbell_cnt : 20;
+ u64 reserved_20_63 : 44;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_DOORBELL(u64 param1)
+{
+ if (param1 <= 7)
+ return 0x4000ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_DOORBELL: %llu\n", param1);
+ return 0;
+}
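/*
 * Illustrative sketch (not part of this patch): after new instructions have
 * been copied into a queue's command buffer, the doorbell tells the hardware
 * how many fresh entries to fetch.
 */
static void zip_ring_doorbell(void __iomem *base, int q, u32 new_cmds)
{
	union zip_quex_doorbell dbell = { .u_reg64 = 0 };

	dbell.s.dbell_cnt = new_cmds;	/* newly queued instruction count */
	writeq(dbell.u_reg64, base + ZIP_QUEX_DOORBELL(q));
}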
+
+union zip_quex_err_ena_w1c {
+ u64 u_reg64;
+ struct zip_quex_err_ena_w1c_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_5_63 : 59;
+ u64 mdbe : 1;
+ u64 nwrp : 1;
+ u64 nrrp : 1;
+ u64 irde : 1;
+ u64 dovf : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 dovf : 1;
+ u64 irde : 1;
+ u64 nrrp : 1;
+ u64 nwrp : 1;
+ u64 mdbe : 1;
+ u64 reserved_5_63 : 59;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_ERR_ENA_W1C(u64 param1)
+{
+ if (param1 <= 7)
+ return 0x3600ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_ERR_ENA_W1C: %llu\n", param1);
+ return 0;
+}
+
+union zip_quex_err_ena_w1s {
+ u64 u_reg64;
+ struct zip_quex_err_ena_w1s_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_5_63 : 59;
+ u64 mdbe : 1;
+ u64 nwrp : 1;
+ u64 nrrp : 1;
+ u64 irde : 1;
+ u64 dovf : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 dovf : 1;
+ u64 irde : 1;
+ u64 nrrp : 1;
+ u64 nwrp : 1;
+ u64 mdbe : 1;
+ u64 reserved_5_63 : 59;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_ERR_ENA_W1S(u64 param1)
+{
+ if (param1 <= 7)
+ return 0x3400ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_ERR_ENA_W1S: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_quex_err_int - Represents registers that contain the per-queue
+ * error interrupts.
+ */
+union zip_quex_err_int {
+ u64 u_reg64;
+ struct zip_quex_err_int_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_5_63 : 59;
+ u64 mdbe : 1;
+ u64 nwrp : 1;
+ u64 nrrp : 1;
+ u64 irde : 1;
+ u64 dovf : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 dovf : 1;
+ u64 irde : 1;
+ u64 nrrp : 1;
+ u64 nwrp : 1;
+ u64 mdbe : 1;
+ u64 reserved_5_63 : 59;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_ERR_INT(u64 param1)
+{
+ if (param1 <= 7)
+ return 0x3000ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_ERR_INT: %llu\n", param1);
+ return 0;
+}
+
+/* NCB - zip_que#_err_int_w1s */
+union zip_quex_err_int_w1s {
+ u64 u_reg64;
+ struct zip_quex_err_int_w1s_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_5_63 : 59;
+ u64 mdbe : 1;
+ u64 nwrp : 1;
+ u64 nrrp : 1;
+ u64 irde : 1;
+ u64 dovf : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 dovf : 1;
+ u64 irde : 1;
+ u64 nrrp : 1;
+ u64 nwrp : 1;
+ u64 mdbe : 1;
+ u64 reserved_5_63 : 59;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_ERR_INT_W1S(u64 param1)
+{
+ if (param1 <= 7)
+ return 0x3200ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_ERR_INT_W1S: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_quex_gcfg - Represents the registers that reflect the status of
+ * the zip instruction queues; debug use only.
+ */
+union zip_quex_gcfg {
+ u64 u_reg64;
+ struct zip_quex_gcfg_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_4_63 : 60;
+ u64 iqb_ldwb : 1;
+ u64 cbw_sty : 1;
+ u64 l2ld_cmd : 2;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 l2ld_cmd : 2;
+ u64 cbw_sty : 1;
+ u64 iqb_ldwb : 1;
+ u64 reserved_4_63 : 60;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_GCFG(u64 param1)
+{
+ if (param1 <= 7)
+ return 0x1A00ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_GCFG: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_quex_map - Represents the registers that control how each
+ * instruction queue maps to zip cores.
+ */
+union zip_quex_map {
+ u64 u_reg64;
+ struct zip_quex_map_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_2_63 : 62;
+ u64 zce : 2;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 zce : 2;
+ u64 reserved_2_63 : 62;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_MAP(u64 param1)
+{
+ if (param1 <= 7)
+ return 0x1400ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_MAP: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_quex_sbuf_addr - Represents the registers that set the buffer
+ * parameters for the instruction queues.
+ *
+ * When quiescent (i.e. outstanding doorbell count is 0), it is safe to rewrite
+ * this register to effectively reset the command buffer state machine.
+ * These registers must be programmed after SW programs the corresponding
+ * ZIP_QUE(0..7)_SBUF_CTL.
+ */
+union zip_quex_sbuf_addr {
+ u64 u_reg64;
+ struct zip_quex_sbuf_addr_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_49_63 : 15;
+ u64 ptr : 42;
+ u64 off : 7;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 off : 7;
+ u64 ptr : 42;
+ u64 reserved_49_63 : 15;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_SBUF_ADDR(u64 param1)
+{
+ if (param1 <= 7)
+ return 0x1000ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_SBUF_ADDR: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_quex_sbuf_ctl - Represents the registers that set the buffer
+ * parameters for the instruction queues.
+ *
+ * When quiescent (i.e. outstanding doorbell count is 0), it is safe to rewrite
+ * this register to effectively reset the command buffer state machine.
+ * These registers must be programmed before SW programs the corresponding
+ * ZIP_QUE(0..7)_SBUF_ADDR.
+ */
+union zip_quex_sbuf_ctl {
+ u64 u_reg64;
+ struct zip_quex_sbuf_ctl_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_45_63 : 19;
+ u64 size : 13;
+ u64 inst_be : 1;
+ u64 reserved_24_30 : 7;
+ u64 stream_id : 8;
+ u64 reserved_12_15 : 4;
+ u64 aura : 12;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 aura : 12;
+ u64 reserved_12_15 : 4;
+ u64 stream_id : 8;
+ u64 reserved_24_30 : 7;
+ u64 inst_be : 1;
+ u64 size : 13;
+ u64 reserved_45_63 : 19;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_SBUF_CTL(u64 param1)
+{
+ if (param1 <= 7)
+ return 0x1200ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_SBUF_CTL: %llu\n", param1);
+ return 0;
+}
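/*
 * Read together with the note on ZIP_QUE(0..7)_SBUF_ADDR above, the required
 * programming order is CTL first, then ADDR. A sketch of that sequence; the
 * 128-byte alignment implied by the 7-bit OFF field is an assumption.
 */
static void zip_setup_queue_sbuf(void __iomem *base, int q,
				 u64 buf_dma, u16 queue_size)
{
	union zip_quex_sbuf_ctl ctl = { .u_reg64 = 0 };
	union zip_quex_sbuf_addr addr = { .u_reg64 = 0 };

	ctl.s.size = queue_size;	/* command-buffer size, in instructions */
	writeq(ctl.u_reg64, base + ZIP_QUEX_SBUF_CTL(q));

	addr.s.ptr = buf_dma >> 7;	/* pointer above the 128-byte offset */
	writeq(addr.u_reg64, base + ZIP_QUEX_SBUF_ADDR(q));
}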
+
+/**
+ * union zip_que_ena - Represents queue enable register
+ *
+ * If a queue is disabled, ZIP_CTL stops fetching instructions from the queue.
+ */
+union zip_que_ena {
+ u64 u_reg64;
+ struct zip_que_ena_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_8_63 : 56;
+ u64 ena : 8;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 ena : 8;
+ u64 reserved_8_63 : 56;
+#endif
+ } s;
+};
+
+#define ZIP_QUE_ENA 0x0500ull
+
+/**
+ * union zip_que_pri - Represents the register that defines the priority
+ * between instruction queues.
+ */
+union zip_que_pri {
+ u64 u_reg64;
+ struct zip_que_pri_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_8_63 : 56;
+ u64 pri : 8;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 pri : 8;
+ u64 reserved_8_63 : 56;
+#endif
+ } s;
+};
+
+#define ZIP_QUE_PRI 0x0508ull
+
+/**
+ * union zip_throttle - Represents the register that controls the maximum
+ * number of in-flight X2I data fetch transactions.
+ *
+ * Writing 0 to this register causes the ZIP module to temporarily suspend NCB
+ * accesses; it is not recommended for normal operation, but may be useful for
+ * diagnostics.
+ */
+union zip_throttle {
+ u64 u_reg64;
+ struct zip_throttle_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_6_63 : 58;
+ u64 ld_infl : 6;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 ld_infl : 6;
+ u64 reserved_6_63 : 58;
+#endif
+ } s;
+};
+
+#define ZIP_THROTTLE 0x0010ull
+
+#endif /* __ZIP_REGS_H__ */
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 346ceb8f17bd..60919a3ec53b 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -12,4 +12,6 @@ ccp-crypto-objs := ccp-crypto-main.o \
ccp-crypto-aes.o \
ccp-crypto-aes-cmac.o \
ccp-crypto-aes-xts.o \
+ ccp-crypto-aes-galois.o \
+ ccp-crypto-des3.o \
ccp-crypto-sha.o
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
new file mode 100644
index 000000000000..38ee6f348ea9
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -0,0 +1,252 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) AES GCM crypto API support
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Gary R Hook <gary.hook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/internal/aead.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/scatterwalk.h>
+#include <linux/delay.h>
+
+#include "ccp-crypto.h"
+
+#define AES_GCM_IVSIZE 12
+
+static int ccp_aes_gcm_complete(struct crypto_async_request *async_req, int ret)
+{
+ return ret;
+}
+
+static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
+
+ switch (key_len) {
+ case AES_KEYSIZE_128:
+ ctx->u.aes.type = CCP_AES_TYPE_128;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->u.aes.type = CCP_AES_TYPE_192;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->u.aes.type = CCP_AES_TYPE_256;
+ break;
+ default:
+ crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ ctx->u.aes.mode = CCP_AES_MODE_GCM;
+ ctx->u.aes.key_len = key_len;
+
+ memcpy(ctx->u.aes.key, key, key_len);
+ sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
+
+ return 0;
+}
+
+static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ return 0;
+}
+
+static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
+ struct ccp_aes_req_ctx *rctx = aead_request_ctx(req);
+ struct scatterlist *iv_sg = NULL;
+ unsigned int iv_len = 0;
+ int i;
+ int ret = 0;
+
+ if (!ctx->u.aes.key_len)
+ return -EINVAL;
+
+ if (ctx->u.aes.mode != CCP_AES_MODE_GCM)
+ return -EINVAL;
+
+ if (!req->iv)
+ return -EINVAL;
+
+ /*
+ * 5 parts:
+ * plaintext/ciphertext input
+ * AAD
+ * key
+ * IV
+ * Destination+tag buffer
+ */
+
+ /* Prepare the IV: 12 bytes + an integer (counter) */
+ memcpy(rctx->iv, req->iv, AES_GCM_IVSIZE);
+ for (i = 0; i < 3; i++)
+ rctx->iv[i + AES_GCM_IVSIZE] = 0;
+ rctx->iv[AES_BLOCK_SIZE - 1] = 1;
+
+ /* Set up a scatterlist for the IV */
+ iv_sg = &rctx->iv_sg;
+ iv_len = AES_BLOCK_SIZE;
+ sg_init_one(iv_sg, rctx->iv, iv_len);
+
+ /* The AAD + plaintext are concatenated in the src buffer */
+ memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+ INIT_LIST_HEAD(&rctx->cmd.entry);
+ rctx->cmd.engine = CCP_ENGINE_AES;
+ rctx->cmd.u.aes.type = ctx->u.aes.type;
+ rctx->cmd.u.aes.mode = ctx->u.aes.mode;
+ rctx->cmd.u.aes.action = encrypt;
+ rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
+ rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
+ rctx->cmd.u.aes.iv = iv_sg;
+ rctx->cmd.u.aes.iv_len = iv_len;
+ rctx->cmd.u.aes.src = req->src;
+ rctx->cmd.u.aes.src_len = req->cryptlen;
+ rctx->cmd.u.aes.aad_len = req->assoclen;
+
+ /* The cipher text + the tag are in the dst buffer */
+ rctx->cmd.u.aes.dst = req->dst;
+
+ ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+ return ret;
+}
+
+static int ccp_aes_gcm_encrypt(struct aead_request *req)
+{
+ return ccp_aes_gcm_crypt(req, CCP_AES_ACTION_ENCRYPT);
+}
+
+static int ccp_aes_gcm_decrypt(struct aead_request *req)
+{
+ return ccp_aes_gcm_crypt(req, CCP_AES_ACTION_DECRYPT);
+}
+
+static int ccp_aes_gcm_cra_init(struct crypto_aead *tfm)
+{
+ struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
+
+ ctx->complete = ccp_aes_gcm_complete;
+ ctx->u.aes.key_len = 0;
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));
+
+ return 0;
+}
+
+static void ccp_aes_gcm_cra_exit(struct crypto_tfm *tfm)
+{
+}
+
+static struct aead_alg ccp_aes_gcm_defaults = {
+ .setkey = ccp_aes_gcm_setkey,
+ .setauthsize = ccp_aes_gcm_setauthsize,
+ .encrypt = ccp_aes_gcm_encrypt,
+ .decrypt = ccp_aes_gcm_decrypt,
+ .init = ccp_aes_gcm_cra_init,
+ .ivsize = AES_GCM_IVSIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .base = {
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ccp_ctx),
+ .cra_priority = CCP_CRA_PRIORITY,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_exit = ccp_aes_gcm_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+};
+
+struct ccp_aes_aead_def {
+ enum ccp_aes_mode mode;
+ unsigned int version;
+ const char *name;
+ const char *driver_name;
+ unsigned int blocksize;
+ unsigned int ivsize;
+ struct aead_alg *alg_defaults;
+};
+
+static struct ccp_aes_aead_def aes_aead_algs[] = {
+ {
+ .mode = CCP_AES_MODE_GHASH,
+ .version = CCP_VERSION(5, 0),
+ .name = "gcm(aes)",
+ .driver_name = "gcm-aes-ccp",
+ .blocksize = 1,
+ .ivsize = AES_BLOCK_SIZE,
+ .alg_defaults = &ccp_aes_gcm_defaults,
+ },
+};
+
+static int ccp_register_aes_aead(struct list_head *head,
+ const struct ccp_aes_aead_def *def)
+{
+ struct ccp_crypto_aead *ccp_aead;
+ struct aead_alg *alg;
+ int ret;
+
+ ccp_aead = kzalloc(sizeof(*ccp_aead), GFP_KERNEL);
+ if (!ccp_aead)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ccp_aead->entry);
+
+ ccp_aead->mode = def->mode;
+
+ /* Copy the defaults and override as necessary */
+ alg = &ccp_aead->alg;
+ *alg = *def->alg_defaults;
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ def->driver_name);
+ alg->base.cra_blocksize = def->blocksize;
+ alg->base.cra_ablkcipher.ivsize = def->ivsize;
+
+ ret = crypto_register_aead(alg);
+ if (ret) {
+ pr_err("%s aead algorithm registration error (%d)\n",
+ alg->base.cra_name, ret);
+ kfree(ccp_aead);
+ return ret;
+ }
+
+ list_add(&ccp_aead->entry, head);
+
+ return 0;
+}
+
+int ccp_register_aes_aeads(struct list_head *head)
+{
+ int i, ret;
+ unsigned int ccpversion = ccp_version();
+
+ for (i = 0; i < ARRAY_SIZE(aes_aead_algs); i++) {
+ if (aes_aead_algs[i].version > ccpversion)
+ continue;
+ ret = ccp_register_aes_aead(head, &aes_aead_algs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
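/*
 * Usage sketch (not part of this patch): once registered, the transform is
 * reached through the normal kernel AEAD API rather than by calling into
 * this file. Error handling is trimmed and the request/completion path is
 * only described in comments.
 */
#include <crypto/aead.h>
#include <linux/err.h>

static int example_use_ccp_gcm(const u8 *key, unsigned int keylen)
{
	struct crypto_aead *tfm;
	int ret;

	/* "gcm(aes)" lets the crypto core pick the highest-priority
	 * implementation; "gcm-aes-ccp" would request this driver directly.
	 */
	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, key, keylen);
	if (!ret)
		ret = crypto_aead_setauthsize(tfm, 16);

	/* A real caller would now allocate an aead_request, describe the
	 * AAD + plaintext with aead_request_set_ad()/aead_request_set_crypt(),
	 * call crypto_aead_encrypt() and wait for the async completion.
	 */
	crypto_free_aead(tfm);
	return ret;
}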
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
new file mode 100644
index 000000000000..5af7347ae03c
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -0,0 +1,254 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) DES3 crypto API support
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Gary R Hook <ghook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/des.h>
+
+#include "ccp-crypto.h"
+
+static int ccp_des3_complete(struct crypto_async_request *async_req, int ret)
+{
+ struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+ struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct ccp_des3_req_ctx *rctx = ablkcipher_request_ctx(req);
+
+ if (ret)
+ return ret;
+
+ if (ctx->u.des3.mode != CCP_DES3_MODE_ECB)
+ memcpy(req->info, rctx->iv, DES3_EDE_BLOCK_SIZE);
+
+ return 0;
+}
+
+static int ccp_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+ struct ccp_crypto_ablkcipher_alg *alg =
+ ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm));
+ u32 *flags = &tfm->base.crt_flags;
+
+
+ /* From des_generic.c:
+ *
+ * RFC2451:
+ * If the first two or last two independent 64-bit keys are
+ * equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the
+ * same as DES. Implementers MUST reject keys that exhibit this
+ * property.
+ */
+ const u32 *K = (const u32 *)key;
+
+ if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
+ !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
+ (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ return -EINVAL;
+ }
+
+ /* It's not clear that there is any support for a keysize of 112.
+ * If needed, the caller should make K1 == K3
+ */
+ ctx->u.des3.type = CCP_DES3_TYPE_168;
+ ctx->u.des3.mode = alg->mode;
+ ctx->u.des3.key_len = key_len;
+
+ memcpy(ctx->u.des3.key, key, key_len);
+ sg_init_one(&ctx->u.des3.key_sg, ctx->u.des3.key, key_len);
+
+ return 0;
+}
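/*
 * Clarifying sketch (not part of this patch): the XOR/OR expression above is
 * a branch-free equality test on the three 64-bit key parts. Written out
 * long-hand it is equivalent to the helper below.
 */
#include <linux/string.h>
#include <linux/types.h>

static bool des3_key_is_degenerate(const u8 *key)
{
	u64 k1, k2, k3;

	memcpy(&k1, key, 8);
	memcpy(&k2, key + 8, 8);
	memcpy(&k3, key + 16, 8);

	/* K1 == K2 or K2 == K3 collapses 3DES-EDE into single DES */
	return k1 == k2 || k2 == k3;
}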
+
+static int ccp_des3_crypt(struct ablkcipher_request *req, bool encrypt)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct ccp_des3_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct scatterlist *iv_sg = NULL;
+ unsigned int iv_len = 0;
+ int ret;
+
+ if (!ctx->u.des3.key_len)
+ return -EINVAL;
+
+ if (((ctx->u.des3.mode == CCP_DES3_MODE_ECB) ||
+ (ctx->u.des3.mode == CCP_DES3_MODE_CBC)) &&
+ (req->nbytes & (DES3_EDE_BLOCK_SIZE - 1)))
+ return -EINVAL;
+
+ if (ctx->u.des3.mode != CCP_DES3_MODE_ECB) {
+ if (!req->info)
+ return -EINVAL;
+
+ memcpy(rctx->iv, req->info, DES3_EDE_BLOCK_SIZE);
+ iv_sg = &rctx->iv_sg;
+ iv_len = DES3_EDE_BLOCK_SIZE;
+ sg_init_one(iv_sg, rctx->iv, iv_len);
+ }
+
+ memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+ INIT_LIST_HEAD(&rctx->cmd.entry);
+ rctx->cmd.engine = CCP_ENGINE_DES3;
+ rctx->cmd.u.des3.type = ctx->u.des3.type;
+ rctx->cmd.u.des3.mode = ctx->u.des3.mode;
+ rctx->cmd.u.des3.action = (encrypt)
+ ? CCP_DES3_ACTION_ENCRYPT
+ : CCP_DES3_ACTION_DECRYPT;
+ rctx->cmd.u.des3.key = &ctx->u.des3.key_sg;
+ rctx->cmd.u.des3.key_len = ctx->u.des3.key_len;
+ rctx->cmd.u.des3.iv = iv_sg;
+ rctx->cmd.u.des3.iv_len = iv_len;
+ rctx->cmd.u.des3.src = req->src;
+ rctx->cmd.u.des3.src_len = req->nbytes;
+ rctx->cmd.u.des3.dst = req->dst;
+
+ ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+ return ret;
+}
+
+static int ccp_des3_encrypt(struct ablkcipher_request *req)
+{
+ return ccp_des3_crypt(req, true);
+}
+
+static int ccp_des3_decrypt(struct ablkcipher_request *req)
+{
+ return ccp_des3_crypt(req, false);
+}
+
+static int ccp_des3_cra_init(struct crypto_tfm *tfm)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ctx->complete = ccp_des3_complete;
+ ctx->u.des3.key_len = 0;
+
+ tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_des3_req_ctx);
+
+ return 0;
+}
+
+static void ccp_des3_cra_exit(struct crypto_tfm *tfm)
+{
+}
+
+static struct crypto_alg ccp_des3_defaults = {
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ccp_ctx),
+ .cra_priority = CCP_CRA_PRIORITY,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = ccp_des3_cra_init,
+ .cra_exit = ccp_des3_cra_exit,
+ .cra_module = THIS_MODULE,
+ .cra_ablkcipher = {
+ .setkey = ccp_des3_setkey,
+ .encrypt = ccp_des3_encrypt,
+ .decrypt = ccp_des3_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ },
+};
+
+struct ccp_des3_def {
+ enum ccp_des3_mode mode;
+ unsigned int version;
+ const char *name;
+ const char *driver_name;
+ unsigned int blocksize;
+ unsigned int ivsize;
+ struct crypto_alg *alg_defaults;
+};
+
+static struct ccp_des3_def des3_algs[] = {
+ {
+ .mode = CCP_DES3_MODE_ECB,
+ .version = CCP_VERSION(5, 0),
+ .name = "ecb(des3_ede)",
+ .driver_name = "ecb-des3-ccp",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .ivsize = 0,
+ .alg_defaults = &ccp_des3_defaults,
+ },
+ {
+ .mode = CCP_DES3_MODE_CBC,
+ .version = CCP_VERSION(5, 0),
+ .name = "cbc(des3_ede)",
+ .driver_name = "cbc-des3-ccp",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .alg_defaults = &ccp_des3_defaults,
+ },
+};
+
+static int ccp_register_des3_alg(struct list_head *head,
+ const struct ccp_des3_def *def)
+{
+ struct ccp_crypto_ablkcipher_alg *ccp_alg;
+ struct crypto_alg *alg;
+ int ret;
+
+ ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
+ if (!ccp_alg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ccp_alg->entry);
+
+ ccp_alg->mode = def->mode;
+
+ /* Copy the defaults and override as necessary */
+ alg = &ccp_alg->alg;
+ *alg = *def->alg_defaults;
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ def->driver_name);
+ alg->cra_blocksize = def->blocksize;
+ alg->cra_ablkcipher.ivsize = def->ivsize;
+
+ ret = crypto_register_alg(alg);
+ if (ret) {
+ pr_err("%s ablkcipher algorithm registration error (%d)\n",
+ alg->cra_name, ret);
+ kfree(ccp_alg);
+ return ret;
+ }
+
+ list_add(&ccp_alg->entry, head);
+
+ return 0;
+}
+
+int ccp_register_des3_algs(struct list_head *head)
+{
+ int i, ret;
+ unsigned int ccpversion = ccp_version();
+
+ for (i = 0; i < ARRAY_SIZE(des3_algs); i++) {
+ if (des3_algs[i].version > ccpversion)
+ continue;
+ ret = ccp_register_des3_alg(head, &des3_algs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index e0380e59c361..8dccbddabef1 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -33,9 +33,14 @@ static unsigned int sha_disable;
module_param(sha_disable, uint, 0444);
MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");
+static unsigned int des3_disable;
+module_param(des3_disable, uint, 0444);
+MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value");
+
/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(cipher_algs);
+static LIST_HEAD(aead_algs);
/* For any tfm, requests for that tfm must be returned on the order
* received. With multiple queues available, the CCP can process more
@@ -335,6 +340,16 @@ static int ccp_register_algs(void)
ret = ccp_register_aes_xts_algs(&cipher_algs);
if (ret)
return ret;
+
+ ret = ccp_register_aes_aeads(&aead_algs);
+ if (ret)
+ return ret;
+ }
+
+ if (!des3_disable) {
+ ret = ccp_register_des3_algs(&cipher_algs);
+ if (ret)
+ return ret;
}
if (!sha_disable) {
@@ -350,6 +365,7 @@ static void ccp_unregister_algs(void)
{
struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;
+ struct ccp_crypto_aead *aead_alg, *aead_tmp;
list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
crypto_unregister_ahash(&ahash_alg->alg);
@@ -362,6 +378,12 @@ static void ccp_unregister_algs(void)
list_del(&ablk_alg->entry);
kfree(ablk_alg);
}
+
+ list_for_each_entry_safe(aead_alg, aead_tmp, &aead_algs, entry) {
+ crypto_unregister_aead(&aead_alg->alg);
+ list_del(&aead_alg->entry);
+ kfree(aead_alg);
+ }
}
static int ccp_crypto_init(void)
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 84a652be4274..6b46eea94932 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -146,6 +146,12 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
case CCP_SHA_TYPE_256:
rctx->cmd.u.sha.ctx_len = SHA256_DIGEST_SIZE;
break;
+ case CCP_SHA_TYPE_384:
+ rctx->cmd.u.sha.ctx_len = SHA384_DIGEST_SIZE;
+ break;
+ case CCP_SHA_TYPE_512:
+ rctx->cmd.u.sha.ctx_len = SHA512_DIGEST_SIZE;
+ break;
default:
/* Should never get here */
break;
@@ -393,6 +399,22 @@ static struct ccp_sha_def sha_algs[] = {
.digest_size = SHA256_DIGEST_SIZE,
.block_size = SHA256_BLOCK_SIZE,
},
+ {
+ .version = CCP_VERSION(5, 0),
+ .name = "sha384",
+ .drv_name = "sha384-ccp",
+ .type = CCP_SHA_TYPE_384,
+ .digest_size = SHA384_DIGEST_SIZE,
+ .block_size = SHA384_BLOCK_SIZE,
+ },
+ {
+ .version = CCP_VERSION(5, 0),
+ .name = "sha512",
+ .drv_name = "sha512-ccp",
+ .type = CCP_SHA_TYPE_512,
+ .digest_size = SHA512_DIGEST_SIZE,
+ .block_size = SHA512_BLOCK_SIZE,
+ },
};
static int ccp_register_hmac_alg(struct list_head *head,
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index 8335b32e815e..dd5bf15f06e5 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -19,10 +19,14 @@
#include <linux/ccp.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
+#include <crypto/internal/aead.h>
+#include <crypto/aead.h>
#include <crypto/ctr.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
+#define CCP_LOG_LEVEL KERN_INFO
+
#define CCP_CRA_PRIORITY 300
struct ccp_crypto_ablkcipher_alg {
@@ -33,6 +37,14 @@ struct ccp_crypto_ablkcipher_alg {
struct crypto_alg alg;
};
+struct ccp_crypto_aead {
+ struct list_head entry;
+
+ u32 mode;
+
+ struct aead_alg alg;
+};
+
struct ccp_crypto_ahash_alg {
struct list_head entry;
@@ -95,6 +107,9 @@ struct ccp_aes_req_ctx {
struct scatterlist iv_sg;
u8 iv[AES_BLOCK_SIZE];
+ struct scatterlist tag_sg;
+ u8 tag[AES_BLOCK_SIZE];
+
/* Fields used for RFC3686 requests */
u8 *rfc3686_info;
u8 rfc3686_iv[AES_BLOCK_SIZE];
@@ -137,9 +152,29 @@ struct ccp_aes_cmac_exp_ctx {
u8 buf[AES_BLOCK_SIZE];
};
-/***** SHA related defines *****/
-#define MAX_SHA_CONTEXT_SIZE SHA256_DIGEST_SIZE
-#define MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
+/***** 3DES related defines *****/
+struct ccp_des3_ctx {
+ enum ccp_engine engine;
+ enum ccp_des3_type type;
+ enum ccp_des3_mode mode;
+
+ struct scatterlist key_sg;
+ unsigned int key_len;
+ u8 key[AES_MAX_KEY_SIZE];
+};
+
+struct ccp_des3_req_ctx {
+ struct scatterlist iv_sg;
+ u8 iv[AES_BLOCK_SIZE];
+
+ struct ccp_cmd cmd;
+};
+
+/* SHA-related defines
+ * These values must be large enough to accommodate any variant
+ */
+#define MAX_SHA_CONTEXT_SIZE SHA512_DIGEST_SIZE
+#define MAX_SHA_BLOCK_SIZE SHA512_BLOCK_SIZE
struct ccp_sha_ctx {
struct scatterlist opad_sg;
@@ -199,6 +234,7 @@ struct ccp_ctx {
union {
struct ccp_aes_ctx aes;
struct ccp_sha_ctx sha;
+ struct ccp_des3_ctx des3;
} u;
};
@@ -210,6 +246,8 @@ struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
int ccp_register_aes_algs(struct list_head *head);
int ccp_register_aes_cmac_algs(struct list_head *head);
int ccp_register_aes_xts_algs(struct list_head *head);
+int ccp_register_aes_aeads(struct list_head *head);
int ccp_register_sha_algs(struct list_head *head);
+int ccp_register_des3_algs(struct list_head *head);
#endif
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 7bc09989e18a..367c2e30656f 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -315,17 +315,73 @@ static int ccp_perform_ecc(struct ccp_op *op)
return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}
+static void ccp_disable_queue_interrupts(struct ccp_device *ccp)
+{
+ iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+}
+
+static void ccp_enable_queue_interrupts(struct ccp_device *ccp)
+{
+ iowrite32(ccp->qim, ccp->io_regs + IRQ_MASK_REG);
+}
+
+static void ccp_irq_bh(unsigned long data)
+{
+ struct ccp_device *ccp = (struct ccp_device *)data;
+ struct ccp_cmd_queue *cmd_q;
+ u32 q_int, status;
+ unsigned int i;
+
+ status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
+
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+
+ q_int = status & (cmd_q->int_ok | cmd_q->int_err);
+ if (q_int) {
+ cmd_q->int_status = status;
+ cmd_q->q_status = ioread32(cmd_q->reg_status);
+ cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+
+ /* On error, only save the first error value */
+ if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
+ cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+ cmd_q->int_rcvd = 1;
+
+ /* Acknowledge the interrupt and wake the kthread */
+ iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
+ wake_up_interruptible(&cmd_q->int_queue);
+ }
+ }
+ ccp_enable_queue_interrupts(ccp);
+}
+
+static irqreturn_t ccp_irq_handler(int irq, void *data)
+{
+ struct device *dev = data;
+ struct ccp_device *ccp = dev_get_drvdata(dev);
+
+ ccp_disable_queue_interrupts(ccp);
+ if (ccp->use_tasklet)
+ tasklet_schedule(&ccp->irq_tasklet);
+ else
+ ccp_irq_bh((unsigned long)ccp);
+
+ return IRQ_HANDLED;
+}
+
static int ccp_init(struct ccp_device *ccp)
{
struct device *dev = ccp->dev;
struct ccp_cmd_queue *cmd_q;
struct dma_pool *dma_pool;
char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
- unsigned int qmr, qim, i;
+ unsigned int qmr, i;
int ret;
/* Find available queues */
- qim = 0;
+ ccp->qim = 0;
qmr = ioread32(ccp->io_regs + Q_MASK_REG);
for (i = 0; i < MAX_HW_QUEUES; i++) {
if (!(qmr & (1 << i)))
@@ -370,7 +426,7 @@ static int ccp_init(struct ccp_device *ccp)
init_waitqueue_head(&cmd_q->int_queue);
/* Build queue interrupt mask (two interrupts per queue) */
- qim |= cmd_q->int_ok | cmd_q->int_err;
+ ccp->qim |= cmd_q->int_ok | cmd_q->int_err;
#ifdef CONFIG_ARM64
/* For arm64 set the recommended queue cache settings */
@@ -388,14 +444,14 @@ static int ccp_init(struct ccp_device *ccp)
dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
/* Disable and clear interrupts until ready */
- iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+ ccp_disable_queue_interrupts(ccp);
for (i = 0; i < ccp->cmd_q_count; i++) {
cmd_q = &ccp->cmd_q[i];
ioread32(cmd_q->reg_int_status);
ioread32(cmd_q->reg_status);
}
- iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+ iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
/* Request an irq */
ret = ccp->get_irq(ccp);
@@ -404,6 +460,11 @@ static int ccp_init(struct ccp_device *ccp)
goto e_pool;
}
+ /* Initialize the ISR tasklet, if used */
+ if (ccp->use_tasklet)
+ tasklet_init(&ccp->irq_tasklet, ccp_irq_bh,
+ (unsigned long)ccp);
+
dev_dbg(dev, "Starting threads...\n");
/* Create a kthread for each queue */
for (i = 0; i < ccp->cmd_q_count; i++) {
@@ -426,7 +487,7 @@ static int ccp_init(struct ccp_device *ccp)
dev_dbg(dev, "Enabling interrupts...\n");
/* Enable interrupts */
- iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
+ ccp_enable_queue_interrupts(ccp);
dev_dbg(dev, "Registering device...\n");
ccp_add_device(ccp);
@@ -463,7 +524,7 @@ static void ccp_destroy(struct ccp_device *ccp)
{
struct ccp_cmd_queue *cmd_q;
struct ccp_cmd *cmd;
- unsigned int qim, i;
+ unsigned int i;
/* Unregister the DMA engine */
ccp_dmaengine_unregister(ccp);
@@ -474,22 +535,15 @@ static void ccp_destroy(struct ccp_device *ccp)
/* Remove this device from the list of available units */
ccp_del_device(ccp);
- /* Build queue interrupt mask (two interrupt masks per queue) */
- qim = 0;
- for (i = 0; i < ccp->cmd_q_count; i++) {
- cmd_q = &ccp->cmd_q[i];
- qim |= cmd_q->int_ok | cmd_q->int_err;
- }
-
/* Disable and clear interrupts */
- iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+ ccp_disable_queue_interrupts(ccp);
for (i = 0; i < ccp->cmd_q_count; i++) {
cmd_q = &ccp->cmd_q[i];
ioread32(cmd_q->reg_int_status);
ioread32(cmd_q->reg_status);
}
- iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+ iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
/* Stop the queue kthreads */
for (i = 0; i < ccp->cmd_q_count; i++)
@@ -516,43 +570,10 @@ static void ccp_destroy(struct ccp_device *ccp)
}
}
-static irqreturn_t ccp_irq_handler(int irq, void *data)
-{
- struct device *dev = data;
- struct ccp_device *ccp = dev_get_drvdata(dev);
- struct ccp_cmd_queue *cmd_q;
- u32 q_int, status;
- unsigned int i;
-
- status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
-
- for (i = 0; i < ccp->cmd_q_count; i++) {
- cmd_q = &ccp->cmd_q[i];
-
- q_int = status & (cmd_q->int_ok | cmd_q->int_err);
- if (q_int) {
- cmd_q->int_status = status;
- cmd_q->q_status = ioread32(cmd_q->reg_status);
- cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
-
- /* On error, only save the first error value */
- if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
- cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
-
- cmd_q->int_rcvd = 1;
-
- /* Acknowledge the interrupt and wake the kthread */
- iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
- wake_up_interruptible(&cmd_q->int_queue);
- }
- }
-
- return IRQ_HANDLED;
-}
-
static const struct ccp_actions ccp3_actions = {
.aes = ccp_perform_aes,
.xts_aes = ccp_perform_xts_aes,
+ .des3 = NULL,
.sha = ccp_perform_sha,
.rsa = ccp_perform_rsa,
.passthru = ccp_perform_passthru,
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index fc08b4ed69d9..ccbe32d5dd1c 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -108,6 +108,12 @@ union ccp_function {
u16 type:2;
} aes_xts;
struct {
+ u16 size:7;
+ u16 encrypt:1;
+ u16 mode:5;
+ u16 type:2;
+ } des3;
+ struct {
u16 rsvd1:10;
u16 type:4;
u16 rsvd2:1;
@@ -139,6 +145,10 @@ union ccp_function {
#define CCP_AES_TYPE(p) ((p)->aes.type)
#define CCP_XTS_SIZE(p) ((p)->aes_xts.size)
#define CCP_XTS_ENCRYPT(p) ((p)->aes_xts.encrypt)
+#define CCP_DES3_SIZE(p) ((p)->des3.size)
+#define CCP_DES3_ENCRYPT(p) ((p)->des3.encrypt)
+#define CCP_DES3_MODE(p) ((p)->des3.mode)
+#define CCP_DES3_TYPE(p) ((p)->des3.type)
#define CCP_SHA_TYPE(p) ((p)->sha.type)
#define CCP_RSA_SIZE(p) ((p)->rsa.size)
#define CCP_PT_BYTESWAP(p) ((p)->pt.byteswap)
@@ -388,6 +398,47 @@ static int ccp5_perform_sha(struct ccp_op *op)
return ccp5_do_cmd(&desc, op->cmd_q);
}
+static int ccp5_perform_des3(struct ccp_op *op)
+{
+ struct ccp5_desc desc;
+ union ccp_function function;
+ u32 key_addr = op->sb_key * LSB_ITEM_SIZE;
+
+ /* Zero out all the fields of the command desc */
+ memset(&desc, 0, sizeof(struct ccp5_desc));
+
+ CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_DES3;
+
+ CCP5_CMD_SOC(&desc) = op->soc;
+ CCP5_CMD_IOC(&desc) = 1;
+ CCP5_CMD_INIT(&desc) = op->init;
+ CCP5_CMD_EOM(&desc) = op->eom;
+ CCP5_CMD_PROT(&desc) = 0;
+
+ function.raw = 0;
+ CCP_DES3_ENCRYPT(&function) = op->u.des3.action;
+ CCP_DES3_MODE(&function) = op->u.des3.mode;
+ CCP_DES3_TYPE(&function) = op->u.des3.type;
+ CCP5_CMD_FUNCTION(&desc) = function.raw;
+
+ CCP5_CMD_LEN(&desc) = op->src.u.dma.length;
+
+ CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
+ CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
+ CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
+ CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
+ CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
+ CCP5_CMD_KEY_HI(&desc) = 0;
+ CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
+ CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;
+
+ return ccp5_do_cmd(&desc, op->cmd_q);
+}
+
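
Editorial note: the new des3 member of union ccp_function above composes the 16-bit function word that ccp5_perform_des3() writes via CCP5_CMD_FUNCTION(). A minimal stand-alone mirror of that packing, with made-up field values (the field widths come from the hunk above; bit placement within the word is compiler-dependent, so this only shows which fields exist, not the exact hardware encoding):

	/* Illustrative only: mirrors the des3 bitfield layout added to
	 * union ccp_function (7-bit size, 1-bit encrypt, 5-bit mode,
	 * 2-bit type).  Field values below are arbitrary examples.
	 */
	#include <stdint.h>
	#include <stdio.h>

	union demo_ccp_function {
		struct {
			uint16_t size:7;
			uint16_t encrypt:1;
			uint16_t mode:5;
			uint16_t type:2;
		} des3;
		uint16_t raw;
	};

	int main(void)
	{
		union demo_ccp_function fn = { .raw = 0 };

		fn.des3.encrypt = 1;	/* op->u.des3.action */
		fn.des3.mode    = 1;	/* op->u.des3.mode   */
		fn.des3.type    = 1;	/* op->u.des3.type   */

		/* fn.raw is what CCP5_CMD_FUNCTION(&desc) would receive */
		printf("function word: 0x%04x\n", fn.raw);
		return 0;
	}
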
static int ccp5_perform_rsa(struct ccp_op *op)
{
struct ccp5_desc desc;
@@ -435,6 +486,7 @@ static int ccp5_perform_passthru(struct ccp_op *op)
struct ccp_dma_info *saddr = &op->src.u.dma;
struct ccp_dma_info *daddr = &op->dst.u.dma;
+
memset(&desc, 0, Q_DESC_SIZE);
CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU;
@@ -653,6 +705,65 @@ static int ccp_assign_lsbs(struct ccp_device *ccp)
return rc;
}
+static void ccp5_disable_queue_interrupts(struct ccp_device *ccp)
+{
+ unsigned int i;
+
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ iowrite32(0x0, ccp->cmd_q[i].reg_int_enable);
+}
+
+static void ccp5_enable_queue_interrupts(struct ccp_device *ccp)
+{
+ unsigned int i;
+
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable);
+}
+
+static void ccp5_irq_bh(unsigned long data)
+{
+ struct ccp_device *ccp = (struct ccp_device *)data;
+ u32 status;
+ unsigned int i;
+
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
+
+ status = ioread32(cmd_q->reg_interrupt_status);
+
+ if (status) {
+ cmd_q->int_status = status;
+ cmd_q->q_status = ioread32(cmd_q->reg_status);
+ cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+
+ /* On error, only save the first error value */
+ if ((status & INT_ERROR) && !cmd_q->cmd_error)
+ cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+ cmd_q->int_rcvd = 1;
+
+ /* Acknowledge the interrupt and wake the kthread */
+ iowrite32(status, cmd_q->reg_interrupt_status);
+ wake_up_interruptible(&cmd_q->int_queue);
+ }
+ }
+ ccp5_enable_queue_interrupts(ccp);
+}
+
+static irqreturn_t ccp5_irq_handler(int irq, void *data)
+{
+ struct device *dev = data;
+ struct ccp_device *ccp = dev_get_drvdata(dev);
+
+ ccp5_disable_queue_interrupts(ccp);
+ if (ccp->use_tasklet)
+ tasklet_schedule(&ccp->irq_tasklet);
+ else
+ ccp5_irq_bh((unsigned long)ccp);
+ return IRQ_HANDLED;
+}
+
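
Editorial note: ccp5_irq_handler() above is the top half of a split-interrupt scheme: it masks the queue interrupts and defers the per-queue status handling to ccp5_irq_bh(), either via the tasklet (when MSI/MSI-X is in use, see ccp->use_tasklet being set in ccp-pci.c further down) or synchronously. A hedged stand-alone sketch of the same control flow, with hypothetical mydev_* names:

	/* Sketch of the deferred-IRQ pattern used above; the mydev_*
	 * names are hypothetical and only illustrate the control flow.
	 */
	#include <linux/interrupt.h>

	struct mydev {
		struct tasklet_struct irq_tasklet;
		bool use_tasklet;	/* true when MSI/MSI-X was set up */
	};

	static void mydev_mask_irqs(struct mydev *dev)   { /* write 0 to the enable regs */ }
	static void mydev_unmask_irqs(struct mydev *dev) { /* re-arm the enable regs     */ }

	static void mydev_irq_bh(unsigned long data)
	{
		struct mydev *dev = (struct mydev *)data;

		/* ... read/ack per-queue status, wake waiters ... */
		mydev_unmask_irqs(dev);		/* re-enable once the work is done */
	}

	static irqreturn_t mydev_irq_handler(int irq, void *data)
	{
		struct mydev *dev = data;

		mydev_mask_irqs(dev);		/* quiesce the source first */
		if (dev->use_tasklet)
			tasklet_schedule(&dev->irq_tasklet);
		else
			mydev_irq_bh((unsigned long)dev);
		return IRQ_HANDLED;
	}

	/* during probe/init:
	 *   tasklet_init(&dev->irq_tasklet, mydev_irq_bh, (unsigned long)dev);
	 */
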
static int ccp5_init(struct ccp_device *ccp)
{
struct device *dev = ccp->dev;
@@ -729,6 +840,7 @@ static int ccp5_init(struct ccp_device *ccp)
dev_dbg(dev, "queue #%u available\n", i);
}
+
if (ccp->cmd_q_count == 0) {
dev_notice(dev, "no command queues available\n");
ret = -EIO;
@@ -736,19 +848,18 @@ static int ccp5_init(struct ccp_device *ccp)
}
/* Turn off the queues and disable interrupts until ready */
+ ccp5_disable_queue_interrupts(ccp);
for (i = 0; i < ccp->cmd_q_count; i++) {
cmd_q = &ccp->cmd_q[i];
cmd_q->qcontrol = 0; /* Start with nothing */
iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
- /* Disable the interrupts */
- iowrite32(0x00, cmd_q->reg_int_enable);
ioread32(cmd_q->reg_int_status);
ioread32(cmd_q->reg_status);
- /* Clear the interrupts */
- iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
+ /* Clear the interrupt status */
+ iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
}
dev_dbg(dev, "Requesting an IRQ...\n");
@@ -758,6 +869,10 @@ static int ccp5_init(struct ccp_device *ccp)
dev_err(dev, "unable to allocate an IRQ\n");
goto e_pool;
}
+ /* Initialize the ISR tasklet */
+ if (ccp->use_tasklet)
+ tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh,
+ (unsigned long)ccp);
dev_dbg(dev, "Loading LSB map...\n");
/* Copy the private LSB mask to the public registers */
@@ -826,11 +941,7 @@ static int ccp5_init(struct ccp_device *ccp)
}
dev_dbg(dev, "Enabling interrupts...\n");
- /* Enable interrupts */
- for (i = 0; i < ccp->cmd_q_count; i++) {
- cmd_q = &ccp->cmd_q[i];
- iowrite32(ALL_INTERRUPTS, cmd_q->reg_int_enable);
- }
+ ccp5_enable_queue_interrupts(ccp);
dev_dbg(dev, "Registering device...\n");
/* Put this on the unit list to make it available */
@@ -882,17 +993,15 @@ static void ccp5_destroy(struct ccp_device *ccp)
ccp_del_device(ccp);
/* Disable and clear interrupts */
+ ccp5_disable_queue_interrupts(ccp);
for (i = 0; i < ccp->cmd_q_count; i++) {
cmd_q = &ccp->cmd_q[i];
/* Turn off the run bit */
iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control);
- /* Disable the interrupts */
- iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
-
/* Clear the interrupt status */
- iowrite32(0x00, cmd_q->reg_int_enable);
+ iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
ioread32(cmd_q->reg_int_status);
ioread32(cmd_q->reg_status);
}
@@ -925,38 +1034,6 @@ static void ccp5_destroy(struct ccp_device *ccp)
}
}
-static irqreturn_t ccp5_irq_handler(int irq, void *data)
-{
- struct device *dev = data;
- struct ccp_device *ccp = dev_get_drvdata(dev);
- u32 status;
- unsigned int i;
-
- for (i = 0; i < ccp->cmd_q_count; i++) {
- struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
-
- status = ioread32(cmd_q->reg_interrupt_status);
-
- if (status) {
- cmd_q->int_status = status;
- cmd_q->q_status = ioread32(cmd_q->reg_status);
- cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
-
- /* On error, only save the first error value */
- if ((status & INT_ERROR) && !cmd_q->cmd_error)
- cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
-
- cmd_q->int_rcvd = 1;
-
- /* Acknowledge the interrupt and wake the kthread */
- iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
- wake_up_interruptible(&cmd_q->int_queue);
- }
- }
-
- return IRQ_HANDLED;
-}
-
static void ccp5_config(struct ccp_device *ccp)
{
/* Public side */
@@ -994,6 +1071,7 @@ static const struct ccp_actions ccp5_actions = {
.aes = ccp5_perform_aes,
.xts_aes = ccp5_perform_xts_aes,
.sha = ccp5_perform_sha,
+ .des3 = ccp5_perform_des3,
.rsa = ccp5_perform_rsa,
.passthru = ccp5_perform_passthru,
.ecc = ccp5_perform_ecc,
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index aa36f3f81860..0cb09d0feeaf 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -109,9 +109,8 @@
#define INT_COMPLETION 0x1
#define INT_ERROR 0x2
#define INT_QUEUE_STOPPED 0x4
-#define ALL_INTERRUPTS (INT_COMPLETION| \
- INT_ERROR| \
- INT_QUEUE_STOPPED)
+#define INT_EMPTY_QUEUE 0x8
+#define SUPPORTED_INTERRUPTS (INT_COMPLETION | INT_ERROR)
#define LSB_REGION_WIDTH 5
#define MAX_LSB_CNT 8
@@ -194,6 +193,9 @@
#define CCP_XTS_AES_KEY_SB_COUNT 1
#define CCP_XTS_AES_CTX_SB_COUNT 1
+#define CCP_DES3_KEY_SB_COUNT 1
+#define CCP_DES3_CTX_SB_COUNT 1
+
#define CCP_SHA_SB_COUNT 1
#define CCP_RSA_MAX_WIDTH 4096
@@ -337,7 +339,10 @@ struct ccp_device {
void *dev_specific;
int (*get_irq)(struct ccp_device *ccp);
void (*free_irq)(struct ccp_device *ccp);
+ unsigned int qim;
unsigned int irq;
+ bool use_tasklet;
+ struct tasklet_struct irq_tasklet;
/* I/O area used for device communication. The register mapping
* starts at an offset into the mapped bar.
@@ -424,33 +429,33 @@ enum ccp_memtype {
};
#define CCP_MEMTYPE_LSB CCP_MEMTYPE_KSB
+
struct ccp_dma_info {
dma_addr_t address;
unsigned int offset;
unsigned int length;
enum dma_data_direction dir;
-};
+} __packed __aligned(4);
struct ccp_dm_workarea {
struct device *dev;
struct dma_pool *dma_pool;
- unsigned int length;
u8 *address;
struct ccp_dma_info dma;
+ unsigned int length;
};
struct ccp_sg_workarea {
struct scatterlist *sg;
int nents;
+ unsigned int sg_used;
struct scatterlist *dma_sg;
struct device *dma_dev;
unsigned int dma_count;
enum dma_data_direction dma_dir;
- unsigned int sg_used;
-
u64 bytes_left;
};
@@ -479,6 +484,12 @@ struct ccp_xts_aes_op {
enum ccp_xts_aes_unit_size unit_size;
};
+struct ccp_des3_op {
+ enum ccp_des3_type type;
+ enum ccp_des3_mode mode;
+ enum ccp_des3_action action;
+};
+
struct ccp_sha_op {
enum ccp_sha_type type;
u64 msg_bits;
@@ -516,6 +527,7 @@ struct ccp_op {
union {
struct ccp_aes_op aes;
struct ccp_xts_aes_op xts;
+ struct ccp_des3_op des3;
struct ccp_sha_op sha;
struct ccp_rsa_op rsa;
struct ccp_passthru_op passthru;
@@ -624,13 +636,13 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp);
struct ccp_actions {
int (*aes)(struct ccp_op *);
int (*xts_aes)(struct ccp_op *);
+ int (*des3)(struct ccp_op *);
int (*sha)(struct ccp_op *);
int (*rsa)(struct ccp_op *);
int (*passthru)(struct ccp_op *);
int (*ecc)(struct ccp_op *);
u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int);
- void (*sbfree)(struct ccp_cmd_queue *, unsigned int,
- unsigned int);
+ void (*sbfree)(struct ccp_cmd_queue *, unsigned int, unsigned int);
unsigned int (*get_free_slots)(struct ccp_cmd_queue *);
int (*init)(struct ccp_device *);
void (*destroy)(struct ccp_device *);
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index f1396c3aedac..c0dfdacbdff5 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -16,6 +16,7 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
+#include <crypto/des.h>
#include <linux/ccp.h>
#include "ccp-dev.h"
@@ -41,6 +42,20 @@ static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};
+static const __be64 ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
+ cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
+ cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
+ cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
+ cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7),
+};
+
+static const __be64 ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
+ cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
+ cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
+ cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
+ cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7),
+};
+
#define CCP_NEW_JOBID(ccp) ((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
ccp_gen_jobid(ccp) : 0)
@@ -586,6 +601,255 @@ e_key:
return ret;
}
+static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+ struct ccp_cmd *cmd)
+{
+ struct ccp_aes_engine *aes = &cmd->u.aes;
+ struct ccp_dm_workarea key, ctx, final_wa, tag;
+ struct ccp_data src, dst;
+ struct ccp_data aad;
+ struct ccp_op op;
+
+ unsigned long long *final;
+ unsigned int dm_offset;
+ unsigned int ilen;
+ bool in_place = true; /* Default value */
+ int ret;
+
+ struct scatterlist *p_inp, sg_inp[2];
+ struct scatterlist *p_tag, sg_tag[2];
+ struct scatterlist *p_outp, sg_outp[2];
+ struct scatterlist *p_aad;
+
+ if (!aes->iv)
+ return -EINVAL;
+
+ if (!((aes->key_len == AES_KEYSIZE_128) ||
+ (aes->key_len == AES_KEYSIZE_192) ||
+ (aes->key_len == AES_KEYSIZE_256)))
+ return -EINVAL;
+
+ if (!aes->key) /* Gotta have a key SGL */
+ return -EINVAL;
+
+ /* First, decompose the source buffer into AAD & PT,
+ * and the destination buffer into AAD, CT & tag, or
+ * the input into CT & tag.
+ * It is expected that the input and output SGs will
+ * be valid, even if the AAD and input lengths are 0.
+ */
+ p_aad = aes->src;
+ p_inp = scatterwalk_ffwd(sg_inp, aes->src, aes->aad_len);
+ p_outp = scatterwalk_ffwd(sg_outp, aes->dst, aes->aad_len);
+ if (aes->action == CCP_AES_ACTION_ENCRYPT) {
+ ilen = aes->src_len;
+ p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
+ } else {
+ /* Input length for decryption includes tag */
+ ilen = aes->src_len - AES_BLOCK_SIZE;
+ p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
+ }
+
+ memset(&op, 0, sizeof(op));
+ op.cmd_q = cmd_q;
+ op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+ op.sb_key = cmd_q->sb_key; /* Pre-allocated */
+ op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
+ op.init = 1;
+ op.u.aes.type = aes->type;
+
+ /* Copy the key to the LSB */
+ ret = ccp_init_dm_workarea(&key, cmd_q,
+ CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
+ DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ dm_offset = CCP_SB_BYTES - aes->key_len;
+ ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+ ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_key;
+ }
+
+ /* Copy the context (IV) to the LSB.
+ * There is an assumption here that the IV is 96 bits in length, plus
+ * a nonce of 32 bits. If no IV is present, use a zeroed buffer.
+ */
+ ret = ccp_init_dm_workarea(&ctx, cmd_q,
+ CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
+ DMA_BIDIRECTIONAL);
+ if (ret)
+ goto e_key;
+
+ dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
+ ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_ctx;
+ }
+
+ op.init = 1;
+ if (aes->aad_len > 0) {
+ /* Step 1: Run a GHASH over the Additional Authenticated Data */
+ ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
+ AES_BLOCK_SIZE,
+ DMA_TO_DEVICE);
+ if (ret)
+ goto e_ctx;
+
+ op.u.aes.mode = CCP_AES_MODE_GHASH;
+ op.u.aes.action = CCP_AES_GHASHAAD;
+
+ while (aad.sg_wa.bytes_left) {
+ ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);
+
+ ret = cmd_q->ccp->vdata->perform->aes(&op);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_aad;
+ }
+
+ ccp_process_data(&aad, NULL, &op);
+ op.init = 0;
+ }
+ }
+
+ op.u.aes.mode = CCP_AES_MODE_GCTR;
+ op.u.aes.action = aes->action;
+
+ if (ilen > 0) {
+ /* Step 2: Run a GCTR over the plaintext */
+ in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false;
+
+ ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
+ AES_BLOCK_SIZE,
+ in_place ? DMA_BIDIRECTIONAL
+ : DMA_TO_DEVICE);
+ if (ret)
+ goto e_ctx;
+
+ if (in_place) {
+ dst = src;
+ } else {
+ ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
+ AES_BLOCK_SIZE, DMA_FROM_DEVICE);
+ if (ret)
+ goto e_src;
+ }
+
+ op.soc = 0;
+ op.eom = 0;
+ op.init = 1;
+ while (src.sg_wa.bytes_left) {
+ ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
+ if (!src.sg_wa.bytes_left) {
+ unsigned int nbytes = aes->src_len
+ % AES_BLOCK_SIZE;
+
+ if (nbytes) {
+ op.eom = 1;
+ op.u.aes.size = (nbytes * 8) - 1;
+ }
+ }
+
+ ret = cmd_q->ccp->vdata->perform->aes(&op);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_dst;
+ }
+
+ ccp_process_data(&src, &dst, &op);
+ op.init = 0;
+ }
+ }
+
+ /* Step 3: Update the IV portion of the context with the original IV */
+ ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_dst;
+ }
+
+ ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_dst;
+ }
+
+ /* Step 4: Concatenate the lengths of the AAD and source, and
+ * hash that 16 byte buffer.
+ */
+ ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (ret)
+ goto e_dst;
+ final = (unsigned long long *) final_wa.address;
+ final[0] = cpu_to_be64(aes->aad_len * 8);
+ final[1] = cpu_to_be64(ilen * 8);
+
+ op.u.aes.mode = CCP_AES_MODE_GHASH;
+ op.u.aes.action = CCP_AES_GHASHFINAL;
+ op.src.type = CCP_MEMTYPE_SYSTEM;
+ op.src.u.dma.address = final_wa.dma.address;
+ op.src.u.dma.length = AES_BLOCK_SIZE;
+ op.dst.type = CCP_MEMTYPE_SYSTEM;
+ op.dst.u.dma.address = final_wa.dma.address;
+ op.dst.u.dma.length = AES_BLOCK_SIZE;
+ op.eom = 1;
+ op.u.aes.size = 0;
+ ret = cmd_q->ccp->vdata->perform->aes(&op);
+ if (ret)
+ goto e_dst;
+
+ if (aes->action == CCP_AES_ACTION_ENCRYPT) {
+ /* Put the ciphered tag after the ciphertext. */
+ ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE);
+ } else {
+ /* Does this ciphered tag match the input? */
+ ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (ret)
+ goto e_tag;
+ ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
+
+ ret = memcmp(tag.address, final_wa.address, AES_BLOCK_SIZE);
+ ccp_dm_free(&tag);
+ }
+
+e_tag:
+ ccp_dm_free(&final_wa);
+
+e_dst:
+ if (aes->src_len && !in_place)
+ ccp_free_data(&dst, cmd_q);
+
+e_src:
+ if (aes->src_len)
+ ccp_free_data(&src, cmd_q);
+
+e_aad:
+ if (aes->aad_len)
+ ccp_free_data(&aad, cmd_q);
+
+e_ctx:
+ ccp_dm_free(&ctx);
+
+e_key:
+ ccp_dm_free(&key);
+
+ return ret;
+}
+
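
Editorial note: "Step 4" in ccp_run_aes_gcm_cmd() above builds the standard GCM length block, a single 16-byte buffer holding len(AAD) and len(C) in bits, each as a 64-bit big-endian integer, which is then run through GHASH (GHASHFINAL) to produce the tag. A small user-space sketch of that layout, illustrative only:

	/* Illustrative only: builds the 16-byte GHASH length block the way
	 * ccp_run_aes_gcm_cmd() does, i.e. be64(aad_len * 8) || be64(ilen * 8).
	 */
	#include <stdint.h>
	#include <stdio.h>

	static void put_be64(uint8_t *p, uint64_t v)
	{
		for (int i = 0; i < 8; i++)
			p[i] = (uint8_t)(v >> (56 - 8 * i));
	}

	int main(void)
	{
		uint64_t aad_len = 20;	/* bytes of AAD, example value       */
		uint64_t ilen    = 64;	/* bytes of plaintext, example value */
		uint8_t final_block[16];

		put_be64(final_block, aad_len * 8);	/* bit length of the AAD  */
		put_be64(final_block + 8, ilen * 8);	/* bit length of the text */

		for (int i = 0; i < 16; i++)
			printf("%02x%s", final_block[i], i == 15 ? "\n" : " ");
		return 0;
	}
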
static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
struct ccp_aes_engine *aes = &cmd->u.aes;
@@ -599,6 +863,9 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
if (aes->mode == CCP_AES_MODE_CMAC)
return ccp_run_aes_cmac_cmd(cmd_q, cmd);
+ if (aes->mode == CCP_AES_MODE_GCM)
+ return ccp_run_aes_gcm_cmd(cmd_q, cmd);
+
if (!((aes->key_len == AES_KEYSIZE_128) ||
(aes->key_len == AES_KEYSIZE_192) ||
(aes->key_len == AES_KEYSIZE_256)))
@@ -925,6 +1192,200 @@ e_key:
return ret;
}
+static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+{
+ struct ccp_des3_engine *des3 = &cmd->u.des3;
+
+ struct ccp_dm_workarea key, ctx;
+ struct ccp_data src, dst;
+ struct ccp_op op;
+ unsigned int dm_offset;
+ unsigned int len_singlekey;
+ bool in_place = false;
+ int ret;
+
+ /* Error checks */
+ if (!cmd_q->ccp->vdata->perform->des3)
+ return -EINVAL;
+
+ if (des3->key_len != DES3_EDE_KEY_SIZE)
+ return -EINVAL;
+
+ if (((des3->mode == CCP_DES3_MODE_ECB) ||
+ (des3->mode == CCP_DES3_MODE_CBC)) &&
+ (des3->src_len & (DES3_EDE_BLOCK_SIZE - 1)))
+ return -EINVAL;
+
+ if (!des3->key || !des3->src || !des3->dst)
+ return -EINVAL;
+
+ if (des3->mode != CCP_DES3_MODE_ECB) {
+ if (des3->iv_len != DES3_EDE_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (!des3->iv)
+ return -EINVAL;
+ }
+
+ ret = -EIO;
+ /* Zero out all the fields of the command desc */
+ memset(&op, 0, sizeof(op));
+
+ /* Set up the Function field */
+ op.cmd_q = cmd_q;
+ op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+ op.sb_key = cmd_q->sb_key;
+
+ op.init = (des3->mode == CCP_DES3_MODE_ECB) ? 0 : 1;
+ op.u.des3.type = des3->type;
+ op.u.des3.mode = des3->mode;
+ op.u.des3.action = des3->action;
+
+ /*
+ * All supported key sizes fit in a single (32-byte) KSB entry and
+ * (like AES) must be in little endian format. Use the 256-bit byte
+ * swap passthru option to convert from big endian to little endian.
+ */
+ ret = ccp_init_dm_workarea(&key, cmd_q,
+ CCP_DES3_KEY_SB_COUNT * CCP_SB_BYTES,
+ DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ /*
+ * The contents of the key triplet are in the reverse order of what
+ * is required by the engine. Copy the 3 pieces individually to put
+ * them where they belong.
+ */
+ dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */
+
+ len_singlekey = des3->key_len / 3;
+ ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
+ des3->key, 0, len_singlekey);
+ ccp_set_dm_area(&key, dm_offset + len_singlekey,
+ des3->key, len_singlekey, len_singlekey);
+ ccp_set_dm_area(&key, dm_offset,
+ des3->key, 2 * len_singlekey, len_singlekey);
+
+ /* Copy the key to the SB */
+ ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_key;
+ }
+
+ /*
+ * The DES3 context fits in a single (32-byte) KSB entry and
+ * must be in little endian format. Use the 256-bit byte swap
+ * passthru option to convert from big endian to little endian.
+ */
+ if (des3->mode != CCP_DES3_MODE_ECB) {
+ u32 load_mode;
+
+ op.sb_ctx = cmd_q->sb_ctx;
+
+ ret = ccp_init_dm_workarea(&ctx, cmd_q,
+ CCP_DES3_CTX_SB_COUNT * CCP_SB_BYTES,
+ DMA_BIDIRECTIONAL);
+ if (ret)
+ goto e_key;
+
+ /* Load the context into the LSB */
+ dm_offset = CCP_SB_BYTES - des3->iv_len;
+ ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0, des3->iv_len);
+
+ if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
+ load_mode = CCP_PASSTHRU_BYTESWAP_NOOP;
+ else
+ load_mode = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ load_mode);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_ctx;
+ }
+ }
+
+ /*
+ * Prepare the input and output data workareas. For in-place
+ * operations we need to set the dma direction to BIDIRECTIONAL
+ * and copy the src workarea to the dst workarea.
+ */
+ if (sg_virt(des3->src) == sg_virt(des3->dst))
+ in_place = true;
+
+ ret = ccp_init_data(&src, cmd_q, des3->src, des3->src_len,
+ DES3_EDE_BLOCK_SIZE,
+ in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+ if (ret)
+ goto e_ctx;
+
+ if (in_place)
+ dst = src;
+ else {
+ ret = ccp_init_data(&dst, cmd_q, des3->dst, des3->src_len,
+ DES3_EDE_BLOCK_SIZE, DMA_FROM_DEVICE);
+ if (ret)
+ goto e_src;
+ }
+
+ /* Send data to the CCP DES3 engine */
+ while (src.sg_wa.bytes_left) {
+ ccp_prepare_data(&src, &dst, &op, DES3_EDE_BLOCK_SIZE, true);
+ if (!src.sg_wa.bytes_left) {
+ op.eom = 1;
+
+ /* Since we don't retrieve the context in ECB mode
+ * we have to wait for the operation to complete
+ * on the last piece of data
+ */
+ op.soc = 0;
+ }
+
+ ret = cmd_q->ccp->vdata->perform->des3(&op);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_dst;
+ }
+
+ ccp_process_data(&src, &dst, &op);
+ }
+
+ if (des3->mode != CCP_DES3_MODE_ECB) {
+ /* Retrieve the context and make BE */
+ ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_dst;
+ }
+
+ /* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */
+ if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
+ dm_offset = CCP_SB_BYTES - des3->iv_len;
+ else
+ dm_offset = 0;
+ ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0,
+ DES3_EDE_BLOCK_SIZE);
+ }
+e_dst:
+ if (!in_place)
+ ccp_free_data(&dst, cmd_q);
+
+e_src:
+ ccp_free_data(&src, cmd_q);
+
+e_ctx:
+ if (des3->mode != CCP_DES3_MODE_ECB)
+ ccp_dm_free(&ctx);
+
+e_key:
+ ccp_dm_free(&key);
+
+ return ret;
+}
+
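
Editorial note: the comment in ccp_run_des3_cmd() above explains that the key triplet arrives as K1 || K2 || K3 but must be placed in reverse order, right-aligned in the 32-byte SB entry (the subsequent copy to the SB then applies the 256-bit byteswap passthru, which this sketch does not model). A stand-alone mirror of the three ccp_set_dm_area() placements, illustrative only:

	/* Illustrative only: places K3 || K2 || K1 at the tail of the
	 * 32-byte SB entry, as ccp_run_des3_cmd() does.
	 */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define SB_BYTES	32	/* CCP_SB_BYTES      */
	#define DES3_KEY_SIZE	24	/* DES3_EDE_KEY_SIZE */

	int main(void)
	{
		uint8_t key[DES3_KEY_SIZE];	/* K1 || K2 || K3 as given by the API */
		uint8_t sb[SB_BYTES] = { 0 };
		size_t single = DES3_KEY_SIZE / 3;
		size_t off = SB_BYTES - DES3_KEY_SIZE;	/* right-align in the entry */

		for (size_t i = 0; i < sizeof(key); i++)	/* dummy key material */
			key[i] = (uint8_t)i;

		memcpy(sb + off + 2 * single, key, single);		/* K1 last   */
		memcpy(sb + off + single, key + single, single);	/* K2 middle */
		memcpy(sb + off, key + 2 * single, single);		/* K3 first  */

		for (size_t i = 0; i < SB_BYTES; i++)
			printf("%02x%s", sb[i], i == SB_BYTES - 1 ? "\n" : " ");
		return 0;
	}
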
static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
struct ccp_sha_engine *sha = &cmd->u.sha;
@@ -955,6 +1416,18 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
return -EINVAL;
block_size = SHA256_BLOCK_SIZE;
break;
+ case CCP_SHA_TYPE_384:
+ if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
+ || sha->ctx_len < SHA384_DIGEST_SIZE)
+ return -EINVAL;
+ block_size = SHA384_BLOCK_SIZE;
+ break;
+ case CCP_SHA_TYPE_512:
+ if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
+ || sha->ctx_len < SHA512_DIGEST_SIZE)
+ return -EINVAL;
+ block_size = SHA512_BLOCK_SIZE;
+ break;
default:
return -EINVAL;
}
@@ -1042,6 +1515,21 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
sb_count = 1;
ooffset = ioffset = 0;
break;
+ case CCP_SHA_TYPE_384:
+ digest_size = SHA384_DIGEST_SIZE;
+ init = (void *) ccp_sha384_init;
+ ctx_size = SHA512_DIGEST_SIZE;
+ sb_count = 2;
+ ioffset = 0;
+ ooffset = 2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE;
+ break;
+ case CCP_SHA_TYPE_512:
+ digest_size = SHA512_DIGEST_SIZE;
+ init = (void *) ccp_sha512_init;
+ ctx_size = SHA512_DIGEST_SIZE;
+ sb_count = 2;
+ ooffset = ioffset = 0;
+ break;
default:
ret = -EINVAL;
goto e_data;
@@ -1060,6 +1548,11 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
op.u.sha.type = sha->type;
op.u.sha.msg_bits = sha->msg_bits;
+ /* For SHA1/224/256 the context fits in a single (32-byte) SB entry;
+ * SHA384/512 require 2 adjacent SB slots, with the right half in the
+ * first slot, and the left half in the second. Each portion must then
+ * be in little endian format: use the 256-bit byte swap option.
+ */
ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
DMA_BIDIRECTIONAL);
if (ret)
@@ -1071,6 +1564,13 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
case CCP_SHA_TYPE_256:
memcpy(ctx.address + ioffset, init, ctx_size);
break;
+ case CCP_SHA_TYPE_384:
+ case CCP_SHA_TYPE_512:
+ memcpy(ctx.address + ctx_size / 2, init,
+ ctx_size / 2);
+ memcpy(ctx.address, init + ctx_size / 2,
+ ctx_size / 2);
+ break;
default:
ret = -EINVAL;
goto e_ctx;
@@ -1137,6 +1637,15 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
sha->ctx, 0,
digest_size);
break;
+ case CCP_SHA_TYPE_384:
+ case CCP_SHA_TYPE_512:
+ ccp_get_dm_area(&ctx, 0,
+ sha->ctx, LSB_ITEM_SIZE - ooffset,
+ LSB_ITEM_SIZE);
+ ccp_get_dm_area(&ctx, LSB_ITEM_SIZE + ooffset,
+ sha->ctx, 0,
+ LSB_ITEM_SIZE - ooffset);
+ break;
default:
ret = -EINVAL;
goto e_ctx;
@@ -1174,6 +1683,16 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
ctx.address + ooffset,
digest_size);
break;
+ case CCP_SHA_TYPE_384:
+ case CCP_SHA_TYPE_512:
+ memcpy(hmac_buf + block_size,
+ ctx.address + LSB_ITEM_SIZE + ooffset,
+ LSB_ITEM_SIZE);
+ memcpy(hmac_buf + block_size +
+ (LSB_ITEM_SIZE - ooffset),
+ ctx.address,
+ LSB_ITEM_SIZE);
+ break;
default:
ret = -EINVAL;
goto e_ctx;
@@ -1831,6 +2350,9 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
case CCP_ENGINE_XTS_AES_128:
ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
break;
+ case CCP_ENGINE_DES3:
+ ret = ccp_run_des3_cmd(cmd_q, cmd);
+ break;
case CCP_ENGINE_SHA:
ret = ccp_run_sha_cmd(cmd_q, cmd);
break;
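
Editorial note: the SHA-384/512 paths added to ccp_run_sha_cmd() above split the 64-byte context across two adjacent 32-byte LSB slots with the halves swapped (right half of the digest state in the first slot, left half in the second), which is why the seeding memcpy()s, the ccp_get_dm_area() calls and the HMAC buffer copies all cross over. A stand-alone sketch of the half-swap on load, illustrative only:

	/* Illustrative only: loads a 64-byte SHA-512 state into two 32-byte
	 * slots the way ccp_run_sha_cmd() does, i.e. with the halves swapped.
	 */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define CTX_SIZE	64	/* SHA512_DIGEST_SIZE        */
	#define SLOT_SIZE	32	/* one LSB slot, CCP_SB_BYTES */

	int main(void)
	{
		uint8_t init[CTX_SIZE];		/* H0..H7 as packed bytes */
		uint8_t slots[2 * SLOT_SIZE];	/* two adjacent LSB slots */

		for (size_t i = 0; i < CTX_SIZE; i++)	/* dummy state bytes */
			init[i] = (uint8_t)i;

		/* left half (H0..H3) lands in the second slot ...        */
		memcpy(slots + CTX_SIZE / 2, init, CTX_SIZE / 2);
		/* ... and the right half (H4..H7) lands in the first one */
		memcpy(slots, init + CTX_SIZE / 2, CTX_SIZE / 2);

		printf("first slot starts with byte 0x%02x (H4..)\n", slots[0]);
		return 0;
	}
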
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
index 28a9996c1085..e880d4cf4ada 100644
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -69,6 +69,7 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp)
goto e_irq;
}
}
+ ccp->use_tasklet = true;
return 0;
@@ -100,6 +101,7 @@ static int ccp_get_msi_irq(struct ccp_device *ccp)
dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
goto e_msi;
}
+ ccp->use_tasklet = true;
return 0;
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 41bc7f4f58cd..f00e0d8bd039 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -294,7 +294,7 @@ static inline void get_aes_decrypt_key(unsigned char *dec_key,
static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
- struct crypto_shash *base_hash = NULL;
+ struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
switch (ds) {
case SHA1_DIGEST_SIZE:
@@ -522,7 +522,7 @@ static inline void create_wreq(struct chcr_context *ctx,
{
struct uld_ctx *u_ctx = ULD_CTX(ctx);
int iv_loc = IV_DSGL;
- int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id];
+ int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
unsigned int immdatalen = 0, nr_frags = 0;
if (is_ofld_imm(skb)) {
@@ -543,7 +543,7 @@ static inline void create_wreq(struct chcr_context *ctx,
chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
chcr_req->wreq.rx_chid_to_rx_q_id =
FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
- is_iv ? iv_loc : IV_NOP, ctx->tx_channel_id);
+ is_iv ? iv_loc : IV_NOP, ctx->tx_qidx);
chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
qid);
@@ -721,19 +721,19 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
struct sk_buff *skb;
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_channel_id))) {
+ ctx->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
- skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
+ skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx],
CHCR_ENCRYPT_OP);
if (IS_ERR(skb)) {
pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
return PTR_ERR(skb);
}
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
}
@@ -746,19 +746,19 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
struct sk_buff *skb;
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_channel_id))) {
+ ctx->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
- skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[0],
+ skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx],
CHCR_DECRYPT_OP);
if (IS_ERR(skb)) {
pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
return PTR_ERR(skb);
}
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
}
@@ -766,7 +766,9 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
static int chcr_device_init(struct chcr_context *ctx)
{
struct uld_ctx *u_ctx;
+ struct adapter *adap;
unsigned int id;
+ int txq_perchan, txq_idx, ntxq;
int err = 0, rxq_perchan, rxq_idx;
id = smp_processor_id();
@@ -777,11 +779,18 @@ static int chcr_device_init(struct chcr_context *ctx)
goto out;
}
u_ctx = ULD_CTX(ctx);
+ adap = padap(ctx->dev);
+ ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
+ adap->vres.ncrypto_fc);
rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
+ txq_perchan = ntxq / u_ctx->lldi.nchan;
rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
rxq_idx += id % rxq_perchan;
+ txq_idx = ctx->dev->tx_channel_id * txq_perchan;
+ txq_idx += id % txq_perchan;
spin_lock(&ctx->dev->lock_chcr_dev);
- ctx->tx_channel_id = rxq_idx;
+ ctx->rx_qidx = rxq_idx;
+ ctx->tx_qidx = txq_idx;
ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
ctx->dev->rx_channel_id = 0;
spin_unlock(&ctx->dev->lock_chcr_dev);
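
Editorial note: chcr_device_init() now derives separate RX and TX queue indices per channel instead of reusing one channel id for both. As a worked example with made-up numbers (not taken from the driver): with nchan = 2, nrxq = 8 and ntxq = 8, each channel owns 4 queues, so a context created on CPU id 5 with tx_channel_id 1 gets rx_qidx = 1*4 + 5%4 = 5 and tx_qidx = 5. A tiny sketch of the same arithmetic:

	/* Illustrative only: reproduces the queue-index arithmetic from
	 * chcr_device_init() with example values for nchan, nrxq, ntxq, id.
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned int nchan = 2, nrxq = 8, ntxq = 8;	/* example values       */
		unsigned int tx_channel_id = 1;			/* example value        */
		unsigned int id = 5;				/* smp_processor_id()   */

		unsigned int rxq_perchan = nrxq / nchan;
		unsigned int txq_perchan = ntxq / nchan;
		unsigned int rx_qidx = tx_channel_id * rxq_perchan + id % rxq_perchan;
		unsigned int tx_qidx = tx_channel_id * txq_perchan + id % txq_perchan;

		printf("rx_qidx=%u tx_qidx=%u\n", rx_qidx, tx_qidx);
		return 0;
	}
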
@@ -935,7 +944,7 @@ static int chcr_ahash_update(struct ahash_request *req)
u_ctx = ULD_CTX(ctx);
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_channel_id))) {
+ ctx->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
@@ -975,7 +984,7 @@ static int chcr_ahash_update(struct ahash_request *req)
}
req_ctx->reqlen = remainder;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
@@ -1028,7 +1037,7 @@ static int chcr_ahash_final(struct ahash_request *req)
return -ENOMEM;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
}
@@ -1047,7 +1056,7 @@ static int chcr_ahash_finup(struct ahash_request *req)
u_ctx = ULD_CTX(ctx);
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_channel_id))) {
+ ctx->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
@@ -1079,7 +1088,7 @@ static int chcr_ahash_finup(struct ahash_request *req)
return -ENOMEM;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
@@ -1100,7 +1109,7 @@ static int chcr_ahash_digest(struct ahash_request *req)
u_ctx = ULD_CTX(ctx);
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_channel_id))) {
+ ctx->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
@@ -1130,7 +1139,7 @@ static int chcr_ahash_digest(struct ahash_request *req)
return -ENOMEM;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
}
@@ -1334,20 +1343,36 @@ static int chcr_copy_assoc(struct aead_request *req,
return crypto_skcipher_encrypt(skreq);
}
-
-static unsigned char get_hmac(unsigned int authsize)
+static int chcr_aead_need_fallback(struct aead_request *req, int src_nent,
+ int aadmax, int wrlen,
+ unsigned short op_type)
{
- switch (authsize) {
- case ICV_8:
- return CHCR_SCMD_HMAC_CTRL_PL1;
- case ICV_10:
- return CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
- case ICV_12:
- return CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
- }
- return CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
+
+ if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
+ (req->assoclen > aadmax) ||
+ (src_nent > MAX_SKB_FRAGS) ||
+ (wrlen > MAX_WR_SIZE))
+ return 1;
+ return 0;
}
+static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct aead_request *subreq = aead_request_ctx(req);
+
+ aead_request_set_tfm(subreq, aeadctx->sw_cipher);
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+ return op_type ? crypto_aead_decrypt(subreq) :
+ crypto_aead_encrypt(subreq);
+}
static struct sk_buff *create_authenc_wr(struct aead_request *req,
unsigned short qid,
@@ -1371,7 +1396,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
unsigned short stop_offset = 0;
unsigned int assoclen = req->assoclen;
unsigned int authsize = crypto_aead_authsize(tfm);
- int err = 0;
+ int err = -EINVAL, src_nent;
int null = 0;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
@@ -1381,8 +1406,8 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
goto err;
-
- if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
+ src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
+ if (src_nent < 0)
goto err;
src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
reqctx->dst = src;
@@ -1400,7 +1425,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
}
reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
(op_type ? -authsize : authsize));
- if (reqctx->dst_nents <= 0) {
+ if (reqctx->dst_nents < 0) {
pr_err("AUTHENC:Invalid Destination sg entries\n");
goto err;
}
@@ -1408,6 +1433,12 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
- sizeof(chcr_req->key_ctx);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ if (chcr_aead_need_fallback(req, src_nent + MIN_AUTH_SG,
+ T6_MAX_AAD_SIZE,
+ transhdr_len + (sgl_len(src_nent + MIN_AUTH_SG) * 8),
+ op_type)) {
+ return ERR_PTR(chcr_aead_fallback(req, op_type));
+ }
skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
if (!skb)
goto err;
@@ -1489,24 +1520,6 @@ err:
return ERR_PTR(-EINVAL);
}
-static void aes_gcm_empty_pld_pad(struct scatterlist *sg,
- unsigned short offset)
-{
- struct page *spage;
- unsigned char *addr;
-
- spage = sg_page(sg);
- get_page(spage); /* so that it is not freed by NIC */
-#ifdef KMAP_ATOMIC_ARGS
- addr = kmap_atomic(spage, KM_SOFTIRQ0);
-#else
- addr = kmap_atomic(spage);
-#endif
- memset(addr + sg->offset, 0, offset + 1);
-
- kunmap_atomic(addr);
-}
-
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
__be32 data;
@@ -1570,11 +1583,6 @@ static int ccm_format_packet(struct aead_request *req,
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
int rc = 0;
- if (req->assoclen > T5_MAX_AAD_SIZE) {
- pr_err("CCM: Unsupported AAD data. It should be < %d\n",
- T5_MAX_AAD_SIZE);
- return -EINVAL;
- }
if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
reqctx->iv[0] = 3;
memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
@@ -1600,13 +1608,13 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
struct chcr_context *chcrctx)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
unsigned int ivsize = AES_BLOCK_SIZE;
unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
unsigned int c_id = chcrctx->dev->rx_channel_id;
unsigned int ccm_xtra;
unsigned char tag_offset = 0, auth_offset = 0;
- unsigned char hmac_ctrl = get_hmac(crypto_aead_authsize(tfm));
unsigned int assoclen;
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
@@ -1642,8 +1650,8 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
crypto_aead_authsize(tfm));
sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
- cipher_mode, mac_mode, hmac_ctrl,
- ivsize >> 1);
+ cipher_mode, mac_mode,
+ aeadctx->hmac_ctrl, ivsize >> 1);
sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
1, dst_size);
@@ -1719,16 +1727,17 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
unsigned int dst_size = 0, kctx_len;
unsigned int sub_type;
unsigned int authsize = crypto_aead_authsize(tfm);
- int err = 0;
+ int err = -EINVAL, src_nent;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
goto err;
-
- if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
+ src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
+ if (src_nent < 0)
goto err;
+
sub_type = get_aead_subtype(tfm);
src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
reqctx->dst = src;
@@ -1744,7 +1753,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
}
reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
(op_type ? -authsize : authsize));
- if (reqctx->dst_nents <= 0) {
+ if (reqctx->dst_nents < 0) {
pr_err("CCM:Invalid Destination sg entries\n");
goto err;
}
@@ -1756,6 +1765,13 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ if (chcr_aead_need_fallback(req, src_nent + MIN_CCM_SG,
+ T6_MAX_AAD_SIZE - 18,
+ transhdr_len + (sgl_len(src_nent + MIN_CCM_SG) * 8),
+ op_type)) {
+ return ERR_PTR(chcr_aead_fallback(req, op_type));
+ }
+
skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
if (!skb)
@@ -1820,8 +1836,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
unsigned char tag_offset = 0;
unsigned int crypt_len = 0;
unsigned int authsize = crypto_aead_authsize(tfm);
- unsigned char hmac_ctrl = get_hmac(authsize);
- int err = 0;
+ int err = -EINVAL, src_nent;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
@@ -1831,8 +1846,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
goto err;
-
- if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
+ src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
+ if (src_nent < 0)
goto err;
src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
@@ -1854,7 +1869,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
crypt_len = req->cryptlen;
reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
(op_type ? -authsize : authsize));
- if (reqctx->dst_nents <= 0) {
+ if (reqctx->dst_nents < 0) {
pr_err("GCM:Invalid Destination sg entries\n");
goto err;
}
@@ -1864,6 +1879,12 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
AEAD_H_SIZE;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ if (chcr_aead_need_fallback(req, src_nent + MIN_GCM_SG,
+ T6_MAX_AAD_SIZE,
+ transhdr_len + (sgl_len(src_nent + MIN_GCM_SG) * 8),
+ op_type)) {
+ return ERR_PTR(chcr_aead_fallback(req, op_type));
+ }
skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
if (!skb)
goto err;
@@ -1881,11 +1902,11 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
ctx->dev->rx_channel_id, 2, (ivsize ?
(req->assoclen + 1) : 0));
- chcr_req->sec_cpl.pldlen = htonl(req->assoclen + ivsize + crypt_len);
+ chcr_req->sec_cpl.pldlen =
+ htonl(req->assoclen + ivsize + req->cryptlen);
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
req->assoclen ? 1 : 0, req->assoclen,
req->assoclen + ivsize + 1, 0);
- if (req->cryptlen) {
chcr_req->sec_cpl.cipherstop_lo_authinsert =
FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + ivsize + 1,
tag_offset, tag_offset);
@@ -1893,17 +1914,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
CHCR_ENCRYPT_OP) ? 1 : 0,
CHCR_SCMD_CIPHER_MODE_AES_GCM,
- CHCR_SCMD_AUTH_MODE_GHASH, hmac_ctrl,
- ivsize >> 1);
- } else {
- chcr_req->sec_cpl.cipherstop_lo_authinsert =
- FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
- chcr_req->sec_cpl.seqno_numivs =
- FILL_SEC_CPL_SCMD0_SEQNO(op_type,
- (op_type == CHCR_ENCRYPT_OP) ?
- 1 : 0, CHCR_SCMD_CIPHER_MODE_AES_CBC,
- 0, 0, ivsize >> 1);
- }
+ CHCR_SCMD_AUTH_MODE_GHASH,
+ aeadctx->hmac_ctrl, ivsize >> 1);
chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
0, 1, dst_size);
chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
@@ -1936,15 +1948,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
write_sg_to_skb(skb, &frags, req->src, req->assoclen);
write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
-
- if (req->cryptlen) {
- write_sg_to_skb(skb, &frags, src, req->cryptlen);
- } else {
- aes_gcm_empty_pld_pad(req->dst, authsize - 1);
- write_sg_to_skb(skb, &frags, reqctx->dst, crypt_len);
-
- }
-
+ write_sg_to_skb(skb, &frags, src, req->cryptlen);
create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
sizeof(struct cpl_rx_phys_dsgl) + dst_size);
reqctx->skb = skb;
@@ -1965,8 +1969,15 @@ static int chcr_aead_cra_init(struct crypto_aead *tfm)
{
struct chcr_context *ctx = crypto_aead_ctx(tfm);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
-
- crypto_aead_set_reqsize(tfm, sizeof(struct chcr_aead_reqctx));
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+
+ aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(aeadctx->sw_cipher))
+ return PTR_ERR(aeadctx->sw_cipher);
+ crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
+ sizeof(struct aead_request) +
+ crypto_aead_reqsize(aeadctx->sw_cipher)));
aeadctx->null = crypto_get_default_null_skcipher();
if (IS_ERR(aeadctx->null))
return PTR_ERR(aeadctx->null);
@@ -1975,7 +1986,11 @@ static int chcr_aead_cra_init(struct crypto_aead *tfm)
static void chcr_aead_cra_exit(struct crypto_aead *tfm)
{
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+
crypto_put_default_null_skcipher();
+ crypto_free_aead(aeadctx->sw_cipher);
}
static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
@@ -1985,7 +2000,7 @@ static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
aeadctx->mayverify = VERIFY_HW;
- return 0;
+ return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
@@ -2022,7 +2037,7 @@ static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
aeadctx->mayverify = VERIFY_SW;
}
- return 0;
+ return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
@@ -2062,7 +2077,7 @@ static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
- return 0;
+ return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
@@ -2088,7 +2103,7 @@ static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
- return 0;
+ return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
@@ -2130,10 +2145,10 @@ static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
- return 0;
+ return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
-static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
+static int chcr_ccm_common_setkey(struct crypto_aead *aead,
const u8 *key,
unsigned int keylen)
{
@@ -2142,8 +2157,6 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
unsigned char ck_size, mk_size;
int key_ctx_size = 0;
- memcpy(aeadctx->key, key, keylen);
- aeadctx->enckey_len = keylen;
key_ctx_size = sizeof(struct _key_ctx) +
((DIV_ROUND_UP(keylen, 16)) << 4) * 2;
if (keylen == AES_KEYSIZE_128) {
@@ -2163,9 +2176,32 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
}
aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
key_ctx_size >> 4);
+ memcpy(aeadctx->key, key, keylen);
+ aeadctx->enckey_len = keylen;
+
return 0;
}
+static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(aead);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ int error;
+
+ crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
+ CRYPTO_TFM_REQ_MASK);
+ error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
+ crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
+ crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
+ CRYPTO_TFM_RES_MASK);
+ if (error)
+ return error;
+ return chcr_ccm_common_setkey(aead, key, keylen);
+}
+
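
Editorial note: the same request/result flag shuffle around crypto_aead_setkey() on the software fallback recurs in chcr_gcm_setkey(), chcr_authenc_setkey() and chcr_aead_digest_null_setkey() below; the intent is to pass the caller's request flags down to the fallback tfm and reflect the fallback's result flags back on the hardware tfm. A hedged sketch of that pattern pulled out into a helper (hypothetical name, not part of the driver):

	/* Hypothetical helper, illustrative only: mirrors the flag handling
	 * each chcr setkey path performs around its sw_cipher fallback.
	 */
	#include <crypto/aead.h>

	static int demo_setkey_fallback(struct crypto_aead *hw_tfm,
					struct crypto_aead *sw_cipher,
					const u8 *key, unsigned int keylen)
	{
		int err;

		/* hand the caller's request flags down to the fallback */
		crypto_aead_clear_flags(sw_cipher, CRYPTO_TFM_REQ_MASK);
		crypto_aead_set_flags(sw_cipher,
				      crypto_aead_get_flags(hw_tfm) & CRYPTO_TFM_REQ_MASK);

		err = crypto_aead_setkey(sw_cipher, key, keylen);

		/* reflect the fallback's result flags on the hardware tfm */
		crypto_aead_clear_flags(hw_tfm, CRYPTO_TFM_RES_MASK);
		crypto_aead_set_flags(hw_tfm,
				      crypto_aead_get_flags(sw_cipher) & CRYPTO_TFM_RES_MASK);
		return err;
	}
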
static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
@@ -2180,7 +2216,7 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
}
keylen -= 3;
memcpy(aeadctx->salt, key + keylen, 3);
- return chcr_aead_ccm_setkey(aead, key, keylen);
+ return chcr_ccm_common_setkey(aead, key, keylen);
}
static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
@@ -2193,6 +2229,17 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int ck_size;
int ret = 0, key_ctx_size = 0;
+ aeadctx->enckey_len = 0;
+ crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
+ & CRYPTO_TFM_REQ_MASK);
+ ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
+ crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
+ crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
+ CRYPTO_TFM_RES_MASK);
+ if (ret)
+ goto out;
+
if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
keylen > 3) {
keylen -= 4; /* nonce/salt is present in the last 4 bytes */
@@ -2207,8 +2254,7 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
} else {
crypto_tfm_set_flags((struct crypto_tfm *)aead,
CRYPTO_TFM_RES_BAD_KEY_LEN);
- aeadctx->enckey_len = 0;
- pr_err("GCM: Invalid key length %d", keylen);
+ pr_err("GCM: Invalid key length %d\n", keylen);
ret = -EINVAL;
goto out;
}
@@ -2259,11 +2305,21 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
int err = 0, i, key_ctx_len = 0;
unsigned char ck_size = 0;
unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
- struct crypto_shash *base_hash = NULL;
+ struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
struct algo_param param;
int align;
u8 *o_ptr = NULL;
+ crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
+ & CRYPTO_TFM_REQ_MASK);
+ err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
+ crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
+ crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
+ & CRYPTO_TFM_RES_MASK);
+ if (err)
+ goto out;
+
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
goto out;
@@ -2296,7 +2352,8 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
base_hash = chcr_alloc_shash(max_authsize);
if (IS_ERR(base_hash)) {
pr_err("chcr : Base driver cannot be loaded\n");
- goto out;
+ aeadctx->enckey_len = 0;
+ return -EINVAL;
}
{
SHASH_DESC_ON_STACK(shash, base_hash);
@@ -2351,7 +2408,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
}
out:
aeadctx->enckey_len = 0;
- if (base_hash)
+ if (!IS_ERR(base_hash))
chcr_free_shash(base_hash);
return -EINVAL;
}
@@ -2363,11 +2420,21 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
struct crypto_authenc_keys keys;
-
+ int err;
/* it contains auth and cipher key both*/
int key_ctx_len = 0;
unsigned char ck_size = 0;
+ crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
+ & CRYPTO_TFM_REQ_MASK);
+ err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
+ crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
+ crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
+ & CRYPTO_TFM_RES_MASK);
+ if (err)
+ goto out;
+
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
goto out;
@@ -2465,22 +2532,20 @@ static int chcr_aead_op(struct aead_request *req,
}
u_ctx = ULD_CTX(ctx);
if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_channel_id)) {
+ ctx->tx_qidx)) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
/* Form a WR from req */
- skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id], size,
+ skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], size,
op_type);
- if (IS_ERR(skb) || skb == NULL) {
- pr_err("chcr : %s : failed to form WR. No memory\n", __func__);
+ if (IS_ERR(skb) || !skb)
return PTR_ERR(skb);
- }
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
}
@@ -2673,6 +2738,7 @@ static struct chcr_alg_template driver_algs[] = {
.cra_name = "gcm(aes)",
.cra_driver_name = "gcm-aes-chcr",
.cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
.cra_ctxsize = sizeof(struct chcr_context) +
sizeof(struct chcr_aead_ctx) +
sizeof(struct chcr_gcm_ctx),
@@ -2691,6 +2757,7 @@ static struct chcr_alg_template driver_algs[] = {
.cra_name = "rfc4106(gcm(aes))",
.cra_driver_name = "rfc4106-gcm-aes-chcr",
.cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY + 1,
.cra_ctxsize = sizeof(struct chcr_context) +
sizeof(struct chcr_aead_ctx) +
sizeof(struct chcr_gcm_ctx),
@@ -2710,6 +2777,7 @@ static struct chcr_alg_template driver_algs[] = {
.cra_name = "ccm(aes)",
.cra_driver_name = "ccm-aes-chcr",
.cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
.cra_ctxsize = sizeof(struct chcr_context) +
sizeof(struct chcr_aead_ctx),
@@ -2728,6 +2796,7 @@ static struct chcr_alg_template driver_algs[] = {
.cra_name = "rfc4309(ccm(aes))",
.cra_driver_name = "rfc4309-ccm-aes-chcr",
.cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY + 1,
.cra_ctxsize = sizeof(struct chcr_context) +
sizeof(struct chcr_aead_ctx),
@@ -2747,6 +2816,7 @@ static struct chcr_alg_template driver_algs[] = {
.cra_driver_name =
"authenc-hmac-sha1-cbc-aes-chcr",
.cra_blocksize = AES_BLOCK_SIZE,
+ .cra_priority = CHCR_AEAD_PRIORITY,
.cra_ctxsize = sizeof(struct chcr_context) +
sizeof(struct chcr_aead_ctx) +
sizeof(struct chcr_authenc_ctx),
@@ -2768,6 +2838,7 @@ static struct chcr_alg_template driver_algs[] = {
.cra_driver_name =
"authenc-hmac-sha256-cbc-aes-chcr",
.cra_blocksize = AES_BLOCK_SIZE,
+ .cra_priority = CHCR_AEAD_PRIORITY,
.cra_ctxsize = sizeof(struct chcr_context) +
sizeof(struct chcr_aead_ctx) +
sizeof(struct chcr_authenc_ctx),
@@ -2788,6 +2859,7 @@ static struct chcr_alg_template driver_algs[] = {
.cra_driver_name =
"authenc-hmac-sha224-cbc-aes-chcr",
.cra_blocksize = AES_BLOCK_SIZE,
+ .cra_priority = CHCR_AEAD_PRIORITY,
.cra_ctxsize = sizeof(struct chcr_context) +
sizeof(struct chcr_aead_ctx) +
sizeof(struct chcr_authenc_ctx),
@@ -2807,6 +2879,7 @@ static struct chcr_alg_template driver_algs[] = {
.cra_driver_name =
"authenc-hmac-sha384-cbc-aes-chcr",
.cra_blocksize = AES_BLOCK_SIZE,
+ .cra_priority = CHCR_AEAD_PRIORITY,
.cra_ctxsize = sizeof(struct chcr_context) +
sizeof(struct chcr_aead_ctx) +
sizeof(struct chcr_authenc_ctx),
@@ -2827,6 +2900,7 @@ static struct chcr_alg_template driver_algs[] = {
.cra_driver_name =
"authenc-hmac-sha512-cbc-aes-chcr",
.cra_blocksize = AES_BLOCK_SIZE,
+ .cra_priority = CHCR_AEAD_PRIORITY,
.cra_ctxsize = sizeof(struct chcr_context) +
sizeof(struct chcr_aead_ctx) +
sizeof(struct chcr_authenc_ctx),
@@ -2847,6 +2921,7 @@ static struct chcr_alg_template driver_algs[] = {
.cra_driver_name =
"authenc-digest_null-cbc-aes-chcr",
.cra_blocksize = AES_BLOCK_SIZE,
+ .cra_priority = CHCR_AEAD_PRIORITY,
.cra_ctxsize = sizeof(struct chcr_context) +
sizeof(struct chcr_aead_ctx) +
sizeof(struct chcr_authenc_ctx),
@@ -2915,10 +2990,9 @@ static int chcr_register_alg(void)
name = driver_algs[i].alg.crypto.cra_driver_name;
break;
case CRYPTO_ALG_TYPE_AEAD:
- driver_algs[i].alg.aead.base.cra_priority =
- CHCR_CRA_PRIORITY;
driver_algs[i].alg.aead.base.cra_flags =
- CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+ CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK;
driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
driver_algs[i].alg.aead.init = chcr_aead_cra_init;
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index ba38bae7ce80..751d06a58101 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -218,6 +218,10 @@
#define MAX_NK 8
#define CRYPTO_MAX_IMM_TX_PKT_LEN 256
+#define MAX_WR_SIZE 512
+#define MIN_AUTH_SG 2 /*IV + AAD*/
+#define MIN_GCM_SG 2 /* IV + AAD*/
+#define MIN_CCM_SG 3 /*IV+AAD+B0*/
struct algo_param {
unsigned int auth_mode;
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index 79da22b5cdc9..cd0c35a18d92 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -54,6 +54,8 @@
#define CHK_MAC_ERR_BIT(x) (((x) >> MAC_ERROR_BIT) & 1)
#define MAX_SALT 4
+#define padap(dev) pci_get_drvdata(dev->u_ctx->lldi.pdev)
+
struct uld_ctx;
struct _key_ctx {
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 81cfd0ba132e..5b2fabb14229 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -41,15 +41,15 @@
#define CCM_B0_SIZE 16
#define CCM_AAD_FIELD_SIZE 2
-#define T5_MAX_AAD_SIZE 512
+#define T6_MAX_AAD_SIZE 511
/* Define following if h/w is not dropping the AAD and IV data before
* giving the processed data
*/
-#define CHCR_CRA_PRIORITY 3000
-
+#define CHCR_CRA_PRIORITY 500
+#define CHCR_AEAD_PRIORITY 6000
#define CHCR_AES_MAX_KEY_LEN (2 * (AES_MAX_KEY_SIZE)) /* consider xts */
#define CHCR_MAX_CRYPTO_IV_LEN 16 /* AES IV len */
@@ -188,6 +188,7 @@ struct chcr_aead_ctx {
__be32 key_ctx_hdr;
unsigned int enckey_len;
struct crypto_skcipher *null;
+ struct crypto_aead *sw_cipher;
u8 salt[MAX_SALT];
u8 key[CHCR_AES_MAX_KEY_LEN];
u16 hmac_ctrl;
@@ -211,7 +212,8 @@ struct __crypto_ctx {
struct chcr_context {
struct chcr_dev *dev;
- unsigned char tx_channel_id;
+ unsigned char tx_qidx;
+ unsigned char rx_qidx;
struct __crypto_ctx crypto_ctx[0];
};
diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c
new file mode 100644
index 000000000000..451620b475a0
--- /dev/null
+++ b/drivers/crypto/exynos-rng.c
@@ -0,0 +1,389 @@
+/*
+ * exynos-rng.c - Random Number Generator driver for the Exynos
+ *
+ * Copyright (c) 2017 Krzysztof Kozlowski <krzk@kernel.org>
+ *
+ * Loosely based on old driver from drivers/char/hw_random/exynos-rng.c:
+ * Copyright (C) 2012 Samsung Electronics
+ * Jonghwa Lee <jonghwa3.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <crypto/internal/rng.h>
+
+#define EXYNOS_RNG_CONTROL 0x0
+#define EXYNOS_RNG_STATUS 0x10
+#define EXYNOS_RNG_SEED_BASE 0x140
+#define EXYNOS_RNG_SEED(n) (EXYNOS_RNG_SEED_BASE + (n * 0x4))
+#define EXYNOS_RNG_OUT_BASE 0x160
+#define EXYNOS_RNG_OUT(n) (EXYNOS_RNG_OUT_BASE + (n * 0x4))
+
+/* EXYNOS_RNG_CONTROL bit fields */
+#define EXYNOS_RNG_CONTROL_START 0x18
+/* EXYNOS_RNG_STATUS bit fields */
+#define EXYNOS_RNG_STATUS_SEED_SETTING_DONE BIT(1)
+#define EXYNOS_RNG_STATUS_RNG_DONE BIT(5)
+
+/* Five seed and output registers, each 4 bytes */
+#define EXYNOS_RNG_SEED_REGS 5
+#define EXYNOS_RNG_SEED_SIZE (EXYNOS_RNG_SEED_REGS * 4)
+
+/*
+ * Driver re-seeds itself with generated random numbers to increase
+ * the randomness.
+ *
+ * Time for next re-seed in ms.
+ */
+#define EXYNOS_RNG_RESEED_TIME 100
+/*
+ * In polling mode, do not wait infinitely for the engine to finish the work.
+ */
+#define EXYNOS_RNG_WAIT_RETRIES 100
+
+/* Context for crypto */
+struct exynos_rng_ctx {
+ struct exynos_rng_dev *rng;
+};
+
+/* Device associated memory */
+struct exynos_rng_dev {
+ struct device *dev;
+ void __iomem *mem;
+ struct clk *clk;
+ /* Generated numbers stored for seeding during resume */
+ u8 seed_save[EXYNOS_RNG_SEED_SIZE];
+ unsigned int seed_save_len;
+ /* Time of last seeding in jiffies */
+ unsigned long last_seeding;
+};
+
+static struct exynos_rng_dev *exynos_rng_dev;
+
+static u32 exynos_rng_readl(struct exynos_rng_dev *rng, u32 offset)
+{
+ return readl_relaxed(rng->mem + offset);
+}
+
+static void exynos_rng_writel(struct exynos_rng_dev *rng, u32 val, u32 offset)
+{
+ writel_relaxed(val, rng->mem + offset);
+}
+
+static int exynos_rng_set_seed(struct exynos_rng_dev *rng,
+ const u8 *seed, unsigned int slen)
+{
+ u32 val;
+ int i;
+
+ /* Round seed length because loop iterates over full register size */
+ slen = ALIGN_DOWN(slen, 4);
+
+ if (slen < EXYNOS_RNG_SEED_SIZE)
+ return -EINVAL;
+
+ for (i = 0; i < slen ; i += 4) {
+ unsigned int seed_reg = (i / 4) % EXYNOS_RNG_SEED_REGS;
+
+ val = seed[i] << 24;
+ val |= seed[i + 1] << 16;
+ val |= seed[i + 2] << 8;
+ val |= seed[i + 3] << 0;
+
+ exynos_rng_writel(rng, val, EXYNOS_RNG_SEED(seed_reg));
+ }
+
+ val = exynos_rng_readl(rng, EXYNOS_RNG_STATUS);
+ if (!(val & EXYNOS_RNG_STATUS_SEED_SETTING_DONE)) {
+ dev_warn(rng->dev, "Seed setting not finished\n");
+ return -EIO;
+ }
+
+ rng->last_seeding = jiffies;
+
+ return 0;
+}
+
+/*
+ * Read from the output registers and copy the data into the 'dst' array,
+ * up to dlen bytes.
+ *
+ * Returns number of bytes actually stored in 'dst' (dlen
+ * or EXYNOS_RNG_SEED_SIZE).
+ */
+static unsigned int exynos_rng_copy_random(struct exynos_rng_dev *rng,
+ u8 *dst, unsigned int dlen)
+{
+ unsigned int cnt = 0;
+ int i, j;
+ u32 val;
+
+ for (j = 0; j < EXYNOS_RNG_SEED_REGS; j++) {
+ val = exynos_rng_readl(rng, EXYNOS_RNG_OUT(j));
+
+ for (i = 0; i < 4; i++) {
+ dst[cnt] = val & 0xff;
+ val >>= 8;
+ if (++cnt >= dlen)
+ return cnt;
+ }
+ }
+
+ return cnt;
+}
+
+/*
+ * Start the engine and poll for finish. Then read from output registers
+ * filling the 'dst' buffer up to 'dlen' bytes or up to size of generated
+ * random data (EXYNOS_RNG_SEED_SIZE).
+ *
+ * On success: return 0 and store the number of bytes read at the 'read' address.
+ * On error: return -ERRNO.
+ */
+static int exynos_rng_get_random(struct exynos_rng_dev *rng,
+ u8 *dst, unsigned int dlen,
+ unsigned int *read)
+{
+ int retry = EXYNOS_RNG_WAIT_RETRIES;
+
+ exynos_rng_writel(rng, EXYNOS_RNG_CONTROL_START,
+ EXYNOS_RNG_CONTROL);
+
+ while (!(exynos_rng_readl(rng,
+ EXYNOS_RNG_STATUS) & EXYNOS_RNG_STATUS_RNG_DONE) && --retry)
+ cpu_relax();
+
+ if (!retry)
+ return -ETIMEDOUT;
+
+ /* Clear status bit */
+ exynos_rng_writel(rng, EXYNOS_RNG_STATUS_RNG_DONE,
+ EXYNOS_RNG_STATUS);
+ *read = exynos_rng_copy_random(rng, dst, dlen);
+
+ return 0;
+}
+
+/* Re-seed itself from time to time */
+static void exynos_rng_reseed(struct exynos_rng_dev *rng)
+{
+ unsigned long next_seeding = rng->last_seeding + \
+ msecs_to_jiffies(EXYNOS_RNG_RESEED_TIME);
+ unsigned long now = jiffies;
+ unsigned int read = 0;
+ u8 seed[EXYNOS_RNG_SEED_SIZE];
+
+ if (time_before(now, next_seeding))
+ return;
+
+ if (exynos_rng_get_random(rng, seed, sizeof(seed), &read))
+ return;
+
+ exynos_rng_set_seed(rng, seed, read);
+}
+
+static int exynos_rng_generate(struct crypto_rng *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int dlen)
+{
+ struct exynos_rng_ctx *ctx = crypto_rng_ctx(tfm);
+ struct exynos_rng_dev *rng = ctx->rng;
+ unsigned int read = 0;
+ int ret;
+
+ ret = clk_prepare_enable(rng->clk);
+ if (ret)
+ return ret;
+
+ do {
+ ret = exynos_rng_get_random(rng, dst, dlen, &read);
+ if (ret)
+ break;
+
+ dlen -= read;
+ dst += read;
+
+ exynos_rng_reseed(rng);
+ } while (dlen > 0);
+
+ clk_disable_unprepare(rng->clk);
+
+ return ret;
+}
+
+static int exynos_rng_seed(struct crypto_rng *tfm, const u8 *seed,
+ unsigned int slen)
+{
+ struct exynos_rng_ctx *ctx = crypto_rng_ctx(tfm);
+ struct exynos_rng_dev *rng = ctx->rng;
+ int ret;
+
+ ret = clk_prepare_enable(rng->clk);
+ if (ret)
+ return ret;
+
+ ret = exynos_rng_set_seed(ctx->rng, seed, slen);
+
+ clk_disable_unprepare(rng->clk);
+
+ return ret;
+}
+
+static int exynos_rng_kcapi_init(struct crypto_tfm *tfm)
+{
+ struct exynos_rng_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ctx->rng = exynos_rng_dev;
+
+ return 0;
+}
+
+static struct rng_alg exynos_rng_alg = {
+ .generate = exynos_rng_generate,
+ .seed = exynos_rng_seed,
+ .seedsize = EXYNOS_RNG_SEED_SIZE,
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "exynos_rng",
+ .cra_priority = 100,
+ .cra_ctxsize = sizeof(struct exynos_rng_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = exynos_rng_kcapi_init,
+ }
+};
+
+static int exynos_rng_probe(struct platform_device *pdev)
+{
+ struct exynos_rng_dev *rng;
+ struct resource *res;
+ int ret;
+
+ if (exynos_rng_dev)
+ return -EEXIST;
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ rng->dev = &pdev->dev;
+ rng->clk = devm_clk_get(&pdev->dev, "secss");
+ if (IS_ERR(rng->clk)) {
+ dev_err(&pdev->dev, "Couldn't get clock.\n");
+ return PTR_ERR(rng->clk);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rng->mem = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rng->mem))
+ return PTR_ERR(rng->mem);
+
+ platform_set_drvdata(pdev, rng);
+
+ exynos_rng_dev = rng;
+
+ ret = crypto_register_rng(&exynos_rng_alg);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Couldn't register rng crypto alg: %d\n", ret);
+ exynos_rng_dev = NULL;
+ }
+
+ return ret;
+}
+
+static int exynos_rng_remove(struct platform_device *pdev)
+{
+ crypto_unregister_rng(&exynos_rng_alg);
+
+ exynos_rng_dev = NULL;
+
+ return 0;
+}
+
+static int __maybe_unused exynos_rng_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct exynos_rng_dev *rng = platform_get_drvdata(pdev);
+ int ret;
+
+ /* If we were never seeded then after resume it will be the same */
+ if (!rng->last_seeding)
+ return 0;
+
+ rng->seed_save_len = 0;
+ ret = clk_prepare_enable(rng->clk);
+ if (ret)
+ return ret;
+
+ /* Get new random numbers and store them for seeding on resume. */
+ exynos_rng_get_random(rng, rng->seed_save, sizeof(rng->seed_save),
+ &(rng->seed_save_len));
+ dev_dbg(rng->dev, "Stored %u bytes for seeding on system resume\n",
+ rng->seed_save_len);
+
+ clk_disable_unprepare(rng->clk);
+
+ return 0;
+}
+
+static int __maybe_unused exynos_rng_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct exynos_rng_dev *rng = platform_get_drvdata(pdev);
+ int ret;
+
+ /* Never seeded so nothing to do */
+ if (!rng->last_seeding)
+ return 0;
+
+ ret = clk_prepare_enable(rng->clk);
+ if (ret)
+ return ret;
+
+ ret = exynos_rng_set_seed(rng, rng->seed_save, rng->seed_save_len);
+
+ clk_disable_unprepare(rng->clk);
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_suspend,
+ exynos_rng_resume);
+
+static const struct of_device_id exynos_rng_dt_match[] = {
+ {
+ .compatible = "samsung,exynos4-rng",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, exynos_rng_dt_match);
+
+static struct platform_driver exynos_rng_driver = {
+ .driver = {
+ .name = "exynos-rng",
+ .pm = &exynos_rng_pm_ops,
+ .of_match_table = exynos_rng_dt_match,
+ },
+ .probe = exynos_rng_probe,
+ .remove = exynos_rng_remove,
+};
+
+module_platform_driver(exynos_rng_driver);
+
+MODULE_DESCRIPTION("Exynos H/W Random Number Generator driver");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
+MODULE_LICENSE("GPL");
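
The driver above registers as an "stdrng" crypto_rng rather than a hw_random device, so consumers go through the kernel crypto RNG API. A rough usage sketch, with error handling trimmed and not taken from the patch:

#include <linux/err.h>
#include <crypto/rng.h>

static int example_read_random(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	int ret;

	/* Picks the highest-priority "stdrng"; exynos_rng registers at 100. */
	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* Seed the generator with kernel entropy before first use. */
	ret = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng));
	if (!ret)
		ret = crypto_rng_get_bytes(rng, buf, len);

	crypto_free_rng(rng);
	return ret;
}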
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 7868765a70c5..771dd26c7076 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -806,7 +806,7 @@ static struct buffer_desc *chainup_buffers(struct device *dev,
void *ptr;
nbytes -= len;
- ptr = page_address(sg_page(sg)) + sg->offset;
+ ptr = sg_virt(sg);
next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
if (!next_buf) {
buf = NULL;
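
The ixp4xx hunk above is a straightforward cleanup: sg_virt() from <linux/scatterlist.h> is a small helper that performs the same computation as the open-coded expression it replaces, roughly:

/* Simplified form of the helper in <linux/scatterlist.h>. */
static inline void *sg_virt(struct scatterlist *sg)
{
	return page_address(sg_page(sg)) + sg->offset;
}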
diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c
index 3a47cdb8f0c8..9e845e866dec 100644
--- a/drivers/crypto/mediatek/mtk-aes.c
+++ b/drivers/crypto/mediatek/mtk-aes.c
@@ -19,13 +19,10 @@
#define AES_BUF_ORDER 2
#define AES_BUF_SIZE ((PAGE_SIZE << AES_BUF_ORDER) \
& ~(AES_BLOCK_SIZE - 1))
+#define AES_MAX_STATE_BUF_SIZE SIZE_IN_WORDS(AES_KEYSIZE_256 + \
+ AES_BLOCK_SIZE * 2)
+#define AES_MAX_CT_SIZE 6
-/* AES command token size */
-#define AES_CT_SIZE_ECB 2
-#define AES_CT_SIZE_CBC 3
-#define AES_CT_SIZE_CTR 3
-#define AES_CT_SIZE_GCM_OUT 5
-#define AES_CT_SIZE_GCM_IN 6
#define AES_CT_CTRL_HDR cpu_to_le32(0x00220000)
/* AES-CBC/ECB/CTR command token */
@@ -50,6 +47,8 @@
#define AES_TFM_128BITS cpu_to_le32(0xb << 16)
#define AES_TFM_192BITS cpu_to_le32(0xd << 16)
#define AES_TFM_256BITS cpu_to_le32(0xf << 16)
+#define AES_TFM_GHASH_DIGEST cpu_to_le32(0x2 << 21)
+#define AES_TFM_GHASH cpu_to_le32(0x4 << 23)
/* AES transform information word 1 fields */
#define AES_TFM_ECB cpu_to_le32(0x0 << 0)
#define AES_TFM_CBC cpu_to_le32(0x1 << 0)
@@ -59,10 +58,9 @@
#define AES_TFM_FULL_IV cpu_to_le32(0xf << 5) /* using IV 0-3 */
#define AES_TFM_IV_CTR_MODE cpu_to_le32(0x1 << 10)
#define AES_TFM_ENC_HASH cpu_to_le32(0x1 << 17)
-#define AES_TFM_GHASH_DIG cpu_to_le32(0x2 << 21)
-#define AES_TFM_GHASH cpu_to_le32(0x4 << 23)
/* AES flags */
+#define AES_FLAGS_CIPHER_MSK GENMASK(2, 0)
#define AES_FLAGS_ECB BIT(0)
#define AES_FLAGS_CBC BIT(1)
#define AES_FLAGS_CTR BIT(2)
@@ -70,19 +68,15 @@
#define AES_FLAGS_ENCRYPT BIT(4)
#define AES_FLAGS_BUSY BIT(5)
+#define AES_AUTH_TAG_ERR cpu_to_le32(BIT(26))
+
/**
- * Command token(CT) is a set of hardware instructions that
- * are used to control engine's processing flow of AES.
- *
- * Transform information(TFM) is used to define AES state and
- * contains all keys and initial vectors.
- *
- * The engine requires CT and TFM to do:
- * - Commands decoding and control of the engine's data path.
- * - Coordinating hardware data fetch and store operations.
- * - Result token construction and output.
+ * mtk_aes_info - hardware information of AES
+ * @cmd: command token, hardware instructions
+ * @tfm: transform state of the cipher algorithm.
+ * @state: contains keys and initial vectors.
*
- * Memory map of GCM's TFM:
+ * Memory layout of GCM buffer:
* /-----------\
* | AES KEY | 128/196/256 bits
* |-----------|
@@ -90,14 +84,16 @@
* |-----------|
* | IVs | 4 * 4 bytes
* \-----------/
+ *
+ * The engine requires all of this information to do:
+ * - Commands decoding and control of the engine's data path.
+ * - Coordinating hardware data fetch and store operations.
+ * - Result token construction and output.
*/
-struct mtk_aes_ct {
- __le32 cmd[AES_CT_SIZE_GCM_IN];
-};
-
-struct mtk_aes_tfm {
- __le32 ctrl[2];
- __le32 state[SIZE_IN_WORDS(AES_KEYSIZE_256 + AES_BLOCK_SIZE * 2)];
+struct mtk_aes_info {
+ __le32 cmd[AES_MAX_CT_SIZE];
+ __le32 tfm[2];
+ __le32 state[AES_MAX_STATE_BUF_SIZE];
};
struct mtk_aes_reqctx {
@@ -107,11 +103,12 @@ struct mtk_aes_reqctx {
struct mtk_aes_base_ctx {
struct mtk_cryp *cryp;
u32 keylen;
+ __le32 keymode;
+
mtk_aes_fn start;
- struct mtk_aes_ct ct;
+ struct mtk_aes_info info;
dma_addr_t ct_dma;
- struct mtk_aes_tfm tfm;
dma_addr_t tfm_dma;
__le32 ct_hdr;
@@ -248,6 +245,33 @@ static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
sg->length += dma->remainder;
}
+static inline void mtk_aes_write_state_le(__le32 *dst, const u32 *src, u32 size)
+{
+ int i;
+
+ for (i = 0; i < SIZE_IN_WORDS(size); i++)
+ dst[i] = cpu_to_le32(src[i]);
+}
+
+static inline void mtk_aes_write_state_be(__be32 *dst, const u32 *src, u32 size)
+{
+ int i;
+
+ for (i = 0; i < SIZE_IN_WORDS(size); i++)
+ dst[i] = cpu_to_be32(src[i]);
+}
+
+static inline int mtk_aes_complete(struct mtk_cryp *cryp,
+ struct mtk_aes_rec *aes,
+ int err)
+{
+ aes->flags &= ~AES_FLAGS_BUSY;
+ aes->areq->complete(aes->areq, err);
+ /* Handle new request */
+ tasklet_schedule(&aes->queue_task);
+ return err;
+}
+
/*
* Write descriptors for processing. This will configure the engine, load
* the transform information and then start the packet processing.
@@ -262,7 +286,7 @@ static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
/* Write command descriptors */
for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
- cmd = ring->cmd_base + ring->cmd_pos;
+ cmd = ring->cmd_next;
cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
cmd->buf = cpu_to_le32(sg_dma_address(ssg));
@@ -274,25 +298,30 @@ static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
}
- if (++ring->cmd_pos == MTK_DESC_NUM)
- ring->cmd_pos = 0;
+ /* Shift ring buffer and check boundary */
+ if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
+ ring->cmd_next = ring->cmd_base;
}
cmd->hdr |= MTK_DESC_LAST;
/* Prepare result descriptors */
for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
- res = ring->res_base + ring->res_pos;
+ res = ring->res_next;
res->hdr = MTK_DESC_BUF_LEN(dsg->length);
res->buf = cpu_to_le32(sg_dma_address(dsg));
if (nents == 0)
res->hdr |= MTK_DESC_FIRST;
- if (++ring->res_pos == MTK_DESC_NUM)
- ring->res_pos = 0;
+ /* Shift ring buffer and check boundary */
+ if (++ring->res_next == ring->res_base + MTK_DESC_NUM)
+ ring->res_next = ring->res_base;
}
res->hdr |= MTK_DESC_LAST;
+ /* Pointer to current result descriptor */
+ ring->res_prev = res;
+
/* Prepare enough space for authenticated tag */
if (aes->flags & AES_FLAGS_GCM)
res->hdr += AES_BLOCK_SIZE;
@@ -313,9 +342,7 @@ static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
struct mtk_aes_base_ctx *ctx = aes->ctx;
- dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->ct),
- DMA_TO_DEVICE);
- dma_unmap_single(cryp->dev, ctx->tfm_dma, sizeof(ctx->tfm),
+ dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
DMA_TO_DEVICE);
if (aes->src.sg == aes->dst.sg) {
@@ -346,16 +373,14 @@ static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
struct mtk_aes_base_ctx *ctx = aes->ctx;
+ struct mtk_aes_info *info = &ctx->info;
- ctx->ct_dma = dma_map_single(cryp->dev, &ctx->ct, sizeof(ctx->ct),
+ ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
- return -EINVAL;
+ goto exit;
- ctx->tfm_dma = dma_map_single(cryp->dev, &ctx->tfm, sizeof(ctx->tfm),
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(cryp->dev, ctx->tfm_dma)))
- goto tfm_map_err;
+ ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd);
if (aes->src.sg == aes->dst.sg) {
aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
@@ -382,13 +407,9 @@ static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
return mtk_aes_xmit(cryp, aes);
sg_map_err:
- dma_unmap_single(cryp->dev, ctx->tfm_dma, sizeof(ctx->tfm),
- DMA_TO_DEVICE);
-tfm_map_err:
- dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->ct),
- DMA_TO_DEVICE);
-
- return -EINVAL;
+ dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE);
+exit:
+ return mtk_aes_complete(cryp, aes, -EINVAL);
}
/* Initialize transform information of CBC/ECB/CTR mode */
@@ -397,50 +418,43 @@ static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
{
struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
struct mtk_aes_base_ctx *ctx = aes->ctx;
+ struct mtk_aes_info *info = &ctx->info;
+ u32 cnt = 0;
ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
- ctx->ct.cmd[0] = AES_CMD0 | cpu_to_le32(len);
- ctx->ct.cmd[1] = AES_CMD1;
+ info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len);
+ info->cmd[cnt++] = AES_CMD1;
+ info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode;
if (aes->flags & AES_FLAGS_ENCRYPT)
- ctx->tfm.ctrl[0] = AES_TFM_BASIC_OUT;
+ info->tfm[0] |= AES_TFM_BASIC_OUT;
else
- ctx->tfm.ctrl[0] = AES_TFM_BASIC_IN;
+ info->tfm[0] |= AES_TFM_BASIC_IN;
- if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_128))
- ctx->tfm.ctrl[0] |= AES_TFM_128BITS;
- else if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_256))
- ctx->tfm.ctrl[0] |= AES_TFM_256BITS;
- else
- ctx->tfm.ctrl[0] |= AES_TFM_192BITS;
-
- if (aes->flags & AES_FLAGS_CBC) {
- const u32 *iv = (const u32 *)req->info;
- u32 *iv_state = ctx->tfm.state + ctx->keylen;
- int i;
-
- ctx->tfm.ctrl[0] |= AES_TFM_SIZE(ctx->keylen +
- SIZE_IN_WORDS(AES_BLOCK_SIZE));
- ctx->tfm.ctrl[1] = AES_TFM_CBC | AES_TFM_FULL_IV;
-
- for (i = 0; i < SIZE_IN_WORDS(AES_BLOCK_SIZE); i++)
- iv_state[i] = cpu_to_le32(iv[i]);
-
- ctx->ct.cmd[2] = AES_CMD2;
- ctx->ct_size = AES_CT_SIZE_CBC;
- } else if (aes->flags & AES_FLAGS_ECB) {
- ctx->tfm.ctrl[0] |= AES_TFM_SIZE(ctx->keylen);
- ctx->tfm.ctrl[1] = AES_TFM_ECB;
-
- ctx->ct_size = AES_CT_SIZE_ECB;
- } else if (aes->flags & AES_FLAGS_CTR) {
- ctx->tfm.ctrl[0] |= AES_TFM_SIZE(ctx->keylen +
- SIZE_IN_WORDS(AES_BLOCK_SIZE));
- ctx->tfm.ctrl[1] = AES_TFM_CTR_LOAD | AES_TFM_FULL_IV;
-
- ctx->ct.cmd[2] = AES_CMD2;
- ctx->ct_size = AES_CT_SIZE_CTR;
+ switch (aes->flags & AES_FLAGS_CIPHER_MSK) {
+ case AES_FLAGS_CBC:
+ info->tfm[1] = AES_TFM_CBC;
+ break;
+ case AES_FLAGS_ECB:
+ info->tfm[1] = AES_TFM_ECB;
+ goto ecb;
+ case AES_FLAGS_CTR:
+ info->tfm[1] = AES_TFM_CTR_LOAD;
+ goto ctr;
+
+ default:
+ /* Should not happen... */
+ return;
}
+
+ mtk_aes_write_state_le(info->state + ctx->keylen, req->info,
+ AES_BLOCK_SIZE);
+ctr:
+ info->tfm[0] += AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE));
+ info->tfm[1] |= AES_TFM_FULL_IV;
+ info->cmd[cnt++] = AES_CMD2;
+ecb:
+ ctx->ct_size = cnt;
}
static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
@@ -465,7 +479,7 @@ static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
padlen = mtk_aes_padlen(len);
if (len + padlen > AES_BUF_SIZE)
- return -ENOMEM;
+ return mtk_aes_complete(cryp, aes, -ENOMEM);
if (!src_aligned) {
sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
@@ -525,13 +539,10 @@ static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
return ctx->start(cryp, aes);
}
-static int mtk_aes_complete(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
+static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
+ struct mtk_aes_rec *aes)
{
- aes->flags &= ~AES_FLAGS_BUSY;
- aes->areq->complete(aes->areq, 0);
-
- /* Handle new request */
- return mtk_aes_handle_queue(cryp, aes->id, NULL);
+ return mtk_aes_complete(cryp, aes, 0);
}
static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
@@ -540,7 +551,7 @@ static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);
mtk_aes_set_mode(aes, rctx);
- aes->resume = mtk_aes_complete;
+ aes->resume = mtk_aes_transfer_complete;
return mtk_aes_dma(cryp, aes, req->src, req->dst, req->nbytes);
}
@@ -557,15 +568,14 @@ static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
struct scatterlist *src, *dst;
- int i;
- u32 start, end, ctr, blocks, *iv_state;
+ u32 start, end, ctr, blocks;
size_t datalen;
bool fragmented = false;
/* Check for transfer completion. */
cctx->offset += aes->total;
if (cctx->offset >= req->nbytes)
- return mtk_aes_complete(cryp, aes);
+ return mtk_aes_transfer_complete(cryp, aes);
/* Compute data length. */
datalen = req->nbytes - cctx->offset;
@@ -587,9 +597,8 @@ static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));
/* Write IVs into transform state buffer. */
- iv_state = ctx->tfm.state + ctx->keylen;
- for (i = 0; i < SIZE_IN_WORDS(AES_BLOCK_SIZE); i++)
- iv_state[i] = cpu_to_le32(cctx->iv[i]);
+ mtk_aes_write_state_le(ctx->info.state + ctx->keylen, cctx->iv,
+ AES_BLOCK_SIZE);
if (unlikely(fragmented)) {
/*
@@ -599,7 +608,6 @@ static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
cctx->iv[3] = cpu_to_be32(ctr);
crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE);
}
- aes->resume = mtk_aes_ctr_transfer;
return mtk_aes_dma(cryp, aes, src, dst, datalen);
}
@@ -615,6 +623,7 @@ static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
memcpy(cctx->iv, req->info, AES_BLOCK_SIZE);
cctx->offset = 0;
aes->total = 0;
+ aes->resume = mtk_aes_ctr_transfer;
return mtk_aes_ctr_transfer(cryp, aes);
}
@@ -624,21 +633,25 @@ static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
const u8 *key, u32 keylen)
{
struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- const u32 *aes_key = (const u32 *)key;
- u32 *key_state = ctx->tfm.state;
- int i;
- if (keylen != AES_KEYSIZE_128 &&
- keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_256) {
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ ctx->keymode = AES_TFM_128BITS;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->keymode = AES_TFM_192BITS;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->keymode = AES_TFM_256BITS;
+ break;
+
+ default:
crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
ctx->keylen = SIZE_IN_WORDS(keylen);
-
- for (i = 0; i < ctx->keylen; i++)
- key_state[i] = cpu_to_le32(aes_key[i]);
+ mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen);
return 0;
}
@@ -789,6 +802,19 @@ mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
return container_of(ctx, struct mtk_aes_gcm_ctx, base);
}
+/*
+ * Engine will verify and compare tag automatically, so we just need
+ * to check returned status which stored in the result descriptor.
+ */
+static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
+ struct mtk_aes_rec *aes)
+{
+ u32 status = cryp->ring[aes->id]->res_prev->ct;
+
+ return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
+ -EBADMSG : 0);
+}
+
/* Initialize transform information of GCM mode */
static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
struct mtk_aes_rec *aes,
@@ -797,45 +823,35 @@ static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
struct aead_request *req = aead_request_cast(aes->areq);
struct mtk_aes_base_ctx *ctx = aes->ctx;
struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
- const u32 *iv = (const u32 *)req->iv;
- u32 *iv_state = ctx->tfm.state + ctx->keylen +
- SIZE_IN_WORDS(AES_BLOCK_SIZE);
+ struct mtk_aes_info *info = &ctx->info;
u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
- int i;
+ u32 cnt = 0;
ctx->ct_hdr = AES_CT_CTRL_HDR | len;
- ctx->ct.cmd[0] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
- ctx->ct.cmd[1] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
- ctx->ct.cmd[2] = AES_GCM_CMD2;
- ctx->ct.cmd[3] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);
+ info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
+ info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
+ info->cmd[cnt++] = AES_GCM_CMD2;
+ info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);
if (aes->flags & AES_FLAGS_ENCRYPT) {
- ctx->ct.cmd[4] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
- ctx->ct_size = AES_CT_SIZE_GCM_OUT;
- ctx->tfm.ctrl[0] = AES_TFM_GCM_OUT;
+ info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
+ info->tfm[0] = AES_TFM_GCM_OUT;
} else {
- ctx->ct.cmd[4] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
- ctx->ct.cmd[5] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
- ctx->ct_size = AES_CT_SIZE_GCM_IN;
- ctx->tfm.ctrl[0] = AES_TFM_GCM_IN;
+ info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
+ info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
+ info->tfm[0] = AES_TFM_GCM_IN;
}
+ ctx->ct_size = cnt;
- if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_128))
- ctx->tfm.ctrl[0] |= AES_TFM_128BITS;
- else if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_256))
- ctx->tfm.ctrl[0] |= AES_TFM_256BITS;
- else
- ctx->tfm.ctrl[0] |= AES_TFM_192BITS;
+ info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE(
+ ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) |
+ ctx->keymode;
+ info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
+ AES_TFM_ENC_HASH;
- ctx->tfm.ctrl[0] |= AES_TFM_GHASH_DIG | AES_TFM_GHASH |
- AES_TFM_SIZE(ctx->keylen + SIZE_IN_WORDS(
- AES_BLOCK_SIZE + ivsize));
- ctx->tfm.ctrl[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE |
- AES_TFM_3IV | AES_TFM_ENC_HASH;
-
- for (i = 0; i < SIZE_IN_WORDS(ivsize); i++)
- iv_state[i] = cpu_to_le32(iv[i]);
+ mtk_aes_write_state_le(info->state + ctx->keylen + SIZE_IN_WORDS(
+ AES_BLOCK_SIZE), (const u32 *)req->iv, ivsize);
}
static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
@@ -856,7 +872,7 @@ static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
if (!src_aligned || !dst_aligned) {
if (aes->total > AES_BUF_SIZE)
- return -ENOMEM;
+ return mtk_aes_complete(cryp, aes, -ENOMEM);
if (!src_aligned) {
sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
@@ -892,6 +908,8 @@ static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
if (aes->flags & AES_FLAGS_ENCRYPT) {
u32 tag[4];
+
+ aes->resume = mtk_aes_transfer_complete;
/* Compute total process length. */
aes->total = len + gctx->authsize;
/* Compute text length. */
@@ -899,10 +917,10 @@ static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
/* Hardware will append authenticated tag to output buffer */
scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
} else {
+ aes->resume = mtk_aes_gcm_tag_verify;
aes->total = len;
gctx->textlen = req->cryptlen - gctx->authsize;
}
- aes->resume = mtk_aes_complete;
return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
}
@@ -915,7 +933,7 @@ static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
rctx->mode = AES_FLAGS_GCM | mode;
return mtk_aes_handle_queue(ctx->cryp, !!(mode & AES_FLAGS_ENCRYPT),
- &req->base);
+ &req->base);
}
static void mtk_gcm_setkey_done(struct crypto_async_request *req, int err)
@@ -949,24 +967,26 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
struct scatterlist sg[1];
struct skcipher_request req;
} *data;
- const u32 *aes_key;
- u32 *key_state, *hash_state;
- int err, i;
+ int err;
- if (keylen != AES_KEYSIZE_256 &&
- keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_128) {
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ ctx->keymode = AES_TFM_128BITS;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->keymode = AES_TFM_192BITS;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->keymode = AES_TFM_256BITS;
+ break;
+
+ default:
crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
- key_state = ctx->tfm.state;
- aes_key = (u32 *)key;
ctx->keylen = SIZE_IN_WORDS(keylen);
- for (i = 0; i < ctx->keylen; i++)
- ctx->tfm.state[i] = cpu_to_le32(aes_key[i]);
-
/* Same as crypto_gcm_setkey() from crypto/gcm.c */
crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
@@ -1001,10 +1021,11 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
if (err)
goto out;
- hash_state = key_state + ctx->keylen;
-
- for (i = 0; i < 4; i++)
- hash_state[i] = cpu_to_be32(data->hash[i]);
+ /* Write key into state buffer */
+ mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen);
+ /* Write key(H) into state buffer */
+ mtk_aes_write_state_be(ctx->info.state + ctx->keylen, data->hash,
+ AES_BLOCK_SIZE);
out:
kzfree(data);
return err;
@@ -1092,58 +1113,36 @@ static struct aead_alg aes_gcm_alg = {
},
};
-static void mtk_aes_enc_task(unsigned long data)
+static void mtk_aes_queue_task(unsigned long data)
{
- struct mtk_cryp *cryp = (struct mtk_cryp *)data;
- struct mtk_aes_rec *aes = cryp->aes[0];
+ struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
- mtk_aes_unmap(cryp, aes);
- aes->resume(cryp, aes);
+ mtk_aes_handle_queue(aes->cryp, aes->id, NULL);
}
-static void mtk_aes_dec_task(unsigned long data)
+static void mtk_aes_done_task(unsigned long data)
{
- struct mtk_cryp *cryp = (struct mtk_cryp *)data;
- struct mtk_aes_rec *aes = cryp->aes[1];
+ struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
+ struct mtk_cryp *cryp = aes->cryp;
mtk_aes_unmap(cryp, aes);
aes->resume(cryp, aes);
}
-static irqreturn_t mtk_aes_enc_irq(int irq, void *dev_id)
+static irqreturn_t mtk_aes_irq(int irq, void *dev_id)
{
- struct mtk_cryp *cryp = (struct mtk_cryp *)dev_id;
- struct mtk_aes_rec *aes = cryp->aes[0];
- u32 val = mtk_aes_read(cryp, RDR_STAT(RING0));
+ struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id;
+ struct mtk_cryp *cryp = aes->cryp;
+ u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id));
- mtk_aes_write(cryp, RDR_STAT(RING0), val);
+ mtk_aes_write(cryp, RDR_STAT(aes->id), val);
if (likely(AES_FLAGS_BUSY & aes->flags)) {
- mtk_aes_write(cryp, RDR_PROC_COUNT(RING0), MTK_CNT_RST);
- mtk_aes_write(cryp, RDR_THRESH(RING0),
+ mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST);
+ mtk_aes_write(cryp, RDR_THRESH(aes->id),
MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
- tasklet_schedule(&aes->task);
- } else {
- dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
- }
- return IRQ_HANDLED;
-}
-
-static irqreturn_t mtk_aes_dec_irq(int irq, void *dev_id)
-{
- struct mtk_cryp *cryp = (struct mtk_cryp *)dev_id;
- struct mtk_aes_rec *aes = cryp->aes[1];
- u32 val = mtk_aes_read(cryp, RDR_STAT(RING1));
-
- mtk_aes_write(cryp, RDR_STAT(RING1), val);
-
- if (likely(AES_FLAGS_BUSY & aes->flags)) {
- mtk_aes_write(cryp, RDR_PROC_COUNT(RING1), MTK_CNT_RST);
- mtk_aes_write(cryp, RDR_THRESH(RING1),
- MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
-
- tasklet_schedule(&aes->task);
+ tasklet_schedule(&aes->done_task);
} else {
dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
}
@@ -1171,14 +1170,20 @@ static int mtk_aes_record_init(struct mtk_cryp *cryp)
if (!aes[i]->buf)
goto err_cleanup;
- aes[i]->id = i;
+ aes[i]->cryp = cryp;
spin_lock_init(&aes[i]->lock);
crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);
+
+ tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task,
+ (unsigned long)aes[i]);
+ tasklet_init(&aes[i]->done_task, mtk_aes_done_task,
+ (unsigned long)aes[i]);
}
- tasklet_init(&aes[0]->task, mtk_aes_enc_task, (unsigned long)cryp);
- tasklet_init(&aes[1]->task, mtk_aes_dec_task, (unsigned long)cryp);
+ /* Link to ring0 and ring1 respectively */
+ aes[0]->id = MTK_RING0;
+ aes[1]->id = MTK_RING1;
return 0;
@@ -1196,7 +1201,9 @@ static void mtk_aes_record_free(struct mtk_cryp *cryp)
int i;
for (i = 0; i < MTK_REC_NUM; i++) {
- tasklet_kill(&cryp->aes[i]->task);
+ tasklet_kill(&cryp->aes[i]->done_task);
+ tasklet_kill(&cryp->aes[i]->queue_task);
+
free_page((unsigned long)cryp->aes[i]->buf);
kfree(cryp->aes[i]);
}
@@ -1246,25 +1253,23 @@ int mtk_cipher_alg_register(struct mtk_cryp *cryp)
if (ret)
goto err_record;
- /* Ring0 is use by encryption record */
- ret = devm_request_irq(cryp->dev, cryp->irq[RING0], mtk_aes_enc_irq,
- IRQF_TRIGGER_LOW, "mtk-aes", cryp);
+ ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq,
+ 0, "mtk-aes", cryp->aes[0]);
if (ret) {
- dev_err(cryp->dev, "unable to request AES encryption irq.\n");
+ dev_err(cryp->dev, "unable to request AES irq.\n");
goto err_res;
}
- /* Ring1 is use by decryption record */
- ret = devm_request_irq(cryp->dev, cryp->irq[RING1], mtk_aes_dec_irq,
- IRQF_TRIGGER_LOW, "mtk-aes", cryp);
+ ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq,
+ 0, "mtk-aes", cryp->aes[1]);
if (ret) {
- dev_err(cryp->dev, "unable to request AES decryption irq.\n");
+ dev_err(cryp->dev, "unable to request AES irq.\n");
goto err_res;
}
/* Enable ring0 and ring1 interrupt */
- mtk_aes_write(cryp, AIC_ENABLE_SET(RING0), MTK_IRQ_RDR0);
- mtk_aes_write(cryp, AIC_ENABLE_SET(RING1), MTK_IRQ_RDR1);
+ mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING0), MTK_IRQ_RDR0);
+ mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING1), MTK_IRQ_RDR1);
spin_lock(&mtk_aes.lock);
list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
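
A large part of the mtk-aes.c rework above replaces the cmd_pos/res_pos indices with cmd_next/res_next pointers that wrap at the end of the descriptor array. A generic, self-contained sketch of that pointer-walking pattern, using illustrative names rather than the driver's structures:

#define EXAMPLE_DESC_NUM 512			/* ring size, illustrative */

struct example_desc {
	unsigned int hdr;
};

struct example_ring {
	struct example_desc *base;		/* start of the descriptor array */
	struct example_desc *next;		/* slot used by the next request */
};

static struct example_desc *example_ring_next(struct example_ring *ring)
{
	struct example_desc *desc = ring->next;

	/* Advance and wrap in one step instead of maintaining an index. */
	if (++ring->next == ring->base + EXAMPLE_DESC_NUM)
		ring->next = ring->base;

	return desc;
}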
diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
index a9c713d4c733..b6ecc288b540 100644
--- a/drivers/crypto/mediatek/mtk-platform.c
+++ b/drivers/crypto/mediatek/mtk-platform.c
@@ -334,7 +334,7 @@ static int mtk_packet_engine_setup(struct mtk_cryp *cryp)
/* Enable the 4 rings for the packet engines. */
mtk_desc_ring_link(cryp, 0xf);
- for (i = 0; i < RING_MAX; i++) {
+ for (i = 0; i < MTK_RING_MAX; i++) {
mtk_cmd_desc_ring_setup(cryp, i, &cap);
mtk_res_desc_ring_setup(cryp, i, &cap);
}
@@ -359,7 +359,7 @@ static int mtk_aic_cap_check(struct mtk_cryp *cryp, int hw)
{
u32 val;
- if (hw == RING_MAX)
+ if (hw == MTK_RING_MAX)
val = readl(cryp->base + AIC_G_VERSION);
else
val = readl(cryp->base + AIC_VERSION(hw));
@@ -368,7 +368,7 @@ static int mtk_aic_cap_check(struct mtk_cryp *cryp, int hw)
if (val != MTK_AIC_VER11 && val != MTK_AIC_VER12)
return -ENXIO;
- if (hw == RING_MAX)
+ if (hw == MTK_RING_MAX)
val = readl(cryp->base + AIC_G_OPTIONS);
else
val = readl(cryp->base + AIC_OPTIONS(hw));
@@ -389,7 +389,7 @@ static int mtk_aic_init(struct mtk_cryp *cryp, int hw)
return err;
/* Disable all interrupts and set initial configuration */
- if (hw == RING_MAX) {
+ if (hw == MTK_RING_MAX) {
writel(0, cryp->base + AIC_G_ENABLE_CTRL);
writel(0, cryp->base + AIC_G_POL_CTRL);
writel(0, cryp->base + AIC_G_TYPE_CTRL);
@@ -431,7 +431,7 @@ static void mtk_desc_dma_free(struct mtk_cryp *cryp)
{
int i;
- for (i = 0; i < RING_MAX; i++) {
+ for (i = 0; i < MTK_RING_MAX; i++) {
dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
cryp->ring[i]->res_base,
cryp->ring[i]->res_dma);
@@ -447,7 +447,7 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
struct mtk_ring **ring = cryp->ring;
int i, err = ENOMEM;
- for (i = 0; i < RING_MAX; i++) {
+ for (i = 0; i < MTK_RING_MAX; i++) {
ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL);
if (!ring[i])
goto err_cleanup;
@@ -465,6 +465,9 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
GFP_KERNEL);
if (!ring[i]->res_base)
goto err_cleanup;
+
+ ring[i]->cmd_next = ring[i]->cmd_base;
+ ring[i]->res_next = ring[i]->res_base;
}
return 0;
diff --git a/drivers/crypto/mediatek/mtk-platform.h b/drivers/crypto/mediatek/mtk-platform.h
index ed6d8717f7f4..303c152dc931 100644
--- a/drivers/crypto/mediatek/mtk-platform.h
+++ b/drivers/crypto/mediatek/mtk-platform.h
@@ -38,14 +38,14 @@
* Ring 2/3 are used by SHA.
*/
enum {
- RING0 = 0,
- RING1,
- RING2,
- RING3,
- RING_MAX,
+ MTK_RING0,
+ MTK_RING1,
+ MTK_RING2,
+ MTK_RING3,
+ MTK_RING_MAX
};
-#define MTK_REC_NUM (RING_MAX / 2)
+#define MTK_REC_NUM (MTK_RING_MAX / 2)
#define MTK_IRQ_NUM 5
/**
@@ -84,11 +84,12 @@ struct mtk_desc {
/**
* struct mtk_ring - Descriptor ring
* @cmd_base: pointer to command descriptor ring base
+ * @cmd_next: pointer to the next command descriptor
* @cmd_dma: DMA address of command descriptor ring
- * @cmd_pos: current position in the command descriptor ring
* @res_base: pointer to result descriptor ring base
+ * @res_next: pointer to the next result descriptor
+ * @res_prev: pointer to the previous result descriptor
* @res_dma: DMA address of result descriptor ring
- * @res_pos: current position in the result descriptor ring
*
* A descriptor ring is a circular buffer that is used to manage
* one or more descriptors. There are two type of descriptor rings;
@@ -96,11 +97,12 @@ struct mtk_desc {
*/
struct mtk_ring {
struct mtk_desc *cmd_base;
+ struct mtk_desc *cmd_next;
dma_addr_t cmd_dma;
- u32 cmd_pos;
struct mtk_desc *res_base;
+ struct mtk_desc *res_next;
+ struct mtk_desc *res_prev;
dma_addr_t res_dma;
- u32 res_pos;
};
/**
@@ -125,9 +127,11 @@ typedef int (*mtk_aes_fn)(struct mtk_cryp *cryp, struct mtk_aes_rec *aes);
/**
* struct mtk_aes_rec - AES operation record
+ * @cryp: pointer to Cryptographic device
* @queue: crypto request queue
* @areq: pointer to async request
- * @task: the tasklet is use in AES interrupt
+ * @done_task: the tasklet used in the AES interrupt handler
+ * @queue_task: the tasklet used to dequeue queued requests
* @ctx: pointer to current context
* @src: the structure that holds source sg list info
* @dst: the structure that holds destination sg list info
@@ -136,16 +140,18 @@ typedef int (*mtk_aes_fn)(struct mtk_cryp *cryp, struct mtk_aes_rec *aes);
* @resume: pointer to resume function
* @total: request buffer length
* @buf: pointer to page buffer
- * @id: record identification
+ * @id: the ring currently used by this record
* @flags: it's describing AES operation state
* @lock: the async queue lock
*
* Structure used to record AES execution state.
*/
struct mtk_aes_rec {
+ struct mtk_cryp *cryp;
struct crypto_queue queue;
struct crypto_async_request *areq;
- struct tasklet_struct task;
+ struct tasklet_struct done_task;
+ struct tasklet_struct queue_task;
struct mtk_aes_base_ctx *ctx;
struct mtk_aes_dma src;
struct mtk_aes_dma dst;
@@ -166,19 +172,23 @@ struct mtk_aes_rec {
/**
* struct mtk_sha_rec - SHA operation record
+ * @cryp: pointer to Cryptographic device
* @queue: crypto request queue
* @req: pointer to ahash request
- * @task: the tasklet is use in SHA interrupt
- * @id: record identification
+ * @done_task: the tasklet used in the SHA interrupt handler
+ * @queue_task: the tasklet used to dequeue queued requests
+ * @id: the ring currently used by this record
* @flags: it's describing SHA operation state
- * @lock: the ablkcipher queue lock
+ * @lock: the async queue lock
*
* Structure used to record SHA execution state.
*/
struct mtk_sha_rec {
+ struct mtk_cryp *cryp;
struct crypto_queue queue;
struct ahash_request *req;
- struct tasklet_struct task;
+ struct tasklet_struct done_task;
+ struct tasklet_struct queue_task;
u8 id;
unsigned long flags;
@@ -193,13 +203,11 @@ struct mtk_sha_rec {
* @clk_ethif: pointer to ethif clock
* @clk_cryp: pointer to crypto clock
* @irq: global system and rings IRQ
- * @ring: pointer to execution state of AES
- * @aes: pointer to execution state of SHA
- * @sha: each execution record map to a ring
+ * @ring: pointer to descriptor rings
+ * @aes: pointer to operation record of AES
+ * @sha: pointer to operation record of SHA
* @aes_list: device list of AES
* @sha_list: device list of SHA
- * @tmp: pointer to temporary buffer for internal use
- * @tmp_dma: DMA address of temporary buffer
* @rec: it's used to select SHA record for tfm
*
* Structure storing cryptographic device information.
@@ -211,15 +219,13 @@ struct mtk_cryp {
struct clk *clk_cryp;
int irq[MTK_IRQ_NUM];
- struct mtk_ring *ring[RING_MAX];
+ struct mtk_ring *ring[MTK_RING_MAX];
struct mtk_aes_rec *aes[MTK_REC_NUM];
struct mtk_sha_rec *sha[MTK_REC_NUM];
struct list_head aes_list;
struct list_head sha_list;
- void *tmp;
- dma_addr_t tmp_dma;
bool rec;
};
diff --git a/drivers/crypto/mediatek/mtk-sha.c b/drivers/crypto/mediatek/mtk-sha.c
index 55e3805fba07..2226f12d1c7a 100644
--- a/drivers/crypto/mediatek/mtk-sha.c
+++ b/drivers/crypto/mediatek/mtk-sha.c
@@ -17,13 +17,13 @@
#define SHA_ALIGN_MSK (sizeof(u32) - 1)
#define SHA_QUEUE_SIZE 512
-#define SHA_TMP_BUF_SIZE 512
#define SHA_BUF_SIZE ((u32)PAGE_SIZE)
#define SHA_OP_UPDATE 1
#define SHA_OP_FINAL 2
#define SHA_DATA_LEN_MSK cpu_to_le32(GENMASK(16, 0))
+#define SHA_MAX_DIGEST_BUF_SIZE 32
/* SHA command token */
#define SHA_CT_SIZE 5
@@ -34,7 +34,6 @@
/* SHA transform information */
#define SHA_TFM_HASH cpu_to_le32(0x2 << 0)
-#define SHA_TFM_INNER_DIG cpu_to_le32(0x1 << 21)
#define SHA_TFM_SIZE(x) cpu_to_le32((x) << 8)
#define SHA_TFM_START cpu_to_le32(0x1 << 4)
#define SHA_TFM_CONTINUE cpu_to_le32(0x1 << 5)
@@ -61,31 +60,17 @@
#define SHA_FLAGS_PAD BIT(10)
/**
- * mtk_sha_ct is a set of hardware instructions(command token)
- * that are used to control engine's processing flow of SHA,
- * and it contains the first two words of transform state.
+ * mtk_sha_info - hardware information of SHA
+ * @ctrl/@cmd: command token, hardware instructions
+ * @tfm: transform state of the hash algorithm.
+ * @digest: buffer that receives the output digest.
+ *
*/
-struct mtk_sha_ct {
+struct mtk_sha_info {
__le32 ctrl[2];
__le32 cmd[3];
-};
-
-/**
- * mtk_sha_tfm is used to define SHA transform state
- * and store result digest that produced by engine.
- */
-struct mtk_sha_tfm {
- __le32 ctrl[2];
- __le32 digest[SIZE_IN_WORDS(SHA512_DIGEST_SIZE)];
-};
-
-/**
- * mtk_sha_info consists of command token and transform state
- * of SHA, its role is similar to mtk_aes_info.
- */
-struct mtk_sha_info {
- struct mtk_sha_ct ct;
- struct mtk_sha_tfm tfm;
+ __le32 tfm[2];
+ __le32 digest[SHA_MAX_DIGEST_BUF_SIZE];
};
struct mtk_sha_reqctx {
@@ -94,7 +79,6 @@ struct mtk_sha_reqctx {
unsigned long op;
u64 digcnt;
- bool start;
size_t bufcnt;
dma_addr_t dma_addr;
@@ -153,6 +137,21 @@ static inline void mtk_sha_write(struct mtk_cryp *cryp,
writel_relaxed(value, cryp->base + offset);
}
+static inline void mtk_sha_ring_shift(struct mtk_ring *ring,
+ struct mtk_desc **cmd_curr,
+ struct mtk_desc **res_curr,
+ int *count)
+{
+ *cmd_curr = ring->cmd_next++;
+ *res_curr = ring->res_next++;
+ (*count)++;
+
+ if (ring->cmd_next == ring->cmd_base + MTK_DESC_NUM) {
+ ring->cmd_next = ring->cmd_base;
+ ring->res_next = ring->res_base;
+ }
+}
+
static struct mtk_cryp *mtk_sha_find_dev(struct mtk_sha_ctx *tctx)
{
struct mtk_cryp *cryp = NULL;
@@ -251,7 +250,9 @@ static void mtk_sha_fill_padding(struct mtk_sha_reqctx *ctx, u32 len)
bits[1] = cpu_to_be64(size << 3);
bits[0] = cpu_to_be64(size >> 61);
- if (ctx->flags & (SHA_FLAGS_SHA384 | SHA_FLAGS_SHA512)) {
+ switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
+ case SHA_FLAGS_SHA384:
+ case SHA_FLAGS_SHA512:
index = ctx->bufcnt & 0x7f;
padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
*(ctx->buffer + ctx->bufcnt) = 0x80;
@@ -259,7 +260,9 @@ static void mtk_sha_fill_padding(struct mtk_sha_reqctx *ctx, u32 len)
memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
ctx->bufcnt += padlen + 16;
ctx->flags |= SHA_FLAGS_PAD;
- } else {
+ break;
+
+ default:
index = ctx->bufcnt & 0x3f;
padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
*(ctx->buffer + ctx->bufcnt) = 0x80;
@@ -267,36 +270,35 @@ static void mtk_sha_fill_padding(struct mtk_sha_reqctx *ctx, u32 len)
memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
ctx->bufcnt += padlen + 8;
ctx->flags |= SHA_FLAGS_PAD;
+ break;
}
}
/* Initialize basic transform information of SHA */
static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx)
{
- struct mtk_sha_ct *ct = &ctx->info.ct;
- struct mtk_sha_tfm *tfm = &ctx->info.tfm;
+ struct mtk_sha_info *info = &ctx->info;
ctx->ct_hdr = SHA_CT_CTRL_HDR;
ctx->ct_size = SHA_CT_SIZE;
- tfm->ctrl[0] = SHA_TFM_HASH | SHA_TFM_INNER_DIG |
- SHA_TFM_SIZE(SIZE_IN_WORDS(ctx->ds));
+ info->tfm[0] = SHA_TFM_HASH | SHA_TFM_SIZE(SIZE_IN_WORDS(ctx->ds));
switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
case SHA_FLAGS_SHA1:
- tfm->ctrl[0] |= SHA_TFM_SHA1;
+ info->tfm[0] |= SHA_TFM_SHA1;
break;
case SHA_FLAGS_SHA224:
- tfm->ctrl[0] |= SHA_TFM_SHA224;
+ info->tfm[0] |= SHA_TFM_SHA224;
break;
case SHA_FLAGS_SHA256:
- tfm->ctrl[0] |= SHA_TFM_SHA256;
+ info->tfm[0] |= SHA_TFM_SHA256;
break;
case SHA_FLAGS_SHA384:
- tfm->ctrl[0] |= SHA_TFM_SHA384;
+ info->tfm[0] |= SHA_TFM_SHA384;
break;
case SHA_FLAGS_SHA512:
- tfm->ctrl[0] |= SHA_TFM_SHA512;
+ info->tfm[0] |= SHA_TFM_SHA512;
break;
default:
@@ -304,13 +306,13 @@ static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx)
return;
}
- tfm->ctrl[1] = SHA_TFM_HASH_STORE;
- ct->ctrl[0] = tfm->ctrl[0] | SHA_TFM_CONTINUE | SHA_TFM_START;
- ct->ctrl[1] = tfm->ctrl[1];
+ info->tfm[1] = SHA_TFM_HASH_STORE;
+ info->ctrl[0] = info->tfm[0] | SHA_TFM_CONTINUE | SHA_TFM_START;
+ info->ctrl[1] = info->tfm[1];
- ct->cmd[0] = SHA_CMD0;
- ct->cmd[1] = SHA_CMD1;
- ct->cmd[2] = SHA_CMD2 | SHA_TFM_DIGEST(SIZE_IN_WORDS(ctx->ds));
+ info->cmd[0] = SHA_CMD0;
+ info->cmd[1] = SHA_CMD1;
+ info->cmd[2] = SHA_CMD2 | SHA_TFM_DIGEST(SIZE_IN_WORDS(ctx->ds));
}
/*
@@ -319,23 +321,21 @@ static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx)
*/
static int mtk_sha_info_update(struct mtk_cryp *cryp,
struct mtk_sha_rec *sha,
- size_t len)
+ size_t len1, size_t len2)
{
struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
struct mtk_sha_info *info = &ctx->info;
- struct mtk_sha_ct *ct = &info->ct;
-
- if (ctx->start)
- ctx->start = false;
- else
- ct->ctrl[0] &= ~SHA_TFM_START;
ctx->ct_hdr &= ~SHA_DATA_LEN_MSK;
- ctx->ct_hdr |= cpu_to_le32(len);
- ct->cmd[0] &= ~SHA_DATA_LEN_MSK;
- ct->cmd[0] |= cpu_to_le32(len);
+ ctx->ct_hdr |= cpu_to_le32(len1 + len2);
+ info->cmd[0] &= ~SHA_DATA_LEN_MSK;
+ info->cmd[0] |= cpu_to_le32(len1 + len2);
+
+ /* Setting SHA_TFM_START only for the first iteration */
+ if (ctx->digcnt)
+ info->ctrl[0] &= ~SHA_TFM_START;
- ctx->digcnt += len;
+ ctx->digcnt += len1;
ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
DMA_BIDIRECTIONAL);
@@ -343,7 +343,8 @@ static int mtk_sha_info_update(struct mtk_cryp *cryp,
dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info));
return -EINVAL;
}
- ctx->tfm_dma = ctx->ct_dma + sizeof(*ct);
+
+ ctx->tfm_dma = ctx->ct_dma + sizeof(info->ctrl) + sizeof(info->cmd);
return 0;
}
@@ -408,7 +409,6 @@ static int mtk_sha_init(struct ahash_request *req)
ctx->bufcnt = 0;
ctx->digcnt = 0;
ctx->buffer = tctx->buf;
- ctx->start = true;
if (tctx->flags & SHA_FLAGS_HMAC) {
struct mtk_sha_hmac_ctx *bctx = tctx->base;
@@ -422,89 +422,39 @@ static int mtk_sha_init(struct ahash_request *req)
}
static int mtk_sha_xmit(struct mtk_cryp *cryp, struct mtk_sha_rec *sha,
- dma_addr_t addr, size_t len)
+ dma_addr_t addr1, size_t len1,
+ dma_addr_t addr2, size_t len2)
{
struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
struct mtk_ring *ring = cryp->ring[sha->id];
- struct mtk_desc *cmd = ring->cmd_base + ring->cmd_pos;
- struct mtk_desc *res = ring->res_base + ring->res_pos;
- int err;
-
- err = mtk_sha_info_update(cryp, sha, len);
- if (err)
- return err;
-
- /* Fill in the command/result descriptors */
- res->hdr = MTK_DESC_FIRST | MTK_DESC_LAST | MTK_DESC_BUF_LEN(len);
- res->buf = cpu_to_le32(cryp->tmp_dma);
-
- cmd->hdr = MTK_DESC_FIRST | MTK_DESC_LAST | MTK_DESC_BUF_LEN(len) |
- MTK_DESC_CT_LEN(ctx->ct_size);
-
- cmd->buf = cpu_to_le32(addr);
- cmd->ct = cpu_to_le32(ctx->ct_dma);
- cmd->ct_hdr = ctx->ct_hdr;
- cmd->tfm = cpu_to_le32(ctx->tfm_dma);
-
- if (++ring->cmd_pos == MTK_DESC_NUM)
- ring->cmd_pos = 0;
-
- ring->res_pos = ring->cmd_pos;
- /*
- * Make sure that all changes to the DMA ring are done before we
- * start engine.
- */
- wmb();
- /* Start DMA transfer */
- mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(1));
- mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(1));
-
- return -EINPROGRESS;
-}
-
-static int mtk_sha_xmit2(struct mtk_cryp *cryp,
- struct mtk_sha_rec *sha,
- struct mtk_sha_reqctx *ctx,
- size_t len1, size_t len2)
-{
- struct mtk_ring *ring = cryp->ring[sha->id];
- struct mtk_desc *cmd = ring->cmd_base + ring->cmd_pos;
- struct mtk_desc *res = ring->res_base + ring->res_pos;
- int err;
+ struct mtk_desc *cmd, *res;
+ int err, count = 0;
- err = mtk_sha_info_update(cryp, sha, len1 + len2);
+ err = mtk_sha_info_update(cryp, sha, len1, len2);
if (err)
return err;
/* Fill in the command/result descriptors */
- res->hdr = MTK_DESC_BUF_LEN(len1) | MTK_DESC_FIRST;
- res->buf = cpu_to_le32(cryp->tmp_dma);
+ mtk_sha_ring_shift(ring, &cmd, &res, &count);
- cmd->hdr = MTK_DESC_BUF_LEN(len1) | MTK_DESC_FIRST |
+ res->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1);
+ cmd->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1) |
MTK_DESC_CT_LEN(ctx->ct_size);
- cmd->buf = cpu_to_le32(sg_dma_address(ctx->sg));
+ cmd->buf = cpu_to_le32(addr1);
cmd->ct = cpu_to_le32(ctx->ct_dma);
cmd->ct_hdr = ctx->ct_hdr;
cmd->tfm = cpu_to_le32(ctx->tfm_dma);
- if (++ring->cmd_pos == MTK_DESC_NUM)
- ring->cmd_pos = 0;
-
- ring->res_pos = ring->cmd_pos;
-
- cmd = ring->cmd_base + ring->cmd_pos;
- res = ring->res_base + ring->res_pos;
-
- res->hdr = MTK_DESC_BUF_LEN(len2) | MTK_DESC_LAST;
- res->buf = cpu_to_le32(cryp->tmp_dma);
+ if (len2) {
+ mtk_sha_ring_shift(ring, &cmd, &res, &count);
- cmd->hdr = MTK_DESC_BUF_LEN(len2) | MTK_DESC_LAST;
- cmd->buf = cpu_to_le32(ctx->dma_addr);
-
- if (++ring->cmd_pos == MTK_DESC_NUM)
- ring->cmd_pos = 0;
+ res->hdr = MTK_DESC_BUF_LEN(len2);
+ cmd->hdr = MTK_DESC_BUF_LEN(len2);
+ cmd->buf = cpu_to_le32(addr2);
+ }
- ring->res_pos = ring->cmd_pos;
+ cmd->hdr |= MTK_DESC_LAST;
+ res->hdr |= MTK_DESC_LAST;
/*
* Make sure that all changes to the DMA ring are done before we
@@ -512,8 +462,8 @@ static int mtk_sha_xmit2(struct mtk_cryp *cryp,
*/
wmb();
/* Start DMA transfer */
- mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(2));
- mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(2));
+ mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));
+ mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));
return -EINPROGRESS;
}
@@ -532,7 +482,7 @@ static int mtk_sha_dma_map(struct mtk_cryp *cryp,
ctx->flags &= ~SHA_FLAGS_SG;
- return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count);
+ return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count, 0, 0);
}
static int mtk_sha_update_slow(struct mtk_cryp *cryp,
@@ -625,7 +575,8 @@ static int mtk_sha_update_start(struct mtk_cryp *cryp,
if (len == 0) {
ctx->flags &= ~SHA_FLAGS_SG;
- return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count);
+ return mtk_sha_xmit(cryp, sha, ctx->dma_addr,
+ count, 0, 0);
} else {
ctx->sg = sg;
@@ -635,7 +586,8 @@ static int mtk_sha_update_start(struct mtk_cryp *cryp,
}
ctx->flags |= SHA_FLAGS_SG;
- return mtk_sha_xmit2(cryp, sha, ctx, len, count);
+ return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
+ len, ctx->dma_addr, count);
}
}
@@ -646,7 +598,8 @@ static int mtk_sha_update_start(struct mtk_cryp *cryp,
ctx->flags |= SHA_FLAGS_SG;
- return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg), len);
+ return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
+ len, 0, 0);
}
static int mtk_sha_final_req(struct mtk_cryp *cryp,
@@ -668,7 +621,7 @@ static int mtk_sha_final_req(struct mtk_cryp *cryp,
static int mtk_sha_finish(struct ahash_request *req)
{
struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
- u32 *digest = ctx->info.tfm.digest;
+ __le32 *digest = ctx->info.digest;
u32 *result = (u32 *)req->result;
int i;
@@ -694,7 +647,7 @@ static void mtk_sha_finish_req(struct mtk_cryp *cryp,
sha->req->base.complete(&sha->req->base, err);
/* Handle new request */
- mtk_sha_handle_queue(cryp, sha->id - RING2, NULL);
+ tasklet_schedule(&sha->queue_task);
}
static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
@@ -1216,60 +1169,38 @@ static struct ahash_alg algs_sha384_sha512[] = {
},
};
-static void mtk_sha_task0(unsigned long data)
+static void mtk_sha_queue_task(unsigned long data)
{
- struct mtk_cryp *cryp = (struct mtk_cryp *)data;
- struct mtk_sha_rec *sha = cryp->sha[0];
+ struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;
- mtk_sha_unmap(cryp, sha);
- mtk_sha_complete(cryp, sha);
+ mtk_sha_handle_queue(sha->cryp, sha->id - MTK_RING2, NULL);
}
-static void mtk_sha_task1(unsigned long data)
+static void mtk_sha_done_task(unsigned long data)
{
- struct mtk_cryp *cryp = (struct mtk_cryp *)data;
- struct mtk_sha_rec *sha = cryp->sha[1];
+ struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;
+ struct mtk_cryp *cryp = sha->cryp;
mtk_sha_unmap(cryp, sha);
mtk_sha_complete(cryp, sha);
}
-static irqreturn_t mtk_sha_ring2_irq(int irq, void *dev_id)
+static irqreturn_t mtk_sha_irq(int irq, void *dev_id)
{
- struct mtk_cryp *cryp = (struct mtk_cryp *)dev_id;
- struct mtk_sha_rec *sha = cryp->sha[0];
- u32 val = mtk_sha_read(cryp, RDR_STAT(RING2));
+ struct mtk_sha_rec *sha = (struct mtk_sha_rec *)dev_id;
+ struct mtk_cryp *cryp = sha->cryp;
+ u32 val = mtk_sha_read(cryp, RDR_STAT(sha->id));
- mtk_sha_write(cryp, RDR_STAT(RING2), val);
+ mtk_sha_write(cryp, RDR_STAT(sha->id), val);
if (likely((SHA_FLAGS_BUSY & sha->flags))) {
- mtk_sha_write(cryp, RDR_PROC_COUNT(RING2), MTK_CNT_RST);
- mtk_sha_write(cryp, RDR_THRESH(RING2),
+ mtk_sha_write(cryp, RDR_PROC_COUNT(sha->id), MTK_CNT_RST);
+ mtk_sha_write(cryp, RDR_THRESH(sha->id),
MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
- tasklet_schedule(&sha->task);
+ tasklet_schedule(&sha->done_task);
} else {
- dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
- }
- return IRQ_HANDLED;
-}
-
-static irqreturn_t mtk_sha_ring3_irq(int irq, void *dev_id)
-{
- struct mtk_cryp *cryp = (struct mtk_cryp *)dev_id;
- struct mtk_sha_rec *sha = cryp->sha[1];
- u32 val = mtk_sha_read(cryp, RDR_STAT(RING3));
-
- mtk_sha_write(cryp, RDR_STAT(RING3), val);
-
- if (likely((SHA_FLAGS_BUSY & sha->flags))) {
- mtk_sha_write(cryp, RDR_PROC_COUNT(RING3), MTK_CNT_RST);
- mtk_sha_write(cryp, RDR_THRESH(RING3),
- MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
-
- tasklet_schedule(&sha->task);
- } else {
- dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
+ dev_warn(cryp->dev, "SHA interrupt when no active requests.\n");
}
return IRQ_HANDLED;
}
@@ -1288,14 +1219,20 @@ static int mtk_sha_record_init(struct mtk_cryp *cryp)
if (!sha[i])
goto err_cleanup;
- sha[i]->id = i + RING2;
+ sha[i]->cryp = cryp;
spin_lock_init(&sha[i]->lock);
crypto_init_queue(&sha[i]->queue, SHA_QUEUE_SIZE);
+
+ tasklet_init(&sha[i]->queue_task, mtk_sha_queue_task,
+ (unsigned long)sha[i]);
+ tasklet_init(&sha[i]->done_task, mtk_sha_done_task,
+ (unsigned long)sha[i]);
}
- tasklet_init(&sha[0]->task, mtk_sha_task0, (unsigned long)cryp);
- tasklet_init(&sha[1]->task, mtk_sha_task1, (unsigned long)cryp);
+ /* Link to ring2 and ring3 respectively */
+ sha[0]->id = MTK_RING2;
+ sha[1]->id = MTK_RING3;
cryp->rec = 1;
@@ -1312,7 +1249,9 @@ static void mtk_sha_record_free(struct mtk_cryp *cryp)
int i;
for (i = 0; i < MTK_REC_NUM; i++) {
- tasklet_kill(&cryp->sha[i]->task);
+ tasklet_kill(&cryp->sha[i]->done_task);
+ tasklet_kill(&cryp->sha[i]->queue_task);
+
kfree(cryp->sha[i]);
}
}
@@ -1368,35 +1307,23 @@ int mtk_hash_alg_register(struct mtk_cryp *cryp)
if (err)
goto err_record;
- /* Ring2 is use by SHA record0 */
- err = devm_request_irq(cryp->dev, cryp->irq[RING2],
- mtk_sha_ring2_irq, IRQF_TRIGGER_LOW,
- "mtk-sha", cryp);
+ err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING2], mtk_sha_irq,
+ 0, "mtk-sha", cryp->sha[0]);
if (err) {
dev_err(cryp->dev, "unable to request sha irq0.\n");
goto err_res;
}
- /* Ring3 is use by SHA record1 */
- err = devm_request_irq(cryp->dev, cryp->irq[RING3],
- mtk_sha_ring3_irq, IRQF_TRIGGER_LOW,
- "mtk-sha", cryp);
+ err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING3], mtk_sha_irq,
+ 0, "mtk-sha", cryp->sha[1]);
if (err) {
dev_err(cryp->dev, "unable to request sha irq1.\n");
goto err_res;
}
/* Enable ring2 and ring3 interrupt for hash */
- mtk_sha_write(cryp, AIC_ENABLE_SET(RING2), MTK_IRQ_RDR2);
- mtk_sha_write(cryp, AIC_ENABLE_SET(RING3), MTK_IRQ_RDR3);
-
- cryp->tmp = dma_alloc_coherent(cryp->dev, SHA_TMP_BUF_SIZE,
- &cryp->tmp_dma, GFP_KERNEL);
- if (!cryp->tmp) {
- dev_err(cryp->dev, "unable to allocate tmp buffer.\n");
- err = -EINVAL;
- goto err_res;
- }
+ mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING2), MTK_IRQ_RDR2);
+ mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING3), MTK_IRQ_RDR3);
spin_lock(&mtk_sha.lock);
list_add_tail(&cryp->sha_list, &mtk_sha.dev_list);
@@ -1412,8 +1339,6 @@ err_algs:
spin_lock(&mtk_sha.lock);
list_del(&cryp->sha_list);
spin_unlock(&mtk_sha.lock);
- dma_free_coherent(cryp->dev, SHA_TMP_BUF_SIZE,
- cryp->tmp, cryp->tmp_dma);
err_res:
mtk_sha_record_free(cryp);
err_record:
@@ -1429,7 +1354,5 @@ void mtk_hash_alg_release(struct mtk_cryp *cryp)
spin_unlock(&mtk_sha.lock);
mtk_sha_unregister_algs();
- dma_free_coherent(cryp->dev, SHA_TMP_BUF_SIZE,
- cryp->tmp, cryp->tmp_dma);
mtk_sha_record_free(cryp);
}
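
The mtk-sha rework above folds two copy-pasted per-ring IRQ handlers into a single handler by registering the same function for both rings and passing the per-record state as the dev_id cookie. A minimal sketch of that pattern, using illustrative names (my_rec, my_ring_irq) rather than the driver's own:

	#include <linux/bitops.h>
	#include <linux/interrupt.h>

	#define MY_FLAGS_BUSY	BIT(0)

	struct my_rec {
		unsigned int id;		/* hardware ring driven by this record */
		unsigned long flags;
		struct tasklet_struct done_task;
	};

	static irqreturn_t my_ring_irq(int irq, void *dev_id)
	{
		struct my_rec *rec = dev_id;	/* per-ring state, so no RING2/RING3 duplication */

		if (rec->flags & MY_FLAGS_BUSY)
			tasklet_schedule(&rec->done_task);

		return IRQ_HANDLED;
	}

	/*
	 * Probe-time registration: both rings share my_ring_irq(); only the
	 * cookie differs, e.g.
	 *	devm_request_irq(dev, irq[rec->id], my_ring_irq, 0, "my-drv", rec);
	 */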
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 0d35dca2e925..2aab80bc241f 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -491,7 +491,7 @@ static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
ctx->g2 = false;
}
-static int qat_dh_set_secret(struct crypto_kpp *tfm, void *buf,
+static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 1b9da3dc799b..7ac657f46d15 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -170,6 +170,32 @@ struct s5p_aes_ctx {
int keylen;
};
+/**
+ * struct s5p_aes_dev - Crypto device state container
+ * @dev: Associated device
+ * @clk: Clock for accessing hardware
+ * @ioaddr: Mapped IO memory region
+ * @aes_ioaddr: Per-variant offset for AES block IO memory
+ * @irq_fc: Feed control interrupt line
+ * @req: Crypto request currently handled by the device
+ * @ctx: Configuration for currently handled crypto request
+ * @sg_src: Scatter list with source data for currently handled block
+ * in device. This is DMA-mapped into device.
+ * @sg_dst: Scatter list with destination data for currently handled block
+ * in device. This is DMA-mapped into device.
+ * @sg_src_cpy: In case of unaligned access, copied scatter list
+ * with source data.
+ * @sg_dst_cpy: In case of unaligned access, copied scatter list
+ * with destination data.
+ * @tasklet: New request scheduling job
+ * @queue: Crypto queue
+ * @busy: Indicates whether the device is currently handling some request
+ * thus it uses some of the fields from this state, like:
+ * req, ctx, sg_src/dst (and copies). This essentially
+ * protects against concurrent access to these fields.
+ * @lock: Lock for protecting both access to device hardware registers
+ * and fields related to current request (including the busy field).
+ */
struct s5p_aes_dev {
struct device *dev;
struct clk *clk;
@@ -182,7 +208,6 @@ struct s5p_aes_dev {
struct scatterlist *sg_src;
struct scatterlist *sg_dst;
- /* In case of unaligned access: */
struct scatterlist *sg_src_cpy;
struct scatterlist *sg_dst_cpy;
@@ -190,8 +215,6 @@ struct s5p_aes_dev {
struct crypto_queue queue;
bool busy;
spinlock_t lock;
-
- struct samsung_aes_variant *variant;
};
static struct s5p_aes_dev *s5p_dev;
@@ -287,7 +310,6 @@ static void s5p_sg_done(struct s5p_aes_dev *dev)
static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
{
dev->req->base.complete(&dev->req->base, err);
- dev->busy = false;
}
static void s5p_unset_outdata(struct s5p_aes_dev *dev)
@@ -462,7 +484,7 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&dev->lock, flags);
s5p_aes_complete(dev, 0);
- dev->busy = true;
+ /* Device is still busy */
tasklet_schedule(&dev->tasklet);
} else {
/*
@@ -483,6 +505,7 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
error:
s5p_sg_done(dev);
+ dev->busy = false;
spin_unlock_irqrestore(&dev->lock, flags);
s5p_aes_complete(dev, err);
@@ -634,6 +657,7 @@ outdata_error:
indata_error:
s5p_sg_done(dev);
+ dev->busy = false;
spin_unlock_irqrestore(&dev->lock, flags);
s5p_aes_complete(dev, err);
}
@@ -851,7 +875,6 @@ static int s5p_aes_probe(struct platform_device *pdev)
}
pdata->busy = false;
- pdata->variant = variant;
pdata->dev = dev;
platform_set_drvdata(pdev, pdata);
s5p_dev = pdata;
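
The s5p-sss changes above stop clearing the busy flag inside s5p_aes_complete() and instead clear it in the error paths while the state lock is still held, invoking the completion callback only after the lock is dropped. A hedged sketch of that ordering with illustrative names (my_aes_dev, my_finish_request); the real driver's request type and teardown helpers differ:

	#include <linux/spinlock.h>
	#include <linux/crypto.h>

	struct my_aes_dev {
		spinlock_t lock;		/* protects busy and the current-request state */
		bool busy;
		struct ablkcipher_request *req;	/* request type of this kernel era */
	};

	static void my_finish_request(struct my_aes_dev *dd, int err)
	{
		unsigned long flags;

		spin_lock_irqsave(&dd->lock, flags);
		/* ...unmap and free the DMA scatterlists here... */
		dd->busy = false;		/* device state is consistent again */
		spin_unlock_irqrestore(&dd->lock, flags);

		/* Completion runs outside the lock; it may immediately queue new work. */
		dd->req->base.complete(&dd->req->base, err);
	}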
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
new file mode 100644
index 000000000000..09b4ec87c212
--- /dev/null
+++ b/drivers/crypto/stm32/Kconfig
@@ -0,0 +1,7 @@
+config CRYPTO_DEV_STM32
+ tristate "Support for STM32 crypto accelerators"
+ depends on ARCH_STM32
+ select CRYPTO_HASH
+ help
+ This enables support for the CRC32 hw accelerator which can be found
+ on STMicroelectronics STM32 SoCs.
diff --git a/drivers/crypto/stm32/Makefile b/drivers/crypto/stm32/Makefile
new file mode 100644
index 000000000000..73b4c6e47f5f
--- /dev/null
+++ b/drivers/crypto/stm32/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_STM32) += stm32_cryp.o
+stm32_cryp-objs := stm32_crc32.o
diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c
new file mode 100644
index 000000000000..ec83b1e6bfe8
--- /dev/null
+++ b/drivers/crypto/stm32/stm32_crc32.c
@@ -0,0 +1,324 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2017
+ * Author: Fabien Dessenne <fabien.dessenne@st.com>
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/bitrev.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <crypto/internal/hash.h>
+
+#include <asm/unaligned.h>
+
+#define DRIVER_NAME "stm32-crc32"
+#define CHKSUM_DIGEST_SIZE 4
+#define CHKSUM_BLOCK_SIZE 1
+
+/* Registers */
+#define CRC_DR 0x00000000
+#define CRC_CR 0x00000008
+#define CRC_INIT 0x00000010
+#define CRC_POL 0x00000014
+
+/* Registers values */
+#define CRC_CR_RESET BIT(0)
+#define CRC_CR_REVERSE (BIT(7) | BIT(6) | BIT(5))
+#define CRC_INIT_DEFAULT 0xFFFFFFFF
+
+/* Polynomial reversed */
+#define POLY_CRC32 0xEDB88320
+#define POLY_CRC32C 0x82F63B78
+
+struct stm32_crc {
+ struct list_head list;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *clk;
+ u8 pending_data[sizeof(u32)];
+ size_t nb_pending_bytes;
+};
+
+struct stm32_crc_list {
+ struct list_head dev_list;
+ spinlock_t lock; /* protect dev_list */
+};
+
+static struct stm32_crc_list crc_list = {
+ .dev_list = LIST_HEAD_INIT(crc_list.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(crc_list.lock),
+};
+
+struct stm32_crc_ctx {
+ u32 key;
+ u32 poly;
+};
+
+struct stm32_crc_desc_ctx {
+ u32 partial; /* crc32c: partial result kept in the first 4 bytes of this struct */
+ struct stm32_crc *crc;
+};
+
+static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
+{
+ struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
+
+ mctx->key = CRC_INIT_DEFAULT;
+ mctx->poly = POLY_CRC32;
+ return 0;
+}
+
+static int stm32_crc32c_cra_init(struct crypto_tfm *tfm)
+{
+ struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
+
+ mctx->key = CRC_INIT_DEFAULT;
+ mctx->poly = POLY_CRC32C;
+ return 0;
+}
+
+static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct stm32_crc_ctx *mctx = crypto_shash_ctx(tfm);
+
+ if (keylen != sizeof(u32)) {
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ mctx->key = get_unaligned_le32(key);
+ return 0;
+}
+
+static int stm32_crc_init(struct shash_desc *desc)
+{
+ struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
+ struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
+ struct stm32_crc *crc;
+
+ spin_lock_bh(&crc_list.lock);
+ list_for_each_entry(crc, &crc_list.dev_list, list) {
+ ctx->crc = crc;
+ break;
+ }
+ spin_unlock_bh(&crc_list.lock);
+
+ /* Reset, set key, poly and configure in bit reverse mode */
+ writel(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
+ writel(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
+ writel(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR);
+
+ /* Store partial result */
+ ctx->partial = readl(ctx->crc->regs + CRC_DR);
+ ctx->crc->nb_pending_bytes = 0;
+
+ return 0;
+}
+
+static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
+ unsigned int length)
+{
+ struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
+ struct stm32_crc *crc = ctx->crc;
+ u32 *d32;
+ unsigned int i;
+
+ if (unlikely(crc->nb_pending_bytes)) {
+ while (crc->nb_pending_bytes != sizeof(u32) && length) {
+ /* Fill in pending data */
+ crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
+ length--;
+ }
+
+ if (crc->nb_pending_bytes == sizeof(u32)) {
+ /* Process completed pending data */
+ writel(*(u32 *)crc->pending_data, crc->regs + CRC_DR);
+ crc->nb_pending_bytes = 0;
+ }
+ }
+
+ d32 = (u32 *)d8;
+ for (i = 0; i < length >> 2; i++)
+ /* Process 32-bit data */
+ writel(*(d32++), crc->regs + CRC_DR);
+
+ /* Store partial result */
+ ctx->partial = readl(crc->regs + CRC_DR);
+
+ /* Check for pending data (non 32 bits) */
+ length &= 3;
+ if (likely(!length))
+ return 0;
+
+ if ((crc->nb_pending_bytes + length) >= sizeof(u32)) {
+ /* Shall not happen */
+ dev_err(crc->dev, "Pending data overflow\n");
+ return -EINVAL;
+ }
+
+ d8 = (const u8 *)d32;
+ for (i = 0; i < length; i++)
+ /* Store pending data */
+ crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
+
+ return 0;
+}
+
+static int stm32_crc_final(struct shash_desc *desc, u8 *out)
+{
+ struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
+ struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
+
+ /* Send computed CRC */
+ put_unaligned_le32(mctx->poly == POLY_CRC32C ?
+ ~ctx->partial : ctx->partial, out);
+
+ return 0;
+}
+
+static int stm32_crc_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int length, u8 *out)
+{
+ return stm32_crc_update(desc, data, length) ?:
+ stm32_crc_final(desc, out);
+}
+
+static int stm32_crc_digest(struct shash_desc *desc, const u8 *data,
+ unsigned int length, u8 *out)
+{
+ return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out);
+}
+
+static struct shash_alg algs[] = {
+ /* CRC-32 */
+ {
+ .setkey = stm32_crc_setkey,
+ .init = stm32_crc_init,
+ .update = stm32_crc_update,
+ .final = stm32_crc_final,
+ .finup = stm32_crc_finup,
+ .digest = stm32_crc_digest,
+ .descsize = sizeof(struct stm32_crc_desc_ctx),
+ .digestsize = CHKSUM_DIGEST_SIZE,
+ .base = {
+ .cra_name = "crc32",
+ .cra_driver_name = DRIVER_NAME,
+ .cra_priority = 200,
+ .cra_blocksize = CHKSUM_BLOCK_SIZE,
+ .cra_alignmask = 3,
+ .cra_ctxsize = sizeof(struct stm32_crc_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_crc32_cra_init,
+ }
+ },
+ /* CRC-32 Castagnoli */
+ {
+ .setkey = stm32_crc_setkey,
+ .init = stm32_crc_init,
+ .update = stm32_crc_update,
+ .final = stm32_crc_final,
+ .finup = stm32_crc_finup,
+ .digest = stm32_crc_digest,
+ .descsize = sizeof(struct stm32_crc_desc_ctx),
+ .digestsize = CHKSUM_DIGEST_SIZE,
+ .base = {
+ .cra_name = "crc32c",
+ .cra_driver_name = DRIVER_NAME,
+ .cra_priority = 200,
+ .cra_blocksize = CHKSUM_BLOCK_SIZE,
+ .cra_alignmask = 3,
+ .cra_ctxsize = sizeof(struct stm32_crc_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_crc32c_cra_init,
+ }
+ }
+};
+
+static int stm32_crc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct stm32_crc *crc;
+ struct resource *res;
+ int ret;
+
+ crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
+ if (!crc)
+ return -ENOMEM;
+
+ crc->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ crc->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(crc->regs)) {
+ dev_err(dev, "Cannot map CRC IO\n");
+ return PTR_ERR(crc->regs);
+ }
+
+ crc->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(crc->clk)) {
+ dev_err(dev, "Could not get clock\n");
+ return PTR_ERR(crc->clk);
+ }
+
+ ret = clk_prepare_enable(crc->clk);
+ if (ret) {
+ dev_err(crc->dev, "Failed to enable clock\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, crc);
+
+ spin_lock(&crc_list.lock);
+ list_add(&crc->list, &crc_list.dev_list);
+ spin_unlock(&crc_list.lock);
+
+ ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
+ if (ret) {
+ dev_err(dev, "Failed to register\n");
+ clk_disable_unprepare(crc->clk);
+ return ret;
+ }
+
+ dev_info(dev, "Initialized\n");
+
+ return 0;
+}
+
+static int stm32_crc_remove(struct platform_device *pdev)
+{
+ struct stm32_crc *crc = platform_get_drvdata(pdev);
+
+ spin_lock(&crc_list.lock);
+ list_del(&crc->list);
+ spin_unlock(&crc_list.lock);
+
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+
+ clk_disable_unprepare(crc->clk);
+
+ return 0;
+}
+
+static const struct of_device_id stm32_dt_ids[] = {
+ { .compatible = "st,stm32f7-crc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_dt_ids);
+
+static struct platform_driver stm32_crc_driver = {
+ .probe = stm32_crc_probe,
+ .remove = stm32_crc_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = stm32_dt_ids,
+ },
+};
+
+module_platform_driver(stm32_crc_driver);
+
+MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 CRC32 hardware driver");
+MODULE_LICENSE("GPL");
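
For context, a minimal consumer sketch, assuming the driver above is bound: a CRC32 is computed over a buffer through the generic shash API, which is how the "crc32"/"crc32c" algorithms registered above are reached. my_crc32() is an illustrative helper, not part of the driver, and error handling is abbreviated:

	#include <crypto/hash.h>
	#include <linux/err.h>

	static int my_crc32(const u8 *buf, unsigned int len, u8 out[4])
	{
		struct crypto_shash *tfm;
		int ret;

		tfm = crypto_alloc_shash("crc32", 0, 0);	/* may resolve to stm32-crc32 */
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		{
			SHASH_DESC_ON_STACK(desc, tfm);

			desc->tfm = tfm;
			desc->flags = 0;	/* shash_desc still carries flags in this kernel era */
			ret = crypto_shash_digest(desc, buf, len, out);
		}

		crypto_free_shash(tfm);
		return ret;
	}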
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 8e8d60e9a1a2..b1f0d523dff9 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -37,7 +37,7 @@ struct udl_fbdev {
};
#define DL_ALIGN_UP(x, a) ALIGN(x, a)
-#define DL_ALIGN_DOWN(x, a) ALIGN(x-(a-1), a)
+#define DL_ALIGN_DOWN(x, a) ALIGN_DOWN(x, a)
/** Read the red component (0..255) of a 32 bpp colour. */
#define DLO_RGB_GETRED(col) (uint8_t)((col) & 0xFF)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index afb0967d2ce6..6faaca1b48b3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3809,6 +3809,15 @@ static int adap_init0(struct adapter *adap)
}
if (caps_cmd.cryptocaps) {
/* Should query params here...TODO */
+ params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
+ params, val);
+ if (ret < 0) {
+ if (ret != -EINVAL)
+ goto bye;
+ } else {
+ adap->vres.ncrypto_fc = val[0];
+ }
adap->params.crypto |= ULP_CRYPTO_LOOKASIDE;
adap->num_uld += 1;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 4c856605fdfa..6e74040af49a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -272,6 +272,7 @@ struct cxgb4_virt_res { /* virtualized HW resources */
struct cxgb4_range qp;
struct cxgb4_range cq;
struct cxgb4_range ocq;
+ unsigned int ncrypto_fc;
};
#define OCQ_WIN_OFFSET(pdev, vres) \
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index ccc05f874419..8f8c079d0d2b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1167,7 +1167,8 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_START = 0x2D,
FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_END = 0x2E,
FW_PARAMS_PARAM_PFVF_ETHOFLD_END = 0x30,
- FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31
+ FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31,
+ FW_PARAMS_PARAM_PFVF_NCRYPTO_LOOKASIDE = 0x32
};
/*
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 6f509f68085e..3d891db57ee6 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -2019,8 +2019,7 @@ out:
return ret;
}
-static int qman_query_fq_np(struct qman_fq *fq,
- struct qm_mcr_queryfq_np *np)
+int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
{
union qm_mc_command *mcc;
union qm_mc_result *mcr;
@@ -2046,6 +2045,7 @@ out:
put_affine_portal();
return ret;
}
+EXPORT_SYMBOL(qman_query_fq_np);
static int qman_query_cgr(struct qman_cgr *cgr,
struct qm_mcr_querycgr *cgrd)
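
Exporting qman_query_fq_np(), together with dropping struct qm_mcr_queryfq_np and the qm_mcr_np_get() accessor from the private header below, lets modular users such as the new caam/qi backend inspect a frame queue's non-programmable fields. A sketch of the kind of call this enables, assuming those definitions now live in the public <soc/fsl/qman.h>; my_fq_is_empty() is illustrative:

	#include <linux/types.h>
	#include <soc/fsl/qman.h>

	static bool my_fq_is_empty(struct qman_fq *fq)
	{
		struct qm_mcr_queryfq_np np;

		if (qman_query_fq_np(fq, &np))
			return false;			/* query failed; assume not empty */

		/* frm_cnt is a 24-bit field; qm_mcr_np_get() applies the mask. */
		return qm_mcr_np_get(&np, frm_cnt) == 0;
	}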
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index f4e6e70de259..90bc40c48675 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -34,6 +34,8 @@ u16 qman_ip_rev;
EXPORT_SYMBOL(qman_ip_rev);
u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
EXPORT_SYMBOL(qm_channel_pool1);
+u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
+EXPORT_SYMBOL(qm_channel_caam);
/* Register offsets */
#define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10))
@@ -720,8 +722,10 @@ static int fsl_qman_probe(struct platform_device *pdev)
return -ENODEV;
}
- if ((qman_ip_rev & 0xff00) >= QMAN_REV30)
+ if ((qman_ip_rev & 0xff00) >= QMAN_REV30) {
qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
+ qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
+ }
ret = zero_priv_mem(dev, node, fqd_a, fqd_sz);
WARN_ON(ret);
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
index 53685b59718e..22725bdc6f15 100644
--- a/drivers/soc/fsl/qbman/qman_priv.h
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -89,67 +89,6 @@ static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo);
}
-/* "Query FQ Non-Programmable Fields" */
-
-struct qm_mcr_queryfq_np {
- u8 verb;
- u8 result;
- u8 __reserved1;
- u8 state; /* QM_MCR_NP_STATE_*** */
- u32 fqd_link; /* 24-bit, _res2[24-31] */
- u16 odp_seq; /* 14-bit, _res3[14-15] */
- u16 orp_nesn; /* 14-bit, _res4[14-15] */
- u16 orp_ea_hseq; /* 15-bit, _res5[15] */
- u16 orp_ea_tseq; /* 15-bit, _res6[15] */
- u32 orp_ea_hptr; /* 24-bit, _res7[24-31] */
- u32 orp_ea_tptr; /* 24-bit, _res8[24-31] */
- u32 pfdr_hptr; /* 24-bit, _res9[24-31] */
- u32 pfdr_tptr; /* 24-bit, _res10[24-31] */
- u8 __reserved2[5];
- u8 is; /* 1-bit, _res12[1-7] */
- u16 ics_surp;
- u32 byte_cnt;
- u32 frm_cnt; /* 24-bit, _res13[24-31] */
- u32 __reserved3;
- u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
- u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
- u16 __reserved4;
- u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
- u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
- u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
-} __packed;
-
-#define QM_MCR_NP_STATE_FE 0x10
-#define QM_MCR_NP_STATE_R 0x08
-#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */
-#define QM_MCR_NP_STATE_OOS 0x00
-#define QM_MCR_NP_STATE_RETIRED 0x01
-#define QM_MCR_NP_STATE_TEN_SCHED 0x02
-#define QM_MCR_NP_STATE_TRU_SCHED 0x03
-#define QM_MCR_NP_STATE_PARKED 0x04
-#define QM_MCR_NP_STATE_ACTIVE 0x05
-#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
-#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
-#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
-#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
-#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */
-
-enum qm_mcr_queryfq_np_masks {
- qm_mcr_fqd_link_mask = BIT(24)-1,
- qm_mcr_odp_seq_mask = BIT(14)-1,
- qm_mcr_orp_nesn_mask = BIT(14)-1,
- qm_mcr_orp_ea_hseq_mask = BIT(15)-1,
- qm_mcr_orp_ea_tseq_mask = BIT(15)-1,
- qm_mcr_orp_ea_hptr_mask = BIT(24)-1,
- qm_mcr_orp_ea_tptr_mask = BIT(24)-1,
- qm_mcr_pfdr_hptr_mask = BIT(24)-1,
- qm_mcr_pfdr_tptr_mask = BIT(24)-1,
- qm_mcr_is_mask = BIT(1)-1,
- qm_mcr_frm_cnt_mask = BIT(24)-1,
-};
-#define qm_mcr_np_get(np, field) \
- ((np)->field & (qm_mcr_##field##_mask))
-
/* Congestion Groups */
/*
@@ -271,42 +210,6 @@ const struct qm_portal_config *qman_destroy_affine_portal(void);
*/
int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
-/*
- * For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
- * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
- * FQID(n) to fill in the frame queue ID.
- */
-#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
-#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
-#define QM_VDQCR_EXACT 0x40000000
-#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
-#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
-#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
-#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)
-
-#define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */
-#define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */
-#define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */
-
-/*
- * qman_volatile_dequeue - Issue a volatile dequeue command
- * @fq: the frame queue object to dequeue from
- * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
- * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
- *
- * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
- * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
- * the VDQCR is already in use, otherwise returns non-zero for failure. If
- * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
- * the VDQCR command has finished executing (ie. once the callback for the last
- * DQRR entry resulting from the VDQCR command has been called). If not using
- * the FINISH flag, completion can be determined either by detecting the
- * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
- * in the "stat" parameter passed to the FQ's dequeue callback, or by waiting
- * for the QMAN_FQ_STATE_VDQCR bit to disappear.
- */
-int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
-
int qman_alloc_fq_table(u32 num_fqids);
/* QMan s/w corenet portal, low-level i/face */