From f63601fd616ab370774fa00ea10bcaaa9e48e84c Mon Sep 17 00:00:00 2001 From: Boris BREZILLON Date: Thu, 18 Jun 2015 15:46:20 +0200 Subject: crypto: marvell/cesa - add a new driver for Marvell's CESA The existing mv_cesa driver supports some features of the CESA IP but is quite limited, and reworking it to support new features (like involving the TDMA engine to offload the CPU) is almost impossible. This driver has been rewritten from scratch to take those new features into account. This commit introduces the base infrastructure allowing us to add support for DMA optimization. It also includes support for one hash (SHA1) and one cipher (AES) algorithm, and enables those features on the Armada 370 SoC. Other algorithms and platforms will be added later on.
Signed-off-by: Boris Brezillon Signed-off-by: Arnaud Ebalard Signed-off-by: Herbert Xu --- drivers/crypto/marvell/cesa.c | 417 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 417 insertions(+) create mode 100644 drivers/crypto/marvell/cesa.c (limited to 'drivers/crypto/marvell/cesa.c')
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c new file mode 100644 index 000000000000..76a694303839 --- /dev/null +++ b/drivers/crypto/marvell/cesa.c @@ -0,0 +1,417 @@ +/* + * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA) + * that can be found on the following platforms: Orion, Kirkwood, Armada. This + * driver supports the TDMA engine on platforms on which it is available. + * + * Author: Boris Brezillon + * Author: Arnaud Ebalard + * + * This work is based on an initial version written by + * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc > + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cesa.h" + +struct mv_cesa_dev *cesa_dev; + +static void mv_cesa_dequeue_req_unlocked(struct mv_cesa_engine *engine) +{ + struct crypto_async_request *req, *backlog; + struct mv_cesa_ctx *ctx; + + spin_lock_bh(&cesa_dev->lock); + backlog = crypto_get_backlog(&cesa_dev->queue); + req = crypto_dequeue_request(&cesa_dev->queue); + engine->req = req; + spin_unlock_bh(&cesa_dev->lock); + + if (!req) + return; + + if (backlog) + backlog->complete(backlog, -EINPROGRESS); + + ctx = crypto_tfm_ctx(req->tfm); + ctx->ops->prepare(req, engine); + ctx->ops->step(req); +} + +static irqreturn_t mv_cesa_int(int irq, void *priv) +{ + struct mv_cesa_engine *engine = priv; + struct crypto_async_request *req; + struct mv_cesa_ctx *ctx; + u32 status, mask; + irqreturn_t ret = IRQ_NONE; + + while (true) { + int res; + + mask = mv_cesa_get_int_mask(engine); + status = readl(engine->regs + CESA_SA_INT_STATUS); + + if (!(status & mask)) + break; + + /* + * TODO: avoid clearing the FPGA_INT_STATUS if this is not + * relevant on some platforms. 
+ */ + writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS); + writel(~status, engine->regs + CESA_SA_INT_STATUS); + + ret = IRQ_HANDLED; + spin_lock_bh(&engine->lock); + req = engine->req; + spin_unlock_bh(&engine->lock); + if (req) { + ctx = crypto_tfm_ctx(req->tfm); + res = ctx->ops->process(req, status & mask); + if (res != -EINPROGRESS) { + spin_lock_bh(&engine->lock); + engine->req = NULL; + mv_cesa_dequeue_req_unlocked(engine); + spin_unlock_bh(&engine->lock); + ctx->ops->cleanup(req); + local_bh_disable(); + req->complete(req, res); + local_bh_enable(); + } else { + ctx->ops->step(req); + } + } + } + + return ret; +} + +int mv_cesa_queue_req(struct crypto_async_request *req) +{ + int ret; + int i; + + spin_lock_bh(&cesa_dev->lock); + ret = crypto_enqueue_request(&cesa_dev->queue, req); + spin_unlock_bh(&cesa_dev->lock); + + if (ret != -EINPROGRESS) + return ret; + + for (i = 0; i < cesa_dev->caps->nengines; i++) { + spin_lock_bh(&cesa_dev->engines[i].lock); + if (!cesa_dev->engines[i].req) + mv_cesa_dequeue_req_unlocked(&cesa_dev->engines[i]); + spin_unlock_bh(&cesa_dev->engines[i].lock); + } + + return -EINPROGRESS; +} + +static int mv_cesa_add_algs(struct mv_cesa_dev *cesa) +{ + int ret; + int i, j; + + for (i = 0; i < cesa->caps->ncipher_algs; i++) { + ret = crypto_register_alg(cesa->caps->cipher_algs[i]); + if (ret) + goto err_unregister_crypto; + } + + for (i = 0; i < cesa->caps->nahash_algs; i++) { + ret = crypto_register_ahash(cesa->caps->ahash_algs[i]); + if (ret) + goto err_unregister_ahash; + } + + return 0; + +err_unregister_ahash: + for (j = 0; j < i; j++) + crypto_unregister_ahash(cesa->caps->ahash_algs[j]); + i = cesa->caps->ncipher_algs; + +err_unregister_crypto: + for (j = 0; j < i; j++) + crypto_unregister_alg(cesa->caps->cipher_algs[j]); + + return ret; +} + +static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa) +{ + int i; + + for (i = 0; i < cesa->caps->nahash_algs; i++) + crypto_unregister_ahash(cesa->caps->ahash_algs[i]); + + for (i = 0; i < cesa->caps->ncipher_algs; i++) + crypto_unregister_alg(cesa->caps->cipher_algs[i]); +} + +static struct crypto_alg *armada_370_cipher_algs[] = { + &mv_cesa_ecb_aes_alg, + &mv_cesa_cbc_aes_alg, +}; + +static struct ahash_alg *armada_370_ahash_algs[] = { + &mv_sha1_alg, + &mv_ahmac_sha1_alg, +}; + +static const struct mv_cesa_caps armada_370_caps = { + .nengines = 1, + .cipher_algs = armada_370_cipher_algs, + .ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs), + .ahash_algs = armada_370_ahash_algs, + .nahash_algs = ARRAY_SIZE(armada_370_ahash_algs), +}; + +static const struct of_device_id mv_cesa_of_match_table[] = { + { .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps }, + {} +}; +MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table); + +static int mv_cesa_get_sram(struct platform_device *pdev, int idx) +{ + struct mv_cesa_dev *cesa = platform_get_drvdata(pdev); + struct mv_cesa_engine *engine = &cesa->engines[idx]; + const char *res_name = "sram"; + struct resource *res; + + engine->pool = of_get_named_gen_pool(cesa->dev->of_node, + "marvell,crypto-srams", + idx); + if (engine->pool) { + engine->sram = gen_pool_dma_alloc(engine->pool, + cesa->sram_size, + &engine->sram_dma); + if (engine->sram) + return 0; + + engine->pool = NULL; + return -ENOMEM; + } + + if (cesa->caps->nengines > 1) { + if (!idx) + res_name = "sram0"; + else + res_name = "sram1"; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + res_name); + if (!res || resource_size(res) < cesa->sram_size) + return -EINVAL; + + 
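+ /* No "marvell,crypto-srams" pool was found: fall back to the named SRAM memory resource, map it, and derive its DMA address from the physical address. */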
engine->sram = devm_ioremap_resource(cesa->dev, res); + if (IS_ERR(engine->sram)) + return PTR_ERR(engine->sram); + + engine->sram_dma = phys_to_dma(cesa->dev, + (phys_addr_t)res->start); + + return 0; +} + +static void mv_cesa_put_sram(struct platform_device *pdev, int idx) +{ + struct mv_cesa_dev *cesa = platform_get_drvdata(pdev); + struct mv_cesa_engine *engine = &cesa->engines[idx]; + + if (!engine->pool) + return; + + gen_pool_free(engine->pool, (unsigned long)engine->sram, + cesa->sram_size); +} + +static int mv_cesa_probe(struct platform_device *pdev) +{ + const struct mv_cesa_caps *caps = NULL; + const struct mbus_dram_target_info *dram; + const struct of_device_id *match; + struct device *dev = &pdev->dev; + struct mv_cesa_dev *cesa; + struct mv_cesa_engine *engines; + struct resource *res; + int irq, ret, i; + u32 sram_size; + + if (cesa_dev) { + dev_err(&pdev->dev, "Only one CESA device authorized\n"); + return -EEXIST; + } + + if (!dev->of_node) + return -ENOTSUPP; + + match = of_match_node(mv_cesa_of_match_table, dev->of_node); + if (!match || !match->data) + return -ENOTSUPP; + + caps = match->data; + + cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL); + if (!cesa) + return -ENOMEM; + + cesa->caps = caps; + cesa->dev = dev; + + sram_size = CESA_SA_DEFAULT_SRAM_SIZE; + of_property_read_u32(cesa->dev->of_node, "marvell,crypto-sram-size", + &sram_size); + if (sram_size < CESA_SA_MIN_SRAM_SIZE) + sram_size = CESA_SA_MIN_SRAM_SIZE; + + cesa->sram_size = sram_size; + cesa->engines = devm_kzalloc(dev, caps->nengines * sizeof(*engines), + GFP_KERNEL); + if (!cesa->engines) + return -ENOMEM; + + spin_lock_init(&cesa->lock); + crypto_init_queue(&cesa->queue, 50); + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); + cesa->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(cesa->regs)) + return -ENOMEM; + + dram = mv_mbus_dram_info_nooverlap(); + + platform_set_drvdata(pdev, cesa); + + for (i = 0; i < caps->nengines; i++) { + struct mv_cesa_engine *engine = &cesa->engines[i]; + char res_name[7]; + + engine->id = i; + spin_lock_init(&engine->lock); + + ret = mv_cesa_get_sram(pdev, i); + if (ret) + goto err_cleanup; + + irq = platform_get_irq(pdev, i); + if (irq < 0) { + ret = irq; + goto err_cleanup; + } + + /* + * Not all platforms can gate the CESA clocks: do not complain + * if the clock does not exist. 
+ */ + snprintf(res_name, sizeof(res_name), "cesa%d", i); + engine->clk = devm_clk_get(dev, res_name); + if (IS_ERR(engine->clk)) { + engine->clk = devm_clk_get(dev, NULL); + if (IS_ERR(engine->clk)) + engine->clk = NULL; + } + + snprintf(res_name, sizeof(res_name), "cesaz%d", i); + engine->zclk = devm_clk_get(dev, res_name); + if (IS_ERR(engine->zclk)) + engine->zclk = NULL; + + ret = clk_prepare_enable(engine->clk); + if (ret) + goto err_cleanup; + + ret = clk_prepare_enable(engine->zclk); + if (ret) + goto err_cleanup; + + engine->regs = cesa->regs + CESA_ENGINE_OFF(i); + + writel(0, cesa->engines[i].regs + CESA_SA_INT_STATUS); + writel(CESA_SA_CFG_STOP_DIG_ERR, + cesa->engines[i].regs + CESA_SA_CFG); + writel(engine->sram_dma & CESA_SA_SRAM_MSK, + cesa->engines[i].regs + CESA_SA_DESC_P0); + + ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int, + IRQF_ONESHOT, + dev_name(&pdev->dev), + &cesa->engines[i]); + if (ret) + goto err_cleanup; + } + + cesa_dev = cesa; + + ret = mv_cesa_add_algs(cesa); + if (ret) { + cesa_dev = NULL; + goto err_cleanup; + } + + dev_info(dev, "CESA device successfully registered\n"); + + return 0; + +err_cleanup: + for (i = 0; i < caps->nengines; i++) { + clk_disable_unprepare(cesa->engines[i].zclk); + clk_disable_unprepare(cesa->engines[i].clk); + mv_cesa_put_sram(pdev, i); + } + + return ret; +} + +static int mv_cesa_remove(struct platform_device *pdev) +{ + struct mv_cesa_dev *cesa = platform_get_drvdata(pdev); + int i; + + mv_cesa_remove_algs(cesa); + + for (i = 0; i < cesa->caps->nengines; i++) { + clk_disable_unprepare(cesa->engines[i].zclk); + clk_disable_unprepare(cesa->engines[i].clk); + mv_cesa_put_sram(pdev, i); + } + + return 0; +} + +static struct platform_driver marvell_cesa = { + .probe = mv_cesa_probe, + .remove = mv_cesa_remove, + .driver = { + .owner = THIS_MODULE, + .name = "marvell-cesa", + .of_match_table = mv_cesa_of_match_table, + }, +}; +module_platform_driver(marvell_cesa); + +MODULE_ALIAS("platform:mv_crypto"); +MODULE_AUTHOR("Boris Brezillon "); +MODULE_AUTHOR("Arnaud Ebalard "); +MODULE_DESCRIPTION("Support for Marvell's cryptographic engine"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3-59-g8ed1b
From db509a45339fd786de355b11db34ff7421488cb1 Mon Sep 17 00:00:00 2001 From: Boris BREZILLON Date: Thu, 18 Jun 2015 15:46:21 +0200 Subject: crypto: marvell/cesa - add TDMA support The CESA IP supports CPU offload through a dedicated DMA engine (TDMA) which can control the crypto block. When this mode is used, all the required data (operation metadata and payload data) are transferred using DMA, and the results are retrieved through DMA when possible (hash results are not retrieved through DMA yet), thus reducing the involvement of the CPU and providing better performance in most cases (for small requests, the cost of DMA preparation might exceed the performance gain). Note that some CESA IPs do not embed this dedicated DMA engine, hence this feature is enabled on a per-platform basis.
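To make the chaining scheme concrete, here is a minimal, self-contained user-space sketch (illustration only, not driver code) of how such a descriptor chain is built and walked. Plain pointers and the hypothetical tdma_desc/chain_add names stand in for the real mv_cesa_tdma_desc structure and the dma_pool-backed helpers introduced by this patch:

#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>

struct tdma_desc {
	uint32_t byte_cnt;		/* transfer size; the driver also ORs in BIT(31) */
	struct tdma_desc *next;		/* stands in for both 'next' and 'next_dma' */
};

static struct tdma_desc *chain_add(struct tdma_desc **first,
				   struct tdma_desc **last, uint32_t byte_cnt)
{
	struct tdma_desc *d = calloc(1, sizeof(*d));

	if (!d)
		return NULL;
	d->byte_cnt = byte_cnt;
	if (*last)			/* same linking as mv_cesa_dma_add_desc() */
		(*last)->next = d;
	else
		*first = d;
	*last = d;
	return d;
}

int main(void)
{
	struct tdma_desc *first = NULL, *last = NULL, *d;

	chain_add(&first, &last, 32);	/* operation descriptor */
	chain_add(&first, &last, 2048);	/* payload data transfer */
	chain_add(&first, &last, 0);	/* dummy descriptor launching the engine */

	/* walk and free the chain front to back, as mv_cesa_dma_cleanup() does */
	while ((d = first) != NULL) {
		first = d->next;
		printf("desc: %" PRIu32 " bytes\n", d->byte_cnt);
		free(d);
	}
	return 0;
}

In the real driver each descriptor additionally carries its own bus address (cur_dma) and that of its successor (next_dma), and the engine is started by writing the first descriptor's cur_dma to the CESA_TDMA_NEXT_ADDR register (see mv_cesa_dma_step() below).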
Signed-off-by: Boris Brezillon Signed-off-by: Arnaud Ebalard Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 1 + drivers/crypto/marvell/Makefile | 2 +- drivers/crypto/marvell/cesa.c | 68 +++++++ drivers/crypto/marvell/cesa.h | 229 ++++++++++++++++++++++ drivers/crypto/marvell/cipher.c | 179 ++++++++++++++++- drivers/crypto/marvell/hash.c | 414 +++++++++++++++++++++++++++++++++++++++- drivers/crypto/marvell/tdma.c | 224 ++++++++++++++++++++++ 7 files changed, 1101 insertions(+), 16 deletions(-) create mode 100644 drivers/crypto/marvell/tdma.c (limited to 'drivers/crypto/marvell/cesa.c')
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index dbb35bbefaf3..fd69aa04b475 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -184,6 +184,7 @@ config CRYPTO_DEV_MARVELL_CESA help This driver allows you to utilize the Cryptographic Engines and Security Accelerator (CESA) which can be found on the Armada 370. + This driver supports CPU offload through DMA transfers. This driver is aimed at replacing the mv_cesa driver. This will only happen once it has received proper testing.
diff --git a/drivers/crypto/marvell/Makefile b/drivers/crypto/marvell/Makefile index 68d0982c3416..0c12b13574dc 100644 --- a/drivers/crypto/marvell/Makefile +++ b/drivers/crypto/marvell/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell-cesa.o -marvell-cesa-objs := cesa.o cipher.o hash.o +marvell-cesa-objs := cesa.o cipher.o hash.o tdma.o
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c index 76a694303839..986f024d73fa 100644 --- a/drivers/crypto/marvell/cesa.c +++ b/drivers/crypto/marvell/cesa.c @@ -184,6 +184,7 @@ static const struct mv_cesa_caps armada_370_caps = { .ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs), .ahash_algs = armada_370_ahash_algs, .nahash_algs = ARRAY_SIZE(armada_370_ahash_algs), + .has_tdma = true, }; static const struct of_device_id mv_cesa_of_match_table[] = { @@ -192,6 +193,66 @@ static const struct of_device_id mv_cesa_of_match_table[] = { }; MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table); +static void +mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine, + const struct mbus_dram_target_info *dram) +{ + void __iomem *iobase = engine->regs; + int i; + + for (i = 0; i < 4; i++) { + writel(0, iobase + CESA_TDMA_WINDOW_CTRL(i)); + writel(0, iobase + CESA_TDMA_WINDOW_BASE(i)); + } + + for (i = 0; i < dram->num_cs; i++) { + const struct mbus_dram_window *cs = dram->cs + i; + + writel(((cs->size - 1) & 0xffff0000) | + (cs->mbus_attr << 8) | + (dram->mbus_dram_target_id << 4) | 1, + iobase + CESA_TDMA_WINDOW_CTRL(i)); + writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i)); + } +} + +static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa) +{ + struct device *dev = cesa->dev; + struct mv_cesa_dev_dma *dma; + + if (!cesa->caps->has_tdma) + return 0; + + dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL); + if (!dma) + return -ENOMEM; + + dma->tdma_desc_pool = dmam_pool_create("tdma_desc", dev, + sizeof(struct mv_cesa_tdma_desc), + 16, 0); + if (!dma->tdma_desc_pool) + return -ENOMEM; + + dma->op_pool = dmam_pool_create("cesa_op", dev, + sizeof(struct mv_cesa_op_ctx), 16, 0); + if (!dma->op_pool) + return -ENOMEM; + + dma->cache_pool = dmam_pool_create("cesa_cache", dev, + CESA_MAX_HASH_BLOCK_SIZE, 1, 0); + if (!dma->cache_pool) + return -ENOMEM; + + dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0); + if (!dma->padding_pool) + return -ENOMEM; + + cesa->dma = dma; + + return 0; +} + static int 
mv_cesa_get_sram(struct platform_device *pdev, int idx) { struct mv_cesa_dev *cesa = platform_get_drvdata(pdev); @@ -299,6 +360,10 @@ static int mv_cesa_probe(struct platform_device *pdev) if (IS_ERR(cesa->regs)) return -ENOMEM; + ret = mv_cesa_dev_dma_init(cesa); + if (ret) + return ret; + dram = mv_mbus_dram_info_nooverlap(); platform_set_drvdata(pdev, cesa); @@ -347,6 +412,9 @@ static int mv_cesa_probe(struct platform_device *pdev) engine->regs = cesa->regs + CESA_ENGINE_OFF(i); + if (dram && cesa->caps->has_tdma) + mv_cesa_conf_mbus_windows(&cesa->engines[i], dram); + writel(0, cesa->engines[i].regs + CESA_SA_INT_STATUS); writel(CESA_SA_CFG_STOP_DIG_ERR, cesa->engines[i].regs + CESA_SA_CFG); diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h index f68057c7d96a..f8faf260b6d7 100644 --- a/drivers/crypto/marvell/cesa.h +++ b/drivers/crypto/marvell/cesa.h @@ -6,6 +6,7 @@ #include #include +#include #define CESA_ENGINE_OFF(i) (((i) * 0x2000)) @@ -267,11 +268,94 @@ struct mv_cesa_op_ctx { } ctx; }; +/* TDMA descriptor flags */ +#define CESA_TDMA_DST_IN_SRAM BIT(31) +#define CESA_TDMA_SRC_IN_SRAM BIT(30) +#define CESA_TDMA_TYPE_MSK GENMASK(29, 0) +#define CESA_TDMA_DUMMY 0 +#define CESA_TDMA_DATA 1 +#define CESA_TDMA_OP 2 + +/** + * struct mv_cesa_tdma_desc - TDMA descriptor + * @byte_cnt: number of bytes to transfer + * @src: DMA address of the source + * @dst: DMA address of the destination + * @next_dma: DMA address of the next TDMA descriptor + * @cur_dma: DMA address of this TDMA descriptor + * @next: pointer to the next TDMA descriptor + * @op: CESA operation attached to this TDMA descriptor + * @data: raw data attached to this TDMA descriptor + * @flags: flags describing the TDMA transfer. See the + * "TDMA descriptor flags" section above + * + * TDMA descriptor used to create a transfer chain describing a crypto + * operation. + */ +struct mv_cesa_tdma_desc { + u32 byte_cnt; + u32 src; + u32 dst; + u32 next_dma; + u32 cur_dma; + struct mv_cesa_tdma_desc *next; + union { + struct mv_cesa_op_ctx *op; + void *data; + }; + u32 flags; +}; + +/** + * struct mv_cesa_sg_dma_iter - scatter-gather iterator + * @dir: transfer direction + * @sg: scatter list + * @offset: current position in the scatter list + * @op_offset: current position in the crypto operation + * + * Iterator used to iterate over a scatterlist while creating a TDMA chain for + * a crypto operation. + */ +struct mv_cesa_sg_dma_iter { + enum dma_data_direction dir; + struct scatterlist *sg; + unsigned int offset; + unsigned int op_offset; +}; + +/** + * struct mv_cesa_dma_iter - crypto operation iterator + * @len: the crypto operation length + * @offset: current position in the crypto operation + * @op_len: sub-operation length (the crypto engine can only act on 2kb + * chunks) + * + * Iterator used to create a TDMA chain for a given crypto operation. + */ +struct mv_cesa_dma_iter { + unsigned int len; + unsigned int offset; + unsigned int op_len; +}; + +/** + * struct mv_cesa_tdma_chain - TDMA chain + * @first: first entry in the TDMA chain + * @last: last entry in the TDMA chain + * + * Stores a TDMA chain for a specific crypto operation. 
+ */ +struct mv_cesa_tdma_chain { + struct mv_cesa_tdma_desc *first; + struct mv_cesa_tdma_desc *last; +}; + struct mv_cesa_engine; /** * struct mv_cesa_caps - CESA device capabilities * @engines: number of engines + * @has_tdma: whether this device has a TDMA block * @cipher_algs: supported cipher algorithms * @ncipher_algs: number of supported cipher algorithms * @ahash_algs: supported hash algorithms @@ -281,12 +365,31 @@ struct mv_cesa_engine; */ struct mv_cesa_caps { int nengines; + bool has_tdma; struct crypto_alg **cipher_algs; int ncipher_algs; struct ahash_alg **ahash_algs; int nahash_algs; }; +/** + * struct mv_cesa_dev_dma - DMA pools + * @tdma_desc_pool: TDMA desc pool + * @op_pool: crypto operation pool + * @cache_pool: data cache pool (used by hash implementation when the + * hash request is smaller than the hash block size) + * @padding_pool: padding pool (used by hash implementation when hardware + * padding cannot be used) + * + * Structure containing the different DMA pools used by this driver. + */ +struct mv_cesa_dev_dma { + struct dma_pool *tdma_desc_pool; + struct dma_pool *op_pool; + struct dma_pool *cache_pool; + struct dma_pool *padding_pool; +}; + /** * struct mv_cesa_dev - CESA device * @caps: device capabilities @@ -295,6 +398,7 @@ struct mv_cesa_caps { * @lock: device lock * @queue: crypto request queue * @engines: array of engines + * @dma: dma pools * * Structure storing CESA device information. */ @@ -306,6 +410,7 @@ struct mv_cesa_dev { spinlock_t lock; struct crypto_queue queue; struct mv_cesa_engine *engines; + struct mv_cesa_dev_dma *dma; }; /** @@ -391,9 +496,11 @@ struct mv_cesa_hmac_ctx { /** * enum mv_cesa_req_type - request type definitions * @CESA_STD_REQ: standard request + * @CESA_DMA_REQ: DMA request */ enum mv_cesa_req_type { CESA_STD_REQ, + CESA_DMA_REQ, }; /** @@ -406,6 +513,27 @@ struct mv_cesa_req { struct mv_cesa_engine *engine; }; +/** + * struct mv_cesa_tdma_req - CESA TDMA request + * @base: base information + * @chain: TDMA chain + */ +struct mv_cesa_tdma_req { + struct mv_cesa_req base; + struct mv_cesa_tdma_chain chain; +}; + +/** + * struct mv_cesa_sg_std_iter - CESA scatter-gather iterator for standard + * requests + * @iter: sg mapping iterator + * @offset: current offset in the SG entry mapped in memory + */ +struct mv_cesa_sg_std_iter { + struct sg_mapping_iter iter; + unsigned int offset; +}; + /** * struct mv_cesa_ablkcipher_std_req - cipher standard request * @base: base information @@ -430,6 +558,7 @@ struct mv_cesa_ablkcipher_std_req { struct mv_cesa_ablkcipher_req { union { struct mv_cesa_req base; + struct mv_cesa_tdma_req dma; struct mv_cesa_ablkcipher_std_req std; } req; int src_nents; @@ -446,6 +575,20 @@ struct mv_cesa_ahash_std_req { unsigned int offset; }; +/** + * struct mv_cesa_ahash_dma_req - DMA hash request + * @base: base information + * @padding: padding buffer + * @padding_dma: DMA address of the padding buffer + * @cache_dma: DMA address of the cache buffer + */ +struct mv_cesa_ahash_dma_req { + struct mv_cesa_tdma_req base; + u8 *padding; + dma_addr_t padding_dma; + dma_addr_t cache_dma; +}; + /** * struct mv_cesa_ahash_req - hash request * @req: type specific request information @@ -460,6 +603,7 @@ struct mv_cesa_ahash_std_req { struct mv_cesa_ahash_req { union { struct mv_cesa_req base; + struct mv_cesa_ahash_dma_req dma; struct mv_cesa_ahash_std_req std; } req; struct mv_cesa_op_ctx op_tmpl; @@ -543,6 +687,91 @@ static inline u32 mv_cesa_get_int_mask(struct mv_cesa_engine *engine) int 
mv_cesa_queue_req(struct crypto_async_request *req); +/* TDMA functions */ + +static inline void mv_cesa_req_dma_iter_init(struct mv_cesa_dma_iter *iter, + unsigned int len) +{ + iter->len = len; + iter->op_len = min(len, CESA_SA_SRAM_PAYLOAD_SIZE); + iter->offset = 0; +} + +static inline void mv_cesa_sg_dma_iter_init(struct mv_cesa_sg_dma_iter *iter, + struct scatterlist *sg, + enum dma_data_direction dir) +{ + iter->op_offset = 0; + iter->offset = 0; + iter->sg = sg; + iter->dir = dir; +} + +static inline unsigned int +mv_cesa_req_dma_iter_transfer_len(struct mv_cesa_dma_iter *iter, + struct mv_cesa_sg_dma_iter *sgiter) +{ + return min(iter->op_len - sgiter->op_offset, + sg_dma_len(sgiter->sg) - sgiter->offset); +} + +bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *chain, + struct mv_cesa_sg_dma_iter *sgiter, + unsigned int len); + +static inline bool mv_cesa_req_dma_iter_next_op(struct mv_cesa_dma_iter *iter) +{ + iter->offset += iter->op_len; + iter->op_len = min(iter->len - iter->offset, + CESA_SA_SRAM_PAYLOAD_SIZE); + + return iter->op_len; +} + +void mv_cesa_dma_step(struct mv_cesa_tdma_req *dreq); + +static inline int mv_cesa_dma_process(struct mv_cesa_tdma_req *dreq, + u32 status) +{ + if (!(status & CESA_SA_INT_ACC0_IDMA_DONE)) + return -EINPROGRESS; + + if (status & CESA_SA_INT_IDMA_OWN_ERR) + return -EINVAL; + + return 0; +} + +void mv_cesa_dma_prepare(struct mv_cesa_tdma_req *dreq, + struct mv_cesa_engine *engine); + +void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq); + +static inline void +mv_cesa_tdma_desc_iter_init(struct mv_cesa_tdma_chain *chain) +{ + memset(chain, 0, sizeof(*chain)); +} + +struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain, + const struct mv_cesa_op_ctx *op_templ, + bool skip_ctx, + gfp_t flags); + +int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain, + dma_addr_t dst, dma_addr_t src, u32 size, + u32 flags, gfp_t gfp_flags); + +int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, + u32 flags); + +int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, u32 flags); + +int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain, + struct mv_cesa_dma_iter *dma_iter, + struct mv_cesa_sg_dma_iter *sgiter, + gfp_t gfp_flags); + /* Algorithm definitions */ extern struct ahash_alg mv_sha1_alg; diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c index e6eea488f11e..e21783b83046 100644 --- a/drivers/crypto/marvell/cipher.c +++ b/drivers/crypto/marvell/cipher.c @@ -21,6 +21,55 @@ struct mv_cesa_aes_ctx { struct crypto_aes_ctx aes; }; +struct mv_cesa_ablkcipher_dma_iter { + struct mv_cesa_dma_iter base; + struct mv_cesa_sg_dma_iter src; + struct mv_cesa_sg_dma_iter dst; +}; + +static inline void +mv_cesa_ablkcipher_req_iter_init(struct mv_cesa_ablkcipher_dma_iter *iter, + struct ablkcipher_request *req) +{ + mv_cesa_req_dma_iter_init(&iter->base, req->nbytes); + mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE); + mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE); +} + +static inline bool +mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter) +{ + iter->src.op_offset = 0; + iter->dst.op_offset = 0; + + return mv_cesa_req_dma_iter_next_op(&iter->base); +} + +static inline void +mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req) +{ + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); + + if (req->dst != req->src) { + dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents, + 
DMA_FROM_DEVICE); + dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, + DMA_TO_DEVICE); + } else { + dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, + DMA_BIDIRECTIONAL); + } + mv_cesa_dma_cleanup(&creq->req.dma); +} + +static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request *req) +{ + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); + + if (creq->req.base.type == CESA_DMA_REQ) + mv_cesa_ablkcipher_dma_cleanup(req); +} + static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req) { struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); @@ -77,7 +126,11 @@ static int mv_cesa_ablkcipher_process(struct crypto_async_request *req, struct mv_cesa_engine *engine = sreq->base.engine; int ret; - ret = mv_cesa_ablkcipher_std_process(ablkreq, status); + if (creq->req.base.type == CESA_DMA_REQ) + ret = mv_cesa_dma_process(&creq->req.dma, status); + else + ret = mv_cesa_ablkcipher_std_process(ablkreq, status); + if (ret) return ret; @@ -90,8 +143,21 @@ static int mv_cesa_ablkcipher_process(struct crypto_async_request *req, static void mv_cesa_ablkcipher_step(struct crypto_async_request *req) { struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); - mv_cesa_ablkcipher_std_step(ablkreq); + if (creq->req.base.type == CESA_DMA_REQ) + mv_cesa_dma_step(&creq->req.dma); + else + mv_cesa_ablkcipher_std_step(ablkreq); +} + +static inline void +mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request *req) +{ + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); + struct mv_cesa_tdma_req *dreq = &creq->req.dma; + + mv_cesa_dma_prepare(dreq, dreq->base.engine); } static inline void @@ -115,12 +181,18 @@ static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req, creq->req.base.engine = engine; - mv_cesa_ablkcipher_std_prepare(ablkreq); + if (creq->req.base.type == CESA_DMA_REQ) + mv_cesa_ablkcipher_dma_prepare(ablkreq); + else + mv_cesa_ablkcipher_std_prepare(ablkreq); } static inline void mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req) { + struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); + + mv_cesa_ablkcipher_cleanup(ablkreq); } static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = { @@ -166,6 +238,92 @@ static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, return 0; } +static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req, + const struct mv_cesa_op_ctx *op_templ) +{ + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
+ GFP_KERNEL : GFP_ATOMIC; + struct mv_cesa_tdma_req *dreq = &creq->req.dma; + struct mv_cesa_ablkcipher_dma_iter iter; + struct mv_cesa_tdma_chain chain; + bool skip_ctx = false; + int ret; + + dreq->base.type = CESA_DMA_REQ; + dreq->chain.first = NULL; + dreq->chain.last = NULL; + + if (req->src != req->dst) { + ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents, + DMA_TO_DEVICE); + if (!ret) + return -ENOMEM; + + ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents, + DMA_FROM_DEVICE); + if (!ret) { + ret = -ENOMEM; + goto err_unmap_src; + } + } else { + ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents, + DMA_BIDIRECTIONAL); + if (!ret) + return -ENOMEM; + } + + mv_cesa_tdma_desc_iter_init(&chain); + mv_cesa_ablkcipher_req_iter_init(&iter, req); + + do { + struct mv_cesa_op_ctx *op; + + op = mv_cesa_dma_add_op(&chain, op_templ, skip_ctx, flags); + if (IS_ERR(op)) { + ret = PTR_ERR(op); + goto err_free_tdma; + } + skip_ctx = true; + + mv_cesa_set_crypt_op_len(op, iter.base.op_len); + + /* Add input transfers */ + ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base, + &iter.src, flags); + if (ret) + goto err_free_tdma; + + /* Add dummy desc to launch the crypto operation */ + ret = mv_cesa_dma_add_dummy_launch(&chain, flags); + if (ret) + goto err_free_tdma; + + /* Add output transfers */ + ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base, + &iter.dst, flags); + if (ret) + goto err_free_tdma; + + } while (mv_cesa_ablkcipher_req_iter_next_op(&iter)); + + dreq->chain = chain; + + return 0; + +err_free_tdma: + mv_cesa_dma_cleanup(dreq); + if (req->dst != req->src) + dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents, + DMA_FROM_DEVICE); + +err_unmap_src: + dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, + req->dst != req->src ? 
DMA_TO_DEVICE : DMA_BIDIRECTIONAL); + + return ret; +} + static inline int mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req, const struct mv_cesa_op_ctx *op_templ) @@ -186,6 +344,7 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req, struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); unsigned int blksize = crypto_ablkcipher_blocksize(tfm); + int ret; if (!IS_ALIGNED(req->nbytes, blksize)) return -EINVAL; @@ -196,7 +355,13 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req, mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY, CESA_SA_DESC_CFG_OP_MSK); - return mv_cesa_ablkcipher_std_req_init(req, tmpl); + /* TODO: add a threshold for DMA usage */ + if (cesa_dev->caps->has_tdma) + ret = mv_cesa_ablkcipher_dma_req_init(req, tmpl); + else + ret = mv_cesa_ablkcipher_std_req_init(req, tmpl); + + return ret; } static int mv_cesa_aes_op(struct ablkcipher_request *req, @@ -230,7 +395,11 @@ static int mv_cesa_aes_op(struct ablkcipher_request *req, if (ret) return ret; - return mv_cesa_queue_req(&req->base); + ret = mv_cesa_queue_req(&req->base); + if (ret && ret != -EINPROGRESS) + mv_cesa_ablkcipher_cleanup(req); + + return ret; } static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req) diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c index 2d33f6816c31..528da260a7cc 100644 --- a/drivers/crypto/marvell/hash.c +++ b/drivers/crypto/marvell/hash.c @@ -16,6 +16,47 @@ #include "cesa.h" +struct mv_cesa_ahash_dma_iter { + struct mv_cesa_dma_iter base; + struct mv_cesa_sg_dma_iter src; +}; + +static inline void +mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter, + struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + unsigned int len = req->nbytes; + + if (!creq->last_req) + len = (len + creq->cache_ptr) & ~CESA_HASH_BLOCK_SIZE_MSK; + + mv_cesa_req_dma_iter_init(&iter->base, len); + mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE); + iter->src.op_offset = creq->cache_ptr; +} + +static inline bool +mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter) +{ + iter->src.op_offset = 0; + + return mv_cesa_req_dma_iter_next_op(&iter->base); +} + +static inline int mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_req *creq, + gfp_t flags) +{ + struct mv_cesa_ahash_dma_req *dreq = &creq->req.dma; + + creq->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags, + &dreq->cache_dma); + if (!creq->cache) + return -ENOMEM; + + return 0; +} + static inline int mv_cesa_ahash_std_alloc_cache(struct mv_cesa_ahash_req *creq, gfp_t flags) { @@ -31,11 +72,23 @@ static int mv_cesa_ahash_alloc_cache(struct ahash_request *req) struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
GFP_KERNEL : GFP_ATOMIC; + int ret; if (creq->cache) return 0; - return mv_cesa_ahash_std_alloc_cache(creq, flags); + if (creq->req.base.type == CESA_DMA_REQ) + ret = mv_cesa_ahash_dma_alloc_cache(creq, flags); + else + ret = mv_cesa_ahash_std_alloc_cache(creq, flags); + + return ret; +} + +static inline void mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_req *creq) +{ + dma_pool_free(cesa_dev->dma->cache_pool, creq->cache, + creq->req.dma.cache_dma); } static inline void mv_cesa_ahash_std_free_cache(struct mv_cesa_ahash_req *creq) @@ -48,16 +101,69 @@ static void mv_cesa_ahash_free_cache(struct mv_cesa_ahash_req *creq) if (!creq->cache) return; - mv_cesa_ahash_std_free_cache(creq); + if (creq->req.base.type == CESA_DMA_REQ) + mv_cesa_ahash_dma_free_cache(creq); + else + mv_cesa_ahash_std_free_cache(creq); creq->cache = NULL; } +static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req, + gfp_t flags) +{ + if (req->padding) + return 0; + + req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags, + &req->padding_dma); + if (!req->padding) + return -ENOMEM; + + return 0; +} + +static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req) +{ + if (!req->padding) + return; + + dma_pool_free(cesa_dev->dma->padding_pool, req->padding, + req->padding_dma); + req->padding = NULL; +} + +static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + + mv_cesa_ahash_dma_free_padding(&creq->req.dma); +} + +static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + + dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE); + mv_cesa_dma_cleanup(&creq->req.dma.base); +} + +static inline void mv_cesa_ahash_cleanup(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + + if (creq->req.base.type == CESA_DMA_REQ) + mv_cesa_ahash_dma_cleanup(req); +} + static void mv_cesa_ahash_last_cleanup(struct ahash_request *req) { struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); mv_cesa_ahash_free_cache(creq); + + if (creq->req.base.type == CESA_DMA_REQ) + mv_cesa_ahash_dma_last_cleanup(req); } static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq) @@ -183,6 +289,14 @@ static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status) return 0; } +static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + struct mv_cesa_tdma_req *dreq = &creq->req.dma.base; + + mv_cesa_dma_prepare(dreq, dreq->base.engine); +} + static void mv_cesa_ahash_std_prepare(struct ahash_request *req) { struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); @@ -197,8 +311,12 @@ static void mv_cesa_ahash_std_prepare(struct ahash_request *req) static void mv_cesa_ahash_step(struct crypto_async_request *req) { struct ahash_request *ahashreq = ahash_request_cast(req); + struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); - mv_cesa_ahash_std_step(ahashreq); + if (creq->req.base.type == CESA_DMA_REQ) + mv_cesa_dma_step(&creq->req.dma.base); + else + mv_cesa_ahash_std_step(ahashreq); } static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status) @@ -209,7 +327,11 @@ static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status) unsigned int digsize; int ret, i; - ret = mv_cesa_ahash_std_process(ahashreq, status); + if (creq->req.base.type == CESA_DMA_REQ) + 
ret = mv_cesa_dma_process(&creq->req.dma.base, status); + else + ret = mv_cesa_ahash_std_process(ahashreq, status); + if (ret == -EINPROGRESS) return ret; @@ -243,7 +365,10 @@ static void mv_cesa_ahash_prepare(struct crypto_async_request *req, creq->req.base.engine = engine; - mv_cesa_ahash_std_prepare(ahashreq); + if (creq->req.base.type == CESA_DMA_REQ) + mv_cesa_ahash_dma_prepare(ahashreq); + else + mv_cesa_ahash_std_prepare(ahashreq); digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq)); for (i = 0; i < digsize / 4; i++) @@ -258,6 +383,8 @@ static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req) if (creq->last_req) mv_cesa_ahash_last_cleanup(ahashreq); + + mv_cesa_ahash_cleanup(ahashreq); } static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = { @@ -325,14 +452,267 @@ static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached) return 0; } +static struct mv_cesa_op_ctx * +mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain, + struct mv_cesa_ahash_dma_iter *dma_iter, + struct mv_cesa_ahash_req *creq, + gfp_t flags) +{ + struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma; + struct mv_cesa_op_ctx *op = NULL; + int ret; + + if (!creq->cache_ptr) + return NULL; + + ret = mv_cesa_dma_add_data_transfer(chain, + CESA_SA_DATA_SRAM_OFFSET, + ahashdreq->cache_dma, + creq->cache_ptr, + CESA_TDMA_DST_IN_SRAM, + flags); + if (ret) + return ERR_PTR(ret); + + if (!dma_iter->base.op_len) { + op = mv_cesa_dma_add_op(chain, &creq->op_tmpl, false, flags); + if (IS_ERR(op)) + return op; + + mv_cesa_set_mac_op_frag_len(op, creq->cache_ptr); + + /* Add dummy desc to launch crypto operation */ + ret = mv_cesa_dma_add_dummy_launch(chain, flags); + if (ret) + return ERR_PTR(ret); + } + + return op; +} + +static struct mv_cesa_op_ctx * +mv_cesa_ahash_dma_add_data(struct mv_cesa_tdma_chain *chain, + struct mv_cesa_ahash_dma_iter *dma_iter, + struct mv_cesa_ahash_req *creq, + gfp_t flags) +{ + struct mv_cesa_op_ctx *op; + int ret; + + op = mv_cesa_dma_add_op(chain, &creq->op_tmpl, false, flags); + if (IS_ERR(op)) + return op; + + mv_cesa_set_mac_op_frag_len(op, dma_iter->base.op_len); + + if ((mv_cesa_get_op_cfg(&creq->op_tmpl) & CESA_SA_DESC_CFG_FRAG_MSK) == + CESA_SA_DESC_CFG_FIRST_FRAG) + mv_cesa_update_op_cfg(&creq->op_tmpl, + CESA_SA_DESC_CFG_MID_FRAG, + CESA_SA_DESC_CFG_FRAG_MSK); + + /* Add input transfers */ + ret = mv_cesa_dma_add_op_transfers(chain, &dma_iter->base, + &dma_iter->src, flags); + if (ret) + return ERR_PTR(ret); + + /* Add dummy desc to launch crypto operation */ + ret = mv_cesa_dma_add_dummy_launch(chain, flags); + if (ret) + return ERR_PTR(ret); + + return op; +} + +static struct mv_cesa_op_ctx * +mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain, + struct mv_cesa_ahash_dma_iter *dma_iter, + struct mv_cesa_ahash_req *creq, + struct mv_cesa_op_ctx *op, + gfp_t flags) +{ + struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma; + unsigned int len, trailerlen, padoff = 0; + int ret; + + if (!creq->last_req) + return op; + + if (op && creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) { + u32 frag = CESA_SA_DESC_CFG_NOT_FRAG; + + if ((mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK) != + CESA_SA_DESC_CFG_FIRST_FRAG) + frag = CESA_SA_DESC_CFG_LAST_FRAG; + + mv_cesa_update_op_cfg(op, frag, CESA_SA_DESC_CFG_FRAG_MSK); + + return op; + } + + ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags); + if (ret) + return ERR_PTR(ret); + + trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding); + + if (op) { + len = 
min(CESA_SA_SRAM_PAYLOAD_SIZE - dma_iter->base.op_len, + trailerlen); + if (len) { + ret = mv_cesa_dma_add_data_transfer(chain, + CESA_SA_DATA_SRAM_OFFSET + + dma_iter->base.op_len, + ahashdreq->padding_dma, + len, CESA_TDMA_DST_IN_SRAM, + flags); + if (ret) + return ERR_PTR(ret); + + mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG, + CESA_SA_DESC_CFG_FRAG_MSK); + mv_cesa_set_mac_op_frag_len(op, + dma_iter->base.op_len + len); + padoff += len; + } + } + + if (padoff >= trailerlen) + return op; + + if ((mv_cesa_get_op_cfg(&creq->op_tmpl) & CESA_SA_DESC_CFG_FRAG_MSK) != + CESA_SA_DESC_CFG_FIRST_FRAG) + mv_cesa_update_op_cfg(&creq->op_tmpl, + CESA_SA_DESC_CFG_MID_FRAG, + CESA_SA_DESC_CFG_FRAG_MSK); + + op = mv_cesa_dma_add_op(chain, &creq->op_tmpl, false, flags); + if (IS_ERR(op)) + return op; + + mv_cesa_set_mac_op_frag_len(op, trailerlen - padoff); + + ret = mv_cesa_dma_add_data_transfer(chain, + CESA_SA_DATA_SRAM_OFFSET, + ahashdreq->padding_dma + + padoff, + trailerlen - padoff, + CESA_TDMA_DST_IN_SRAM, + flags); + if (ret) + return ERR_PTR(ret); + + /* Add dummy desc to launch crypto operation */ + ret = mv_cesa_dma_add_dummy_launch(chain, flags); + if (ret) + return ERR_PTR(ret); + + return op; +} + +static int mv_cesa_ahash_dma_req_init(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC; + struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma; + struct mv_cesa_tdma_req *dreq = &ahashdreq->base; + struct mv_cesa_tdma_chain chain; + struct mv_cesa_ahash_dma_iter iter; + struct mv_cesa_op_ctx *op = NULL; + int ret; + + dreq->chain.first = NULL; + dreq->chain.last = NULL; + + if (creq->src_nents) { + ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents, + DMA_TO_DEVICE); + if (!ret) { + ret = -ENOMEM; + goto err; + } + } + + mv_cesa_tdma_desc_iter_init(&chain); + mv_cesa_ahash_req_iter_init(&iter, req); + + op = mv_cesa_ahash_dma_add_cache(&chain, &iter, + creq, flags); + if (IS_ERR(op)) { + ret = PTR_ERR(op); + goto err_free_tdma; + } + + do { + if (!iter.base.op_len) + break; + + op = mv_cesa_ahash_dma_add_data(&chain, &iter, + creq, flags); + if (IS_ERR(op)) { + ret = PTR_ERR(op); + goto err_free_tdma; + } + } while (mv_cesa_ahash_req_iter_next_op(&iter)); + + op = mv_cesa_ahash_dma_last_req(&chain, &iter, creq, op, flags); + if (IS_ERR(op)) { + ret = PTR_ERR(op); + goto err_free_tdma; + } + + if (op) { + /* Add dummy desc to wait for crypto operation end */ + ret = mv_cesa_dma_add_dummy_end(&chain, flags); + if (ret) + goto err_free_tdma; + } + + if (!creq->last_req) + creq->cache_ptr = req->nbytes + creq->cache_ptr - + iter.base.len; + else + creq->cache_ptr = 0; + + dreq->chain = chain; + + return 0; + +err_free_tdma: + mv_cesa_dma_cleanup(dreq); + dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE); + +err: + mv_cesa_ahash_last_cleanup(req); + + return ret; +} + static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached) { struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + int ret; + + if (cesa_dev->caps->has_tdma) + creq->req.base.type = CESA_DMA_REQ; + else + creq->req.base.type = CESA_STD_REQ; - creq->req.base.type = CESA_STD_REQ; creq->src_nents = sg_nents_for_len(req->src, req->nbytes); - return mv_cesa_ahash_cache_req(req, cached); + ret = mv_cesa_ahash_cache_req(req, cached); + if (ret) + return ret; + + if (*cached) + return 0; + + if (creq->req.base.type == CESA_DMA_REQ) + ret = 
mv_cesa_ahash_dma_req_init(req); + + return ret; } static int mv_cesa_ahash_update(struct ahash_request *req) @@ -349,7 +729,13 @@ static int mv_cesa_ahash_update(struct ahash_request *req) if (cached) return 0; - return mv_cesa_queue_req(&req->base); + ret = mv_cesa_queue_req(&req->base); + if (ret && ret != -EINPROGRESS) { + mv_cesa_ahash_cleanup(req); + return ret; + } + + return ret; } static int mv_cesa_ahash_final(struct ahash_request *req) @@ -370,7 +756,11 @@ static int mv_cesa_ahash_final(struct ahash_request *req) if (cached) return 0; - return mv_cesa_queue_req(&req->base); + ret = mv_cesa_queue_req(&req->base); + if (ret && ret != -EINPROGRESS) + mv_cesa_ahash_cleanup(req); + + return ret; } static int mv_cesa_ahash_finup(struct ahash_request *req) @@ -391,7 +781,11 @@ static int mv_cesa_ahash_finup(struct ahash_request *req) if (cached) return 0; - return mv_cesa_queue_req(&req->base); + ret = mv_cesa_queue_req(&req->base); + if (ret && ret != -EINPROGRESS) + mv_cesa_ahash_cleanup(req); + + return ret; } static int mv_cesa_sha1_init(struct ahash_request *req) diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c new file mode 100644 index 000000000000..64a366c50174 --- /dev/null +++ b/drivers/crypto/marvell/tdma.c @@ -0,0 +1,224 @@ +/* + * Provide TDMA helper functions used by cipher and hash algorithm + * implementations. + * + * Author: Boris Brezillon + * Author: Arnaud Ebalard + * + * This work is based on an initial version written by + * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc > + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#include "cesa.h" + +bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *iter, + struct mv_cesa_sg_dma_iter *sgiter, + unsigned int len) +{ + if (!sgiter->sg) + return false; + + sgiter->op_offset += len; + sgiter->offset += len; + if (sgiter->offset == sg_dma_len(sgiter->sg)) { + if (sg_is_last(sgiter->sg)) + return false; + sgiter->offset = 0; + sgiter->sg = sg_next(sgiter->sg); + } + + if (sgiter->op_offset == iter->op_len) + return false; + + return true; +} + +void mv_cesa_dma_step(struct mv_cesa_tdma_req *dreq) +{ + struct mv_cesa_engine *engine = dreq->base.engine; + + writel(0, engine->regs + CESA_SA_CFG); + + mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE); + writel(CESA_TDMA_DST_BURST_128B | CESA_TDMA_SRC_BURST_128B | + CESA_TDMA_NO_BYTE_SWAP | CESA_TDMA_EN, + engine->regs + CESA_TDMA_CONTROL); + + writel(CESA_SA_CFG_ACT_CH0_IDMA | CESA_SA_CFG_MULTI_PKT | + CESA_SA_CFG_CH0_W_IDMA | CESA_SA_CFG_PARA_DIS, + engine->regs + CESA_SA_CFG); + writel(dreq->chain.first->cur_dma, + engine->regs + CESA_TDMA_NEXT_ADDR); + writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD); +} + +void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq) +{ + struct mv_cesa_tdma_desc *tdma; + + for (tdma = dreq->chain.first; tdma;) { + struct mv_cesa_tdma_desc *old_tdma = tdma; + + if (tdma->flags & CESA_TDMA_OP) + dma_pool_free(cesa_dev->dma->op_pool, tdma->op, + le32_to_cpu(tdma->src)); + + tdma = tdma->next; + dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma, + le32_to_cpu(old_tdma->cur_dma)); + } + + dreq->chain.first = NULL; + dreq->chain.last = NULL; +} + +void mv_cesa_dma_prepare(struct mv_cesa_tdma_req *dreq, + struct mv_cesa_engine *engine) +{ + struct mv_cesa_tdma_desc *tdma; + + for (tdma = dreq->chain.first; tdma; tdma = 
tdma->next) { + if (tdma->flags & CESA_TDMA_DST_IN_SRAM) + tdma->dst = cpu_to_le32(tdma->dst + engine->sram_dma); + + if (tdma->flags & CESA_TDMA_SRC_IN_SRAM) + tdma->src = cpu_to_le32(tdma->src + engine->sram_dma); + + if (tdma->flags & CESA_TDMA_OP) + mv_cesa_adjust_op(engine, tdma->op); + } +} + +static struct mv_cesa_tdma_desc * +mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags) +{ + struct mv_cesa_tdma_desc *new_tdma = NULL; + dma_addr_t dma_handle; + + new_tdma = dma_pool_alloc(cesa_dev->dma->tdma_desc_pool, flags, + &dma_handle); + if (!new_tdma) + return ERR_PTR(-ENOMEM); + + memset(new_tdma, 0, sizeof(*new_tdma)); + new_tdma->cur_dma = cpu_to_le32(dma_handle); + if (chain->last) { + chain->last->next_dma = new_tdma->cur_dma; + chain->last->next = new_tdma; + } else { + chain->first = new_tdma; + } + + chain->last = new_tdma; + + return new_tdma; +} + +struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain, + const struct mv_cesa_op_ctx *op_templ, + bool skip_ctx, + gfp_t flags) +{ + struct mv_cesa_tdma_desc *tdma; + struct mv_cesa_op_ctx *op; + dma_addr_t dma_handle; + + tdma = mv_cesa_dma_add_desc(chain, flags); + if (IS_ERR(tdma)) + return ERR_CAST(tdma); + + op = dma_pool_alloc(cesa_dev->dma->op_pool, flags, &dma_handle); + if (!op) + return ERR_PTR(-ENOMEM); + + *op = *op_templ; + + tdma = chain->last; + tdma->op = op; + tdma->byte_cnt = (skip_ctx ? sizeof(op->desc) : sizeof(*op)) | BIT(31); + tdma->src = dma_handle; + tdma->flags = CESA_TDMA_DST_IN_SRAM | CESA_TDMA_OP; + + return op; +} + +int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain, + dma_addr_t dst, dma_addr_t src, u32 size, + u32 flags, gfp_t gfp_flags) +{ + struct mv_cesa_tdma_desc *tdma; + + tdma = mv_cesa_dma_add_desc(chain, gfp_flags); + if (IS_ERR(tdma)) + return PTR_ERR(tdma); + + tdma->byte_cnt = size | BIT(31); + tdma->src = src; + tdma->dst = dst; + + flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM); + tdma->flags = flags | CESA_TDMA_DATA; + + return 0; +} + +int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, + u32 flags) +{ + struct mv_cesa_tdma_desc *tdma; + + tdma = mv_cesa_dma_add_desc(chain, flags); + if (IS_ERR(tdma)) + return PTR_ERR(tdma); + + return 0; +} + +int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, u32 flags) +{ + struct mv_cesa_tdma_desc *tdma; + + tdma = mv_cesa_dma_add_desc(chain, flags); + if (IS_ERR(tdma)) + return PTR_ERR(tdma); + + tdma->byte_cnt = BIT(31); + + return 0; +} + +int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain, + struct mv_cesa_dma_iter *dma_iter, + struct mv_cesa_sg_dma_iter *sgiter, + gfp_t gfp_flags) +{ + u32 flags = sgiter->dir == DMA_TO_DEVICE ? 
+ CESA_TDMA_DST_IN_SRAM : CESA_TDMA_SRC_IN_SRAM; + unsigned int len; + + do { + dma_addr_t dst, src; + int ret; + + len = mv_cesa_req_dma_iter_transfer_len(dma_iter, sgiter); + if (sgiter->dir == DMA_TO_DEVICE) { + dst = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset; + src = sg_dma_address(sgiter->sg) + sgiter->offset; + } else { + dst = sg_dma_address(sgiter->sg) + sgiter->offset; + src = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset; + } + + ret = mv_cesa_dma_add_data_transfer(chain, dst, src, len, + flags, gfp_flags); + if (ret) + return ret; + + } while (mv_cesa_req_dma_iter_next_transfer(dma_iter, sgiter, len)); + + return 0; +} -- cgit v1.2.3-59-g8ed1b From 7b3aaaa095b437bfcb4e4c761a19f50294659f3a Mon Sep 17 00:00:00 2001 From: Boris BREZILLON Date: Thu, 18 Jun 2015 15:46:22 +0200 Subject: crypto: marvell/cesa - add DES support Add support for DES operations. Signed-off-by: Boris Brezillon Signed-off-by: Arnaud Ebalard Signed-off-by: Herbert Xu --- drivers/crypto/marvell/cesa.c | 2 + drivers/crypto/marvell/cesa.h | 2 + drivers/crypto/marvell/cipher.c | 150 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 154 insertions(+) (limited to 'drivers/crypto/marvell/cesa.c') diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c index 986f024d73fa..212840e346d1 100644 --- a/drivers/crypto/marvell/cesa.c +++ b/drivers/crypto/marvell/cesa.c @@ -169,6 +169,8 @@ static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa) } static struct crypto_alg *armada_370_cipher_algs[] = { + &mv_cesa_ecb_des_alg, + &mv_cesa_cbc_des_alg, &mv_cesa_ecb_aes_alg, &mv_cesa_cbc_aes_alg, }; diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h index f8faf260b6d7..4b07209526bd 100644 --- a/drivers/crypto/marvell/cesa.h +++ b/drivers/crypto/marvell/cesa.h @@ -777,6 +777,8 @@ int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain, extern struct ahash_alg mv_sha1_alg; extern struct ahash_alg mv_ahmac_sha1_alg; +extern struct crypto_alg mv_cesa_ecb_des_alg; +extern struct crypto_alg mv_cesa_cbc_des_alg; extern struct crypto_alg mv_cesa_ecb_aes_alg; extern struct crypto_alg mv_cesa_cbc_aes_alg; diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c index e21783b83046..e420ea6c0ec4 100644 --- a/drivers/crypto/marvell/cipher.c +++ b/drivers/crypto/marvell/cipher.c @@ -13,9 +13,15 @@ */ #include +#include #include "cesa.h" +struct mv_cesa_des_ctx { + struct mv_cesa_ctx base; + u8 key[DES_KEY_SIZE]; +}; + struct mv_cesa_aes_ctx { struct mv_cesa_ctx base; struct crypto_aes_ctx aes; @@ -238,6 +244,30 @@ static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, return 0; } +static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm); + u32 tmp[DES_EXPKEY_WORDS]; + int ret; + + if (len != DES_KEY_SIZE) { + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + ret = des_ekey(tmp, key); + if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; + return -EINVAL; + } + + memcpy(ctx->key, key, DES_KEY_SIZE); + + return 0; +} + static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req, const struct mv_cesa_op_ctx *op_templ) { @@ -364,6 +394,126 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req, return ret; } +static int mv_cesa_des_op(struct ablkcipher_request *req, 
+ struct mv_cesa_op_ctx *tmpl) +{ + struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + int ret; + + mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES, + CESA_SA_DESC_CFG_CRYPTM_MSK); + + memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE); + + ret = mv_cesa_ablkcipher_req_init(req, tmpl); + if (ret) + return ret; + + ret = mv_cesa_queue_req(&req->base); + if (ret && ret != -EINPROGRESS) + mv_cesa_ablkcipher_cleanup(req); + + return ret; +} + +static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, + CESA_SA_DESC_CFG_CRYPTCM_ECB | + CESA_SA_DESC_CFG_DIR_ENC); + + return mv_cesa_des_op(req, &tmpl); +} + +static int mv_cesa_ecb_des_decrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, + CESA_SA_DESC_CFG_CRYPTCM_ECB | + CESA_SA_DESC_CFG_DIR_DEC); + + return mv_cesa_des_op(req, &tmpl); +} + +struct crypto_alg mv_cesa_ecb_des_alg = { + .cra_name = "ecb(des)", + .cra_driver_name = "mv-ecb-des", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_cesa_des_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = mv_cesa_ablkcipher_cra_init, + .cra_u = { + .ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = mv_cesa_des_setkey, + .encrypt = mv_cesa_ecb_des_encrypt, + .decrypt = mv_cesa_ecb_des_decrypt, + }, + }, +}; + +static int mv_cesa_cbc_des_op(struct ablkcipher_request *req, + struct mv_cesa_op_ctx *tmpl) +{ + mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC, + CESA_SA_DESC_CFG_CRYPTCM_MSK); + + memcpy(tmpl->ctx.blkcipher.iv, req->info, DES_BLOCK_SIZE); + + return mv_cesa_des_op(req, tmpl); +} + +static int mv_cesa_cbc_des_encrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC); + + return mv_cesa_cbc_des_op(req, &tmpl); +} + +static int mv_cesa_cbc_des_decrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC); + + return mv_cesa_cbc_des_op(req, &tmpl); +} + +struct crypto_alg mv_cesa_cbc_des_alg = { + .cra_name = "cbc(des)", + .cra_driver_name = "mv-cbc-des", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_cesa_des_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = mv_cesa_ablkcipher_cra_init, + .cra_u = { + .ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = mv_cesa_des_setkey, + .encrypt = mv_cesa_cbc_des_encrypt, + .decrypt = mv_cesa_cbc_des_decrypt, + }, + }, +}; + static int mv_cesa_aes_op(struct ablkcipher_request *req, struct mv_cesa_op_ctx *tmpl) { -- cgit v1.2.3-59-g8ed1b From 4ada483978237068bf21c0dc318af676145bfed5 Mon Sep 17 00:00:00 2001 From: Arnaud Ebalard Date: Thu, 18 Jun 2015 15:46:23 +0200 Subject: crypto: marvell/cesa - add Triple-DES support Add support for Triple-DES operations. 
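Both the DES and Triple-DES paths configure the hardware through the same template mechanism: mv_cesa_set_op_cfg() seeds a config word and mv_cesa_update_op_cfg() rewrites one bitfield under a mask. Those helpers are defined in cesa.h rather than in this series, so the sketch below only assumes the usual read-modify-write semantics, and the CFG_* encodings are made-up placeholders rather than the real CESA_SA_DESC_CFG_* values:

#include <stdio.h>
#include <inttypes.h>

/* Hypothetical field encodings standing in for the CESA_SA_DESC_CFG_*
 * constants, which live in cesa.h and are not shown in this series. */
#define CFG_CRYPTM_MSK		(0x3u << 0)	/* cipher algorithm field */
#define CFG_CRYPTM_DES		(0x1u << 0)
#define CFG_CRYPTM_3DES		(0x2u << 0)
#define CFG_CRYPTCM_MSK		(0x1u << 2)	/* chaining mode field */
#define CFG_CRYPTCM_CBC		(0x1u << 2)

/* Assumed read-modify-write semantics of mv_cesa_update_op_cfg():
 * clear the masked field, then set the new value inside it. */
static void update_op_cfg(uint32_t *cfg, uint32_t val, uint32_t mask)
{
	*cfg = (*cfg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t cfg = 0;

	/* mv_cesa_des_op() selects DES, mv_cesa_des3_op() then 3DES,
	 * both under the same CRYPTM mask */
	update_op_cfg(&cfg, CFG_CRYPTM_DES, CFG_CRYPTM_MSK);
	update_op_cfg(&cfg, CFG_CRYPTM_3DES, CFG_CRYPTM_MSK);

	/* mv_cesa_cbc_des_op()-style switch to CBC chaining */
	update_op_cfg(&cfg, CFG_CRYPTCM_CBC, CFG_CRYPTCM_MSK);

	printf("config word: 0x%08" PRIx32 "\n", cfg);
	return 0;
}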
From 4ada483978237068bf21c0dc318af676145bfed5 Mon Sep 17 00:00:00 2001
From: Arnaud Ebalard
Date: Thu, 18 Jun 2015 15:46:23 +0200
Subject: crypto: marvell/cesa - add Triple-DES support

Add support for Triple-DES operations.

Signed-off-by: Arnaud Ebalard
Signed-off-by: Boris Brezillon
Signed-off-by: Herbert Xu
---
 drivers/crypto/marvell/cesa.c | 2 +
 drivers/crypto/marvell/cesa.h | 2 +
 drivers/crypto/marvell/cipher.c | 147 ++++++++++++++++++++++++++++++++++++++++
 3 files changed, 151 insertions(+)

(limited to 'drivers/crypto/marvell/cesa.c')

diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 212840e346d1..c0b1b49cbba5 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -171,6 +171,8 @@ static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
 static struct crypto_alg *armada_370_cipher_algs[] = {
 &mv_cesa_ecb_des_alg,
 &mv_cesa_cbc_des_alg,
+ &mv_cesa_ecb_des3_ede_alg,
+ &mv_cesa_cbc_des3_ede_alg,
 &mv_cesa_ecb_aes_alg,
 &mv_cesa_cbc_aes_alg,
 };
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index 4b07209526bd..1229c3d75b71 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -779,6 +779,8 @@ extern struct ahash_alg mv_ahmac_sha1_alg;

 extern struct crypto_alg mv_cesa_ecb_des_alg;
 extern struct crypto_alg mv_cesa_cbc_des_alg;
+extern struct crypto_alg mv_cesa_ecb_des3_ede_alg;
+extern struct crypto_alg mv_cesa_cbc_des3_ede_alg;
 extern struct crypto_alg mv_cesa_ecb_aes_alg;
 extern struct crypto_alg mv_cesa_cbc_aes_alg;
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index e420ea6c0ec4..0745cf3b9c0e 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -22,6 +22,11 @@ struct mv_cesa_des_ctx {
 u8 key[DES_KEY_SIZE];
 };

+struct mv_cesa_des3_ctx {
+ struct mv_cesa_ctx base;
+ u8 key[DES3_EDE_KEY_SIZE];
+};
+
 struct mv_cesa_aes_ctx {
 struct mv_cesa_ctx base;
 struct crypto_aes_ctx aes;
@@ -268,6 +273,22 @@ static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 return 0;
 }

+static int mv_cesa_des3_ede_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (len != DES3_EDE_KEY_SIZE) {
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);
+
+ return 0;
+}
+
 static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
 const struct mv_cesa_op_ctx *op_templ)
 {
@@ -514,6 +535,132 @@ struct crypto_alg mv_cesa_cbc_des_alg = {
 },
 };

+static int mv_cesa_des3_op(struct ablkcipher_request *req,
+ struct mv_cesa_op_ctx *tmpl)
+{
+ struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ int ret;
+
+ mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
+ CESA_SA_DESC_CFG_CRYPTM_MSK);
+
+ memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);
+
+ ret = mv_cesa_ablkcipher_req_init(req, tmpl);
+ if (ret)
+ return ret;
+
+ ret = mv_cesa_queue_req(&req->base);
+ if (ret && ret != -EINPROGRESS)
+ mv_cesa_ablkcipher_cleanup(req);
+
+ return ret;
+}
+
+static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req)
+{
+ struct mv_cesa_op_ctx tmpl;
+
+ mv_cesa_set_op_cfg(&tmpl,
+ CESA_SA_DESC_CFG_CRYPTCM_ECB |
+ CESA_SA_DESC_CFG_3DES_EDE |
+ CESA_SA_DESC_CFG_DIR_ENC);
+
+ return mv_cesa_des3_op(req, &tmpl);
+}
+
+static int mv_cesa_ecb_des3_ede_decrypt(struct ablkcipher_request *req)
+{
+ struct mv_cesa_op_ctx tmpl;
+
+ mv_cesa_set_op_cfg(&tmpl,
+ CESA_SA_DESC_CFG_CRYPTCM_ECB |
+ CESA_SA_DESC_CFG_3DES_EDE |
+ CESA_SA_DESC_CFG_DIR_DEC);
+
+ return mv_cesa_des3_op(req, &tmpl);
+}
+
+struct crypto_alg mv_cesa_ecb_des3_ede_alg = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "mv-ecb-des3-ede",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cesa_ablkcipher_cra_init,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = mv_cesa_des3_ede_setkey,
+ .encrypt = mv_cesa_ecb_des3_ede_encrypt,
+ .decrypt = mv_cesa_ecb_des3_ede_decrypt,
+ },
+ },
+};
+
+static int mv_cesa_cbc_des3_op(struct ablkcipher_request *req,
+ struct mv_cesa_op_ctx *tmpl)
+{
+ memcpy(tmpl->ctx.blkcipher.iv, req->info, DES3_EDE_BLOCK_SIZE);
+
+ return mv_cesa_des3_op(req, tmpl);
+}
+
+static int mv_cesa_cbc_des3_ede_encrypt(struct ablkcipher_request *req)
+{
+ struct mv_cesa_op_ctx tmpl;
+
+ mv_cesa_set_op_cfg(&tmpl,
+ CESA_SA_DESC_CFG_CRYPTCM_CBC |
+ CESA_SA_DESC_CFG_3DES_EDE |
+ CESA_SA_DESC_CFG_DIR_ENC);
+
+ return mv_cesa_cbc_des3_op(req, &tmpl);
+}
+
+static int mv_cesa_cbc_des3_ede_decrypt(struct ablkcipher_request *req)
+{
+ struct mv_cesa_op_ctx tmpl;
+
+ mv_cesa_set_op_cfg(&tmpl,
+ CESA_SA_DESC_CFG_CRYPTCM_CBC |
+ CESA_SA_DESC_CFG_3DES_EDE |
+ CESA_SA_DESC_CFG_DIR_DEC);
+
+ return mv_cesa_cbc_des3_op(req, &tmpl);
+}
+
+struct crypto_alg mv_cesa_cbc_des3_ede_alg = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "mv-cbc-des3-ede",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cesa_ablkcipher_cra_init,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = mv_cesa_des3_ede_setkey,
+ .encrypt = mv_cesa_cbc_des3_ede_encrypt,
+ .decrypt = mv_cesa_cbc_des3_ede_decrypt,
+ },
+ },
+};
+
 static int mv_cesa_aes_op(struct ablkcipher_request *req,
 struct mv_cesa_op_ctx *tmpl)
 {
-- cgit v1.2.3-59-g8ed1b
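From userspace, the des3_ede transforms registered above are reachable through the AF_ALG socket family with no driver-specific code at all. Below is a minimal sketch, assuming a kernel built with CONFIG_CRYPTO_USER_API_SKCIPHER; the key, IV and message bytes are arbitrary test values and error checking is omitted for brevity.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
        struct sockaddr_alg sa = {
                .salg_family = AF_ALG,
                .salg_type   = "skcipher",
                .salg_name   = "cbc(des3_ede)",
        };
        /* Arbitrary 24-byte 3DES key and 8-byte IV for illustration. */
        unsigned char key[24] = "0123456789abcdefghijklmn";
        unsigned char iv[8] = { 0 };
        unsigned char msg[16] = "hardware test!!!";
        unsigned char out[16];
        char cbuf[CMSG_SPACE(sizeof(int)) +
                  CMSG_SPACE(sizeof(struct af_alg_iv) + sizeof(iv))] = { 0 };
        struct msghdr mh = { 0 };
        struct iovec iov = { .iov_base = msg, .iov_len = sizeof(msg) };
        struct cmsghdr *c;
        struct af_alg_iv *ivp;
        int op = ALG_OP_ENCRYPT;
        int tfmfd, opfd, i;

        tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
        bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
        setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
        opfd = accept(tfmfd, NULL, 0);

        mh.msg_control = cbuf;
        mh.msg_controllen = sizeof(cbuf);
        mh.msg_iov = &iov;
        mh.msg_iovlen = 1;

        /* First control message: operation (encrypt). */
        c = CMSG_FIRSTHDR(&mh);
        c->cmsg_level = SOL_ALG;
        c->cmsg_type = ALG_SET_OP;
        c->cmsg_len = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(c), &op, sizeof(op));

        /* Second control message: the IV. */
        c = CMSG_NXTHDR(&mh, c);
        c->cmsg_level = SOL_ALG;
        c->cmsg_type = ALG_SET_IV;
        c->cmsg_len = CMSG_LEN(sizeof(struct af_alg_iv) + sizeof(iv));
        ivp = (struct af_alg_iv *)CMSG_DATA(c);
        ivp->ivlen = sizeof(iv);
        memcpy(ivp->iv, iv, sizeof(iv));

        sendmsg(opfd, &mh, 0);
        read(opfd, out, sizeof(out));

        for (i = 0; i < (int)sizeof(out); i++)
                printf("%02x", out[i]);
        printf("\n");

        close(opfd);
        close(tfmfd);
        return 0;
}

Which implementation actually serves the request is decided by priority; with the CESA driver bound, cbc(des3_ede) should resolve to mv-cbc-des3-ede (priority 300) rather than the generic software code.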
From 7aeef693d18d359134f47bf7b6621ec303b570f9 Mon Sep 17 00:00:00 2001
From: Arnaud Ebalard
Date: Thu, 18 Jun 2015 15:46:24 +0200
Subject: crypto: marvell/cesa - add MD5 support

Add support for MD5 operations.

Signed-off-by: Arnaud Ebalard
Signed-off-by: Boris Brezillon
Signed-off-by: Herbert Xu
---
 drivers/crypto/marvell/cesa.c | 2 +
 drivers/crypto/marvell/cesa.h | 2 +
 drivers/crypto/marvell/hash.c | 172 +++++++++++++++++++++++++++++++++++++++++-
 3 files changed, 174 insertions(+), 2 deletions(-)

(limited to 'drivers/crypto/marvell/cesa.c')

diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index c0b1b49cbba5..047c1c0e7eb4 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -178,7 +178,9 @@ static struct crypto_alg *armada_370_cipher_algs[] = {
 };

 static struct ahash_alg *armada_370_ahash_algs[] = {
+ &mv_md5_alg,
 &mv_sha1_alg,
+ &mv_ahmac_md5_alg,
 &mv_ahmac_sha1_alg,
 };
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index 1229c3d75b71..283e322cc234 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -774,7 +774,9 @@ int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain,

 /* Algorithm definitions */

+extern struct ahash_alg mv_md5_alg;
 extern struct ahash_alg mv_sha1_alg;
+extern struct ahash_alg mv_ahmac_md5_alg;
 extern struct ahash_alg mv_ahmac_sha1_alg;

 extern struct crypto_alg mv_cesa_ecb_des_alg;
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 528da260a7cc..36bb756d5fa9 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -12,6 +12,7 @@
 * by the Free Software Foundation.
 */

+#include <crypto/md5.h>
 #include <crypto/sha.h>

 #include "cesa.h"
@@ -346,8 +347,16 @@ static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
 ahashreq->nbytes - creq->cache_ptr);

 if (creq->last_req) {
- for (i = 0; i < digsize / 4; i++)
- creq->state[i] = cpu_to_be32(creq->state[i]);
+ for (i = 0; i < digsize / 4; i++) {
+ /*
+ * Hardware provides MD5 digest in a different
+ * endianness than SHA-1 and SHA-256 ones.
+ */
+ if (digsize == MD5_DIGEST_SIZE)
+ creq->state[i] = cpu_to_le32(creq->state[i]);
+ else
+ creq->state[i] = cpu_to_be32(creq->state[i]);
+ }

 memcpy(ahashreq->result, creq->state, digsize);
 }
@@ -788,6 +797,95 @@ static int mv_cesa_ahash_finup(struct ahash_request *req)
 return ret;
 }

+static int mv_cesa_md5_init(struct ahash_request *req)
+{
+ struct mv_cesa_op_ctx tmpl;
+
+ mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);
+
+ mv_cesa_ahash_init(req, &tmpl);
+
+ return 0;
+}
+
+static int mv_cesa_md5_export(struct ahash_request *req, void *out)
+{
+ struct md5_state *out_state = out;
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+ unsigned int digsize = crypto_ahash_digestsize(ahash);
+
+ out_state->byte_count = creq->len;
+ memcpy(out_state->hash, creq->state, digsize);
+ memset(out_state->block, 0, sizeof(out_state->block));
+ if (creq->cache)
+ memcpy(out_state->block, creq->cache, creq->cache_ptr);
+
+ return 0;
+}
+
+static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
+{
+ const struct md5_state *in_state = in;
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+ unsigned int digsize = crypto_ahash_digestsize(ahash);
+ unsigned int cache_ptr;
+ int ret;
+
+ creq->len = in_state->byte_count;
+ memcpy(creq->state, in_state->hash, digsize);
+ creq->cache_ptr = 0;
+
+ cache_ptr = creq->len % sizeof(in_state->block);
+ if (!cache_ptr)
+ return 0;
+
+ ret = mv_cesa_ahash_alloc_cache(req);
+ if (ret)
+ return ret;
+
+ memcpy(creq->cache, in_state->block, cache_ptr);
+ creq->cache_ptr = cache_ptr;
+
+ return 0;
+}
+
+static int mv_cesa_md5_digest(struct ahash_request *req)
+{
+ int ret;
+
+ ret = mv_cesa_md5_init(req);
+ if (ret)
+ return ret;
+
+ return mv_cesa_ahash_finup(req);
+}
+
+struct ahash_alg mv_md5_alg = {
+ .init = mv_cesa_md5_init,
+ .update = mv_cesa_ahash_update,
+ .final = mv_cesa_ahash_final,
+ .finup = mv_cesa_ahash_finup,
+ .digest = mv_cesa_md5_digest,
+ .export = mv_cesa_md5_export,
+ .import = mv_cesa_md5_import,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "mv-md5",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
+ .cra_init = mv_cesa_ahash_cra_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+};
+
 static int mv_cesa_sha1_init(struct ahash_request *req)
 {
 struct mv_cesa_op_ctx tmpl;
@@ -1043,6 +1141,76 @@ static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
 return 0;
 }

+static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
+{
+ struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct mv_cesa_op_ctx tmpl;
+
+ mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
+ memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));
+
+ mv_cesa_ahash_init(req, &tmpl);
+
+ return 0;
+}
+
+static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct md5_state istate, ostate;
+ int ret, i;
+
+ ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
+ ctx->iv[i] = be32_to_cpu(istate.hash[i]);
+
+ for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
+ ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]);
+
+ return 0;
+}
+
+static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
+{
+ int ret;
+
+ ret = mv_cesa_ahmac_md5_init(req);
+ if (ret)
+ return ret;
+
+ return mv_cesa_ahash_finup(req);
+}
+
+struct ahash_alg mv_ahmac_md5_alg = {
+ .init = mv_cesa_ahmac_md5_init,
+ .update = mv_cesa_ahash_update,
+ .final = mv_cesa_ahash_final,
+ .finup = mv_cesa_ahash_finup,
+ .digest = mv_cesa_ahmac_md5_digest,
+ .setkey = mv_cesa_ahmac_md5_setkey,
+ .export = mv_cesa_md5_export,
+ .import = mv_cesa_md5_import,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct md5_state),
+ .base = {
+ .cra_name = "hmac(md5)",
+ .cra_driver_name = "mv-hmac-md5",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
+ .cra_init = mv_cesa_ahmac_cra_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+};
+
 static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
 {
 struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
-- cgit v1.2.3-59-g8ed1b

From f85a762e49bda3cba143ab0db8bde6d2bbe8baf9 Mon Sep 17 00:00:00 2001
From: Arnaud Ebalard
Date: Thu, 18 Jun 2015 15:46:25 +0200
Subject: crypto: marvell/cesa - add SHA256 support

Add support for SHA256 operations.

Signed-off-by: Arnaud Ebalard
Signed-off-by: Boris Brezillon
Signed-off-by: Herbert Xu
---
 drivers/crypto/marvell/cesa.c | 2 +
 drivers/crypto/marvell/cesa.h | 2 +
 drivers/crypto/marvell/hash.c | 159 ++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 163 insertions(+)

(limited to 'drivers/crypto/marvell/cesa.c')

diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 047c1c0e7eb4..9c43cd7e5d1d 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -180,8 +180,10 @@ static struct crypto_alg *armada_370_cipher_algs[] = {
 static struct ahash_alg *armada_370_ahash_algs[] = {
 &mv_md5_alg,
 &mv_sha1_alg,
+ &mv_sha256_alg,
 &mv_ahmac_md5_alg,
 &mv_ahmac_sha1_alg,
+ &mv_ahmac_sha256_alg,
 };

 static const struct mv_cesa_caps armada_370_caps = {
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index 283e322cc234..b60698b30d30 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -776,8 +776,10 @@ int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain,
 extern struct ahash_alg mv_md5_alg;
 extern struct ahash_alg mv_sha1_alg;
+extern struct ahash_alg mv_sha256_alg;
 extern struct ahash_alg mv_ahmac_md5_alg;
 extern struct ahash_alg mv_ahmac_sha1_alg;
+extern struct ahash_alg mv_ahmac_sha256_alg;

 extern struct crypto_alg mv_cesa_ecb_des_alg;
 extern struct crypto_alg mv_cesa_cbc_des_alg;
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 36bb756d5fa9..ae9272eb9c1a 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -975,6 +975,95 @@ struct ahash_alg mv_sha1_alg = {
 }
 };

+static int mv_cesa_sha256_init(struct ahash_request *req)
+{
+ struct mv_cesa_op_ctx tmpl;
+
+ mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);
+
+ mv_cesa_ahash_init(req, &tmpl);
+
+ return 0;
+}
+
+static int mv_cesa_sha256_digest(struct ahash_request *req)
+{
+ int ret;
+
+ ret = mv_cesa_sha256_init(req);
+ if (ret)
+ return ret;
+
+ return mv_cesa_ahash_finup(req);
+}
+
+static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
+{
+ struct sha256_state *out_state = out;
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+ unsigned int ds = crypto_ahash_digestsize(ahash);
+
+ out_state->count = creq->len;
+ memcpy(out_state->state, creq->state, ds);
+ memset(out_state->buf, 0, sizeof(out_state->buf));
+ if (creq->cache)
+ memcpy(out_state->buf, creq->cache, creq->cache_ptr);
+
+ return 0;
+}
+
+static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
+{
+ const struct sha256_state *in_state = in;
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+ unsigned int digsize = crypto_ahash_digestsize(ahash);
+ unsigned int cache_ptr;
+ int ret;
+
+ creq->len = in_state->count;
+ memcpy(creq->state, in_state->state, digsize);
+ creq->cache_ptr = 0;
+
+ cache_ptr = creq->len % SHA256_BLOCK_SIZE;
+ if (!cache_ptr)
+ return 0;
+
+ ret = mv_cesa_ahash_alloc_cache(req);
+ if (ret)
+ return ret;
+
+ memcpy(creq->cache, in_state->buf, cache_ptr);
+ creq->cache_ptr = cache_ptr;
+
+ return 0;
+}
+
+struct ahash_alg mv_sha256_alg = {
+ .init = mv_cesa_sha256_init,
+ .update = mv_cesa_ahash_update,
+ .final = mv_cesa_ahash_final,
+ .finup = mv_cesa_ahash_finup,
+ .digest = mv_cesa_sha256_digest,
+ .export = mv_cesa_sha256_export,
+ .import = mv_cesa_sha256_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "mv-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
+ .cra_init = mv_cesa_ahash_cra_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+};
+
 struct mv_cesa_ahash_result {
 struct completion completion;
 int error;
@@ -1280,3 +1369,73 @@ struct ahash_alg mv_ahmac_sha1_alg = {
 }
 }
 };
+
+static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct sha256_state istate, ostate;
+ int ret, i;
+
+ ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(istate.state); i++)
+ ctx->iv[i] = be32_to_cpu(istate.state[i]);
+
+ for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
+ ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);
+
+ return 0;
+}
+
+static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
+{
+ struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct mv_cesa_op_ctx tmpl;
+
+ mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
+ memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));
+
+ mv_cesa_ahash_init(req, &tmpl);
+
+ return 0;
+}
+
+static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
+{
+ int ret;
+
+ ret = mv_cesa_ahmac_sha256_init(req);
+ if (ret)
+ return ret;
+
+ return mv_cesa_ahash_finup(req);
+}
+
+struct ahash_alg mv_ahmac_sha256_alg = {
+ .init = mv_cesa_ahmac_sha256_init,
+ .update = mv_cesa_ahash_update,
+ .final = mv_cesa_ahash_final,
+ .finup = mv_cesa_ahash_finup,
+ .digest = mv_cesa_ahmac_sha256_digest,
+ .setkey = mv_cesa_ahmac_sha256_setkey,
+ .export = mv_cesa_sha256_export,
+ .import = mv_cesa_sha256_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "mv-hmac-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
+ .cra_init = mv_cesa_ahmac_cra_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+};
-- cgit v1.2.3-59-g8ed1b

From 898c9d5ea2182287620f8995b32e457d7b3b54ca Mon Sep 17 00:00:00 2001
From: Boris BREZILLON
Date: Thu, 18 Jun 2015 15:46:26 +0200
Subject: crypto: marvell/cesa - add support for all armada SoCs

Add CESA IP description for all the missing armada SoCs (XP, 375 and 38x).

Signed-off-by: Boris Brezillon
Signed-off-by: Herbert Xu
---
 drivers/crypto/marvell/cesa.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

(limited to 'drivers/crypto/marvell/cesa.c')

diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 9c43cd7e5d1d..af590bf853d2 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -195,8 +195,20 @@ static const struct mv_cesa_caps armada_370_caps = {
 .has_tdma = true,
 };

+static const struct mv_cesa_caps armada_xp_caps = {
+ .nengines = 2,
+ .cipher_algs = armada_370_cipher_algs,
+ .ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
+ .ahash_algs = armada_370_ahash_algs,
+ .nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
+ .has_tdma = true,
+};
+
 static const struct of_device_id mv_cesa_of_match_table[] = {
 { .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps },
+ { .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps },
+ { .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps },
+ { .compatible = "marvell,armada-38x-crypto", .data = &armada_xp_caps },
 {}
 };
 MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
-- cgit v1.2.3-59-g8ed1b

From 64c55d499bdadf242e83ad6e5670c72da6c1a3c9 Mon Sep 17 00:00:00 2001
From: Boris BREZILLON
Date: Thu, 18 Jun 2015 15:46:27 +0200
Subject: crypto: marvell/cesa - add allhwsupport module parameter

The old and new marvell CESA drivers both support Orion and Kirkwood SoCs.
Add a module parameter to choose whether these SoCs should be attached to
the new or the old driver.

The default policy is to keep attaching those IPs to the old driver if it
is enabled, until we decide the new CESA driver is stable/secure enough.

Signed-off-by: Boris Brezillon
Signed-off-by: Herbert Xu
---
 drivers/crypto/marvell/cesa.c | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'drivers/crypto/marvell/cesa.c')

diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index af590bf853d2..a05b5cb3aea7 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -31,6 +31,10 @@

 #include "cesa.h"

+static int allhwsupport = !IS_ENABLED(CONFIG_CRYPTO_DEV_MV_CESA);
+module_param_named(allhwsupport, allhwsupport, int, 0444);
+MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even if it overlaps with the mv_cesa driver)");
+
 struct mv_cesa_dev *cesa_dev;

 static void mv_cesa_dequeue_req_unlocked(struct mv_cesa_engine *engine)
-- cgit v1.2.3-59-g8ed1b
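Which implementation actually backs a given algorithm (this driver, the old mv_cesa one, or a software fallback) can be verified in /proc/crypto, where every registered transform reports its driver name and priority. The helper below is an illustrative userspace sketch that filters for the mv-* driver names registered by this file:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[256], name[128] = "", driver[128] = "";
        FILE *f = fopen("/proc/crypto", "r");

        if (!f) {
                perror("/proc/crypto");
                return 1;
        }

        /* Entries are "key : value" blocks; name and driver precede
         * priority, so remember them until the priority line shows up. */
        while (fgets(line, sizeof(line), f)) {
                char *val = strchr(line, ':');

                if (!val)
                        continue;
                val++;
                while (*val == ' ')
                        val++;
                val[strcspn(val, "\n")] = '\0';

                if (!strncmp(line, "name", 4))
                        snprintf(name, sizeof(name), "%s", val);
                else if (!strncmp(line, "driver", 6))
                        snprintf(driver, sizeof(driver), "%s", val);
                else if (!strncmp(line, "priority", 8) &&
                         !strncmp(driver, "mv-", 3))
                        printf("%-16s %-20s priority %s\n",
                               name, driver, val);
        }

        fclose(f);
        return 0;
}

On an Armada 370, for example, cbc(des3_ede) should be listed with driver mv-cbc-des3-ede at priority 300, ahead of the generic software implementations.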
From 0bf6948995f9f7c4b0ce062db793c205e921ee48 Mon Sep 17 00:00:00 2001
From: Boris BREZILLON
Date: Thu, 18 Jun 2015 15:46:28 +0200
Subject: crypto: marvell/cesa - add support for Orion SoCs

Add the Orion SoC description, and select this implementation by default
to support non-DT probing: Orion is the only platform where non-DT boards
are declaring the CESA block.

Honor the allhwsupport module parameter and avoid probing the CESA IP
when the old CESA driver is enabled (unless it is explicitly requested to
do so).

Signed-off-by: Boris Brezillon
Signed-off-by: Herbert Xu
---
 drivers/crypto/marvell/cesa.c | 42 +++++++++++++++++++++++++++++++++++-------
 1 file changed, 35 insertions(+), 7 deletions(-)

(limited to 'drivers/crypto/marvell/cesa.c')

diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index a05b5cb3aea7..8e5ea7218bb2 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -172,6 +172,22 @@ static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
 crypto_unregister_alg(cesa->caps->cipher_algs[i]);
 }

+static struct crypto_alg *orion_cipher_algs[] = {
+ &mv_cesa_ecb_des_alg,
+ &mv_cesa_cbc_des_alg,
+ &mv_cesa_ecb_des3_ede_alg,
+ &mv_cesa_cbc_des3_ede_alg,
+ &mv_cesa_ecb_aes_alg,
+ &mv_cesa_cbc_aes_alg,
+};
+
+static struct ahash_alg *orion_ahash_algs[] = {
+ &mv_md5_alg,
+ &mv_sha1_alg,
+ &mv_ahmac_md5_alg,
+ &mv_ahmac_sha1_alg,
+};
+
 static struct crypto_alg *armada_370_cipher_algs[] = {
 &mv_cesa_ecb_des_alg,
 &mv_cesa_cbc_des_alg,
@@ -190,6 +206,15 @@ static struct ahash_alg *armada_370_ahash_algs[] = {
 &mv_ahmac_sha256_alg,
 };

+static const struct mv_cesa_caps orion_caps = {
+ .nengines = 1,
+ .cipher_algs = orion_cipher_algs,
+ .ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
+ .ahash_algs = orion_ahash_algs,
+ .nahash_algs = ARRAY_SIZE(orion_ahash_algs),
+ .has_tdma = false,
+};
+
 static const struct mv_cesa_caps armada_370_caps = {
 .nengines = 1,
 .cipher_algs = armada_370_cipher_algs,
@@ -209,6 +234,7 @@ static const struct mv_cesa_caps armada_xp_caps = {
 };

 static const struct of_device_id mv_cesa_of_match_table[] = {
+ { .compatible = "marvell,orion-crypto", .data = &orion_caps },
 { .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps },
 { .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps },
 { .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps },
@@ -334,7 +360,7 @@ static void mv_cesa_put_sram(struct platform_device *pdev, int idx)

 static int mv_cesa_probe(struct platform_device *pdev)
 {
- const struct mv_cesa_caps *caps = NULL;
+ const struct mv_cesa_caps *caps = &orion_caps;
 const struct mbus_dram_target_info *dram;
 const struct of_device_id *match;
 struct device *dev = &pdev->dev;
@@ -349,14 +375,16 @@ static int mv_cesa_probe(struct platform_device *pdev)
 return -EEXIST;
 }

- if (!dev->of_node)
- return -ENOTSUPP;
+ if (dev->of_node) {
+ match = of_match_node(mv_cesa_of_match_table, dev->of_node);
+ if (!match || !match->data)
+ return -ENOTSUPP;

- match = of_match_node(mv_cesa_of_match_table, dev->of_node);
- if (!match || !match->data)
- return -ENOTSUPP;
+ caps = match->data;
+ }

- caps = match->data;
+ if (caps == &orion_caps && !allhwsupport)
+ return -ENOTSUPP;

 cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
 if (!cesa)
-- cgit v1.2.3-59-g8ed1b

From 7240425579b881a3e26ba62a1bca29b45d4bfadc Mon Sep 17 00:00:00 2001
From: Arnaud Ebalard
Date: Thu, 18 Jun 2015 15:46:29 +0200
Subject: crypto: marvell/cesa - add support for Kirkwood and Dove SoCs

Add the Kirkwood and Dove SoC descriptions, and honor the allhwsupport
module parameter to avoid probing the CESA IP when the old CESA driver is
enabled (unless it is explicitly requested to do so).
Signed-off-by: Arnaud Ebalard
Signed-off-by: Boris Brezillon
Signed-off-by: Herbert Xu
---
 drivers/crypto/marvell/cesa.c | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

(limited to 'drivers/crypto/marvell/cesa.c')

diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 8e5ea7218bb2..a432633bced4 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -215,6 +215,15 @@ static const struct mv_cesa_caps orion_caps = {
 .has_tdma = false,
 };

+static const struct mv_cesa_caps kirkwood_caps = {
+ .nengines = 1,
+ .cipher_algs = orion_cipher_algs,
+ .ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
+ .ahash_algs = orion_ahash_algs,
+ .nahash_algs = ARRAY_SIZE(orion_ahash_algs),
+ .has_tdma = true,
+};
+
 static const struct mv_cesa_caps armada_370_caps = {
 .nengines = 1,
 .cipher_algs = armada_370_cipher_algs,
@@ -235,6 +244,8 @@ static const struct mv_cesa_caps armada_xp_caps = {

 static const struct of_device_id mv_cesa_of_match_table[] = {
 { .compatible = "marvell,orion-crypto", .data = &orion_caps },
+ { .compatible = "marvell,kirkwood-crypto", .data = &kirkwood_caps },
+ { .compatible = "marvell,dove-crypto", .data = &kirkwood_caps },
 { .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps },
 { .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps },
 { .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps },
@@ -383,7 +394,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
 caps = match->data;
 }

- if (caps == &orion_caps && !allhwsupport)
+ if ((caps == &orion_caps || caps == &kirkwood_caps) && !allhwsupport)
 return -ENOTSUPP;

 cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
 if (!cesa)
-- cgit v1.2.3-59-g8ed1b
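With the whole series applied, a quick functional check on any of these SoCs is to bind an AF_ALG hash socket directly to one of the cra_driver_name strings rather than a generic algorithm name; the bind then fails unless the CESA implementation is actually registered. A sketch assuming a kernel with CONFIG_CRYPTO_USER_API_HASH ("mv-sha1" is registered by every SoC description above):

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
        struct sockaddr_alg sa = {
                .salg_family = AF_ALG,
                .salg_type   = "hash",
                /* Driver name, not "sha1": fails if CESA is absent. */
                .salg_name   = "mv-sha1",
        };
        unsigned char digest[20];
        int tfmfd, opfd, i;

        tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
        if (bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
                perror("bind");        /* CESA not probed or disabled */
                return 1;
        }
        opfd = accept(tfmfd, NULL, 0);

        write(opfd, "abc", 3);
        read(opfd, digest, sizeof(digest));

        for (i = 0; i < (int)sizeof(digest); i++)
                printf("%02x", digest[i]);
        printf("\n");

        close(opfd);
        close(tfmfd);
        return 0;
}

For the three-byte input "abc" the output should be the well-known SHA-1 digest a9993e364706816aba3e25717850c26c9cd0d89d, regardless of which engine produced it.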