Diffstat (limited to 'drivers/crypto'): 141 files changed, 4876 insertions(+), 2408 deletions(-)
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index caa98a7fe392..0be55fcc19ba 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -258,7 +258,7 @@ config CRYPTO_DEV_HIFN_795X_RNG Select this option if you want to enable the random number generator on the HIFN 795x crypto adapters. -source drivers/crypto/caam/Kconfig +source "drivers/crypto/caam/Kconfig" config CRYPTO_DEV_TALITOS tristate "Talitos Freescale Security Engine (SEC)" @@ -692,6 +692,7 @@ config CRYPTO_DEV_BCM_SPU depends on ARCH_BCM_IPROC depends on MAILBOX default m + select CRYPTO_AUTHENC select CRYPTO_DES select CRYPTO_MD5 select CRYPTO_SHA1 @@ -762,10 +763,12 @@ config CRYPTO_DEV_CCREE select CRYPTO_ECB select CRYPTO_CTR select CRYPTO_XTS + select CRYPTO_SM4 + select CRYPTO_SM3 help Say 'Y' to enable a driver for the REE interface of the Arm TrustZone CryptoCell family of processors. Currently the - CryptoCell 712, 710 and 630 are supported. + CryptoCell 713, 703, 712, 710 and 630 are supported. Choose this if you wish to use hardware acceleration of cryptographic operations on the system REE. If unsure say Y. diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c index f5c07498ea4f..4092c2aad8e2 100644 --- a/drivers/crypto/amcc/crypto4xx_alg.c +++ b/drivers/crypto/amcc/crypto4xx_alg.c @@ -520,8 +520,7 @@ static int crypto4xx_compute_gcm_hash_key_sw(__le32 *hash_start, const u8 *key, uint8_t src[16] = { 0 }; int rc = 0; - aes_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC | - CRYPTO_ALG_NEED_FALLBACK); + aes_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(aes_tfm)) { rc = PTR_ERR(aes_tfm); pr_warn("could not load aes cipher driver: %d\n", rc); diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 6eaec9ba0f68..06574a884715 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -40,9 +40,11 @@ #include <crypto/ctr.h> #include <crypto/gcm.h> #include <crypto/sha.h> +#include <crypto/rng.h> #include <crypto/scatterwalk.h> #include <crypto/skcipher.h> #include <crypto/internal/aead.h> +#include <crypto/internal/rng.h> #include <crypto/internal/skcipher.h> #include "crypto4xx_reg_def.h" #include "crypto4xx_core.h" @@ -283,9 +285,9 @@ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx) */ static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev) { - dev->gdr = dma_zalloc_coherent(dev->core_dev->device, - sizeof(struct ce_gd) * PPC4XX_NUM_GD, - &dev->gdr_pa, GFP_ATOMIC); + dev->gdr = dma_alloc_coherent(dev->core_dev->device, + sizeof(struct ce_gd) * PPC4XX_NUM_GD, + &dev->gdr_pa, GFP_ATOMIC); if (!dev->gdr) return -ENOMEM; @@ -596,7 +598,7 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev, pd->pd_ctl_len.bf.pkt_len, dst); } else { - __dma_sync_page(sg_page(dst), dst->offset, dst->length, + dma_unmap_page(dev->core_dev->device, pd->dest, dst->length, DMA_FROM_DEVICE); } @@ -1035,6 +1037,10 @@ static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev, rc = crypto_register_ahash(&alg->alg.u.hash); break; + case CRYPTO_ALG_TYPE_RNG: + rc = crypto_register_rng(&alg->alg.u.rng); + break; + default: rc = crypto_register_skcipher(&alg->alg.u.cipher); break; @@ -1064,6 +1070,10 @@ static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev) crypto_unregister_aead(&alg->alg.u.aead); break; + case CRYPTO_ALG_TYPE_RNG: + crypto_unregister_rng(&alg->alg.u.rng); + break; + default: 
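The crypto4xx hunks above expose the hardware PRNG through the kernel's rng_alg interface, registered under the generic "stdrng" name. A minimal sketch of that pattern, assuming a hypothetical read_hw_prng() helper that fills a buffer from the device and returns 0 on success:

	#include <crypto/internal/rng.h>
	#include <linux/module.h>

	static int read_hw_prng(u8 *buf, unsigned int len); /* hypothetical */

	static int demo_prng_generate(struct crypto_rng *tfm, const u8 *src,
				      unsigned int slen, u8 *dst,
				      unsigned int dlen)
	{
		/* a real driver serializes hardware access, e.g. with a mutex */
		return read_hw_prng(dst, dlen);
	}

	static int demo_prng_seed(struct crypto_rng *tfm, const u8 *seed,
				  unsigned int slen)
	{
		return 0;	/* the hardware seeds itself */
	}

	static struct rng_alg demo_prng = {
		.generate	= demo_prng_generate,
		.seed		= demo_prng_seed,
		.seedsize	= 0,
		.base		= {
			.cra_name		= "stdrng",
			.cra_driver_name	= "demo-prng",
			.cra_priority		= 300,
			.cra_module		= THIS_MODULE,
		},
	};

	/* crypto_register_rng(&demo_prng) from probe(),
	 * crypto_unregister_rng(&demo_prng) from remove(). */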
crypto_unregister_skcipher(&alg->alg.u.cipher); } @@ -1122,6 +1132,69 @@ static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data) PPC4XX_TMO_ERR_INT); } +static int ppc4xx_prng_data_read(struct crypto4xx_device *dev, + u8 *data, unsigned int max) +{ + unsigned int i, curr = 0; + u32 val[2]; + + do { + /* trigger PRN generation */ + writel(PPC4XX_PRNG_CTRL_AUTO_EN, + dev->ce_base + CRYPTO4XX_PRNG_CTRL); + + for (i = 0; i < 1024; i++) { + /* usually 19 iterations are enough */ + if ((readl(dev->ce_base + CRYPTO4XX_PRNG_STAT) & + CRYPTO4XX_PRNG_STAT_BUSY)) + continue; + + val[0] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_0); + val[1] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_1); + break; + } + if (i == 1024) + return -ETIMEDOUT; + + if ((max - curr) >= 8) { + memcpy(data, &val, 8); + data += 8; + curr += 8; + } else { + /* copy only remaining bytes */ + memcpy(data, &val, max - curr); + break; + } + } while (curr < max); + + return curr; +} + +static int crypto4xx_prng_generate(struct crypto_rng *tfm, + const u8 *src, unsigned int slen, + u8 *dstn, unsigned int dlen) +{ + struct rng_alg *alg = crypto_rng_alg(tfm); + struct crypto4xx_alg *amcc_alg; + struct crypto4xx_device *dev; + int ret; + + amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.rng); + dev = amcc_alg->dev; + + mutex_lock(&dev->core_dev->rng_lock); + ret = ppc4xx_prng_data_read(dev, dstn, dlen); + mutex_unlock(&dev->core_dev->rng_lock); + return ret; +} + + +static int crypto4xx_prng_seed(struct crypto_rng *tfm, const u8 *seed, + unsigned int slen) +{ + return 0; +} + /** * Supported Crypto Algorithms */ @@ -1291,6 +1364,18 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = { .cra_module = THIS_MODULE, }, } }, + { .type = CRYPTO_ALG_TYPE_RNG, .u.rng = { + .base = { + .cra_name = "stdrng", + .cra_driver_name = "crypto4xx_rng", + .cra_priority = 300, + .cra_ctxsize = 0, + .cra_module = THIS_MODULE, + }, + .generate = crypto4xx_prng_generate, + .seed = crypto4xx_prng_seed, + .seedsize = 0, + } }, }; /** @@ -1360,6 +1445,7 @@ static int crypto4xx_probe(struct platform_device *ofdev) core_dev->dev->core_dev = core_dev; core_dev->dev->is_revb = is_revb; core_dev->device = dev; + mutex_init(&core_dev->rng_lock); spin_lock_init(&core_dev->lock); INIT_LIST_HEAD(&core_dev->dev->alg_list); ratelimit_default_init(&core_dev->dev->aead_ratelimit); @@ -1439,6 +1525,7 @@ static int crypto4xx_remove(struct platform_device *ofdev) tasklet_kill(&core_dev->tasklet); /* Un-register with Linux CryptoAPI */ crypto4xx_unregister_alg(core_dev->dev); + mutex_destroy(&core_dev->rng_lock); /* Free all allocated memory */ crypto4xx_stop_all(core_dev); diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h index e2ca56722f07..18df695ca6b1 100644 --- a/drivers/crypto/amcc/crypto4xx_core.h +++ b/drivers/crypto/amcc/crypto4xx_core.h @@ -23,8 +23,10 @@ #define __CRYPTO4XX_CORE_H__ #include <linux/ratelimit.h> +#include <linux/mutex.h> #include <crypto/internal/hash.h> #include <crypto/internal/aead.h> +#include <crypto/internal/rng.h> #include <crypto/internal/skcipher.h> #include "crypto4xx_reg_def.h" #include "crypto4xx_sa.h" @@ -119,6 +121,7 @@ struct crypto4xx_core_device { u32 irq; struct tasklet_struct tasklet; spinlock_t lock; + struct mutex rng_lock; }; struct crypto4xx_ctx { @@ -143,6 +146,7 @@ struct crypto4xx_alg_common { struct skcipher_alg cipher; struct ahash_alg hash; struct aead_alg aead; + struct rng_alg rng; } u; }; diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h 
b/drivers/crypto/amcc/crypto4xx_reg_def.h index 472331787e04..80c67490bbf6 100644 --- a/drivers/crypto/amcc/crypto4xx_reg_def.h +++ b/drivers/crypto/amcc/crypto4xx_reg_def.h @@ -100,6 +100,7 @@ #define CRYPTO4XX_ENDIAN_CFG 0x000600d8 #define CRYPTO4XX_PRNG_STAT 0x00070000 +#define CRYPTO4XX_PRNG_STAT_BUSY 0x1 #define CRYPTO4XX_PRNG_CTRL 0x00070004 #define CRYPTO4XX_PRNG_SEED_L 0x00070008 #define CRYPTO4XX_PRNG_SEED_H 0x0007000c diff --git a/drivers/crypto/amcc/crypto4xx_trng.c b/drivers/crypto/amcc/crypto4xx_trng.c index 5e63742b0d22..53ab1f140a26 100644 --- a/drivers/crypto/amcc/crypto4xx_trng.c +++ b/drivers/crypto/amcc/crypto4xx_trng.c @@ -80,8 +80,10 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev) /* Find the TRNG device node and map it */ trng = of_find_matching_node(NULL, ppc4xx_trng_match); - if (!trng || !of_device_is_available(trng)) + if (!trng || !of_device_is_available(trng)) { + of_node_put(trng); return; + } dev->trng_base = of_iomap(trng, 0); of_node_put(trng); diff --git a/drivers/crypto/amcc/crypto4xx_trng.h b/drivers/crypto/amcc/crypto4xx_trng.h index 931d22531f51..7bbda51b7337 100644 --- a/drivers/crypto/amcc/crypto4xx_trng.h +++ b/drivers/crypto/amcc/crypto4xx_trng.h @@ -26,9 +26,9 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev); void ppc4xx_trng_remove(struct crypto4xx_core_device *core_dev); #else static inline void ppc4xx_trng_probe( - struct crypto4xx_device *dev __maybe_unused) { } + struct crypto4xx_core_device *dev __maybe_unused) { } static inline void ppc4xx_trng_remove( - struct crypto4xx_device *dev __maybe_unused) { } + struct crypto4xx_core_device *dev __maybe_unused) { } #endif #endif diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c index 438e1ffb2ec0..65bf1a299562 100644 --- a/drivers/crypto/atmel-tdes.c +++ b/drivers/crypto/atmel-tdes.c @@ -785,7 +785,7 @@ static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, } err = des_ekey(tmp, key); - if (err == 0 && (ctfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + if (err == 0 && (ctfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { ctfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index f3442c2bdbdc..57e5dca3253f 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -135,8 +135,6 @@ #define regk_crypto_ext 0x00000001 #define regk_crypto_hmac_sha1 0x00000007 #define regk_crypto_hmac_sha256 0x00000009 -#define regk_crypto_hmac_sha384 0x0000000b -#define regk_crypto_hmac_sha512 0x0000000d #define regk_crypto_init 0x00000000 #define regk_crypto_key_128 0x00000000 #define regk_crypto_key_192 0x00000001 @@ -144,8 +142,6 @@ #define regk_crypto_null 0x00000000 #define regk_crypto_sha1 0x00000006 #define regk_crypto_sha256 0x00000008 -#define regk_crypto_sha384 0x0000000a -#define regk_crypto_sha512 0x0000000c /* DMA descriptor structures */ struct pdma_descr_ctrl { @@ -190,8 +186,6 @@ struct pdma_stat_descr { /* Hash modes (including HMAC variants) */ #define ARTPEC6_CRYPTO_HASH_SHA1 1 #define ARTPEC6_CRYPTO_HASH_SHA256 2 -#define ARTPEC6_CRYPTO_HASH_SHA384 3 -#define ARTPEC6_CRYPTO_HASH_SHA512 4 /* Crypto modes */ #define ARTPEC6_CRYPTO_CIPHER_AES_ECB 1 @@ -284,6 +278,7 @@ enum artpec6_crypto_hash_flags { struct artpec6_crypto_req_common { struct list_head list; + struct list_head complete_in_progress; struct artpec6_crypto_dma_descriptors *dma; struct crypto_async_request *req; void (*complete)(struct 
crypto_async_request *req); @@ -291,11 +286,11 @@ struct artpec6_crypto_req_common { }; struct artpec6_hash_request_context { - char partial_buffer[SHA512_BLOCK_SIZE]; - char partial_buffer_out[SHA512_BLOCK_SIZE]; - char key_buffer[SHA512_BLOCK_SIZE]; - char pad_buffer[SHA512_BLOCK_SIZE + 32]; - unsigned char digeststate[SHA512_DIGEST_SIZE]; + char partial_buffer[SHA256_BLOCK_SIZE]; + char partial_buffer_out[SHA256_BLOCK_SIZE]; + char key_buffer[SHA256_BLOCK_SIZE]; + char pad_buffer[SHA256_BLOCK_SIZE + 32]; + unsigned char digeststate[SHA256_DIGEST_SIZE]; size_t partial_bytes; u64 digcnt; u32 key_md; @@ -305,8 +300,8 @@ struct artpec6_hash_request_context { }; struct artpec6_hash_export_state { - char partial_buffer[SHA512_BLOCK_SIZE]; - unsigned char digeststate[SHA512_DIGEST_SIZE]; + char partial_buffer[SHA256_BLOCK_SIZE]; + unsigned char digeststate[SHA256_DIGEST_SIZE]; size_t partial_bytes; u64 digcnt; int oper; @@ -314,7 +309,7 @@ struct artpec6_hash_export_state { }; struct artpec6_hashalg_context { - char hmac_key[SHA512_BLOCK_SIZE]; + char hmac_key[SHA256_BLOCK_SIZE]; size_t hmac_key_length; struct crypto_shash *child_hash; }; @@ -670,8 +665,8 @@ artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common) * to be written. */ return artpec6_crypto_dma_map_single(common, - dma->stat + dma->in_cnt - 1, - sizeof(dma->stat[0]), + dma->stat, + sizeof(dma->stat[0]) * dma->in_cnt, DMA_BIDIRECTIONAL, &dma->stat_dma_addr); } @@ -1315,8 +1310,7 @@ static int artpec6_crypto_prepare_hash(struct ahash_request *areq) struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm); struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq); size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq)); - size_t contextsize = digestsize == SHA384_DIGEST_SIZE ? - SHA512_DIGEST_SIZE : digestsize; + size_t contextsize = digestsize; size_t blocksize = crypto_tfm_alg_blocksize( crypto_ahash_tfm(crypto_ahash_reqtfm(areq))); struct artpec6_crypto_req_common *common = &req_ctx->common; @@ -1456,7 +1450,6 @@ static int artpec6_crypto_prepare_hash(struct ahash_request *areq) /* Finalize */ if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) { - bool needtrim = contextsize != digestsize; size_t hash_pad_len; u64 digest_bits; u32 oper; @@ -1502,19 +1495,10 @@ static int artpec6_crypto_prepare_hash(struct ahash_request *areq) /* Descriptor for the final result */ error = artpec6_crypto_setup_in_descr(common, areq->result, digestsize, - !needtrim); + true); if (error) return error; - if (needtrim) { - /* Discard the extra context bytes for SHA-384 */ - error = artpec6_crypto_setup_in_descr(common, - req_ctx->partial_buffer, - digestsize - contextsize, true); - if (error) - return error; - } - } else { /* This is not the final operation for this request */ if (!run_hw) return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START; @@ -1923,7 +1907,7 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq) /* For the decryption, cryptlen includes the tag. 
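A recurring fix in the artpec6 hunks: the GCM tag length is the negotiated authsize, not a hardcoded AES_BLOCK_SIZE, and the tag comparison must be constant-time. A condensed sketch of the decrypt-side bookkeeping (the decrypt flag and the two tag buffers are placeholders):

	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	unsigned int input_length = areq->cryptlen;

	/* For decryption, cryptlen includes the appended tag. */
	if (decrypt)
		input_length -= authsize;

	/* crypto_memneq() compares in constant time; memcmp() leaks timing. */
	if (decrypt && crypto_memneq(computed_tag, received_tag, authsize))
		return -EBADMSG;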
*/ input_length = areq->cryptlen; if (req_ctx->decrypt) - input_length -= AES_BLOCK_SIZE; + input_length -= crypto_aead_authsize(cipher); /* Prepare the context buffer */ req_ctx->hw_ctx.aad_length_bits = @@ -1988,7 +1972,7 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq) size_t output_len = areq->cryptlen; if (req_ctx->decrypt) - output_len -= AES_BLOCK_SIZE; + output_len -= crypto_aead_authsize(cipher); artpec6_crypto_walk_init(&walk, areq->dst); @@ -2017,19 +2001,32 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq) * the output ciphertext. For decryption it is put in a context * buffer for later compare against the input tag. */ - count = AES_BLOCK_SIZE; if (req_ctx->decrypt) { ret = artpec6_crypto_setup_in_descr(common, - req_ctx->decryption_tag, count, false); + req_ctx->decryption_tag, AES_BLOCK_SIZE, false); if (ret) return ret; } else { + /* For encryption the requested tag size may be smaller + * than the hardware's generated tag. + */ + size_t authsize = crypto_aead_authsize(cipher); + ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, - count); + authsize); if (ret) return ret; + + if (authsize < AES_BLOCK_SIZE) { + count = AES_BLOCK_SIZE - authsize; + ret = artpec6_crypto_setup_in_descr(common, + ac->pad_buffer, + count, false); + if (ret) + return ret; + } } } @@ -2045,7 +2042,8 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq) return artpec6_crypto_dma_map_descs(common); } -static void artpec6_crypto_process_queue(struct artpec6_crypto *ac) +static void artpec6_crypto_process_queue(struct artpec6_crypto *ac, + struct list_head *completions) { struct artpec6_crypto_req_common *req; @@ -2056,7 +2054,7 @@ static void artpec6_crypto_process_queue(struct artpec6_crypto *ac) list_move_tail(&req->list, &ac->pending); artpec6_crypto_start_dma(req); - req->req->complete(req->req, -EINPROGRESS); + list_add_tail(&req->complete_in_progress, completions); } /* @@ -2086,6 +2084,11 @@ static void artpec6_crypto_task(unsigned long data) struct artpec6_crypto *ac = (struct artpec6_crypto *)data; struct artpec6_crypto_req_common *req; struct artpec6_crypto_req_common *n; + struct list_head complete_done; + struct list_head complete_in_progress; + + INIT_LIST_HEAD(&complete_done); + INIT_LIST_HEAD(&complete_in_progress); if (list_empty(&ac->pending)) { pr_debug("Spurious IRQ\n"); @@ -2097,9 +2100,12 @@ static void artpec6_crypto_task(unsigned long data) list_for_each_entry_safe(req, n, &ac->pending, list) { struct artpec6_crypto_dma_descriptors *dma = req->dma; u32 stat; + dma_addr_t stataddr; - dma_sync_single_for_cpu(artpec6_crypto_dev, dma->stat_dma_addr, - sizeof(dma->stat[0]), + stataddr = dma->stat_dma_addr + 4 * (req->dma->in_cnt - 1); + dma_sync_single_for_cpu(artpec6_crypto_dev, + stataddr, + 4, DMA_BIDIRECTIONAL); stat = req->dma->stat[req->dma->in_cnt-1]; @@ -2119,19 +2125,30 @@ static void artpec6_crypto_task(unsigned long data) pr_debug("Completing request %p\n", req); - list_del(&req->list); + list_move_tail(&req->list, &complete_done); + ac->pending_count--; + } + + artpec6_crypto_process_queue(ac, &complete_in_progress); + + spin_unlock_bh(&ac->queue_lock); + + /* Perform the completion callbacks without holding the queue lock + * to allow new request submissions from the callbacks. 
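The tasklet rework above applies a standard locking pattern: finished requests are moved to a private list while the queue lock is held, and their callbacks run only after the unlock, so a callback may legally submit new requests. Stripped to the essentials (struct my_req and req_is_done() are placeholders):

	LIST_HEAD(complete_done);
	struct my_req *req, *n;

	spin_lock_bh(&ac->queue_lock);
	list_for_each_entry_safe(req, n, &ac->pending, list)
		if (req_is_done(req))
			list_move_tail(&req->list, &complete_done);
	spin_unlock_bh(&ac->queue_lock);

	/* Unlocked: callbacks are free to take the lock and enqueue more work. */
	list_for_each_entry_safe(req, n, &complete_done, list)
		req->complete(req->req);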
+ */ + list_for_each_entry_safe(req, n, &complete_done, list) { artpec6_crypto_dma_unmap_all(req); artpec6_crypto_copy_bounce_buffers(req); - - ac->pending_count--; artpec6_crypto_common_destroy(req); + req->complete(req->req); } - artpec6_crypto_process_queue(ac); - - spin_unlock_bh(&ac->queue_lock); + list_for_each_entry_safe(req, n, &complete_in_progress, + complete_in_progress) { + req->req->complete(req->req, -EINPROGRESS); + } } static void artpec6_crypto_complete_crypto(struct crypto_async_request *req) @@ -2170,27 +2187,29 @@ static void artpec6_crypto_complete_aead(struct crypto_async_request *req) /* Verify GCM hashtag. */ struct aead_request *areq = container_of(req, struct aead_request, base); + struct crypto_aead *aead = crypto_aead_reqtfm(areq); struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq); if (req_ctx->decrypt) { u8 input_tag[AES_BLOCK_SIZE]; + unsigned int authsize = crypto_aead_authsize(aead); sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), input_tag, - AES_BLOCK_SIZE, + authsize, areq->assoclen + areq->cryptlen - - AES_BLOCK_SIZE); + authsize); - if (memcmp(req_ctx->decryption_tag, - input_tag, - AES_BLOCK_SIZE)) { + if (crypto_memneq(req_ctx->decryption_tag, + input_tag, + authsize)) { pr_debug("***EBADMSG:\n"); print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1, - input_tag, AES_BLOCK_SIZE, true); + input_tag, authsize, true); print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1, req_ctx->decryption_tag, - AES_BLOCK_SIZE, true); + authsize, true); result = -EBADMSG; } @@ -2266,13 +2285,6 @@ artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac) case ARTPEC6_CRYPTO_HASH_SHA256: oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256; break; - case ARTPEC6_CRYPTO_HASH_SHA384: - oper = hmac ? regk_crypto_hmac_sha384 : regk_crypto_sha384; - break; - case ARTPEC6_CRYPTO_HASH_SHA512: - oper = hmac ? 
regk_crypto_hmac_sha512 : regk_crypto_sha512; - break; - default: pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type); return -EINVAL; @@ -2368,53 +2380,11 @@ static int artpec6_crypto_sha256_digest(struct ahash_request *req) return artpec6_crypto_prepare_submit_hash(req); } -static int __maybe_unused artpec6_crypto_sha384_init(struct ahash_request *req) -{ - return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0); -} - -static int __maybe_unused -artpec6_crypto_sha384_digest(struct ahash_request *req) -{ - struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); - - artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0); - req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE; - - return artpec6_crypto_prepare_submit_hash(req); -} - -static int artpec6_crypto_sha512_init(struct ahash_request *req) -{ - return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0); -} - -static int artpec6_crypto_sha512_digest(struct ahash_request *req) -{ - struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); - - artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0); - req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE; - - return artpec6_crypto_prepare_submit_hash(req); -} - static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req) { return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1); } -static int __maybe_unused -artpec6_crypto_hmac_sha384_init(struct ahash_request *req) -{ - return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1); -} - -static int artpec6_crypto_hmac_sha512_init(struct ahash_request *req) -{ - return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1); -} - static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req) { struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); @@ -2425,27 +2395,6 @@ static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req) return artpec6_crypto_prepare_submit_hash(req); } -static int __maybe_unused -artpec6_crypto_hmac_sha384_digest(struct ahash_request *req) -{ - struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); - - artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1); - req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE; - - return artpec6_crypto_prepare_submit_hash(req); -} - -static int artpec6_crypto_hmac_sha512_digest(struct ahash_request *req) -{ - struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); - - artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1); - req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE; - - return artpec6_crypto_prepare_submit_hash(req); -} - static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm, const char *base_hash_name) { @@ -2480,17 +2429,6 @@ static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm) return artpec6_crypto_ahash_init_common(tfm, "sha256"); } -static int __maybe_unused -artpec6_crypto_ahash_init_hmac_sha384(struct crypto_tfm *tfm) -{ - return artpec6_crypto_ahash_init_common(tfm, "sha384"); -} - -static int artpec6_crypto_ahash_init_hmac_sha512(struct crypto_tfm *tfm) -{ - return artpec6_crypto_ahash_init_common(tfm, "sha512"); -} - static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm) { struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm); @@ -2761,103 +2699,6 @@ static struct ahash_alg hash_algos[] = { }, }; -static struct ahash_alg artpec7_hash_algos[] = { - /* SHA-384 */ - { - .init = 
artpec6_crypto_sha384_init, - .update = artpec6_crypto_hash_update, - .final = artpec6_crypto_hash_final, - .digest = artpec6_crypto_sha384_digest, - .import = artpec6_crypto_hash_import, - .export = artpec6_crypto_hash_export, - .halg.digestsize = SHA384_DIGEST_SIZE, - .halg.statesize = sizeof(struct artpec6_hash_export_state), - .halg.base = { - .cra_name = "sha384", - .cra_driver_name = "artpec-sha384", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA384_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct artpec6_hashalg_context), - .cra_alignmask = 3, - .cra_module = THIS_MODULE, - .cra_init = artpec6_crypto_ahash_init, - .cra_exit = artpec6_crypto_ahash_exit, - } - }, - /* HMAC SHA-384 */ - { - .init = artpec6_crypto_hmac_sha384_init, - .update = artpec6_crypto_hash_update, - .final = artpec6_crypto_hash_final, - .digest = artpec6_crypto_hmac_sha384_digest, - .import = artpec6_crypto_hash_import, - .export = artpec6_crypto_hash_export, - .setkey = artpec6_crypto_hash_set_key, - .halg.digestsize = SHA384_DIGEST_SIZE, - .halg.statesize = sizeof(struct artpec6_hash_export_state), - .halg.base = { - .cra_name = "hmac(sha384)", - .cra_driver_name = "artpec-hmac-sha384", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA384_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct artpec6_hashalg_context), - .cra_alignmask = 3, - .cra_module = THIS_MODULE, - .cra_init = artpec6_crypto_ahash_init_hmac_sha384, - .cra_exit = artpec6_crypto_ahash_exit, - } - }, - /* SHA-512 */ - { - .init = artpec6_crypto_sha512_init, - .update = artpec6_crypto_hash_update, - .final = artpec6_crypto_hash_final, - .digest = artpec6_crypto_sha512_digest, - .import = artpec6_crypto_hash_import, - .export = artpec6_crypto_hash_export, - .halg.digestsize = SHA512_DIGEST_SIZE, - .halg.statesize = sizeof(struct artpec6_hash_export_state), - .halg.base = { - .cra_name = "sha512", - .cra_driver_name = "artpec-sha512", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA512_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct artpec6_hashalg_context), - .cra_alignmask = 3, - .cra_module = THIS_MODULE, - .cra_init = artpec6_crypto_ahash_init, - .cra_exit = artpec6_crypto_ahash_exit, - } - }, - /* HMAC SHA-512 */ - { - .init = artpec6_crypto_hmac_sha512_init, - .update = artpec6_crypto_hash_update, - .final = artpec6_crypto_hash_final, - .digest = artpec6_crypto_hmac_sha512_digest, - .import = artpec6_crypto_hash_import, - .export = artpec6_crypto_hash_export, - .setkey = artpec6_crypto_hash_set_key, - .halg.digestsize = SHA512_DIGEST_SIZE, - .halg.statesize = sizeof(struct artpec6_hash_export_state), - .halg.base = { - .cra_name = "hmac(sha512)", - .cra_driver_name = "artpec-hmac-sha512", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA512_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct artpec6_hashalg_context), - .cra_alignmask = 3, - .cra_module = THIS_MODULE, - .cra_init = artpec6_crypto_ahash_init_hmac_sha512, - .cra_exit = artpec6_crypto_ahash_exit, - } - }, -}; - /* Crypto */ static struct skcipher_alg crypto_algos[] = { /* AES - ECB */ @@ -2984,12 +2825,6 @@ static void artpec6_crypto_init_debugfs(void) { dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL); - if (!dbgfs_root || IS_ERR(dbgfs_root)) { - dbgfs_root = NULL; - pr_err("%s: Could not initialise debugfs!\n", MODULE_NAME); - return; - } - #ifdef CONFIG_FAULT_INJECTION fault_create_debugfs_attr("fail_status_read", dbgfs_root, &artpec6_crypto_fail_status_read); @@ -3001,9 +2836,6 
@@ static void artpec6_crypto_init_debugfs(void) static void artpec6_crypto_free_debugfs(void) { - if (!dbgfs_root) - return; - debugfs_remove_recursive(dbgfs_root); dbgfs_root = NULL; } @@ -3104,19 +2936,10 @@ static int artpec6_crypto_probe(struct platform_device *pdev) goto disable_hw; } - if (variant != ARTPEC6_CRYPTO) { - err = crypto_register_ahashes(artpec7_hash_algos, - ARRAY_SIZE(artpec7_hash_algos)); - if (err) { - dev_err(dev, "Failed to register ahashes\n"); - goto unregister_ahashes; - } - } - err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos)); if (err) { dev_err(dev, "Failed to register ciphers\n"); - goto unregister_a7_ahashes; + goto unregister_ahashes; } err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos)); @@ -3129,10 +2952,6 @@ static int artpec6_crypto_probe(struct platform_device *pdev) unregister_algs: crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos)); -unregister_a7_ahashes: - if (variant != ARTPEC6_CRYPTO) - crypto_unregister_ahashes(artpec7_hash_algos, - ARRAY_SIZE(artpec7_hash_algos)); unregister_ahashes: crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos)); disable_hw: @@ -3148,9 +2967,6 @@ static int artpec6_crypto_remove(struct platform_device *pdev) int irq = platform_get_irq(pdev, 0); crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos)); - if (ac->variant != ARTPEC6_CRYPTO) - crypto_unregister_ahashes(artpec7_hash_algos, - ARRAY_SIZE(artpec7_hash_algos)); crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos)); crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos)); diff --git a/drivers/crypto/bcm/Makefile b/drivers/crypto/bcm/Makefile index 13cb80eb2665..7469e19afe85 100644 --- a/drivers/crypto/bcm/Makefile +++ b/drivers/crypto/bcm/Makefile @@ -11,5 +11,3 @@ obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) := bcm_crypto_spu.o bcm_crypto_spu-objs := util.o spu.o spu2.o cipher.o - -ccflags-y += -I. 
-DBCMDRIVER diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 2d1f1db9f807..28f592f7e1b7 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -717,7 +717,7 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx) */ unsigned int new_data_len; - unsigned int chunk_start = 0; + unsigned int __maybe_unused chunk_start = 0; u32 db_size; /* Length of data field, incl gcm and hash padding */ int pad_len = 0; /* total pad len, including gcm, hash, stat padding */ u32 data_pad_len = 0; /* length of GCM/CCM padding */ @@ -1675,8 +1675,6 @@ static void spu_rx_callback(struct mbox_client *cl, void *msg) struct spu_hw *spu = &iproc_priv.spu; struct brcm_message *mssg = msg; struct iproc_reqctx_s *rctx; - struct iproc_ctx_s *ctx; - struct crypto_async_request *areq; int err = 0; rctx = mssg->ctx; @@ -1686,8 +1684,6 @@ static void spu_rx_callback(struct mbox_client *cl, void *msg) err = -EFAULT; goto cb_finish; } - areq = rctx->parent; - ctx = rctx->ctx; /* process the SPU status */ err = spu->spu_status_process(rctx->msg_buf.rx_stat); @@ -1822,7 +1818,7 @@ static int des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, if (keylen == DES_KEY_SIZE) { if (des_ekey(tmp, key) == 0) { if (crypto_ablkcipher_get_flags(cipher) & - CRYPTO_TFM_REQ_WEAK_KEY) { + CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) { u32 flags = CRYPTO_TFM_RES_WEAK_KEY; crypto_ablkcipher_set_flags(cipher, flags); @@ -2845,44 +2841,28 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, struct spu_hw *spu = &iproc_priv.spu; struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); struct crypto_tfm *tfm = crypto_aead_tfm(cipher); - struct rtattr *rta = (void *)key; - struct crypto_authenc_key_param *param; - const u8 *origkey = key; - const unsigned int origkeylen = keylen; - - int ret = 0; + struct crypto_authenc_keys keys; + int ret; flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key, keylen); flow_dump(" key: ", key, keylen); - if (!RTA_OK(rta, keylen)) + ret = crypto_authenc_extractkeys(&keys, key, keylen); + if (ret) goto badkey; - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) - goto badkey; - if (RTA_PAYLOAD(rta) < sizeof(*param)) - goto badkey; - - param = RTA_DATA(rta); - ctx->enckeylen = be32_to_cpu(param->enckeylen); - key += RTA_ALIGN(rta->rta_len); - keylen -= RTA_ALIGN(rta->rta_len); - - if (keylen < ctx->enckeylen) - goto badkey; - if (ctx->enckeylen > MAX_KEY_SIZE) + if (keys.enckeylen > MAX_KEY_SIZE || + keys.authkeylen > MAX_KEY_SIZE) goto badkey; - ctx->authkeylen = keylen - ctx->enckeylen; + ctx->enckeylen = keys.enckeylen; + ctx->authkeylen = keys.authkeylen; - if (ctx->authkeylen > MAX_KEY_SIZE) - goto badkey; - - memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen); + memcpy(ctx->enckey, keys.enckey, keys.enckeylen); /* May end up padding auth key. So make sure it's zeroed. 
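crypto_authenc_extractkeys(), used above in place of open-coded rtattr parsing, splits an authenc() key blob into its authentication and encryption halves and validates the layout. A minimal consumer sketch, with ctx and MAX_KEY_SIZE standing in for driver-specific storage:

	#include <crypto/authenc.h>

	static int demo_authenc_setkey(struct crypto_aead *aead, const u8 *key,
				       unsigned int keylen)
	{
		struct crypto_authenc_keys keys;

		if (crypto_authenc_extractkeys(&keys, key, keylen))
			goto badkey;

		if (keys.enckeylen > MAX_KEY_SIZE ||
		    keys.authkeylen > MAX_KEY_SIZE)
			goto badkey;

		/* keys.authkey and keys.enckey point into the caller's blob */
		memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
		memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
		return 0;

	badkey:
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}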
*/ memset(ctx->authkey, 0, sizeof(ctx->authkey)); - memcpy(ctx->authkey, key, ctx->authkeylen); + memcpy(ctx->authkey, keys.authkey, keys.authkeylen); switch (ctx->alg->cipher_info.alg) { case CIPHER_ALG_DES: @@ -2890,9 +2870,9 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, u32 tmp[DES_EXPKEY_WORDS]; u32 flags = CRYPTO_TFM_RES_WEAK_KEY; - if (des_ekey(tmp, key) == 0) { + if (des_ekey(tmp, keys.enckey) == 0) { if (crypto_aead_get_flags(cipher) & - CRYPTO_TFM_REQ_WEAK_KEY) { + CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) { crypto_aead_set_flags(cipher, flags); return -EINVAL; } @@ -2905,7 +2885,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, break; case CIPHER_ALG_3DES: if (ctx->enckeylen == (DES_KEY_SIZE * 3)) { - const u32 *K = (const u32 *)key; + const u32 *K = (const u32 *)keys.enckey; u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED; if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) || @@ -2956,9 +2936,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; ctx->fallback_cipher->base.crt_flags |= tfm->crt_flags & CRYPTO_TFM_REQ_MASK; - ret = - crypto_aead_setkey(ctx->fallback_cipher, origkey, - origkeylen); + ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen); if (ret) { flow_log(" fallback setkey() returned:%d\n", ret); tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; @@ -3868,7 +3846,6 @@ static struct iproc_alg_s driver_algs[] = { .cra_driver_name = "ctr-aes-iproc", .cra_blocksize = AES_BLOCK_SIZE, .cra_ablkcipher = { - /* .geniv = "chainiv", */ .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, @@ -4605,7 +4582,6 @@ static int spu_register_ablkcipher(struct iproc_alg_s *driver_alg) crypto->cra_priority = cipher_pri; crypto->cra_alignmask = 0; crypto->cra_ctxsize = sizeof(struct iproc_ctx_s); - INIT_LIST_HEAD(&crypto->cra_list); crypto->cra_init = ablkcipher_cra_init; crypto->cra_exit = generic_cra_exit; @@ -4652,12 +4628,16 @@ static int spu_register_ahash(struct iproc_alg_s *driver_alg) hash->halg.statesize = sizeof(struct spu_hash_export_s); if (driver_alg->auth_info.mode != HASH_MODE_HMAC) { - hash->setkey = ahash_setkey; hash->init = ahash_init; hash->update = ahash_update; hash->final = ahash_final; hash->finup = ahash_finup; hash->digest = ahash_digest; + if ((driver_alg->auth_info.alg == HASH_ALG_AES) && + ((driver_alg->auth_info.mode == HASH_MODE_XCBC) || + (driver_alg->auth_info.mode == HASH_MODE_CMAC))) { + hash->setkey = ahash_setkey; + } } else { hash->setkey = ahash_hmac_setkey; hash->init = ahash_hmac_init; @@ -4687,7 +4667,6 @@ static int spu_register_aead(struct iproc_alg_s *driver_alg) aead->base.cra_priority = aead_pri; aead->base.cra_alignmask = 0; aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s); - INIT_LIST_HEAD(&aead->base.cra_list); aead->base.cra_flags |= CRYPTO_ALG_ASYNC; /* setkey set in alg initialization */ diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h index 763c425c41ca..f6da49758954 100644 --- a/drivers/crypto/bcm/cipher.h +++ b/drivers/crypto/bcm/cipher.h @@ -23,6 +23,7 @@ #include <crypto/aes.h> #include <crypto/internal/hash.h> #include <crypto/aead.h> +#include <crypto/arc4.h> #include <crypto/gcm.h> #include <crypto/sha.h> #include <crypto/sha3.h> @@ -34,9 +35,6 @@ /* Driver supports up to MAX_SPUS SPU blocks */ #define MAX_SPUS 16 -#define ARC4_MIN_KEY_SIZE 1 -#define ARC4_MAX_KEY_SIZE 256 -#define ARC4_BLOCK_SIZE 1 #define ARC4_STATE_SIZE 4 #define CCM_AES_IV_SIZE 16 diff --git a/drivers/crypto/bcm/util.c 
b/drivers/crypto/bcm/util.c index a912c6ad3e85..d8cda5fb75ad 100644 --- a/drivers/crypto/bcm/util.c +++ b/drivers/crypto/bcm/util.c @@ -201,46 +201,6 @@ struct sdesc { char ctx[]; }; -/* do a synchronous decrypt operation */ -int do_decrypt(char *alg_name, - void *key_ptr, unsigned int key_len, - void *iv_ptr, void *src_ptr, void *dst_ptr, - unsigned int block_len) -{ - struct scatterlist sg_in[1], sg_out[1]; - struct crypto_blkcipher *tfm = - crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC); - struct blkcipher_desc desc = {.tfm = tfm, .flags = 0 }; - int ret = 0; - void *iv; - int ivsize; - - flow_log("%s() name:%s block_len:%u\n", __func__, alg_name, block_len); - - if (IS_ERR(tfm)) - return PTR_ERR(tfm); - - crypto_blkcipher_setkey((void *)tfm, key_ptr, key_len); - - sg_init_table(sg_in, 1); - sg_set_buf(sg_in, src_ptr, block_len); - - sg_init_table(sg_out, 1); - sg_set_buf(sg_out, dst_ptr, block_len); - - iv = crypto_blkcipher_crt(tfm)->iv; - ivsize = crypto_blkcipher_ivsize(tfm); - memcpy(iv, iv_ptr, ivsize); - - ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, block_len); - crypto_free_blkcipher(tfm); - - if (ret < 0) - pr_err("aes_decrypt failed %d\n", ret); - - return ret; -} - /** * do_shash() - Do a synchronous hash operation in software * @name: The name of the hash algorithm diff --git a/drivers/crypto/bcm/util.h b/drivers/crypto/bcm/util.h index 712e029795f8..15c60356518a 100644 --- a/drivers/crypto/bcm/util.h +++ b/drivers/crypto/bcm/util.h @@ -95,12 +95,6 @@ u32 spu_msg_sg_add(struct scatterlist **to_sg, void add_to_ctr(u8 *ctr_pos, unsigned int increment); -/* do a synchronous decrypt operation */ -int do_decrypt(char *alg_name, - void *key_ptr, unsigned int key_len, - void *iv_ptr, void *src_ptr, void *dst_ptr, - unsigned int block_len); - /* produce a message digest from data of length n bytes */ int do_shash(unsigned char *name, unsigned char *result, const u8 *data1, unsigned int data1_len, diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index c4b1cade55c1..577c9844b322 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig @@ -91,6 +91,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API select CRYPTO_AEAD select CRYPTO_AUTHENC select CRYPTO_BLKCIPHER + select CRYPTO_DES help Selecting this will offload crypto for users of the scatterlist crypto API (such as the linux native IPSec diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 869f092432de..579578498deb 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -3,7 +3,7 @@ * caam - Freescale FSL CAAM support for crypto API * * Copyright 2008-2011 Freescale Semiconductor, Inc. - * Copyright 2016-2018 NXP + * Copyright 2016-2019 NXP * * Based on talitos crypto API driver. 
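The deleted do_decrypt() helper was built on the legacy blkcipher interface. Its modern one-shot equivalent is the synchronous skcipher API; a sketch under the same calling convention:

	#include <crypto/skcipher.h>

	static int demo_decrypt(const char *alg, const u8 *key,
				unsigned int keylen, u8 *iv,
				void *src, void *dst, unsigned int len)
	{
		struct crypto_sync_skcipher *tfm;
		struct scatterlist sg_in, sg_out;
		int ret;

		tfm = crypto_alloc_sync_skcipher(alg, 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_sync_skcipher_setkey(tfm, key, keylen);
		if (ret)
			goto out;

		sg_init_one(&sg_in, src, len);
		sg_init_one(&sg_out, dst, len);

		{
			SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

			skcipher_request_set_sync_tfm(req, tfm);
			skcipher_request_set_callback(req, 0, NULL, NULL);
			skcipher_request_set_crypt(req, &sg_in, &sg_out,
						   len, iv);
			ret = crypto_skcipher_decrypt(req);
			skcipher_request_zero(req);
		}
	out:
		crypto_free_sync_skcipher(tfm);
		return ret;
	}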
* @@ -72,6 +72,8 @@ #define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \ CAAM_CMD_SZ * 5) +#define CHACHAPOLY_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6) + #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) @@ -513,6 +515,61 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc, return 0; } +static int chachapoly_set_sh_desc(struct crypto_aead *aead) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + unsigned int ivsize = crypto_aead_ivsize(aead); + u32 *desc; + + if (!ctx->cdata.keylen || !ctx->authsize) + return 0; + + desc = ctx->sh_desc_enc; + cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize, + ctx->authsize, true, false); + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, + desc_bytes(desc), ctx->dir); + + desc = ctx->sh_desc_dec; + cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize, + ctx->authsize, false, false); + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, + desc_bytes(desc), ctx->dir); + + return 0; +} + +static int chachapoly_setauthsize(struct crypto_aead *aead, + unsigned int authsize) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + + if (authsize != POLY1305_DIGEST_SIZE) + return -EINVAL; + + ctx->authsize = authsize; + return chachapoly_set_sh_desc(aead); +} + +static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + unsigned int ivsize = crypto_aead_ivsize(aead); + unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize; + + if (keylen != CHACHA_KEY_SIZE + saltlen) { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + ctx->cdata.key_virt = key; + ctx->cdata.keylen = keylen - saltlen; + + return chachapoly_set_sh_desc(aead); +} + static int aead_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { @@ -709,6 +766,27 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, return 0; } +static int des_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + u32 tmp[DES3_EDE_EXPKEY_WORDS]; + struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher); + + if (keylen == DES3_EDE_KEY_SIZE && + __des3_ede_setkey(tmp, &tfm->crt_flags, key, DES3_EDE_KEY_SIZE)) { + return -EINVAL; + } + + if (!des_ekey(tmp, key) && (crypto_skcipher_get_flags(skcipher) & + CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { + crypto_skcipher_set_flags(skcipher, + CRYPTO_TFM_RES_WEAK_KEY); + return -EINVAL; + } + + return skcipher_setkey(skcipher, key, keylen); +} + static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { @@ -745,6 +823,8 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, * aead_edesc - s/w-extended aead descriptor * @src_nents: number of segments in input s/w scatterlist * @dst_nents: number of segments in output s/w scatterlist + * @mapped_src_nents: number of segments in input h/w link table + * @mapped_dst_nents: number of segments in output h/w link table * @sec4_sg_bytes: length of dma mapped sec4_sg space * @sec4_sg_dma: bus physical mapped address of h/w link table * @sec4_sg: pointer to h/w link table @@ -753,6 +833,8 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, struct aead_edesc { int src_nents; int dst_nents; + int mapped_src_nents; + int mapped_dst_nents; int sec4_sg_bytes; dma_addr_t sec4_sg_dma; struct 
sec4_sg_entry *sec4_sg; @@ -763,6 +845,8 @@ struct aead_edesc { * skcipher_edesc - s/w-extended skcipher descriptor * @src_nents: number of segments in input s/w scatterlist * @dst_nents: number of segments in output s/w scatterlist + * @mapped_src_nents: number of segments in input h/w link table + * @mapped_dst_nents: number of segments in output h/w link table * @iv_dma: dma address of iv for checking continuity and link table * @sec4_sg_bytes: length of dma mapped sec4_sg space * @sec4_sg_dma: bus physical mapped address of h/w link table @@ -773,6 +857,8 @@ struct aead_edesc { struct skcipher_edesc { int src_nents; int dst_nents; + int mapped_src_nents; + int mapped_dst_nents; dma_addr_t iv_dma; int sec4_sg_bytes; dma_addr_t sec4_sg_dma; @@ -789,7 +875,8 @@ static void caam_unmap(struct device *dev, struct scatterlist *src, if (dst != src) { if (src_nents) dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); - dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); + if (dst_nents) + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); } else { dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); } @@ -904,8 +991,9 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, * The crypto API expects us to set the IV (req->iv) to the last * ciphertext block. This is used e.g. by the CTS mode. */ - scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize, - ivsize, 0); + if (ivsize) + scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - + ivsize, ivsize, 0); kfree(edesc); @@ -966,11 +1054,12 @@ static void init_aead_job(struct aead_request *req, init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); if (all_contig) { - src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0; + src_dma = edesc->mapped_src_nents ? sg_dma_address(req->src) : + 0; in_options = 0; } else { src_dma = edesc->sec4_sg_dma; - sec4_sg_index += edesc->src_nents; + sec4_sg_index += edesc->mapped_src_nents; in_options = LDST_SGF; } @@ -981,8 +1070,11 @@ static void init_aead_job(struct aead_request *req, out_options = in_options; if (unlikely(req->src != req->dst)) { - if (edesc->dst_nents == 1) { + if (!edesc->mapped_dst_nents) { + dst_dma = 0; + } else if (edesc->mapped_dst_nents == 1) { dst_dma = sg_dma_address(req->dst); + out_options = 0; } else { dst_dma = edesc->sec4_sg_dma + sec4_sg_index * @@ -1031,6 +1123,40 @@ static void init_gcm_job(struct aead_request *req, /* End of blank commands */ } +static void init_chachapoly_job(struct aead_request *req, + struct aead_edesc *edesc, bool all_contig, + bool encrypt) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + unsigned int ivsize = crypto_aead_ivsize(aead); + unsigned int assoclen = req->assoclen; + u32 *desc = edesc->hw_desc; + u32 ctx_iv_off = 4; + + init_aead_job(req, edesc, all_contig, encrypt); + + if (ivsize != CHACHAPOLY_IV_SIZE) { + /* IPsec specific: CONTEXT1[223:128] = {NONCE, IV} */ + ctx_iv_off += 4; + + /* + * The associated data comes already with the IV but we need + * to skip it when we authenticate or encrypt... + */ + assoclen -= ivsize; + } + + append_math_add_imm_u32(desc, REG3, ZERO, IMM, assoclen); + + /* + * For IPsec load the IV further in the same register. 
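The context layout is the one subtlety in the ChaCha20-Poly1305 job above: RFC7539 loads the full 12-byte nonce at context offset 4, while the IPsec variant keeps a 4-byte salt there and appends the 8-byte per-packet IV after it, with that IV also counted out of the AAD. In sketch form:

	u32 ctx_iv_off = 4;		/* RFC7539: 12-byte nonce at offset 4 */
	unsigned int assoclen = req->assoclen;

	if (ivsize != CHACHAPOLY_IV_SIZE) {	/* rfc7539esp: ivsize == 8 */
		ctx_iv_off += 4;	/* CONTEXT1[223:128] = {NONCE, IV} */
		assoclen -= ivsize;	/* the IV travels inside the AAD */
	}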
+ * For RFC7539 simply load the 12 bytes nonce in a single operation + */ + append_load_as_imm(desc, req->iv, ivsize, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + ctx_iv_off << LDST_OFFSET_SHIFT); +} + static void init_authenc_job(struct aead_request *req, struct aead_edesc *edesc, bool all_contig, bool encrypt) @@ -1092,9 +1218,9 @@ static void init_skcipher_job(struct skcipher_request *req, int ivsize = crypto_skcipher_ivsize(skcipher); u32 *desc = edesc->hw_desc; u32 *sh_desc; - u32 out_options = 0; - dma_addr_t dst_dma, ptr; - int len; + u32 in_options = 0, out_options = 0; + dma_addr_t src_dma, dst_dma, ptr; + int len, sec4_sg_index = 0; #ifdef DEBUG print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", @@ -1112,21 +1238,27 @@ static void init_skcipher_job(struct skcipher_request *req, len = desc_len(sh_desc); init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); - append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->cryptlen + ivsize, - LDST_SGF); + if (ivsize || edesc->mapped_src_nents > 1) { + src_dma = edesc->sec4_sg_dma; + sec4_sg_index = edesc->mapped_src_nents + !!ivsize; + in_options = LDST_SGF; + } else { + src_dma = sg_dma_address(req->src); + } + + append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options); if (likely(req->src == req->dst)) { - dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry); - out_options = LDST_SGF; + dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry); + out_options = in_options; + } else if (edesc->mapped_dst_nents == 1) { + dst_dma = sg_dma_address(req->dst); } else { - if (edesc->dst_nents == 1) { - dst_dma = sg_dma_address(req->dst); - } else { - dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) * - sizeof(struct sec4_sg_entry); - out_options = LDST_SGF; - } + dst_dma = edesc->sec4_sg_dma + sec4_sg_index * + sizeof(struct sec4_sg_entry); + out_options = LDST_SGF; } + append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options); } @@ -1198,12 +1330,19 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, mapped_src_nents = 0; } - mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents, - DMA_FROM_DEVICE); - if (unlikely(!mapped_dst_nents)) { - dev_err(jrdev, "unable to map destination\n"); - dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); - return ERR_PTR(-ENOMEM); + /* Cover also the case of null (zero length) output data */ + if (dst_nents) { + mapped_dst_nents = dma_map_sg(jrdev, req->dst, + dst_nents, + DMA_FROM_DEVICE); + if (unlikely(!mapped_dst_nents)) { + dev_err(jrdev, "unable to map destination\n"); + dma_unmap_sg(jrdev, req->src, src_nents, + DMA_TO_DEVICE); + return ERR_PTR(-ENOMEM); + } + } else { + mapped_dst_nents = 0; } } @@ -1222,6 +1361,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, edesc->src_nents = src_nents; edesc->dst_nents = dst_nents; + edesc->mapped_src_nents = mapped_src_nents; + edesc->mapped_dst_nents = mapped_dst_nents; edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + desc_bytes; *all_contig_ptr = !(mapped_src_nents > 1); @@ -1289,6 +1430,72 @@ static int gcm_encrypt(struct aead_request *req) return ret; } +static int chachapoly_encrypt(struct aead_request *req) +{ + struct aead_edesc *edesc; + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + bool all_contig; + u32 *desc; + int ret; + + edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig, + true); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); 
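On the setkey side the two templates differ only in where the salt lives: for rfc7539esp the 4-byte salt is appended to the 32-byte ChaCha20 key, so the expected keylen is derived from the IV size, exactly as chachapoly_setkey() does above:

	unsigned int ivsize = crypto_aead_ivsize(aead);		/* 12 or 8 */
	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;	/* 0 or 4 */

	if (keylen != CHACHA_KEY_SIZE + saltlen) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/* key[0..31]: ChaCha20 key; key[32..35]: ESP salt (rfc7539esp only) */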
+ + desc = edesc->hw_desc; + + init_chachapoly_job(req, edesc, all_contig, true); + print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + + ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); + if (!ret) { + ret = -EINPROGRESS; + } else { + aead_unmap(jrdev, edesc, req); + kfree(edesc); + } + + return ret; +} + +static int chachapoly_decrypt(struct aead_request *req) +{ + struct aead_edesc *edesc; + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + bool all_contig; + u32 *desc; + int ret; + + edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig, + false); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + desc = edesc->hw_desc; + + init_chachapoly_job(req, edesc, all_contig, false); + print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + + ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); + if (!ret) { + ret = -EINPROGRESS; + } else { + aead_unmap(jrdev, edesc, req); + kfree(edesc); + } + + return ret; +} + static int ipsec_gcm_encrypt(struct aead_request *req) { if (req->assoclen < 8) @@ -1429,7 +1636,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, GFP_KERNEL : GFP_ATOMIC; int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; struct skcipher_edesc *edesc; - dma_addr_t iv_dma; + dma_addr_t iv_dma = 0; u8 *iv; int ivsize = crypto_skcipher_ivsize(skcipher); int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes; @@ -1464,7 +1671,6 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, dev_err(jrdev, "unable to map source\n"); return ERR_PTR(-ENOMEM); } - mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents, DMA_FROM_DEVICE); if (unlikely(!mapped_dst_nents)) { @@ -1474,7 +1680,10 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, } } - sec4_sg_ents = 1 + mapped_src_nents; + if (!ivsize && mapped_src_nents == 1) + sec4_sg_ents = 0; // no need for an input hw s/g table + else + sec4_sg_ents = mapped_src_nents + !!ivsize; dst_sg_idx = sec4_sg_ents; sec4_sg_ents += mapped_dst_nents > 1 ? 
mapped_dst_nents : 0; sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry); @@ -1493,39 +1702,48 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, edesc->src_nents = src_nents; edesc->dst_nents = dst_nents; + edesc->mapped_src_nents = mapped_src_nents; + edesc->mapped_dst_nents = mapped_dst_nents; edesc->sec4_sg_bytes = sec4_sg_bytes; edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc + desc_bytes); /* Make sure IV is located in a DMAable area */ - iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes; - memcpy(iv, req->iv, ivsize); + if (ivsize) { + iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes; + memcpy(iv, req->iv, ivsize); + + iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, iv_dma)) { + dev_err(jrdev, "unable to map IV\n"); + caam_unmap(jrdev, req->src, req->dst, src_nents, + dst_nents, 0, 0, 0, 0); + kfree(edesc); + return ERR_PTR(-ENOMEM); + } - iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, iv_dma)) { - dev_err(jrdev, "unable to map IV\n"); - caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, - 0, 0, 0); - kfree(edesc); - return ERR_PTR(-ENOMEM); + dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); } - - dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); - sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0); + if (dst_sg_idx) + sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + + !!ivsize, 0); if (mapped_dst_nents > 1) { sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg + dst_sg_idx, 0); } - edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, - sec4_sg_bytes, DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { - dev_err(jrdev, "unable to map S/G table\n"); - caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, 0, 0); - kfree(edesc); - return ERR_PTR(-ENOMEM); + if (sec4_sg_bytes) { + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + caam_unmap(jrdev, req->src, req->dst, src_nents, + dst_nents, iv_dma, ivsize, 0, 0); + kfree(edesc); + return ERR_PTR(-ENOMEM); + } } edesc->iv_dma = iv_dma; @@ -1592,8 +1810,9 @@ static int skcipher_decrypt(struct skcipher_request *req) * The crypto API expects us to set the IV (req->iv) to the last * ciphertext block. 
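The guarded IV copies above uphold the skcipher API contract for CBC-like modes: on return, req->iv must hold the last ciphertext block so that chaining and CTS work. On decrypt the block has to be captured before an in-place operation overwrites it:

	int ivsize = crypto_skcipher_ivsize(skcipher);

	if (ivsize)	/* skip for ECB and other IV-less modes */
		scatterwalk_map_and_copy(req->iv, req->src,
					 req->cryptlen - ivsize, ivsize, 0);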
*/ - scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize, - ivsize, 0); + if (ivsize) + scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - + ivsize, ivsize, 0); /* Create and submit job descriptor*/ init_skcipher_job(req, edesc, false); @@ -1639,7 +1858,7 @@ static struct caam_skcipher_alg driver_algs[] = { .cra_driver_name = "cbc-3des-caam", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, - .setkey = skcipher_setkey, + .setkey = des_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = DES3_EDE_KEY_SIZE, @@ -1655,7 +1874,7 @@ static struct caam_skcipher_alg driver_algs[] = { .cra_driver_name = "cbc-des-caam", .cra_blocksize = DES_BLOCK_SIZE, }, - .setkey = skcipher_setkey, + .setkey = des_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = DES_KEY_SIZE, @@ -1721,6 +1940,66 @@ static struct caam_skcipher_alg driver_algs[] = { }, .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS, }, + { + .skcipher = { + .base = { + .cra_name = "ecb(des)", + .cra_driver_name = "ecb-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = des_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_ECB, + }, + { + .skcipher = { + .base = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_ECB, + }, + { + .skcipher = { + .base = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "ecb-des3-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB, + }, + { + .skcipher = { + .base = { + .cra_name = "ecb(arc4)", + .cra_driver_name = "ecb-arc4-caam", + .cra_blocksize = ARC4_BLOCK_SIZE, + }, + .setkey = skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = ARC4_MIN_KEY_SIZE, + .max_keysize = ARC4_MAX_KEY_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_ARC4 | OP_ALG_AAI_ECB, + }, }; static struct caam_aead_alg driver_aeads[] = { @@ -3002,6 +3281,50 @@ static struct caam_aead_alg driver_aeads[] = { .geniv = true, }, }, + { + .aead = { + .base = { + .cra_name = "rfc7539(chacha20,poly1305)", + .cra_driver_name = "rfc7539-chacha20-poly1305-" + "caam", + .cra_blocksize = 1, + }, + .setkey = chachapoly_setkey, + .setauthsize = chachapoly_setauthsize, + .encrypt = chachapoly_encrypt, + .decrypt = chachapoly_decrypt, + .ivsize = CHACHAPOLY_IV_SIZE, + .maxauthsize = POLY1305_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 | + OP_ALG_AAI_AEAD, + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 | + OP_ALG_AAI_AEAD, + }, + }, + { + .aead = { + .base = { + .cra_name = "rfc7539esp(chacha20,poly1305)", + .cra_driver_name = "rfc7539esp-chacha20-" + "poly1305-caam", + .cra_blocksize = 1, + }, + .setkey = chachapoly_setkey, + .setauthsize = chachapoly_setauthsize, + .encrypt = chachapoly_encrypt, + .decrypt = chachapoly_decrypt, + .ivsize = 8, + .maxauthsize = POLY1305_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = 
OP_ALG_ALGSEL_CHACHA20 | + OP_ALG_AAI_AEAD, + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 | + OP_ALG_AAI_AEAD, + }, + }, }; static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, @@ -3132,10 +3455,10 @@ static int __init caam_algapi_init(void) { struct device_node *dev_node; struct platform_device *pdev; - struct device *ctrldev; struct caam_drv_private *priv; int i = 0, err = 0; - u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst; + u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst; + u32 arc4_inst; unsigned int md_limit = SHA512_DIGEST_SIZE; bool registered = false; @@ -3152,30 +3475,58 @@ static int __init caam_algapi_init(void) return -ENODEV; } - ctrldev = &pdev->dev; - priv = dev_get_drvdata(ctrldev); + priv = dev_get_drvdata(&pdev->dev); of_node_put(dev_node); /* * If priv is NULL, it's probably because the caam driver wasn't * properly initialized (e.g. RNG4 init failed). Thus, bail out here. */ - if (!priv) - return -ENODEV; + if (!priv) { + err = -ENODEV; + goto out_put_dev; + } /* * Register crypto algorithms the device supports. * First, detect presence and attributes of DES, AES, and MD blocks. */ - cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); - cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); - des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT; - aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT; - md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + if (priv->era < 10) { + u32 cha_vid, cha_inst; + + cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); + aes_vid = cha_vid & CHA_ID_LS_AES_MASK; + md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + + cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); + des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> + CHA_ID_LS_DES_SHIFT; + aes_inst = cha_inst & CHA_ID_LS_AES_MASK; + md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + arc4_inst = (cha_inst & CHA_ID_LS_ARC4_MASK) >> + CHA_ID_LS_ARC4_SHIFT; + ccha_inst = 0; + ptha_inst = 0; + } else { + u32 aesa, mdha; + + aesa = rd_reg32(&priv->ctrl->vreg.aesa); + mdha = rd_reg32(&priv->ctrl->vreg.mdha); + + aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; + md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; + + des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK; + aes_inst = aesa & CHA_VER_NUM_MASK; + md_inst = mdha & CHA_VER_NUM_MASK; + ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK; + ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK; + arc4_inst = rd_reg32(&priv->ctrl->vreg.afha) & CHA_VER_NUM_MASK; + } /* If MD is present, limit digest size based on LP256 */ - if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)) + if (md_inst && md_vid == CHA_VER_VID_MD_LP256) md_limit = SHA256_DIGEST_SIZE; for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { @@ -3192,14 +3543,18 @@ static int __init caam_algapi_init(void) if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) continue; + /* Skip ARC4 algorithms if not supported by device */ + if (!arc4_inst && alg_sel == OP_ALG_ALGSEL_ARC4) + continue; + /* * Check support for AES modes not available * on LP devices. 
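On Era 10+ parts the per-engine version registers replace the old CHA id words, and registration simply skips any template whose engine is not instantiated. The filter reduces to a few lines (the *_inst counts come from the probe code above):

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1 = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
		u32 c2 = t_alg->caam.class2_alg_type & OP_ALG_ALGSEL_MASK;

		if (c1 == OP_ALG_ALGSEL_CHACHA20 && !ccha_inst)
			continue;	/* no ChaCha20 engine on this part */
		if (c2 == OP_ALG_ALGSEL_POLY1305 && !ptha_inst)
			continue;	/* no Poly1305 engine */

		caam_aead_alg_init(t_alg);
		err = crypto_register_aead(&t_alg->aead);
	}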
*/ - if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) - if ((t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) == - OP_ALG_AAI_XTS) - continue; + if (aes_vid == CHA_VER_VID_AES_LP && + (t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) == + OP_ALG_AAI_XTS) + continue; caam_skcipher_alg_init(t_alg); @@ -3232,21 +3587,28 @@ static int __init caam_algapi_init(void) if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES)) continue; + /* Skip CHACHA20 algorithms if not supported by device */ + if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && !ccha_inst) + continue; + + /* Skip POLY1305 algorithms if not supported by device */ + if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst) + continue; + /* * Check support for AES algorithms not available * on LP devices. */ - if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) - if (alg_aai == OP_ALG_AAI_GCM) - continue; + if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM) + continue; /* * Skip algorithms requiring message digests * if MD or MD size is not supported by device. */ - if (c2_alg_sel && - (!md_inst || (t_alg->aead.maxauthsize > md_limit))) - continue; + if (is_mdha(c2_alg_sel) && + (!md_inst || t_alg->aead.maxauthsize > md_limit)) + continue; caam_aead_alg_init(t_alg); @@ -3264,6 +3626,8 @@ static int __init caam_algapi_init(void) if (registered) pr_info("caam algorithms registered in /proc/crypto\n"); +out_put_dev: + put_device(&pdev->dev); return err; } diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c index 1a6f0da14106..1e1a376edc2f 100644 --- a/drivers/crypto/caam/caamalg_desc.c +++ b/drivers/crypto/caam/caamalg_desc.c @@ -2,7 +2,7 @@ /* * Shared descriptors for aead, skcipher algorithms * - * Copyright 2016-2018 NXP + * Copyright 2016-2019 NXP */ #include "compat.h" @@ -1213,6 +1213,139 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata, } EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap); +/** + * cnstr_shdsc_chachapoly - Chacha20 + Poly1305 generic AEAD (rfc7539) and + * IPsec ESP (rfc7634, a.k.a. rfc7539esp) shared + * descriptor (non-protocol). + * @desc: pointer to buffer used for descriptor construction + * @cdata: pointer to block cipher transform definitions + * Valid algorithm values - OP_ALG_ALGSEL_CHACHA20 ANDed with + * OP_ALG_AAI_AEAD. + * @adata: pointer to authentication transform definitions + * Valid algorithm values - OP_ALG_ALGSEL_POLY1305 ANDed with + * OP_ALG_AAI_AEAD. + * @ivsize: initialization vector size + * @icvsize: integrity check value (ICV) size (truncated or full) + * @encap: true if encapsulation, false if decapsulation + * @is_qi: true when called from caam/qi + */ +void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata, + struct alginfo *adata, unsigned int ivsize, + unsigned int icvsize, const bool encap, + const bool is_qi) +{ + u32 *key_jump_cmd, *wait_cmd; + u32 nfifo; + const bool is_ipsec = (ivsize != CHACHAPOLY_IV_SIZE); + + /* Note: Context registers are saved. 
*/ + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); + + /* skip key loading if they are loaded due to sharing */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + + append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen, + CLASS_1 | KEY_DEST_CLASS_REG); + + /* For IPsec load the salt from keymat in the context register */ + if (is_ipsec) + append_load_as_imm(desc, cdata->key_virt + cdata->keylen, 4, + LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT | + 4 << LDST_OFFSET_SHIFT); + + set_jump_tgt_here(desc, key_jump_cmd); + + /* Class 2 and 1 operations: Poly & ChaCha */ + if (encap) { + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | + OP_ALG_ENCRYPT); + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | + OP_ALG_ENCRYPT); + } else { + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | + OP_ALG_DECRYPT | OP_ALG_ICV_ON); + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | + OP_ALG_DECRYPT); + } + + if (is_qi) { + u32 *wait_load_cmd; + u32 ctx1_iv_off = is_ipsec ? 8 : 4; + + /* REG3 = assoclen */ + append_seq_load(desc, 4, LDST_CLASS_DECO | + LDST_SRCDST_WORD_DECO_MATH3 | + 4 << LDST_OFFSET_SHIFT); + + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_CALM | JUMP_COND_NCP | + JUMP_COND_NOP | JUMP_COND_NIP | + JUMP_COND_NIFP); + set_jump_tgt_here(desc, wait_load_cmd); + + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + ctx1_iv_off << LDST_OFFSET_SHIFT); + } + + /* + * MAGIC with NFIFO + * Read associated data from the input and send them to class1 and + * class2 alignment blocks. From class1 send data to output fifo and + * then write it to memory since we don't need to encrypt AD. + */ + nfifo = NFIFOENTRY_DEST_BOTH | NFIFOENTRY_FC1 | NFIFOENTRY_FC2 | + NFIFOENTRY_DTYPE_POLY | NFIFOENTRY_BND; + append_load_imm_u32(desc, nfifo, LDST_CLASS_IND_CCB | + LDST_SRCDST_WORD_INFO_FIFO_SM | LDLEN_MATH3); + + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + append_seq_fifo_load(desc, 0, FIFOLD_TYPE_NOINFOFIFO | + FIFOLD_CLASS_CLASS1 | LDST_VLF); + append_move_len(desc, MOVE_AUX_LS | MOVE_SRC_AUX_ABLK | + MOVE_DEST_OUTFIFO | MOVELEN_MRSEL_MATH3); + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF); + + /* IPsec - copy IV at the output */ + if (is_ipsec) + append_seq_fifo_store(desc, ivsize, FIFOST_TYPE_METADATA | + 0x2 << 25); + + wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TYPE_LOCAL | + JUMP_COND_NOP | JUMP_TEST_ALL); + set_jump_tgt_here(desc, wait_cmd); + + if (encap) { + /* Read and write cryptlen bytes */ + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, + CAAM_CMD_SZ); + aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); + + /* Write ICV */ + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | + LDST_SRCDST_BYTE_CONTEXT); + } else { + /* Read and write cryptlen bytes */ + append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, + CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, + CAAM_CMD_SZ); + aead_append_src_dst(desc, FIFOLD_TYPE_MSG); + + /* Load ICV for verification */ + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 | + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); + } + + print_hex_dump_debug("chachapoly shdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); +} +EXPORT_SYMBOL(cnstr_shdsc_chachapoly); + /* For skcipher encrypt 
and decrypt, read from req->src and write to req->dst */ static inline void skcipher_append_src_dst(u32 *desc) { @@ -1228,7 +1361,8 @@ static inline void skcipher_append_src_dst(u32 *desc) * @desc: pointer to buffer used for descriptor construction * @cdata: pointer to block cipher transform definitions * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed - * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128 + * - OP_ALG_ALGSEL_CHACHA20 * @ivsize: initialization vector size * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template * @ctx1_iv_off: IV offset in CONTEXT1 register @@ -1262,9 +1396,11 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata, set_jump_tgt_here(desc, key_jump_cmd); - /* Load iv */ - append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | - LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT)); + /* Load IV, if there is one */ + if (ivsize) + append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | + LDST_CLASS_1_CCB | (ctx1_iv_off << + LDST_OFFSET_SHIFT)); /* Load counter into CONTEXT1 reg */ if (is_rfc3686) @@ -1293,7 +1429,8 @@ EXPORT_SYMBOL(cnstr_shdsc_skcipher_encap); * @desc: pointer to buffer used for descriptor construction * @cdata: pointer to block cipher transform definitions * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed - * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128 + * - OP_ALG_ALGSEL_CHACHA20 * @ivsize: initialization vector size * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template * @ctx1_iv_off: IV offset in CONTEXT1 register @@ -1327,9 +1464,11 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata, set_jump_tgt_here(desc, key_jump_cmd); - /* load IV */ - append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | - LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT)); + /* Load IV, if there is one */ + if (ivsize) + append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | + LDST_CLASS_1_CCB | (ctx1_iv_off << + LDST_OFFSET_SHIFT)); /* Load counter into CONTEXT1 reg */ if (is_rfc3686) diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h index 1315c8f6f951..d5ca42ff961a 100644 --- a/drivers/crypto/caam/caamalg_desc.h +++ b/drivers/crypto/caam/caamalg_desc.h @@ -96,6 +96,11 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata, unsigned int ivsize, unsigned int icvsize, const bool is_qi); +void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata, + struct alginfo *adata, unsigned int ivsize, + unsigned int icvsize, const bool encap, + const bool is_qi); + void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata, unsigned int ivsize, const bool is_rfc3686, const u32 ctx1_iv_off); diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 23c9fc4975f8..c61921d32489 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -782,7 +782,7 @@ static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx, cpu = smp_processor_id(); drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc); - if (likely(!IS_ERR_OR_NULL(drv_ctx))) + if (!IS_ERR_OR_NULL(drv_ctx)) drv_ctx->op_type = type; ctx->drv_ctx[type] = drv_ctx; @@ -802,7 +802,8 @@ static void caam_unmap(struct device *dev, struct scatterlist *src, if (dst != src) { if (src_nents) dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); - dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); + 
if (dst_nents) + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); } else { dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); } @@ -892,7 +893,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, struct caam_drv_ctx *drv_ctx; drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT); - if (unlikely(IS_ERR_OR_NULL(drv_ctx))) + if (IS_ERR_OR_NULL(drv_ctx)) return (struct aead_edesc *)drv_ctx; /* allocate space for base edesc and hw desc commands, link tables */ @@ -955,13 +956,19 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, mapped_src_nents = 0; } - mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents, - DMA_FROM_DEVICE); - if (unlikely(!mapped_dst_nents)) { - dev_err(qidev, "unable to map destination\n"); - dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE); - qi_cache_free(edesc); - return ERR_PTR(-ENOMEM); + if (dst_nents) { + mapped_dst_nents = dma_map_sg(qidev, req->dst, + dst_nents, + DMA_FROM_DEVICE); + if (unlikely(!mapped_dst_nents)) { + dev_err(qidev, "unable to map destination\n"); + dma_unmap_sg(qidev, req->src, src_nents, + DMA_TO_DEVICE); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } + } else { + mapped_dst_nents = 0; } } @@ -1184,7 +1191,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, struct caam_drv_ctx *drv_ctx; drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT); - if (unlikely(IS_ERR_OR_NULL(drv_ctx))) + if (IS_ERR_OR_NULL(drv_ctx)) return (struct skcipher_edesc *)drv_ctx; src_nents = sg_nents_for_len(req->src, req->cryptlen); @@ -2462,7 +2469,7 @@ static int __init caam_qi_algapi_init(void) struct device *ctrldev; struct caam_drv_private *priv; int i = 0, err = 0; - u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst; + u32 aes_vid, aes_inst, des_inst, md_vid, md_inst; unsigned int md_limit = SHA512_DIGEST_SIZE; bool registered = false; @@ -2485,26 +2492,49 @@ static int __init caam_qi_algapi_init(void) * If priv is NULL, it's probably because the caam driver wasn't * properly initialized (e.g. RNG4 init failed). Thus, bail out here. */ - if (!priv || !priv->qi_present) - return -ENODEV; + if (!priv || !priv->qi_present) { + err = -ENODEV; + goto out_put_dev; + } if (caam_dpaa2) { dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n"); - return -ENODEV; + err = -ENODEV; + goto out_put_dev; } /* * Register crypto algorithms the device supports. * First, detect presence and attributes of DES, AES, and MD blocks. 
*/ - cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); - cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); - des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT; - aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT; - md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + if (priv->era < 10) { + u32 cha_vid, cha_inst; + + cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); + aes_vid = cha_vid & CHA_ID_LS_AES_MASK; + md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + + cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); + des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> + CHA_ID_LS_DES_SHIFT; + aes_inst = cha_inst & CHA_ID_LS_AES_MASK; + md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + } else { + u32 aesa, mdha; + + aesa = rd_reg32(&priv->ctrl->vreg.aesa); + mdha = rd_reg32(&priv->ctrl->vreg.mdha); + + aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; + md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; + + des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK; + aes_inst = aesa & CHA_VER_NUM_MASK; + md_inst = mdha & CHA_VER_NUM_MASK; + } /* If MD is present, limit digest size based on LP256 */ - if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)) + if (md_inst && md_vid == CHA_VER_VID_MD_LP256) md_limit = SHA256_DIGEST_SIZE; for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { @@ -2556,8 +2586,7 @@ * Check support for AES algorithms not available * on LP devices. */ - if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) && - (alg_aai == OP_ALG_AAI_GCM)) + if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM) continue; /* @@ -2584,6 +2613,8 @@ if (registered) dev_info(priv->qidev, "algorithms registered in /proc/crypto\n"); +out_put_dev: + put_device(ctrldev); return err; } diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 7d8ac0222fa3..c2c1abc68f81 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -25,13 +25,6 @@ #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \ SHA512_DIGEST_SIZE * 2) -#if !IS_ENABLED(CONFIG_CRYPTO_DEV_FSL_CAAM) -bool caam_little_end; -EXPORT_SYMBOL(caam_little_end); -bool caam_imx; -EXPORT_SYMBOL(caam_imx); -#endif - /* * This is a cache of buffers, from which the users of CAAM QI driver * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
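Both the job-ring (caamalg.c) and QI (caamalg_qi.c) front ends now gate algorithm registration on the same era-dependent capability probe shown in the hunk above: era < 10 parts publish CHA version and instantiation counts in the shared perfmon block, while era >= 10 parts expose one version register per accelerator. A minimal sketch of that shape, condensed into a helper for the MD block only (the helper name is invented for illustration; the registers, masks and shifts are the ones the hunks above use):

static void caam_get_md_caps(struct caam_drv_private *priv,
                             u32 *md_vid, u32 *md_inst)
{
        if (priv->era < 10) {
                /* Era < 10: shared CHA id/instantiation registers */
                *md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
                           CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
                *md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
                            CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
        } else {
                /* Era >= 10: per-accelerator version register (MDHA) */
                u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);

                *md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
                *md_inst = mdha & CHA_VER_NUM_MASK;
        }
}

Callers then skip MD-backed algorithms when *md_inst is zero and clamp the digest limit to SHA256_DIGEST_SIZE when *md_vid reports an LP256 block, exactly as the registration loops above do.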
@@ -151,7 +144,8 @@ static void caam_unmap(struct device *dev, struct scatterlist *src, if (dst != src) { if (src_nents) dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); - dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); + if (dst_nents) + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); } else { dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); } @@ -392,13 +386,18 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, mapped_src_nents = 0; } - mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents, - DMA_FROM_DEVICE); - if (unlikely(!mapped_dst_nents)) { - dev_err(dev, "unable to map destination\n"); - dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE); - qi_cache_free(edesc); - return ERR_PTR(-ENOMEM); + if (dst_nents) { + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents, + DMA_FROM_DEVICE); + if (unlikely(!mapped_dst_nents)) { + dev_err(dev, "unable to map destination\n"); + dma_unmap_sg(dev, req->src, src_nents, + DMA_TO_DEVICE); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } + } else { + mapped_dst_nents = 0; } } else { src_nents = sg_nents_for_len(req->src, req->assoclen + @@ -462,7 +461,15 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, edesc->dst_nents = dst_nents; edesc->iv_dma = iv_dma; - edesc->assoclen = cpu_to_caam32(req->assoclen); + if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) == + OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE) + /* + * The associated data comes already with the IV but we need + * to skip it when we authenticate or encrypt... + */ + edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize); + else + edesc->assoclen = cpu_to_caam32(req->assoclen); edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4, DMA_TO_DEVICE); if (dma_mapping_error(dev, edesc->assoclen_dma)) { @@ -532,6 +539,68 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, return edesc; } +static int chachapoly_set_sh_desc(struct crypto_aead *aead) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + unsigned int ivsize = crypto_aead_ivsize(aead); + struct device *dev = ctx->dev; + struct caam_flc *flc; + u32 *desc; + + if (!ctx->cdata.keylen || !ctx->authsize) + return 0; + + flc = &ctx->flc[ENCRYPT]; + desc = flc->sh_desc; + cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize, + ctx->authsize, true, true); + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], + sizeof(flc->flc) + desc_bytes(desc), + ctx->dir); + + flc = &ctx->flc[DECRYPT]; + desc = flc->sh_desc; + cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize, + ctx->authsize, false, true); + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], + sizeof(flc->flc) + desc_bytes(desc), + ctx->dir); + + return 0; +} + +static int chachapoly_setauthsize(struct crypto_aead *aead, + unsigned int authsize) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + + if (authsize != POLY1305_DIGEST_SIZE) + return -EINVAL; + + ctx->authsize = authsize; + return chachapoly_set_sh_desc(aead); +} + +static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + unsigned int ivsize = crypto_aead_ivsize(aead); + unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize; + + if (keylen != CHACHA_KEY_SIZE + saltlen) { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + ctx->cdata.key_virt = key; + 
ctx->cdata.keylen = keylen - saltlen; + + return chachapoly_set_sh_desc(aead); +} + static int gcm_set_sh_desc(struct crypto_aead *aead) { struct caam_ctx *ctx = crypto_aead_ctx(aead); @@ -816,7 +885,9 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, u32 *desc; u32 ctx1_iv_off = 0; const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == - OP_ALG_AAI_CTR_MOD128); + OP_ALG_AAI_CTR_MOD128) && + ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) != + OP_ALG_ALGSEL_CHACHA20); const bool is_rfc3686 = alg->caam.rfc3686; print_hex_dump_debug("key in @" __stringify(__LINE__)": ", @@ -1494,7 +1565,23 @@ static struct caam_skcipher_alg driver_algs[] = { .ivsize = AES_BLOCK_SIZE, }, .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS, - } + }, + { + .skcipher = { + .base = { + .cra_name = "chacha20", + .cra_driver_name = "chacha20-caam-qi2", + .cra_blocksize = 1, + }, + .setkey = skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = CHACHA_IV_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20, + }, }; static struct caam_aead_alg driver_aeads[] = { @@ -2611,6 +2698,50 @@ static struct caam_aead_alg driver_aeads[] = { { .aead = { .base = { + .cra_name = "rfc7539(chacha20,poly1305)", + .cra_driver_name = "rfc7539-chacha20-poly1305-" + "caam-qi2", + .cra_blocksize = 1, + }, + .setkey = chachapoly_setkey, + .setauthsize = chachapoly_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CHACHAPOLY_IV_SIZE, + .maxauthsize = POLY1305_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 | + OP_ALG_AAI_AEAD, + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 | + OP_ALG_AAI_AEAD, + }, + }, + { + .aead = { + .base = { + .cra_name = "rfc7539esp(chacha20,poly1305)", + .cra_driver_name = "rfc7539esp-chacha20-" + "poly1305-caam-qi2", + .cra_blocksize = 1, + }, + .setkey = chachapoly_setkey, + .setauthsize = chachapoly_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = 8, + .maxauthsize = POLY1305_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 | + OP_ALG_AAI_AEAD, + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 | + OP_ALG_AAI_AEAD, + }, + }, + { + .aead = { + .base = { .cra_name = "authenc(hmac(sha512)," "rfc3686(ctr(aes)))", .cra_driver_name = "authenc-hmac-sha512-" @@ -4371,7 +4502,8 @@ static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv) nctx->cb = dpaa2_caam_fqdan_cb; /* Register notification callbacks */ - err = dpaa2_io_service_register(NULL, nctx); + ppriv->dpio = dpaa2_io_service_select(cpu); + err = dpaa2_io_service_register(ppriv->dpio, nctx, dev); if (unlikely(err)) { dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu); nctx->cb = NULL; @@ -4404,7 +4536,7 @@ err: ppriv = per_cpu_ptr(priv->ppriv, cpu); if (!ppriv->nctx.cb) break; - dpaa2_io_service_deregister(NULL, &ppriv->nctx); + dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev); } for_each_online_cpu(cpu) { @@ -4424,7 +4556,8 @@ static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv) for_each_online_cpu(cpu) { ppriv = per_cpu_ptr(priv->ppriv, cpu); - dpaa2_io_service_deregister(NULL, &ppriv->nctx); + dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, + priv->dev); dpaa2_io_store_destroy(ppriv->store); if (++i == priv->num_pairs) @@ -4522,7 +4655,7 @@ static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv) /* Retry while portal is busy */ do { - err = 
dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid, + err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid, ppriv->store); } while (err == -EBUSY); @@ -4590,7 +4723,7 @@ static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget) if (cleaned < budget) { napi_complete_done(napi, cleaned); - err = dpaa2_io_service_rearm(NULL, &ppriv->nctx); + err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx); if (unlikely(err)) dev_err(priv->dev, "Notification rearm failed: %d\n", err); @@ -4731,21 +4864,31 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) i = 0; for_each_online_cpu(cpu) { - dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", i, - priv->rx_queue_attr[i].fqid, - priv->tx_queue_attr[i].fqid); + u8 j; + + j = i % priv->num_pairs; ppriv = per_cpu_ptr(priv->ppriv, cpu); - ppriv->req_fqid = priv->tx_queue_attr[i].fqid; - ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid; - ppriv->prio = i; + ppriv->req_fqid = priv->tx_queue_attr[j].fqid; + + /* + * Allow all cores to enqueue, while only some of them + * will take part in dequeuing. + */ + if (++i > priv->num_pairs) + continue; + + ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid; + ppriv->prio = j; + + dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j, + priv->rx_queue_attr[j].fqid, + priv->tx_queue_attr[j].fqid); ppriv->net_dev.dev = *dev; INIT_LIST_HEAD(&ppriv->net_dev.napi_list); netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll, DPAA2_CAAM_NAPI_WEIGHT); - if (++i == priv->num_pairs) - break; } return 0; @@ -4908,6 +5051,11 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev) alg_sel == OP_ALG_ALGSEL_AES) continue; + /* Skip CHACHA20 algorithms if not supported by device */ + if (alg_sel == OP_ALG_ALGSEL_CHACHA20 && + !priv->sec_attr.ccha_acc_num) + continue; + t_alg->caam.dev = dev; caam_skcipher_alg_init(t_alg); @@ -4940,11 +5088,22 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev) c1_alg_sel == OP_ALG_ALGSEL_AES) continue; + /* Skip CHACHA20 algorithms if not supported by device */ + if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && + !priv->sec_attr.ccha_acc_num) + continue; + + /* Skip POLY1305 algorithms if not supported by device */ + if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && + !priv->sec_attr.ptha_acc_num) + continue; + /* * Skip algorithms requiring message digests * if MD not supported by device. */ - if (!priv->sec_attr.md_acc_num && c2_alg_sel) + if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 && + !priv->sec_attr.md_acc_num) continue; t_alg->caam.dev = dev; @@ -5081,7 +5240,8 @@ int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req) { struct dpaa2_fd fd; struct dpaa2_caam_priv *priv = dev_get_drvdata(dev); - int err = 0, i, id; + struct dpaa2_caam_priv_per_cpu *ppriv; + int err = 0, i; if (IS_ERR(req)) return PTR_ERR(req); @@ -5111,23 +5271,18 @@ int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req) dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1])); dpaa2_fd_set_flc(&fd, req->flc_dma); - /* - * There is no guarantee that preemption is disabled here, - * thus take action. 
- */ - preempt_disable(); - id = smp_processor_id() % priv->dpseci_attr.num_tx_queues; + ppriv = this_cpu_ptr(priv->ppriv); for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) { - err = dpaa2_io_service_enqueue_fq(NULL, - priv->tx_queue_attr[id].fqid, + err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid, &fd); if (err != -EBUSY) break; + + cpu_relax(); } - preempt_enable(); if (unlikely(err)) { - dev_err(dev, "Error enqueuing frame: %d\n", err); + dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err); goto err_out; } diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h index 9823bdefd029..20890780fb82 100644 --- a/drivers/crypto/caam/caamalg_qi2.h +++ b/drivers/crypto/caam/caamalg_qi2.h @@ -76,6 +76,7 @@ struct dpaa2_caam_priv { * @nctx: notification context of response FQ * @store: where dequeued frames are stored * @priv: backpointer to dpaa2_caam_priv + * @dpio: portal used for data path operations */ struct dpaa2_caam_priv_per_cpu { struct napi_struct napi; @@ -86,6 +87,7 @@ struct dpaa2_caam_priv_per_cpu { struct dpaa2_io_notification_ctx nctx; struct dpaa2_io_store *store; struct dpaa2_caam_priv *priv; + struct dpaa2_io *dpio; }; /* diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 46924affa0bd..7205d9f4029e 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -3,6 +3,7 @@ * caam - Freescale FSL CAAM support for ahash functions of crypto API * * Copyright 2011 Freescale Semiconductor, Inc. + * Copyright 2018-2019 NXP * * Based on caamalg.c crypto API driver. * @@ -97,13 +98,14 @@ struct caam_hash_ctx { u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; + u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned; dma_addr_t sh_desc_update_dma ____cacheline_aligned; dma_addr_t sh_desc_update_first_dma; dma_addr_t sh_desc_fin_dma; dma_addr_t sh_desc_digest_dma; + dma_addr_t key_dma; enum dma_data_direction dir; struct device *jrdev; - u8 key[CAAM_MAX_HASH_KEY_SIZE]; int ctx_len; struct alginfo adata; }; @@ -112,6 +114,7 @@ struct caam_hash_ctx { struct caam_hash_state { dma_addr_t buf_dma; dma_addr_t ctx_dma; + int ctx_dma_len; u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; int buflen_0; u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; @@ -157,6 +160,11 @@ static inline int *alt_buflen(struct caam_hash_state *state) return state->current_buf ? 
&state->buflen_0 : &state->buflen_1; } +static inline bool is_cmac_aes(u32 algtype) +{ + return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) == + (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC); +} /* Common job descriptor seq in/out ptr routines */ /* Map state->caam_ctx, and append seq_out_ptr command that points to it */ @@ -164,6 +172,7 @@ static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, struct caam_hash_state *state, int ctx_len) { + state->ctx_dma_len = ctx_len; state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, DMA_FROM_DEVICE); if (dma_mapping_error(jrdev, state->ctx_dma)) { @@ -177,18 +186,6 @@ static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, return 0; } -/* Map req->result, and append seq_out_ptr command that points to it */ -static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev, - u8 *result, int digestsize) -{ - dma_addr_t dst_dma; - - dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE); - append_seq_out_ptr(desc, dst_dma, digestsize, 0); - - return dst_dma; -} - /* Map current buffer in state (if length > 0) and put it in link table */ static inline int buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg, @@ -217,6 +214,7 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev, struct caam_hash_state *state, int ctx_len, struct sec4_sg_entry *sec4_sg, u32 flag) { + state->ctx_dma_len = ctx_len; state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag); if (dma_mapping_error(jrdev, state->ctx_dma)) { dev_err(jrdev, "unable to map ctx\n"); @@ -291,14 +289,119 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) return 0; } +static int axcbc_set_sh_desc(struct crypto_ahash *ahash) +{ + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + int digestsize = crypto_ahash_digestsize(ahash); + struct device *jrdev = ctx->jrdev; + u32 *desc; + + /* key is loaded from memory for UPDATE and FINALIZE states */ + ctx->adata.key_dma = ctx->key_dma; + + /* shared descriptor for ahash_update */ + desc = ctx->sh_desc_update; + cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE, + ctx->ctx_len, ctx->ctx_len, 0); + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, + desc_bytes(desc), ctx->dir); + print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + + /* shared descriptor for ahash_{final,finup} */ + desc = ctx->sh_desc_fin; + cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, + digestsize, ctx->ctx_len, 0); + dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, + desc_bytes(desc), ctx->dir); + print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + + /* key is immediate data for INIT and INITFINAL states */ + ctx->adata.key_virt = ctx->key; + + /* shared descriptor for first invocation of ahash_update */ + desc = ctx->sh_desc_update_first; + cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, + ctx->ctx_len, ctx->key_dma); + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, + desc_bytes(desc), ctx->dir); + print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + + /* shared descriptor for ahash_digest */ + desc = ctx->sh_desc_digest; + cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, + digestsize, ctx->ctx_len, 0); + dma_sync_single_for_device(jrdev, 
ctx->sh_desc_digest_dma, + desc_bytes(desc), ctx->dir); + print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + return 0; +} + +static int acmac_set_sh_desc(struct crypto_ahash *ahash) +{ + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + int digestsize = crypto_ahash_digestsize(ahash); + struct device *jrdev = ctx->jrdev; + u32 *desc; + + /* shared descriptor for ahash_update */ + desc = ctx->sh_desc_update; + cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE, + ctx->ctx_len, ctx->ctx_len, 0); + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, + desc_bytes(desc), ctx->dir); + print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); + + /* shared descriptor for ahash_{final,finup} */ + desc = ctx->sh_desc_fin; + cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, + digestsize, ctx->ctx_len, 0); + dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, + desc_bytes(desc), ctx->dir); + print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); + + /* shared descriptor for first invocation of ahash_update */ + desc = ctx->sh_desc_update_first; + cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, + ctx->ctx_len, 0); + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, + desc_bytes(desc), ctx->dir); + print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); + + /* shared descriptor for ahash_digest */ + desc = ctx->sh_desc_digest; + cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, + digestsize, ctx->ctx_len, 0); + dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, + desc_bytes(desc), ctx->dir); + print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); + + return 0; +} + /* Digest hash size if it is too large */ -static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, - u32 *keylen, u8 *key_out, u32 digestsize) +static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key, + u32 digestsize) { struct device *jrdev = ctx->jrdev; u32 *desc; struct split_key_result result; - dma_addr_t src_dma, dst_dma; + dma_addr_t key_dma; int ret; desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); @@ -309,18 +412,9 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, init_job_desc(desc, 0); - src_dma = dma_map_single(jrdev, (void *)key_in, *keylen, - DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, src_dma)) { - dev_err(jrdev, "unable to map key input memory\n"); - kfree(desc); - return -ENOMEM; - } - dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize, - DMA_FROM_DEVICE); - if (dma_mapping_error(jrdev, dst_dma)) { - dev_err(jrdev, "unable to map key output memory\n"); - dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); + key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL); + if (dma_mapping_error(jrdev, key_dma)) { + dev_err(jrdev, "unable to map key memory\n"); kfree(desc); return -ENOMEM; } @@ -328,16 +422,16 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, /* Job descriptor to perform unkeyed hash on key_in */ append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT | OP_ALG_AS_INITFINAL); - append_seq_in_ptr(desc, src_dma, 
*keylen, 0); + append_seq_in_ptr(desc, key_dma, *keylen, 0); append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG); - append_seq_out_ptr(desc, dst_dma, digestsize, 0); + append_seq_out_ptr(desc, key_dma, digestsize, 0); append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_CONTEXT); #ifdef DEBUG print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1); + DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1); print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); #endif @@ -353,12 +447,10 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, #ifdef DEBUG print_hex_dump(KERN_ERR, "digested key@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, key_in, - digestsize, 1); + DUMP_PREFIX_ADDRESS, 16, 4, key, digestsize, 1); #endif } - dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); - dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE); + dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL); *keylen = digestsize; @@ -382,13 +474,10 @@ static int ahash_setkey(struct crypto_ahash *ahash, #endif if (keylen > blocksize) { - hashed_key = kmalloc_array(digestsize, - sizeof(*hashed_key), - GFP_KERNEL | GFP_DMA); + hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA); if (!hashed_key) return -ENOMEM; - ret = hash_digest_key(ctx, key, &keylen, hashed_key, - digestsize); + ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize); if (ret) goto bad_free_key; key = hashed_key; @@ -423,9 +512,39 @@ static int ahash_setkey(struct crypto_ahash *ahash, return -EINVAL; } +static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key, + unsigned int keylen) +{ + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct device *jrdev = ctx->jrdev; + + memcpy(ctx->key, key, keylen); + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); + ctx->adata.keylen = keylen; + + print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1); + + return axcbc_set_sh_desc(ahash); +} + +static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key, + unsigned int keylen) +{ + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + + /* key is immediate data for all cmac shared descriptors */ + ctx->adata.key_virt = key; + ctx->adata.keylen = keylen; + + print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); + + return acmac_set_sh_desc(ahash); +} + /* * ahash_edesc - s/w-extended ahash descriptor - * @dst_dma: physical mapped address of req->result * @sec4_sg_dma: physical mapped address of h/w link table * @src_nents: number of segments in input scatterlist * @sec4_sg_bytes: length of dma mapped sec4_sg space @@ -433,7 +552,6 @@ static int ahash_setkey(struct crypto_ahash *ahash, * @sec4_sg: h/w link table */ struct ahash_edesc { - dma_addr_t dst_dma; dma_addr_t sec4_sg_dma; int src_nents; int sec4_sg_bytes; @@ -449,8 +567,6 @@ static inline void ahash_unmap(struct device *dev, if (edesc->src_nents) dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); - if (edesc->dst_dma) - dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE); if (edesc->sec4_sg_bytes) dma_unmap_single(dev, edesc->sec4_sg_dma, @@ -467,12 +583,10 @@ static inline void ahash_unmap_ctx(struct device *dev, struct ahash_edesc *edesc, struct ahash_request *req, int dst_len, u32 flag) 
{ - struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); struct caam_hash_state *state = ahash_request_ctx(req); if (state->ctx_dma) { - dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag); + dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag); state->ctx_dma = 0; } ahash_unmap(dev, edesc, req, dst_len); @@ -485,9 +599,9 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err, struct ahash_edesc *edesc; struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); int digestsize = crypto_ahash_digestsize(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); #ifdef DEBUG struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); #endif @@ -496,17 +610,14 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err, if (err) caam_jr_strstatus(jrdev, err); - ahash_unmap(jrdev, edesc, req, digestsize); + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); + memcpy(req->result, state->caam_ctx, digestsize); kfree(edesc); #ifdef DEBUG print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ctx->ctx_len, 1); - if (req->result) - print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, req->result, - digestsize, 1); #endif req->base.complete(&req->base, err); @@ -554,9 +665,9 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, struct ahash_edesc *edesc; struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); int digestsize = crypto_ahash_digestsize(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); #ifdef DEBUG struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); #endif @@ -565,17 +676,14 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, if (err) caam_jr_strstatus(jrdev, err); - ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE); + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); + memcpy(req->result, state->caam_ctx, digestsize); kfree(edesc); #ifdef DEBUG print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ctx->ctx_len, 1); - if (req->result) - print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, req->result, - digestsize, 1); #endif req->base.complete(&req->base, err); @@ -687,6 +795,7 @@ static int ahash_update_ctx(struct ahash_request *req) u8 *buf = current_buf(state); int *buflen = current_buflen(state); u8 *next_buf = alt_buf(state); + int blocksize = crypto_ahash_blocksize(ahash); int *next_buflen = alt_buflen(state), last_buflen; int in_len = *buflen + req->nbytes, to_hash; u32 *desc; @@ -695,9 +804,20 @@ static int ahash_update_ctx(struct ahash_request *req) int ret = 0; last_buflen = *next_buflen; - *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); + *next_buflen = in_len & (blocksize - 1); to_hash = in_len - *next_buflen; + /* + * For XCBC and CMAC, if to_hash is multiple of block size, + * keep last block in internal buffer + */ + if ((is_xcbc_aes(ctx->adata.algtype) || + is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && + (*next_buflen == 0)) { + *next_buflen = blocksize; + to_hash -= blocksize; + } + if (to_hash) { src_nents = sg_nents_for_len(req->src, 
req->nbytes - (*next_buflen)); @@ -745,19 +865,18 @@ static int ahash_update_ctx(struct ahash_request *req) if (ret) goto unmap_ctx; - if (mapped_nents) { + if (mapped_nents) sg_to_sec4_sg_last(req->src, mapped_nents, edesc->sec4_sg + sec4_sg_src_index, 0); - if (*next_buflen) - scatterwalk_map_and_copy(next_buf, req->src, - to_hash - *buflen, - *next_buflen, 0); - } else { + else sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1); - } + if (*next_buflen) + scatterwalk_map_and_copy(next_buf, req->src, + to_hash - *buflen, + *next_buflen, 0); desc = edesc->hw_desc; edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, @@ -800,7 +919,7 @@ static int ahash_update_ctx(struct ahash_request *req) #endif return ret; - unmap_ctx: +unmap_ctx: ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); kfree(edesc); return ret; @@ -836,7 +955,7 @@ static int ahash_final_ctx(struct ahash_request *req) edesc->sec4_sg_bytes = sec4_sg_bytes; ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, - edesc->sec4_sg, DMA_TO_DEVICE); + edesc->sec4_sg, DMA_BIDIRECTIONAL); if (ret) goto unmap_ctx; @@ -856,14 +975,7 @@ static int ahash_final_ctx(struct ahash_request *req) append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, LDST_SGF); - - edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, - digestsize); - if (dma_mapping_error(jrdev, edesc->dst_dma)) { - dev_err(jrdev, "unable to map dst\n"); - ret = -ENOMEM; - goto unmap_ctx; - } + append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); #ifdef DEBUG print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", @@ -876,7 +988,7 @@ static int ahash_final_ctx(struct ahash_request *req) return -EINPROGRESS; unmap_ctx: - ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); kfree(edesc); return ret; } @@ -930,7 +1042,7 @@ static int ahash_finup_ctx(struct ahash_request *req) edesc->src_nents = src_nents; ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, - edesc->sec4_sg, DMA_TO_DEVICE); + edesc->sec4_sg, DMA_BIDIRECTIONAL); if (ret) goto unmap_ctx; @@ -944,13 +1056,7 @@ static int ahash_finup_ctx(struct ahash_request *req) if (ret) goto unmap_ctx; - edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, - digestsize); - if (dma_mapping_error(jrdev, edesc->dst_dma)) { - dev_err(jrdev, "unable to map dst\n"); - ret = -ENOMEM; - goto unmap_ctx; - } + append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); #ifdef DEBUG print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", @@ -963,7 +1069,7 @@ static int ahash_finup_ctx(struct ahash_request *req) return -EINPROGRESS; unmap_ctx: - ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); kfree(edesc); return ret; } @@ -1022,10 +1128,8 @@ static int ahash_digest(struct ahash_request *req) desc = edesc->hw_desc; - edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, - digestsize); - if (dma_mapping_error(jrdev, edesc->dst_dma)) { - dev_err(jrdev, "unable to map dst\n"); + ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); + if (ret) { ahash_unmap(jrdev, edesc, req, digestsize); kfree(edesc); return -ENOMEM; @@ -1040,7 +1144,7 @@ static int ahash_digest(struct ahash_request *req) if (!ret) { ret = -EINPROGRESS; } else { - ahash_unmap(jrdev, edesc, req, digestsize); + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); kfree(edesc); } @@ -1071,20 +1175,20 @@ static 
int ahash_final_no_ctx(struct ahash_request *req) desc = edesc->hw_desc; - state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, state->buf_dma)) { - dev_err(jrdev, "unable to map src\n"); - goto unmap; - } + if (buflen) { + state->buf_dma = dma_map_single(jrdev, buf, buflen, + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, state->buf_dma)) { + dev_err(jrdev, "unable to map src\n"); + goto unmap; + } - append_seq_in_ptr(desc, state->buf_dma, buflen, 0); + append_seq_in_ptr(desc, state->buf_dma, buflen, 0); + } - edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, - digestsize); - if (dma_mapping_error(jrdev, edesc->dst_dma)) { - dev_err(jrdev, "unable to map dst\n"); + ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); + if (ret) goto unmap; - } #ifdef DEBUG print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", @@ -1095,7 +1199,7 @@ static int ahash_final_no_ctx(struct ahash_request *req) if (!ret) { ret = -EINPROGRESS; } else { - ahash_unmap(jrdev, edesc, req, digestsize); + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); kfree(edesc); } @@ -1118,6 +1222,7 @@ static int ahash_update_no_ctx(struct ahash_request *req) GFP_KERNEL : GFP_ATOMIC; u8 *buf = current_buf(state); int *buflen = current_buflen(state); + int blocksize = crypto_ahash_blocksize(ahash); u8 *next_buf = alt_buf(state); int *next_buflen = alt_buflen(state); int in_len = *buflen + req->nbytes, to_hash; @@ -1126,9 +1231,20 @@ static int ahash_update_no_ctx(struct ahash_request *req) u32 *desc; int ret = 0; - *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); + *next_buflen = in_len & (blocksize - 1); to_hash = in_len - *next_buflen; + /* + * For XCBC and CMAC, if to_hash is multiple of block size, + * keep last block in internal buffer + */ + if ((is_xcbc_aes(ctx->adata.algtype) || + is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && + (*next_buflen == 0)) { + *next_buflen = blocksize; + to_hash -= blocksize; + } + if (to_hash) { src_nents = sg_nents_for_len(req->src, req->nbytes - *next_buflen); @@ -1294,12 +1410,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req) goto unmap; } - edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, - digestsize); - if (dma_mapping_error(jrdev, edesc->dst_dma)) { - dev_err(jrdev, "unable to map dst\n"); + ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); + if (ret) goto unmap; - } #ifdef DEBUG print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", @@ -1310,7 +1423,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) if (!ret) { ret = -EINPROGRESS; } else { - ahash_unmap(jrdev, edesc, req, digestsize); + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); kfree(edesc); } @@ -1334,15 +1447,26 @@ static int ahash_update_first(struct ahash_request *req) u8 *next_buf = alt_buf(state); int *next_buflen = alt_buflen(state); int to_hash; + int blocksize = crypto_ahash_blocksize(ahash); u32 *desc; int src_nents, mapped_nents; struct ahash_edesc *edesc; int ret = 0; - *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) - - 1); + *next_buflen = req->nbytes & (blocksize - 1); to_hash = req->nbytes - *next_buflen; + /* + * For XCBC and CMAC, if to_hash is multiple of block size, + * keep last block in internal buffer + */ + if ((is_xcbc_aes(ctx->adata.algtype) || + is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && + (*next_buflen == 0)) { + *next_buflen = blocksize; + to_hash -= blocksize; + } + if 
(to_hash) { src_nents = sg_nents_for_len(req->src, req->nbytes - *next_buflen); @@ -1442,6 +1566,7 @@ static int ahash_init(struct ahash_request *req) state->final = ahash_final_no_ctx; state->ctx_dma = 0; + state->ctx_dma_len = 0; state->current_buf = 0; state->buf_dma = 0; state->buflen_0 = 0; @@ -1650,6 +1775,44 @@ static struct caam_hash_template driver_hash[] = { }, }, .alg_type = OP_ALG_ALGSEL_MD5, + }, { + .hmac_name = "xcbc(aes)", + .hmac_driver_name = "xcbc-aes-caam", + .blocksize = AES_BLOCK_SIZE, + .template_ahash = { + .init = ahash_init, + .update = ahash_update, + .final = ahash_final, + .finup = ahash_finup, + .digest = ahash_digest, + .export = ahash_export, + .import = ahash_import, + .setkey = axcbc_setkey, + .halg = { + .digestsize = AES_BLOCK_SIZE, + .statesize = sizeof(struct caam_export_state), + }, + }, + .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC, + }, { + .hmac_name = "cmac(aes)", + .hmac_driver_name = "cmac-aes-caam", + .blocksize = AES_BLOCK_SIZE, + .template_ahash = { + .init = ahash_init, + .update = ahash_update, + .final = ahash_final, + .finup = ahash_finup, + .digest = ahash_digest, + .export = ahash_export, + .import = ahash_import, + .setkey = acmac_setkey, + .halg = { + .digestsize = AES_BLOCK_SIZE, + .statesize = sizeof(struct caam_export_state), + }, + }, + .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC, }, }; @@ -1691,14 +1854,45 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) } priv = dev_get_drvdata(ctx->jrdev->parent); - ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; + + if (is_xcbc_aes(caam_hash->alg_type)) { + ctx->dir = DMA_TO_DEVICE; + ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; + ctx->ctx_len = 48; + + ctx->key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key, + ARRAY_SIZE(ctx->key), + DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC); + if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) { + dev_err(ctx->jrdev, "unable to map key\n"); + caam_jr_free(ctx->jrdev); + return -ENOMEM; + } + } else if (is_cmac_aes(caam_hash->alg_type)) { + ctx->dir = DMA_TO_DEVICE; + ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; + ctx->ctx_len = 32; + } else { + ctx->dir = priv->era >= 6 ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE; + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; + ctx->ctx_len = runninglen[(ctx->adata.algtype & + OP_ALG_ALGSEL_SUBMASK) >> + OP_ALG_ALGSEL_SHIFT]; + } dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update, - offsetof(struct caam_hash_ctx, - sh_desc_update_dma), + offsetof(struct caam_hash_ctx, key), ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); if (dma_mapping_error(ctx->jrdev, dma_addr)) { dev_err(ctx->jrdev, "unable to map shared descriptors\n"); + + if (is_xcbc_aes(caam_hash->alg_type)) + dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma, + ARRAY_SIZE(ctx->key), + DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC); + caam_jr_free(ctx->jrdev); return -ENOMEM; } @@ -1712,16 +1906,14 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx, sh_desc_digest); - /* copy descriptor header template value */ - ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; - - ctx->ctx_len = runninglen[(ctx->adata.algtype & - OP_ALG_ALGSEL_SUBMASK) >> - OP_ALG_ALGSEL_SHIFT]; - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct caam_hash_state)); - return ahash_set_sh_desc(ahash); + + /* + * For keyed hash algorithms shared descriptors + * will be created later in setkey() callback + */ + return alg->setkey ? 0 : ahash_set_sh_desc(ahash); } static void caam_hash_cra_exit(struct crypto_tfm *tfm) @@ -1729,9 +1921,12 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm) struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma, - offsetof(struct caam_hash_ctx, - sh_desc_update_dma), + offsetof(struct caam_hash_ctx, key), ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); + if (is_xcbc_aes(ctx->adata.algtype)) + dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma, + ARRAY_SIZE(ctx->key), DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC); caam_jr_free(ctx->jrdev); } @@ -1797,11 +1992,10 @@ static int __init caam_algapi_hash_init(void) { struct device_node *dev_node; struct platform_device *pdev; - struct device *ctrldev; int i = 0, err = 0; struct caam_drv_private *priv; unsigned int md_limit = SHA512_DIGEST_SIZE; - u32 cha_inst, cha_vid; + u32 md_inst, md_vid; dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); if (!dev_node) { @@ -1816,33 +2010,45 @@ static int __init caam_algapi_hash_init(void) return -ENODEV; } - ctrldev = &pdev->dev; - priv = dev_get_drvdata(ctrldev); + priv = dev_get_drvdata(&pdev->dev); of_node_put(dev_node); /* * If priv is NULL, it's probably because the caam driver wasn't * properly initialized (e.g. RNG4 init failed). Thus, bail out here. */ - if (!priv) - return -ENODEV; + if (!priv) { + err = -ENODEV; + goto out_put_dev; + } /* * Register crypto algorithms the device supports. First, identify * presence and attributes of MD block. */ - cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); - cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); + if (priv->era < 10) { + md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) & + CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & + CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + } else { + u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha); + + md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; + md_inst = mdha & CHA_VER_NUM_MASK; + } /* * Skip registration of any hashing algorithms if MD block * is not present. 
*/ - if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT)) - return -ENODEV; + if (!md_inst) { + err = -ENODEV; + goto out_put_dev; + } /* Limit digest size based on LP256 */ - if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256) + if (md_vid == CHA_VER_VID_MD_LP256) md_limit = SHA256_DIGEST_SIZE; INIT_LIST_HEAD(&hash_list); @@ -1853,14 +2059,16 @@ static int __init caam_algapi_hash_init(void) struct caam_hash_template *alg = driver_hash + i; /* If MD size is not supported by device, skip registration */ - if (alg->template_ahash.halg.digestsize > md_limit) + if (is_mdha(alg->alg_type) && + alg->template_ahash.halg.digestsize > md_limit) continue; /* register hmac version */ t_alg = caam_hash_alloc(alg, true); if (IS_ERR(t_alg)) { err = PTR_ERR(t_alg); - pr_warn("%s alg allocation failed\n", alg->driver_name); + pr_warn("%s alg allocation failed\n", + alg->hmac_driver_name); continue; } @@ -1873,6 +2081,9 @@ static int __init caam_algapi_hash_init(void) } else list_add_tail(&t_alg->entry, &hash_list); + if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES) + continue; + /* register unkeyed version */ t_alg = caam_hash_alloc(alg, false); if (IS_ERR(t_alg)) { @@ -1891,6 +2102,8 @@ static int __init caam_algapi_hash_init(void) list_add_tail(&t_alg->entry, &hash_list); } +out_put_dev: + put_device(&pdev->dev); return err; } diff --git a/drivers/crypto/caam/caamhash_desc.c b/drivers/crypto/caam/caamhash_desc.c index a12f7959a2c3..71d018343ee4 100644 --- a/drivers/crypto/caam/caamhash_desc.c +++ b/drivers/crypto/caam/caamhash_desc.c @@ -2,7 +2,7 @@ /* * Shared descriptors for ahash algorithms * - * Copyright 2017 NXP + * Copyright 2017-2019 NXP */ #include "compat.h" @@ -75,6 +75,72 @@ void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state, } EXPORT_SYMBOL(cnstr_shdsc_ahash); +/** + * cnstr_shdsc_sk_hash - shared descriptor for symmetric key cipher-based + * hash algorithms + * @desc: pointer to buffer used for descriptor construction + * @adata: pointer to authentication transform definitions. 
+ * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE} + * @digestsize: algorithm's digest size + * @ctx_len: size of Context Register + * @key_dma: I/O Virtual Address of the key + */ +void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state, + int digestsize, int ctx_len, dma_addr_t key_dma) +{ + u32 *skip_key_load; + + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); + + /* Skip loading of key, context if already shared */ + skip_key_load = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); + + if (state == OP_ALG_AS_INIT || state == OP_ALG_AS_INITFINAL) { + append_key_as_imm(desc, adata->key_virt, adata->keylen, + adata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); + } else { /* UPDATE, FINALIZE */ + if (is_xcbc_aes(adata->algtype)) + /* Load K1 */ + append_key(desc, adata->key_dma, adata->keylen, + CLASS_1 | KEY_DEST_CLASS_REG | KEY_ENC); + else /* CMAC */ + append_key_as_imm(desc, adata->key_virt, adata->keylen, + adata->keylen, CLASS_1 | + KEY_DEST_CLASS_REG); + /* Restore context */ + append_seq_load(desc, ctx_len, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT); + } + + set_jump_tgt_here(desc, skip_key_load); + + /* Class 1 operation */ + append_operation(desc, adata->algtype | state | OP_ALG_ENCRYPT); + + /* + * Load from buf and/or src and write to req->result or state->context + * Calculate remaining bytes to read + */ + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + + /* Read remaining bytes */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_LAST1 | + FIFOLD_TYPE_MSG | FIFOLDST_VLF); + + /* + * Save context: + * - xcbc: partial hash, keys K2 and K3 + * - cmac: partial hash, constant L = E(K,0) + */ + append_seq_store(desc, digestsize, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT); + if (is_xcbc_aes(adata->algtype) && state == OP_ALG_AS_INIT) + /* Save K1 */ + append_fifo_store(desc, key_dma, adata->keylen, + LDST_CLASS_1_CCB | FIFOST_TYPE_KEY_KEK); +} +EXPORT_SYMBOL(cnstr_shdsc_sk_hash); + MODULE_LICENSE("Dual BSD/GPL"); MODULE_DESCRIPTION("FSL CAAM ahash descriptors support"); MODULE_AUTHOR("NXP Semiconductors"); diff --git a/drivers/crypto/caam/caamhash_desc.h b/drivers/crypto/caam/caamhash_desc.h index 631fc1ac312c..6947ee1f200c 100644 --- a/drivers/crypto/caam/caamhash_desc.h +++ b/drivers/crypto/caam/caamhash_desc.h @@ -15,7 +15,15 @@ #define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ) #define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) +static inline bool is_xcbc_aes(u32 algtype) +{ + return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) == + (OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC); +} + void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state, int digestsize, int ctx_len, bool import_ctx, int era); +void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state, + int digestsize, int ctx_len, dma_addr_t key_dma); #endif /* _CAAMHASH_DESC_H_ */ diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 4fc209cbbeab..58285642306e 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -3,6 +3,7 @@ * caam - Freescale FSL CAAM support for Public Key Cryptography * * Copyright 2016 Freescale Semiconductor, Inc. + * Copyright 2018 NXP * * There is no Shared Descriptor for PKC so that the Job Descriptor must carry * all the desired key parameters, input and output pointers. 
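cnstr_shdsc_sk_hash() above is worth a gloss: INIT and INITFINAL descriptors embed the raw key as immediate data, while UPDATE and FINALIZE descriptors first restore the saved context and, for XCBC, reload K1 by DMA address with KEY_ENC, since after init only the KEK-encrypted copy written by the trailing append_fifo_store() still exists. A standalone sketch of that selection logic; the enum values are illustrative placeholders, not the hardware's OP_ALG_AS_* encoding:

#include <stdbool.h>
#include <stdio.h>

enum as_state { AS_INIT, AS_UPDATE, AS_FINALIZE, AS_INITFINAL };

/* True when the descriptor may carry the plaintext key inline,
 * false when it must reference the stored (encrypted) copy. */
static bool key_inline(enum as_state state, bool xcbc)
{
	if (state == AS_INIT || state == AS_INITFINAL)
		return true;	/* caller still holds the raw key */
	/* CMAC keeps key_virt around for later states; XCBC's K1
	 * survives only as the KEK-wrapped blob at key_dma. */
	return !xcbc;
}

int main(void)
{
	printf("xcbc update inlines key: %d\n", key_inline(AS_UPDATE, true));  /* 0 */
	printf("cmac update inlines key: %d\n", key_inline(AS_UPDATE, false)); /* 1 */
	return 0;
}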
@@ -1017,7 +1018,7 @@ static int __init caam_pkc_init(void) struct platform_device *pdev; struct device *ctrldev; struct caam_drv_private *priv; - u32 cha_inst, pk_inst; + u32 pk_inst; int err; dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); @@ -1041,16 +1042,23 @@ static int __init caam_pkc_init(void) * If priv is NULL, it's probably because the caam driver wasn't * properly initialized (e.g. RNG4 init failed). Thus, bail out here. */ - if (!priv) - return -ENODEV; + if (!priv) { + err = -ENODEV; + goto out_put_dev; + } /* Determine public key hardware accelerator presence. */ - cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); - pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT; + if (priv->era < 10) + pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & + CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT; + else + pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK; /* Do not register algorithms if PKHA is not present. */ - if (!pk_inst) - return -ENODEV; + if (!pk_inst) { + err = -ENODEV; + goto out_put_dev; + } err = crypto_register_akcipher(&caam_rsa); if (err) @@ -1059,6 +1067,8 @@ static int __init caam_pkc_init(void) else dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n"); +out_put_dev: + put_device(ctrldev); return err; } diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index 4318b0aa6fb9..95eb5402c59f 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c @@ -3,6 +3,7 @@ * caam - Freescale FSL CAAM support for hw_random * * Copyright 2011 Freescale Semiconductor, Inc. + * Copyright 2018 NXP * * Based on caamalg.c crypto API driver. * @@ -307,8 +308,8 @@ static int __init caam_rng_init(void) struct device *dev; struct device_node *dev_node; struct platform_device *pdev; - struct device *ctrldev; struct caam_drv_private *priv; + u32 rng_inst; int err; dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); @@ -324,25 +325,35 @@ static int __init caam_rng_init(void) return -ENODEV; } - ctrldev = &pdev->dev; - priv = dev_get_drvdata(ctrldev); + priv = dev_get_drvdata(&pdev->dev); of_node_put(dev_node); /* * If priv is NULL, it's probably because the caam driver wasn't * properly initialized (e.g. RNG4 init failed). Thus, bail out here. 
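The reworked error paths in caam_pkc_init() here, and in the hash and rng init routines above and below, are plain reference bookkeeping: the platform-device lookup takes a reference, so every exit, success included once the needed data has been read, must drop it exactly once via the new out_put_dev label. A self-contained model of that control flow with the device calls stubbed by a counter:

#include <stdio.h>

static int refs;
static void get_dev(void) { refs++; }	/* device lookup analogue */
static void put_dev(void) { refs--; }

static int probe_init(int hw_present)
{
	int err = 0;

	get_dev();
	if (!hw_present) {
		err = -19;	/* -ENODEV */
		goto out_put_dev;
	}
	/* ...read capability registers, register algorithms... */
out_put_dev:
	put_dev();	/* dropped on success and failure alike */
	return err;
}

int main(void)
{
	probe_init(0);
	probe_init(1);
	printf("outstanding refs: %d\n", refs);	/* 0: no leak either way */
	return 0;
}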
*/ - if (!priv) - return -ENODEV; + if (!priv) { + err = -ENODEV; + goto out_put_dev; + } /* Check for an instantiated RNG before registration */ - if (!(rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & CHA_ID_LS_RNG_MASK)) - return -ENODEV; + if (priv->era < 10) + rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & + CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT; + else + rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK; + + if (!rng_inst) { + err = -ENODEV; + goto out_put_dev; + } dev = caam_jr_alloc(); if (IS_ERR(dev)) { pr_err("Job Ring Device allocation for transform failed\n"); - return PTR_ERR(dev); + err = PTR_ERR(dev); + goto out_put_dev; } rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL); if (!rng_ctx) { @@ -353,6 +364,7 @@ static int __init caam_rng_init(void) if (err) goto free_rng_ctx; + put_device(&pdev->dev); dev_info(dev, "registering rng-caam\n"); return hwrng_register(&caam_rng); @@ -360,6 +372,8 @@ free_rng_ctx: kfree(rng_ctx); free_caam_alloc: caam_jr_free(dev); +out_put_dev: + put_device(&pdev->dev); return err; } diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h index 9604ff7a335e..8639b2df0371 100644 --- a/drivers/crypto/caam/compat.h +++ b/drivers/crypto/caam/compat.h @@ -36,11 +36,14 @@ #include <crypto/gcm.h> #include <crypto/sha.h> #include <crypto/md5.h> +#include <crypto/chacha.h> +#include <crypto/poly1305.h> #include <crypto/internal/aead.h> #include <crypto/authenc.h> #include <crypto/akcipher.h> #include <crypto/scatterwalk.h> #include <crypto/skcipher.h> +#include <crypto/arc4.h> #include <crypto/internal/skcipher.h> #include <crypto/internal/hash.h> #include <crypto/internal/rsa.h> diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 3fc793193821..858bdc9ab4a3 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -3,6 +3,7 @@ * Controller-level driver, kernel property detection, initialization * * Copyright 2008-2012 Freescale Semiconductor, Inc. 
+ * Copyright 2018 NXP */ #include <linux/device.h> @@ -17,12 +18,8 @@ #include "desc_constr.h" #include "ctrl.h" -bool caam_little_end; -EXPORT_SYMBOL(caam_little_end); bool caam_dpaa2; EXPORT_SYMBOL(caam_dpaa2); -bool caam_imx; -EXPORT_SYMBOL(caam_imx); #ifdef CONFIG_CAAM_QI #include "qi.h" @@ -106,7 +103,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc, struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl; struct caam_deco __iomem *deco = ctrlpriv->deco; unsigned int timeout = 100000; - u32 deco_dbg_reg, flags; + u32 deco_dbg_reg, deco_state, flags; int i; @@ -149,13 +146,22 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc, timeout = 10000000; do { deco_dbg_reg = rd_reg32(&deco->desc_dbg); + + if (ctrlpriv->era < 10) + deco_state = (deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) >> + DESC_DBG_DECO_STAT_SHIFT; + else + deco_state = (rd_reg32(&deco->dbg_exec) & + DESC_DER_DECO_STAT_MASK) >> + DESC_DER_DECO_STAT_SHIFT; + /* * If an error occurred in the descriptor, then * the DECO status field will be set to 0x0D */ - if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) == - DESC_DBG_DECO_STAT_HOST_ERR) + if (deco_state == DECO_STAT_HOST_ERR) break; + cpu_relax(); } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout); @@ -491,7 +497,7 @@ static int caam_probe(struct platform_device *pdev) struct caam_perfmon *perfmon; #endif u32 scfgr, comp_params; - u32 cha_vid_ls; + u8 rng_vid; int pg_size; int BLOCK_OFFSET = 0; @@ -733,15 +739,19 @@ static int caam_probe(struct platform_device *pdev) goto caam_remove; } - cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls); + if (ctrlpriv->era < 10) + rng_vid = (rd_reg32(&ctrl->perfmon.cha_id_ls) & + CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT; + else + rng_vid = (rd_reg32(&ctrl->vreg.rng) & CHA_VER_VID_MASK) >> + CHA_VER_VID_SHIFT; /* * If SEC has RNG version >= 4 and RNG state handle has not been * already instantiated, do RNG instantiation * In case of SoCs with Management Complex, RNG is managed by MC f/w.
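run_descriptor_deco0() now reads a 4-bit DECO status whose location moved between eras: bits 23:20 of desc_dbg before era 10, bits 19:16 of the new dbg_exec register from era 10 on, with 0xD meaning host error in both layouts. A small userspace sketch of the extraction, reusing the mask and shift values this patch adds to regs.h, with the register reads replaced by sample words:

#include <stdint.h>
#include <stdio.h>

#define DESC_DBG_DECO_STAT_MASK  0x00F00000
#define DESC_DBG_DECO_STAT_SHIFT 20
#define DESC_DER_DECO_STAT_MASK  0x000F0000
#define DESC_DER_DECO_STAT_SHIFT 16
#define DECO_STAT_HOST_ERR       0xD

static uint32_t deco_status(unsigned int era, uint32_t desc_dbg, uint32_t dbg_exec)
{
	if (era < 10)
		return (desc_dbg & DESC_DBG_DECO_STAT_MASK) >>
		       DESC_DBG_DECO_STAT_SHIFT;
	return (dbg_exec & DESC_DER_DECO_STAT_MASK) >> DESC_DER_DECO_STAT_SHIFT;
}

int main(void)
{
	/* era 10 sample: status nibble 0xD sits in dbg_exec */
	uint32_t st = deco_status(10, 0, 0x000D0000);

	printf("host error: %s\n", st == DECO_STAT_HOST_ERR ? "yes" : "no");
	return 0;
}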
*/ - if (!ctrlpriv->mc_en && - (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) { + if (!ctrlpriv->mc_en && rng_vid >= 4) { ctrlpriv->rng4_sh_init = rd_reg32(&ctrl->r4tst[0].rdsta); /* @@ -849,27 +859,18 @@ static int caam_probe(struct platform_device *pdev) /* Internal covering keys (useful in non-secure mode only) */ ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0]; ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32); - ctrlpriv->ctl_kek = debugfs_create_blob("kek", - S_IRUSR | - S_IRGRP | S_IROTH, - ctrlpriv->ctl, - &ctrlpriv->ctl_kek_wrap); + debugfs_create_blob("kek", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl, + &ctrlpriv->ctl_kek_wrap); ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0]; ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32); - ctrlpriv->ctl_tkek = debugfs_create_blob("tkek", - S_IRUSR | - S_IRGRP | S_IROTH, - ctrlpriv->ctl, - &ctrlpriv->ctl_tkek_wrap); + debugfs_create_blob("tkek", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl, + &ctrlpriv->ctl_tkek_wrap); ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0]; ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32); - ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk", - S_IRUSR | - S_IRGRP | S_IROTH, - ctrlpriv->ctl, - &ctrlpriv->ctl_tdsk_wrap); + debugfs_create_blob("tdsk", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl, + &ctrlpriv->ctl_tdsk_wrap); #endif return 0; diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h index f76ff160a02c..4b6854bf896a 100644 --- a/drivers/crypto/caam/desc.h +++ b/drivers/crypto/caam/desc.h @@ -4,6 +4,7 @@ * Definitions to support CAAM descriptor instruction generation * * Copyright 2008-2011 Freescale Semiconductor, Inc. + * Copyright 2018 NXP */ #ifndef DESC_H @@ -242,6 +243,7 @@ #define LDST_SRCDST_WORD_DESCBUF_SHARED (0x42 << LDST_SRCDST_SHIFT) #define LDST_SRCDST_WORD_DESCBUF_JOB_WE (0x45 << LDST_SRCDST_SHIFT) #define LDST_SRCDST_WORD_DESCBUF_SHARED_WE (0x46 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_INFO_FIFO_SM (0x71 << LDST_SRCDST_SHIFT) #define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT) /* Offset in source/destination */ @@ -284,6 +286,12 @@ #define LDLEN_SET_OFIFO_OFFSET_SHIFT 0 #define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT) +/* Special Length definitions when dst=sm, nfifo-{sm,m} */ +#define LDLEN_MATH0 0 +#define LDLEN_MATH1 1 +#define LDLEN_MATH2 2 +#define LDLEN_MATH3 3 + /* * FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE * Command Constructs @@ -408,6 +416,7 @@ #define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT) #define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT) #define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_METADATA (0x3e << FIFOST_TYPE_SHIFT) #define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT) /* @@ -1133,6 +1142,12 @@ #define OP_ALG_TYPE_CLASS1 (2 << OP_ALG_TYPE_SHIFT) #define OP_ALG_TYPE_CLASS2 (4 << OP_ALG_TYPE_SHIFT) +/* version register fields */ +#define OP_VER_CCHA_NUM 0x000000ff /* Number CCHAs instantiated */ +#define OP_VER_CCHA_MISC 0x0000ff00 /* CCHA Miscellaneous Information */ +#define OP_VER_CCHA_REV 0x00ff0000 /* CCHA Revision Number */ +#define OP_VER_CCHA_VID 0xff000000 /* CCHA Version ID */ + #define OP_ALG_ALGSEL_SHIFT 16 #define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT) #define OP_ALG_ALGSEL_SUBMASK (0x0f << OP_ALG_ALGSEL_SHIFT) @@ -1140,6 +1155,7 @@ #define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT) #define OP_ALG_ALGSEL_3DES (0x21 << 
OP_ALG_ALGSEL_SHIFT) #define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_CHA_MDHA (0x40 << OP_ALG_ALGSEL_SHIFT) #define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT) #define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT) #define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT) @@ -1152,6 +1168,8 @@ #define OP_ALG_ALGSEL_KASUMI (0x70 << OP_ALG_ALGSEL_SHIFT) #define OP_ALG_ALGSEL_CRC (0x90 << OP_ALG_ALGSEL_SHIFT) #define OP_ALG_ALGSEL_SNOW_F9 (0xA0 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_CHACHA20 (0xD0 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_POLY1305 (0xE0 << OP_ALG_ALGSEL_SHIFT) #define OP_ALG_AAI_SHIFT 4 #define OP_ALG_AAI_MASK (0x1ff << OP_ALG_AAI_SHIFT) @@ -1199,6 +1217,11 @@ #define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT) #define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT) +/* Chacha20 AAI set */ +#define OP_ALG_AAI_AEAD (0x002 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_KEYSTREAM (0x001 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_BC8 (0x008 << OP_ALG_AAI_SHIFT) + /* hmac/smac AAI set */ #define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT) #define OP_ALG_AAI_HMAC (0x01 << OP_ALG_AAI_SHIFT) @@ -1387,6 +1410,7 @@ #define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT) #define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT) #define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT) +#define MOVE_SRC_AUX_ABLK (0x0a << MOVE_SRC_SHIFT) #define MOVE_DEST_SHIFT 16 #define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT) @@ -1413,6 +1437,10 @@ #define MOVELEN_MRSEL_SHIFT 0 #define MOVELEN_MRSEL_MASK (0x3 << MOVE_LEN_SHIFT) +#define MOVELEN_MRSEL_MATH0 (0 << MOVELEN_MRSEL_SHIFT) +#define MOVELEN_MRSEL_MATH1 (1 << MOVELEN_MRSEL_SHIFT) +#define MOVELEN_MRSEL_MATH2 (2 << MOVELEN_MRSEL_SHIFT) +#define MOVELEN_MRSEL_MATH3 (3 << MOVELEN_MRSEL_SHIFT) /* * MATH Command Constructs @@ -1589,6 +1617,7 @@ #define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT) #define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT) #define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_POLY (0xB << NFIFOENTRY_DTYPE_SHIFT) #define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT) #define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT) diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h index d4256fa4a1d6..2980b8ef1fb1 100644 --- a/drivers/crypto/caam/desc_constr.h +++ b/drivers/crypto/caam/desc_constr.h @@ -189,6 +189,7 @@ static inline u32 *append_##cmd(u32 * const desc, u32 options) \ } APPEND_CMD_RET(jump, JUMP) APPEND_CMD_RET(move, MOVE) +APPEND_CMD_RET(move_len, MOVE_LEN) static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd) { @@ -327,7 +328,11 @@ static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \ u32 options) \ { \ PRINT_POS; \ - append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \ + if (options & LDST_LEN_MASK) \ + append_cmd(desc, CMD_##op | IMMEDIATE | options); \ + else \ + append_cmd(desc, CMD_##op | IMMEDIATE | options | \ + sizeof(type)); \ append_cmd(desc, immediate); \ } APPEND_CMD_RAW_IMM(load, LOAD, u32); diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c index 7e8d690f2827..21a70fd32f5d 100644 --- a/drivers/crypto/caam/error.c +++ b/drivers/crypto/caam/error.c @@ -50,6 +50,12 @@ void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, #endif /* DEBUG */ EXPORT_SYMBOL(caam_dump_sg); +bool caam_little_end; +EXPORT_SYMBOL(caam_little_end); + +bool caam_imx; +EXPORT_SYMBOL(caam_imx); + static const struct { u8 value; 
const char *error_text; diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h index 67ea94079837..8c6b83e02a70 100644 --- a/drivers/crypto/caam/error.h +++ b/drivers/crypto/caam/error.h @@ -7,6 +7,9 @@ #ifndef CAAM_ERROR_H #define CAAM_ERROR_H + +#include "desc.h" + #define CAAM_ERROR_STR_MAX 302 void caam_strstatus(struct device *dev, u32 status, bool qi_v2); @@ -17,4 +20,10 @@ void caam_strstatus(struct device *dev, u32 status, bool qi_v2); void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, int rowsize, int groupsize, struct scatterlist *sg, size_t tlen, bool ascii); + +static inline bool is_mdha(u32 algtype) +{ + return (algtype & OP_ALG_ALGSEL_MASK & ~OP_ALG_ALGSEL_SUBMASK) == + OP_ALG_CHA_MDHA; +} #endif /* CAAM_ERROR_H */ diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index babc78abd155..5869ad58d497 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h @@ -106,7 +106,6 @@ struct caam_drv_private { struct dentry *dfs_root; struct dentry *ctl; /* controller dir */ struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap; - struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk; #endif }; diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c index 312b5f042f31..8d0713fae6ac 100644 --- a/drivers/crypto/caam/key_gen.c +++ b/drivers/crypto/caam/key_gen.c @@ -48,7 +48,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, { u32 *desc; struct split_key_result result; - dma_addr_t dma_addr_in, dma_addr_out; + dma_addr_t dma_addr; int ret = -ENOMEM; adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK); @@ -71,22 +71,17 @@ int gen_split_key(struct device *jrdev, u8 *key_out, return ret; } - dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen, - DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, dma_addr_in)) { - dev_err(jrdev, "unable to map key input memory\n"); - goto out_free; - } + memcpy(key_out, key_in, keylen); - dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad, - DMA_FROM_DEVICE); - if (dma_mapping_error(jrdev, dma_addr_out)) { - dev_err(jrdev, "unable to map key output memory\n"); - goto out_unmap_in; + dma_addr = dma_map_single(jrdev, key_out, adata->keylen_pad, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(jrdev, dma_addr)) { + dev_err(jrdev, "unable to map key memory\n"); + goto out_free; } init_job_desc(desc, 0); - append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG); + append_key(desc, dma_addr, keylen, CLASS_2 | KEY_DEST_CLASS_REG); /* Sets MDHA up into an HMAC-INIT */ append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) | @@ -104,12 +99,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out, * FIFO_STORE with the explicit split-key content store * (0x26 output type) */ - append_fifo_store(desc, dma_addr_out, adata->keylen, + append_fifo_store(desc, dma_addr, adata->keylen, LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); #ifdef DEBUG - print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1); print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); #endif @@ -129,10 +122,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, #endif } - dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad, - DMA_FROM_DEVICE); -out_unmap_in: - dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE); + dma_unmap_single(jrdev, dma_addr, adata->keylen_pad, DMA_BIDIRECTIONAL); out_free: kfree(desc); return 
ret; diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c index b84e6c8b1e13..7cb8b1755e57 100644 --- a/drivers/crypto/caam/qi.c +++ b/drivers/crypto/caam/qi.c @@ -318,7 +318,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc) /* Create a new req FQ in parked state */ new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq, drv_ctx->context_a, 0); - if (unlikely(IS_ERR_OR_NULL(new_fq))) { + if (IS_ERR_OR_NULL(new_fq)) { dev_err(qidev, "FQ allocation for shdesc update failed\n"); return PTR_ERR(new_fq); } @@ -431,7 +431,7 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, /* Attach request FQ */ drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc, QMAN_INITFQ_FLAG_SCHED); - if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) { + if (IS_ERR_OR_NULL(drv_ctx->req_fq)) { dev_err(qidev, "create_caam_req_fq failed\n"); dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL); kfree(drv_ctx); diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 457815f965c0..3cd0822ea819 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -3,6 +3,7 @@ * CAAM hardware register-level view * * Copyright 2008-2011 Freescale Semiconductor, Inc. + * Copyright 2018 NXP */ #ifndef REGS_H @@ -211,6 +212,47 @@ struct jr_outentry { u32 jrstatus; /* Status for completed descriptor */ } __packed; +/* Version registers (Era 10+) e80-eff */ +struct version_regs { + u32 crca; /* CRCA_VERSION */ + u32 afha; /* AFHA_VERSION */ + u32 kfha; /* KFHA_VERSION */ + u32 pkha; /* PKHA_VERSION */ + u32 aesa; /* AESA_VERSION */ + u32 mdha; /* MDHA_VERSION */ + u32 desa; /* DESA_VERSION */ + u32 snw8a; /* SNW8A_VERSION */ + u32 snw9a; /* SNW9A_VERSION */ + u32 zuce; /* ZUCE_VERSION */ + u32 zuca; /* ZUCA_VERSION */ + u32 ccha; /* CCHA_VERSION */ + u32 ptha; /* PTHA_VERSION */ + u32 rng; /* RNG_VERSION */ + u32 trng; /* TRNG_VERSION */ + u32 aaha; /* AAHA_VERSION */ + u32 rsvd[10]; + u32 sr; /* SR_VERSION */ + u32 dma; /* DMA_VERSION */ + u32 ai; /* AI_VERSION */ + u32 qi; /* QI_VERSION */ + u32 jr; /* JR_VERSION */ + u32 deco; /* DECO_VERSION */ +}; + +/* Version registers bitfields */ + +/* Number of CHAs instantiated */ +#define CHA_VER_NUM_MASK 0xffull +/* CHA Miscellaneous Information */ +#define CHA_VER_MISC_SHIFT 8 +#define CHA_VER_MISC_MASK (0xffull << CHA_VER_MISC_SHIFT) +/* CHA Revision Number */ +#define CHA_VER_REV_SHIFT 16 +#define CHA_VER_REV_MASK (0xffull << CHA_VER_REV_SHIFT) +/* CHA Version ID */ +#define CHA_VER_VID_SHIFT 24 +#define CHA_VER_VID_MASK (0xffull << CHA_VER_VID_SHIFT) + /* * caam_perfmon - Performance Monitor/Secure Memory Status/ * CAAM Global Status/Component Version IDs @@ -223,15 +265,13 @@ struct jr_outentry { #define CHA_NUM_MS_DECONUM_MASK (0xfull << CHA_NUM_MS_DECONUM_SHIFT) /* - * CHA version IDs / instantiation bitfields + * CHA version IDs / instantiation bitfields (< Era 10) * Defined for use with the cha_id fields in perfmon, but the same shift/mask * selectors can be used to pull out the number of instantiated blocks within * cha_num fields in perfmon because the locations are the same. 
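Each word in the new version_regs block packs four 8-bit fields: instance count in bits 7:0, miscellaneous info in 15:8, revision in 23:16 and version ID in 31:24, superseding the 4-bit nibbles of the pre-era-10 cha_id/cha_num words. A minimal decode of one such register; the sample value is invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define CHA_VER_NUM_MASK   0xffull
#define CHA_VER_MISC_SHIFT 8
#define CHA_VER_MISC_MASK  (0xffull << CHA_VER_MISC_SHIFT)
#define CHA_VER_REV_SHIFT  16
#define CHA_VER_REV_MASK   (0xffull << CHA_VER_REV_SHIFT)
#define CHA_VER_VID_SHIFT  24
#define CHA_VER_VID_MASK   (0xffull << CHA_VER_VID_SHIFT)

int main(void)
{
	uint32_t v = 0x04020103;	/* hypothetical AESA_VERSION read */

	printf("vid=%llu rev=%llu misc=%llu num=%llu\n",
	       (v & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT,
	       (v & CHA_VER_REV_MASK) >> CHA_VER_REV_SHIFT,
	       (v & CHA_VER_MISC_MASK) >> CHA_VER_MISC_SHIFT,
	       v & CHA_VER_NUM_MASK);
	return 0;
}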
*/ #define CHA_ID_LS_AES_SHIFT 0 #define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT) -#define CHA_ID_LS_AES_LP (0x3ull << CHA_ID_LS_AES_SHIFT) -#define CHA_ID_LS_AES_HP (0x4ull << CHA_ID_LS_AES_SHIFT) #define CHA_ID_LS_DES_SHIFT 4 #define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT) @@ -241,9 +281,6 @@ struct jr_outentry { #define CHA_ID_LS_MD_SHIFT 12 #define CHA_ID_LS_MD_MASK (0xfull << CHA_ID_LS_MD_SHIFT) -#define CHA_ID_LS_MD_LP256 (0x0ull << CHA_ID_LS_MD_SHIFT) -#define CHA_ID_LS_MD_LP512 (0x1ull << CHA_ID_LS_MD_SHIFT) -#define CHA_ID_LS_MD_HP (0x2ull << CHA_ID_LS_MD_SHIFT) #define CHA_ID_LS_RNG_SHIFT 16 #define CHA_ID_LS_RNG_MASK (0xfull << CHA_ID_LS_RNG_SHIFT) @@ -269,6 +306,13 @@ struct jr_outentry { #define CHA_ID_MS_JR_SHIFT 28 #define CHA_ID_MS_JR_MASK (0xfull << CHA_ID_MS_JR_SHIFT) +/* Specific CHA version IDs */ +#define CHA_VER_VID_AES_LP 0x3ull +#define CHA_VER_VID_AES_HP 0x4ull +#define CHA_VER_VID_MD_LP256 0x0ull +#define CHA_VER_VID_MD_LP512 0x1ull +#define CHA_VER_VID_MD_HP 0x2ull + struct sec_vid { u16 ip_id; u8 maj_rev; @@ -479,8 +523,10 @@ struct caam_ctrl { struct rng4tst r4tst[2]; }; - u32 rsvd9[448]; + u32 rsvd9[416]; + /* Version registers - introduced with era 10 e80-eff */ + struct version_regs vreg; /* Performance Monitor f00-fff */ struct caam_perfmon perfmon; }; @@ -570,8 +616,10 @@ struct caam_job_ring { u32 rsvd11; u32 jrcommand; /* JRCRx - JobR command */ - u32 rsvd12[932]; + u32 rsvd12[900]; + /* Version registers - introduced with era 10 e80-eff */ + struct version_regs vreg; /* Performance Monitor f00-fff */ struct caam_perfmon perfmon; }; @@ -878,13 +926,19 @@ struct caam_deco { u32 rsvd29[48]; u32 descbuf[64]; /* DxDESB - Descriptor buffer */ u32 rscvd30[193]; -#define DESC_DBG_DECO_STAT_HOST_ERR 0x00D00000 #define DESC_DBG_DECO_STAT_VALID 0x80000000 #define DESC_DBG_DECO_STAT_MASK 0x00F00000 +#define DESC_DBG_DECO_STAT_SHIFT 20 u32 desc_dbg; /* DxDDR - DECO Debug Register */ - u32 rsvd31[126]; + u32 rsvd31[13]; +#define DESC_DER_DECO_STAT_MASK 0x000F0000 +#define DESC_DER_DECO_STAT_SHIFT 16 + u32 dbg_exec; /* DxDER - DECO Debug Exec Register */ + u32 rsvd32[112]; }; +#define DECO_STAT_HOST_ERR 0xD + #define DECO_JQCR_WHL 0x20000000 #define DECO_JQCR_FOUR 0x10000000 diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c index 06ad85ab5e86..a876535529d1 100644 --- a/drivers/crypto/cavium/cpt/cptpf_main.c +++ b/drivers/crypto/cavium/cpt/cptpf_main.c @@ -278,8 +278,8 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae) mcode->num_cores = is_ae ? 6 : 10; /* Allocate DMAable space */ - mcode->code = dma_zalloc_coherent(&cpt->pdev->dev, mcode->code_size, - &mcode->phys_base, GFP_KERNEL); + mcode->code = dma_alloc_coherent(&cpt->pdev->dev, mcode->code_size, + &mcode->phys_base, GFP_KERNEL); if (!mcode->code) { dev_err(dev, "Unable to allocate space for microcode"); ret = -ENOMEM; diff --git a/drivers/crypto/cavium/cpt/cptvf_main.c b/drivers/crypto/cavium/cpt/cptvf_main.c index 5c796ed55eba..2ca431ed1db8 100644 --- a/drivers/crypto/cavium/cpt/cptvf_main.c +++ b/drivers/crypto/cavium/cpt/cptvf_main.c @@ -236,9 +236,10 @@ static int alloc_command_queues(struct cpt_vf *cptvf, c_size = (rem_q_size > qcsize_bytes) ? 
qcsize_bytes : rem_q_size; - curr->head = (u8 *)dma_zalloc_coherent(&pdev->dev, - c_size + CPT_NEXT_CHUNK_PTR_SIZE, - &curr->dma_addr, GFP_KERNEL); + curr->head = (u8 *)dma_alloc_coherent(&pdev->dev, + c_size + CPT_NEXT_CHUNK_PTR_SIZE, + &curr->dma_addr, + GFP_KERNEL); if (!curr->head) { dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n", i, queue->nchunks); diff --git a/drivers/crypto/cavium/nitrox/Makefile b/drivers/crypto/cavium/nitrox/Makefile index e12954791673..f83991aaf820 100644 --- a/drivers/crypto/cavium/nitrox/Makefile +++ b/drivers/crypto/cavium/nitrox/Makefile @@ -6,7 +6,10 @@ n5pf-objs := nitrox_main.o \ nitrox_lib.o \ nitrox_hal.o \ nitrox_reqmgr.o \ - nitrox_algs.o + nitrox_algs.o \ + nitrox_mbx.o \ + nitrox_skcipher.o \ + nitrox_aead.o n5pf-$(CONFIG_PCI_IOV) += nitrox_sriov.o n5pf-$(CONFIG_DEBUG_FS) += nitrox_debugfs.o diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c new file mode 100644 index 000000000000..4f43eacd2557 --- /dev/null +++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c @@ -0,0 +1,364 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/kernel.h> +#include <linux/printk.h> +#include <linux/crypto.h> +#include <linux/rtnetlink.h> + +#include <crypto/aead.h> +#include <crypto/authenc.h> +#include <crypto/des.h> +#include <crypto/sha.h> +#include <crypto/internal/aead.h> +#include <crypto/scatterwalk.h> +#include <crypto/gcm.h> + +#include "nitrox_dev.h" +#include "nitrox_common.h" +#include "nitrox_req.h" + +#define GCM_AES_SALT_SIZE 4 + +/** + * struct nitrox_crypt_params - Params to set nitrox crypto request. + * @cryptlen: Encryption/Decryption data length + * @authlen: Assoc data length + Cryptlen + * @srclen: Input buffer length + * @dstlen: Output buffer length + * @iv: IV data + * @ivsize: IV data length + * @ctrl_arg: Identifies the request type (ENCRYPT/DECRYPT) + */ +struct nitrox_crypt_params { + unsigned int cryptlen; + unsigned int authlen; + unsigned int srclen; + unsigned int dstlen; + u8 *iv; + int ivsize; + u8 ctrl_arg; +}; + +union gph_p3 { + struct { +#ifdef __BIG_ENDIAN_BITFIELD + u16 iv_offset : 8; + u16 auth_offset : 8; +#else + u16 auth_offset : 8; + u16 iv_offset : 8; +#endif + }; + u16 param; +}; + +static int nitrox_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + int aes_keylen; + struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); + struct flexi_crypto_context *fctx; + union fc_ctx_flags flags; + + aes_keylen = flexi_aes_keylen(keylen); + if (aes_keylen < 0) { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + /* fill crypto context */ + fctx = nctx->u.fctx; + flags.f = be64_to_cpu(fctx->flags.f); + flags.w0.aes_keylen = aes_keylen; + fctx->flags.f = cpu_to_be64(flags.f); + + /* copy enc key to context */ + memset(&fctx->crypto, 0, sizeof(fctx->crypto)); + memcpy(fctx->crypto.u.key, key, keylen); + + return 0; +} + +static int nitrox_aead_setauthsize(struct crypto_aead *aead, + unsigned int authsize) +{ + struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); + struct flexi_crypto_context *fctx = nctx->u.fctx; + union fc_ctx_flags flags; + + flags.f = be64_to_cpu(fctx->flags.f); + flags.w0.mac_len = authsize; + fctx->flags.f = cpu_to_be64(flags.f); + + aead->authsize = authsize; + + return 0; +} + +static int alloc_src_sglist(struct aead_request *areq, char *iv, int ivsize, + int buflen) +{ + struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq); + int nents = 
sg_nents_for_len(areq->src, buflen) + 1; + int ret; + + if (nents < 0) + return nents; + + /* Allocate buffer to hold IV and input scatterlist array */ + ret = alloc_src_req_buf(nkreq, nents, ivsize); + if (ret) + return ret; + + nitrox_creq_copy_iv(nkreq->src, iv, ivsize); + nitrox_creq_set_src_sg(nkreq, nents, ivsize, areq->src, buflen); + + return 0; +} + +static int alloc_dst_sglist(struct aead_request *areq, int ivsize, int buflen) +{ + struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq); + int nents = sg_nents_for_len(areq->dst, buflen) + 3; + int ret; + + if (nents < 0) + return nents; + + /* Allocate buffer to hold ORH, COMPLETION and output scatterlist + * array + */ + ret = alloc_dst_req_buf(nkreq, nents); + if (ret) + return ret; + + nitrox_creq_set_orh(nkreq); + nitrox_creq_set_comp(nkreq); + nitrox_creq_set_dst_sg(nkreq, nents, ivsize, areq->dst, buflen); + + return 0; +} + +static void free_src_sglist(struct aead_request *areq) +{ + struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq); + + kfree(nkreq->src); +} + +static void free_dst_sglist(struct aead_request *areq) +{ + struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq); + + kfree(nkreq->dst); +} + +static int nitrox_set_creq(struct aead_request *areq, + struct nitrox_crypt_params *params) +{ + struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq); + struct se_crypto_request *creq = &nkreq->creq; + struct crypto_aead *aead = crypto_aead_reqtfm(areq); + union gph_p3 param3; + struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); + int ret; + + creq->flags = areq->base.flags; + creq->gfp = (areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC; + + creq->ctrl.value = 0; + creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC; + creq->ctrl.s.arg = params->ctrl_arg; + + creq->gph.param0 = cpu_to_be16(params->cryptlen); + creq->gph.param1 = cpu_to_be16(params->authlen); + creq->gph.param2 = cpu_to_be16(params->ivsize + areq->assoclen); + param3.iv_offset = 0; + param3.auth_offset = params->ivsize; + creq->gph.param3 = cpu_to_be16(param3.param); + + creq->ctx_handle = nctx->u.ctx_handle; + creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context); + + ret = alloc_src_sglist(areq, params->iv, params->ivsize, + params->srclen); + if (ret) + return ret; + + ret = alloc_dst_sglist(areq, params->ivsize, params->dstlen); + if (ret) { + free_src_sglist(areq); + return ret; + } + + return 0; +} + +static void nitrox_aead_callback(void *arg, int err) +{ + struct aead_request *areq = arg; + + free_src_sglist(areq); + free_dst_sglist(areq); + if (err) { + pr_err_ratelimited("request failed status 0x%0x\n", err); + err = -EINVAL; + } + + areq->base.complete(&areq->base, err); +} + +static int nitrox_aes_gcm_enc(struct aead_request *areq) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(areq); + struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); + struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq); + struct se_crypto_request *creq = &nkreq->creq; + struct flexi_crypto_context *fctx = nctx->u.fctx; + struct nitrox_crypt_params params; + int ret; + + memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE); + + memset(¶ms, 0, sizeof(params)); + params.cryptlen = areq->cryptlen; + params.authlen = areq->assoclen + params.cryptlen; + params.srclen = params.authlen; + params.dstlen = params.srclen + aead->authsize; + params.iv = &areq->iv[GCM_AES_SALT_SIZE]; + params.ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE; + params.ctrl_arg = ENCRYPT; + ret = nitrox_set_creq(areq, ¶ms); + if (ret) + 
return ret; + + /* send the crypto request */ + return nitrox_process_se_request(nctx->ndev, creq, nitrox_aead_callback, + areq); +} + +static int nitrox_aes_gcm_dec(struct aead_request *areq) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(areq); + struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); + struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq); + struct se_crypto_request *creq = &nkreq->creq; + struct flexi_crypto_context *fctx = nctx->u.fctx; + struct nitrox_crypt_params params; + int ret; + + memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE); + + memset(¶ms, 0, sizeof(params)); + params.cryptlen = areq->cryptlen - aead->authsize; + params.authlen = areq->assoclen + params.cryptlen; + params.srclen = areq->cryptlen + areq->assoclen; + params.dstlen = params.srclen - aead->authsize; + params.iv = &areq->iv[GCM_AES_SALT_SIZE]; + params.ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE; + params.ctrl_arg = DECRYPT; + ret = nitrox_set_creq(areq, ¶ms); + if (ret) + return ret; + + /* send the crypto request */ + return nitrox_process_se_request(nctx->ndev, creq, nitrox_aead_callback, + areq); +} + +static int nitrox_aead_init(struct crypto_aead *aead) +{ + struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); + struct crypto_ctx_hdr *chdr; + + /* get the first device */ + nctx->ndev = nitrox_get_first_device(); + if (!nctx->ndev) + return -ENODEV; + + /* allocate nitrox crypto context */ + chdr = crypto_alloc_context(nctx->ndev); + if (!chdr) { + nitrox_put_device(nctx->ndev); + return -ENOMEM; + } + nctx->chdr = chdr; + nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr + + sizeof(struct ctx_hdr)); + nctx->u.fctx->flags.f = 0; + + return 0; +} + +static int nitrox_aes_gcm_init(struct crypto_aead *aead) +{ + int ret; + struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); + union fc_ctx_flags *flags; + + ret = nitrox_aead_init(aead); + if (ret) + return ret; + + flags = &nctx->u.fctx->flags; + flags->w0.cipher_type = CIPHER_AES_GCM; + flags->w0.hash_type = AUTH_NULL; + flags->w0.iv_source = IV_FROM_DPTR; + /* ask microcode to calculate ipad/opad */ + flags->w0.auth_input_type = 1; + flags->f = be64_to_cpu(flags->f); + + crypto_aead_set_reqsize(aead, sizeof(struct aead_request) + + sizeof(struct nitrox_kcrypt_request)); + + return 0; +} + +static void nitrox_aead_exit(struct crypto_aead *aead) +{ + struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); + + /* free the nitrox crypto context */ + if (nctx->u.ctx_handle) { + struct flexi_crypto_context *fctx = nctx->u.fctx; + + memzero_explicit(&fctx->crypto, sizeof(struct crypto_keys)); + memzero_explicit(&fctx->auth, sizeof(struct auth_keys)); + crypto_free_context((void *)nctx->chdr); + } + nitrox_put_device(nctx->ndev); + + nctx->u.ctx_handle = 0; + nctx->ndev = NULL; +} + +static struct aead_alg nitrox_aeads[] = { { + .base = { + .cra_name = "gcm(aes)", + .cra_driver_name = "n5_aes_gcm", + .cra_priority = PRIO, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + .setkey = nitrox_aes_gcm_setkey, + .setauthsize = nitrox_aead_setauthsize, + .encrypt = nitrox_aes_gcm_enc, + .decrypt = nitrox_aes_gcm_dec, + .init = nitrox_aes_gcm_init, + .exit = nitrox_aead_exit, + .ivsize = GCM_AES_IV_SIZE, + .maxauthsize = AES_BLOCK_SIZE, +} }; + +int nitrox_register_aeads(void) +{ + return crypto_register_aeads(nitrox_aeads, ARRAY_SIZE(nitrox_aeads)); +} + +void nitrox_unregister_aeads(void) +{ + 
crypto_unregister_aeads(nitrox_aeads, ARRAY_SIZE(nitrox_aeads)); +} diff --git a/drivers/crypto/cavium/nitrox/nitrox_algs.c b/drivers/crypto/cavium/nitrox/nitrox_algs.c index 2ae6124e5da6..d646ae5f29b0 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_algs.c +++ b/drivers/crypto/cavium/nitrox/nitrox_algs.c @@ -1,458 +1,24 @@ -// SPDX-License-Identifier: GPL-2.0 -#include <linux/crypto.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/printk.h> - -#include <crypto/aes.h> -#include <crypto/skcipher.h> -#include <crypto/ctr.h> -#include <crypto/des.h> -#include <crypto/xts.h> - -#include "nitrox_dev.h" #include "nitrox_common.h" -#include "nitrox_req.h" - -#define PRIO 4001 - -struct nitrox_cipher { - const char *name; - enum flexi_cipher value; -}; - -/** - * supported cipher list - */ -static const struct nitrox_cipher flexi_cipher_table[] = { - { "null", CIPHER_NULL }, - { "cbc(des3_ede)", CIPHER_3DES_CBC }, - { "ecb(des3_ede)", CIPHER_3DES_ECB }, - { "cbc(aes)", CIPHER_AES_CBC }, - { "ecb(aes)", CIPHER_AES_ECB }, - { "cfb(aes)", CIPHER_AES_CFB }, - { "rfc3686(ctr(aes))", CIPHER_AES_CTR }, - { "xts(aes)", CIPHER_AES_XTS }, - { "cts(cbc(aes))", CIPHER_AES_CBC_CTS }, - { NULL, CIPHER_INVALID } -}; - -static enum flexi_cipher flexi_cipher_type(const char *name) -{ - const struct nitrox_cipher *cipher = flexi_cipher_table; - - while (cipher->name) { - if (!strcmp(cipher->name, name)) - break; - cipher++; - } - return cipher->value; -} - -static int flexi_aes_keylen(int keylen) -{ - int aes_keylen; - - switch (keylen) { - case AES_KEYSIZE_128: - aes_keylen = 1; - break; - case AES_KEYSIZE_192: - aes_keylen = 2; - break; - case AES_KEYSIZE_256: - aes_keylen = 3; - break; - default: - aes_keylen = -EINVAL; - break; - } - return aes_keylen; -} - -static int nitrox_skcipher_init(struct crypto_skcipher *tfm) -{ - struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm); - void *fctx; - - /* get the first device */ - nctx->ndev = nitrox_get_first_device(); - if (!nctx->ndev) - return -ENODEV; - - /* allocate nitrox crypto context */ - fctx = crypto_alloc_context(nctx->ndev); - if (!fctx) { - nitrox_put_device(nctx->ndev); - return -ENOMEM; - } - nctx->u.ctx_handle = (uintptr_t)fctx; - crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(tfm) + - sizeof(struct nitrox_kcrypt_request)); - return 0; -} - -static void nitrox_skcipher_exit(struct crypto_skcipher *tfm) -{ - struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm); - - /* free the nitrox crypto context */ - if (nctx->u.ctx_handle) { - struct flexi_crypto_context *fctx = nctx->u.fctx; - - memset(&fctx->crypto, 0, sizeof(struct crypto_keys)); - memset(&fctx->auth, 0, sizeof(struct auth_keys)); - crypto_free_context((void *)fctx); - } - nitrox_put_device(nctx->ndev); - - nctx->u.ctx_handle = 0; - nctx->ndev = NULL; -} -static inline int nitrox_skcipher_setkey(struct crypto_skcipher *cipher, - int aes_keylen, const u8 *key, - unsigned int keylen) -{ - struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); - struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm); - struct flexi_crypto_context *fctx; - enum flexi_cipher cipher_type; - const char *name; - - name = crypto_tfm_alg_name(tfm); - cipher_type = flexi_cipher_type(name); - if (unlikely(cipher_type == CIPHER_INVALID)) { - pr_err("unsupported cipher: %s\n", name); - return -EINVAL; - } - - /* fill crypto context */ - fctx = nctx->u.fctx; - fctx->flags = 0; - fctx->w0.cipher_type = cipher_type; - fctx->w0.aes_keylen = aes_keylen; - fctx->w0.iv_source = IV_FROM_DPTR; 
- fctx->flags = cpu_to_be64(*(u64 *)&fctx->w0); - /* copy the key to context */ - memcpy(fctx->crypto.u.key, key, keylen); - - return 0; -} - -static int nitrox_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, - unsigned int keylen) +int nitrox_crypto_register(void) { - int aes_keylen; + int err; - aes_keylen = flexi_aes_keylen(keylen); - if (aes_keylen < 0) { - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); -} + err = nitrox_register_skciphers(); + if (err) + return err; -static void nitrox_skcipher_callback(struct skcipher_request *skreq, - int err) -{ + err = nitrox_register_aeads(); if (err) { - pr_err_ratelimited("request failed status 0x%0x\n", err); - err = -EINVAL; - } - skcipher_request_complete(skreq, err); -} - -static int nitrox_skcipher_crypt(struct skcipher_request *skreq, bool enc) -{ - struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(skreq); - struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(cipher); - struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); - int ivsize = crypto_skcipher_ivsize(cipher); - struct se_crypto_request *creq; - - creq = &nkreq->creq; - creq->flags = skreq->base.flags; - creq->gfp = (skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? - GFP_KERNEL : GFP_ATOMIC; - - /* fill the request */ - creq->ctrl.value = 0; - creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC; - creq->ctrl.s.arg = (enc ? ENCRYPT : DECRYPT); - /* param0: length of the data to be encrypted */ - creq->gph.param0 = cpu_to_be16(skreq->cryptlen); - creq->gph.param1 = 0; - /* param2: encryption data offset */ - creq->gph.param2 = cpu_to_be16(ivsize); - creq->gph.param3 = 0; - - creq->ctx_handle = nctx->u.ctx_handle; - creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context); - - /* copy the iv */ - memcpy(creq->iv, skreq->iv, ivsize); - creq->ivsize = ivsize; - creq->src = skreq->src; - creq->dst = skreq->dst; - - nkreq->nctx = nctx; - nkreq->skreq = skreq; - - /* send the crypto request */ - return nitrox_process_se_request(nctx->ndev, creq, - nitrox_skcipher_callback, skreq); -} - -static int nitrox_aes_encrypt(struct skcipher_request *skreq) -{ - return nitrox_skcipher_crypt(skreq, true); -} - -static int nitrox_aes_decrypt(struct skcipher_request *skreq) -{ - return nitrox_skcipher_crypt(skreq, false); -} - -static int nitrox_3des_setkey(struct crypto_skcipher *cipher, - const u8 *key, unsigned int keylen) -{ - if (keylen != DES3_EDE_KEY_SIZE) { - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - - return nitrox_skcipher_setkey(cipher, 0, key, keylen); -} - -static int nitrox_3des_encrypt(struct skcipher_request *skreq) -{ - return nitrox_skcipher_crypt(skreq, true); -} - -static int nitrox_3des_decrypt(struct skcipher_request *skreq) -{ - return nitrox_skcipher_crypt(skreq, false); -} - -static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher, - const u8 *key, unsigned int keylen) -{ - struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); - struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm); - struct flexi_crypto_context *fctx; - int aes_keylen, ret; - - ret = xts_check_key(tfm, key, keylen); - if (ret) - return ret; - - keylen /= 2; - - aes_keylen = flexi_aes_keylen(keylen); - if (aes_keylen < 0) { - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; + nitrox_unregister_skciphers(); + return err; } - fctx = nctx->u.fctx; - /* copy KEY2 */ - memcpy(fctx->auth.u.key2, (key + keylen), 
keylen); - - return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); -} - -static int nitrox_aes_ctr_rfc3686_setkey(struct crypto_skcipher *cipher, - const u8 *key, unsigned int keylen) -{ - struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); - struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm); - struct flexi_crypto_context *fctx; - int aes_keylen; - - if (keylen < CTR_RFC3686_NONCE_SIZE) - return -EINVAL; - - fctx = nctx->u.fctx; - - memcpy(fctx->crypto.iv, key + (keylen - CTR_RFC3686_NONCE_SIZE), - CTR_RFC3686_NONCE_SIZE); - - keylen -= CTR_RFC3686_NONCE_SIZE; - - aes_keylen = flexi_aes_keylen(keylen); - if (aes_keylen < 0) { - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); -} - -static struct skcipher_alg nitrox_skciphers[] = { { - .base = { - .cra_name = "cbc(aes)", - .cra_driver_name = "n5_cbc(aes)", - .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - }, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = nitrox_aes_setkey, - .encrypt = nitrox_aes_encrypt, - .decrypt = nitrox_aes_decrypt, - .init = nitrox_skcipher_init, - .exit = nitrox_skcipher_exit, -}, { - .base = { - .cra_name = "ecb(aes)", - .cra_driver_name = "n5_ecb(aes)", - .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - }, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = nitrox_aes_setkey, - .encrypt = nitrox_aes_encrypt, - .decrypt = nitrox_aes_decrypt, - .init = nitrox_skcipher_init, - .exit = nitrox_skcipher_exit, -}, { - .base = { - .cra_name = "cfb(aes)", - .cra_driver_name = "n5_cfb(aes)", - .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - }, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = nitrox_aes_setkey, - .encrypt = nitrox_aes_encrypt, - .decrypt = nitrox_aes_decrypt, - .init = nitrox_skcipher_init, - .exit = nitrox_skcipher_exit, -}, { - .base = { - .cra_name = "xts(aes)", - .cra_driver_name = "n5_xts(aes)", - .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - }, - .min_keysize = 2 * AES_MIN_KEY_SIZE, - .max_keysize = 2 * AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = nitrox_aes_xts_setkey, - .encrypt = nitrox_aes_encrypt, - .decrypt = nitrox_aes_decrypt, - .init = nitrox_skcipher_init, - .exit = nitrox_skcipher_exit, -}, { - .base = { - .cra_name = "rfc3686(ctr(aes))", - .cra_driver_name = "n5_rfc3686(ctr(aes))", - .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - }, - .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, - .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, - .ivsize = CTR_RFC3686_IV_SIZE, - .init = nitrox_skcipher_init, - .exit = nitrox_skcipher_exit, - .setkey = 
nitrox_aes_ctr_rfc3686_setkey, - .encrypt = nitrox_aes_encrypt, - .decrypt = nitrox_aes_decrypt, -}, { - .base = { - .cra_name = "cts(cbc(aes))", - .cra_driver_name = "n5_cts(cbc(aes))", - .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), - .cra_alignmask = 0, - .cra_type = &crypto_ablkcipher_type, - .cra_module = THIS_MODULE, - }, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = nitrox_aes_setkey, - .encrypt = nitrox_aes_encrypt, - .decrypt = nitrox_aes_decrypt, - .init = nitrox_skcipher_init, - .exit = nitrox_skcipher_exit, -}, { - .base = { - .cra_name = "cbc(des3_ede)", - .cra_driver_name = "n5_cbc(des3_ede)", - .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - }, - .min_keysize = DES3_EDE_KEY_SIZE, - .max_keysize = DES3_EDE_KEY_SIZE, - .ivsize = DES3_EDE_BLOCK_SIZE, - .setkey = nitrox_3des_setkey, - .encrypt = nitrox_3des_encrypt, - .decrypt = nitrox_3des_decrypt, - .init = nitrox_skcipher_init, - .exit = nitrox_skcipher_exit, -}, { - .base = { - .cra_name = "ecb(des3_ede)", - .cra_driver_name = "n5_ecb(des3_ede)", - .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - }, - .min_keysize = DES3_EDE_KEY_SIZE, - .max_keysize = DES3_EDE_KEY_SIZE, - .ivsize = DES3_EDE_BLOCK_SIZE, - .setkey = nitrox_3des_setkey, - .encrypt = nitrox_3des_encrypt, - .decrypt = nitrox_3des_decrypt, - .init = nitrox_skcipher_init, - .exit = nitrox_skcipher_exit, -} - -}; - -int nitrox_crypto_register(void) -{ - return crypto_register_skciphers(nitrox_skciphers, - ARRAY_SIZE(nitrox_skciphers)); + return 0; } void nitrox_crypto_unregister(void) { - crypto_unregister_skciphers(nitrox_skciphers, - ARRAY_SIZE(nitrox_skciphers)); + nitrox_unregister_aeads(); + nitrox_unregister_skciphers(); } diff --git a/drivers/crypto/cavium/nitrox/nitrox_common.h b/drivers/crypto/cavium/nitrox/nitrox_common.h index 863143a8336b..e4be69d7e6e5 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_common.h +++ b/drivers/crypto/cavium/nitrox/nitrox_common.h @@ -7,6 +7,10 @@ int nitrox_crypto_register(void); void nitrox_crypto_unregister(void); +int nitrox_register_aeads(void); +void nitrox_unregister_aeads(void); +int nitrox_register_skciphers(void); +void nitrox_unregister_skciphers(void); void *crypto_alloc_context(struct nitrox_device *ndev); void crypto_free_context(void *ctx); struct nitrox_device *nitrox_get_first_device(void); @@ -19,7 +23,7 @@ void pkt_slc_resp_tasklet(unsigned long data); int nitrox_process_se_request(struct nitrox_device *ndev, struct se_crypto_request *req, completion_t cb, - struct skcipher_request *skreq); + void *cb_arg); void backlog_qflush_work(struct work_struct *work); diff --git a/drivers/crypto/cavium/nitrox/nitrox_csr.h b/drivers/crypto/cavium/nitrox/nitrox_csr.h index 1ad27b1a87c5..a2a452642b38 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_csr.h +++ b/drivers/crypto/cavium/nitrox/nitrox_csr.h @@ -54,7 +54,13 @@ #define NPS_STATS_PKT_DMA_WR_CNT 0x1000190 /* NPS packet registers */ -#define NPS_PKT_INT 0x1040018 +#define NPS_PKT_INT 0x1040018 +#define NPS_PKT_MBOX_INT_LO 0x1040020 +#define NPS_PKT_MBOX_INT_LO_ENA_W1C 0x1040030 +#define 
NPS_PKT_MBOX_INT_LO_ENA_W1S 0x1040038 +#define NPS_PKT_MBOX_INT_HI 0x1040040 +#define NPS_PKT_MBOX_INT_HI_ENA_W1C 0x1040050 +#define NPS_PKT_MBOX_INT_HI_ENA_W1S 0x1040058 #define NPS_PKT_IN_RERR_HI 0x1040108 #define NPS_PKT_IN_RERR_HI_ENA_W1S 0x1040120 #define NPS_PKT_IN_RERR_LO 0x1040128 @@ -74,6 +80,10 @@ #define NPS_PKT_SLC_RERR_LO_ENA_W1S 0x1040240 #define NPS_PKT_SLC_ERR_TYPE 0x1040248 #define NPS_PKT_SLC_ERR_TYPE_ENA_W1S 0x1040260 +/* Mailbox PF->VF PF Accessible Data registers */ +#define NPS_PKT_MBOX_PF_VF_PFDATAX(_i) (0x1040800 + ((_i) * 0x8)) +#define NPS_PKT_MBOX_VF_PF_PFDATAX(_i) (0x1040C00 + ((_i) * 0x8)) + #define NPS_PKT_SLC_CTLX(_i) (0x10000 + ((_i) * 0x40000)) #define NPS_PKT_SLC_CNTSX(_i) (0x10008 + ((_i) * 0x40000)) #define NPS_PKT_SLC_INT_LEVELSX(_i) (0x10010 + ((_i) * 0x40000)) diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.c b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c index 5f3cd5fafe04..848ec93d4333 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_debugfs.c +++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c @@ -13,18 +13,7 @@ static int firmware_show(struct seq_file *s, void *v) return 0; } -static int firmware_open(struct inode *inode, struct file *file) -{ - return single_open(file, firmware_show, inode->i_private); -} - -static const struct file_operations firmware_fops = { - .owner = THIS_MODULE, - .open = firmware_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(firmware); static int device_show(struct seq_file *s, void *v) { @@ -41,18 +30,7 @@ static int device_show(struct seq_file *s, void *v) return 0; } -static int nitrox_open(struct inode *inode, struct file *file) -{ - return single_open(file, device_show, inode->i_private); -} - -static const struct file_operations nitrox_fops = { - .owner = THIS_MODULE, - .open = nitrox_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(device); static int stats_show(struct seq_file *s, void *v) { @@ -69,18 +47,7 @@ static int stats_show(struct seq_file *s, void *v) return 0; } -static int nitrox_stats_open(struct inode *inode, struct file *file) -{ - return single_open(file, stats_show, inode->i_private); -} - -static const struct file_operations nitrox_stats_fops = { - .owner = THIS_MODULE, - .open = nitrox_stats_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(stats); void nitrox_debugfs_exit(struct nitrox_device *ndev) { @@ -88,28 +55,14 @@ void nitrox_debugfs_exit(struct nitrox_device *ndev) ndev->debugfs_dir = NULL; } -int nitrox_debugfs_init(struct nitrox_device *ndev) +void nitrox_debugfs_init(struct nitrox_device *ndev) { - struct dentry *dir, *f; + struct dentry *dir; dir = debugfs_create_dir(KBUILD_MODNAME, NULL); - if (!dir) - return -ENOMEM; ndev->debugfs_dir = dir; - f = debugfs_create_file("firmware", 0400, dir, ndev, &firmware_fops); - if (!f) - goto err; - f = debugfs_create_file("device", 0400, dir, ndev, &nitrox_fops); - if (!f) - goto err; - f = debugfs_create_file("stats", 0400, dir, ndev, &nitrox_stats_fops); - if (!f) - goto err; - - return 0; - -err: - nitrox_debugfs_exit(ndev); - return -ENODEV; + debugfs_create_file("firmware", 0400, dir, ndev, &firmware_fops); + debugfs_create_file("device", 0400, dir, ndev, &device_fops); + debugfs_create_file("stats", 0400, dir, ndev, &stats_fops); } diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.h b/drivers/crypto/cavium/nitrox/nitrox_debugfs.h new file mode 100644 index 
000000000000..f177b79bbab0 --- /dev/null +++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.h @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0 +#ifndef __NITROX_DEBUGFS_H +#define __NITROX_DEBUGFS_H + +#include "nitrox_dev.h" + +#ifdef CONFIG_DEBUG_FS +void nitrox_debugfs_init(struct nitrox_device *ndev); +void nitrox_debugfs_exit(struct nitrox_device *ndev); +#else +static inline void nitrox_debugfs_init(struct nitrox_device *ndev) +{ +} + +static inline void nitrox_debugfs_exit(struct nitrox_device *ndev) +{ +} +#endif /* !CONFIG_DEBUG_FS */ + +#endif /* __NITROX_DEBUGFS_H */ diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h index 283e252385fb..0338877b828f 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_dev.h +++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h @@ -8,6 +8,8 @@ #include <linux/if.h> #define VERSION_LEN 32 +/* Maximum queues in PF mode */ +#define MAX_PF_QUEUES 64 /** * struct nitrox_cmdq - NITROX command queue @@ -103,6 +105,61 @@ struct nitrox_q_vector { }; }; +/** + * mbox_msg - Mailbox message data + * @type: message type + * @opcode: message opcode + * @data: message data + */ +union mbox_msg { + u64 value; + struct { + u64 type: 2; + u64 opcode: 6; + u64 data: 58; + }; + struct { + u64 type: 2; + u64 opcode: 6; + u64 chipid: 8; + u64 vfid: 8; + } id; +}; + +/** + * nitrox_vfdev - NITROX VF device instance in PF + * @state: VF device state + * @vfno: VF number + * @nr_queues: number of queues enabled in VF + * @ring: ring to communicate with VF + * @msg: Mailbox message data from VF + * @mbx_resp: Mailbox counters + */ +struct nitrox_vfdev { + atomic_t state; + int vfno; + int nr_queues; + int ring; + union mbox_msg msg; + atomic64_t mbx_resp; +}; + +/** + * struct nitrox_iov - SR-IOV information + * @num_vfs: number of VF(s) enabled + * @max_vf_queues: Maximum number of queues allowed for VF + * @vfdev: VF(s) devices + * @pf2vf_wq: workqueue for PF2VF communication + * @msix: MSI-X entry for PF in SR-IOV case + */ +struct nitrox_iov { + int num_vfs; + int max_vf_queues; + struct nitrox_vfdev *vfdev; + struct workqueue_struct *pf2vf_wq; + struct msix_entry msix; +}; + /* * NITROX Device states */ @@ -150,6 +207,9 @@ enum vf_mode { * @ctx_pool: DMA pool for crypto context * @pkt_inq: Packet input rings * @qvec: MSI-X queue vectors information + * @iov: SR-IOV information + * @num_vecs: number of MSI-X vectors + * @stats: request statistics * @hw: hardware information * @debugfs_dir: debugfs directory */ @@ -168,13 +228,13 @@ struct nitrox_device { int node; u16 qlen; u16 nr_queues; - int num_vfs; enum vf_mode mode; struct dma_pool *ctx_pool; struct nitrox_cmdq *pkt_inq; struct nitrox_q_vector *qvec; + struct nitrox_iov iov; int num_vecs; struct nitrox_stats stats; @@ -213,17 +273,9 @@ static inline bool nitrox_ready(struct nitrox_device *ndev) return atomic_read(&ndev->state) == __NDEV_READY; } -#ifdef CONFIG_DEBUG_FS -int nitrox_debugfs_init(struct nitrox_device *ndev); -void nitrox_debugfs_exit(struct nitrox_device *ndev); -#else -static inline int nitrox_debugfs_init(struct nitrox_device *ndev) +static inline bool nitrox_vfdev_ready(struct nitrox_vfdev *vfdev) { - return 0; + return atomic_read(&vfdev->state) == __NDEV_READY; } -static inline void nitrox_debugfs_exit(struct nitrox_device *ndev) -{ } -#endif - #endif /* __NITROX_DEV_H */ diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.c b/drivers/crypto/cavium/nitrox/nitrox_hal.c index a9b82387cf53..c08d9f33a3b1 100644 ---
a/drivers/crypto/cavium/nitrox/nitrox_hal.c +++ b/drivers/crypto/cavium/nitrox/nitrox_hal.c @@ -5,10 +5,11 @@ #include "nitrox_csr.h" #define PLL_REF_CLK 50 +#define MAX_CSR_RETRIES 10 /** * emu_enable_cores - Enable EMU cluster cores. - * @ndev: N5 device + * @ndev: NITROX device */ static void emu_enable_cores(struct nitrox_device *ndev) { @@ -33,7 +34,7 @@ static void emu_enable_cores(struct nitrox_device *ndev) /** * nitrox_config_emu_unit - configure EMU unit. - * @ndev: N5 device + * @ndev: NITROX device */ void nitrox_config_emu_unit(struct nitrox_device *ndev) { @@ -63,29 +64,26 @@ void nitrox_config_emu_unit(struct nitrox_device *ndev) static void reset_pkt_input_ring(struct nitrox_device *ndev, int ring) { union nps_pkt_in_instr_ctl pkt_in_ctl; - union nps_pkt_in_instr_baoff_dbell pkt_in_dbell; union nps_pkt_in_done_cnts pkt_in_cnts; + int max_retries = MAX_CSR_RETRIES; u64 offset; + /* step 1: disable the ring, clear enable bit */ offset = NPS_PKT_IN_INSTR_CTLX(ring); - /* disable the ring */ pkt_in_ctl.value = nitrox_read_csr(ndev, offset); pkt_in_ctl.s.enb = 0; nitrox_write_csr(ndev, offset, pkt_in_ctl.value); - usleep_range(100, 150); - /* wait to clear [ENB] */ + /* step 2: wait to clear [ENB] */ + usleep_range(100, 150); do { pkt_in_ctl.value = nitrox_read_csr(ndev, offset); - } while (pkt_in_ctl.s.enb); - - /* clear off door bell counts */ - offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(ring); - pkt_in_dbell.value = 0; - pkt_in_dbell.s.dbell = 0xffffffff; - nitrox_write_csr(ndev, offset, pkt_in_dbell.value); + if (!pkt_in_ctl.s.enb) + break; + udelay(50); + } while (max_retries--); - /* clear done counts */ + /* step 3: clear done counts */ offset = NPS_PKT_IN_DONE_CNTSX(ring); pkt_in_cnts.value = nitrox_read_csr(ndev, offset); nitrox_write_csr(ndev, offset, pkt_in_cnts.value); @@ -95,6 +93,7 @@ static void reset_pkt_input_ring(struct nitrox_device *ndev, int ring) void enable_pkt_input_ring(struct nitrox_device *ndev, int ring) { union nps_pkt_in_instr_ctl pkt_in_ctl; + int max_retries = MAX_CSR_RETRIES; u64 offset; /* 64-byte instruction size */ @@ -107,12 +106,15 @@ void enable_pkt_input_ring(struct nitrox_device *ndev, int ring) /* wait for set [ENB] */ do { pkt_in_ctl.value = nitrox_read_csr(ndev, offset); - } while (!pkt_in_ctl.s.enb); + if (pkt_in_ctl.s.enb) + break; + udelay(50); + } while (max_retries--); } /** * nitrox_config_pkt_input_rings - configure Packet Input Rings - * @ndev: N5 device + * @ndev: NITROX device */ void nitrox_config_pkt_input_rings(struct nitrox_device *ndev) { @@ -121,11 +123,14 @@ void nitrox_config_pkt_input_rings(struct nitrox_device *ndev) for (i = 0; i < ndev->nr_queues; i++) { struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i]; union nps_pkt_in_instr_rsize pkt_in_rsize; + union nps_pkt_in_instr_baoff_dbell pkt_in_dbell; u64 offset; reset_pkt_input_ring(ndev, i); - /* configure ring base address 16-byte aligned, + /** + * step 4: + * configure ring base address 16-byte aligned, * size and interrupt threshold. 
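The substantive change in these reset/enable paths is that every wait on the [ENB] bit is now bounded to MAX_CSR_RETRIES reads spaced 50 us apart instead of an open-ended busy loop, so a wedged engine can no longer hang the CPU. The recurring pattern, condensed into one helper for illustration (a sketch; the real loops are written inline above, and example_wait_enb_clear() is not a driver function):

static void example_wait_enb_clear(struct nitrox_device *ndev, u64 offset)
{
	union nps_pkt_in_instr_ctl pkt_in_ctl;
	int max_retries = MAX_CSR_RETRIES;

	do {
		pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
		if (!pkt_in_ctl.s.enb)
			break;		/* ring is disabled, done */
		udelay(50);		/* give the engine time to settle */
	} while (max_retries--);
}

Note the loop falls through silently when the retries are exhausted; the reset sequence simply continues.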
*/ offset = NPS_PKT_IN_INSTR_BADDRX(i); @@ -141,6 +146,13 @@ void nitrox_config_pkt_input_rings(struct nitrox_device *ndev) offset = NPS_PKT_IN_INT_LEVELSX(i); nitrox_write_csr(ndev, offset, 0xffffffff); + /* step 5: clear off door bell counts */ + offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i); + pkt_in_dbell.value = 0; + pkt_in_dbell.s.dbell = 0xffffffff; + nitrox_write_csr(ndev, offset, pkt_in_dbell.value); + + /* enable the ring */ enable_pkt_input_ring(ndev, i); } } @@ -149,21 +161,26 @@ static void reset_pkt_solicit_port(struct nitrox_device *ndev, int port) { union nps_pkt_slc_ctl pkt_slc_ctl; union nps_pkt_slc_cnts pkt_slc_cnts; + int max_retries = MAX_CSR_RETRIES; u64 offset; - /* disable slc port */ + /* step 1: disable slc port */ offset = NPS_PKT_SLC_CTLX(port); pkt_slc_ctl.value = nitrox_read_csr(ndev, offset); pkt_slc_ctl.s.enb = 0; nitrox_write_csr(ndev, offset, pkt_slc_ctl.value); - usleep_range(100, 150); + /* step 2 */ + usleep_range(100, 150); /* wait to clear [ENB] */ do { pkt_slc_ctl.value = nitrox_read_csr(ndev, offset); - } while (pkt_slc_ctl.s.enb); + if (!pkt_slc_ctl.s.enb) + break; + udelay(50); + } while (max_retries--); - /* clear slc counters */ + /* step 3: clear slc counters */ offset = NPS_PKT_SLC_CNTSX(port); pkt_slc_cnts.value = nitrox_read_csr(ndev, offset); nitrox_write_csr(ndev, offset, pkt_slc_cnts.value); @@ -173,12 +190,12 @@ static void reset_pkt_solicit_port(struct nitrox_device *ndev, int port) void enable_pkt_solicit_port(struct nitrox_device *ndev, int port) { union nps_pkt_slc_ctl pkt_slc_ctl; + int max_retries = MAX_CSR_RETRIES; u64 offset; offset = NPS_PKT_SLC_CTLX(port); pkt_slc_ctl.value = 0; pkt_slc_ctl.s.enb = 1; - /* * 8 trailing 0x00 bytes will be added * to the end of the outgoing packet. */ @@ -191,23 +208,27 @@ void enable_pkt_solicit_port(struct nitrox_device *ndev, int port) /* wait to set [ENB] */ do { pkt_slc_ctl.value = nitrox_read_csr(ndev, offset); - } while (!pkt_slc_ctl.s.enb); + if (pkt_slc_ctl.s.enb) + break; + udelay(50); + } while (max_retries--); } -static void config_single_pkt_solicit_port(struct nitrox_device *ndev, - int port) +static void config_pkt_solicit_port(struct nitrox_device *ndev, int port) { union nps_pkt_slc_int_levels pkt_slc_int; u64 offset; reset_pkt_solicit_port(ndev, port); + /* step 4: configure interrupt levels */ offset = NPS_PKT_SLC_INT_LEVELSX(port); pkt_slc_int.value = 0; /* time interrupt threshold */ pkt_slc_int.s.timet = 0x3fffff; nitrox_write_csr(ndev, offset, pkt_slc_int.value); + /* enable the solicit port */ enable_pkt_solicit_port(ndev, port); } @@ -216,12 +237,12 @@ void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev) int i; for (i = 0; i < ndev->nr_queues; i++) - config_single_pkt_solicit_port(ndev, i); + config_pkt_solicit_port(ndev, i); } /** * enable_nps_interrupts - enable NPS interrupts - * @ndev: N5 device. + * @ndev: NITROX device. * * This includes NPS core, packet in and slc interrupts. 
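Taken together, the renumbered step comments give each ring a fixed bring-up order (a summary of the code above, not new driver logic):

/* Input-ring (re)configuration order after this patch:
 *  step 1: clear [ENB] to disable the ring
 *  step 2: poll, with bounded retries, until [ENB] reads back clear
 *  step 3: write the done counts back to themselves to clear them
 *  step 4: program base address, ring size and interrupt thresholds
 *  step 5: clear stale doorbell counts, then re-enable the ring
 *
 * The solicit ports follow the same disable/wait/clear/configure/
 * enable shape in reset_pkt_solicit_port() and
 * config_pkt_solicit_port().
 */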
*/ @@ -284,8 +305,8 @@ void nitrox_config_pom_unit(struct nitrox_device *ndev) } /** - * nitrox_config_rand_unit - enable N5 random number unit - * @ndev: N5 device + * nitrox_config_rand_unit - enable NITROX random number unit + * @ndev: NITROX device */ void nitrox_config_rand_unit(struct nitrox_device *ndev) { @@ -361,6 +382,7 @@ void invalidate_lbc(struct nitrox_device *ndev) { union lbc_inval_ctl lbc_ctl; union lbc_inval_status lbc_stat; + int max_retries = MAX_CSR_RETRIES; u64 offset; /* invalidate LBC */ @@ -370,10 +392,12 @@ void invalidate_lbc(struct nitrox_device *ndev) nitrox_write_csr(ndev, offset, lbc_ctl.value); offset = LBC_INVAL_STATUS; - do { lbc_stat.value = nitrox_read_csr(ndev, offset); - } while (!lbc_stat.s.done); + if (lbc_stat.s.done) + break; + udelay(50); + } while (max_retries--); } void nitrox_config_lbc_unit(struct nitrox_device *ndev) @@ -467,3 +491,31 @@ void nitrox_get_hwinfo(struct nitrox_device *ndev) /* copy partname */ strncpy(ndev->hw.partname, name, sizeof(ndev->hw.partname)); } + +void enable_pf2vf_mbox_interrupts(struct nitrox_device *ndev) +{ + u64 value = ~0ULL; + u64 reg_addr; + + /* Mailbox interrupt low enable set register */ + reg_addr = NPS_PKT_MBOX_INT_LO_ENA_W1S; + nitrox_write_csr(ndev, reg_addr, value); + + /* Mailbox interrupt high enable set register */ + reg_addr = NPS_PKT_MBOX_INT_HI_ENA_W1S; + nitrox_write_csr(ndev, reg_addr, value); +} + +void disable_pf2vf_mbox_interrupts(struct nitrox_device *ndev) +{ + u64 value = ~0ULL; + u64 reg_addr; + + /* Mailbox interrupt low enable clear register */ + reg_addr = NPS_PKT_MBOX_INT_LO_ENA_W1C; + nitrox_write_csr(ndev, reg_addr, value); + + /* Mailbox interrupt high enable clear register */ + reg_addr = NPS_PKT_MBOX_INT_HI_ENA_W1C; + nitrox_write_csr(ndev, reg_addr, value); +} diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.h b/drivers/crypto/cavium/nitrox/nitrox_hal.h index 489ee64c119e..d6606418ba38 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_hal.h +++ b/drivers/crypto/cavium/nitrox/nitrox_hal.h @@ -19,5 +19,7 @@ void enable_pkt_input_ring(struct nitrox_device *ndev, int ring); void enable_pkt_solicit_port(struct nitrox_device *ndev, int port); void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode); void nitrox_get_hwinfo(struct nitrox_device *ndev); +void enable_pf2vf_mbox_interrupts(struct nitrox_device *ndev); +void disable_pf2vf_mbox_interrupts(struct nitrox_device *ndev); #endif /* __NITROX_HAL_H */ diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.c b/drivers/crypto/cavium/nitrox/nitrox_isr.c index 88a77b8fb3fb..3dec570a190a 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_isr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_isr.c @@ -7,12 +7,14 @@ #include "nitrox_csr.h" #include "nitrox_common.h" #include "nitrox_hal.h" +#include "nitrox_mbx.h" /** * One vector for each type of ring * - NPS packet ring, AQMQ ring and ZQMQ ring */ #define NR_RING_VECTORS 3 +#define NR_NON_RING_VECTORS 1 /* base entry for packet ring/port */ #define PKT_RING_MSIX_BASE 0 #define NON_RING_MSIX_BASE 192 @@ -219,7 +221,8 @@ static void nps_core_int_tasklet(unsigned long data) */ static irqreturn_t nps_core_int_isr(int irq, void *data) { - struct nitrox_device *ndev = data; + struct nitrox_q_vector *qvec = data; + struct nitrox_device *ndev = qvec->ndev; union nps_core_int_active core_int; core_int.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE); @@ -245,6 +248,10 @@ static irqreturn_t nps_core_int_isr(int irq, void *data) if (core_int.s.bmi) clear_bmi_err_intr(ndev); + /* 
Mailbox interrupt */ + if (core_int.s.mbox) + nitrox_pf2vf_mbox_handler(ndev); + /* If more work callback the ISR, set resend */ core_int.s.resend = 1; nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int.value); @@ -275,6 +282,7 @@ void nitrox_unregister_interrupts(struct nitrox_device *ndev) qvec->valid = false; } kfree(ndev->qvec); + ndev->qvec = NULL; pci_free_irq_vectors(pdev); } @@ -321,6 +329,7 @@ int nitrox_register_interrupts(struct nitrox_device *ndev) if (qvec->ring >= ndev->nr_queues) break; + qvec->cmdq = &ndev->pkt_inq[qvec->ring]; snprintf(qvec->name, IRQ_NAMESZ, "nitrox-pkt%d", qvec->ring); /* get the vector number */ vec = pci_irq_vector(pdev, i); @@ -335,13 +344,13 @@ int nitrox_register_interrupts(struct nitrox_device *ndev) tasklet_init(&qvec->resp_tasklet, pkt_slc_resp_tasklet, (unsigned long)qvec); - qvec->cmdq = &ndev->pkt_inq[qvec->ring]; qvec->valid = true; } /* request irqs for non ring vectors */ i = NON_RING_MSIX_BASE; qvec = &ndev->qvec[i]; + qvec->ndev = ndev; snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d", i); /* get the vector number */ @@ -356,7 +365,6 @@ int nitrox_register_interrupts(struct nitrox_device *ndev) tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet, (unsigned long)qvec); - qvec->ndev = ndev; qvec->valid = true; return 0; @@ -365,3 +373,81 @@ irq_fail: nitrox_unregister_interrupts(ndev); return ret; } + +void nitrox_sriov_unregister_interrupts(struct nitrox_device *ndev) +{ + struct pci_dev *pdev = ndev->pdev; + int i; + + for (i = 0; i < ndev->num_vecs; i++) { + struct nitrox_q_vector *qvec; + int vec; + + qvec = ndev->qvec + i; + if (!qvec->valid) + continue; + + vec = ndev->iov.msix.vector; + irq_set_affinity_hint(vec, NULL); + free_irq(vec, qvec); + + tasklet_disable(&qvec->resp_tasklet); + tasklet_kill(&qvec->resp_tasklet); + qvec->valid = false; + } + kfree(ndev->qvec); + ndev->qvec = NULL; + pci_disable_msix(pdev); +} + +int nitrox_sriov_register_interupts(struct nitrox_device *ndev) +{ + struct pci_dev *pdev = ndev->pdev; + struct nitrox_q_vector *qvec; + int vec, cpu; + int ret; + + /** + * only non ring vectors i.e Entry 192 is available + * for PF in SR-IOV mode. 
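For orientation, the MSI-X numbering these functions rely on, assembled from the NR_RING_VECTORS, PKT_RING_MSIX_BASE and NON_RING_MSIX_BASE definitions earlier in this file (descriptive summary only):

/* MSI-X vector layout in this file:
 *   entries 0..191: ring vectors, NR_RING_VECTORS (3) per ring --
 *                   NPS packet, AQMQ and ZQMQ
 *   entry 192:      the non-ring vector (NPS core errors and, with
 *                   this patch, PF<->VF mailbox interrupts)
 * In SR-IOV mode the rings belong to the VFs, so entry 192 is the
 * only vector the PF itself requests.
 */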
+ */ + ndev->iov.msix.entry = NON_RING_MSIX_BASE; + ret = pci_enable_msix_exact(pdev, &ndev->iov.msix, NR_NON_RING_VECTORS); + if (ret) { + dev_err(DEV(ndev), "failed to allocate nps-core-int%d\n", + NON_RING_MSIX_BASE); + return ret; + } + + qvec = kcalloc(NR_NON_RING_VECTORS, sizeof(*qvec), GFP_KERNEL); + if (!qvec) { + pci_disable_msix(pdev); + return -ENOMEM; + } + qvec->ndev = ndev; + + ndev->qvec = qvec; + ndev->num_vecs = NR_NON_RING_VECTORS; + snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d", + NON_RING_MSIX_BASE); + + vec = ndev->iov.msix.vector; + ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec); + if (ret) { + dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n", + NON_RING_MSIX_BASE); + goto iov_irq_fail; + } + cpu = num_online_cpus(); + irq_set_affinity_hint(vec, get_cpu_mask(cpu)); + + tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet, + (unsigned long)qvec); + qvec->valid = true; + + return 0; + +iov_irq_fail: + nitrox_sriov_unregister_interrupts(ndev); + return ret; +} diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.h b/drivers/crypto/cavium/nitrox/nitrox_isr.h index 63418a6cc52c..1062c9336c1f 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_isr.h +++ b/drivers/crypto/cavium/nitrox/nitrox_isr.h @@ -6,5 +6,7 @@ int nitrox_register_interrupts(struct nitrox_device *ndev); void nitrox_unregister_interrupts(struct nitrox_device *ndev); +int nitrox_sriov_register_interupts(struct nitrox_device *ndev); +void nitrox_sriov_unregister_interrupts(struct nitrox_device *ndev); #endif /* __NITROX_ISR_H */ diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c index 2260efa42308..4ace9bcd603a 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_lib.c +++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c @@ -25,9 +25,9 @@ static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes) struct nitrox_device *ndev = cmdq->ndev; cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes; - cmdq->unalign_base = dma_zalloc_coherent(DEV(ndev), cmdq->qsize, - &cmdq->unalign_dma, - GFP_KERNEL); + cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize, + &cmdq->unalign_dma, + GFP_KERNEL); if (!cmdq->unalign_base) return -ENOMEM; @@ -158,12 +158,19 @@ static void destroy_crypto_dma_pool(struct nitrox_device *ndev) void *crypto_alloc_context(struct nitrox_device *ndev) { struct ctx_hdr *ctx; + struct crypto_ctx_hdr *chdr; void *vaddr; dma_addr_t dma; + chdr = kmalloc(sizeof(*chdr), GFP_KERNEL); + if (!chdr) + return NULL; + vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma); - if (!vaddr) + if (!vaddr) { + kfree(chdr); return NULL; + } /* fill meta data */ ctx = vaddr; @@ -171,7 +178,11 @@ void *crypto_alloc_context(struct nitrox_device *ndev) ctx->dma = dma; ctx->ctx_dma = dma + sizeof(struct ctx_hdr); - return ((u8 *)vaddr + sizeof(struct ctx_hdr)); + chdr->pool = ndev->ctx_pool; + chdr->dma = dma; + chdr->vaddr = vaddr; + + return chdr; } /** @@ -180,13 +191,14 @@ void *crypto_alloc_context(struct nitrox_device *ndev) */ void crypto_free_context(void *ctx) { - struct ctx_hdr *ctxp; + struct crypto_ctx_hdr *ctxp; if (!ctx) return; - ctxp = (struct ctx_hdr *)((u8 *)ctx - sizeof(struct ctx_hdr)); - dma_pool_free(ctxp->pool, ctxp, ctxp->dma); + ctxp = ctx; + dma_pool_free(ctxp->pool, ctxp->vaddr, ctxp->dma); + kfree(ctxp); } /** diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c index 6595c95af9f1..faa78f651238 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_main.c 
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c @@ -1,6 +1,5 @@ #include <linux/aer.h> #include <linux/delay.h> -#include <linux/debugfs.h> #include <linux/firmware.h> #include <linux/list.h> #include <linux/module.h> @@ -13,9 +12,9 @@ #include "nitrox_csr.h" #include "nitrox_hal.h" #include "nitrox_isr.h" +#include "nitrox_debugfs.h" #define CNN55XX_DEV_ID 0x12 -#define MAX_PF_QUEUES 64 #define UCODE_HLEN 48 #define SE_GROUP 0 @@ -405,9 +404,7 @@ static int nitrox_probe(struct pci_dev *pdev, if (err) goto pf_hw_fail; - err = nitrox_debugfs_init(ndev); - if (err) - goto pf_hw_fail; + nitrox_debugfs_init(ndev); /* clear the statistics */ atomic64_set(&ndev->stats.posted, 0); diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.c b/drivers/crypto/cavium/nitrox/nitrox_mbx.c new file mode 100644 index 000000000000..02ee95064841 --- /dev/null +++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.c @@ -0,0 +1,204 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/workqueue.h> + +#include "nitrox_csr.h" +#include "nitrox_hal.h" +#include "nitrox_dev.h" + +#define RING_TO_VFNO(_x, _y) ((_x) / (_y)) + +/** + * mbx_msg_type - Mailbox message types + */ +enum mbx_msg_type { + MBX_MSG_TYPE_NOP, + MBX_MSG_TYPE_REQ, + MBX_MSG_TYPE_ACK, + MBX_MSG_TYPE_NACK, +}; + +/** + * mbx_msg_opcode - Mailbox message opcodes + */ +enum mbx_msg_opcode { + MSG_OP_VF_MODE = 1, + MSG_OP_VF_UP, + MSG_OP_VF_DOWN, + MSG_OP_CHIPID_VFID, +}; + +struct pf2vf_work { + struct nitrox_vfdev *vfdev; + struct nitrox_device *ndev; + struct work_struct pf2vf_resp; +}; + +static inline u64 pf2vf_read_mbox(struct nitrox_device *ndev, int ring) +{ + u64 reg_addr; + + reg_addr = NPS_PKT_MBOX_VF_PF_PFDATAX(ring); + return nitrox_read_csr(ndev, reg_addr); +} + +static inline void pf2vf_write_mbox(struct nitrox_device *ndev, u64 value, + int ring) +{ + u64 reg_addr; + + reg_addr = NPS_PKT_MBOX_PF_VF_PFDATAX(ring); + nitrox_write_csr(ndev, reg_addr, value); +} + +static void pf2vf_send_response(struct nitrox_device *ndev, + struct nitrox_vfdev *vfdev) +{ + union mbox_msg msg; + + msg.value = vfdev->msg.value; + + switch (vfdev->msg.opcode) { + case MSG_OP_VF_MODE: + msg.data = ndev->mode; + break; + case MSG_OP_VF_UP: + vfdev->nr_queues = vfdev->msg.data; + atomic_set(&vfdev->state, __NDEV_READY); + break; + case MSG_OP_CHIPID_VFID: + msg.id.chipid = ndev->idx; + msg.id.vfid = vfdev->vfno; + break; + case MSG_OP_VF_DOWN: + vfdev->nr_queues = 0; + atomic_set(&vfdev->state, __NDEV_NOT_READY); + break; + default: + msg.type = MBX_MSG_TYPE_NOP; + break; + } + + if (msg.type == MBX_MSG_TYPE_NOP) + return; + + /* send ACK to VF */ + msg.type = MBX_MSG_TYPE_ACK; + pf2vf_write_mbox(ndev, msg.value, vfdev->ring); + + vfdev->msg.value = 0; + atomic64_inc(&vfdev->mbx_resp); +} + +static void pf2vf_resp_handler(struct work_struct *work) +{ + struct pf2vf_work *pf2vf_resp = container_of(work, struct pf2vf_work, + pf2vf_resp); + struct nitrox_vfdev *vfdev = pf2vf_resp->vfdev; + struct nitrox_device *ndev = pf2vf_resp->ndev; + + switch (vfdev->msg.type) { + case MBX_MSG_TYPE_REQ: + /* process the request from VF */ + pf2vf_send_response(ndev, vfdev); + break; + case MBX_MSG_TYPE_ACK: + case MBX_MSG_TYPE_NACK: + break; + }; + + kfree(pf2vf_resp); +} + +void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev) +{ + struct nitrox_vfdev *vfdev; + struct pf2vf_work *pfwork; + u64 value, reg_addr; + u32 i; + int vfno; + + /* loop for VF(0..63) */ + reg_addr = NPS_PKT_MBOX_INT_LO; + value = nitrox_read_csr(ndev, reg_addr); + for_each_set_bit(i, (const unsigned 
long *)&value, BITS_PER_LONG) { + /* get the vfno from ring */ + vfno = RING_TO_VFNO(i, ndev->iov.max_vf_queues); + vfdev = ndev->iov.vfdev + vfno; + vfdev->ring = i; + /* fill the vf mailbox data */ + vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring); + pfwork = kzalloc(sizeof(*pfwork), GFP_ATOMIC); + if (!pfwork) + continue; + + pfwork->vfdev = vfdev; + pfwork->ndev = ndev; + INIT_WORK(&pfwork->pf2vf_resp, pf2vf_resp_handler); + queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp); + /* clear the corresponding vf bit */ + nitrox_write_csr(ndev, reg_addr, BIT_ULL(i)); + } + + /* loop for VF(64..127) */ + reg_addr = NPS_PKT_MBOX_INT_HI; + value = nitrox_read_csr(ndev, reg_addr); + for_each_set_bit(i, (const unsigned long *)&value, BITS_PER_LONG) { + /* get the vfno from ring */ + vfno = RING_TO_VFNO(i + 64, ndev->iov.max_vf_queues); + vfdev = ndev->iov.vfdev + vfno; + vfdev->ring = (i + 64); + /* fill the vf mailbox data */ + vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring); + + pfwork = kzalloc(sizeof(*pfwork), GFP_ATOMIC); + if (!pfwork) + continue; + + pfwork->vfdev = vfdev; + pfwork->ndev = ndev; + INIT_WORK(&pfwork->pf2vf_resp, pf2vf_resp_handler); + queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp); + /* clear the corresponding vf bit */ + nitrox_write_csr(ndev, reg_addr, BIT_ULL(i)); + } +} + +int nitrox_mbox_init(struct nitrox_device *ndev) +{ + struct nitrox_vfdev *vfdev; + int i; + + ndev->iov.vfdev = kcalloc(ndev->iov.num_vfs, + sizeof(struct nitrox_vfdev), GFP_KERNEL); + if (!ndev->iov.vfdev) + return -ENOMEM; + + for (i = 0; i < ndev->iov.num_vfs; i++) { + vfdev = ndev->iov.vfdev + i; + vfdev->vfno = i; + } + + /* allocate pf2vf response workqueue */ + ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", 0, 0); + if (!ndev->iov.pf2vf_wq) { + kfree(ndev->iov.vfdev); + return -ENOMEM; + } + /* enable pf2vf mailbox interrupts */ + enable_pf2vf_mbox_interrupts(ndev); + + return 0; +} + +void nitrox_mbox_cleanup(struct nitrox_device *ndev) +{ + /* disable pf2vf mailbox interrupts */ + disable_pf2vf_mbox_interrupts(ndev); + /* destroy workqueue */ + if (ndev->iov.pf2vf_wq) + destroy_workqueue(ndev->iov.pf2vf_wq); + + kfree(ndev->iov.vfdev); + ndev->iov.pf2vf_wq = NULL; + ndev->iov.vfdev = NULL; +} diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.h b/drivers/crypto/cavium/nitrox/nitrox_mbx.h new file mode 100644 index 000000000000..5008399775a9 --- /dev/null +++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.h @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 +#ifndef __NITROX_MBX_H +#define __NITROX_MBX_H + +int nitrox_mbox_init(struct nitrox_device *ndev); +void nitrox_mbox_cleanup(struct nitrox_device *ndev); +void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev); + +#endif /* __NITROX_MBX_H */ diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h index d091b6f5f5dd..76c0f0be7233 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_req.h +++ b/drivers/crypto/cavium/nitrox/nitrox_req.h @@ -7,6 +7,9 @@ #include "nitrox_dev.h" +#define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL +#define PRIO 4001 + /** * struct gphdr - General purpose Header * @param0: first parameter. @@ -46,13 +49,6 @@ union se_req_ctrl { } s; }; -struct nitrox_sglist { - u16 len; - u16 raz0; - u32 raz1; - dma_addr_t dma; -}; - #define MAX_IV_LEN 16 /** @@ -62,8 +58,10 @@ struct nitrox_sglist { * @ctx_handle: Crypto context handle. * @gph: GP Header * @ctrl: Request Information. 
- * @in: Input sglist - * @out: Output sglist + * @orh: ORH address + * @comp: completion address + * @src: Input sglist + * @dst: Output sglist */ struct se_crypto_request { u8 opcode; @@ -73,9 +71,8 @@ struct se_crypto_request { struct gphdr gph; union se_req_ctrl ctrl; - - u8 iv[MAX_IV_LEN]; - u16 ivsize; + u64 *orh; + u64 *comp; struct scatterlist *src; struct scatterlist *dst; @@ -110,6 +107,18 @@ enum flexi_cipher { CIPHER_INVALID }; +enum flexi_auth { + AUTH_NULL = 0, + AUTH_MD5, + AUTH_SHA1, + AUTH_SHA2_SHA224, + AUTH_SHA2_SHA256, + AUTH_SHA2_SHA384, + AUTH_SHA2_SHA512, + AUTH_GMAC, + AUTH_INVALID +}; + /** * struct crypto_keys - Crypto keys * @key: Encryption key or KEY1 for AES-XTS @@ -136,6 +145,32 @@ struct auth_keys { u8 opad[64]; }; +union fc_ctx_flags { + __be64 f; + struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 cipher_type : 4; + u64 reserved_59 : 1; + u64 aes_keylen : 2; + u64 iv_source : 1; + u64 hash_type : 4; + u64 reserved_49_51 : 3; + u64 auth_input_type: 1; + u64 mac_len : 8; + u64 reserved_0_39 : 40; +#else + u64 reserved_0_39 : 40; + u64 mac_len : 8; + u64 auth_input_type: 1; + u64 reserved_49_51 : 3; + u64 hash_type : 4; + u64 iv_source : 1; + u64 aes_keylen : 2; + u64 reserved_59 : 1; + u64 cipher_type : 4; +#endif + } w0; +}; /** * struct flexi_crypto_context - Crypto context * @cipher_type: Encryption cipher type @@ -150,49 +185,30 @@ struct auth_keys { * @auth: Authentication keys */ struct flexi_crypto_context { - union { - __be64 flags; - struct { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 cipher_type : 4; - u64 reserved_59 : 1; - u64 aes_keylen : 2; - u64 iv_source : 1; - u64 hash_type : 4; - u64 reserved_49_51 : 3; - u64 auth_input_type: 1; - u64 mac_len : 8; - u64 reserved_0_39 : 40; -#else - u64 reserved_0_39 : 40; - u64 mac_len : 8; - u64 auth_input_type: 1; - u64 reserved_49_51 : 3; - u64 hash_type : 4; - u64 iv_source : 1; - u64 aes_keylen : 2; - u64 reserved_59 : 1; - u64 cipher_type : 4; -#endif - } w0; - }; - + union fc_ctx_flags flags; struct crypto_keys crypto; struct auth_keys auth; }; +struct crypto_ctx_hdr { + struct dma_pool *pool; + dma_addr_t dma; + void *vaddr; +}; + struct nitrox_crypto_ctx { struct nitrox_device *ndev; union { u64 ctx_handle; struct flexi_crypto_context *fctx; } u; + struct crypto_ctx_hdr *chdr; }; struct nitrox_kcrypt_request { struct se_crypto_request creq; - struct nitrox_crypto_ctx *nctx; - struct skcipher_request *skreq; + u8 *src; + u8 *dst; }; /** @@ -369,26 +385,19 @@ struct nitrox_sgcomp { /* * struct nitrox_sgtable - SG list information - * @map_cnt: Number of buffers mapped - * @nr_comp: Number of sglist components + * @sgmap_cnt: Number of buffers mapped * @total_bytes: Total bytes in sglist. - * @len: Total sglist components length. - * @dma: DMA address of sglist component. - * @dir: DMA direction. - * @buf: crypto request buffer. - * @sglist: SG list of input/output buffers. + * @sgcomp_len: Total sglist components length. + * @sgcomp_dma: DMA address of sglist component. + * @sg: crypto request buffer. * @sgcomp: sglist component for NITROX. */
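Hoisting the flag word out of struct flexi_crypto_context into union fc_ctx_flags lets the setkey paths build it field by field. A sketch of packing the word for AES-256-CBC, following the sequence nitrox_skcipher_setkey() uses later in this patch (IV_FROM_DPTR is a pre-existing driver constant meaning the IV is carried in the gather list; the snippet is illustrative, not driver code):

	union fc_ctx_flags flags;

	flags.f = 0;
	flags.w0.cipher_type = CIPHER_AES_CBC;
	flags.w0.aes_keylen = 3;	/* flexi_aes_keylen(AES_KEYSIZE_256) */
	flags.w0.iv_source = IV_FROM_DPTR;
	flags.f = cpu_to_be64(*(u64 *)&flags.w0);	/* engine is big endian */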
struct nitrox_sgtable { - u8 map_bufs_cnt; - u8 nr_sgcomp; + u8 sgmap_cnt; u16 total_bytes; - u32 len; - dma_addr_t dma; - enum dma_data_direction dir; - - struct scatterlist *buf; - struct nitrox_sglist *sglist; + u32 sgcomp_len; + dma_addr_t sgcomp_dma; + struct scatterlist *sg; struct nitrox_sgcomp *sgcomp; }; @@ -398,13 +407,11 @@ struct nitrox_sgtable { #define COMP_HLEN 8 struct resp_hdr { - u64 orh; - dma_addr_t orh_dma; - u64 completion; - dma_addr_t completion_dma; + u64 *orh; + u64 *completion; }; -typedef void (*completion_t)(struct skcipher_request *skreq, int err); +typedef void (*completion_t)(void *arg, int err); /** * struct nitrox_softreq - Represents the NITROX Request. @@ -427,7 +434,6 @@ struct nitrox_softreq { u32 flags; gfp_t gfp; atomic_t status; - bool inplace; struct nitrox_device *ndev; struct nitrox_cmdq *cmdq; @@ -440,7 +446,201 @@ struct nitrox_softreq { unsigned long tstamp; completion_t callback; - struct skcipher_request *skreq; + void *cb_arg; }; +static inline int flexi_aes_keylen(int keylen) +{ + int aes_keylen; + + switch (keylen) { + case AES_KEYSIZE_128: + aes_keylen = 1; + break; + case AES_KEYSIZE_192: + aes_keylen = 2; + break; + case AES_KEYSIZE_256: + aes_keylen = 3; + break; + default: + aes_keylen = -EINVAL; + break; + } + return aes_keylen; +} + +static inline void *alloc_req_buf(int nents, int extralen, gfp_t gfp) +{ + size_t size; + + size = sizeof(struct scatterlist) * nents; + size += extralen; + + return kzalloc(size, gfp); +} + +/** + * create_single_sg - Point SG entry to the data + * @sg: Destination SG list + * @buf: Data + * @buflen: Data length + * + * Returns next free entry in the destination SG list + */ +static inline struct scatterlist *create_single_sg(struct scatterlist *sg, + void *buf, int buflen) +{ + sg_set_buf(sg, buf, buflen); + sg++; + return sg; +} + +/** + * create_multi_sg - Create multiple sg entries with buflen data length from + * source sglist + * @to_sg: Destination SG list + * @from_sg: Source SG list + * @buflen: Data length + * + * Returns next free entry in the destination SG list + */ +static inline struct scatterlist *create_multi_sg(struct scatterlist *to_sg, + struct scatterlist *from_sg, + int buflen) +{ + struct scatterlist *sg = to_sg; + unsigned int sglen; + + for (; buflen; buflen -= sglen) { + sglen = from_sg->length; + if (sglen > buflen) + sglen = buflen; + + sg_set_buf(sg, sg_virt(from_sg), sglen); + from_sg = sg_next(from_sg); + sg++; + } + + return sg; +} + +static inline void set_orh_value(u64 *orh) +{ + WRITE_ONCE(*orh, PENDING_SIG); +} + +static inline void set_comp_value(u64 *comp) +{ + WRITE_ONCE(*comp, PENDING_SIG); +} + +static inline int alloc_src_req_buf(struct nitrox_kcrypt_request *nkreq, + int nents, int ivsize) +{ + struct se_crypto_request *creq = &nkreq->creq; + + nkreq->src = alloc_req_buf(nents, ivsize, creq->gfp); + if (!nkreq->src) + return -ENOMEM; + + return 0; +} + +static inline void nitrox_creq_copy_iv(char *dst, char *src, int size) +{ + memcpy(dst, src, size); +} + +static inline struct scatterlist *nitrox_creq_src_sg(char *iv, int ivsize) +{ + return (struct scatterlist *)(iv + ivsize); +} + +static inline void nitrox_creq_set_src_sg(struct nitrox_kcrypt_request *nkreq, + int nents, int ivsize, + struct scatterlist *src, int buflen) +{ + char *iv = nkreq->src; + struct scatterlist *sg; + struct se_crypto_request *creq = &nkreq->creq; + + creq->src = nitrox_creq_src_sg(iv, ivsize); + sg = creq->src; + sg_init_table(sg, nents); + + /* Input format: * 
+----+----------------+ + * | IV | SRC sg entries | + * +----+----------------+ + */ + + /* IV */ + sg = create_single_sg(sg, iv, ivsize); + /* SRC entries */ + create_multi_sg(sg, src, buflen); +} + +static inline int alloc_dst_req_buf(struct nitrox_kcrypt_request *nkreq, + int nents) +{ + int extralen = ORH_HLEN + COMP_HLEN; + struct se_crypto_request *creq = &nkreq->creq; + + nkreq->dst = alloc_req_buf(nents, extralen, creq->gfp); + if (!nkreq->dst) + return -ENOMEM; + + return 0; +} + +static inline void nitrox_creq_set_orh(struct nitrox_kcrypt_request *nkreq) +{ + struct se_crypto_request *creq = &nkreq->creq; + + creq->orh = (u64 *)(nkreq->dst); + set_orh_value(creq->orh); +} + +static inline void nitrox_creq_set_comp(struct nitrox_kcrypt_request *nkreq) +{ + struct se_crypto_request *creq = &nkreq->creq; + + creq->comp = (u64 *)(nkreq->dst + ORH_HLEN); + set_comp_value(creq->comp); +} + +static inline struct scatterlist *nitrox_creq_dst_sg(char *dst) +{ + return (struct scatterlist *)(dst + ORH_HLEN + COMP_HLEN); +} + +static inline void nitrox_creq_set_dst_sg(struct nitrox_kcrypt_request *nkreq, + int nents, int ivsize, + struct scatterlist *dst, int buflen) +{ + struct se_crypto_request *creq = &nkreq->creq; + struct scatterlist *sg; + char *iv = nkreq->src; + + creq->dst = nitrox_creq_dst_sg(nkreq->dst); + sg = creq->dst; + sg_init_table(sg, nents); + + /* Output format: + * +-----+----+----------------+-----------------+ + * | ORH | IV | DST sg entries | COMPLETION Bytes| + * +-----+----+----------------+-----------------+ + */ + + /* ORH */ + sg = create_single_sg(sg, creq->orh, ORH_HLEN); + /* IV */ + sg = create_single_sg(sg, iv, ivsize); + /* DST entries */ + sg = create_multi_sg(sg, dst, buflen); + /* COMPLETION Bytes */ + create_single_sg(sg, creq->comp, COMP_HLEN); +} + #endif /* __NITROX_REQ_H */ diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c index 3987cd84c033..4c97478d44bd 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c @@ -13,7 +13,6 @@ #define FDATA_SIZE 32 /* Base destination port for the solicited requests */ #define SOLICIT_BASE_DPORT 256 -#define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL #define REQ_NOT_POSTED 1 #define REQ_BACKLOG 2 @@ -52,58 +51,26 @@ static inline int incr_index(int index, int count, int max) return index; } -/** - * dma_free_sglist - unmap and free the sg lists. 
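Worked accounting for the two layouts above (a sketch of the arithmetic, not driver code): for a request whose source and destination each map N scatterlist entries,

/* src buffer (alloc_src_req_buf): ivsize bytes of IV, then N + 1 SG
 *   slots                       ->  IV | src[0..N-1]
 * dst buffer (alloc_dst_req_buf): ORH_HLEN + COMP_HLEN bytes, then
 *   N + 3 SG slots              ->  ORH | IV | dst[0..N-1] | COMPLETION
 *
 * Both the ORH and COMPLETION words start out as PENDING_SIG (all
 * ones); the engine overwrites them on completion, and the low byte
 * of the ORH carries the error code the response path inspects.
 */

which is exactly the nents = sg_nents(...) + 1 and sg_nents(...) + 3 arithmetic used by alloc_src_sglist() and alloc_dst_sglist() in nitrox_skcipher.c further down.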
- * @ndev: N5 device - * @sgtbl: SG table - */ static void softreq_unmap_sgbufs(struct nitrox_softreq *sr) { struct nitrox_device *ndev = sr->ndev; struct device *dev = DEV(ndev); - struct nitrox_sglist *sglist; - - /* unmap in sgbuf */ - sglist = sr->in.sglist; - if (!sglist) - goto out_unmap; - - /* unmap iv */ - dma_unmap_single(dev, sglist->dma, sglist->len, DMA_BIDIRECTIONAL); - /* unmpa src sglist */ - dma_unmap_sg(dev, sr->in.buf, (sr->in.map_bufs_cnt - 1), sr->in.dir); - /* unamp gather component */ - dma_unmap_single(dev, sr->in.dma, sr->in.len, DMA_TO_DEVICE); - kfree(sr->in.sglist); + + + dma_unmap_sg(dev, sr->in.sg, sr->in.sgmap_cnt, DMA_BIDIRECTIONAL); + dma_unmap_single(dev, sr->in.sgcomp_dma, sr->in.sgcomp_len, + DMA_TO_DEVICE); kfree(sr->in.sgcomp); - sr->in.sglist = NULL; - sr->in.buf = NULL; - sr->in.map_bufs_cnt = 0; - -out_unmap: - /* unmap out sgbuf */ - sglist = sr->out.sglist; - if (!sglist) - return; - - /* unmap orh */ - dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir); - - /* unmap dst sglist */ - if (!sr->inplace) { - dma_unmap_sg(dev, sr->out.buf, (sr->out.map_bufs_cnt - 3), - sr->out.dir); - } - /* unmap completion */ - dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir); + sr->in.sg = NULL; + sr->in.sgmap_cnt = 0; - /* unmap scatter component */ - dma_unmap_single(dev, sr->out.dma, sr->out.len, DMA_TO_DEVICE); - kfree(sr->out.sglist); + dma_unmap_sg(dev, sr->out.sg, sr->out.sgmap_cnt, + DMA_BIDIRECTIONAL); + dma_unmap_single(dev, sr->out.sgcomp_dma, sr->out.sgcomp_len, + DMA_TO_DEVICE); kfree(sr->out.sgcomp); - sr->out.sglist = NULL; - sr->out.buf = NULL; - sr->out.map_bufs_cnt = 0; + sr->out.sg = NULL; + sr->out.sgmap_cnt = 0; } static void softreq_destroy(struct nitrox_softreq *sr) @@ -116,7 +83,7 @@ static void softreq_destroy(struct nitrox_softreq *sr) * create_sg_component - create SG components for N5 device. 
* @sr: Request structure * @sgtbl: SG table - * @nr_comp: total number of components required + * @map_nents: number of dma mapped entries * * Component structure * @@ -140,7 +107,7 @@ static int create_sg_component(struct nitrox_softreq *sr, { struct nitrox_device *ndev = sr->ndev; struct nitrox_sgcomp *sgcomp; - struct nitrox_sglist *sglist; + struct scatterlist *sg; dma_addr_t dma; size_t sz_comp; int i, j, nr_sgcomp; @@ -154,17 +121,15 @@ static int create_sg_component(struct nitrox_softreq *sr, return -ENOMEM; sgtbl->sgcomp = sgcomp; - sgtbl->nr_sgcomp = nr_sgcomp; - sglist = sgtbl->sglist; + sg = sgtbl->sg; /* populate device sg component */ for (i = 0; i < nr_sgcomp; i++) { - for (j = 0; j < 4; j++) { - sgcomp->len[j] = cpu_to_be16(sglist->len); - sgcomp->dma[j] = cpu_to_be64(sglist->dma); - sglist++; + for (j = 0; j < 4 && sg; j++) { + sgcomp[i].len[j] = cpu_to_be16(sg_dma_len(sg)); + sgcomp[i].dma[j] = cpu_to_be64(sg_dma_address(sg)); + sg = sg_next(sg); } - sgcomp++; } /* map the device sg component */ dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE); @@ -174,8 +139,8 @@ static int create_sg_component(struct nitrox_softreq *sr, return -ENOMEM; } - sgtbl->dma = dma; - sgtbl->len = sz_comp; + sgtbl->sgcomp_dma = dma; + sgtbl->sgcomp_len = sz_comp; return 0; } @@ -193,66 +158,27 @@ static int dma_map_inbufs(struct nitrox_softreq *sr, { struct device *dev = DEV(sr->ndev); struct scatterlist *sg = req->src; - struct nitrox_sglist *glist; int i, nents, ret = 0; - dma_addr_t dma; - size_t sz; - - nents = sg_nents(req->src); - - /* creater gather list IV and src entries */ - sz = roundup((1 + nents), 4) * sizeof(*glist); - glist = kzalloc(sz, sr->gfp); - if (!glist) - return -ENOMEM; - sr->in.sglist = glist; - /* map IV */ - dma = dma_map_single(dev, &req->iv, req->ivsize, DMA_BIDIRECTIONAL); - if (dma_mapping_error(dev, dma)) { - ret = -EINVAL; - goto iv_map_err; - } + nents = dma_map_sg(dev, req->src, sg_nents(req->src), + DMA_BIDIRECTIONAL); + if (!nents) + return -EINVAL; - sr->in.dir = (req->src == req->dst) ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE; - /* map src entries */ - nents = dma_map_sg(dev, req->src, nents, sr->in.dir); - if (!nents) { - ret = -EINVAL; - goto src_map_err; - } - sr->in.buf = req->src; - - /* store the mappings */ - glist->len = req->ivsize; - glist->dma = dma; - glist++; - sr->in.total_bytes += req->ivsize; - - for_each_sg(req->src, sg, nents, i) { - glist->len = sg_dma_len(sg); - glist->dma = sg_dma_address(sg); - sr->in.total_bytes += glist->len; - glist++; - } - /* roundup map count to align with entires in sg component */ - sr->in.map_bufs_cnt = (1 + nents); + for_each_sg(req->src, sg, nents, i) + sr->in.total_bytes += sg_dma_len(sg); - /* create NITROX gather component */ - ret = create_sg_component(sr, &sr->in, sr->in.map_bufs_cnt); + sr->in.sg = req->src; + sr->in.sgmap_cnt = nents; + ret = create_sg_component(sr, &sr->in, sr->in.sgmap_cnt); if (ret) goto incomp_err; return 0; incomp_err: - dma_unmap_sg(dev, req->src, nents, sr->in.dir); - sr->in.map_bufs_cnt = 0; -src_map_err: - dma_unmap_single(dev, dma, req->ivsize, DMA_BIDIRECTIONAL); -iv_map_err: - kfree(sr->in.sglist); - sr->in.sglist = NULL; + dma_unmap_sg(dev, req->src, nents, DMA_BIDIRECTIONAL); + sr->in.sgmap_cnt = 0; return ret; } @@ -260,104 +186,25 @@ static int dma_map_outbufs(struct nitrox_softreq *sr, struct se_crypto_request *req) { struct device *dev = DEV(sr->ndev); - struct nitrox_sglist *glist = sr->in.sglist; - struct nitrox_sglist *slist; - struct scatterlist *sg; - int i, nents, map_bufs_cnt, ret = 0; - size_t sz; - - nents = sg_nents(req->dst); - - /* create scatter list ORH, IV, dst entries and Completion header */ - sz = roundup((3 + nents), 4) * sizeof(*slist); - slist = kzalloc(sz, sr->gfp); - if (!slist) - return -ENOMEM; - - sr->out.sglist = slist; - sr->out.dir = DMA_BIDIRECTIONAL; - /* map ORH */ - sr->resp.orh_dma = dma_map_single(dev, &sr->resp.orh, ORH_HLEN, - sr->out.dir); - if (dma_mapping_error(dev, sr->resp.orh_dma)) { - ret = -EINVAL; - goto orh_map_err; - } - - /* map completion */ - sr->resp.completion_dma = dma_map_single(dev, &sr->resp.completion, - COMP_HLEN, sr->out.dir); - if (dma_mapping_error(dev, sr->resp.completion_dma)) { - ret = -EINVAL; - goto compl_map_err; - } - - sr->inplace = (req->src == req->dst) ? 
true : false; - /* out place */ - if (!sr->inplace) { - nents = dma_map_sg(dev, req->dst, nents, sr->out.dir); - if (!nents) { - ret = -EINVAL; - goto dst_map_err; - } - } - sr->out.buf = req->dst; - - /* store the mappings */ - /* orh */ - slist->len = ORH_HLEN; - slist->dma = sr->resp.orh_dma; - slist++; - - /* copy the glist mappings */ - if (sr->inplace) { - nents = sr->in.map_bufs_cnt - 1; - map_bufs_cnt = sr->in.map_bufs_cnt; - while (map_bufs_cnt--) { - slist->len = glist->len; - slist->dma = glist->dma; - slist++; - glist++; - } - } else { - /* copy iv mapping */ - slist->len = glist->len; - slist->dma = glist->dma; - slist++; - /* copy remaining maps */ - for_each_sg(req->dst, sg, nents, i) { - slist->len = sg_dma_len(sg); - slist->dma = sg_dma_address(sg); - slist++; - } - } - - /* completion */ - slist->len = COMP_HLEN; - slist->dma = sr->resp.completion_dma; + int nents, ret = 0; - sr->out.map_bufs_cnt = (3 + nents); + nents = dma_map_sg(dev, req->dst, sg_nents(req->dst), + DMA_BIDIRECTIONAL); + if (!nents) + return -EINVAL; - ret = create_sg_component(sr, &sr->out, sr->out.map_bufs_cnt); + sr->out.sg = req->dst; + sr->out.sgmap_cnt = nents; + ret = create_sg_component(sr, &sr->out, sr->out.sgmap_cnt); if (ret) goto outcomp_map_err; return 0; outcomp_map_err: - if (!sr->inplace) - dma_unmap_sg(dev, req->dst, nents, sr->out.dir); - sr->out.map_bufs_cnt = 0; - sr->out.buf = NULL; -dst_map_err: - dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir); - sr->resp.completion_dma = 0; -compl_map_err: - dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir); - sr->resp.orh_dma = 0; -orh_map_err: - kfree(sr->out.sglist); - sr->out.sglist = NULL; + dma_unmap_sg(dev, req->dst, nents, DMA_BIDIRECTIONAL); + sr->out.sgmap_cnt = 0; + sr->out.sg = NULL; return ret; } @@ -422,6 +269,8 @@ static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen) smp_mb__after_atomic(); return true; } + /* sync with other cpus */ + smp_mb__after_atomic(); return false; } @@ -477,8 +326,6 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq) spin_lock_bh(&cmdq->backlog_qlock); list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) { - struct skcipher_request *skreq; - /* submit until space available */ if (unlikely(cmdq_full(cmdq, ndev->qlen))) { ret = -ENOSPC; @@ -490,12 +337,8 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq) /* sync with other cpus */ smp_mb__after_atomic(); - skreq = sr->skreq; /* post the command */ post_se_instr(sr, cmdq); - - /* backlog requests are posted, wakeup with -EINPROGRESS */ - skcipher_request_complete(skreq, -EINPROGRESS); } spin_unlock_bh(&cmdq->backlog_qlock); @@ -518,7 +361,7 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr) } /* add to backlog list */ backlog_list_add(sr, cmdq); - return -EBUSY; + return -EINPROGRESS; } post_se_instr(sr, cmdq); @@ -535,7 +378,7 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr) int nitrox_process_se_request(struct nitrox_device *ndev, struct se_crypto_request *req, completion_t callback, - struct skcipher_request *skreq) + void *cb_arg) { struct nitrox_softreq *sr; dma_addr_t ctx_handle = 0; @@ -552,12 +395,12 @@ int nitrox_process_se_request(struct nitrox_device *ndev, sr->flags = req->flags; sr->gfp = req->gfp; sr->callback = callback; - sr->skreq = skreq; + sr->cb_arg = cb_arg; atomic_set(&sr->status, REQ_NOT_POSTED); - WRITE_ONCE(sr->resp.orh, PENDING_SIG); - WRITE_ONCE(sr->resp.completion, PENDING_SIG); + sr->resp.orh = req->orh; + sr->resp.completion = 
req->comp; ret = softreq_map_iobuf(sr, req); if (ret) { @@ -598,13 +441,13 @@ int nitrox_process_se_request(struct nitrox_device *ndev, /* fill the packet instruction */ /* word 0 */ - sr->instr.dptr0 = cpu_to_be64(sr->in.dma); + sr->instr.dptr0 = cpu_to_be64(sr->in.sgcomp_dma); /* word 1 */ sr->instr.ih.value = 0; sr->instr.ih.s.g = 1; - sr->instr.ih.s.gsz = sr->in.map_bufs_cnt; - sr->instr.ih.s.ssz = sr->out.map_bufs_cnt; + sr->instr.ih.s.gsz = sr->in.sgmap_cnt; + sr->instr.ih.s.ssz = sr->out.sgmap_cnt; sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr); sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes; sr->instr.ih.value = cpu_to_be64(sr->instr.ih.value); @@ -626,11 +469,11 @@ int nitrox_process_se_request(struct nitrox_device *ndev, /* word 4 */ sr->instr.slc.value[0] = 0; - sr->instr.slc.s.ssz = sr->out.map_bufs_cnt; + sr->instr.slc.s.ssz = sr->out.sgmap_cnt; sr->instr.slc.value[0] = cpu_to_be64(sr->instr.slc.value[0]); /* word 5 */ - sr->instr.slc.s.rptr = cpu_to_be64(sr->out.dma); + sr->instr.slc.s.rptr = cpu_to_be64(sr->out.sgcomp_dma); /* * No conversion for front data, @@ -664,6 +507,24 @@ void backlog_qflush_work(struct work_struct *work) post_backlog_cmds(cmdq); } +static bool sr_completed(struct nitrox_softreq *sr) +{ + u64 orh = READ_ONCE(*sr->resp.orh); + unsigned long timeout = jiffies + msecs_to_jiffies(1); + + if ((orh != PENDING_SIG) && (orh & 0xff)) + return true; + + while (READ_ONCE(*sr->resp.completion) == PENDING_SIG) { + if (time_after(jiffies, timeout)) { + pr_err("comp not done\n"); + return false; + } + } + + return true; +} + /** * process_request_list - process completed requests * @ndev: N5 device @@ -675,9 +536,9 @@ static void process_response_list(struct nitrox_cmdq *cmdq) { struct nitrox_device *ndev = cmdq->ndev; struct nitrox_softreq *sr; - struct skcipher_request *skreq; - completion_t callback; int req_completed = 0, err = 0, budget; + completion_t callback; + void *cb_arg; /* check all pending requests */ budget = atomic_read(&cmdq->pending_count); @@ -691,13 +552,13 @@ static void process_response_list(struct nitrox_cmdq *cmdq) break; /* check orh and completion bytes updates */ - if (READ_ONCE(sr->resp.orh) == READ_ONCE(sr->resp.completion)) { + if (!sr_completed(sr)) { /* request not completed, check for timeout */ if (!cmd_timeout(sr->tstamp, ndev->timeout)) break; dev_err_ratelimited(DEV(ndev), "Request timeout, orh 0x%016llx\n", - READ_ONCE(sr->resp.orh)); + READ_ONCE(*sr->resp.orh)); } atomic_dec(&cmdq->pending_count); atomic64_inc(&ndev->stats.completed); @@ -705,16 +566,13 @@ static void process_response_list(struct nitrox_cmdq *cmdq) smp_mb__after_atomic(); /* remove from response list */ response_list_del(sr, cmdq); - - callback = sr->callback; - skreq = sr->skreq; - /* ORH error code */ - err = READ_ONCE(sr->resp.orh) & 0xff; + err = READ_ONCE(*sr->resp.orh) & 0xff; + callback = sr->callback; + cb_arg = sr->cb_arg; softreq_destroy(sr); - if (callback) - callback(skreq, err); + callback(cb_arg, err); req_completed++; } diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c new file mode 100644 index 000000000000..d4935d6cefdd --- /dev/null +++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c @@ -0,0 +1,498 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/crypto.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/printk.h> + +#include <crypto/aes.h> +#include <crypto/skcipher.h> +#include <crypto/ctr.h> +#include <crypto/des.h> +#include 
<crypto/xts.h> + +#include "nitrox_dev.h" +#include "nitrox_common.h" +#include "nitrox_req.h" + +struct nitrox_cipher { + const char *name; + enum flexi_cipher value; +}; + +/** + * supported cipher list + */ +static const struct nitrox_cipher flexi_cipher_table[] = { + { "null", CIPHER_NULL }, + { "cbc(des3_ede)", CIPHER_3DES_CBC }, + { "ecb(des3_ede)", CIPHER_3DES_ECB }, + { "cbc(aes)", CIPHER_AES_CBC }, + { "ecb(aes)", CIPHER_AES_ECB }, + { "cfb(aes)", CIPHER_AES_CFB }, + { "rfc3686(ctr(aes))", CIPHER_AES_CTR }, + { "xts(aes)", CIPHER_AES_XTS }, + { "cts(cbc(aes))", CIPHER_AES_CBC_CTS }, + { NULL, CIPHER_INVALID } +}; + +static enum flexi_cipher flexi_cipher_type(const char *name) +{ + const struct nitrox_cipher *cipher = flexi_cipher_table; + + while (cipher->name) { + if (!strcmp(cipher->name, name)) + break; + cipher++; + } + return cipher->value; +} + +static int nitrox_skcipher_init(struct crypto_skcipher *tfm) +{ + struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm); + struct crypto_ctx_hdr *chdr; + + /* get the first device */ + nctx->ndev = nitrox_get_first_device(); + if (!nctx->ndev) + return -ENODEV; + + /* allocate nitrox crypto context */ + chdr = crypto_alloc_context(nctx->ndev); + if (!chdr) { + nitrox_put_device(nctx->ndev); + return -ENOMEM; + } + nctx->chdr = chdr; + nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr + + sizeof(struct ctx_hdr)); + crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(tfm) + + sizeof(struct nitrox_kcrypt_request)); + return 0; +} + +static void nitrox_skcipher_exit(struct crypto_skcipher *tfm) +{ + struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm); + + /* free the nitrox crypto context */ + if (nctx->u.ctx_handle) { + struct flexi_crypto_context *fctx = nctx->u.fctx; + + memzero_explicit(&fctx->crypto, sizeof(struct crypto_keys)); + memzero_explicit(&fctx->auth, sizeof(struct auth_keys)); + crypto_free_context((void *)nctx->chdr); + } + nitrox_put_device(nctx->ndev); + + nctx->u.ctx_handle = 0; + nctx->ndev = NULL; +} + +static inline int nitrox_skcipher_setkey(struct crypto_skcipher *cipher, + int aes_keylen, const u8 *key, + unsigned int keylen) +{ + struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); + struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm); + struct flexi_crypto_context *fctx; + union fc_ctx_flags *flags; + enum flexi_cipher cipher_type; + const char *name; + + name = crypto_tfm_alg_name(tfm); + cipher_type = flexi_cipher_type(name); + if (unlikely(cipher_type == CIPHER_INVALID)) { + pr_err("unsupported cipher: %s\n", name); + return -EINVAL; + } + + /* fill crypto context */ + fctx = nctx->u.fctx; + flags = &fctx->flags; + flags->f = 0; + flags->w0.cipher_type = cipher_type; + flags->w0.aes_keylen = aes_keylen; + flags->w0.iv_source = IV_FROM_DPTR; + flags->f = cpu_to_be64(*(u64 *)&flags->w0); + /* copy the key to context */ + memcpy(fctx->crypto.u.key, key, keylen); + + return 0; +} + +static int nitrox_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, + unsigned int keylen) +{ + int aes_keylen; + + aes_keylen = flexi_aes_keylen(keylen); + if (aes_keylen < 0) { + crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); +} + +static int alloc_src_sglist(struct skcipher_request *skreq, int ivsize) +{ + struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); + int nents = sg_nents(skreq->src) + 1; + int ret; + + /* Allocate buffer to hold IV and input scatterlist array */ + ret = 
alloc_src_req_buf(nkreq, nents, ivsize); + if (ret) + return ret; + + nitrox_creq_copy_iv(nkreq->src, skreq->iv, ivsize); + nitrox_creq_set_src_sg(nkreq, nents, ivsize, skreq->src, + skreq->cryptlen); + + return 0; +} + +static int alloc_dst_sglist(struct skcipher_request *skreq, int ivsize) +{ + struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); + int nents = sg_nents(skreq->dst) + 3; + int ret; + + /* Allocate buffer to hold ORH, COMPLETION and output scatterlist + * array + */ + ret = alloc_dst_req_buf(nkreq, nents); + if (ret) + return ret; + + nitrox_creq_set_orh(nkreq); + nitrox_creq_set_comp(nkreq); + nitrox_creq_set_dst_sg(nkreq, nents, ivsize, skreq->dst, + skreq->cryptlen); + + return 0; +} + +static void free_src_sglist(struct skcipher_request *skreq) +{ + struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); + + kfree(nkreq->src); +} + +static void free_dst_sglist(struct skcipher_request *skreq) +{ + struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); + + kfree(nkreq->dst); +} + +static void nitrox_skcipher_callback(void *arg, int err) +{ + struct skcipher_request *skreq = arg; + + free_src_sglist(skreq); + free_dst_sglist(skreq); + if (err) { + pr_err_ratelimited("request failed status 0x%0x\n", err); + err = -EINVAL; + } + + skcipher_request_complete(skreq, err); +} + +static int nitrox_skcipher_crypt(struct skcipher_request *skreq, bool enc) +{ + struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(skreq); + struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(cipher); + struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); + int ivsize = crypto_skcipher_ivsize(cipher); + struct se_crypto_request *creq; + int ret; + + creq = &nkreq->creq; + creq->flags = skreq->base.flags; + creq->gfp = (skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC; + + /* fill the request */ + creq->ctrl.value = 0; + creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC; + creq->ctrl.s.arg = (enc ? 
ENCRYPT : DECRYPT); + /* param0: length of the data to be encrypted */ + creq->gph.param0 = cpu_to_be16(skreq->cryptlen); + creq->gph.param1 = 0; + /* param2: encryption data offset */ + creq->gph.param2 = cpu_to_be16(ivsize); + creq->gph.param3 = 0; + + creq->ctx_handle = nctx->u.ctx_handle; + creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context); + + ret = alloc_src_sglist(skreq, ivsize); + if (ret) + return ret; + + ret = alloc_dst_sglist(skreq, ivsize); + if (ret) { + free_src_sglist(skreq); + return ret; + } + + /* send the crypto request */ + return nitrox_process_se_request(nctx->ndev, creq, + nitrox_skcipher_callback, skreq); +} + +static int nitrox_aes_encrypt(struct skcipher_request *skreq) +{ + return nitrox_skcipher_crypt(skreq, true); +} + +static int nitrox_aes_decrypt(struct skcipher_request *skreq) +{ + return nitrox_skcipher_crypt(skreq, false); +} + +static int nitrox_3des_setkey(struct crypto_skcipher *cipher, + const u8 *key, unsigned int keylen) +{ + if (keylen != DES3_EDE_KEY_SIZE) { + crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + return nitrox_skcipher_setkey(cipher, 0, key, keylen); +} + +static int nitrox_3des_encrypt(struct skcipher_request *skreq) +{ + return nitrox_skcipher_crypt(skreq, true); +} + +static int nitrox_3des_decrypt(struct skcipher_request *skreq) +{ + return nitrox_skcipher_crypt(skreq, false); +} + +static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher, + const u8 *key, unsigned int keylen) +{ + struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); + struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm); + struct flexi_crypto_context *fctx; + int aes_keylen, ret; + + ret = xts_check_key(tfm, key, keylen); + if (ret) + return ret; + + keylen /= 2; + + aes_keylen = flexi_aes_keylen(keylen); + if (aes_keylen < 0) { + crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + fctx = nctx->u.fctx; + /* copy KEY2 */ + memcpy(fctx->auth.u.key2, (key + keylen), keylen); + + return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); +} + +static int nitrox_aes_ctr_rfc3686_setkey(struct crypto_skcipher *cipher, + const u8 *key, unsigned int keylen) +{ + struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); + struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm); + struct flexi_crypto_context *fctx; + int aes_keylen; + + if (keylen < CTR_RFC3686_NONCE_SIZE) + return -EINVAL; + + fctx = nctx->u.fctx; + + memcpy(fctx->crypto.iv, key + (keylen - CTR_RFC3686_NONCE_SIZE), + CTR_RFC3686_NONCE_SIZE); + + keylen -= CTR_RFC3686_NONCE_SIZE; + + aes_keylen = flexi_aes_keylen(keylen); + if (aes_keylen < 0) { + crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); +} + +static struct skcipher_alg nitrox_skciphers[] = { { + .base = { + .cra_name = "cbc(aes)", + .cra_driver_name = "n5_cbc(aes)", + .cra_priority = PRIO, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = nitrox_aes_setkey, + .encrypt = nitrox_aes_encrypt, + .decrypt = nitrox_aes_decrypt, + .init = nitrox_skcipher_init, + .exit = nitrox_skcipher_exit, +}, { + .base = { + .cra_name = "ecb(aes)", + .cra_driver_name = "n5_ecb(aes)", + .cra_priority = PRIO, + .cra_flags = CRYPTO_ALG_ASYNC, + 
.cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = nitrox_aes_setkey, + .encrypt = nitrox_aes_encrypt, + .decrypt = nitrox_aes_decrypt, + .init = nitrox_skcipher_init, + .exit = nitrox_skcipher_exit, +}, { + .base = { + .cra_name = "cfb(aes)", + .cra_driver_name = "n5_cfb(aes)", + .cra_priority = PRIO, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = nitrox_aes_setkey, + .encrypt = nitrox_aes_encrypt, + .decrypt = nitrox_aes_decrypt, + .init = nitrox_skcipher_init, + .exit = nitrox_skcipher_exit, +}, { + .base = { + .cra_name = "xts(aes)", + .cra_driver_name = "n5_xts(aes)", + .cra_priority = PRIO, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + .min_keysize = 2 * AES_MIN_KEY_SIZE, + .max_keysize = 2 * AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = nitrox_aes_xts_setkey, + .encrypt = nitrox_aes_encrypt, + .decrypt = nitrox_aes_decrypt, + .init = nitrox_skcipher_init, + .exit = nitrox_skcipher_exit, +}, { + .base = { + .cra_name = "rfc3686(ctr(aes))", + .cra_driver_name = "n5_rfc3686(ctr(aes))", + .cra_priority = PRIO, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, + .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, + .ivsize = CTR_RFC3686_IV_SIZE, + .init = nitrox_skcipher_init, + .exit = nitrox_skcipher_exit, + .setkey = nitrox_aes_ctr_rfc3686_setkey, + .encrypt = nitrox_aes_encrypt, + .decrypt = nitrox_aes_decrypt, +}, { + .base = { + .cra_name = "cts(cbc(aes))", + .cra_driver_name = "n5_cts(cbc(aes))", + .cra_priority = PRIO, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + }, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = nitrox_aes_setkey, + .encrypt = nitrox_aes_encrypt, + .decrypt = nitrox_aes_decrypt, + .init = nitrox_skcipher_init, + .exit = nitrox_skcipher_exit, +}, { + .base = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "n5_cbc(des3_ede)", + .cra_priority = PRIO, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setkey = nitrox_3des_setkey, + .encrypt = nitrox_3des_encrypt, + .decrypt = nitrox_3des_decrypt, + .init = nitrox_skcipher_init, + .exit = nitrox_skcipher_exit, +}, { + .base = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "n5_ecb(des3_ede)", + .cra_priority = PRIO, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + 
.min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setkey = nitrox_3des_setkey, + .encrypt = nitrox_3des_encrypt, + .decrypt = nitrox_3des_decrypt, + .init = nitrox_skcipher_init, + .exit = nitrox_skcipher_exit, +} + +}; + +int nitrox_register_skciphers(void) +{ + return crypto_register_skciphers(nitrox_skciphers, + ARRAY_SIZE(nitrox_skciphers)); +} + +void nitrox_unregister_skciphers(void) +{ + crypto_unregister_skciphers(nitrox_skciphers, + ARRAY_SIZE(nitrox_skciphers)); +} diff --git a/drivers/crypto/cavium/nitrox/nitrox_sriov.c b/drivers/crypto/cavium/nitrox/nitrox_sriov.c index 30c0aa874583..bf439d8256ba 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_sriov.c +++ b/drivers/crypto/cavium/nitrox/nitrox_sriov.c @@ -6,7 +6,12 @@ #include "nitrox_hal.h" #include "nitrox_common.h" #include "nitrox_isr.h" +#include "nitrox_mbx.h" +/** + * num_vfs_valid - validate VF count + * @num_vfs: number of VF(s) + */ static inline bool num_vfs_valid(int num_vfs) { bool valid = false; @@ -48,7 +53,32 @@ static inline enum vf_mode num_vfs_to_mode(int num_vfs) return mode; } -static void pf_sriov_cleanup(struct nitrox_device *ndev) +static inline int vf_mode_to_nr_queues(enum vf_mode mode) +{ + int nr_queues = 0; + + switch (mode) { + case __NDEV_MODE_PF: + nr_queues = MAX_PF_QUEUES; + break; + case __NDEV_MODE_VF16: + nr_queues = 8; + break; + case __NDEV_MODE_VF32: + nr_queues = 4; + break; + case __NDEV_MODE_VF64: + nr_queues = 2; + break; + case __NDEV_MODE_VF128: + nr_queues = 1; + break; + } + + return nr_queues; +} + +static void nitrox_pf_cleanup(struct nitrox_device *ndev) { /* PF has no queues in SR-IOV mode */ atomic_set(&ndev->state, __NDEV_NOT_READY); @@ -60,7 +90,11 @@ static void pf_sriov_cleanup(struct nitrox_device *ndev) nitrox_common_sw_cleanup(ndev); } -static int pf_sriov_init(struct nitrox_device *ndev) +/** + * nitrox_pf_reinit - re-initialize PF resources once SR-IOV is disabled + * @ndev: NITROX device + */ +static int nitrox_pf_reinit(struct nitrox_device *ndev) { int err; @@ -86,6 +120,33 @@ static int pf_sriov_init(struct nitrox_device *ndev) return nitrox_crypto_register(); } +static void nitrox_sriov_cleanup(struct nitrox_device *ndev) +{ + /* unregister interrupts for PF in SR-IOV */ + nitrox_sriov_unregister_interrupts(ndev); + nitrox_mbox_cleanup(ndev); +} + +static int nitrox_sriov_init(struct nitrox_device *ndev) +{ + int ret; + + /* register interrupts for PF in SR-IOV */ + ret = nitrox_sriov_register_interupts(ndev); + if (ret) + return ret; + + ret = nitrox_mbox_init(ndev); + if (ret) + goto sriov_init_fail; + + return 0; + +sriov_init_fail: + nitrox_sriov_cleanup(ndev); + return ret; +} + static int nitrox_sriov_enable(struct pci_dev *pdev, int num_vfs) { struct nitrox_device *ndev = pci_get_drvdata(pdev); @@ -106,17 +167,32 @@ static int nitrox_sriov_enable(struct pci_dev *pdev, int num_vfs) } dev_info(DEV(ndev), "Enabled VF(s) %d\n", num_vfs); - ndev->num_vfs = num_vfs; ndev->mode = num_vfs_to_mode(num_vfs); + ndev->iov.num_vfs = num_vfs; + ndev->iov.max_vf_queues = vf_mode_to_nr_queues(ndev->mode); /* set bit in flags */ set_bit(__NDEV_SRIOV_BIT, &ndev->flags); /* cleanup PF resources */ - pf_sriov_cleanup(ndev); + nitrox_pf_cleanup(ndev); - config_nps_core_vfcfg_mode(ndev, ndev->mode); + /* PF SR-IOV mode initialization */ + err = nitrox_sriov_init(ndev); + if (err) + goto iov_fail; + config_nps_core_vfcfg_mode(ndev, ndev->mode); return num_vfs; + +iov_fail: + pci_disable_sriov(pdev); + /* clear bit in flags 
*/ + clear_bit(__NDEV_SRIOV_BIT, &ndev->flags); + ndev->iov.num_vfs = 0; + ndev->mode = __NDEV_MODE_PF; + /* reset back to working mode in PF */ + nitrox_pf_reinit(ndev); + return err; } static int nitrox_sriov_disable(struct pci_dev *pdev) @@ -134,12 +210,16 @@ static int nitrox_sriov_disable(struct pci_dev *pdev) /* clear bit in flags */ clear_bit(__NDEV_SRIOV_BIT, &ndev->flags); - ndev->num_vfs = 0; + ndev->iov.num_vfs = 0; + ndev->iov.max_vf_queues = 0; ndev->mode = __NDEV_MODE_PF; + /* cleanup PF SR-IOV resources */ + nitrox_sriov_cleanup(ndev); + config_nps_core_vfcfg_mode(ndev, ndev->mode); - return pf_sriov_init(ndev); + return nitrox_pf_reinit(ndev); } int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs) diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c index be055b9547f6..a8447a3cf366 100644 --- a/drivers/crypto/cavium/zip/zip_main.c +++ b/drivers/crypto/cavium/zip/zip_main.c @@ -351,6 +351,7 @@ static struct pci_driver zip_driver = { static struct crypto_alg zip_comp_deflate = { .cra_name = "deflate", + .cra_driver_name = "deflate-cavium", .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, .cra_ctxsize = sizeof(struct zip_kernel_ctx), .cra_priority = 300, @@ -365,6 +366,7 @@ static struct crypto_alg zip_comp_deflate = { static struct crypto_alg zip_comp_lzs = { .cra_name = "lzs", + .cra_driver_name = "lzs-cavium", .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, .cra_ctxsize = sizeof(struct zip_kernel_ctx), .cra_priority = 300, @@ -384,7 +386,7 @@ static struct scomp_alg zip_scomp_deflate = { .decompress = zip_scomp_decompress, .base = { .cra_name = "deflate", - .cra_driver_name = "deflate-scomp", + .cra_driver_name = "deflate-scomp-cavium", .cra_module = THIS_MODULE, .cra_priority = 300, } @@ -397,7 +399,7 @@ static struct scomp_alg zip_scomp_lzs = { .decompress = zip_scomp_decompress, .base = { .cra_name = "lzs", - .cra_driver_name = "lzs-scomp", + .cra_driver_name = "lzs-scomp-cavium", .cra_module = THIS_MODULE, .cra_priority = 300, } @@ -618,41 +620,23 @@ static const struct file_operations zip_regs_fops = { /* Root directory for thunderx_zip debugfs entry */ static struct dentry *zip_debugfs_root; -static int __init zip_debugfs_init(void) +static void __init zip_debugfs_init(void) { - struct dentry *zip_stats, *zip_clear, *zip_regs; - if (!debugfs_initialized()) - return -ENODEV; + return; zip_debugfs_root = debugfs_create_dir("thunderx_zip", NULL); - if (!zip_debugfs_root) - return -ENOMEM; /* Creating files for entries inside thunderx_zip directory */ - zip_stats = debugfs_create_file("zip_stats", 0444, - zip_debugfs_root, - NULL, &zip_stats_fops); - if (!zip_stats) - goto failed_to_create; - - zip_clear = debugfs_create_file("zip_clear", 0444, - zip_debugfs_root, - NULL, &zip_clear_fops); - if (!zip_clear) - goto failed_to_create; - - zip_regs = debugfs_create_file("zip_regs", 0444, - zip_debugfs_root, - NULL, &zip_regs_fops); - if (!zip_regs) - goto failed_to_create; + debugfs_create_file("zip_stats", 0444, zip_debugfs_root, NULL, + &zip_stats_fops); - return 0; + debugfs_create_file("zip_clear", 0444, zip_debugfs_root, NULL, + &zip_clear_fops); + + debugfs_create_file("zip_regs", 0444, zip_debugfs_root, NULL, + &zip_regs_fops); -failed_to_create: - debugfs_remove_recursive(zip_debugfs_root); - return -ENOENT; } static void __exit zip_debugfs_exit(void) @@ -661,13 +645,8 @@ static void __exit zip_debugfs_exit(void) } #else -static int __init zip_debugfs_init(void) -{ - return 0; -} - +static void __init zip_debugfs_init(void) { } static void 
__exit zip_debugfs_exit(void) { } - #endif /* debugfs - end */ @@ -691,17 +670,10 @@ static int __init zip_init_module(void) } /* comp-decomp statistics are handled with debugfs interface */ - ret = zip_debugfs_init(); - if (ret < 0) { - zip_err("ZIP: debugfs initialization failed\n"); - goto err_crypto_unregister; - } + zip_debugfs_init(); return ret; -err_crypto_unregister: - zip_unregister_compression_device(); - err_pci_unregister: pci_unregister_driver(&zip_driver); return ret; diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c index 3c6fe57f91f8..f6e252c1d6fb 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) AES CMAC crypto API support * - * Copyright (C) 2013 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2018 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <thomas.lendacky@amd.com> * @@ -346,9 +346,7 @@ static int ccp_aes_cmac_cra_init(struct crypto_tfm *tfm) crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx)); - cipher_tfm = crypto_alloc_cipher("aes", 0, - CRYPTO_ALG_ASYNC | - CRYPTO_ALG_NEED_FALLBACK); + cipher_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(cipher_tfm)) { pr_warn("could not load aes cipher driver\n"); return PTR_ERR(cipher_tfm); diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c index ae87b741f9d5..c2ff551d215b 100644 --- a/drivers/crypto/ccp/ccp-crypto-des3.c +++ b/drivers/crypto/ccp/ccp-crypto-des3.c @@ -57,7 +57,7 @@ static int ccp_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key, if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && - (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { *flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c index 2ca64bb57d2e..10a61cd54fce 100644 --- a/drivers/crypto/ccp/ccp-crypto-sha.c +++ b/drivers/crypto/ccp/ccp-crypto-sha.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) SHA crypto API support * - * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2018 Advanced Micro Devices, Inc. 
* * Author: Tom Lendacky <thomas.lendacky@amd.com> * Author: Gary R Hook <gary.hook@amd.com> diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c index 1a734bd2070a..4bd26af7098d 100644 --- a/drivers/crypto/ccp/ccp-debugfs.c +++ b/drivers/crypto/ccp/ccp-debugfs.c @@ -286,10 +286,7 @@ void ccp5_debugfs_setup(struct ccp_device *ccp) { struct ccp_cmd_queue *cmd_q; char name[MAX_NAME_LEN + 1]; - struct dentry *debugfs_info; - struct dentry *debugfs_stats; struct dentry *debugfs_q_instance; - struct dentry *debugfs_q_stats; int i; if (!debugfs_initialized()) @@ -299,24 +296,14 @@ void ccp5_debugfs_setup(struct ccp_device *ccp) if (!ccp_debugfs_dir) ccp_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL); mutex_unlock(&ccp_debugfs_lock); - if (!ccp_debugfs_dir) - return; ccp->debugfs_instance = debugfs_create_dir(ccp->name, ccp_debugfs_dir); - if (!ccp->debugfs_instance) - goto err; - debugfs_info = debugfs_create_file("info", 0400, - ccp->debugfs_instance, ccp, - &ccp_debugfs_info_ops); - if (!debugfs_info) - goto err; + debugfs_create_file("info", 0400, ccp->debugfs_instance, ccp, + &ccp_debugfs_info_ops); - debugfs_stats = debugfs_create_file("stats", 0600, - ccp->debugfs_instance, ccp, - &ccp_debugfs_stats_ops); - if (!debugfs_stats) - goto err; + debugfs_create_file("stats", 0600, ccp->debugfs_instance, ccp, + &ccp_debugfs_stats_ops); for (i = 0; i < ccp->cmd_q_count; i++) { cmd_q = &ccp->cmd_q[i]; @@ -325,21 +312,12 @@ void ccp5_debugfs_setup(struct ccp_device *ccp) debugfs_q_instance = debugfs_create_dir(name, ccp->debugfs_instance); - if (!debugfs_q_instance) - goto err; - - debugfs_q_stats = - debugfs_create_file("stats", 0600, - debugfs_q_instance, cmd_q, - &ccp_debugfs_queue_ops); - if (!debugfs_q_stats) - goto err; + + debugfs_create_file("stats", 0600, debugfs_q_instance, cmd_q, + &ccp_debugfs_queue_ops); } return; - -err: - debugfs_remove_recursive(ccp->debugfs_instance); } void ccp5_debugfs_destroy(void) diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 44a4d2779b15..c9bfd4f439ce 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -822,9 +822,9 @@ static int ccp5_init(struct ccp_device *ccp) /* Page alignment satisfies our needs for N <= 128 */ BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); - cmd_q->qbase = dma_zalloc_coherent(dev, cmd_q->qsize, - &cmd_q->qbase_dma, - GFP_KERNEL); + cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize, + &cmd_q->qbase_dma, + GFP_KERNEL); if (!cmd_q->qbase) { dev_err(dev, "unable to allocate command queue\n"); ret = -ENOMEM; diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 0ea43cdeb05f..267a367bd076 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) driver * - * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2018 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <thomas.lendacky@amd.com> * Author: Gary R Hook <gary.hook@amd.com> diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index d64a78ccc03e..fadf859a14b8 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -1,7 +1,7 @@ /* * AMD Platform Security Processor (PSP) interface * - * Copyright (C) 2016-2017 Advanced Micro Devices, Inc. + * Copyright (C) 2016,2018 Advanced Micro Devices, Inc. 
* * Author: Brijesh Singh <brijesh.singh@amd.com> * @@ -364,7 +364,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp) goto cmd; /* allocate a physically contiguous buffer to store the CSR blob */ - if (!access_ok(VERIFY_WRITE, input.address, input.length) || + if (!access_ok(input.address, input.length) || input.length > SEV_FW_BLOB_MAX_SIZE) { ret = -EFAULT; goto e_free; @@ -437,6 +437,7 @@ static int sev_get_api_version(void) psp_master->api_major = status->api_major; psp_master->api_minor = status->api_minor; psp_master->build = status->build; + psp_master->sev_state = status->state; return 0; } @@ -644,14 +645,14 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp) /* Allocate a physically contiguous buffer to store the PDH blob. */ if ((input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) || - !access_ok(VERIFY_WRITE, input.pdh_cert_address, input.pdh_cert_len)) { + !access_ok(input.pdh_cert_address, input.pdh_cert_len)) { ret = -EFAULT; goto e_free; } /* Allocate a physically contiguous buffer to store the cert chain blob. */ if ((input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) || - !access_ok(VERIFY_WRITE, input.cert_chain_address, input.cert_chain_len)) { + !access_ok(input.cert_chain_address, input.cert_chain_len)) { ret = -EFAULT; goto e_free; } @@ -857,15 +858,15 @@ static int sev_misc_init(struct psp_device *psp) return 0; } -static int sev_init(struct psp_device *psp) +static int psp_check_sev_support(struct psp_device *psp) { /* Check if device supports SEV feature */ if (!(ioread32(psp->io_regs + psp->vdata->feature_reg) & 1)) { - dev_dbg(psp->dev, "device does not support SEV\n"); - return 1; + dev_dbg(psp->dev, "psp does not support SEV\n"); + return -ENODEV; } - return sev_misc_init(psp); + return 0; } int psp_dev_init(struct sp_device *sp) @@ -890,6 +891,10 @@ int psp_dev_init(struct sp_device *sp) psp->io_regs = sp->io_map; + ret = psp_check_sev_support(psp); + if (ret) + goto e_disable; + /* Disable and clear interrupts until ready */ iowrite32(0, psp->io_regs + psp->vdata->inten_reg); iowrite32(-1, psp->io_regs + psp->vdata->intsts_reg); @@ -901,7 +906,7 @@ int psp_dev_init(struct sp_device *sp) goto e_err; } - ret = sev_init(psp); + ret = sev_misc_init(psp); if (ret) goto e_irq; @@ -923,6 +928,11 @@ e_err: dev_notice(dev, "psp initialization failed\n"); return ret; + +e_disable: + sp->psp_data = NULL; + + return ret; } void psp_dev_destroy(struct sp_device *sp) @@ -964,6 +974,21 @@ void psp_pci_init(void) if (sev_get_api_version()) goto err; + /* + * If the platform is not in the UNINIT state, then firmware upgrade and/or + * platform INIT command will fail. These commands require the UNINIT state. + * + * In a normal boot we should never run into a case where the firmware + * is not in UNINIT state on boot. But in the case of a kexec boot, a reboot + * may not go through a typical shutdown sequence and may leave the + * firmware in INIT or WORKING state. + */ + + if (psp_master->sev_state != SEV_STATE_UNINIT) { + sev_platform_shutdown(NULL); + psp_master->sev_state = SEV_STATE_UNINIT; + } + if (SEV_VERSION_GREATER_OR_EQUAL(0, 15) && sev_update_firmware(psp_master->dev) == 0) sev_get_api_version(); diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h index 8b53a9674ecb..f5afeccf42a1 100644 --- a/drivers/crypto/ccp/psp-dev.h +++ b/drivers/crypto/ccp/psp-dev.h @@ -1,7 +1,7 @@ /* * AMD Platform Security Processor (PSP) interface driver * - * Copyright (C) 2017 Advanced Micro Devices, Inc. + * Copyright (C) 2017-2018 Advanced Micro Devices, Inc. 
* * Author: Brijesh Singh <brijesh.singh@amd.com> * diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c index e0459002eb71..b2879767fc98 100644 --- a/drivers/crypto/ccp/sp-dev.c +++ b/drivers/crypto/ccp/sp-dev.c @@ -1,7 +1,7 @@ /* * AMD Secure Processor driver * - * Copyright (C) 2017 Advanced Micro Devices, Inc. + * Copyright (C) 2017-2018 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <thomas.lendacky@amd.com> * Author: Gary R Hook <gary.hook@amd.com> diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h index 14398cad1625..5b0790025db3 100644 --- a/drivers/crypto/ccp/sp-dev.h +++ b/drivers/crypto/ccp/sp-dev.h @@ -1,7 +1,7 @@ /* * AMD Secure Processor driver * - * Copyright (C) 2017 Advanced Micro Devices, Inc. + * Copyright (C) 2017-2018 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <thomas.lendacky@amd.com> * Author: Gary R Hook <gary.hook@amd.com> diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index 7da93e9bebed..41bce0a3f4bb 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -1,7 +1,7 @@ /* * AMD Secure Processor device driver * - * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2018 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <thomas.lendacky@amd.com> * Author: Gary R Hook <gary.hook@amd.com> @@ -226,8 +226,6 @@ static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) goto e_err; - dev_notice(dev, "enabled\n"); - return 0; e_err: @@ -246,8 +244,6 @@ static void sp_pci_remove(struct pci_dev *pdev) sp_destroy(sp); sp_free_irqs(sp); - - dev_notice(dev, "disabled\n"); } #ifdef CONFIG_PM diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c index b75dc7db2d4a..d24228efbaaa 100644 --- a/drivers/crypto/ccp/sp-platform.c +++ b/drivers/crypto/ccp/sp-platform.c @@ -1,7 +1,7 @@ /* * AMD Secure Processor device driver * - * Copyright (C) 2014,2016 Advanced Micro Devices, Inc. + * Copyright (C) 2014,2018 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <thomas.lendacky@amd.com> * diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c index 01b82b82f8b8..a3527c00b29a 100644 --- a/drivers/crypto/ccree/cc_aead.c +++ b/drivers/crypto/ccree/cc_aead.c @@ -58,6 +58,7 @@ struct cc_aead_ctx { unsigned int enc_keylen; unsigned int auth_keylen; unsigned int authsize; /* Actual (reduced?) 
size of the MAC/ICv */ + unsigned int hash_len; enum drv_cipher_mode cipher_mode; enum cc_flow_mode flow_mode; enum drv_hash_mode auth_mode; @@ -122,6 +123,13 @@ static void cc_aead_exit(struct crypto_aead *tfm) } } +static unsigned int cc_get_aead_hash_len(struct crypto_aead *tfm) +{ + struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); + + return cc_get_default_hash_len(ctx->drvdata); +} + static int cc_aead_init(struct crypto_aead *tfm) { struct aead_alg *alg = crypto_aead_alg(tfm); @@ -196,6 +204,7 @@ static int cc_aead_init(struct crypto_aead *tfm) ctx->auth_state.hmac.ipad_opad = NULL; ctx->auth_state.hmac.padded_authkey = NULL; } + ctx->hash_len = cc_get_aead_hash_len(tfm); return 0; @@ -327,7 +336,7 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx) /* Load the hash current length*/ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], hash_mode); - set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz); + set_din_const(&desc[idx], 0, ctx->hash_len); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); idx++; @@ -465,7 +474,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, /* Load the hash current length*/ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], hashmode); - set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz); + set_din_const(&desc[idx], 0, ctx->hash_len); set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); @@ -540,13 +549,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); - struct rtattr *rta = (struct rtattr *)key; struct cc_crypto_req cc_req = {}; - struct crypto_authenc_key_param *param; struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ]; - int rc = -EINVAL; unsigned int seq_len = 0; struct device *dev = drvdata_to_dev(ctx->drvdata); + const u8 *enckey, *authkey; + int rc; dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n", ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen); @@ -554,35 +562,33 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, /* STAT_PHASE_0: Init and sanity checks */ if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. 
*/ - if (!RTA_OK(rta, keylen)) - goto badkey; - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) - goto badkey; - if (RTA_PAYLOAD(rta) < sizeof(*param)) - goto badkey; - param = RTA_DATA(rta); - ctx->enc_keylen = be32_to_cpu(param->enckeylen); - key += RTA_ALIGN(rta->rta_len); - keylen -= RTA_ALIGN(rta->rta_len); - if (keylen < ctx->enc_keylen) + struct crypto_authenc_keys keys; + + rc = crypto_authenc_extractkeys(&keys, key, keylen); + if (rc) goto badkey; - ctx->auth_keylen = keylen - ctx->enc_keylen; + enckey = keys.enckey; + authkey = keys.authkey; + ctx->enc_keylen = keys.enckeylen; + ctx->auth_keylen = keys.authkeylen; if (ctx->cipher_mode == DRV_CIPHER_CTR) { /* the nonce is stored in bytes at end of key */ + rc = -EINVAL; if (ctx->enc_keylen < (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)) goto badkey; /* Copy nonce from last 4 bytes in CTR key to * first 4 bytes in CTR IV */ - memcpy(ctx->ctr_nonce, key + ctx->auth_keylen + - ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE, - CTR_RFC3686_NONCE_SIZE); + memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen - + CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE); /* Set CTR key size */ ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE; } } else { /* non-authenc - has just one key */ + enckey = key; + authkey = NULL; ctx->enc_keylen = keylen; ctx->auth_keylen = 0; } @@ -594,13 +600,14 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, /* STAT_PHASE_1: Copy key to ctx */ /* Get key material */ - memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen); + memcpy(ctx->enckey, enckey, ctx->enc_keylen); if (ctx->enc_keylen == 24) memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24); if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { - memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen); + memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey, + ctx->auth_keylen); } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */ - rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen); + rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen); if (rc) goto badkey; } @@ -1001,7 +1008,7 @@ static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[], hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], hash_mode); set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode), - ctx->drvdata->hash_len_sz); + ctx->hash_len); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); idx++; @@ -1098,7 +1105,7 @@ static void cc_proc_scheme_desc(struct aead_request *req, hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], hash_mode); set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr, - ctx->drvdata->hash_len_sz); + ctx->hash_len); set_flow_mode(&desc[idx], S_HASH_to_DOUT); set_setup_mode(&desc[idx], SETUP_WRITE_STATE1); set_cipher_do(&desc[idx], DO_PAD); @@ -1128,7 +1135,7 @@ static void cc_proc_scheme_desc(struct aead_request *req, hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], hash_mode); set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode), - ctx->drvdata->hash_len_sz); + ctx->hash_len); set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); @@ -2358,6 +2365,7 @@ static struct cc_alg_template aead_algs[] = { .flow_mode = S_DIN_to_AES, .auth_mode = DRV_HASH_SHA1, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, { .name = "authenc(hmac(sha1),cbc(des3_ede))", @@ -2377,6 +2385,7 @@ static struct cc_alg_template aead_algs[] = { .flow_mode = S_DIN_to_DES, .auth_mode = DRV_HASH_SHA1, 
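
/*
 * Editorial sketch, not part of the patch: the crypto_authenc_extractkeys()
 * pattern that cc_aead_setkey() switches to above, replacing the open-coded
 * rtattr parsing. example_authenc_setkey() is a hypothetical illustration;
 * a real setkey goes on to copy the split keys into its context, as the
 * driver code does.
 */
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/string.h>

static int example_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int err;

	/* Parses and validates the {rtattr param, authkey, enckey} blob. */
	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (err)
		return err;

	/*
	 * keys.authkey/keys.authkeylen and keys.enckey/keys.enckeylen point
	 * into the caller's blob; copy whatever is needed before returning.
	 */
	memzero_explicit(&keys, sizeof(keys));
	return 0;
}
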
.min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, { .name = "authenc(hmac(sha256),cbc(aes))", @@ -2396,6 +2405,7 @@ static struct cc_alg_template aead_algs[] = { .flow_mode = S_DIN_to_AES, .auth_mode = DRV_HASH_SHA256, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, { .name = "authenc(hmac(sha256),cbc(des3_ede))", @@ -2415,6 +2425,7 @@ static struct cc_alg_template aead_algs[] = { .flow_mode = S_DIN_to_DES, .auth_mode = DRV_HASH_SHA256, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, { .name = "authenc(xcbc(aes),cbc(aes))", @@ -2434,6 +2445,7 @@ static struct cc_alg_template aead_algs[] = { .flow_mode = S_DIN_to_AES, .auth_mode = DRV_HASH_XCBC_MAC, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, { .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))", @@ -2453,6 +2465,7 @@ static struct cc_alg_template aead_algs[] = { .flow_mode = S_DIN_to_AES, .auth_mode = DRV_HASH_SHA1, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, { .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))", @@ -2472,6 +2485,7 @@ static struct cc_alg_template aead_algs[] = { .flow_mode = S_DIN_to_AES, .auth_mode = DRV_HASH_SHA256, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, { .name = "authenc(xcbc(aes),rfc3686(ctr(aes)))", @@ -2491,6 +2505,7 @@ static struct cc_alg_template aead_algs[] = { .flow_mode = S_DIN_to_AES, .auth_mode = DRV_HASH_XCBC_MAC, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, { .name = "ccm(aes)", @@ -2510,6 +2525,7 @@ static struct cc_alg_template aead_algs[] = { .flow_mode = S_DIN_to_AES, .auth_mode = DRV_HASH_NULL, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, { .name = "rfc4309(ccm(aes))", @@ -2529,6 +2545,7 @@ static struct cc_alg_template aead_algs[] = { .flow_mode = S_DIN_to_AES, .auth_mode = DRV_HASH_NULL, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, { .name = "gcm(aes)", @@ -2548,6 +2565,7 @@ static struct cc_alg_template aead_algs[] = { .flow_mode = S_DIN_to_AES, .auth_mode = DRV_HASH_NULL, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, { .name = "rfc4106(gcm(aes))", @@ -2567,6 +2585,7 @@ static struct cc_alg_template aead_algs[] = { .flow_mode = S_DIN_to_AES, .auth_mode = DRV_HASH_NULL, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, { .name = "rfc4543(gcm(aes))", @@ -2586,6 +2605,7 @@ static struct cc_alg_template aead_algs[] = { .flow_mode = S_DIN_to_AES, .auth_mode = DRV_HASH_NULL, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, }; @@ -2670,7 +2690,8 @@ int cc_aead_alloc(struct cc_drvdata *drvdata) /* Linux crypto */ for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) { - if (aead_algs[alg].min_hw_rev > drvdata->hw_rev) + if ((aead_algs[alg].min_hw_rev > drvdata->hw_rev) || + !(drvdata->std_bodies & aead_algs[alg].std_body)) continue; t_alg = cc_create_aead_alg(&aead_algs[alg], dev); diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c index dd948e1df9e5..0ee1c52da0a4 100644 --- a/drivers/crypto/ccree/cc_buffer_mgr.c +++ b/drivers/crypto/ccree/cc_buffer_mgr.c @@ -156,8 +156,11 @@ static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma, /* Verify there is no memory overflow*/ new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1); - if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) + if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) { + dev_err(dev, "Too many mlli entries. 
current %d max %d\n", + new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES); return -ENOMEM; + } /*handle buffer longer than 64 kbytes */ while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) { @@ -511,10 +514,8 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx, /* Map the src SGL */ rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents); - if (rc) { - rc = -ENOMEM; + if (rc) goto cipher_exit; - } if (mapped_nents > 1) req_ctx->dma_buf_type = CC_DMA_BUF_MLLI; @@ -528,12 +529,11 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx, } } else { /* Map the dst sg */ - if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL, - &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, - &dummy, &mapped_nents)) { - rc = -ENOMEM; + rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL, + &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, + &dummy, &mapped_nents); + if (rc) goto cipher_exit; - } if (mapped_nents > 1) req_ctx->dma_buf_type = CC_DMA_BUF_MLLI; @@ -614,10 +614,10 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req) hw_iv_size, DMA_BIDIRECTIONAL); } - /*In case a pool was set, a table was - *allocated and should be released - */ - if (areq_ctx->mlli_params.curr_pool) { + /* Release pool */ + if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI || + areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) && + (areq_ctx->mlli_params.mlli_virt_addr)) { dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n", &areq_ctx->mlli_params.mlli_dma_addr, areq_ctx->mlli_params.mlli_virt_addr); @@ -1078,10 +1078,8 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata, &areq_ctx->dst.nents, LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes, &dst_mapped_nents); - if (rc) { - rc = -ENOMEM; + if (rc) goto chain_data_exit; - } } dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map, @@ -1235,11 +1233,10 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req) } areq_ctx->ccm_iv0_dma_addr = dma_addr; - if (cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config, - &sg_data, req->assoclen)) { - rc = -ENOMEM; + rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config, + &sg_data, req->assoclen); + if (rc) goto aead_map_failure; - } } if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) { @@ -1299,10 +1296,8 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req) (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + LLI_MAX_NUM_OF_DATA_ENTRIES), &dummy, &mapped_nents); - if (rc) { - rc = -ENOMEM; + if (rc) goto aead_map_failure; - } if (areq_ctx->is_single_pass) { /* @@ -1386,6 +1381,7 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx, struct mlli_params *mlli_params = &areq_ctx->mlli_params; struct buffer_array sg_data; struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle; + int rc = 0; u32 dummy = 0; u32 mapped_nents = 0; @@ -1405,18 +1401,18 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx, /*TODO: copy data in case that buffer is enough for operation */ /* map the previous buffer */ if (*curr_buff_cnt) { - if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt, - &sg_data)) { - return -ENOMEM; - } + rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt, + &sg_data); + if (rc) + return rc; } if (src && nbytes > 0 && do_update) { - if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE, - &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, - &dummy, &mapped_nents)) { + rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE, + &areq_ctx->in_nents, 
LLI_MAX_NUM_OF_DATA_ENTRIES, + &dummy, &mapped_nents); + if (rc) goto unmap_curr_buff; - } if (src && mapped_nents == 1 && areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) { memcpy(areq_ctx->buff_sg, src, @@ -1435,7 +1431,8 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx, /* add the src data to the sg_data */ cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes, 0, true, &areq_ctx->mlli_nents); - if (cc_generate_mlli(dev, &sg_data, mlli_params, flags)) + rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags); + if (rc) goto fail_unmap_din; } /* change the buffer index for the unmap function */ @@ -1451,7 +1448,7 @@ unmap_curr_buff: if (*curr_buff_cnt) dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE); - return -ENOMEM; + return rc; } int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx, @@ -1470,6 +1467,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx, struct buffer_array sg_data; struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle; unsigned int swap_index = 0; + int rc = 0; u32 dummy = 0; u32 mapped_nents = 0; @@ -1514,21 +1512,21 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx, } if (*curr_buff_cnt) { - if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt, - &sg_data)) { - return -ENOMEM; - } + rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt, + &sg_data); + if (rc) + return rc; /* change the buffer index for next operation */ swap_index = 1; } if (update_data_len > *curr_buff_cnt) { - if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt), - DMA_TO_DEVICE, &areq_ctx->in_nents, - LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, - &mapped_nents)) { + rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt), + DMA_TO_DEVICE, &areq_ctx->in_nents, + LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, + &mapped_nents); + if (rc) goto unmap_curr_buff; - } if (mapped_nents == 1 && areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) { /* only one entry in the SG and no previous data */ @@ -1548,7 +1546,8 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx, cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, (update_data_len - *curr_buff_cnt), 0, true, &areq_ctx->mlli_nents); - if (cc_generate_mlli(dev, &sg_data, mlli_params, flags)) + rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags); + if (rc) goto fail_unmap_din; } areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index); @@ -1562,7 +1561,7 @@ unmap_curr_buff: if (*curr_buff_cnt) dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE); - return -ENOMEM; + return rc; } void cc_unmap_hash_request(struct device *dev, void *ctx, diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index 7623b29911af..d9c17078517b 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -7,6 +7,7 @@ #include <crypto/internal/skcipher.h> #include <crypto/des.h> #include <crypto/xts.h> +#include <crypto/sm4.h> #include <crypto/scatterwalk.h> #include "cc_driver.h" @@ -79,10 +80,14 @@ static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size) default: break; } + break; case S_DIN_to_DES: if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE) return 0; break; + case S_DIN_to_SM4: + if (size == SM4_KEY_SIZE) + return 0; default: break; } @@ -122,6 +127,17 @@ static int validate_data_size(struct cc_cipher_ctx *ctx_p, if (IS_ALIGNED(size, DES_BLOCK_SIZE)) return 0; break; + case S_DIN_to_SM4: + switch (ctx_p->cipher_mode) { + case DRV_CIPHER_CTR: + return 0; + case 
DRV_CIPHER_ECB: + case DRV_CIPHER_CBC: + if (IS_ALIGNED(size, SM4_BLOCK_SIZE)) + return 0; + default: + break; + } default: break; } @@ -337,7 +353,8 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key, dev_dbg(dev, "weak 3DES key"); return -EINVAL; } else if (!des_ekey(tmp, key) && - (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) { + (crypto_tfm_get_flags(tfm) & + CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; dev_dbg(dev, "weak DES key"); return -EINVAL; @@ -522,6 +539,9 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm, case S_DIN_to_DES: flow_mode = DIN_DES_DOUT; break; + case S_DIN_to_SM4: + flow_mode = DIN_SM4_DOUT; + break; default: dev_err(dev, "invalid flow mode, flow_mode = %d\n", flow_mode); return; @@ -634,6 +654,8 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err) unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm); unsigned int len; + cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst); + switch (ctx_p->cipher_mode) { case DRV_CIPHER_CBC: /* @@ -663,7 +685,6 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err) break; } - cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst); kzfree(req_ctx->iv); skcipher_request_complete(req, err); @@ -781,7 +802,8 @@ static int cc_cipher_decrypt(struct skcipher_request *req) memset(req_ctx, 0, sizeof(*req_ctx)); - if (ctx_p->cipher_mode == DRV_CIPHER_CBC) { + if ((ctx_p->cipher_mode == DRV_CIPHER_CBC) && + (req->cryptlen >= ivsize)) { /* Allocate and save the last IV sized bytes of the source, * which will be lost in case of in-place decryption. @@ -815,6 +837,7 @@ static const struct cc_alg_template skcipher_algs[] = { .cipher_mode = DRV_CIPHER_XTS, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, { .name = "xts512(paes)", @@ -832,6 +855,7 @@ static const struct cc_alg_template skcipher_algs[] = { .flow_mode = S_DIN_to_AES, .data_unit = 512, .min_hw_rev = CC_HW_REV_712, + .std_body = CC_STD_NIST, }, { .name = "xts4096(paes)", @@ -849,6 +873,7 @@ static const struct cc_alg_template skcipher_algs[] = { .flow_mode = S_DIN_to_AES, .data_unit = 4096, .min_hw_rev = CC_HW_REV_712, + .std_body = CC_STD_NIST, }, { .name = "essiv(paes)", @@ -865,6 +890,7 @@ static const struct cc_alg_template skcipher_algs[] = { .cipher_mode = DRV_CIPHER_ESSIV, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_712, + .std_body = CC_STD_NIST, }, { .name = "essiv512(paes)", @@ -882,6 +908,7 @@ static const struct cc_alg_template skcipher_algs[] = { .flow_mode = S_DIN_to_AES, .data_unit = 512, .min_hw_rev = CC_HW_REV_712, + .std_body = CC_STD_NIST, }, { .name = "essiv4096(paes)", @@ -899,6 +926,7 @@ static const struct cc_alg_template skcipher_algs[] = { .flow_mode = S_DIN_to_AES, .data_unit = 4096, .min_hw_rev = CC_HW_REV_712, + .std_body = CC_STD_NIST, }, { .name = "bitlocker(paes)", @@ -915,6 +943,7 @@ static const struct cc_alg_template skcipher_algs[] = { .cipher_mode = DRV_CIPHER_BITLOCKER, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_712, + .std_body = CC_STD_NIST, }, { .name = "bitlocker512(paes)", @@ -932,6 +961,7 @@ static const struct cc_alg_template skcipher_algs[] = { .flow_mode = S_DIN_to_AES, .data_unit = 512, .min_hw_rev = CC_HW_REV_712, + .std_body = CC_STD_NIST, }, { .name = "bitlocker4096(paes)", @@ -949,6 +979,7 @@ static const struct cc_alg_template skcipher_algs[] = { .flow_mode = S_DIN_to_AES, .data_unit = 4096, .min_hw_rev = CC_HW_REV_712, + .std_body = CC_STD_NIST, }, { 
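
/*
 * Editorial sketch, not part of the patch: the standards-body gating that
 * the .std_body annotations in this table feed. example_template_usable()
 * is a hypothetical condensation of the check cc_cipher_alloc() performs,
 * using the cc_alg_template and cc_drvdata fields this series introduces
 * (the 703, for instance, advertises only CC_STD_OSCCA).
 */
#include "cc_driver.h"

static bool example_template_usable(const struct cc_alg_template *tmpl,
				    const struct cc_drvdata *drvdata)
{
	/* Too old a CryptoCell revision for this algorithm? */
	if (tmpl->min_hw_rev > drvdata->hw_rev)
		return false;

	/* NIST vs. OSCCA: register only what this instance implements. */
	return drvdata->std_bodies & tmpl->std_body;
}
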
.name = "ecb(paes)", @@ -965,6 +996,7 @@ static const struct cc_alg_template skcipher_algs[] = { .cipher_mode = DRV_CIPHER_ECB, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_712, + .std_body = CC_STD_NIST, }, { .name = "cbc(paes)", @@ -981,6 +1013,7 @@ static const struct cc_alg_template skcipher_algs[] = { .cipher_mode = DRV_CIPHER_CBC, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_712, + .std_body = CC_STD_NIST, }, { .name = "ofb(paes)", @@ -997,6 +1030,7 @@ static const struct cc_alg_template skcipher_algs[] = { .cipher_mode = DRV_CIPHER_OFB, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_712, + .std_body = CC_STD_NIST, }, { .name = "cts(cbc(paes))", @@ -1013,6 +1047,7 @@ static const struct cc_alg_template skcipher_algs[] = { .cipher_mode = DRV_CIPHER_CBC_CTS, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_712, + .std_body = CC_STD_NIST, }, { .name = "ctr(paes)", @@ -1029,6 +1064,7 @@ static const struct cc_alg_template skcipher_algs[] = { .cipher_mode = DRV_CIPHER_CTR, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_712, + .std_body = CC_STD_NIST, }, { .name = "xts(aes)", @@ -1045,6 +1081,7 @@ static const struct cc_alg_template skcipher_algs[] = { .cipher_mode = DRV_CIPHER_XTS, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, { .name = "xts512(aes)", @@ -1062,6 +1099,7 @@ static const struct cc_alg_template skcipher_algs[] = { .flow_mode = S_DIN_to_AES, .data_unit = 512, .min_hw_rev = CC_HW_REV_712, + .std_body = CC_STD_NIST, }, { .name = "xts4096(aes)", @@ -1079,6 +1117,7 @@ static const struct cc_alg_template skcipher_algs[] = { .flow_mode = S_DIN_to_AES, .data_unit = 4096, .min_hw_rev = CC_HW_REV_712, + .std_body = CC_STD_NIST, }, { .name = "essiv(aes)", @@ -1095,6 +1134,7 @@ static const struct cc_alg_template skcipher_algs[] = { .cipher_mode = DRV_CIPHER_ESSIV, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_712, + .std_body = CC_STD_NIST, }, { .name = "essiv512(aes)", @@ -1112,6 +1152,7 @@ static const struct cc_alg_template skcipher_algs[] = { .flow_mode = S_DIN_to_AES, .data_unit = 512, .min_hw_rev = CC_HW_REV_712, + .std_body = CC_STD_NIST, }, { .name = "essiv4096(aes)", @@ -1129,6 +1170,7 @@ static const struct cc_alg_template skcipher_algs[] = { .flow_mode = S_DIN_to_AES, .data_unit = 4096, .min_hw_rev = CC_HW_REV_712, + .std_body = CC_STD_NIST, }, { .name = "bitlocker(aes)", @@ -1145,6 +1187,7 @@ static const struct cc_alg_template skcipher_algs[] = { .cipher_mode = DRV_CIPHER_BITLOCKER, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_712, + .std_body = CC_STD_NIST, }, { .name = "bitlocker512(aes)", @@ -1162,6 +1205,7 @@ static const struct cc_alg_template skcipher_algs[] = { .flow_mode = S_DIN_to_AES, .data_unit = 512, .min_hw_rev = CC_HW_REV_712, + .std_body = CC_STD_NIST, }, { .name = "bitlocker4096(aes)", @@ -1179,6 +1223,7 @@ static const struct cc_alg_template skcipher_algs[] = { .flow_mode = S_DIN_to_AES, .data_unit = 4096, .min_hw_rev = CC_HW_REV_712, + .std_body = CC_STD_NIST, }, { .name = "ecb(aes)", @@ -1195,6 +1240,7 @@ static const struct cc_alg_template skcipher_algs[] = { .cipher_mode = DRV_CIPHER_ECB, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, { .name = "cbc(aes)", @@ -1211,6 +1257,7 @@ static const struct cc_alg_template skcipher_algs[] = { .cipher_mode = DRV_CIPHER_CBC, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, { .name = "ofb(aes)", @@ -1227,6 +1274,7 @@ static const struct cc_alg_template skcipher_algs[] 
= { .cipher_mode = DRV_CIPHER_OFB, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, { .name = "cts(cbc(aes))", @@ -1243,6 +1291,7 @@ static const struct cc_alg_template skcipher_algs[] = { .cipher_mode = DRV_CIPHER_CBC_CTS, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, { .name = "ctr(aes)", @@ -1259,6 +1308,7 @@ static const struct cc_alg_template skcipher_algs[] = { .cipher_mode = DRV_CIPHER_CTR, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, { .name = "cbc(des3_ede)", @@ -1275,6 +1325,7 @@ static const struct cc_alg_template skcipher_algs[] = { .cipher_mode = DRV_CIPHER_CBC, .flow_mode = S_DIN_to_DES, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, { .name = "ecb(des3_ede)", @@ -1291,6 +1342,7 @@ static const struct cc_alg_template skcipher_algs[] = { .cipher_mode = DRV_CIPHER_ECB, .flow_mode = S_DIN_to_DES, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, { .name = "cbc(des)", @@ -1307,6 +1359,7 @@ static const struct cc_alg_template skcipher_algs[] = { .cipher_mode = DRV_CIPHER_CBC, .flow_mode = S_DIN_to_DES, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, { .name = "ecb(des)", @@ -1323,6 +1376,58 @@ static const struct cc_alg_template skcipher_algs[] = { .cipher_mode = DRV_CIPHER_ECB, .flow_mode = S_DIN_to_DES, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, + }, + { + .name = "cbc(sm4)", + .driver_name = "cbc-sm4-ccree", + .blocksize = SM4_BLOCK_SIZE, + .template_skcipher = { + .setkey = cc_cipher_setkey, + .encrypt = cc_cipher_encrypt, + .decrypt = cc_cipher_decrypt, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + }, + .cipher_mode = DRV_CIPHER_CBC, + .flow_mode = S_DIN_to_SM4, + .min_hw_rev = CC_HW_REV_713, + .std_body = CC_STD_OSCCA, + }, + { + .name = "ecb(sm4)", + .driver_name = "ecb-sm4-ccree", + .blocksize = SM4_BLOCK_SIZE, + .template_skcipher = { + .setkey = cc_cipher_setkey, + .encrypt = cc_cipher_encrypt, + .decrypt = cc_cipher_decrypt, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = 0, + }, + .cipher_mode = DRV_CIPHER_ECB, + .flow_mode = S_DIN_to_SM4, + .min_hw_rev = CC_HW_REV_713, + .std_body = CC_STD_OSCCA, + }, + { + .name = "ctr(sm4)", + .driver_name = "ctr-sm4-ccree", + .blocksize = SM4_BLOCK_SIZE, + .template_skcipher = { + .setkey = cc_cipher_setkey, + .encrypt = cc_cipher_encrypt, + .decrypt = cc_cipher_decrypt, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + }, + .cipher_mode = DRV_CIPHER_CTR, + .flow_mode = S_DIN_to_SM4, + .min_hw_rev = CC_HW_REV_713, + .std_body = CC_STD_OSCCA, }, }; @@ -1398,7 +1503,8 @@ int cc_cipher_alloc(struct cc_drvdata *drvdata) dev_dbg(dev, "Number of algorithms = %zu\n", ARRAY_SIZE(skcipher_algs)); for (alg = 0; alg < ARRAY_SIZE(skcipher_algs); alg++) { - if (skcipher_algs[alg].min_hw_rev > drvdata->hw_rev) + if ((skcipher_algs[alg].min_hw_rev > drvdata->hw_rev) || + !(drvdata->std_bodies & skcipher_algs[alg].std_body)) continue; dev_dbg(dev, "creating %s\n", skcipher_algs[alg].driver_name); diff --git a/drivers/crypto/ccree/cc_crypto_ctx.h b/drivers/crypto/ccree/cc_crypto_ctx.h index e032544f4e31..c8dac273c563 100644 --- a/drivers/crypto/ccree/cc_crypto_ctx.h +++ b/drivers/crypto/ccree/cc_crypto_ctx.h @@ -115,7 +115,8 @@ enum drv_hash_mode { DRV_HASH_CBC_MAC = 6, DRV_HASH_XCBC_MAC = 7, DRV_HASH_CMAC = 8, - DRV_HASH_MODE_NUM = 9, + DRV_HASH_SM3 = 9, + DRV_HASH_MODE_NUM = 
10, DRV_HASH_RESERVE32B = S32_MAX }; @@ -127,6 +128,7 @@ enum drv_hash_hw_mode { DRV_HASH_HW_SHA512 = 4, DRV_HASH_HW_SHA384 = 12, DRV_HASH_HW_GHASH = 6, + DRV_HASH_HW_SM3 = 14, DRV_HASH_HW_RESERVE32B = S32_MAX }; diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c index 5ca184e42483..5fa05a7bcf36 100644 --- a/drivers/crypto/ccree/cc_debugfs.c +++ b/drivers/crypto/ccree/cc_debugfs.c @@ -39,11 +39,9 @@ static struct debugfs_reg32 debug_regs[] = { CC_DEBUG_REG(AXIM_MON_COMP), }; -int __init cc_debugfs_global_init(void) +void __init cc_debugfs_global_init(void) { cc_debugfs_dir = debugfs_create_dir("ccree", NULL); - - return !cc_debugfs_dir; } void __exit cc_debugfs_global_fini(void) @@ -56,7 +54,6 @@ int cc_debugfs_init(struct cc_drvdata *drvdata) struct device *dev = drvdata_to_dev(drvdata); struct cc_debugfs_ctx *ctx; struct debugfs_regset32 *regset; - struct dentry *file; debug_regs[0].offset = drvdata->sig_offset; debug_regs[1].offset = drvdata->ver_offset; @@ -74,22 +71,9 @@ int cc_debugfs_init(struct cc_drvdata *drvdata) regset->base = drvdata->cc_base; ctx->dir = debugfs_create_dir(drvdata->plat_dev->name, cc_debugfs_dir); - if (!ctx->dir) - return -ENFILE; - - file = debugfs_create_regset32("regs", 0400, ctx->dir, regset); - if (!file) { - debugfs_remove(ctx->dir); - return -ENFILE; - } - file = debugfs_create_bool("coherent", 0400, ctx->dir, - &drvdata->coherent); - - if (!file) { - debugfs_remove_recursive(ctx->dir); - return -ENFILE; - } + debugfs_create_regset32("regs", 0400, ctx->dir, regset); + debugfs_create_bool("coherent", 0400, ctx->dir, &drvdata->coherent); drvdata->debugfs = ctx; diff --git a/drivers/crypto/ccree/cc_debugfs.h b/drivers/crypto/ccree/cc_debugfs.h index 5b5320eca7d2..01cbd9a95659 100644 --- a/drivers/crypto/ccree/cc_debugfs.h +++ b/drivers/crypto/ccree/cc_debugfs.h @@ -5,7 +5,7 @@ #define __CC_DEBUGFS_H__ #ifdef CONFIG_DEBUG_FS -int cc_debugfs_global_init(void); +void cc_debugfs_global_init(void); void cc_debugfs_global_fini(void); int cc_debugfs_init(struct cc_drvdata *drvdata); @@ -13,11 +13,7 @@ void cc_debugfs_fini(struct cc_drvdata *drvdata); #else -static inline int cc_debugfs_global_init(void) -{ - return 0; -} - +static inline void cc_debugfs_global_init(void) {} static inline void cc_debugfs_global_fini(void) {} static inline int cc_debugfs_init(struct cc_drvdata *drvdata) diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index 1ff229c2aeab..3bcc6c76e090 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c @@ -39,23 +39,38 @@ struct cc_hw_data { char *name; enum cc_hw_rev rev; u32 sig; + int std_bodies; }; /* Hardware revisions defs. 
*/ +/* The 703 is an OSCCA-only variant of the 713 */ +static const struct cc_hw_data cc703_hw = { + .name = "703", .rev = CC_HW_REV_713, .std_bodies = CC_STD_OSCCA +}; + +static const struct cc_hw_data cc713_hw = { + .name = "713", .rev = CC_HW_REV_713, .std_bodies = CC_STD_ALL +}; + static const struct cc_hw_data cc712_hw = { - .name = "712", .rev = CC_HW_REV_712, .sig = 0xDCC71200U + .name = "712", .rev = CC_HW_REV_712, .sig = 0xDCC71200U, + .std_bodies = CC_STD_ALL }; static const struct cc_hw_data cc710_hw = { - .name = "710", .rev = CC_HW_REV_710, .sig = 0xDCC63200U + .name = "710", .rev = CC_HW_REV_710, .sig = 0xDCC63200U, + .std_bodies = CC_STD_ALL }; static const struct cc_hw_data cc630p_hw = { - .name = "630P", .rev = CC_HW_REV_630, .sig = 0xDCC63000U + .name = "630P", .rev = CC_HW_REV_630, .sig = 0xDCC63000U, + .std_bodies = CC_STD_ALL }; static const struct of_device_id arm_ccree_dev_of_match[] = { + { .compatible = "arm,cryptocell-703-ree", .data = &cc703_hw }, + { .compatible = "arm,cryptocell-713-ree", .data = &cc713_hw }, { .compatible = "arm,cryptocell-712-ree", .data = &cc712_hw }, { .compatible = "arm,cryptocell-710-ree", .data = &cc710_hw }, { .compatible = "arm,cryptocell-630p-ree", .data = &cc630p_hw }, @@ -88,10 +103,10 @@ static irqreturn_t cc_isr(int irq, void *dev_id) /* read the interrupt status */ irr = cc_ioread(drvdata, CC_REG(HOST_IRR)); dev_dbg(dev, "Got IRR=0x%08X\n", irr); - if (irr == 0) { /* Probably shared interrupt line */ - dev_err(dev, "Got interrupt with empty IRR\n"); + + if (irr == 0) /* Probably shared interrupt line */ return IRQ_NONE; - } + imr = cc_ioread(drvdata, CC_REG(HOST_IMR)); /* clear interrupt - must be before processing events */ @@ -204,14 +219,13 @@ static int init_cc_resources(struct platform_device *plat_dev) hw_rev = (struct cc_hw_data *)dev_id->data; new_drvdata->hw_rev_name = hw_rev->name; new_drvdata->hw_rev = hw_rev->rev; + new_drvdata->std_bodies = hw_rev->std_bodies; if (hw_rev->rev >= CC_HW_REV_712) { - new_drvdata->hash_len_sz = HASH_LEN_SIZE_712; new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP); new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_712); new_drvdata->ver_offset = CC_REG(HOST_VERSION_712); } else { - new_drvdata->hash_len_sz = HASH_LEN_SIZE_630; new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP8); new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_630); new_drvdata->ver_offset = CC_REG(HOST_VERSION_630); @@ -297,15 +311,17 @@ static int init_cc_resources(struct platform_device *plat_dev) return rc; } - /* Verify correct mapping */ - signature_val = cc_ioread(new_drvdata, new_drvdata->sig_offset); - if (signature_val != hw_rev->sig) { - dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n", - signature_val, hw_rev->sig); - rc = -EINVAL; - goto post_clk_err; + if (hw_rev->rev <= CC_HW_REV_712) { + /* Verify correct mapping */ + signature_val = cc_ioread(new_drvdata, new_drvdata->sig_offset); + if (signature_val != hw_rev->sig) { + dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n", + signature_val, hw_rev->sig); + rc = -EINVAL; + goto post_clk_err; + } + dev_dbg(dev, "CC SIGNATURE=0x%08X\n", signature_val); } - dev_dbg(dev, "CC SIGNATURE=0x%08X\n", signature_val); /* Display HW versions */ dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n", @@ -364,7 +380,7 @@ static int init_cc_resources(struct platform_device *plat_dev) rc = cc_ivgen_init(new_drvdata); if (rc) { dev_err(dev, "cc_ivgen_init failed\n"); - goto post_power_mgr_err; + goto 
post_buf_mgr_err; } /* Allocate crypto algs */ @@ -387,6 +403,9 @@ static int init_cc_resources(struct platform_device *plat_dev) goto post_hash_err; } + /* All set, we can allow autosuspend */ + cc_pm_go(new_drvdata); + /* If we got here and FIPS mode is enabled * it means all FIPS test passed, so let TEE * know we're good. @@ -401,8 +420,6 @@ post_cipher_err: cc_cipher_free(new_drvdata); post_ivgen_err: cc_ivgen_fini(new_drvdata); -post_power_mgr_err: - cc_pm_fini(new_drvdata); post_buf_mgr_err: cc_buffer_mgr_fini(new_drvdata); post_req_mgr_err: @@ -461,6 +478,14 @@ int cc_clk_on(struct cc_drvdata *drvdata) return 0; } +unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata) +{ + if (drvdata->hw_rev >= CC_HW_REV_712) + return HASH_LEN_SIZE_712; + else + return HASH_LEN_SIZE_630; +} + void cc_clk_off(struct cc_drvdata *drvdata) { struct clk *clk = drvdata->clk; @@ -514,13 +539,8 @@ static struct platform_driver ccree_driver = { static int __init ccree_init(void) { - int ret; - cc_hash_global_init(); - - ret = cc_debugfs_global_init(); - if (ret) - return ret; + cc_debugfs_global_init(); return platform_driver_register(&ccree_driver); } diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h index d608a4faf662..33dbf3e6d15d 100644 --- a/drivers/crypto/ccree/cc_driver.h +++ b/drivers/crypto/ccree/cc_driver.h @@ -36,12 +36,19 @@ extern bool cc_dump_desc; extern bool cc_dump_bytes; -#define DRV_MODULE_VERSION "4.0" +#define DRV_MODULE_VERSION "5.0" enum cc_hw_rev { CC_HW_REV_630 = 630, CC_HW_REV_710 = 710, - CC_HW_REV_712 = 712 + CC_HW_REV_712 = 712, + CC_HW_REV_713 = 713 +}; + +enum cc_std_body { + CC_STD_NIST = 0x1, + CC_STD_OSCCA = 0x2, + CC_STD_ALL = 0x3 }; #define CC_COHERENT_CACHE_PARAMS 0xEEE @@ -104,13 +111,11 @@ struct cc_crypto_req { * @cc_base: virt address of the CC registers * @irq: device IRQ number * @irq_mask: Interrupt mask shadow (1 for masked interrupts) - * @fw_ver: SeP loaded firmware version */ struct cc_drvdata { void __iomem *cc_base; int irq; u32 irq_mask; - u32 fw_ver; struct completion hw_queue_avail; /* wait for HW queue availability */ struct platform_device *plat_dev; cc_sram_addr_t mlli_sram_addr; @@ -127,10 +132,10 @@ struct cc_drvdata { bool coherent; char *hw_rev_name; enum cc_hw_rev hw_rev; - u32 hash_len_sz; u32 axim_mon_offset; u32 sig_offset; u32 ver_offset; + int std_bodies; }; struct cc_crypto_alg { @@ -156,6 +161,7 @@ struct cc_alg_template { int flow_mode; /* Note: currently, refers to the cipher mode only. 
*/ int auth_mode; u32 min_hw_rev; + enum cc_std_body std_body; unsigned int data_unit; struct cc_drvdata *drvdata; }; @@ -182,6 +188,7 @@ int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe); void fini_cc_regs(struct cc_drvdata *drvdata); int cc_clk_on(struct cc_drvdata *drvdata); void cc_clk_off(struct cc_drvdata *drvdata); +unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata); static inline void cc_iowrite(struct cc_drvdata *drvdata, u32 reg, u32 val) { diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c index b9313306c36f..2c4ddc8fb76b 100644 --- a/drivers/crypto/ccree/cc_hash.c +++ b/drivers/crypto/ccree/cc_hash.c @@ -6,6 +6,7 @@ #include <crypto/algapi.h> #include <crypto/hash.h> #include <crypto/md5.h> +#include <crypto/sm3.h> #include <crypto/internal/hash.h> #include "cc_driver.h" @@ -16,6 +17,7 @@ #define CC_MAX_HASH_SEQ_LEN 12 #define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE +#define CC_SM3_HASH_LEN_SIZE 8 struct cc_hash_handle { cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM*/ @@ -43,6 +45,9 @@ static u64 sha384_init[] = { static u64 sha512_init[] = { SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4, SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 }; +static const u32 sm3_init[] = { + SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE, + SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA }; static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[], unsigned int *seq_size); @@ -82,6 +87,7 @@ struct cc_hash_ctx { int hash_mode; int hw_mode; int inter_digestsize; + unsigned int hash_len; struct completion setkey_comp; bool is_hmac; }; @@ -138,10 +144,10 @@ static void cc_init_req(struct device *dev, struct ahash_req_ctx *state, ctx->hash_mode == DRV_HASH_SHA384) memcpy(state->digest_bytes_len, digest_len_sha512_init, - ctx->drvdata->hash_len_sz); + ctx->hash_len); else memcpy(state->digest_bytes_len, digest_len_init, - ctx->drvdata->hash_len_sz); + ctx->hash_len); } if (ctx->hash_mode != DRV_HASH_NULL) { @@ -321,7 +327,7 @@ static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req, /* Get final MAC result */ hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], ctx->hw_mode); + set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode); /* TODO */ set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, 1); @@ -367,7 +373,7 @@ static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req, set_cipher_mode(&desc[idx], ctx->hw_mode); set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, ctx->hash_mode), - ctx->drvdata->hash_len_sz); + ctx->hash_len); set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); @@ -440,7 +446,7 @@ static int cc_hash_digest(struct ahash_request *req) * digest */ hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], ctx->hw_mode); + set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode); if (is_hmac) { set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT); @@ -454,14 +460,14 @@ static int cc_hash_digest(struct ahash_request *req) /* Load the hash current length */ hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], ctx->hw_mode); + set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode); if (is_hmac) { set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr, - ctx->drvdata->hash_len_sz, NS_BIT); + ctx->hash_len, NS_BIT); } else { - set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz); + 
set_din_const(&desc[idx], 0, ctx->hash_len); if (nbytes) set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); else @@ -478,7 +484,7 @@ static int cc_hash_digest(struct ahash_request *req) hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], ctx->hw_mode); set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, - ctx->drvdata->hash_len_sz, NS_BIT, 0); + ctx->hash_len, NS_BIT, 0); set_flow_mode(&desc[idx], S_HASH_to_DOUT); set_setup_mode(&desc[idx], SETUP_WRITE_STATE1); set_cipher_do(&desc[idx], DO_PAD); @@ -504,7 +510,7 @@ static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx, { /* Restore hash digest */ hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], ctx->hw_mode); + set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode); set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT); set_flow_mode(&desc[idx], S_DIN_to_HASH); @@ -513,10 +519,10 @@ static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx, /* Restore hash current length */ hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], ctx->hw_mode); + set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode); set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED); set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr, - ctx->drvdata->hash_len_sz, NS_BIT); + ctx->hash_len, NS_BIT); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); idx++; @@ -576,7 +582,7 @@ static int cc_hash_update(struct ahash_request *req) /* store the hash digest result in context */ hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], ctx->hw_mode); + set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode); set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT, 0); set_flow_mode(&desc[idx], S_HASH_to_DOUT); @@ -585,9 +591,9 @@ static int cc_hash_update(struct ahash_request *req) /* store current hash length in context */ hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], ctx->hw_mode); + set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode); set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr, - ctx->drvdata->hash_len_sz, NS_BIT, 1); + ctx->hash_len, NS_BIT, 1); set_queue_last_ind(ctx->drvdata, &desc[idx]); set_flow_mode(&desc[idx], S_HASH_to_DOUT); set_setup_mode(&desc[idx], SETUP_WRITE_STATE1); @@ -649,9 +655,9 @@ static int cc_do_finup(struct ahash_request *req, bool update) /* Pad the hash */ hw_desc_init(&desc[idx]); set_cipher_do(&desc[idx], DO_PAD); - set_cipher_mode(&desc[idx], ctx->hw_mode); + set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode); set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr, - ctx->drvdata->hash_len_sz, NS_BIT, 0); + ctx->hash_len, NS_BIT, 0); set_setup_mode(&desc[idx], SETUP_WRITE_STATE1); set_flow_mode(&desc[idx], S_HASH_to_DOUT); idx++; @@ -749,7 +755,7 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key, /* Load the hash current length*/ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], ctx->hw_mode); - set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz); + set_din_const(&desc[idx], 0, ctx->hash_len); set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); @@ -831,7 +837,7 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key, /* Load the hash current length*/ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], ctx->hw_mode); - set_din_const(&desc[idx], 0, 
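/* The current-length field size is now per-tfm state: cc_cra_init() below
 * caches it once via cc_get_hash_len(), which returns the fixed 8-byte
 * CC_SM3_HASH_LEN_SIZE for SM3 and the revision-dependent default from
 * cc_get_default_hash_len() for everything else, so the data path no
 * longer reads drvdata->hash_len_sz. */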
ctx->drvdata->hash_len_sz); + set_din_const(&desc[idx], 0, ctx->hash_len); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); idx++; @@ -1069,6 +1075,16 @@ fail: return -ENOMEM; } +static int cc_get_hash_len(struct crypto_tfm *tfm) +{ + struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->hash_mode == DRV_HASH_SM3) + return CC_SM3_HASH_LEN_SIZE; + else + return cc_get_default_hash_len(ctx->drvdata); +} + static int cc_cra_init(struct crypto_tfm *tfm) { struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm); @@ -1086,7 +1102,7 @@ static int cc_cra_init(struct crypto_tfm *tfm) ctx->hw_mode = cc_alg->hw_mode; ctx->inter_digestsize = cc_alg->inter_digestsize; ctx->drvdata = cc_alg->drvdata; - + ctx->hash_len = cc_get_hash_len(tfm); return cc_alloc_ctx(ctx); } @@ -1465,8 +1481,8 @@ static int cc_hash_export(struct ahash_request *req, void *out) memcpy(out, state->digest_buff, ctx->inter_digestsize); out += ctx->inter_digestsize; - memcpy(out, state->digest_bytes_len, ctx->drvdata->hash_len_sz); - out += ctx->drvdata->hash_len_sz; + memcpy(out, state->digest_bytes_len, ctx->hash_len); + out += ctx->hash_len; memcpy(out, &curr_buff_cnt, sizeof(u32)); out += sizeof(u32); @@ -1494,8 +1510,8 @@ static int cc_hash_import(struct ahash_request *req, const void *in) memcpy(state->digest_buff, in, ctx->inter_digestsize); in += ctx->inter_digestsize; - memcpy(state->digest_bytes_len, in, ctx->drvdata->hash_len_sz); - in += ctx->drvdata->hash_len_sz; + memcpy(state->digest_bytes_len, in, ctx->hash_len); + in += ctx->hash_len; /* Sanity check the data as much as possible */ memcpy(&tmp, in, sizeof(u32)); @@ -1515,6 +1531,7 @@ struct cc_hash_template { char mac_name[CRYPTO_MAX_ALG_NAME]; char mac_driver_name[CRYPTO_MAX_ALG_NAME]; unsigned int blocksize; + bool is_mac; bool synchronize; struct ahash_alg template_ahash; int hash_mode; @@ -1522,6 +1539,7 @@ struct cc_hash_template { int inter_digestsize; struct cc_drvdata *drvdata; u32 min_hw_rev; + enum cc_std_body std_body; }; #define CC_STATE_SIZE(_x) \ @@ -1536,6 +1554,7 @@ static struct cc_hash_template driver_hash[] = { .mac_name = "hmac(sha1)", .mac_driver_name = "hmac-sha1-ccree", .blocksize = SHA1_BLOCK_SIZE, + .is_mac = true, .synchronize = false, .template_ahash = { .init = cc_hash_init, @@ -1555,6 +1574,7 @@ static struct cc_hash_template driver_hash[] = { .hw_mode = DRV_HASH_HW_SHA1, .inter_digestsize = SHA1_DIGEST_SIZE, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, { .name = "sha256", @@ -1562,6 +1582,7 @@ static struct cc_hash_template driver_hash[] = { .mac_name = "hmac(sha256)", .mac_driver_name = "hmac-sha256-ccree", .blocksize = SHA256_BLOCK_SIZE, + .is_mac = true, .template_ahash = { .init = cc_hash_init, .update = cc_hash_update, @@ -1580,6 +1601,7 @@ static struct cc_hash_template driver_hash[] = { .hw_mode = DRV_HASH_HW_SHA256, .inter_digestsize = SHA256_DIGEST_SIZE, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, { .name = "sha224", @@ -1587,6 +1609,7 @@ static struct cc_hash_template driver_hash[] = { .mac_name = "hmac(sha224)", .mac_driver_name = "hmac-sha224-ccree", .blocksize = SHA224_BLOCK_SIZE, + .is_mac = true, .template_ahash = { .init = cc_hash_init, .update = cc_hash_update, @@ -1605,6 +1628,7 @@ static struct cc_hash_template driver_hash[] = { .hw_mode = DRV_HASH_HW_SHA256, .inter_digestsize = SHA256_DIGEST_SIZE, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, { .name = "sha384", @@ -1612,6 +1636,7 @@ static struct cc_hash_template driver_hash[] = { .mac_name 
= "hmac(sha384)", .mac_driver_name = "hmac-sha384-ccree", .blocksize = SHA384_BLOCK_SIZE, + .is_mac = true, .template_ahash = { .init = cc_hash_init, .update = cc_hash_update, @@ -1630,6 +1655,7 @@ static struct cc_hash_template driver_hash[] = { .hw_mode = DRV_HASH_HW_SHA512, .inter_digestsize = SHA512_DIGEST_SIZE, .min_hw_rev = CC_HW_REV_712, + .std_body = CC_STD_NIST, }, { .name = "sha512", @@ -1637,6 +1663,7 @@ static struct cc_hash_template driver_hash[] = { .mac_name = "hmac(sha512)", .mac_driver_name = "hmac-sha512-ccree", .blocksize = SHA512_BLOCK_SIZE, + .is_mac = true, .template_ahash = { .init = cc_hash_init, .update = cc_hash_update, @@ -1655,6 +1682,7 @@ static struct cc_hash_template driver_hash[] = { .hw_mode = DRV_HASH_HW_SHA512, .inter_digestsize = SHA512_DIGEST_SIZE, .min_hw_rev = CC_HW_REV_712, + .std_body = CC_STD_NIST, }, { .name = "md5", @@ -1662,6 +1690,7 @@ static struct cc_hash_template driver_hash[] = { .mac_name = "hmac(md5)", .mac_driver_name = "hmac-md5-ccree", .blocksize = MD5_HMAC_BLOCK_SIZE, + .is_mac = true, .template_ahash = { .init = cc_hash_init, .update = cc_hash_update, @@ -1680,11 +1709,38 @@ static struct cc_hash_template driver_hash[] = { .hw_mode = DRV_HASH_HW_MD5, .inter_digestsize = MD5_DIGEST_SIZE, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, + }, + { + .name = "sm3", + .driver_name = "sm3-ccree", + .blocksize = SM3_BLOCK_SIZE, + .is_mac = false, + .template_ahash = { + .init = cc_hash_init, + .update = cc_hash_update, + .final = cc_hash_final, + .finup = cc_hash_finup, + .digest = cc_hash_digest, + .export = cc_hash_export, + .import = cc_hash_import, + .setkey = cc_hash_setkey, + .halg = { + .digestsize = SM3_DIGEST_SIZE, + .statesize = CC_STATE_SIZE(SM3_DIGEST_SIZE), + }, + }, + .hash_mode = DRV_HASH_SM3, + .hw_mode = DRV_HASH_HW_SM3, + .inter_digestsize = SM3_DIGEST_SIZE, + .min_hw_rev = CC_HW_REV_713, + .std_body = CC_STD_OSCCA, }, { .mac_name = "xcbc(aes)", .mac_driver_name = "xcbc-aes-ccree", .blocksize = AES_BLOCK_SIZE, + .is_mac = true, .template_ahash = { .init = cc_hash_init, .update = cc_mac_update, @@ -1703,11 +1759,13 @@ static struct cc_hash_template driver_hash[] = { .hw_mode = DRV_CIPHER_XCBC_MAC, .inter_digestsize = AES_BLOCK_SIZE, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, { .mac_name = "cmac(aes)", .mac_driver_name = "cmac-aes-ccree", .blocksize = AES_BLOCK_SIZE, + .is_mac = true, .template_ahash = { .init = cc_hash_init, .update = cc_mac_update, @@ -1726,6 +1784,7 @@ static struct cc_hash_template driver_hash[] = { .hw_mode = DRV_CIPHER_CMAC, .inter_digestsize = AES_BLOCK_SIZE, .min_hw_rev = CC_HW_REV_630, + .std_body = CC_STD_NIST, }, }; @@ -1780,6 +1839,7 @@ int cc_init_hash_sram(struct cc_drvdata *drvdata) unsigned int larval_seq_len = 0; struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)]; bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712); + bool sm3_supported = (drvdata->hw_rev >= CC_HW_REV_713); int rc = 0; /* Copy-to-sram digest-len */ @@ -1845,6 +1905,17 @@ int cc_init_hash_sram(struct cc_drvdata *drvdata) sram_buff_ofs += sizeof(sha256_init); larval_seq_len = 0; + if (sm3_supported) { + cc_set_sram_desc(sm3_init, sram_buff_ofs, + ARRAY_SIZE(sm3_init), larval_seq, + &larval_seq_len); + rc = send_request_init(drvdata, larval_seq, larval_seq_len); + if (rc) + goto init_digest_const_err; + sram_buff_ofs += sizeof(sm3_init); + larval_seq_len = 0; + } + if (large_sha_supported) { cc_set_sram_desc((u32 *)sha384_init, sram_buff_ofs, (ARRAY_SIZE(sha384_init) * 2), larval_seq, 
@@ -1911,6 +1982,9 @@ int cc_hash_alloc(struct cc_drvdata *drvdata) sizeof(sha224_init) + sizeof(sha256_init); + if (drvdata->hw_rev >= CC_HW_REV_713) + sram_size_to_alloc += sizeof(sm3_init); + if (drvdata->hw_rev >= CC_HW_REV_712) sram_size_to_alloc += sizeof(digest_len_sha512_init) + sizeof(sha384_init) + sizeof(sha512_init); @@ -1937,30 +2011,33 @@ int cc_hash_alloc(struct cc_drvdata *drvdata) struct cc_hash_alg *t_alg; int hw_mode = driver_hash[alg].hw_mode; - /* We either support both HASH and MAC or none */ - if (driver_hash[alg].min_hw_rev > drvdata->hw_rev) + /* Check that the HW revision and variants are suitable */ + if ((driver_hash[alg].min_hw_rev > drvdata->hw_rev) || + !(drvdata->std_bodies & driver_hash[alg].std_body)) continue; - /* register hmac version */ - t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true); - if (IS_ERR(t_alg)) { - rc = PTR_ERR(t_alg); - dev_err(dev, "%s alg allocation failed\n", - driver_hash[alg].driver_name); - goto fail; - } - t_alg->drvdata = drvdata; - - rc = crypto_register_ahash(&t_alg->ahash_alg); - if (rc) { - dev_err(dev, "%s alg registration failed\n", - driver_hash[alg].driver_name); - kfree(t_alg); - goto fail; - } else { - list_add_tail(&t_alg->entry, &hash_handle->hash_list); + if (driver_hash[alg].is_mac) { + /* register hmac version */ + t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true); + if (IS_ERR(t_alg)) { + rc = PTR_ERR(t_alg); + dev_err(dev, "%s alg allocation failed\n", + driver_hash[alg].driver_name); + goto fail; + } + t_alg->drvdata = drvdata; + + rc = crypto_register_ahash(&t_alg->ahash_alg); + if (rc) { + dev_err(dev, "%s alg registration failed\n", + driver_hash[alg].driver_name); + kfree(t_alg); + goto fail; + } else { + list_add_tail(&t_alg->entry, + &hash_handle->hash_list); + } } - if (hw_mode == DRV_CIPHER_XCBC_MAC || hw_mode == DRV_CIPHER_CMAC) continue; @@ -2027,7 +2104,7 @@ static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[], XCBC_MAC_K1_OFFSET), CC_AES_128_BIT_KEY_SIZE, NS_BIT); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); - set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC); + set_hash_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC, ctx->hash_mode); set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE); set_flow_mode(&desc[idx], S_DIN_to_AES); @@ -2162,6 +2239,8 @@ static const void *cc_larval_digest(struct device *dev, u32 mode) return sha384_init; case DRV_HASH_SHA512: return sha512_init; + case DRV_HASH_SM3: + return sm3_init; default: dev_err(dev, "Invalid hash mode (%d)\n", mode); return md5_init; @@ -2182,6 +2261,8 @@ cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode) struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata; struct cc_hash_handle *hash_handle = _drvdata->hash_handle; struct device *dev = drvdata_to_dev(_drvdata); + bool sm3_supported = (_drvdata->hw_rev >= CC_HW_REV_713); + cc_sram_addr_t addr; switch (mode) { case DRV_HASH_NULL: @@ -2200,19 +2281,31 @@ cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode) sizeof(md5_init) + sizeof(sha1_init) + sizeof(sha224_init)); - case DRV_HASH_SHA384: + case DRV_HASH_SM3: return (hash_handle->larval_digest_sram_addr + sizeof(md5_init) + sizeof(sha1_init) + sizeof(sha224_init) + sizeof(sha256_init)); + case DRV_HASH_SHA384: + addr = (hash_handle->larval_digest_sram_addr + + sizeof(md5_init) + + sizeof(sha1_init) + + sizeof(sha224_init) + + sizeof(sha256_init)); + if (sm3_supported) + addr += sizeof(sm3_init); + return addr; case 
DRV_HASH_SHA512: - return (hash_handle->larval_digest_sram_addr + + addr = (hash_handle->larval_digest_sram_addr + sizeof(md5_init) + sizeof(sha1_init) + sizeof(sha224_init) + sizeof(sha256_init) + sizeof(sha384_init)); + if (sm3_supported) + addr += sizeof(sm3_init); + return addr; default: dev_err(dev, "Invalid hash mode (%d)\n", mode); } diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h index 45985b955d2c..7a9b90db7db7 100644 --- a/drivers/crypto/ccree/cc_hw_queue_defs.h +++ b/drivers/crypto/ccree/cc_hw_queue_defs.h @@ -42,6 +42,7 @@ #define WORD3_QUEUE_LAST_IND CC_GENMASK(3, QUEUE_LAST_IND) #define WORD4_ACK_NEEDED CC_GENMASK(4, ACK_NEEDED) #define WORD4_AES_SEL_N_HASH CC_GENMASK(4, AES_SEL_N_HASH) +#define WORD4_AES_XOR_CRYPTO_KEY CC_GENMASK(4, AES_XOR_CRYPTO_KEY) #define WORD4_BYTES_SWAP CC_GENMASK(4, BYTES_SWAP) #define WORD4_CIPHER_CONF0 CC_GENMASK(4, CIPHER_CONF0) #define WORD4_CIPHER_CONF1 CC_GENMASK(4, CIPHER_CONF1) @@ -107,6 +108,7 @@ enum cc_flow_mode { AES_to_AES_to_HASH_and_DOUT = 13, AES_to_AES_to_HASH = 14, AES_to_HASH_and_AES = 15, + DIN_SM4_DOUT = 16, DIN_AES_AESMAC = 17, HASH_to_DOUT = 18, /* setup flows */ @@ -114,9 +116,11 @@ S_DIN_to_AES2 = 33, S_DIN_to_DES = 34, S_DIN_to_RC4 = 35, + S_DIN_to_SM4 = 36, S_DIN_to_HASH = 37, S_AES_to_DOUT = 38, S_AES2_to_DOUT = 39, + S_SM4_to_DOUT = 40, S_RC4_to_DOUT = 41, S_DES_to_DOUT = 42, S_HASH_to_DOUT = 43, @@ -394,6 +398,16 @@ static inline void set_aes_not_hash_mode(struct cc_hw_desc *pdesc) } /* + * Set aes xor crypto key; in some scenarios this selects the SM3 engine + * + * @pdesc: pointer HW descriptor struct + */ +static inline void set_aes_xor_crypto_key(struct cc_hw_desc *pdesc) +{ + pdesc->word[4] |= FIELD_PREP(WORD4_AES_XOR_CRYPTO_KEY, 1); +} + +/* * Set the DOUT field of a HW descriptors to SRAM mode * Note: No need to check SRAM alignment since host requests do not use SRAM and * adaptor will enforce alignment check. @@ -455,6 +469,22 @@ static inline void set_cipher_mode(struct cc_hw_desc *pdesc, int mode) } /* + * Set the cipher mode for hash algorithms. + * + * @pdesc: pointer HW descriptor struct + * @cipher_mode: Any one of the modes defined in [CC7x-DESC] + * @hash_mode: specifies which hash is being handled + */ +static inline void set_hash_cipher_mode(struct cc_hw_desc *pdesc, + enum drv_cipher_mode cipher_mode, + enum drv_hash_mode hash_mode) +{ + set_cipher_mode(pdesc, cipher_mode); + if (hash_mode == DRV_HASH_SM3) + set_aes_xor_crypto_key(pdesc); +} + +/* * Set the cipher configuration fields. 
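/* Usage sketch: the hash data path now calls
 *	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
 * wherever it used to call set_cipher_mode() alone, so every load or
 * store of hash state also sets AES_XOR_CRYPTO_KEY when the tfm is SM3,
 * steering the descriptor to the SM3 engine; for the NIST hashes the
 * helper reduces to plain set_cipher_mode(). */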
* * @pdesc: pointer HW descriptor struct diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c index d990f472e89f..6ff7e75ad90e 100644 --- a/drivers/crypto/ccree/cc_pm.c +++ b/drivers/crypto/ccree/cc_pm.c @@ -100,20 +100,19 @@ int cc_pm_put_suspend(struct device *dev) int cc_pm_init(struct cc_drvdata *drvdata) { - int rc = 0; struct device *dev = drvdata_to_dev(drvdata); /* must be before the enabling to avoid redundant suspending */ pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT); pm_runtime_use_autosuspend(dev); /* activate the PM module */ - rc = pm_runtime_set_active(dev); - if (rc) - return rc; - /* enable the PM module*/ - pm_runtime_enable(dev); + return pm_runtime_set_active(dev); +} - return rc; +/* enable the PM module*/ +void cc_pm_go(struct cc_drvdata *drvdata) +{ + pm_runtime_enable(drvdata_to_dev(drvdata)); } void cc_pm_fini(struct cc_drvdata *drvdata) diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h index 020a5403c58b..907a6db4d6c0 100644 --- a/drivers/crypto/ccree/cc_pm.h +++ b/drivers/crypto/ccree/cc_pm.h @@ -16,6 +16,7 @@ extern const struct dev_pm_ops ccree_pm; int cc_pm_init(struct cc_drvdata *drvdata); +void cc_pm_go(struct cc_drvdata *drvdata); void cc_pm_fini(struct cc_drvdata *drvdata); int cc_pm_suspend(struct device *dev); int cc_pm_resume(struct device *dev); @@ -29,6 +30,8 @@ static inline int cc_pm_init(struct cc_drvdata *drvdata) return 0; } +static inline void cc_pm_go(struct cc_drvdata *drvdata) {} + static inline void cc_pm_fini(struct cc_drvdata *drvdata) {} static inline int cc_pm_suspend(struct device *dev) diff --git a/drivers/crypto/chelsio/Makefile b/drivers/crypto/chelsio/Makefile index 639e5718dff4..b7bd980a27d8 100644 --- a/drivers/crypto/chelsio/Makefile +++ b/drivers/crypto/chelsio/Makefile @@ -1,4 +1,4 @@ -ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4 +ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4 obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chcr.o chcr-objs := chcr_core.o chcr_algo.o diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index db203f8be429..8d8cf80b9294 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -123,7 +123,7 @@ static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx) static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx) { - return ctx->dev->u_ctx; + return container_of(ctx->dev, struct uld_ctx, dev); } static inline int is_ofld_imm(const struct sk_buff *skb) @@ -198,18 +198,43 @@ void chcr_verify_tag(struct aead_request *req, u8 *input, int *err) *err = 0; } -static inline void chcr_handle_aead_resp(struct aead_request *req, +static int chcr_inc_wrcount(struct chcr_dev *dev) +{ + int err = 0; + + spin_lock_bh(&dev->lock_chcr_dev); + if (dev->state == CHCR_DETACH) + err = 1; + else + atomic_inc(&dev->inflight); + + spin_unlock_bh(&dev->lock_chcr_dev); + + return err; +} + +static inline void chcr_dec_wrcount(struct chcr_dev *dev) +{ + atomic_dec(&dev->inflight); +} + +static inline int chcr_handle_aead_resp(struct aead_request *req, unsigned char *input, int err) { struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct chcr_dev *dev = a_ctx(tfm)->dev; chcr_aead_common_exit(req); if (reqctx->verify == VERIFY_SW) { chcr_verify_tag(req, input, &err); reqctx->verify = VERIFY_HW; } + chcr_dec_wrcount(dev); req->base.complete(&req->base, err); + + return err; } static void 
get_aes_decrypt_key(unsigned char *dec_key, @@ -391,7 +416,7 @@ static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid, static inline void dsgl_walk_add_page(struct dsgl_walk *walk, size_t size, - dma_addr_t *addr) + dma_addr_t addr) { int j; @@ -399,7 +424,7 @@ static inline void dsgl_walk_add_page(struct dsgl_walk *walk, return; j = walk->nents; walk->to->len[j % 8] = htons(size); - walk->to->addr[j % 8] = cpu_to_be64(*addr); + walk->to->addr[j % 8] = cpu_to_be64(addr); j++; if ((j % 8) == 0) walk->to++; @@ -473,16 +498,16 @@ static inline void ulptx_walk_end(struct ulptx_walk *walk) static inline void ulptx_walk_add_page(struct ulptx_walk *walk, size_t size, - dma_addr_t *addr) + dma_addr_t addr) { if (!size) return; if (walk->nents == 0) { walk->sgl->len0 = cpu_to_be32(size); - walk->sgl->addr0 = cpu_to_be64(*addr); + walk->sgl->addr0 = cpu_to_be64(addr); } else { - walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr); + walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr); walk->pair->len[walk->pair_idx] = cpu_to_be32(size); walk->pair_idx = !walk->pair_idx; if (!walk->pair_idx) @@ -717,7 +742,7 @@ static inline void create_wreq(struct chcr_context *ctx, htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16))); chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req); chcr_req->wreq.rx_chid_to_rx_q_id = - FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid, + FILL_WR_RX_Q_ID(ctx->tx_chan_id, qid, !!lcb, ctx->tx_qidx); chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id, @@ -773,7 +798,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam) } chcr_req = __skb_put_zero(skb, transhdr_len); chcr_req->sec_cpl.op_ivinsrtofst = - FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1); + FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->tx_chan_id, 2, 1); chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes); chcr_req->sec_cpl.aadstart_cipherstop_hi = @@ -1100,6 +1125,7 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req, struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input; struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); struct cipher_wr_param wrparam; + struct chcr_dev *dev = c_ctx(tfm)->dev; int bytes; if (err) @@ -1161,6 +1187,7 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req, unmap: chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req); complete: + chcr_dec_wrcount(dev); req->base.complete(&req->base, err); return err; } @@ -1187,7 +1214,10 @@ static int process_cipher(struct ablkcipher_request *req, ablkctx->enckey_len, req->nbytes, ivsize); goto error; } - chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req); + + err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req); + if (err) + goto error; if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) + AES_MIN_KEY_SIZE + sizeof(struct cpl_rx_phys_dsgl) + @@ -1276,15 +1306,21 @@ error: static int chcr_aes_encrypt(struct ablkcipher_request *req) { struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct chcr_dev *dev = c_ctx(tfm)->dev; struct sk_buff *skb = NULL; int err, isfull = 0; struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm)); + err = chcr_inc_wrcount(dev); + if (err) + return -ENXIO; if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], c_ctx(tfm)->tx_qidx))) { isfull = 1; - if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) - return -ENOSPC; + if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { + err = -ENOSPC; + goto error; + } } err = process_cipher(req, 
u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx], @@ -1295,15 +1331,23 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req) set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx); chcr_send_wr(skb); return isfull ? -EBUSY : -EINPROGRESS; +error: + chcr_dec_wrcount(dev); + return err; } static int chcr_aes_decrypt(struct ablkcipher_request *req) { struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm)); + struct chcr_dev *dev = c_ctx(tfm)->dev; struct sk_buff *skb = NULL; int err, isfull = 0; + err = chcr_inc_wrcount(dev); + if (err) + return -ENXIO; + if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], c_ctx(tfm)->tx_qidx))) { isfull = 1; @@ -1311,8 +1355,8 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req) return -ENOSPC; } - err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx], - &skb, CHCR_DECRYPT_OP); + err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx], + &skb, CHCR_DECRYPT_OP); if (err || !skb) return err; skb->dev = u_ctx->lldi.ports[0]; @@ -1324,7 +1368,6 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req) static int chcr_device_init(struct chcr_context *ctx) { struct uld_ctx *u_ctx = NULL; - struct adapter *adap; unsigned int id; int txq_perchan, txq_idx, ntxq; int err = 0, rxq_perchan, rxq_idx; @@ -1333,18 +1376,17 @@ static int chcr_device_init(struct chcr_context *ctx) if (!ctx->dev) { u_ctx = assign_chcr_device(); if (!u_ctx) { + err = -ENXIO; pr_err("chcr device assignment fails\n"); goto out; } - ctx->dev = u_ctx->dev; - adap = padap(ctx->dev); + ctx->dev = &u_ctx->dev; ntxq = u_ctx->lldi.ntxq; rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan; txq_perchan = ntxq / u_ctx->lldi.nchan; spin_lock(&ctx->dev->lock_chcr_dev); ctx->tx_chan_id = ctx->dev->tx_channel_id; ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id; - ctx->dev->rx_channel_id = 0; spin_unlock(&ctx->dev->lock_chcr_dev); rxq_idx = ctx->tx_chan_id * rxq_perchan; rxq_idx += id % rxq_perchan; @@ -1498,7 +1540,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req, chcr_req = __skb_put_zero(skb, transhdr_len); chcr_req->sec_cpl.op_ivinsrtofst = - FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0); + FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->tx_chan_id, 2, 0); chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len); chcr_req->sec_cpl.aadstart_cipherstop_hi = @@ -1562,6 +1604,7 @@ static int chcr_ahash_update(struct ahash_request *req) struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); struct uld_ctx *u_ctx = NULL; + struct chcr_dev *dev = h_ctx(rtfm)->dev; struct sk_buff *skb; u8 remainder = 0, bs; unsigned int nbytes = req->nbytes; @@ -1570,12 +1613,6 @@ static int chcr_ahash_update(struct ahash_request *req) bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); u_ctx = ULD_CTX(h_ctx(rtfm)); - if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], - h_ctx(rtfm)->tx_qidx))) { - isfull = 1; - if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) - return -ENOSPC; - } if (nbytes + req_ctx->reqlen >= bs) { remainder = (nbytes + req_ctx->reqlen) % bs; @@ -1586,10 +1623,27 @@ static int chcr_ahash_update(struct ahash_request *req) req_ctx->reqlen += nbytes; return 0; } + error = chcr_inc_wrcount(dev); + if (error) + return -ENXIO; + /* Detach state for CHCR means lldi or padap is freed. 
Increasing + * inflight count for dev guarantees that lldi and padap is valid + */ + if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], + h_ctx(rtfm)->tx_qidx))) { + isfull = 1; + if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { + error = -ENOSPC; + goto err; + } + } + chcr_init_hctx_per_wr(req_ctx); error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); - if (error) - return -ENOMEM; + if (error) { + error = -ENOMEM; + goto err; + } get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); params.kctx_len = roundup(params.alg_prm.result_size, 16); params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen, @@ -1629,6 +1683,8 @@ static int chcr_ahash_update(struct ahash_request *req) return isfull ? -EBUSY : -EINPROGRESS; unmap: chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); +err: + chcr_dec_wrcount(dev); return error; } @@ -1646,10 +1702,16 @@ static int chcr_ahash_final(struct ahash_request *req) { struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); + struct chcr_dev *dev = h_ctx(rtfm)->dev; struct hash_wr_param params; struct sk_buff *skb; struct uld_ctx *u_ctx = NULL; u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); + int error = -EINVAL; + + error = chcr_inc_wrcount(dev); + if (error) + return -ENXIO; chcr_init_hctx_per_wr(req_ctx); u_ctx = ULD_CTX(h_ctx(rtfm)); @@ -1686,19 +1748,25 @@ static int chcr_ahash_final(struct ahash_request *req) } params.hash_size = crypto_ahash_digestsize(rtfm); skb = create_hash_wr(req, ¶ms); - if (IS_ERR(skb)) - return PTR_ERR(skb); + if (IS_ERR(skb)) { + error = PTR_ERR(skb); + goto err; + } req_ctx->reqlen = 0; skb->dev = u_ctx->lldi.ports[0]; set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); chcr_send_wr(skb); return -EINPROGRESS; +err: + chcr_dec_wrcount(dev); + return error; } static int chcr_ahash_finup(struct ahash_request *req) { struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); + struct chcr_dev *dev = h_ctx(rtfm)->dev; struct uld_ctx *u_ctx = NULL; struct sk_buff *skb; struct hash_wr_param params; @@ -1707,17 +1775,24 @@ static int chcr_ahash_finup(struct ahash_request *req) bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); u_ctx = ULD_CTX(h_ctx(rtfm)); + error = chcr_inc_wrcount(dev); + if (error) + return -ENXIO; if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], h_ctx(rtfm)->tx_qidx))) { isfull = 1; - if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) - return -ENOSPC; + if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { + error = -ENOSPC; + goto err; + } } chcr_init_hctx_per_wr(req_ctx); error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); - if (error) - return -ENOMEM; + if (error) { + error = -ENOMEM; + goto err; + } get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); params.kctx_len = roundup(params.alg_prm.result_size, 16); @@ -1774,6 +1849,8 @@ static int chcr_ahash_finup(struct ahash_request *req) return isfull ? 
-EBUSY : -EINPROGRESS; unmap: chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); +err: + chcr_dec_wrcount(dev); return error; } @@ -1781,6 +1858,7 @@ static int chcr_ahash_digest(struct ahash_request *req) { struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); + struct chcr_dev *dev = h_ctx(rtfm)->dev; struct uld_ctx *u_ctx = NULL; struct sk_buff *skb; struct hash_wr_param params; @@ -1789,19 +1867,26 @@ static int chcr_ahash_digest(struct ahash_request *req) rtfm->init(req); bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); + error = chcr_inc_wrcount(dev); + if (error) + return -ENXIO; u_ctx = ULD_CTX(h_ctx(rtfm)); if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], h_ctx(rtfm)->tx_qidx))) { isfull = 1; - if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) - return -ENOSPC; + if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { + error = -ENOSPC; + goto err; + } } chcr_init_hctx_per_wr(req_ctx); error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); - if (error) - return -ENOMEM; + if (error) { + error = -ENOMEM; + goto err; + } get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); params.kctx_len = roundup(params.alg_prm.result_size, 16); @@ -1854,6 +1939,8 @@ static int chcr_ahash_digest(struct ahash_request *req) return isfull ? -EBUSY : -EINPROGRESS; unmap: chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); +err: + chcr_dec_wrcount(dev); return error; } @@ -1925,6 +2012,7 @@ static inline void chcr_handle_ahash_resp(struct ahash_request *req, int digestsize, updated_digestsize; struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm)); + struct chcr_dev *dev = h_ctx(tfm)->dev; if (input == NULL) goto out; @@ -1967,6 +2055,7 @@ unmap: out: + chcr_dec_wrcount(dev); req->base.complete(&req->base, err); } @@ -1983,14 +2072,13 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_AEAD: - chcr_handle_aead_resp(aead_request_cast(req), input, err); + err = chcr_handle_aead_resp(aead_request_cast(req), input, err); break; case CRYPTO_ALG_TYPE_ABLKCIPHER: - err = chcr_handle_cipher_resp(ablkcipher_request_cast(req), + chcr_handle_cipher_resp(ablkcipher_request_cast(req), input, err); break; - case CRYPTO_ALG_TYPE_AHASH: chcr_handle_ahash_resp(ahash_request_cast(req), input, err); } @@ -2008,7 +2096,7 @@ static int chcr_ahash_export(struct ahash_request *areq, void *out) memcpy(state->partial_hash, req_ctx->partial_hash, CHCR_HASH_MAX_DIGEST_SIZE); chcr_init_hctx_per_wr(state); - return 0; + return 0; } static int chcr_ahash_import(struct ahash_request *areq, const void *in) @@ -2215,10 +2303,7 @@ static int chcr_aead_common_init(struct aead_request *req) error = -ENOMEM; goto err; } - reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen, - CHCR_SRC_SG_SIZE, 0); - reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen, - CHCR_SRC_SG_SIZE, req->assoclen); + return 0; err: return error; @@ -2249,7 +2334,7 @@ static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type) req->base.complete, req->base.data); aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, req->iv); - aead_request_set_ad(subreq, req->assoclen); + aead_request_set_ad(subreq, req->assoclen); return op_type ? 
crypto_aead_decrypt(subreq) : crypto_aead_encrypt(subreq); } @@ -2268,10 +2353,10 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, struct ulptx_sgl *ulptx; unsigned int transhdr_len; unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm); - unsigned int kctx_len = 0, dnents; - unsigned int assoclen = req->assoclen; + unsigned int kctx_len = 0, dnents, snents; unsigned int authsize = crypto_aead_authsize(tfm); int error = -EINVAL; + u8 *ivptr; int null = 0; gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; @@ -2288,24 +2373,20 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL || subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { null = 1; - assoclen = 0; - reqctx->aad_nents = 0; } - dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); - dnents += sg_nents_xlen(req->dst, req->cryptlen + - (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, - req->assoclen); + dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen + + (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0); dnents += MIN_AUTH_SG; // For IV - + snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen, + CHCR_SRC_SG_SIZE, 0); dst_size = get_space_for_phys_dsgl(dnents); kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4) - sizeof(chcr_req->key_ctx); transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); - reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) < + reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) < SGE_MAX_WR_LEN; - temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) - : (sgl_len(reqctx->src_nents + reqctx->aad_nents - + MIN_GCM_SG) * 8); + temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) + : (sgl_len(snents) * 8); transhdr_len += temp; transhdr_len = roundup(transhdr_len, 16); @@ -2315,7 +2396,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, chcr_aead_common_exit(req); return ERR_PTR(chcr_aead_fallback(req, reqctx->op)); } - skb = alloc_skb(SGE_MAX_WR_LEN, flags); + skb = alloc_skb(transhdr_len, flags); if (!skb) { error = -ENOMEM; goto err; @@ -2331,16 +2412,16 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, * to the hardware spec */ chcr_req->sec_cpl.op_ivinsrtofst = - FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2, - assoclen + 1); - chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen); + FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->tx_chan_id, 2, 1); + chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen); chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( - assoclen ? 1 : 0, assoclen, - assoclen + IV + 1, + null ? 0 : 1 + IV, + null ? 0 : IV + req->assoclen, + req->assoclen + IV + 1, (temp & 0x1F0) >> 4); chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT( temp & 0xF, - null ? 0 : assoclen + IV + 1, + null ? 
0 : req->assoclen + IV + 1, temp, temp); if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL || subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA) @@ -2367,23 +2448,24 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16), actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16)); + phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); + ivptr = (u8 *)(phys_cpl + 1) + dst_size; + ulptx = (struct ulptx_sgl *)(ivptr + IV); if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { - memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE); - memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv, + memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE); + memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv, CTR_RFC3686_IV_SIZE); - *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE + + *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); } else { - memcpy(reqctx->iv, req->iv, IV); + memcpy(ivptr, req->iv, IV); } - phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); - ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); - chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid); - chcr_add_aead_src_ent(req, ulptx, assoclen); + chcr_add_aead_dst_ent(req, phys_cpl, qid); + chcr_add_aead_src_ent(req, ulptx); atomic_inc(&adap->chcr_stats.cipher_rqst); - temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + - kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0); + temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV + + kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0); create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size, transhdr_len, temp, 0); reqctx->skb = skb; @@ -2470,8 +2552,7 @@ void chcr_aead_dma_unmap(struct device *dev, } void chcr_add_aead_src_ent(struct aead_request *req, - struct ulptx_sgl *ulptx, - unsigned int assoclen) + struct ulptx_sgl *ulptx) { struct ulptx_walk ulp_walk; struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); @@ -2484,28 +2565,20 @@ void chcr_add_aead_src_ent(struct aead_request *req, buf += reqctx->b0_len; } sg_pcopy_to_buffer(req->src, sg_nents(req->src), - buf, assoclen, 0); - buf += assoclen; - memcpy(buf, reqctx->iv, IV); - buf += IV; - sg_pcopy_to_buffer(req->src, sg_nents(req->src), - buf, req->cryptlen, req->assoclen); + buf, req->cryptlen + req->assoclen, 0); } else { ulptx_walk_init(&ulp_walk, ulptx); if (reqctx->b0_len) ulptx_walk_add_page(&ulp_walk, reqctx->b0_len, - &reqctx->b0_dma); - ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0); - ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma); - ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen, - req->assoclen); + reqctx->b0_dma); + ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen + + req->assoclen, 0); ulptx_walk_end(&ulp_walk); } } void chcr_add_aead_dst_ent(struct aead_request *req, struct cpl_rx_phys_dsgl *phys_cpl, - unsigned int assoclen, unsigned short qid) { struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); @@ -2516,12 +2589,10 @@ void chcr_add_aead_dst_ent(struct aead_request *req, u32 temp; dsgl_walk_init(&dsgl_walk, phys_cpl); - if (reqctx->b0_len) - dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma); - dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0); - dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma); - temp = req->cryptlen + (reqctx->op ? 
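/* Reworked work-request layout (sketch): the IV moves out of reqctx and
 * into the WR itself, between the destination descriptor list and the
 * source SGL:
 *	chcr_wr | key ctx | cpl_rx_phys_dsgl (dst) | IV [| B0] | ulptx_sgl (src)
 * With the IV mapped from iv_dma by dsgl_walk_add_page(), src and dst are
 * each walked as a single assoclen + cryptlen range, and the old
 * aad_nents/src_nents split bookkeeping disappears. */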
-authsize : authsize); - dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen); + dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma); + temp = req->assoclen + req->cryptlen + + (reqctx->op ? -authsize : authsize); + dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0); dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id); } @@ -2589,7 +2660,7 @@ void chcr_add_hash_src_ent(struct ahash_request *req, ulptx_walk_init(&ulp_walk, ulptx); if (param->bfr_len) ulptx_walk_add_page(&ulp_walk, param->bfr_len, - &reqctx->hctx_wr.dma_addr); + reqctx->hctx_wr.dma_addr); ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg, param->sg_len, reqctx->hctx_wr.src_ofst); reqctx->hctx_wr.srcsg = ulp_walk.last_sg; @@ -2689,8 +2760,7 @@ static int set_msg_len(u8 *block, unsigned int msglen, int csize) return 0; } -static void generate_b0(struct aead_request *req, - struct chcr_aead_ctx *aeadctx, +static int generate_b0(struct aead_request *req, u8 *ivptr, unsigned short op_type) { unsigned int l, lp, m; @@ -2701,7 +2771,7 @@ static void generate_b0(struct aead_request *req, m = crypto_aead_authsize(aead); - memcpy(b0, reqctx->iv, 16); + memcpy(b0, ivptr, 16); lp = b0[0]; l = lp + 1; @@ -2715,6 +2785,8 @@ static void generate_b0(struct aead_request *req, rc = set_msg_len(b0 + 16 - l, (op_type == CHCR_DECRYPT_OP) ? req->cryptlen - m : req->cryptlen, l); + + return rc; } static inline int crypto_ccm_check_iv(const u8 *iv) @@ -2727,29 +2799,31 @@ static inline int crypto_ccm_check_iv(const u8 *iv) } static int ccm_format_packet(struct aead_request *req, - struct chcr_aead_ctx *aeadctx, + u8 *ivptr, unsigned int sub_type, unsigned short op_type, unsigned int assoclen) { struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); int rc = 0; if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) { - reqctx->iv[0] = 3; - memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3); - memcpy(reqctx->iv + 4, req->iv, 8); - memset(reqctx->iv + 12, 0, 4); + ivptr[0] = 3; + memcpy(ivptr + 1, &aeadctx->salt[0], 3); + memcpy(ivptr + 4, req->iv, 8); + memset(ivptr + 12, 0, 4); } else { - memcpy(reqctx->iv, req->iv, 16); + memcpy(ivptr, req->iv, 16); } if (assoclen) *((unsigned short *)(reqctx->scratch_pad + 16)) = htons(assoclen); - generate_b0(req, aeadctx, op_type); + rc = generate_b0(req, ivptr, op_type); /* zero the ctr value */ - memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1); + memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1); return rc; } @@ -2762,7 +2836,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM; unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC; - unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id; + unsigned int c_id = a_ctx(tfm)->tx_chan_id; unsigned int ccm_xtra; unsigned char tag_offset = 0, auth_offset = 0; unsigned int assoclen; @@ -2775,7 +2849,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, ((assoclen) ? CCM_AAD_FIELD_SIZE : 0); auth_offset = req->cryptlen ? 
- (assoclen + IV + 1 + ccm_xtra) : 0; + (req->assoclen + IV + 1 + ccm_xtra) : 0; if (op_type == CHCR_DECRYPT_OP) { if (crypto_aead_authsize(tfm) != req->cryptlen) tag_offset = crypto_aead_authsize(tfm); else auth_offset = 0; } sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id, - 2, assoclen + 1 + ccm_xtra); + 2, 1); sec_cpl->pldlen = - htonl(assoclen + IV + req->cryptlen + ccm_xtra); + htonl(req->assoclen + IV + req->cryptlen + ccm_xtra); /* For CCM there will be b0 always. So AAD start will be 1 always */ sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( - 1, assoclen + ccm_xtra, assoclen - + IV + 1 + ccm_xtra, 0); + 1 + IV, IV + assoclen + ccm_xtra, + req->assoclen + IV + 1 + ccm_xtra, 0); sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, auth_offset, tag_offset, @@ -2838,10 +2912,11 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, struct cpl_rx_phys_dsgl *phys_cpl; struct ulptx_sgl *ulptx; unsigned int transhdr_len; - unsigned int dst_size = 0, kctx_len, dnents, temp; + unsigned int dst_size = 0, kctx_len, dnents, temp, snents; unsigned int sub_type, assoclen = req->assoclen; unsigned int authsize = crypto_aead_authsize(tfm); int error = -EINVAL; + u8 *ivptr; gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; struct adapter *adap = padap(a_ctx(tfm)->dev); @@ -2857,37 +2932,38 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type); if (error) goto err; - dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); - dnents += sg_nents_xlen(req->dst, req->cryptlen + dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen + (reqctx->op ? -authsize : authsize), - CHCR_DST_SG_SIZE, req->assoclen); + CHCR_DST_SG_SIZE, 0); dnents += MIN_CCM_SG; // For IV and B0 dst_size = get_space_for_phys_dsgl(dnents); + snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen, + CHCR_SRC_SG_SIZE, 0); + snents += MIN_CCM_SG; //For B0 kctx_len = roundup(aeadctx->enckey_len, 16) * 2; transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); - reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen + + reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen + reqctx->b0_len) <= SGE_MAX_WR_LEN; - temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen + + temp = reqctx->imm ? 
roundup(req->assoclen + req->cryptlen + reqctx->b0_len, 16) : - (sgl_len(reqctx->src_nents + reqctx->aad_nents + - MIN_CCM_SG) * 8); + (sgl_len(snents) * 8); transhdr_len += temp; transhdr_len = roundup(transhdr_len, 16); if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE - - reqctx->b0_len, transhdr_len, reqctx->op)) { + reqctx->b0_len, transhdr_len, reqctx->op)) { atomic_inc(&adap->chcr_stats.fallback); chcr_aead_common_exit(req); return ERR_PTR(chcr_aead_fallback(req, reqctx->op)); } - skb = alloc_skb(SGE_MAX_WR_LEN, flags); + skb = alloc_skb(transhdr_len, flags); if (!skb) { error = -ENOMEM; goto err; } - chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len); + chcr_req = __skb_put_zero(skb, transhdr_len); fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op); @@ -2897,16 +2973,17 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, aeadctx->key, aeadctx->enckey_len); phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); - ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); - error = ccm_format_packet(req, aeadctx, sub_type, reqctx->op, assoclen); + ivptr = (u8 *)(phys_cpl + 1) + dst_size; + ulptx = (struct ulptx_sgl *)(ivptr + IV); + error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen); if (error) goto dstmap_fail; - chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid); - chcr_add_aead_src_ent(req, ulptx, assoclen); + chcr_add_aead_dst_ent(req, phys_cpl, qid); + chcr_add_aead_src_ent(req, ulptx); atomic_inc(&adap->chcr_stats.aead_rqst); - temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + - kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen + + temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV + + kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen + reqctx->b0_len) : 0); create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0, transhdr_len, temp, 0); @@ -2931,10 +3008,11 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, struct chcr_wr *chcr_req; struct cpl_rx_phys_dsgl *phys_cpl; struct ulptx_sgl *ulptx; - unsigned int transhdr_len, dnents = 0; + unsigned int transhdr_len, dnents = 0, snents; unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen; unsigned int authsize = crypto_aead_authsize(tfm); int error = -EINVAL; + u8 *ivptr; gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; struct adapter *adap = padap(a_ctx(tfm)->dev); @@ -2946,19 +3024,19 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, error = chcr_aead_common_init(req); if (error) return ERR_PTR(error); - dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); - dnents += sg_nents_xlen(req->dst, req->cryptlen + + dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen + (reqctx->op ? -authsize : authsize), - CHCR_DST_SG_SIZE, req->assoclen); + CHCR_DST_SG_SIZE, 0); + snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen, + CHCR_SRC_SG_SIZE, 0); dnents += MIN_GCM_SG; // For IV dst_size = get_space_for_phys_dsgl(dnents); kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE; transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); - reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <= + reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <= SGE_MAX_WR_LEN; - temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) : - (sgl_len(reqctx->src_nents + - reqctx->aad_nents + MIN_GCM_SG) * 8); + temp = reqctx->imm ? 
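/* Allocation note: with snents/dnents known up front, the WR skb is sized
 * to the computed transhdr_len (which already includes immediate data via
 * temp) rather than a worst-case SGE_MAX_WR_LEN, both in
 * create_aead_ccm_wr() above and in create_gcm_wr() here. */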
roundup(req->assoclen + req->cryptlen, 16) : + (sgl_len(snents) * 8); transhdr_len += temp; transhdr_len = roundup(transhdr_len, 16); if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE, @@ -2968,7 +3046,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, chcr_aead_common_exit(req); return ERR_PTR(chcr_aead_fallback(req, reqctx->op)); } - skb = alloc_skb(SGE_MAX_WR_LEN, flags); + skb = alloc_skb(transhdr_len, flags); if (!skb) { error = -ENOMEM; goto err; @@ -2979,15 +3057,15 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, //Offset of tag from end temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize; chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR( - a_ctx(tfm)->dev->rx_channel_id, 2, - (assoclen + 1)); + a_ctx(tfm)->tx_chan_id, 2, 1); chcr_req->sec_cpl.pldlen = - htonl(assoclen + IV + req->cryptlen); + htonl(req->assoclen + IV + req->cryptlen); chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( - assoclen ? 1 : 0, assoclen, - assoclen + IV + 1, 0); + assoclen ? 1 + IV : 0, + assoclen ? IV + assoclen : 0, + req->assoclen + IV + 1, 0); chcr_req->sec_cpl.cipherstop_lo_authinsert = - FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1, + FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1, temp, temp); chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op == @@ -3002,25 +3080,26 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE); + phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); + ivptr = (u8 *)(phys_cpl + 1) + dst_size; /* prepare a 16 byte iv */ /* S A L T | IV | 0x00000001 */ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) { - memcpy(reqctx->iv, aeadctx->salt, 4); - memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE); + memcpy(ivptr, aeadctx->salt, 4); + memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE); } else { - memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE); + memcpy(ivptr, req->iv, GCM_AES_IV_SIZE); } - *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01); + *((unsigned int *)(ivptr + 12)) = htonl(0x01); - phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); - ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); + ulptx = (struct ulptx_sgl *)(ivptr + 16); - chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid); - chcr_add_aead_src_ent(req, ulptx, assoclen); + chcr_add_aead_dst_ent(req, phys_cpl, qid); + chcr_add_aead_src_ent(req, ulptx); atomic_inc(&adap->chcr_stats.aead_rqst); - temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + - kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0); + temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV + + kctx_len + (reqctx->imm ? 
(req->assoclen + req->cryptlen) : 0); create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size, transhdr_len, temp, reqctx->verify); reqctx->skb = skb; @@ -3118,12 +3197,12 @@ static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) aeadctx->mayverify = VERIFY_HW; break; case ICV_12: - aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; - aeadctx->mayverify = VERIFY_HW; + aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; + aeadctx->mayverify = VERIFY_HW; break; case ICV_14: - aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3; - aeadctx->mayverify = VERIFY_HW; + aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3; + aeadctx->mayverify = VERIFY_HW; break; case ICV_16: aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; @@ -3565,27 +3644,42 @@ static int chcr_aead_op(struct aead_request *req, create_wr_t create_wr_fn) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); struct uld_ctx *u_ctx; struct sk_buff *skb; int isfull = 0; + struct chcr_dev *cdev; - if (!a_ctx(tfm)->dev) { + cdev = a_ctx(tfm)->dev; + if (!cdev) { pr_err("chcr : %s : No crypto device.\n", __func__); return -ENXIO; } + + if (chcr_inc_wrcount(cdev)) { + /* Detach state for CHCR means lldi or padap is freed. + * We cannot increment fallback here. + */ + return chcr_aead_fallback(req, reqctx->op); + } + u_ctx = ULD_CTX(a_ctx(tfm)); if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], a_ctx(tfm)->tx_qidx)) { isfull = 1; - if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { + chcr_dec_wrcount(cdev); return -ENOSPC; + } } /* Form a WR from req */ skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size); - if (IS_ERR(skb) || !skb) - return PTR_ERR(skb); + if (IS_ERR_OR_NULL(skb)) { + chcr_dec_wrcount(cdev); + return PTR_ERR_OR_ZERO(skb); + } skb->dev = u_ctx->lldi.ports[0]; set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx); @@ -3722,7 +3816,6 @@ static struct chcr_alg_template driver_algs[] = { .setkey = chcr_aes_rfc3686_setkey, .encrypt = chcr_aes_encrypt, .decrypt = chcr_aes_decrypt, - .geniv = "seqiv", } } }, @@ -4178,7 +4271,6 @@ static struct chcr_alg_template driver_algs[] = { .setauthsize = chcr_authenc_null_setauthsize, } }, - }; /* diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h index 1871500309e2..ee20dd899e83 100644 --- a/drivers/crypto/chelsio/chcr_algo.h +++ b/drivers/crypto/chelsio/chcr_algo.h @@ -262,7 +262,7 @@ #define MIN_AUTH_SG 1 /* IV */ #define MIN_GCM_SG 1 /* IV */ #define MIN_DIGEST_SG 1 /*Partial Buffer*/ -#define MIN_CCM_SG 2 /*IV+B0*/ +#define MIN_CCM_SG 1 /*IV+B0*/ #define CIP_SPACE_LEFT(len) \ ((SGE_MAX_WR_LEN - CIP_WR_MIN_LEN - (len))) #define HASH_SPACE_LEFT(len) \ diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c index 2c472e3c6aeb..239b933d6df6 100644 --- a/drivers/crypto/chelsio/chcr_core.c +++ b/drivers/crypto/chelsio/chcr_core.c @@ -26,10 +26,7 @@ #include "chcr_core.h" #include "cxgb4_uld.h" -static LIST_HEAD(uld_ctx_list); -static DEFINE_MUTEX(dev_mutex); -static atomic_t dev_count; -static struct uld_ctx *ctx_rr; +static struct chcr_driver_data drv_data; typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input); static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input); @@ -53,6 +50,29 @@ static struct cxgb4_uld_info chcr_uld_info = { #endif /* CONFIG_CHELSIO_IPSEC_INLINE */ }; +static void detach_work_fn(struct work_struct *work) 
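/* detach_work_fn() implements a bounded quiesce: while requests are still
 * inflight it re-arms itself every WQ_DETACH_TM jiffies, up to WQ_RETRY
 * attempts, then WARNs and completes detach_comp anyway so that
 * chcr_detach_device() cannot block forever on a response that will
 * never arrive. */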
+{ + struct chcr_dev *dev; + + dev = container_of(work, struct chcr_dev, detach_work.work); + + if (atomic_read(&dev->inflight)) { + dev->wqretry--; + if (dev->wqretry) { + pr_debug("Request Inflight Count %d\n", + atomic_read(&dev->inflight)); + + schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM); + } else { + WARN(1, "CHCR:%d request Still Pending\n", + atomic_read(&dev->inflight)); + complete(&dev->detach_comp); + } + } else { + complete(&dev->detach_comp); + } +} + struct uld_ctx *assign_chcr_device(void) { struct uld_ctx *u_ctx = NULL; @@ -63,56 +83,74 @@ struct uld_ctx *assign_chcr_device(void) * Although One session must use the same device to * maintain request-response ordering. */ - mutex_lock(&dev_mutex); - if (!list_empty(&uld_ctx_list)) { - u_ctx = ctx_rr; - if (list_is_last(&ctx_rr->entry, &uld_ctx_list)) - ctx_rr = list_first_entry(&uld_ctx_list, - struct uld_ctx, - entry); + mutex_lock(&drv_data.drv_mutex); + if (!list_empty(&drv_data.act_dev)) { + u_ctx = drv_data.last_dev; + if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev)) + drv_data.last_dev = list_first_entry(&drv_data.act_dev, + struct uld_ctx, entry); else - ctx_rr = list_next_entry(ctx_rr, entry); + drv_data.last_dev = + list_next_entry(drv_data.last_dev, entry); } - mutex_unlock(&dev_mutex); + mutex_unlock(&drv_data.drv_mutex); return u_ctx; } -static int chcr_dev_add(struct uld_ctx *u_ctx) +static void chcr_dev_add(struct uld_ctx *u_ctx) { struct chcr_dev *dev; - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) - return -ENXIO; + dev = &u_ctx->dev; + dev->state = CHCR_ATTACH; + atomic_set(&dev->inflight, 0); + mutex_lock(&drv_data.drv_mutex); + list_move(&u_ctx->entry, &drv_data.act_dev); + if (!drv_data.last_dev) + drv_data.last_dev = u_ctx; + mutex_unlock(&drv_data.drv_mutex); +} + +static void chcr_dev_init(struct uld_ctx *u_ctx) +{ + struct chcr_dev *dev; + dev = &u_ctx->dev; spin_lock_init(&dev->lock_chcr_dev); - u_ctx->dev = dev; - dev->u_ctx = u_ctx; - atomic_inc(&dev_count); - mutex_lock(&dev_mutex); - list_add_tail(&u_ctx->entry, &uld_ctx_list); - if (!ctx_rr) - ctx_rr = u_ctx; - mutex_unlock(&dev_mutex); - return 0; + INIT_DELAYED_WORK(&dev->detach_work, detach_work_fn); + init_completion(&dev->detach_comp); + dev->state = CHCR_INIT; + dev->wqretry = WQ_RETRY; + atomic_inc(&drv_data.dev_count); + atomic_set(&dev->inflight, 0); + mutex_lock(&drv_data.drv_mutex); + list_add_tail(&u_ctx->entry, &drv_data.inact_dev); + if (!drv_data.last_dev) + drv_data.last_dev = u_ctx; + mutex_unlock(&drv_data.drv_mutex); } -static int chcr_dev_remove(struct uld_ctx *u_ctx) +static int chcr_dev_move(struct uld_ctx *u_ctx) { - if (ctx_rr == u_ctx) { - if (list_is_last(&ctx_rr->entry, &uld_ctx_list)) - ctx_rr = list_first_entry(&uld_ctx_list, - struct uld_ctx, - entry); + struct adapter *adap; + + mutex_lock(&drv_data.drv_mutex); + if (drv_data.last_dev == u_ctx) { + if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev)) + drv_data.last_dev = list_first_entry(&drv_data.act_dev, + struct uld_ctx, entry); else - ctx_rr = list_next_entry(ctx_rr, entry); + drv_data.last_dev = + list_next_entry(drv_data.last_dev, entry); } - list_del(&u_ctx->entry); - if (list_empty(&uld_ctx_list)) - ctx_rr = NULL; - kfree(u_ctx->dev); - u_ctx->dev = NULL; - atomic_dec(&dev_count); + list_move(&u_ctx->entry, &drv_data.inact_dev); + if (list_empty(&drv_data.act_dev)) + drv_data.last_dev = NULL; + adap = padap(&u_ctx->dev); + memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats)); + atomic_dec(&drv_data.dev_count); + 
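/* Device bookkeeping: a uld_ctx now starts life on drv_data.inact_dev
 * (chcr_dev_init), moves to drv_data.act_dev when the adapter comes up
 * (chcr_dev_add) and back again on detach (chcr_dev_move above);
 * assign_chcr_device() round-robins last_dev over act_dev only, so a
 * detached adapter is never handed to a new session. */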
mutex_unlock(&drv_data.drv_mutex); + return 0; } @@ -131,12 +169,8 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev, ack_err_status = ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4)); - if (ack_err_status) { - if (CHK_MAC_ERR_BIT(ack_err_status) || - CHK_PAD_ERR_BIT(ack_err_status)) - error_status = -EBADMSG; - atomic_inc(&adap->chcr_stats.error); - } + if (CHK_MAC_ERR_BIT(ack_err_status) || CHK_PAD_ERR_BIT(ack_err_status)) + error_status = -EBADMSG; /* call completion callback with failure status */ if (req) { error_status = chcr_handle_resp(req, input, error_status); @@ -144,6 +178,9 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev, pr_err("Incorrect request address from the firmware\n"); return -EFAULT; } + if (error_status) + atomic_inc(&adap->chcr_stats.error); + return 0; } @@ -167,6 +204,7 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld) goto out; } u_ctx->lldi = *lld; + chcr_dev_init(u_ctx); #ifdef CONFIG_CHELSIO_IPSEC_INLINE if (lld->crypto & ULP_CRYPTO_IPSEC_INLINE) chcr_add_xfrmops(lld); @@ -179,7 +217,7 @@ int chcr_uld_rx_handler(void *handle, const __be64 *rsp, const struct pkt_gl *pgl) { struct uld_ctx *u_ctx = (struct uld_ctx *)handle; - struct chcr_dev *dev = u_ctx->dev; + struct chcr_dev *dev = &u_ctx->dev; const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp; if (rpl->opcode != CPL_FW6_PLD) { @@ -201,6 +239,28 @@ int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev) } #endif /* CONFIG_CHELSIO_IPSEC_INLINE */ +static void chcr_detach_device(struct uld_ctx *u_ctx) +{ + struct chcr_dev *dev = &u_ctx->dev; + + spin_lock_bh(&dev->lock_chcr_dev); + if (dev->state == CHCR_DETACH) { + spin_unlock_bh(&dev->lock_chcr_dev); + pr_debug("Detached Event received for already detach device\n"); + return; + } + dev->state = CHCR_DETACH; + spin_unlock_bh(&dev->lock_chcr_dev); + + if (atomic_read(&dev->inflight) != 0) { + schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM); + wait_for_completion(&dev->detach_comp); + } + + // Move u_ctx to inactive_dev list + chcr_dev_move(u_ctx); +} + static int chcr_uld_state_change(void *handle, enum cxgb4_state state) { struct uld_ctx *u_ctx = handle; @@ -208,23 +268,16 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state) switch (state) { case CXGB4_STATE_UP: - if (!u_ctx->dev) { - ret = chcr_dev_add(u_ctx); - if (ret != 0) - return ret; + if (u_ctx->dev.state != CHCR_INIT) { + // ALready Initialised. 
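chcr_detach_device() above is the other half of the CHCR_INIT/CHCR_ATTACH/CHCR_DETACH state machine: it flips the device to CHCR_DETACH under the lock so no new work is accepted, and if requests are still in flight it arms detach_work and blocks on detach_comp; detach_work_fn (at the top of this file's diff) then re-checks every WQ_DETACH_TM (50 ms) up to WQ_RETRY times before WARNing and completing anyway. A compact userspace rendering of that drain loop, using C11 atomics and nanosleep() as stand-ins for the kernel's atomic_t, delayed work and struct completion (names and constants are illustrative, mirroring the driver's):

/* drain.c — standalone sketch; build with: cc -std=c11 drain.c */
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

#define WQ_RETRY 5	/* mirrors the driver's retry budget */
#define TICK_MS  50	/* mirrors WQ_DETACH_TM */

static atomic_int inflight;	/* stands in for dev->inflight */

/*
 * Quiesce-on-detach: once the state flip stops new submissions, poll
 * the in-flight counter a bounded number of times, then give up
 * loudly.  nanosleep() stands in for schedule_delayed_work();
 * returning stands in for complete(&dev->detach_comp), which wakes
 * the wait_for_completion() in the detach path.
 */
static void drain_inflight(void)
{
	const struct timespec tick = { 0, TICK_MS * 1000L * 1000L };
	int retry = WQ_RETRY;

	while (atomic_load(&inflight) && --retry) {
		printf("request inflight count %d\n", atomic_load(&inflight));
		nanosleep(&tick, NULL);
	}
	if (atomic_load(&inflight))
		fprintf(stderr, "giving up: %d requests still pending\n",
			atomic_load(&inflight));
}

int main(void)
{
	atomic_store(&inflight, 2);
	drain_inflight();	/* nothing completes, so the retries run out */
	return 0;
}

Bounding the wait means a wedged firmware cannot hang the PCI shutdown path forever: after WQ_RETRY polls the driver warns and detaches regardless, trading a clean drain for forward progress.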
+ return 0; } - if (atomic_read(&dev_count) == 1) - ret = start_crypto(); + chcr_dev_add(u_ctx); + ret = start_crypto(); break; case CXGB4_STATE_DETACH: - if (u_ctx->dev) { - mutex_lock(&dev_mutex); - chcr_dev_remove(u_ctx); - mutex_unlock(&dev_mutex); - } - if (!atomic_read(&dev_count)) - stop_crypto(); + chcr_detach_device(u_ctx); break; case CXGB4_STATE_START_RECOVERY: @@ -237,7 +290,13 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state) static int __init chcr_crypto_init(void) { + INIT_LIST_HEAD(&drv_data.act_dev); + INIT_LIST_HEAD(&drv_data.inact_dev); + atomic_set(&drv_data.dev_count, 0); + mutex_init(&drv_data.drv_mutex); + drv_data.last_dev = NULL; cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info); + return 0; } @@ -245,18 +304,20 @@ static void __exit chcr_crypto_exit(void) { struct uld_ctx *u_ctx, *tmp; - if (atomic_read(&dev_count)) - stop_crypto(); + stop_crypto(); + cxgb4_unregister_uld(CXGB4_ULD_CRYPTO); /* Remove all devices from list */ - mutex_lock(&dev_mutex); - list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) { - if (u_ctx->dev) - chcr_dev_remove(u_ctx); + mutex_lock(&drv_data.drv_mutex); + list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) { + list_del(&u_ctx->entry); kfree(u_ctx); } - mutex_unlock(&dev_mutex); - cxgb4_unregister_uld(CXGB4_ULD_CRYPTO); + list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) { + list_del(&u_ctx->entry); + kfree(u_ctx); + } + mutex_unlock(&drv_data.drv_mutex); } module_init(chcr_crypto_init); diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h index de3a9c085daf..ad874d548aa5 100644 --- a/drivers/crypto/chelsio/chcr_core.h +++ b/drivers/crypto/chelsio/chcr_core.h @@ -47,7 +47,7 @@ #define MAX_PENDING_REQ_TO_HW 20 #define CHCR_TEST_RESPONSE_TIMEOUT 1000 - +#define WQ_DETACH_TM (msecs_to_jiffies(50)) #define PAD_ERROR_BIT 1 #define CHK_PAD_ERR_BIT(x) (((x) >> PAD_ERROR_BIT) & 1) @@ -61,9 +61,6 @@ #define HASH_WR_MIN_LEN (sizeof(struct chcr_wr) + \ DUMMY_BYTES + \ sizeof(struct ulptx_sgl)) - -#define padap(dev) pci_get_drvdata(dev->u_ctx->lldi.pdev) - struct uld_ctx; struct _key_ctx { @@ -121,6 +118,20 @@ struct _key_ctx { #define KEYCTX_TX_WR_AUTHIN_G(x) \ (((x) >> KEYCTX_TX_WR_AUTHIN_S) & KEYCTX_TX_WR_AUTHIN_M) +#define WQ_RETRY 5 +struct chcr_driver_data { + struct list_head act_dev; + struct list_head inact_dev; + atomic_t dev_count; + struct mutex drv_mutex; + struct uld_ctx *last_dev; +}; + +enum chcr_state { + CHCR_INIT = 0, + CHCR_ATTACH, + CHCR_DETACH, +}; struct chcr_wr { struct fw_crypto_lookaside_wr wreq; struct ulp_txpkt ulptx; @@ -131,15 +142,18 @@ struct chcr_wr { struct chcr_dev { spinlock_t lock_chcr_dev; - struct uld_ctx *u_ctx; + enum chcr_state state; + atomic_t inflight; + int wqretry; + struct delayed_work detach_work; + struct completion detach_comp; unsigned char tx_channel_id; - unsigned char rx_channel_id; }; struct uld_ctx { struct list_head entry; struct cxgb4_lld_info lldi; - struct chcr_dev *dev; + struct chcr_dev dev; }; struct sge_opaque_hdr { @@ -159,8 +173,17 @@ struct chcr_ipsec_wr { struct chcr_ipsec_req req; }; +#define ESN_IV_INSERT_OFFSET 12 +struct chcr_ipsec_aadiv { + __be32 spi; + u8 seq_no[8]; + u8 iv[8]; +}; + struct ipsec_sa_entry { int hmac_ctrl; + u16 esn; + u16 resv; unsigned int enckey_len; unsigned int kctx_len; unsigned int authsize; @@ -181,6 +204,13 @@ static inline unsigned int sgl_len(unsigned int n) return (3 * n) / 2 + (n & 1) + 2; } +static inline void *padap(struct chcr_dev *dev) +{ + struct 
uld_ctx *u_ctx = container_of(dev, struct uld_ctx, dev); + + return pci_get_drvdata(u_ctx->lldi.pdev); +} + struct uld_ctx *assign_chcr_device(void); int chcr_send_wr(struct sk_buff *skb); int start_crypto(void); diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h index d37ef41f9ebe..655606f2e4d0 100644 --- a/drivers/crypto/chelsio/chcr_crypto.h +++ b/drivers/crypto/chelsio/chcr_crypto.h @@ -41,7 +41,8 @@ #define CCM_B0_SIZE 16 #define CCM_AAD_FIELD_SIZE 2 -#define T6_MAX_AAD_SIZE 511 +// 511 - 16(For IV) +#define T6_MAX_AAD_SIZE 495 /* Define following if h/w is not dropping the AAD and IV data before @@ -185,9 +186,6 @@ struct chcr_aead_reqctx { dma_addr_t b0_dma; unsigned int b0_len; unsigned int op; - short int aad_nents; - short int src_nents; - short int dst_nents; u16 imm; u16 verify; u8 iv[CHCR_MAX_CRYPTO_IV_LEN + MAX_SCRATCH_PAD_SIZE]; @@ -322,10 +320,8 @@ void chcr_aead_dma_unmap(struct device *dev, struct aead_request *req, unsigned short op_type); void chcr_add_aead_dst_ent(struct aead_request *req, struct cpl_rx_phys_dsgl *phys_cpl, - unsigned int assoclen, unsigned short qid); -void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx, - unsigned int assoclen); +void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx); void chcr_add_cipher_src_ent(struct ablkcipher_request *req, void *ulptx, struct cipher_wr_param *wrparam); diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c index 461b97e2f1fd..2f60049361ef 100644 --- a/drivers/crypto/chelsio/chcr_ipsec.c +++ b/drivers/crypto/chelsio/chcr_ipsec.c @@ -76,12 +76,14 @@ static int chcr_xfrm_add_state(struct xfrm_state *x); static void chcr_xfrm_del_state(struct xfrm_state *x); static void chcr_xfrm_free_state(struct xfrm_state *x); static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x); +static void chcr_advance_esn_state(struct xfrm_state *x); static const struct xfrmdev_ops chcr_xfrmdev_ops = { .xdo_dev_state_add = chcr_xfrm_add_state, .xdo_dev_state_delete = chcr_xfrm_del_state, .xdo_dev_state_free = chcr_xfrm_free_state, .xdo_dev_offload_ok = chcr_ipsec_offload_ok, + .xdo_dev_state_advance_esn = chcr_advance_esn_state, }; /* Add offload xfrms to Chelsio Interface */ @@ -210,10 +212,6 @@ static int chcr_xfrm_add_state(struct xfrm_state *x) pr_debug("CHCR: Cannot offload compressed xfrm states\n"); return -EINVAL; } - if (x->props.flags & XFRM_STATE_ESN) { - pr_debug("CHCR: Cannot offload ESN xfrm states\n"); - return -EINVAL; - } if (x->props.family != AF_INET && x->props.family != AF_INET6) { pr_debug("CHCR: Only IPv4/6 xfrm state offloaded\n"); @@ -266,6 +264,8 @@ static int chcr_xfrm_add_state(struct xfrm_state *x) } sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry); + if (x->props.flags & XFRM_STATE_ESN) + sa_entry->esn = 1; chcr_ipsec_setkey(x, sa_entry); x->xso.offload_handle = (unsigned long)sa_entry; try_module_get(THIS_MODULE); @@ -294,36 +294,71 @@ static void chcr_xfrm_free_state(struct xfrm_state *x) static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x) { - /* Offload with IP options is not supported yet */ - if (ip_hdr(skb)->ihl > 5) + if (x->props.family == AF_INET) { + /* Offload with IP options is not supported yet */ + if (ip_hdr(skb)->ihl > 5) + return false; + } else { + /* Offload with IPv6 extension headers is not support yet */ + if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) + return false; + } + /* Inline single pdu */ + if 
(skb_shinfo(skb)->gso_size) return false; - return true; } -static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len) +static void chcr_advance_esn_state(struct xfrm_state *x) +{ + /* do nothing */ + if (!x->xso.offload_handle) + return; +} + +static inline int is_eth_imm(const struct sk_buff *skb, + struct ipsec_sa_entry *sa_entry) { - int hdrlen = sizeof(struct chcr_ipsec_req) + kctx_len; + unsigned int kctx_len; + int hdrlen; + + kctx_len = sa_entry->kctx_len; + hdrlen = sizeof(struct fw_ulptx_wr) + + sizeof(struct chcr_ipsec_req) + kctx_len; hdrlen += sizeof(struct cpl_tx_pkt); + if (sa_entry->esn) + hdrlen += (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16) + << 4); if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen) return hdrlen; return 0; } static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb, - unsigned int kctx_len) + struct ipsec_sa_entry *sa_entry, + bool *immediate) { + unsigned int kctx_len; unsigned int flits; - int hdrlen = is_eth_imm(skb, kctx_len); + int aadivlen; + int hdrlen; + + kctx_len = sa_entry->kctx_len; + hdrlen = is_eth_imm(skb, sa_entry); + aadivlen = sa_entry->esn ? DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), + 16) : 0; + aadivlen <<= 4; /* If the skb is small enough, we can pump it out as a work request * with only immediate data. In that case we just have to have the * TX Packet header plus the skb data in the Work Request. */ - if (hdrlen) + if (hdrlen) { + *immediate = true; return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64)); + } flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); @@ -338,13 +373,69 @@ static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb, flits += (sizeof(struct fw_ulptx_wr) + sizeof(struct chcr_ipsec_req) + kctx_len + - sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); + sizeof(struct cpl_tx_pkt_core) + + aadivlen) / sizeof(__be64); return flits; } +inline void *copy_esn_pktxt(struct sk_buff *skb, + struct net_device *dev, + void *pos, + struct ipsec_sa_entry *sa_entry) +{ + struct chcr_ipsec_aadiv *aadiv; + struct ulptx_idata *sc_imm; + struct ip_esp_hdr *esphdr; + struct xfrm_offload *xo; + struct sge_eth_txq *q; + struct adapter *adap; + struct port_info *pi; + __be64 seqno; + u32 qidx; + u32 seqlo; + u8 *iv; + int eoq; + int len; + + pi = netdev_priv(dev); + adap = pi->adapter; + qidx = skb->queue_mapping; + q = &adap->sge.ethtxq[qidx + pi->first_qset]; + + /* end of queue, reset pos to start of queue */ + eoq = (void *)q->q.stat - pos; + if (!eoq) + pos = q->q.desc; + + len = DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16) << 4; + memset(pos, 0, len); + aadiv = (struct chcr_ipsec_aadiv *)pos; + esphdr = (struct ip_esp_hdr *)skb_transport_header(skb); + iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr); + xo = xfrm_offload(skb); + + aadiv->spi = (esphdr->spi); + seqlo = htonl(esphdr->seq_no); + seqno = cpu_to_be64(seqlo + ((u64)xo->seq.hi << 32)); + memcpy(aadiv->seq_no, &seqno, 8); + iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr); + memcpy(aadiv->iv, iv, 8); + + if (is_eth_imm(skb, sa_entry) && !skb_is_nonlinear(skb)) { + sc_imm = (struct ulptx_idata *)(pos + + (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), + sizeof(__be64)) << 3)); + sc_imm->cmd_more = FILL_CMD_MORE(0); + sc_imm->len = cpu_to_be32(skb->len); + } + pos += len; + return pos; +} + inline void *copy_cpltx_pktxt(struct sk_buff *skb, - struct net_device *dev, - void *pos) + struct net_device *dev, + void *pos, + struct ipsec_sa_entry *sa_entry) { struct cpl_tx_pkt_core *cpl; struct sge_eth_txq 
*q; @@ -379,6 +470,9 @@ inline void *copy_cpltx_pktxt(struct sk_buff *skb, cpl->ctrl1 = cpu_to_be64(cntrl); pos += sizeof(struct cpl_tx_pkt_core); + /* Copy ESN info for HW */ + if (sa_entry->esn) + pos = copy_esn_pktxt(skb, dev, pos, sa_entry); return pos; } @@ -425,7 +519,7 @@ inline void *copy_key_cpltx_pktxt(struct sk_buff *skb, pos = (u8 *)q->q.desc + (key_len - left); } /* Copy CPL TX PKT XT */ - pos = copy_cpltx_pktxt(skb, dev, pos); + pos = copy_cpltx_pktxt(skb, dev, pos, sa_entry); return pos; } @@ -438,11 +532,20 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb, { struct port_info *pi = netdev_priv(dev); struct adapter *adap = pi->adapter; - unsigned int immdatalen = 0; unsigned int ivsize = GCM_ESP_IV_SIZE; struct chcr_ipsec_wr *wr; + bool immediate = false; + u16 immdatalen = 0; unsigned int flits; + u32 ivinoffset; + u32 aadstart; + u32 aadstop; + u32 ciphstart; + u16 sc_more = 0; + u32 ivdrop = 0; + u32 esnlen = 0; u32 wr_mid; + u16 ndesc; int qidx = skb_get_queue_mapping(skb); struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset]; unsigned int kctx_len = sa_entry->kctx_len; @@ -450,15 +553,24 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb, atomic_inc(&adap->chcr_stats.ipsec_cnt); - flits = calc_tx_sec_flits(skb, kctx_len); + flits = calc_tx_sec_flits(skb, sa_entry, &immediate); + ndesc = DIV_ROUND_UP(flits, 2); + if (sa_entry->esn) + ivdrop = 1; - if (is_eth_imm(skb, kctx_len)) + if (immediate) immdatalen = skb->len; + if (sa_entry->esn) { + esnlen = sizeof(struct chcr_ipsec_aadiv); + if (!skb_is_nonlinear(skb)) + sc_more = 1; + } + /* WR Header */ wr = (struct chcr_ipsec_wr *)pos; wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR)); - wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); + wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(ndesc); if (unlikely(credits < ETHTXQ_STOP_THRES)) { netif_tx_stop_queue(q->txq); @@ -470,41 +582,46 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb, /* ULPTX */ wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid); - wr->req.ulptx.len = htonl(DIV_ROUND_UP(flits, 2) - 1); + wr->req.ulptx.len = htonl(ndesc - 1); /* Sub-command */ - wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen); + wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen || sc_more); wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + sizeof(wr->req.key_ctx) + kctx_len + sizeof(struct cpl_tx_pkt_core) + - immdatalen); + esnlen + + (esnlen ? 0 : immdatalen)); /* CPL_SEC_PDU */ + ivinoffset = sa_entry->esn ? (ESN_IV_INSERT_OFFSET + 1) : + (skb_transport_offset(skb) + + sizeof(struct ip_esp_hdr) + 1); wr->req.sec_cpl.op_ivinsrtofst = htonl( CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) | CPL_TX_SEC_PDU_CPLLEN_V(2) | CPL_TX_SEC_PDU_PLACEHOLDER_V(1) | CPL_TX_SEC_PDU_IVINSRTOFST_V( - (skb_transport_offset(skb) + - sizeof(struct ip_esp_hdr) + 1))); + ivinoffset)); - wr->req.sec_cpl.pldlen = htonl(skb->len); + wr->req.sec_cpl.pldlen = htonl(skb->len + esnlen); + aadstart = sa_entry->esn ? 1 : (skb_transport_offset(skb) + 1); + aadstop = sa_entry->esn ? ESN_IV_INSERT_OFFSET : + (skb_transport_offset(skb) + + sizeof(struct ip_esp_hdr)); + ciphstart = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr) + + GCM_ESP_IV_SIZE + 1; + ciphstart += sa_entry->esn ? 
esnlen : 0; wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( - (skb_transport_offset(skb) + 1), - (skb_transport_offset(skb) + - sizeof(struct ip_esp_hdr)), - (skb_transport_offset(skb) + - sizeof(struct ip_esp_hdr) + - GCM_ESP_IV_SIZE + 1), 0); + aadstart, + aadstop, + ciphstart, 0); wr->req.sec_cpl.cipherstop_lo_authinsert = - FILL_SEC_CPL_AUTHINSERT(0, skb_transport_offset(skb) + - sizeof(struct ip_esp_hdr) + - GCM_ESP_IV_SIZE + 1, - sa_entry->authsize, - sa_entry->authsize); + FILL_SEC_CPL_AUTHINSERT(0, ciphstart, + sa_entry->authsize, + sa_entry->authsize); wr->req.sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1, CHCR_SCMD_CIPHER_MODE_AES_GCM, @@ -512,7 +629,7 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb, sa_entry->hmac_ctrl, ivsize >> 1); wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, - 0, 0, 0); + 0, ivdrop, 0); pos += sizeof(struct fw_ulptx_wr) + sizeof(struct ulp_txpkt) + @@ -565,20 +682,21 @@ int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev) struct ipsec_sa_entry *sa_entry; u64 *pos, *end, *before, *sgl; int qidx, left, credits; - unsigned int flits = 0, ndesc, kctx_len; + unsigned int flits = 0, ndesc; struct adapter *adap; struct sge_eth_txq *q; struct port_info *pi; dma_addr_t addr[MAX_SKB_FRAGS + 1]; + struct sec_path *sp; bool immediate = false; if (!x->xso.offload_handle) return NETDEV_TX_BUSY; sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle; - kctx_len = sa_entry->kctx_len; - if (skb->sp->len != 1) { + sp = skb_sec_path(skb); + if (sp->len != 1) { out_free: dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -590,7 +708,7 @@ out_free: dev_kfree_skb_any(skb); cxgb4_reclaim_completed_tx(adap, &q->q, true); - flits = calc_tx_sec_flits(skb, sa_entry->kctx_len); + flits = calc_tx_sec_flits(skb, sa_entry, &immediate); ndesc = flits_to_desc(flits); credits = txq_avail(&q->q) - ndesc; @@ -603,9 +721,6 @@ out_free: dev_kfree_skb_any(skb); return NETDEV_TX_BUSY; } - if (is_eth_imm(skb, kctx_len)) - immediate = true; - if (!immediate && unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) { q->mapping_err++; diff --git a/drivers/crypto/chelsio/chtls/Makefile b/drivers/crypto/chelsio/chtls/Makefile index df1379570a8e..b958f1b8ec39 100644 --- a/drivers/crypto/chelsio/chtls/Makefile +++ b/drivers/crypto/chelsio/chtls/Makefile @@ -1,4 +1,5 @@ -ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4 -Idrivers/crypto/chelsio/ +ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4 \ + -I $(srctree)/drivers/crypto/chelsio obj-$(CONFIG_CRYPTO_DEV_CHELSIO_TLS) += chtls.o chtls-objs := chtls_main.o chtls_cm.o chtls_io.o chtls_hw.o diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c index 931b96c220af..4e22332496c5 100644 --- a/drivers/crypto/chelsio/chtls/chtls_cm.c +++ b/drivers/crypto/chelsio/chtls/chtls_cm.c @@ -24,6 +24,7 @@ #include <net/inet_common.h> #include <net/tcp.h> #include <net/dst.h> +#include <net/tls.h> #include "chtls.h" #include "chtls_cm.h" @@ -615,7 +616,7 @@ int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk) pi = netdev_priv(ndev); adap = pi->adapter; - if (!(adap->flags & FULL_INIT_DONE)) + if (!(adap->flags & CXGB4_FULL_INIT_DONE)) return -EBADF; if (listen_hash_find(cdev, sk) >= 0) /* already have it */ @@ -1015,6 +1016,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk, { struct inet_sock *newinet; const struct iphdr *iph; + struct tls_context *ctx; struct net_device *ndev; struct chtls_sock *csk; struct dst_entry 
*dst; @@ -1063,6 +1065,8 @@ static struct sock *chtls_recv_sock(struct sock *lsk, oreq->ts_recent = PASS_OPEN_TID_G(ntohl(req->tos_stid)); sk_setup_caps(newsk, dst); + ctx = tls_get_ctx(lsk); + newsk->sk_destruct = ctx->sk_destruct; csk->sk = newsk; csk->passive_reap_next = oreq; csk->tx_chan = cxgb4_port_chan(ndev); @@ -1079,8 +1083,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk, csk->txq_idx = (rxq_idx < cdev->lldi->ntxq) ? rxq_idx : port_id * step; csk->sndbuf = newsk->sk_sndbuf; - csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi->adapter_type, - cxgb4_port_viid(ndev)); + csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx; RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk), sock_net(newsk)-> ipv4.sysctl_tcp_window_scaling, diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c index 18f553fcc167..1285a1bceda7 100644 --- a/drivers/crypto/chelsio/chtls/chtls_io.c +++ b/drivers/crypto/chelsio/chtls/chtls_io.c @@ -922,14 +922,13 @@ static int csk_wait_memory(struct chtls_dev *cdev, struct sock *sk, long *timeo_p) { DEFINE_WAIT_FUNC(wait, woken_wake_function); - int sndbuf, err = 0; + int err = 0; long current_timeo; long vm_wait = 0; bool noblock; current_timeo = *timeo_p; noblock = (*timeo_p ? false : true); - sndbuf = cdev->max_host_sndbuf; if (csk_mem_free(cdev, sk)) { current_timeo = (prandom_u32() % (HZ / 5)) + 2; vm_wait = (prandom_u32() % (HZ / 5)) + 2; @@ -1401,23 +1400,18 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int flags, int *addr_len) { struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); - struct net_device *dev = csk->egress_dev; struct chtls_hws *hws = &csk->tlshws; struct tcp_sock *tp = tcp_sk(sk); - struct adapter *adap; unsigned long avail; int buffers_freed; int copied = 0; - int request; int target; long timeo; - adap = netdev2adap(dev); buffers_freed = 0; timeo = sock_rcvtimeo(sk, nonblock); target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); - request = len; if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND))) chtls_cleanup_rbuf(sk, copied); @@ -1694,11 +1688,9 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, { struct tcp_sock *tp = tcp_sk(sk); struct chtls_sock *csk; - struct chtls_hws *hws; unsigned long avail; /* amount of available data in current skb */ int buffers_freed; int copied = 0; - int request; long timeo; int target; /* Read at least this many bytes */ @@ -1718,7 +1710,6 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, lock_sock(sk); csk = rcu_dereference_sk_user_data(sk); - hws = &csk->tlshws; if (is_tls_rx(csk)) return chtls_pt_recvmsg(sk, msg, len, nonblock, @@ -1726,7 +1717,6 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, timeo = sock_rcvtimeo(sk, nonblock); target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); - request = len; if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND))) chtls_cleanup_rbuf(sk, copied); diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c index 563f8fe7686a..dd2daf2a54e0 100644 --- a/drivers/crypto/chelsio/chtls/chtls_main.c +++ b/drivers/crypto/chelsio/chtls/chtls_main.c @@ -30,7 +30,6 @@ */ static LIST_HEAD(cdev_list); static DEFINE_MUTEX(cdev_mutex); -static DEFINE_MUTEX(cdev_list_lock); static DEFINE_MUTEX(notify_mutex); static RAW_NOTIFIER_HEAD(listen_notify_list); diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c index eb2a0a73cbed..b4c24a35b3d0 100644 --- 
a/drivers/crypto/geode-aes.c +++ b/drivers/crypto/geode-aes.c @@ -261,7 +261,7 @@ static int fallback_init_cip(struct crypto_tfm *tfm) struct geode_aes_op *op = crypto_tfm_ctx(tfm); op->fallback.cip = crypto_alloc_cipher(name, 0, - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); + CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(op->fallback.cip)) { printk(KERN_ERR "Error allocating fallback algo %s\n", name); diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index a5a36fe7bf2c..dad212cabe63 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -1961,7 +1961,8 @@ static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key, u32 tmp[DES_EXPKEY_WORDS]; int ret = des_ekey(tmp, key); - if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + if (unlikely(ret == 0) && + (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c index cdc4f9a171d9..adc0cd8ae97b 100644 --- a/drivers/crypto/hisilicon/sec/sec_algs.c +++ b/drivers/crypto/hisilicon/sec/sec_algs.c @@ -241,8 +241,8 @@ static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm, memset(ctx->key, 0, SEC_MAX_CIPHER_KEY); } else { /* new key */ - ctx->key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY, - &ctx->pkey, GFP_KERNEL); + ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY, + &ctx->pkey, GFP_KERNEL); if (!ctx->key) { mutex_unlock(&ctx->lock); return -ENOMEM; diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c index c1ee4e7bf996..91ee2bb575df 100644 --- a/drivers/crypto/hisilicon/sec/sec_drv.c +++ b/drivers/crypto/hisilicon/sec/sec_drv.c @@ -1082,9 +1082,8 @@ static int sec_queue_res_cfg(struct sec_queue *queue) struct sec_queue_ring_db *ring_db = &queue->ring_db; int ret; - ring_cmd->vaddr = dma_zalloc_coherent(dev, SEC_Q_CMD_SIZE, - &ring_cmd->paddr, - GFP_KERNEL); + ring_cmd->vaddr = dma_alloc_coherent(dev, SEC_Q_CMD_SIZE, + &ring_cmd->paddr, GFP_KERNEL); if (!ring_cmd->vaddr) return -ENOMEM; @@ -1092,17 +1091,15 @@ static int sec_queue_res_cfg(struct sec_queue *queue) mutex_init(&ring_cmd->lock); ring_cmd->callback = sec_alg_callback; - ring_cq->vaddr = dma_zalloc_coherent(dev, SEC_Q_CQ_SIZE, - &ring_cq->paddr, - GFP_KERNEL); + ring_cq->vaddr = dma_alloc_coherent(dev, SEC_Q_CQ_SIZE, + &ring_cq->paddr, GFP_KERNEL); if (!ring_cq->vaddr) { ret = -ENOMEM; goto err_free_ring_cmd; } - ring_db->vaddr = dma_zalloc_coherent(dev, SEC_Q_DB_SIZE, - &ring_db->paddr, - GFP_KERNEL); + ring_db->vaddr = dma_alloc_coherent(dev, SEC_Q_DB_SIZE, + &ring_db->paddr, GFP_KERNEL); if (!ring_db->vaddr) { ret = -ENOMEM; goto err_free_ring_cq; diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 3aef1d43e435..7ef30a98cb24 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -940,7 +940,7 @@ static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key, } ret = des_ekey(tmp, key); - if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } @@ -970,7 +970,7 @@ struct safexcel_alg_template safexcel_alg_cbc_des = { .cra_name = "cbc(des)", .cra_driver_name = "safexcel-cbc-des", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | 
CRYPTO_ALG_ASYNC | + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1010,7 +1010,7 @@ struct safexcel_alg_template safexcel_alg_ecb_des = { .cra_name = "ecb(des)", .cra_driver_name = "safexcel-ecb-des", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1074,7 +1074,7 @@ struct safexcel_alg_template safexcel_alg_cbc_des3_ede = { .cra_name = "cbc(des3_ede)", .cra_driver_name = "safexcel-cbc-des3_ede", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1114,7 +1114,7 @@ struct safexcel_alg_template safexcel_alg_ecb_des3_ede = { .cra_name = "ecb(des3_ede)", .cra_driver_name = "safexcel-ecb-des3_ede", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 27f7dad2d45d..5c4659b04d70 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -260,9 +260,9 @@ static int setup_crypt_desc(void) { struct device *dev = &pdev->dev; BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64); - crypt_virt = dma_zalloc_coherent(dev, - NPE_QLEN * sizeof(struct crypt_ctl), - &crypt_phys, GFP_ATOMIC); + crypt_virt = dma_alloc_coherent(dev, + NPE_QLEN * sizeof(struct crypt_ctl), + &crypt_phys, GFP_ATOMIC); if (!crypt_virt) return -ENOMEM; return 0; @@ -847,7 +847,7 @@ static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key, goto out; if (*flags & CRYPTO_TFM_RES_WEAK_KEY) { - if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) { + if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) { ret = -EINVAL; } else { *flags &= ~CRYPTO_TFM_RES_WEAK_KEY; @@ -1125,7 +1125,7 @@ static int aead_setup(struct crypto_aead *tfm, unsigned int authsize) goto out; if (*flags & CRYPTO_TFM_RES_WEAK_KEY) { - if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) { + if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) { ret = -EINVAL; goto out; } else { @@ -1194,7 +1194,6 @@ static struct ixp_alg ixp4xx_algos[] = { .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, - .geniv = "eseqiv", } } }, @@ -1221,7 +1220,6 @@ static struct ixp_alg ixp4xx_algos[] = { .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, - .geniv = "eseqiv", } } }, @@ -1247,7 +1245,6 @@ static struct ixp_alg ixp4xx_algos[] = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, - .geniv = "eseqiv", } } }, @@ -1273,7 +1270,6 @@ static struct ixp_alg ixp4xx_algos[] = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, - .geniv = "eseqiv", } } }, @@ -1287,7 +1283,6 @@ static struct ixp_alg ixp4xx_algos[] = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, - .geniv = "eseqiv", .setkey = ablk_rfc3686_setkey, .encrypt = ablk_rfc3686_crypt, .decrypt = ablk_rfc3686_crypt } diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c index 
0ae84ec9e21c..fb279b3a1ca1 100644 --- a/drivers/crypto/marvell/cipher.c +++ b/drivers/crypto/marvell/cipher.c @@ -286,7 +286,7 @@ static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key, } ret = des_ekey(tmp, key); - if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } @@ -322,7 +322,6 @@ static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req, struct mv_cesa_skcipher_dma_iter iter; bool skip_ctx = false; int ret; - unsigned int ivsize; basereq->chain.first = NULL; basereq->chain.last = NULL; @@ -381,7 +380,6 @@ static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req, } while (mv_cesa_skcipher_req_iter_next_op(&iter)); /* Add output data for IV */ - ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req)); ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET, CESA_SA_DATA_SRAM_OFFSET, CESA_TDMA_SRC_IN_SRAM, flags); diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c index ee0404e27a0f..5660e5e5e022 100644 --- a/drivers/crypto/mediatek/mtk-platform.c +++ b/drivers/crypto/mediatek/mtk-platform.c @@ -453,17 +453,17 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp) if (!ring[i]) goto err_cleanup; - ring[i]->cmd_base = dma_zalloc_coherent(cryp->dev, - MTK_DESC_RING_SZ, - &ring[i]->cmd_dma, - GFP_KERNEL); + ring[i]->cmd_base = dma_alloc_coherent(cryp->dev, + MTK_DESC_RING_SZ, + &ring[i]->cmd_dma, + GFP_KERNEL); if (!ring[i]->cmd_base) goto err_cleanup; - ring[i]->res_base = dma_zalloc_coherent(cryp->dev, - MTK_DESC_RING_SZ, - &ring[i]->res_dma, - GFP_KERNEL); + ring[i]->res_base = dma_alloc_coherent(cryp->dev, + MTK_DESC_RING_SZ, + &ring[i]->res_dma, + GFP_KERNEL); if (!ring[i]->res_base) goto err_cleanup; diff --git a/drivers/crypto/mxc-scc.c b/drivers/crypto/mxc-scc.c index e01c46387df8..519086730791 100644 --- a/drivers/crypto/mxc-scc.c +++ b/drivers/crypto/mxc-scc.c @@ -178,12 +178,12 @@ static int mxc_scc_get_data(struct mxc_scc_ctx *ctx, else from = scc->black_memory; - dev_dbg(scc->dev, "pcopy: from 0x%p %d bytes\n", from, + dev_dbg(scc->dev, "pcopy: from 0x%p %zu bytes\n", from, ctx->dst_nents * 8); len = sg_pcopy_from_buffer(ablkreq->dst, ctx->dst_nents, from, ctx->size, ctx->offset); if (!len) { - dev_err(scc->dev, "pcopy err from 0x%p (len=%d)\n", from, len); + dev_err(scc->dev, "pcopy err from 0x%p (len=%zu)\n", from, len); return -EINVAL; } @@ -274,7 +274,7 @@ static int mxc_scc_put_data(struct mxc_scc_ctx *ctx, len = sg_pcopy_to_buffer(req->src, ctx->src_nents, to, len, ctx->offset); if (!len) { - dev_err(scc->dev, "pcopy err to 0x%p (len=%d)\n", to, len); + dev_err(scc->dev, "pcopy err to 0x%p (len=%zu)\n", to, len); return -EINVAL; } @@ -335,9 +335,9 @@ static void mxc_scc_ablkcipher_next(struct mxc_scc_ctx *ctx, return; } - dev_dbg(scc->dev, "Start encryption (0x%p/0x%p)\n", - (void *)readl(scc->base + SCC_SCM_RED_START), - (void *)readl(scc->base + SCC_SCM_BLACK_START)); + dev_dbg(scc->dev, "Start encryption (0x%x/0x%x)\n", + readl(scc->base + SCC_SCM_RED_START), + readl(scc->base + SCC_SCM_BLACK_START)); /* clear interrupt control registers */ writel(SCC_SCM_INTR_CTRL_CLR_INTR, diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index 4e6ff32f8a7e..a2105cf33abb 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -20,6 +20,7 @@ #include <linux/of.h> #include <linux/platform_device.h> #include 
<linux/stmp_device.h> +#include <linux/clk.h> #include <crypto/aes.h> #include <crypto/sha.h> @@ -82,6 +83,7 @@ struct dcp { spinlock_t lock[DCP_MAX_CHANS]; struct task_struct *thread[DCP_MAX_CHANS]; struct crypto_queue queue[DCP_MAX_CHANS]; + struct clk *dcp_clk; }; enum dcp_chan { @@ -1053,11 +1055,24 @@ static int mxs_dcp_probe(struct platform_device *pdev) /* Re-align the structure so it fits the DCP constraints. */ sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT); - /* Restart the DCP block. */ - ret = stmp_reset_block(sdcp->base); + /* DCP clock is optional, only used on some SOCs */ + sdcp->dcp_clk = devm_clk_get(dev, "dcp"); + if (IS_ERR(sdcp->dcp_clk)) { + if (sdcp->dcp_clk != ERR_PTR(-ENOENT)) + return PTR_ERR(sdcp->dcp_clk); + sdcp->dcp_clk = NULL; + } + ret = clk_prepare_enable(sdcp->dcp_clk); if (ret) return ret; + /* Restart the DCP block. */ + ret = stmp_reset_block(sdcp->base); + if (ret) { + dev_err(dev, "Failed reset\n"); + goto err_disable_unprepare_clk; + } + /* Initialize control register. */ writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES | MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf, @@ -1094,7 +1109,8 @@ static int mxs_dcp_probe(struct platform_device *pdev) NULL, "mxs_dcp_chan/sha"); if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) { dev_err(dev, "Error starting SHA thread!\n"); - return PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]); + ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]); + goto err_disable_unprepare_clk; } sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes, @@ -1151,6 +1167,10 @@ err_destroy_aes_thread: err_destroy_sha_thread: kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]); + +err_disable_unprepare_clk: + clk_disable_unprepare(sdcp->dcp_clk); + return ret; } @@ -1170,6 +1190,8 @@ static int mxs_dcp_remove(struct platform_device *pdev) kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]); kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]); + clk_disable_unprepare(sdcp->dcp_clk); + platform_set_drvdata(pdev, NULL); global_sdcp = NULL; diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 55f34cfc43ff..9450c41211b2 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -772,7 +772,7 @@ static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, } err = des_ekey(tmp, key); - if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c index 898c0a280511..5a26fcd75d2d 100644 --- a/drivers/crypto/nx/nx-aes-ctr.c +++ b/drivers/crypto/nx/nx-aes-ctr.c @@ -159,7 +159,6 @@ struct crypto_alg nx_ctr3686_aes_alg = { .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, .ivsize = CTR_RFC3686_IV_SIZE, - .geniv = "seqiv", .setkey = ctr3686_aes_nx_set_key, .encrypt = ctr3686_aes_nx_crypt, .decrypt = ctr3686_aes_nx_crypt, diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index a553ffddb11b..0120feb2d746 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -749,7 +749,6 @@ static struct crypto_alg algs_ctr[] = { .cra_u.ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, - .geniv = "eseqiv", .ivsize = AES_BLOCK_SIZE, .setkey = omap_aes_setkey, .encrypt = omap_aes_ctr_encrypt, @@ -1222,7 +1221,6 @@ static int omap_aes_probe(struct platform_device *pdev) algp = &dd->pdata->algs_info[i].algs_list[j]; pr_debug("reg 
alg: %s\n", algp->cra_name); - INIT_LIST_HEAD(&algp->cra_list); err = crypto_register_alg(algp); if (err) @@ -1240,7 +1238,6 @@ static int omap_aes_probe(struct platform_device *pdev) algp = &aalg->base; pr_debug("reg alg: %s\n", algp->cra_name); - INIT_LIST_HEAD(&algp->cra_list); err = crypto_register_aead(aalg); if (err) diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index eb95b0d7f184..1ba2633e90d6 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c @@ -662,7 +662,7 @@ static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, pr_debug("enter, keylen: %d\n", keylen); /* Do we need to test against weak key? */ - if (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY) { + if (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) { u32 tmp[DES_EXPKEY_WORDS]; int ret = des_ekey(tmp, key); @@ -1069,7 +1069,6 @@ static int omap_des_probe(struct platform_device *pdev) algp = &dd->pdata->algs_info[i].algs_list[j]; pr_debug("reg alg: %s\n", algp->cra_name); - INIT_LIST_HEAD(&algp->cra_list); err = crypto_register_alg(algp); if (err) diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index a28f1d18fe01..1b3acdeffede 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -759,7 +759,8 @@ static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, } if (unlikely(!des_ekey(tmp, key)) && - (crypto_ablkcipher_get_flags(cipher) & CRYPTO_TFM_REQ_WEAK_KEY)) { + (crypto_ablkcipher_get_flags(cipher) & + CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } @@ -1585,8 +1586,7 @@ static struct spacc_alg l2_engine_algs[] = { .cra_name = "f8(kasumi)", .cra_driver_name = "f8-kasumi-picoxcell", .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | - CRYPTO_ALG_ASYNC | + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 8, .cra_ctxsize = sizeof(struct spacc_ablk_ctx), diff --git a/drivers/crypto/qat/qat_c3xxx/Makefile b/drivers/crypto/qat/qat_c3xxx/Makefile index 8f5fd4838a96..822b5de58ec6 100644 --- a/drivers/crypto/qat/qat_c3xxx/Makefile +++ b/drivers/crypto/qat/qat_c3xxx/Makefile @@ -1,3 +1,3 @@ -ccflags-y := -I$(src)/../qat_common +ccflags-y := -I $(srctree)/$(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o qat_c3xxx-objs := adf_drv.o adf_c3xxx_hw_data.o diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c index 763c2166ee0e..d937cc7248a5 100644 --- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c +++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c @@ -193,11 +193,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) PCI_FUNC(pdev->devfn)); accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); - if (!accel_dev->debugfs_dir) { - dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); - ret = -EINVAL; - goto out_err; - } /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); diff --git a/drivers/crypto/qat/qat_c3xxxvf/Makefile b/drivers/crypto/qat/qat_c3xxxvf/Makefile index 16d178e2eaa2..8f56d27c7479 100644 --- a/drivers/crypto/qat/qat_c3xxxvf/Makefile +++ b/drivers/crypto/qat/qat_c3xxxvf/Makefile @@ -1,3 +1,3 @@ -ccflags-y := -I$(src)/../qat_common +ccflags-y := -I $(srctree)/$(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o qat_c3xxxvf-objs := adf_drv.o adf_c3xxxvf_hw_data.o diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c 
b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c index 613c7d5644ce..1dc5ac859f7b 100644 --- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c @@ -177,11 +177,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) PCI_FUNC(pdev->devfn)); accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); - if (!accel_dev->debugfs_dir) { - dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); - ret = -EINVAL; - goto out_err; - } /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); diff --git a/drivers/crypto/qat/qat_c62x/Makefile b/drivers/crypto/qat/qat_c62x/Makefile index bd75ace59b76..6dcd404578fc 100644 --- a/drivers/crypto/qat/qat_c62x/Makefile +++ b/drivers/crypto/qat/qat_c62x/Makefile @@ -1,3 +1,3 @@ -ccflags-y := -I$(src)/../qat_common +ccflags-y := -I $(srctree)/$(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o qat_c62x-objs := adf_drv.o adf_c62x_hw_data.o diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c index 9cb832963357..2bc06c89d2fe 100644 --- a/drivers/crypto/qat/qat_c62x/adf_drv.c +++ b/drivers/crypto/qat/qat_c62x/adf_drv.c @@ -193,11 +193,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) PCI_FUNC(pdev->devfn)); accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); - if (!accel_dev->debugfs_dir) { - dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); - ret = -EINVAL; - goto out_err; - } /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); diff --git a/drivers/crypto/qat/qat_c62xvf/Makefile b/drivers/crypto/qat/qat_c62xvf/Makefile index ecd708c213b2..1e5d51de778f 100644 --- a/drivers/crypto/qat/qat_c62xvf/Makefile +++ b/drivers/crypto/qat/qat_c62xvf/Makefile @@ -1,3 +1,3 @@ -ccflags-y := -I$(src)/../qat_common +ccflags-y := -I $(srctree)/$(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o qat_c62xvf-objs := adf_drv.o adf_c62xvf_hw_data.o diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c index 278452b8ef81..a68358b31292 100644 --- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c +++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c @@ -177,11 +177,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) PCI_FUNC(pdev->devfn)); accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); - if (!accel_dev->debugfs_dir) { - dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); - ret = -EINVAL; - goto out_err; - } /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c index 3744b22f0c46..d28cba34773e 100644 --- a/drivers/crypto/qat/qat_common/adf_admin.c +++ b/drivers/crypto/qat/qat_common/adf_admin.c @@ -244,18 +244,18 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev) dev_to_node(&GET_DEV(accel_dev))); if (!admin) return -ENOMEM; - admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, - &admin->phy_addr, GFP_KERNEL); + admin->virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, + &admin->phy_addr, GFP_KERNEL); if (!admin->virt_addr) { dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n"); kfree(admin); return -ENOMEM; } - admin->virt_tbl_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), - PAGE_SIZE, - &admin->const_tbl_addr, - GFP_KERNEL); + admin->virt_tbl_addr = dma_alloc_coherent(&GET_DEV(accel_dev), + PAGE_SIZE, + 
&admin->const_tbl_addr, + GFP_KERNEL); if (!admin->virt_tbl_addr) { dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n"); dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c index d0879790561f..5c7fdb0fc53d 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg.c +++ b/drivers/crypto/qat/qat_common/adf_cfg.c @@ -141,13 +141,6 @@ int adf_cfg_dev_add(struct adf_accel_dev *accel_dev) accel_dev->debugfs_dir, dev_cfg_data, &qat_dev_cfg_fops); - if (!dev_cfg_data->debug) { - dev_err(&GET_DEV(accel_dev), - "Failed to create qat cfg debugfs entry.\n"); - kfree(dev_cfg_data); - accel_dev->cfg = NULL; - return -EFAULT; - } return 0; } EXPORT_SYMBOL_GPL(adf_cfg_dev_add); diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c index 57d2622728a5..2136cbe4bf6c 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.c +++ b/drivers/crypto/qat/qat_common/adf_transport.c @@ -486,12 +486,6 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev) /* accel_dev->debugfs_dir should always be non-NULL here */ etr_data->debug = debugfs_create_dir("transport", accel_dev->debugfs_dir); - if (!etr_data->debug) { - dev_err(&GET_DEV(accel_dev), - "Unable to create transport debugfs entry\n"); - ret = -ENOENT; - goto err_bank_debug; - } for (i = 0; i < num_banks; i++) { ret = adf_init_bank(accel_dev, &etr_data->banks[i], i, @@ -504,7 +498,6 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev) err_bank_all: debugfs_remove(etr_data->debug); -err_bank_debug: kfree(etr_data->banks); err_bank: kfree(etr_data); diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c index 52340b9bb387..e794e9d97b2c 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_debug.c +++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c @@ -163,11 +163,6 @@ int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name) ring_debug->debug = debugfs_create_file(entry_name, S_IRUSR, ring->bank->bank_debug_dir, ring, &adf_ring_debug_fops); - if (!ring_debug->debug) { - pr_err("QAT: Failed to create ring debug entry.\n"); - kfree(ring_debug); - return -EFAULT; - } ring->ring_debug = ring_debug; return 0; } @@ -271,19 +266,9 @@ int adf_bank_debugfs_add(struct adf_etr_bank_data *bank) snprintf(name, sizeof(name), "bank_%02d", bank->bank_number); bank->bank_debug_dir = debugfs_create_dir(name, parent); - if (!bank->bank_debug_dir) { - pr_err("QAT: Failed to create bank debug dir.\n"); - return -EFAULT; - } - bank->bank_debug_cfg = debugfs_create_file("config", S_IRUSR, bank->bank_debug_dir, bank, &adf_bank_debug_fops); - if (!bank->bank_debug_cfg) { - pr_err("QAT: Failed to create bank debug entry.\n"); - debugfs_remove(bank->bank_debug_dir); - return -EFAULT; - } return 0; } diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index d2698299896f..975c75198f56 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -601,15 +601,15 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key, dev = &GET_DEV(inst->accel_dev); ctx->inst = inst; - ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd), - &ctx->enc_cd_paddr, - GFP_ATOMIC); + ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd), + &ctx->enc_cd_paddr, + GFP_ATOMIC); if (!ctx->enc_cd) { return -ENOMEM; } - ctx->dec_cd = dma_zalloc_coherent(dev, 
sizeof(*ctx->dec_cd), - &ctx->dec_cd_paddr, - GFP_ATOMIC); + ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd), + &ctx->dec_cd_paddr, + GFP_ATOMIC); if (!ctx->dec_cd) { goto out_free_enc; } @@ -933,16 +933,16 @@ static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm, dev = &GET_DEV(inst->accel_dev); ctx->inst = inst; - ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd), - &ctx->enc_cd_paddr, - GFP_ATOMIC); + ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd), + &ctx->enc_cd_paddr, + GFP_ATOMIC); if (!ctx->enc_cd) { spin_unlock(&ctx->lock); return -ENOMEM; } - ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), - &ctx->dec_cd_paddr, - GFP_ATOMIC); + ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd), + &ctx->dec_cd_paddr, + GFP_ATOMIC); if (!ctx->dec_cd) { spin_unlock(&ctx->lock); goto out_free_enc; diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c index 320e7854b4ee..c9f324730d71 100644 --- a/drivers/crypto/qat/qat_common/qat_asym_algs.c +++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c @@ -332,10 +332,10 @@ static int qat_dh_compute_value(struct kpp_request *req) } else { int shift = ctx->p_size - req->src_len; - qat_req->src_align = dma_zalloc_coherent(dev, - ctx->p_size, - &qat_req->in.dh.in.b, - GFP_KERNEL); + qat_req->src_align = dma_alloc_coherent(dev, + ctx->p_size, + &qat_req->in.dh.in.b, + GFP_KERNEL); if (unlikely(!qat_req->src_align)) return ret; @@ -360,9 +360,9 @@ static int qat_dh_compute_value(struct kpp_request *req) goto unmap_src; } else { - qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size, - &qat_req->out.dh.r, - GFP_KERNEL); + qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size, + &qat_req->out.dh.r, + GFP_KERNEL); if (unlikely(!qat_req->dst_align)) goto unmap_src; } @@ -447,7 +447,7 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params) return -EINVAL; ctx->p_size = params->p_size; - ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL); + ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL); if (!ctx->p) return -ENOMEM; memcpy(ctx->p, params->p, ctx->p_size); @@ -458,7 +458,7 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params) return 0; } - ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL); + ctx->g = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL); if (!ctx->g) return -ENOMEM; memcpy(ctx->g + (ctx->p_size - params->g_size), params->g, @@ -503,8 +503,8 @@ static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf, if (ret < 0) goto err_clear_ctx; - ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa, - GFP_KERNEL); + ctx->xa = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_xa, + GFP_KERNEL); if (!ctx->xa) { ret = -ENOMEM; goto err_clear_ctx; @@ -737,9 +737,9 @@ static int qat_rsa_enc(struct akcipher_request *req) } else { int shift = ctx->key_sz - req->src_len; - qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, - &qat_req->in.rsa.enc.m, - GFP_KERNEL); + qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz, + &qat_req->in.rsa.enc.m, + GFP_KERNEL); if (unlikely(!qat_req->src_align)) return ret; @@ -756,9 +756,9 @@ static int qat_rsa_enc(struct akcipher_request *req) goto unmap_src; } else { - qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz, - &qat_req->out.rsa.enc.c, - GFP_KERNEL); + qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz, + &qat_req->out.rsa.enc.c, + GFP_KERNEL); if 
(unlikely(!qat_req->dst_align)) goto unmap_src; @@ -881,9 +881,9 @@ static int qat_rsa_dec(struct akcipher_request *req) } else { int shift = ctx->key_sz - req->src_len; - qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, - &qat_req->in.rsa.dec.c, - GFP_KERNEL); + qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz, + &qat_req->in.rsa.dec.c, + GFP_KERNEL); if (unlikely(!qat_req->src_align)) return ret; @@ -900,9 +900,9 @@ static int qat_rsa_dec(struct akcipher_request *req) goto unmap_src; } else { - qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz, - &qat_req->out.rsa.dec.m, - GFP_KERNEL); + qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz, + &qat_req->out.rsa.dec.m, + GFP_KERNEL); if (unlikely(!qat_req->dst_align)) goto unmap_src; @@ -989,7 +989,7 @@ static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value, goto err; ret = -ENOMEM; - ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL); + ctx->n = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL); if (!ctx->n) goto err; @@ -1018,7 +1018,7 @@ static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value, return -EINVAL; } - ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL); + ctx->e = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL); if (!ctx->e) return -ENOMEM; @@ -1044,7 +1044,7 @@ static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value, goto err; ret = -ENOMEM; - ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL); + ctx->d = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL); if (!ctx->d) goto err; @@ -1077,7 +1077,7 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key) qat_rsa_drop_leading_zeros(&ptr, &len); if (!len) goto err; - ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL); + ctx->p = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL); if (!ctx->p) goto err; memcpy(ctx->p + (half_key_sz - len), ptr, len); @@ -1088,7 +1088,7 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key) qat_rsa_drop_leading_zeros(&ptr, &len); if (!len) goto free_p; - ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL); + ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL); if (!ctx->q) goto free_p; memcpy(ctx->q + (half_key_sz - len), ptr, len); @@ -1099,8 +1099,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key) qat_rsa_drop_leading_zeros(&ptr, &len); if (!len) goto free_q; - ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp, - GFP_KERNEL); + ctx->dp = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dp, + GFP_KERNEL); if (!ctx->dp) goto free_q; memcpy(ctx->dp + (half_key_sz - len), ptr, len); @@ -1111,8 +1111,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key) qat_rsa_drop_leading_zeros(&ptr, &len); if (!len) goto free_dp; - ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq, - GFP_KERNEL); + ctx->dq = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dq, + GFP_KERNEL); if (!ctx->dq) goto free_dp; memcpy(ctx->dq + (half_key_sz - len), ptr, len); @@ -1123,8 +1123,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key) qat_rsa_drop_leading_zeros(&ptr, &len); if (!len) goto free_dq; - ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv, - GFP_KERNEL); + ctx->qinv = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_qinv, + GFP_KERNEL); if (!ctx->qinv) 
goto free_dq; memcpy(ctx->qinv + (half_key_sz - len), ptr, len); diff --git a/drivers/crypto/qat/qat_dh895xcc/Makefile b/drivers/crypto/qat/qat_dh895xcc/Makefile index 180a00ed7f89..0fc06b1e1632 100644 --- a/drivers/crypto/qat/qat_dh895xcc/Makefile +++ b/drivers/crypto/qat/qat_dh895xcc/Makefile @@ -1,3 +1,3 @@ -ccflags-y := -I$(src)/../qat_common +ccflags-y := -I $(srctree)/$(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o qat_dh895xcc-objs := adf_drv.o adf_dh895xcc_hw_data.o diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c index 3a9708ef4ce2..b11bf8c0e683 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c @@ -193,11 +193,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) PCI_FUNC(pdev->devfn)); accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); - if (!accel_dev->debugfs_dir) { - dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); - ret = -EINVAL; - goto out_err; - } /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); diff --git a/drivers/crypto/qat/qat_dh895xccvf/Makefile b/drivers/crypto/qat/qat_dh895xccvf/Makefile index 5c3ccf8267eb..9ce906af6034 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/Makefile +++ b/drivers/crypto/qat/qat_dh895xccvf/Makefile @@ -1,3 +1,3 @@ -ccflags-y := -I$(src)/../qat_common +ccflags-y := -I $(srctree)/$(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o qat_dh895xccvf-objs := adf_drv.o adf_dh895xccvf_hw_data.o diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c index 3da0f951cb59..1b762eefc6c1 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c @@ -177,11 +177,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) PCI_FUNC(pdev->devfn)); accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); - if (!accel_dev->debugfs_dir) { - dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); - ret = -EINVAL; - goto out_err; - } /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c index 585e1cab9ae3..154b6baa124e 100644 --- a/drivers/crypto/qce/ablkcipher.c +++ b/drivers/crypto/qce/ablkcipher.c @@ -180,8 +180,8 @@ static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key, u32 tmp[DES_EXPKEY_WORDS]; ret = des_ekey(tmp, key); - if (!ret && crypto_ablkcipher_get_flags(ablk) & - CRYPTO_TFM_REQ_WEAK_KEY) + if (!ret && (crypto_ablkcipher_get_flags(ablk) & + CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) goto weakkey; } @@ -376,7 +376,6 @@ static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def, alg->cra_module = THIS_MODULE; alg->cra_init = qce_ablkcipher_init; alg->cra_exit = qce_ablkcipher_exit; - INIT_LIST_HEAD(&alg->cra_list); INIT_LIST_HEAD(&tmpl->entry); tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER; diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index d8a5db11b7ea..fc45f5ea6fdd 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c @@ -508,7 +508,6 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def, base->cra_alignmask = 0; base->cra_module = THIS_MODULE; base->cra_init = qce_ahash_cra_init; - INIT_LIST_HEAD(&base->cra_list); snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); snprintf(base->cra_driver_name, 
CRYPTO_MAX_ALG_NAME, "%s", diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c index c9d622abd90c..0ce4a65b95f5 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.c +++ b/drivers/crypto/rockchip/rk3288_crypto.c @@ -119,7 +119,7 @@ static int rk_load_data(struct rk_crypto_info *dev, count = (dev->left_bytes > PAGE_SIZE) ? PAGE_SIZE : dev->left_bytes; - if (!sg_pcopy_to_buffer(dev->first, dev->nents, + if (!sg_pcopy_to_buffer(dev->first, dev->src_nents, dev->addr_vir, count, dev->total - dev->left_bytes)) { dev_err(dev->dev, "[%s:%d] pcopy err\n", diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h index d5fb4013fb42..54ee5b3ed9db 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.h +++ b/drivers/crypto/rockchip/rk3288_crypto.h @@ -207,7 +207,8 @@ struct rk_crypto_info { void *addr_vir; int aligned; int align_size; - size_t nents; + size_t src_nents; + size_t dst_nents; unsigned int total; unsigned int count; dma_addr_t addr_in; @@ -244,6 +245,7 @@ struct rk_cipher_ctx { struct rk_crypto_info *dev; unsigned int keylen; u32 mode; + u8 iv[AES_BLOCK_SIZE]; }; enum alg_type { diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c index 639c15c5364b..02dac6ae7e53 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c +++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c @@ -60,7 +60,7 @@ static int rk_tdes_setkey(struct crypto_ablkcipher *cipher, if (keylen == DES_KEY_SIZE) { if (!des_ekey(tmp, key) && - (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } @@ -242,6 +242,17 @@ static void crypto_dma_start(struct rk_crypto_info *dev) static int rk_set_data_start(struct rk_crypto_info *dev) { int err; + struct ablkcipher_request *req = + ablkcipher_request_cast(dev->async_req); + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + u32 ivsize = crypto_ablkcipher_ivsize(tfm); + u8 *src_last_blk = page_address(sg_page(dev->sg_src)) + + dev->sg_src->offset + dev->sg_src->length - ivsize; + + /* store the iv that needs to be updated in chain mode */ + if (ctx->mode & RK_CRYPTO_DEC) + memcpy(ctx->iv, src_last_blk, ivsize); err = dev->load_data(dev, dev->sg_src, dev->sg_dst); if (!err) @@ -260,8 +271,9 @@ static int rk_ablk_start(struct rk_crypto_info *dev) dev->total = req->nbytes; dev->sg_src = req->src; dev->first = req->src; - dev->nents = sg_nents(req->src); + dev->src_nents = sg_nents(req->src); dev->sg_dst = req->dst; + dev->dst_nents = sg_nents(req->dst); dev->aligned = 1; spin_lock_irqsave(&dev->lock, flags); @@ -285,6 +297,28 @@ static void rk_iv_copyback(struct rk_crypto_info *dev) memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize); } +static void rk_update_iv(struct rk_crypto_info *dev) +{ + struct ablkcipher_request *req = + ablkcipher_request_cast(dev->async_req); + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + u32 ivsize = crypto_ablkcipher_ivsize(tfm); + u8 *new_iv = NULL; + + if (ctx->mode & RK_CRYPTO_DEC) { + new_iv = ctx->iv; + } else { + new_iv = page_address(sg_page(dev->sg_dst)) + + dev->sg_dst->offset + dev->sg_dst->length - ivsize; + } + + if (ivsize == DES_BLOCK_SIZE) + memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize); + else if (ivsize ==
AES_BLOCK_SIZE) + memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize); +} + /* return: * true some error occurred * false no error, continue */ @@ -297,7 +331,7 @@ static int rk_ablk_rx(struct rk_crypto_info *dev) dev->unload_data(dev); if (!dev->aligned) { - if (!sg_pcopy_from_buffer(req->dst, dev->nents, + if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents, dev->addr_vir, dev->count, dev->total - dev->left_bytes - dev->count)) { @@ -306,6 +340,7 @@ } } if (dev->left_bytes) { + rk_update_iv(dev); if (dev->aligned) { if (sg_is_last(dev->sg_src)) { dev_err(dev->dev, "[%s:%d] Lack of data\n", diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c index 821a506b9e17..c336ae75e361 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c +++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c @@ -206,7 +206,7 @@ static int rk_ahash_start(struct rk_crypto_info *dev) dev->sg_dst = NULL; dev->sg_src = req->src; dev->first = req->src; - dev->nents = sg_nents(req->src); + dev->src_nents = sg_nents(req->src); rctx = ahash_request_ctx(req); rctx->mode = 0; diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index 0064be0e3941..1afdcb81d8ed 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c @@ -232,6 +232,7 @@ * struct samsung_aes_variant - platform specific SSS driver data * @aes_offset: AES register offset from SSS module's base. * @hash_offset: HASH register offset from SSS module's base. + * @clk_names: names of clocks needed to run SSS IP * * Specifies platform specific configuration of SSS module. * Note: A structure for driver specific platform data is used for future @@ -240,6 +241,7 @@ struct samsung_aes_variant { unsigned int aes_offset; unsigned int hash_offset; + const char *clk_names[2]; }; struct s5p_aes_reqctx { @@ -296,6 +298,7 @@ struct s5p_aes_ctx { struct s5p_aes_dev { struct device *dev; struct clk *clk; + struct clk *pclk; void __iomem *ioaddr; void __iomem *aes_ioaddr; int irq_fc; @@ -384,11 +387,19 @@ struct s5p_hash_ctx { static const struct samsung_aes_variant s5p_aes_data = { .aes_offset = 0x4000, .hash_offset = 0x6000, + .clk_names = { "secss", }, }; static const struct samsung_aes_variant exynos_aes_data = { .aes_offset = 0x200, .hash_offset = 0x400, + .clk_names = { "secss", }, +}; + +static const struct samsung_aes_variant exynos5433_slim_aes_data = { + .aes_offset = 0x400, + .hash_offset = 0x800, + .clk_names = { "pclk", "aclk", }, }; static const struct of_device_id s5p_sss_dt_match[] = { @@ -400,6 +411,10 @@ static const struct of_device_id s5p_sss_dt_match[] = { .compatible = "samsung,exynos4210-secss", .data = &exynos_aes_data, }, + { + .compatible = "samsung,exynos5433-slim-sss", + .data = &exynos5433_slim_aes_data, + }, { }, }; MODULE_DEVICE_TABLE(of, s5p_sss_dt_match); @@ -463,6 +478,9 @@ static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg, static void s5p_sg_done(struct s5p_aes_dev *dev) { + struct ablkcipher_request *req = dev->req; + struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req); + if (dev->sg_dst_cpy) { dev_dbg(dev->dev, "Copying %d bytes of output data back to original place\n", @@ -472,6 +490,11 @@ static void s5p_sg_done(struct s5p_aes_dev *dev) } s5p_free_sg_cpy(dev, &dev->sg_src_cpy); s5p_free_sg_cpy(dev, &dev->sg_dst_cpy); + if (reqctx->mode & FLAGS_AES_CBC) + memcpy_fromio(req->info, dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), AES_BLOCK_SIZE); + + else if (reqctx->mode & FLAGS_AES_CTR) + 
memcpy_fromio(req->info, dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), AES_BLOCK_SIZE); } /* Calls the completion. Cannot be called with dev->lock hold. */ @@ -1819,10 +1842,12 @@ static void s5p_set_aes(struct s5p_aes_dev *dev, void __iomem *keystart; if (iv) - memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10); + memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, + AES_BLOCK_SIZE); if (ctr) - memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), ctr, 0x10); + memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), ctr, + AES_BLOCK_SIZE); if (keylen == AES_KEYSIZE_256) keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0); @@ -2208,18 +2233,39 @@ static int s5p_aes_probe(struct platform_device *pdev) return PTR_ERR(pdata->ioaddr); } - pdata->clk = devm_clk_get(dev, "secss"); + pdata->clk = devm_clk_get(dev, variant->clk_names[0]); if (IS_ERR(pdata->clk)) { - dev_err(dev, "failed to find secss clock source\n"); + dev_err(dev, "failed to find secss clock %s\n", + variant->clk_names[0]); return -ENOENT; } err = clk_prepare_enable(pdata->clk); if (err < 0) { - dev_err(dev, "Enabling SSS clk failed, err %d\n", err); + dev_err(dev, "Enabling clock %s failed, err %d\n", + variant->clk_names[0], err); return err; } + if (variant->clk_names[1]) { + pdata->pclk = devm_clk_get(dev, variant->clk_names[1]); + if (IS_ERR(pdata->pclk)) { + dev_err(dev, "failed to find clock %s\n", + variant->clk_names[1]); + err = -ENOENT; + goto err_clk; + } + + err = clk_prepare_enable(pdata->pclk); + if (err < 0) { + dev_err(dev, "Enabling clock %s failed, err %d\n", + variant->clk_names[1], err); + goto err_clk; + } + } else { + pdata->pclk = NULL; + } + spin_lock_init(&pdata->lock); spin_lock_init(&pdata->hash_lock); @@ -2295,8 +2341,11 @@ err_algs: tasklet_kill(&pdata->tasklet); err_irq: - clk_disable_unprepare(pdata->clk); + if (pdata->pclk) + clk_disable_unprepare(pdata->pclk); +err_clk: + clk_disable_unprepare(pdata->clk); s5p_dev = NULL; return err; @@ -2323,6 +2372,9 @@ static int s5p_aes_remove(struct platform_device *pdev) pdata->use_hash = false; } + if (pdata->pclk) + clk_disable_unprepare(pdata->pclk); + clk_disable_unprepare(pdata->clk); s5p_dev = NULL; diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index bbf166a97ad3..8c32a3059b4a 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -1321,7 +1321,6 @@ static int sahara_register_algs(struct sahara_dev *dev) unsigned int i, j, k, l; for (i = 0; i < ARRAY_SIZE(aes_algs); i++) { - INIT_LIST_HEAD(&aes_algs[i].cra_list); err = crypto_register_alg(&aes_algs[i]); if (err) goto err_aes_algs; diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index 590d7352837e..4a6cc8a3045d 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -1564,7 +1564,7 @@ err_engine: static int stm32_hash_remove(struct platform_device *pdev) { - static struct stm32_hash_dev *hdev; + struct stm32_hash_dev *hdev; int ret; hdev = platform_get_drvdata(pdev); diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c index 5cf64746731a..54fd714d53ca 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c @@ -517,7 +517,7 @@ int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key, flags = crypto_skcipher_get_flags(tfm); ret = des_ekey(tmp, key); - if (unlikely(!ret) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + if (unlikely(!ret) && (flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { 
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY); dev_dbg(ss->dev, "Weak key %u\n", keylen); return -EINVAL; diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 6988012deca4..de78b54bcfb1 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1361,23 +1361,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, struct talitos_private *priv = dev_get_drvdata(dev); bool is_sec1 = has_ftr_sec1(priv); int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN; - void *err; if (cryptlen + authsize > max_len) { dev_err(dev, "length exceeds h/w max limit\n"); return ERR_PTR(-EINVAL); } - if (ivsize) - iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); - if (!dst || dst == src) { src_len = assoclen + cryptlen + authsize; src_nents = sg_nents_for_len(src, src_len); if (src_nents < 0) { dev_err(dev, "Invalid number of src SG.\n"); - err = ERR_PTR(-EINVAL); - goto error_sg; + return ERR_PTR(-EINVAL); } src_nents = (src_nents == 1) ? 0 : src_nents; dst_nents = dst ? src_nents : 0; @@ -1387,16 +1382,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, src_nents = sg_nents_for_len(src, src_len); if (src_nents < 0) { dev_err(dev, "Invalid number of src SG.\n"); - err = ERR_PTR(-EINVAL); - goto error_sg; + return ERR_PTR(-EINVAL); } src_nents = (src_nents == 1) ? 0 : src_nents; dst_len = assoclen + cryptlen + (encrypt ? authsize : 0); dst_nents = sg_nents_for_len(dst, dst_len); if (dst_nents < 0) { dev_err(dev, "Invalid number of dst SG.\n"); - err = ERR_PTR(-EINVAL); - goto error_sg; + return ERR_PTR(-EINVAL); } dst_nents = (dst_nents == 1) ? 0 : dst_nents; } @@ -1423,11 +1416,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, /* if it's an ahash, add space for a second desc next to the first one */ if (is_sec1 && !dst) alloc_len += sizeof(struct talitos_desc); + alloc_len += ivsize; edesc = kmalloc(alloc_len, GFP_DMA | flags); - if (!edesc) { - err = ERR_PTR(-ENOMEM); - goto error_sg; + if (!edesc) + return ERR_PTR(-ENOMEM); + if (ivsize) { + iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize); + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); } memset(&edesc->desc, 0, sizeof(edesc->desc)); @@ -1445,10 +1441,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, DMA_BIDIRECTIONAL); } return edesc; -error_sg: - if (iv_dma) - dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); - return err; } static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, @@ -1543,7 +1535,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, } if (unlikely(crypto_ablkcipher_get_flags(cipher) & - CRYPTO_TFM_REQ_WEAK_KEY) && + CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) && !des_ekey(tmp, key)) { crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY); return -EINVAL; } @@ -3155,7 +3147,6 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, alg->cra_ablkcipher.setkey = ablkcipher_setkey; alg->cra_ablkcipher.encrypt = ablkcipher_encrypt; alg->cra_ablkcipher.decrypt = ablkcipher_decrypt; - alg->cra_ablkcipher.geniv = "eseqiv"; break; case CRYPTO_ALG_TYPE_AEAD: alg = &t_alg->algt.alg.aead.base; diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index d2663a4e1f5e..3235611928f2 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -556,7 +556,7 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx, desc = dmaengine_prep_slave_sg(channel, 
ctx->device->dma.sg_src, ctx->device->dma.sg_src_len, - direction, DMA_CTRL_ACK); + DMA_MEM_TO_DEV, DMA_CTRL_ACK); break; case DMA_FROM_DEVICE: @@ -580,7 +580,7 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx, desc = dmaengine_prep_slave_sg(channel, ctx->device->dma.sg_dst, ctx->device->dma.sg_dst_len, - direction, + DMA_DEV_TO_MEM, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); @@ -595,6 +595,12 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx, } cookie = dmaengine_submit(desc); + if (dma_submit_error(cookie)) { + dev_dbg(ctx->device->dev, "[%s]: DMA submission failed\n", + __func__); + return cookie; + } + dma_async_issue_pending(channel); return 0; @@ -994,10 +1000,11 @@ static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher, } ret = des_ekey(tmp, key); - if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + if (unlikely(ret == 0) && + (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { *flags |= CRYPTO_TFM_RES_WEAK_KEY; - pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY", - __func__); + pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY", + __func__); return -EINVAL; } @@ -1028,18 +1035,19 @@ static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher, /* Checking key interdependency for weak key detection. */ if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && - (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { *flags |= CRYPTO_TFM_RES_WEAK_KEY; - pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY", - __func__); + pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY", + __func__); return -EINVAL; } for (i = 0; i < 3; i++) { ret = des_ekey(tmp, key + i*DES_KEY_SIZE); - if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + if (unlikely(ret == 0) && + (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { *flags |= CRYPTO_TFM_RES_WEAK_KEY; - pr_debug(DEV_DBG_NAME " [%s]: " - "CRYPTO_TFM_REQ_WEAK_KEY", __func__); + pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY", + __func__); return -EINVAL; } } diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index 633321a8dd03..a0bb8a6eec3f 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -166,7 +166,7 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg, __func__); desc = dmaengine_prep_slave_sg(channel, ctx->device->dma.sg, ctx->device->dma.sg_len, - direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); + DMA_MEM_TO_DEV, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); if (!desc) { dev_err(ctx->device->dev, "%s: dmaengine_prep_slave_sg() failed!\n", __func__); diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c index 2c573d1aaa64..0704833ece92 100644 --- a/drivers/crypto/virtio/virtio_crypto_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_algs.c @@ -406,7 +406,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, } else { req_data->header.session_id = cpu_to_le64(ctx->dec_sess_info.session_id); - req_data->header.opcode = + req_data->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT); } req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
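
The CRYPTO_TFM_REQ_FORBID_WEAK_KEYS conversions above (qce, rk3288, sun4i-ss, talitos, ux500) all track one crypto API rename: the request flag formerly spelled CRYPTO_TFM_REQ_WEAK_KEY is now CRYPTO_TFM_REQ_FORBID_WEAK_KEYS, so a weak DES key is rejected only when the caller explicitly forbids weak keys. A minimal sketch of the shared check, assuming the pre-5.2 des_ekey() helper from <crypto/des.h>; my_des_setkey_check() is an illustrative name, not code from any of these drivers:

#include <linux/crypto.h>
#include <crypto/des.h>

/* Illustrative sketch of the common DES weak-key check; not a
 * function from any of the drivers in the hunks above. */
static int my_des_setkey_check(struct crypto_tfm *tfm, const u8 *key)
{
	u32 tmp[DES_EXPKEY_WORDS];

	/* des_ekey() returns 0 when the key is one of the known weak keys */
	if (!des_ekey(tmp, key) &&
	    (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
		return -EINVAL;
	}
	return 0;
}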
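
The rk3288 hunks split dev->nents into src_nents and dst_nents and add rk_update_iv() because CBC requests processed in scatterlist-sized chunks need the engine reprogrammed with the last ciphertext block between chunks. A hedged sketch of that chaining rule follows; MY_BLOCK_SIZE and the my_cbc_* helpers are illustrative, not driver code. On encryption the next IV is the tail of the just-written destination; on decryption it must be saved from the source before an in-place operation overwrites it with plaintext.

#include <linux/string.h>
#include <linux/types.h>

#define MY_BLOCK_SIZE 16	/* AES block size; DES would use 8 */

/* Call before processing a chunk: on decryption, save the last
 * ciphertext block now, since in-place decryption destroys it. */
static void my_cbc_save_iv(u8 *iv_buf, const u8 *src, size_t len,
			   bool decrypt)
{
	if (decrypt)
		memcpy(iv_buf, src + len - MY_BLOCK_SIZE, MY_BLOCK_SIZE);
}

/* Call after the chunk completes: returns the IV to program for the
 * next chunk - the saved source block on decryption, the tail of the
 * freshly written ciphertext on encryption. */
static const u8 *my_cbc_next_iv(const u8 *iv_buf, const u8 *dst,
				size_t len, bool decrypt)
{
	return decrypt ? iv_buf : dst + len - MY_BLOCK_SIZE;
}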
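
The ux500 cryp and hash hunks make two independent fixes to one call sequence: dmaengine_prep_slave_sg() takes an enum dma_transfer_direction (DMA_MEM_TO_DEV or DMA_DEV_TO_MEM), not the enum dma_data_direction value the drivers were passing, and the cookie returned by dmaengine_submit() can encode an error that should be checked before dma_async_issue_pending(). A minimal sketch of the corrected sequence; my_launch_tx() is an illustrative wrapper, everything else is the stock dmaengine API:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int my_launch_tx(struct dma_chan *chan, struct scatterlist *sg,
			unsigned int sg_len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* memory-to-device transfer: DMA_MEM_TO_DEV, not DMA_TO_DEVICE */
	desc = dmaengine_prep_slave_sg(chan, sg, sg_len, DMA_MEM_TO_DEV,
				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!desc)
		return -EINVAL;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return cookie;	/* the cookie encodes a negative errno */

	dma_async_issue_pending(chan);
	return 0;
}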